==> home/zuul/zuul-output/logs/controller/validate_deployment.log <==
Now using project "service-telemetry" on server "https://api.crc.testing:6443".
* [info] Waiting for QDR deployment to complete
Error from server (NotFound): deployments.apps "default-interconnect" not found
Error from server (NotFound): deployments.apps "default-interconnect" not found
Error from server (NotFound): deployments.apps "default-interconnect" not found
Error from server (NotFound): deployments.apps "default-interconnect" not found
Error from server (NotFound): deployments.apps "default-interconnect" not found
Waiting for deployment "default-interconnect" rollout to finish: 0 of 1 updated replicas are available...
deployment "default-interconnect" successfully rolled out
* [info] Waiting for prometheus deployment to complete
Error from server (NotFound): statefulsets.apps "prometheus-default" not found
Waiting for 1 pods to be ready...
statefulset rolling update complete 1 pods at revision prometheus-default-5f8884584d...
* [info] Waiting for alertmanager deployment to complete
statefulset rolling update complete 1 pods at revision alertmanager-default-698b6c7ddf...
* [info] Waiting for smart-gateway deployment to complete
Waiting for deployment "default-cloud1-coll-meter-smartgateway" rollout to finish: 0 of 1 updated replicas are available...
deployment "default-cloud1-coll-meter-smartgateway" successfully rolled out
Waiting for deployment "default-cloud1-ceil-meter-smartgateway" rollout to finish: 0 of 1 updated replicas are available...
deployment "default-cloud1-ceil-meter-smartgateway" successfully rolled out
deployment "default-cloud1-sens-meter-smartgateway" successfully rolled out
* [info] Waiting for all pods to show Ready/Complete
default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk   1/2   NotReady           0             15s
default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk   1/2   CrashLoopBackOff   1 (2s ago)    19s
default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v   2/3   CrashLoopBackOff   1 (3s ago)    32s
default-cloud1-coll-event-smartgateway-d956b4648-jwkwn    1/2   CrashLoopBackOff   1 (3s ago)    20s
default-cloud1-coll-meter-smartgateway-787645d794-4zrzx   2/3   CrashLoopBackOff   1 (3s ago)    34s
default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp   2/3   CrashLoopBackOff   1 (3s ago)    28s
default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk   1/2   CrashLoopBackOff   1 (5s ago)    22s
default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v   2/3   CrashLoopBackOff   1 (6s ago)    35s
default-cloud1-coll-event-smartgateway-d956b4648-jwkwn    1/2   CrashLoopBackOff   1 (6s ago)    23s
default-cloud1-coll-meter-smartgateway-787645d794-4zrzx   2/3   CrashLoopBackOff   1 (6s ago)    37s
default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp   2/3   CrashLoopBackOff   1 (6s ago)    31s
default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk   1/2   CrashLoopBackOff   1 (8s ago)    25s
default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v   2/3   CrashLoopBackOff   1 (9s ago)    38s
default-cloud1-coll-event-smartgateway-d956b4648-jwkwn    1/2   CrashLoopBackOff   1 (9s ago)    26s
default-cloud1-coll-meter-smartgateway-787645d794-4zrzx   2/3   CrashLoopBackOff   1 (9s ago)    40s
default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp   2/3   CrashLoopBackOff   1 (9s ago)    34s
default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk   1/2   CrashLoopBackOff   1 (11s ago)   28s
default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v   2/3   CrashLoopBackOff   1 (12s ago)   41s
default-cloud1-coll-event-smartgateway-d956b4648-jwkwn    1/2   CrashLoopBackOff   1 (12s ago)   29s
default-cloud1-coll-meter-smartgateway-787645d794-4zrzx   2/3   CrashLoopBackOff   1 (12s ago)   43s
default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp   2/3   CrashLoopBackOff   1 (12s ago)   37s
default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk   1/2   CrashLoopBackOff   1 (14s ago)   31s
default-cloud1-coll-event-smartgateway-d956b4648-jwkwn    1/2   CrashLoopBackOff   1 (15s ago)   32s
default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp   2/3   CrashLoopBackOff   1 (15s ago)   40s
* [info] CI Build complete. You can now run tests.
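The repeated "Error from server (NotFound)" lines followed by rollout output above are what a retry loop around `oc rollout status` produces while a resource has not been created yet. A minimal sketch of such a wait, assuming the namespace and deployment name seen in this log (the loop shape and timeout are illustrative assumptions, not the job's actual script):

```bash
#!/usr/bin/env bash
# Minimal sketch: wait for a Deployment to exist, then for its rollout to finish.
# Names, sleep interval, and timeout are assumptions for illustration only.
set -euo pipefail

NAMESPACE=service-telemetry
DEPLOYMENT=default-interconnect

# Retry until the Deployment object exists; while it does not, `oc get`
# prints the NotFound errors seen in the log.
until oc -n "$NAMESPACE" get deployment "$DEPLOYMENT" >/dev/null 2>&1; do
  sleep 10
done

# Block until all updated replicas are available.
oc -n "$NAMESPACE" rollout status deployment "$DEPLOYMENT" --timeout=300s
```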
==> home/zuul/zuul-output/logs/controller/smoketest.log <==
Already on project "service-telemetry" on server "https://api.crc.testing:6443".
*** [INFO] Working in project service-telemetry
*** [INFO] Getting ElasticSearch authentication password
*** [INFO] Getting Prometheus authentication token
*** [INFO] Creating configmaps...
Error from server (NotFound): configmaps "stf-smoketest-healthcheck-log" not found
Error from server (NotFound): configmaps "stf-smoketest-collectd-config" not found
Error from server (NotFound): configmaps "stf-smoketest-sensubility-config" not found
Error from server (NotFound): configmaps "stf-smoketest-collectd-entrypoint-script" not found
Error from server (NotFound): configmaps "stf-smoketest-ceilometer-publisher" not found
Error from server (NotFound): configmaps "stf-smoketest-ceilometer-entrypoint-script" not found
Error from server (NotFound): jobs.batch "stf-smoketest" not found
configmap/stf-smoketest-healthcheck-log created
configmap/stf-smoketest-collectd-config created
configmap/stf-smoketest-sensubility-config created
configmap/stf-smoketest-collectd-entrypoint-script created
configmap/stf-smoketest-ceilometer-publisher created
configmap/stf-smoketest-ceilometer-entrypoint-script created
*** [INFO] Waiting for QDR password upgrade
*** [INFO] Creating Mock OSP Metrics QDR...
Error from server (NotFound): pods "qdr-test" not found
Error from server (NotFound): services "qdr-test" not found
Error from server (NotFound): configmaps "qdr-test-config" not found
configmap/qdr-test-config created
Warning: would violate PodSecurity "restricted:latest": runAsNonRoot != true (pod or container "qdr" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container "qdr" must set securityContext.seccompProfile.type to "RuntimeDefault" or "Localhost")
pod/qdr-test created
service/qdr-test created
* [INFO] Waiting for OSP Metrics QDR pod to be Running
pod/qdr-test condition met
*** [INFO] Creating smoketest jobs...
No resources found
job.batch/stf-smoketest-smoke1 created
*** [INFO] Triggering an alertmanager notification...
*** [INFO] Create alert
No resources found
Warning: would violate PodSecurity "restricted:latest": allowPrivilegeEscalation != false (container "curl" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container "curl" must set securityContext.capabilities.drop=["ALL"]), runAsNonRoot != true (pod or container "curl" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container "curl" must set securityContext.seccompProfile.type to "RuntimeDefault" or "Localhost")
pod/curl created
pod/curl condition met
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/curl/curl": remote error: tls: internal error
*** [INFO] Waiting to see SNMP trap message in webhook pod
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp": remote error: tls: internal error
*** [INFO] Waiting on job/stf-smoketest-smoke1...
job.batch/stf-smoketest-smoke1 condition met
*** [INFO] Checking that the qdr certificate has a long expiry
*** [INFO] Showing oc get all...
Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+
NAME                                                                  READY   STATUS      RESTARTS        AGE
pod/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq   0/1     Completed   0               5m29s
pod/alertmanager-default-0                                            3/3     Running     0               4m5s
pod/curl                                                              0/1     Completed   0               2m42s
pod/default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk           2/2     Running     2 (3m18s ago)   3m35s
pod/default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v           3/3     Running     2 (3m19s ago)   3m48s
pod/default-cloud1-coll-event-smartgateway-d956b4648-jwkwn            2/2     Running     2 (3m19s ago)   3m36s
pod/default-cloud1-coll-meter-smartgateway-787645d794-4zrzx           3/3     Running     2 (3m19s ago)   3m50s
pod/default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp           3/3     Running     2 (3m19s ago)   3m44s
pod/default-interconnect-55bf8d5cb-rwr2k                              1/1     Running     0               3m22s
pod/default-snmp-webhook-6774d8dfbc-75fxn                             1/1     Running     0               4m8s
pod/elastic-operator-c9c86658-4qchz                                   1/1     Running     0               6m40s
pod/elasticsearch-es-default-0                                        1/1     Running     0               6m14s
pod/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx   0/1     Completed   0               5m28s
pod/infrawatch-operators-tv99j                                        1/1     Running     0               5m44s
pod/interconnect-operator-78b9bd8798-456sz                            1/1     Running     0               5m17s
pod/prometheus-default-0                                              3/3     Running     0               4m19s
pod/qdr-test                                                          1/1     Running     0               2m51s
pod/service-telemetry-operator-79647f8775-zs8hl                       1/1     Running     0               5m15s
pod/smart-gateway-operator-5cd794ff55-w8r45                           1/1     Running     0               5m14s
pod/stf-smoketest-smoke1-pbhxq                                        0/2     Completed   0               2m42s

NAME                                       TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                                AGE
service/alertmanager-operated              ClusterIP   None           <none>        9093/TCP,9094/TCP,9094/UDP             4m10s
service/default-alertmanager-proxy         ClusterIP   10.217.4.38    <none>        9095/TCP                               4m1s
service/default-cloud1-ceil-meter          ClusterIP   10.217.4.132   <none>        8083/TCP                               3m47s
service/default-cloud1-coll-meter          ClusterIP   10.217.4.61    <none>        8083/TCP                               3m49s
service/default-cloud1-sens-meter          ClusterIP   10.217.5.150   <none>        8083/TCP                               3m42s
service/default-interconnect               ClusterIP   10.217.4.193   <none>        5672/TCP,55671/TCP,5671/TCP,5673/TCP   4m28s
service/default-prometheus-proxy           ClusterIP   10.217.4.221   <none>        9092/TCP                               4m17s
service/default-prometheus-webhook-snmp    ClusterIP   10.217.4.247   <none>        9099/TCP                               4m7s
service/elastic-operator-service           ClusterIP   10.217.5.72    <none>        443/TCP                                6m40s
service/elasticsearch-es-default           ClusterIP   None           <none>        9200/TCP                               6m15s
service/elasticsearch-es-http              ClusterIP   10.217.4.122   <none>        9200/TCP                               6m15s
service/elasticsearch-es-internal-http     ClusterIP   10.217.4.189   <none>        9200/TCP                               6m15s
service/elasticsearch-es-transport         ClusterIP   None           <none>        9300/TCP                               6m15s
service/infrawatch-operators               ClusterIP   10.217.4.230   <none>        50051/TCP                              5m49s
service/prometheus-operated                ClusterIP   None           <none>        9090/TCP                               4m19s
service/qdr-test                           ClusterIP   10.217.4.87    <none>        5672/TCP                               2m51s

NAME                                                     READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/default-cloud1-ceil-event-smartgateway   1/1     1            1           3m35s
deployment.apps/default-cloud1-ceil-meter-smartgateway   1/1     1            1           3m48s
deployment.apps/default-cloud1-coll-event-smartgateway   1/1     1            1           3m36s
deployment.apps/default-cloud1-coll-meter-smartgateway   1/1     1            1           3m50s
deployment.apps/default-cloud1-sens-meter-smartgateway   1/1     1            1           3m44s
deployment.apps/default-interconnect                     1/1     1            1           4m28s
deployment.apps/default-snmp-webhook                     1/1     1            1           4m8s
deployment.apps/elastic-operator                         1/1     1            1           6m40s
deployment.apps/interconnect-operator                    1/1     1            1           5m17s
deployment.apps/service-telemetry-operator               1/1     1            1           5m15s
deployment.apps/smart-gateway-operator                   1/1     1            1           5m14s
NAME                                                                DESIRED   CURRENT   READY   AGE
replicaset.apps/default-cloud1-ceil-event-smartgateway-65cf5f4bb8   1         1         1       3m35s
replicaset.apps/default-cloud1-ceil-meter-smartgateway-545b564d9f   1         1         1       3m48s
replicaset.apps/default-cloud1-coll-event-smartgateway-d956b4648    1         1         1       3m36s
replicaset.apps/default-cloud1-coll-meter-smartgateway-787645d794   1         1         1       3m50s
replicaset.apps/default-cloud1-sens-meter-smartgateway-66d5b7c5fc   1         1         1       3m44s
replicaset.apps/default-interconnect-55bf8d5cb                      1         1         1       4m28s
replicaset.apps/default-snmp-webhook-6774d8dfbc                     1         1         1       4m8s
replicaset.apps/elastic-operator-c9c86658                           1         1         1       6m40s
replicaset.apps/interconnect-operator-78b9bd8798                    1         1         1       5m17s
replicaset.apps/service-telemetry-operator-79647f8775               1         1         1       5m15s
replicaset.apps/smart-gateway-operator-5cd794ff55                   1         1         1       5m14s

NAME                                        READY   AGE
statefulset.apps/alertmanager-default       1/1     4m10s
statefulset.apps/elasticsearch-es-default   1/1     6m15s
statefulset.apps/prometheus-default         1/1     4m19s

NAME                                                                        STATUS     COMPLETIONS   DURATION   AGE
job.batch/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73   Complete   1/1           9s         5m29s
job.batch/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199   Complete   1/1           8s         5m28s
job.batch/stf-smoketest-smoke1                                              Complete   1/1           50s        2m42s

NAME                                                        HOST/PORT                                                        PATH   SERVICES                     PORT   TERMINATION         WILDCARD
route.route.openshift.io/default-alertmanager-proxy         default-alertmanager-proxy-service-telemetry.apps-crc.testing          default-alertmanager-proxy   web    reencrypt/Redirect   None
route.route.openshift.io/default-interconnect-5671          default-interconnect-5671-service-telemetry.apps-crc.testing           default-interconnect         5671   passthrough/None     None
route.route.openshift.io/default-prometheus-proxy           default-prometheus-proxy-service-telemetry.apps-crc.testing            default-prometheus-proxy     web    reencrypt/Redirect   None
*** [INFO] Showing scrapeconfigs...
apiVersion: v1
items:
- apiVersion: monitoring.rhobs/v1alpha1
  kind: ScrapeConfig
  metadata:
    creationTimestamp: "2025-12-08T17:58:03Z"
    generation: 1
    labels:
      app: smart-gateway
    name: default-cloud1-ceil-meter
    namespace: service-telemetry
    ownerReferences:
    - apiVersion: infra.watch/v1beta1
      kind: ServiceTelemetry
      name: default
      uid: 0e7ba06d-5c93-4017-9e15-cd56d5b04d83
    resourceVersion: "45282"
    uid: b61ecd44-75f9-4910-a52c-5b6d712736b1
  spec:
    authorization:
      credentials:
        key: token
        name: prometheus-stf-token
      type: bearer
    metricRelabelings:
    - action: labeldrop
      regex: pod
    - action: labeldrop
      regex: namespace
    - action: labeldrop
      regex: instance
    - action: replace
      regex: .*/(.*)$
      replacement: $1
      sourceLabels:
      - job
      targetLabel: service
    - action: labeldrop
      regex: job
    - action: labeldrop
      regex: publisher
    - action: replace
      replacement: sg-core
      targetLabel: container
    - action: replace
      replacement: prom-https
      targetLabel: endpoint
    scheme: HTTPS
    scrapeInterval: 30s
    staticConfigs:
    - targets:
      - default-cloud1-ceil-meter.service-telemetry.svc:8083
    tlsConfig:
      ca:
        configMap:
          key: service-ca.crt
          name: serving-certs-ca-bundle
      serverName: default-cloud1-ceil-meter.service-telemetry.svc
- apiVersion: monitoring.rhobs/v1alpha1
  kind: ScrapeConfig
  metadata:
    creationTimestamp: "2025-12-08T17:57:59Z"
    generation: 1
    labels:
      app: smart-gateway
    name: default-cloud1-coll-meter
    namespace: service-telemetry
    ownerReferences:
    - apiVersion: infra.watch/v1beta1
      kind: ServiceTelemetry
      name: default
      uid: 0e7ba06d-5c93-4017-9e15-cd56d5b04d83
    resourceVersion: "45237"
    uid: 91e4a284-943a-4727-9fbf-986a1b7e9d03
  spec:
    authorization:
      credentials:
        key: token
        name: prometheus-stf-token
      type: bearer
    metricRelabelings:
    - action: labeldrop
      regex: pod
    - action: labeldrop
      regex: namespace
    - action: labeldrop
      regex: instance
    - action: replace
      regex: .*/(.*)$
      replacement: $1
      sourceLabels:
      - job
      targetLabel: service
    - action: labeldrop
      regex: job
    - action: labeldrop
      regex: publisher
    - action: replace
      replacement: sg-core
      targetLabel: container
    - action: replace
      replacement: prom-https
      targetLabel: endpoint
    scheme: HTTPS
    scrapeInterval: 30s
    staticConfigs:
    - targets:
      - default-cloud1-coll-meter.service-telemetry.svc:8083
    tlsConfig:
      ca:
        configMap:
          key: service-ca.crt
          name: serving-certs-ca-bundle
      serverName: default-cloud1-coll-meter.service-telemetry.svc
- apiVersion: monitoring.rhobs/v1alpha1
  kind: ScrapeConfig
  metadata:
    creationTimestamp: "2025-12-08T17:58:07Z"
    generation: 1
    labels:
      app: smart-gateway
    name: default-cloud1-sens-meter
    namespace: service-telemetry
    ownerReferences:
    - apiVersion: infra.watch/v1beta1
      kind: ServiceTelemetry
      name: default
      uid: 0e7ba06d-5c93-4017-9e15-cd56d5b04d83
    resourceVersion: "45327"
    uid: ef90f4a7-daa4-410d-990c-92c4bfb11a20
  spec:
    authorization:
      credentials:
        key: token
        name: prometheus-stf-token
      type: bearer
    metricRelabelings:
    - action: labeldrop
      regex: pod
    - action: labeldrop
      regex: namespace
    - action: labeldrop
      regex: instance
    - action: replace
      regex: .*/(.*)$
      replacement: $1
      sourceLabels:
      - job
      targetLabel: service
    - action: labeldrop
      regex: job
    - action: labeldrop
      regex: publisher
    - action: replace
      replacement: sg-core
      targetLabel: container
    - action: replace
      replacement: prom-https
      targetLabel: endpoint
    scheme: HTTPS
    scrapeInterval: 30s
    staticConfigs:
    - targets:
      - default-cloud1-sens-meter.service-telemetry.svc:8083
    tlsConfig:
      ca:
        configMap:
          key: service-ca.crt
          name: serving-certs-ca-bundle
      serverName: default-cloud1-sens-meter.service-telemetry.svc
kind: List
metadata:
  resourceVersion: ""
*** [INFO] Logs from smoketest containers...
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/stf-smoketest-smoke1-pbhxq/smoketest-collectd": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/stf-smoketest-smoke1-pbhxq/smoketest-ceilometer": remote error: tls: internal error
*** [INFO] Logs from qdr...
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-interconnect-55bf8d5cb-rwr2k/default-interconnect": remote error: tls: internal error
*** [INFO] Logs from smart gateways...
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-meter-smartgateway-787645d794-4zrzx/bridge": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-meter-smartgateway-787645d794-4zrzx/sg-core": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-event-smartgateway-d956b4648-jwkwn/bridge": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-event-smartgateway-d956b4648-jwkwn/sg-core": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v/bridge": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v/sg-core": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk/bridge": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk/sg-core": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp/bridge": remote error: tls: internal error
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp/sg-core": remote error: tls: internal error
*** [INFO] Logs from smart gateway operator...
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/smart-gateway-operator-5cd794ff55-w8r45/operator": remote error: tls: internal error
*** [INFO] Logs from prometheus...
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/prometheus-default-0/prometheus": remote error: tls: internal error
*** [INFO] Logs from elasticsearch...
Defaulted container "elasticsearch" out of: elasticsearch, elastic-internal-init-filesystem (init), elastic-internal-suspend (init)
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/elasticsearch-es-default-0/elasticsearch": remote error: tls: internal error
*** [INFO] Logs from snmp webhook...
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp": remote error: tls: internal error
*** [INFO] Logs from alertmanager...
Error from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/alertmanager-default-0/alertmanager": remote error: tls: internal error
*** [INFO] Cleanup resources...
*** [FAILURE] Smoke test job still not succeeded after 300s
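The 300 s failure above is the kind of result a bounded wait on the smoke-test job produces. A minimal sketch of such a check, assuming the job name and namespace from this log (the real CI script is not shown in these artifacts and may differ):

```bash
#!/usr/bin/env bash
# Minimal sketch: wait up to 300s for the smoke-test job to complete, then dump
# diagnostics on failure. Names and commands are assumptions for illustration.
NAMESPACE=service-telemetry
JOB=stf-smoketest-smoke1

if ! oc -n "$NAMESPACE" wait --for=condition=complete "job/$JOB" --timeout=300s; then
  echo "*** [FAILURE] Smoke test job still not succeeded after 300s"
  oc -n "$NAMESPACE" get pods                                        # overall pod state
  oc -n "$NAMESPACE" logs "job/$JOB" --all-containers=true || true   # may fail when the kubelet log endpoint is unreachable
  exit 1
fi
```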
==> home/zuul/zuul-output/logs/controller/post_oc_get_builds.log <==
*** [INFO] Showing oc get 'builds'
No resources found in service-telemetry namespace.
[INFO] oc get 'builds' -oyaml
apiVersion: v1
items: []
kind: List
metadata:
  resourceVersion: ""

==> home/zuul/zuul-output/logs/controller/post_oc_get_subscriptions.log <==
*** [INFO] Showing oc get 'subscriptions'
NAME                                                                        PACKAGE                                SOURCE                 CHANNEL
amq7-interconnect-operator-1.10.x-redhat-operators-openshift-marketplace   amq7-interconnect-operator             redhat-operators       1.10.x
elasticsearch-eck-operator-certified                                        elasticsearch-eck-operator-certified   certified-operators    stable
service-telemetry-operator                                                  service-telemetry-operator             infrawatch-operators   unstable
smart-gateway-operator-unstable-infrawatch-operators-service-telemetry      smart-gateway-operator                 infrawatch-operators   unstable
[INFO] oc get 'subscriptions' -oyaml
apiVersion: v1
items:
- apiVersion: operators.coreos.com/v1alpha1
  kind: Subscription
  metadata:
    annotations:
      olm.generated-by: install-s8bl7
    creationTimestamp: "2025-12-08T17:56:38Z"
    generation: 1
    labels:
      olm.managed: "true"
      operators.coreos.com/amq7-interconnect-operator.service-telemetry: ""
    name: amq7-interconnect-operator-1.10.x-redhat-operators-openshift-marketplace
    namespace: service-telemetry
    resourceVersion: "44719"
    uid: e71a0482-05b1-4f14-bfbf-b4cc505a9941
  spec:
    channel: 1.10.x
    installPlanApproval: Automatic
    name: amq7-interconnect-operator
    source: redhat-operators
    sourceNamespace: openshift-marketplace
    startingCSV: amq7-interconnect-operator.v1.10.20
  status:
    catalogHealth:
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: certified-operators
        namespace: openshift-marketplace
        resourceVersion: "40638"
        uid: 04c2c69e-a9e9-447b-aff2-c2db7de0ee83
      healthy: true
      lastUpdated: "2025-12-08T17:57:13Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: community-operators
        namespace: openshift-marketplace
        resourceVersion: "40568"
        uid: 88a656bd-c52a-4813-892e-7e3363ba9ac0
      healthy: true
      lastUpdated: "2025-12-08T17:57:13Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: redhat-operators
        namespace: openshift-marketplace
        resourceVersion: "43686"
        uid: ca744265-3ae3-4482-8c3d-b10e28fe1042
      healthy: true
      lastUpdated: "2025-12-08T17:57:13Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: infrawatch-operators
        namespace: service-telemetry
        resourceVersion: "43692"
        uid: c3af8943-d5c5-4768-9a5d-ec7e7c876a75
      healthy: true
      lastUpdated: "2025-12-08T17:57:13Z"
    conditions:
    - lastTransitionTime: "2025-12-08T17:57:16Z"
      message: all available catalogsources are healthy
      reason: AllCatalogSourcesHealthy
      status: "False"
      type: CatalogSourcesUnhealthy
    currentCSV: amq7-interconnect-operator.v1.10.20
    installPlanRef:
      apiVersion: operators.coreos.com/v1alpha1
      kind: InstallPlan
      name: install-s8bl7
      namespace: service-telemetry
      resourceVersion: "43730"
      uid: 46f43d7a-498c-4074-b276-c969ca1ef029
    installedCSV: amq7-interconnect-operator.v1.10.20
    installplan:
      apiVersion: operators.coreos.com/v1alpha1
      kind: InstallPlan
      name: install-s8bl7
      uuid: 46f43d7a-498c-4074-b276-c969ca1ef029
    lastUpdated: "2025-12-08T17:57:16Z"
    state: AtLatestKnown
- apiVersion: operators.coreos.com/v1alpha1
  kind: Subscription
  metadata:
    creationTimestamp: "2025-12-08T17:54:56Z"
    generation: 1
    labels:
      operators.coreos.com/elasticsearch-eck-operator-certified.service-telemetry: ""
    name: elasticsearch-eck-operator-certified
    namespace: service-telemetry
    resourceVersion: "43734"
    uid: ad2ff51a-b7a1-4ec5-b2cd-65a335a7d945
  spec:
    channel: stable
    installPlanApproval: Automatic
    name: elasticsearch-eck-operator-certified
    source: certified-operators
    sourceNamespace: openshift-marketplace
  status:
    catalogHealth:
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: certified-operators
        namespace: openshift-marketplace
        resourceVersion: "40638"
        uid: 04c2c69e-a9e9-447b-aff2-c2db7de0ee83
      healthy: true
      lastUpdated: "2025-12-08T17:56:31Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: community-operators
        namespace: openshift-marketplace
        resourceVersion: "40568"
        uid: 88a656bd-c52a-4813-892e-7e3363ba9ac0
      healthy: true
      lastUpdated: "2025-12-08T17:56:31Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: redhat-operators
        namespace: openshift-marketplace
        resourceVersion: "40688"
        uid: ca744265-3ae3-4482-8c3d-b10e28fe1042
      healthy: true
      lastUpdated: "2025-12-08T17:56:31Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: infrawatch-operators
        namespace: service-telemetry
        resourceVersion: "43578"
        uid: c3af8943-d5c5-4768-9a5d-ec7e7c876a75
      healthy: true
      lastUpdated: "2025-12-08T17:56:31Z"
    conditions:
    - lastTransitionTime: "2025-12-08T17:56:37Z"
      message: all available catalogsources are healthy
      reason: AllCatalogSourcesHealthy
      status: "False"
      type: CatalogSourcesUnhealthy
    currentCSV: elasticsearch-eck-operator-certified.v3.2.0
    installPlanGeneration: 1
    installPlanRef:
      apiVersion: operators.coreos.com/v1alpha1
      kind: InstallPlan
      name: install-sk9l5
      namespace: service-telemetry
      resourceVersion: "41253"
      uid: 82b95176-af50-4b00-9baa-569d7ffc4505
    installedCSV: elasticsearch-eck-operator-certified.v3.2.0
    installplan:
      apiVersion: operators.coreos.com/v1alpha1
      kind: InstallPlan
      name: install-sk9l5
      uuid: 82b95176-af50-4b00-9baa-569d7ffc4505
    lastUpdated: "2025-12-08T17:56:37Z"
    state: AtLatestKnown
- apiVersion: operators.coreos.com/v1alpha1
  kind: Subscription
  metadata:
    creationTimestamp: "2025-12-08T17:56:08Z"
    generation: 1
    labels:
      operators.coreos.com/service-telemetry-operator.service-telemetry: ""
    name: service-telemetry-operator
    namespace: service-telemetry
    resourceVersion: "44702"
    uid: abe3736a-da7e-4bf3-9140-6b97576f80aa
  spec:
    channel: unstable
    installPlanApproval: Automatic
    name: service-telemetry-operator
    source: infrawatch-operators
    sourceNamespace: service-telemetry
  status:
    catalogHealth:
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: certified-operators
        namespace: openshift-marketplace
        resourceVersion: "40638"
        uid: 04c2c69e-a9e9-447b-aff2-c2db7de0ee83
      healthy: true
      lastUpdated: "2025-12-08T17:57:09Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: community-operators
        namespace: openshift-marketplace
        resourceVersion: "40568"
        uid: 88a656bd-c52a-4813-892e-7e3363ba9ac0
      healthy: true
      lastUpdated: "2025-12-08T17:57:09Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: redhat-operators
        namespace: openshift-marketplace
        resourceVersion: "43686"
        uid: ca744265-3ae3-4482-8c3d-b10e28fe1042
      healthy: true
      lastUpdated: "2025-12-08T17:57:09Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: infrawatch-operators
        namespace: service-telemetry
        resourceVersion: "43692"
        uid: c3af8943-d5c5-4768-9a5d-ec7e7c876a75
      healthy: true
      lastUpdated: "2025-12-08T17:57:09Z"
    conditions:
    - lastTransitionTime: "2025-12-08T17:57:12Z"
      message: all available catalogsources are healthy
      reason: AllCatalogSourcesHealthy
      status: "False"
      type: CatalogSourcesUnhealthy
    currentCSV: service-telemetry-operator.v1.5.1765147436
    installPlanGeneration: 2
    installPlanRef:
      apiVersion: operators.coreos.com/v1alpha1
      kind: InstallPlan
      name: install-s8bl7
      namespace: service-telemetry
      resourceVersion: "43727"
      uid: 46f43d7a-498c-4074-b276-c969ca1ef029
    installedCSV: service-telemetry-operator.v1.5.1765147436
    installplan:
      apiVersion: operators.coreos.com/v1alpha1
      kind: InstallPlan
      name: install-s8bl7
      uuid: 46f43d7a-498c-4074-b276-c969ca1ef029
    lastUpdated: "2025-12-08T17:57:12Z"
    state: AtLatestKnown
- apiVersion: operators.coreos.com/v1alpha1
  kind: Subscription
  metadata:
    annotations:
      olm.generated-by: install-s8bl7
    creationTimestamp: "2025-12-08T17:56:38Z"
    generation: 1
    labels:
      olm.managed: "true"
      operators.coreos.com/smart-gateway-operator.service-telemetry: ""
    name: smart-gateway-operator-unstable-infrawatch-operators-service-telemetry
    namespace: service-telemetry
    resourceVersion: "45057"
    uid: 7e0fecdb-1f2b-4b0b-86f7-504e2e657ceb
  spec:
    channel: unstable
    installPlanApproval: Automatic
    name: smart-gateway-operator
    source: infrawatch-operators
    sourceNamespace: service-telemetry
    startingCSV: smart-gateway-operator.v5.0.1765147433
  status:
    catalogHealth:
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: certified-operators
        namespace: openshift-marketplace
        resourceVersion: "40638"
        uid: 04c2c69e-a9e9-447b-aff2-c2db7de0ee83
      healthy: true
      lastUpdated: "2025-12-08T17:57:37Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: community-operators
        namespace: openshift-marketplace
        resourceVersion: "40568"
        uid: 88a656bd-c52a-4813-892e-7e3363ba9ac0
      healthy: true
      lastUpdated: "2025-12-08T17:57:37Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: redhat-operators
        namespace: openshift-marketplace
        resourceVersion: "43686"
        uid: ca744265-3ae3-4482-8c3d-b10e28fe1042
      healthy: true
      lastUpdated: "2025-12-08T17:57:37Z"
    - catalogSourceRef:
        apiVersion: operators.coreos.com/v1alpha1
        kind: CatalogSource
        name: infrawatch-operators
        namespace: service-telemetry
        resourceVersion: "43692"
        uid: c3af8943-d5c5-4768-9a5d-ec7e7c876a75
      healthy: true
      lastUpdated: "2025-12-08T17:57:37Z"
    conditions:
    - lastTransitionTime: "2025-12-08T17:57:39Z"
      message: all available catalogsources are healthy
      reason: AllCatalogSourcesHealthy
      status: "False"
      type: CatalogSourcesUnhealthy
    currentCSV: smart-gateway-operator.v5.0.1765147433
    installPlanRef:
      apiVersion: operators.coreos.com/v1alpha1
      kind: InstallPlan
      name: install-s8bl7
      namespace: service-telemetry
      resourceVersion: "43828"
      uid: 46f43d7a-498c-4074-b276-c969ca1ef029
    installedCSV: smart-gateway-operator.v5.0.1765147433
    installplan:
      apiVersion: operators.coreos.com/v1alpha1
      kind: InstallPlan
      name: install-s8bl7
      uuid: 46f43d7a-498c-4074-b276-c969ca1ef029
    lastUpdated: "2025-12-08T17:57:39Z"
    state: AtLatestKnown
kind: List
metadata:
  resourceVersion: ""

==> home/zuul/zuul-output/logs/controller/post_oc_get_images.log <==
*** [INFO] Showing oc get 'images'
NAME                                                                      IMAGE REFERENCE
sha256:00cf28cf9a6c427962f922855a6cc32692c760764ce2ce7411cf605dd510367f   registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:00cf28cf9a6c427962f922855a6cc32692c760764ce2ce7411cf605dd510367f
sha256:01920073bbd480bd34e2d8e17dced64d342257fa9a263d1843edf1cc45a50a7c
registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:01920073bbd480bd34e2d8e17dced64d342257fa9a263d1843edf1cc45a50a7c sha256:0194269c95cbc1aeb509611e283fd404791a0bc79911532bd9835bb51a19aff6 registry.redhat.io/ubi8/php-80@sha256:0194269c95cbc1aeb509611e283fd404791a0bc79911532bd9835bb51a19aff6 sha256:022488b1bf697b7dd8c393171a3247bef4ea545a9ab828501e72168f2aac9415 registry.access.redhat.com/ubi8/openjdk-8@sha256:022488b1bf697b7dd8c393171a3247bef4ea545a9ab828501e72168f2aac9415 sha256:02c1739b727e3b15a76cdd44f92cf91336d7dd34d5e830b2fe4ab3f2af48fe60 registry.redhat.io/ubi9/php-82@sha256:02c1739b727e3b15a76cdd44f92cf91336d7dd34d5e830b2fe4ab3f2af48fe60 sha256:04b6d6c9d05857ca39c360caf8698f3081360eeffe85da2eaab3e7e91040030b registry.redhat.io/ubi9/python-311@sha256:04b6d6c9d05857ca39c360caf8698f3081360eeffe85da2eaab3e7e91040030b sha256:0669a28577b41bb05c67492ef18a1d48a299ac54d1500df8f9f8f760ce4be24b registry.access.redhat.com/ubi8/openjdk-11@sha256:0669a28577b41bb05c67492ef18a1d48a299ac54d1500df8f9f8f760ce4be24b sha256:06bbbf9272d5c5161f444388593e9bd8db793d8a2d95a50b429b3c0301fafcdd registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:06bbbf9272d5c5161f444388593e9bd8db793d8a2d95a50b429b3c0301fafcdd sha256:07addbabcfd72212a82efce053a70362a06925ee1522c4dd783be878ffad46cb registry.redhat.io/ubi7/go-toolset@sha256:07addbabcfd72212a82efce053a70362a06925ee1522c4dd783be878ffad46cb sha256:0a1a889dcfb66dfe73d30f6a7a18dace8796e66e9f2203de97955500ad76f4aa quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0a1a889dcfb66dfe73d30f6a7a18dace8796e66e9f2203de97955500ad76f4aa sha256:0a3a55052f6e8df1ea48de0c429c39e072b6aa8818250ccee634f96acacfd7c7 registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:0a3a55052f6e8df1ea48de0c429c39e072b6aa8818250ccee634f96acacfd7c7 sha256:0ab9d7fe68c5aef87b591e40bd6be7479e913798348f86ac43d7c0357794bb3f registry.redhat.io/ubi8/nodejs-20@sha256:0ab9d7fe68c5aef87b591e40bd6be7479e913798348f86ac43d7c0357794bb3f sha256:0d5551af0b9188a88eb39bf9745f992b1ab2ce4839d4a0555b59c58b5f3f6412 registry.redhat.io/ubi9/nodejs-20@sha256:0d5551af0b9188a88eb39bf9745f992b1ab2ce4839d4a0555b59c58b5f3f6412 sha256:0eea1d20aaa26041edf26b925fb204d839e5b93122190191893a0299b2e1b589 registry.access.redhat.com/ubi8/openjdk-17@sha256:0eea1d20aaa26041edf26b925fb204d839e5b93122190191893a0299b2e1b589 sha256:13577236b039ed11e9f1070f884e9836e731944575de2ee59b290b05e08ad5f8 registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:13577236b039ed11e9f1070f884e9836e731944575de2ee59b290b05e08ad5f8 sha256:144941f6745ed291d11d94c66037cfbb1d7cd9cf28db4f2234d9265efe767eff registry.redhat.io/fuse7/fuse-karaf-openshift-jdk11-rhel8@sha256:144941f6745ed291d11d94c66037cfbb1d7cd9cf28db4f2234d9265efe767eff sha256:146789aaa36b11c2194c3a1cd1bbc9d56016a67cf2791401b59c339983dd2a5e registry.redhat.io/rhscl/mariadb-105-rhel7@sha256:146789aaa36b11c2194c3a1cd1bbc9d56016a67cf2791401b59c339983dd2a5e sha256:14de89e89efc97aee3b50141108b7833708c3a93ad90bf89940025ab5267ba86 registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:14de89e89efc97aee3b50141108b7833708c3a93ad90bf89940025ab5267ba86 sha256:15ca7a76fdcca80b800ea420857782badd4960d99d120322c255462c098ed641 registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:15ca7a76fdcca80b800ea420857782badd4960d99d120322c255462c098ed641 sha256:16aacc57365922ce9329f1306153e444a02c4883b5b6ea648b0e47ef286df396 registry.access.redhat.com/ubi8/dotnet-80-runtime@sha256:16aacc57365922ce9329f1306153e444a02c4883b5b6ea648b0e47ef286df396 
sha256:1b7ca459c309d850b031f63618176b460fa348899201a6a82a5a42c75d09563d registry.redhat.io/fuse7/fuse-java-openshift-rhel8@sha256:1b7ca459c309d850b031f63618176b460fa348899201a6a82a5a42c75d09563d sha256:1ce541f489f72d4b354b96e9ad8b8f4e27099534d1cf5cebdfd505a1825f6545 registry.redhat.io/ubi10/python-312-minimal@sha256:1ce541f489f72d4b354b96e9ad8b8f4e27099534d1cf5cebdfd505a1825f6545 sha256:1d526f710f53ea2805441bbd04057242715d6fe2c91257b1ccd53c7a72c499be registry.redhat.io/fuse7/fuse-java-openshift@sha256:1d526f710f53ea2805441bbd04057242715d6fe2c91257b1ccd53c7a72c499be sha256:1d68b58a73f4cf15fcd886ab39fddf18be923b52b24cb8ec3ab1da2d3e9bd5f6 registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:1d68b58a73f4cf15fcd886ab39fddf18be923b52b24cb8ec3ab1da2d3e9bd5f6 sha256:1ef62ca43bc7b5be382470644797926cecd419f1496a471cc230d71147b8f878 registry.redhat.io/ubi8/python-39@sha256:1ef62ca43bc7b5be382470644797926cecd419f1496a471cc230d71147b8f878 sha256:1fe8afafb4a8cfe086dd3c3f59fc717ccc8924e570deaed38f4751962aed4211 registry.redhat.io/fuse7/fuse-java-openshift@sha256:1fe8afafb4a8cfe086dd3c3f59fc717ccc8924e570deaed38f4751962aed4211 sha256:2254dc2f421f496b504aafbbd8ea37e660652c4b6b4f9a0681664b10873be7fe registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:2254dc2f421f496b504aafbbd8ea37e660652c4b6b4f9a0681664b10873be7fe sha256:229ee7b88c5f700c95d557d0b37b8f78dbb6b125b188c3bf050cfdb32aec7962 registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:229ee7b88c5f700c95d557d0b37b8f78dbb6b125b188c3bf050cfdb32aec7962 sha256:230cd475733320b85bef99f7634b0f73ba82e323865d1e46be6d76fe03c337e8 registry.redhat.io/fuse7/fuse-eap-openshift@sha256:230cd475733320b85bef99f7634b0f73ba82e323865d1e46be6d76fe03c337e8 sha256:2406759804b3f8c4ea6a6ba582a1ab82be48362a8a815a82bb4aa04bf81e86e3 registry.redhat.io/jboss-eap-7/eap-xp4-openjdk11-runtime-openshift-rhel8@sha256:2406759804b3f8c4ea6a6ba582a1ab82be48362a8a815a82bb4aa04bf81e86e3 sha256:25f6a298e4505c38e7220e8a654852de3822d40a99b5f47da657251f31c3ffc3 registry.redhat.io/jboss-webserver-5/jws57-openjdk11-openshift-rhel8@sha256:25f6a298e4505c38e7220e8a654852de3822d40a99b5f47da657251f31c3ffc3 sha256:26c16d866c58f4d47901f8a009d39b5f2ddc76a5d7b2cb5b0065ed8cd8eea1b6 registry.access.redhat.com/ubi8/dotnet-80@sha256:26c16d866c58f4d47901f8a009d39b5f2ddc76a5d7b2cb5b0065ed8cd8eea1b6 sha256:2788719b2da9f43a904d670b43cf29445a687a1ad6eb96e4a052e15cd3188a0f registry.redhat.io/ubi8/php-82@sha256:2788719b2da9f43a904d670b43cf29445a687a1ad6eb96e4a052e15cd3188a0f sha256:27901936ab8866d1d1293479bb3448cb3ff98cdcaa8242926a9c49896a864c2f registry.redhat.io/ubi9/nginx-124@sha256:27901936ab8866d1d1293479bb3448cb3ff98cdcaa8242926a9c49896a864c2f sha256:2861514e125903261aa0004883a7f7aeeb4c189b2d0d175372d1edc111942eda registry.redhat.io/ubi9/go-toolset@sha256:2861514e125903261aa0004883a7f7aeeb4c189b2d0d175372d1edc111942eda sha256:29d831cb1f5fe839ab6e8c59d57f695094938a9f97e071bdbc854e54a90ecb94 registry.redhat.io/fuse7/fuse-java-openshift@sha256:29d831cb1f5fe839ab6e8c59d57f695094938a9f97e071bdbc854e54a90ecb94 sha256:2ae058ee7239213fb495491112be8cc7e6d6661864fd399deb27f23f50f05eb4 registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:2ae058ee7239213fb495491112be8cc7e6d6661864fd399deb27f23f50f05eb4 sha256:2c7e510949ed2c19504d0aaed1ad891f8aa03cd649d04359cc6a2cdffc40b594 registry.redhat.io/ubi9/nginx-122@sha256:2c7e510949ed2c19504d0aaed1ad891f8aa03cd649d04359cc6a2cdffc40b594 sha256:2cee344e4cfcfdc9a117fd82baa6f2d5daa7eeed450e02cd5d5554b424410439 
registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:2cee344e4cfcfdc9a117fd82baa6f2d5daa7eeed450e02cd5d5554b424410439 sha256:2ec1207cc75b74c26998f7c3386199a30213b37a05fab6e41a713d4a74bc4d5a registry.redhat.io/rhel9/mysql-80@sha256:2ec1207cc75b74c26998f7c3386199a30213b37a05fab6e41a713d4a74bc4d5a sha256:2f59ad75b66a3169b0b03032afb09aa3cfa531dbd844e3d3a562246e7d09c282 registry.access.redhat.com/ubi8/openjdk-8@sha256:2f59ad75b66a3169b0b03032afb09aa3cfa531dbd844e3d3a562246e7d09c282 sha256:301b093ae4fbc18f7d9d11b803d9da4220dad4556202a8de2f04377ff87c2f4d registry.redhat.io/jboss-webserver-5/jws57-openjdk8-openshift-rhel8@sha256:301b093ae4fbc18f7d9d11b803d9da4220dad4556202a8de2f04377ff87c2f4d sha256:306ce79b5647eb627aebbd6a9c956f9cd09c24ef7b12e42c1de6ba89f8fd8121 registry.redhat.io/rhscl/postgresql-12-rhel7@sha256:306ce79b5647eb627aebbd6a9c956f9cd09c24ef7b12e42c1de6ba89f8fd8121 sha256:32a5e806bd88b40568d46864fd313541498e38fabfc5afb5f3bdfe052c4b4c5f registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:32a5e806bd88b40568d46864fd313541498e38fabfc5afb5f3bdfe052c4b4c5f sha256:3375ec169d274278da56d1401c5c1285f7d2812ea0bde2ac9ad9652b69f80893 registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:3375ec169d274278da56d1401c5c1285f7d2812ea0bde2ac9ad9652b69f80893 sha256:33d4dff40514e91d86b42e90b24b09a5ca770d9f67657c936363d348cd33d188 registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:33d4dff40514e91d86b42e90b24b09a5ca770d9f67657c936363d348cd33d188 sha256:38427fd30565b66ec512fb8d86bf442a7ac4a100d44332e8c42b472fdf821db0 registry.redhat.io/ubi8/python-36@sha256:38427fd30565b66ec512fb8d86bf442a7ac4a100d44332e8c42b472fdf821db0 sha256:3843651d85087f9f19c0047f3b0c09e41f241946867d4a78acfda37ca0a405e2 quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3843651d85087f9f19c0047f3b0c09e41f241946867d4a78acfda37ca0a405e2 sha256:38c7e4f7dea04bb536f05d78e0107ebc2a3607cf030db7f5c249f13ce1f52d59 registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:38c7e4f7dea04bb536f05d78e0107ebc2a3607cf030db7f5c249f13ce1f52d59 sha256:3b94ccfa422b8ba0014302a3cfc6916b69f0f5a9dfd757b6704049834d4ff0ae registry.access.redhat.com/ubi8/openjdk-17@sha256:3b94ccfa422b8ba0014302a3cfc6916b69f0f5a9dfd757b6704049834d4ff0ae sha256:3bc55cb1bafdd281a784ec7950c8e95914079522f152f642e8172869e83b4585 registry.redhat.io/ocp-tools-4/jenkins-rhel8@sha256:3bc55cb1bafdd281a784ec7950c8e95914079522f152f642e8172869e83b4585 sha256:3f00540ce2a3a01d2a147a7d73825fe78697be213a050bd09edae36266d6bc40 registry.access.redhat.com/ubi8/openjdk-11@sha256:3f00540ce2a3a01d2a147a7d73825fe78697be213a050bd09edae36266d6bc40 sha256:3f01daca201d91f4989f8ffe80625d2e08fc0e69f241a7359d30c15cc7c9a419 registry.redhat.io/ubi8/openjdk-17@sha256:3f01daca201d91f4989f8ffe80625d2e08fc0e69f241a7359d30c15cc7c9a419 sha256:3fdd215cd56a7bfe500482c73271dc1e79bf90457a0f245e6f50d3d63ca4da7c registry.redhat.io/ubi9/nodejs-20-minimal@sha256:3fdd215cd56a7bfe500482c73271dc1e79bf90457a0f245e6f50d3d63ca4da7c sha256:403c4481d100edee6355e3defc17cec304db6f510b0c953c298e171b16dbd8bf registry.redhat.io/jboss-eap-7/eap74-openjdk8-openshift-rhel7@sha256:403c4481d100edee6355e3defc17cec304db6f510b0c953c298e171b16dbd8bf sha256:4100f9633dd7533a6ab847ea2b666de21bbb84baf070945a3c142cb019cd9a5f registry.redhat.io/ubi9/python-312@sha256:4100f9633dd7533a6ab847ea2b666de21bbb84baf070945a3c142cb019cd9a5f sha256:421d1f6a10e263677b7687ccea8e4a59058e2e3c80585505eec9a9c2e6f9f40e 
registry.redhat.io/redhat-openjdk-18/openjdk18-openshift@sha256:421d1f6a10e263677b7687ccea8e4a59058e2e3c80585505eec9a9c2e6f9f40e sha256:425e2c7c355bea32be238aa2c7bdd363b6ab3709412bdf095efe28a8f6c07d84 registry.access.redhat.com/ubi8/openjdk-11@sha256:425e2c7c355bea32be238aa2c7bdd363b6ab3709412bdf095efe28a8f6c07d84 sha256:431753c8a6a8541fdc0edd3385b2c765925d244fdd2347d2baa61303789696be registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:431753c8a6a8541fdc0edd3385b2c765925d244fdd2347d2baa61303789696be sha256:4361e4a098acb1d1cbd79b6fb1e67a891949e65cdc40e286a8e5da9bfb7fa332 registry.redhat.io/fuse7/fuse-java-openshift@sha256:4361e4a098acb1d1cbd79b6fb1e67a891949e65cdc40e286a8e5da9bfb7fa332 sha256:4396f6b4629ba45fe23c13c91aaa64427e957b15841bc65c84537763f00bcbe0 quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4396f6b4629ba45fe23c13c91aaa64427e957b15841bc65c84537763f00bcbe0 sha256:44f6258460b45f78d229e9a3865e7481285d268460760710f69f951ed2b41aa5 registry.redhat.io/ubi9/perl-532@sha256:44f6258460b45f78d229e9a3865e7481285d268460760710f69f951ed2b41aa5 sha256:46a4e73ddb085d1f36b39903ea13ba307bb958789707e9afde048764b3e3cae2 registry.access.redhat.com/ubi8/openjdk-17@sha256:46a4e73ddb085d1f36b39903ea13ba307bb958789707e9afde048764b3e3cae2 sha256:496e23be70520863bce6f7cdc54d280aca2c133d06e992795c4dcbde1a9dd1ab registry.access.redhat.com/ubi8/openjdk-8@sha256:496e23be70520863bce6f7cdc54d280aca2c133d06e992795c4dcbde1a9dd1ab sha256:4a69ef73f4b6afb2819630baf5d169a038ed4def330a44534926aa933cbbd7e5 registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:4a69ef73f4b6afb2819630baf5d169a038ed4def330a44534926aa933cbbd7e5 sha256:4b4e59365194340a2a68d6720b35f4f3434c75c7d504a9e785fb056f7e4212e7 registry.redhat.io/rhel9/postgresql-13@sha256:4b4e59365194340a2a68d6720b35f4f3434c75c7d504a9e785fb056f7e4212e7 sha256:4f35566977c35306a8f2102841ceb7fa10a6d9ac47c079131caed5655140f9b2 registry.access.redhat.com/ubi8/openjdk-21@sha256:4f35566977c35306a8f2102841ceb7fa10a6d9ac47c079131caed5655140f9b2 sha256:4ffd7ccbe8ff0aa2e09e1c8a72410aadc721ed3ed227890f03ce3f8aa2b33700 registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:4ffd7ccbe8ff0aa2e09e1c8a72410aadc721ed3ed227890f03ce3f8aa2b33700 sha256:50729a37cfd9eeb05865038eef01b6a2baa92e2f12fe429de3f43d85ef8824b5 registry.redhat.io/ocp-tools-4/jenkins-agent-base-rhel8@sha256:50729a37cfd9eeb05865038eef01b6a2baa92e2f12fe429de3f43d85ef8824b5 sha256:50752ad94cb38e775d2a65fdc43c3249d6d24d87d15411af7b1525aa8a934277 registry.redhat.io/ubi8/python-312@sha256:50752ad94cb38e775d2a65fdc43c3249d6d24d87d15411af7b1525aa8a934277 sha256:510b3f197ad39f82b828cc8be16e12a193d4dcd6ea27af01ce10c71b87c5cbfc registry.redhat.io/rhel8/postgresql-15@sha256:510b3f197ad39f82b828cc8be16e12a193d4dcd6ea27af01ce10c71b87c5cbfc sha256:52f9b4df3f7833876ee502a6bff2539491db07e060b213b6a0a8fda0c4a881c1 registry.redhat.io/ocp-tools-4/jenkins-agent-base-rhel8@sha256:52f9b4df3f7833876ee502a6bff2539491db07e060b213b6a0a8fda0c4a881c1 sha256:5443b2d3e19c8f540cbe133113a7a4479f3ad98caa1a2a5e6ac48acbe4914b39 registry.redhat.io/fuse7/fuse-eap-openshift@sha256:5443b2d3e19c8f540cbe133113a7a4479f3ad98caa1a2a5e6ac48acbe4914b39 sha256:547a068fddf44318e62b26caa267375dc77acc10ae5dcdd0869a1d60f8b93d5d registry.redhat.io/ubi9/python-312-minimal@sha256:547a068fddf44318e62b26caa267375dc77acc10ae5dcdd0869a1d60f8b93d5d sha256:5522021e9081fa0f0163f75afedb9efaaad25c2a1dde6ce0fab3142ddcc7dd60 registry.redhat.io/rh-sso-7/sso75-openshift-rhel8@sha256:5522021e9081fa0f0163f75afedb9efaaad25c2a1dde6ce0fab3142ddcc7dd60 
sha256:5555a6031cbb8eb09cfeb73cacaabefd5a5824637b047af69b981bc66bdd8b3c registry.redhat.io/fuse7/fuse-java-openshift@sha256:5555a6031cbb8eb09cfeb73cacaabefd5a5824637b047af69b981bc66bdd8b3c sha256:55dc61c31ea50a8f7a45e993a9b3220097974948b5cd1ab3f317e7702e8cb6fc registry.access.redhat.com/ubi8/openjdk-21-runtime@sha256:55dc61c31ea50a8f7a45e993a9b3220097974948b5cd1ab3f317e7702e8cb6fc sha256:561ade81cb6455e0de35573d7ca68ac3f043c86385aac9e274dd53a7a75d3c16 registry.redhat.io/rh-sso-7/sso76-openshift-rhel8@sha256:561ade81cb6455e0de35573d7ca68ac3f043c86385aac9e274dd53a7a75d3c16 sha256:57ab1f0ad24e02143978fc79c5219a02c4d6a5a27225ee5454c85a47839b6ddc registry.redhat.io/ubi8/openjdk-8-runtime@sha256:57ab1f0ad24e02143978fc79c5219a02c4d6a5a27225ee5454c85a47839b6ddc sha256:57fa0cb158aa31193908df27fc707afcfdd4bdaf93b3286f5602d5f804e1927f registry.redhat.io/fuse7/fuse-java-openshift@sha256:57fa0cb158aa31193908df27fc707afcfdd4bdaf93b3286f5602d5f804e1927f sha256:59b88fb0c467ca43bf3c1af6bfd8777577638dd8079f995cdb20b6f4e20ce0b6 registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:59b88fb0c467ca43bf3c1af6bfd8777577638dd8079f995cdb20b6f4e20ce0b6 sha256:5bb11da5abfe6a1bc937f0439f08fa27efc96c438106b6a545be62672a39fc26 registry.redhat.io/fuse7/fuse-eap-openshift@sha256:5bb11da5abfe6a1bc937f0439f08fa27efc96c438106b6a545be62672a39fc26 sha256:5c16087bf81b2285e8815970b44385f7c98612fd6eb1af23b6d89db86004efa3 registry.redhat.io/rhel8/mariadb-103@sha256:5c16087bf81b2285e8815970b44385f7c98612fd6eb1af23b6d89db86004efa3 sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e sha256:5dfcc5b000a1fab4be66bbd43e4db44b61176e2bcba9c24f6fe887dea9b7fd49 registry.access.redhat.com/ubi8/dotnet-60-runtime@sha256:5dfcc5b000a1fab4be66bbd43e4db44b61176e2bcba9c24f6fe887dea9b7fd49 sha256:5e4e0fd08883744f35560eac43b8120f6324d9b488eb7a7716955fb98ddbace5 quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5e4e0fd08883744f35560eac43b8120f6324d9b488eb7a7716955fb98ddbace5 sha256:5f474ef095d7b7aabc5b1c60818201aca66343856fb67eb93751f3b4a82d391b registry.redhat.io/rhscl/postgresql-13-rhel7@sha256:5f474ef095d7b7aabc5b1c60818201aca66343856fb67eb93751f3b4a82d391b sha256:5fb3543c0d42146f0506c1ea4d09575131da6a2f27885729b7cfce13a0fa90e3 registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:5fb3543c0d42146f0506c1ea4d09575131da6a2f27885729b7cfce13a0fa90e3 sha256:603d10af5e3476add5b5726fdef893033869ae89824ee43949a46c9f004ef65d registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:603d10af5e3476add5b5726fdef893033869ae89824ee43949a46c9f004ef65d sha256:61555b923dabe4ff734279ed1bdb9eb6d450c760e1cc04463cf88608ac8d1338 registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:61555b923dabe4ff734279ed1bdb9eb6d450c760e1cc04463cf88608ac8d1338 sha256:64acf3403b5c2c85f7a28f326c63f1312b568db059c66d90b34e3c59fde3a74b registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:64acf3403b5c2c85f7a28f326c63f1312b568db059c66d90b34e3c59fde3a74b sha256:663eb81388ae8f824e7920c272f6d2e2274cf6c140d61416607261cdce9d50e2 registry.access.redhat.com/ubi8/openjdk-11@sha256:663eb81388ae8f824e7920c272f6d2e2274cf6c140d61416607261cdce9d50e2 sha256:6740d72db4de99ecb4652cff89a239242afd150d6ccf6ed0ebff89ffcbbc649e registry.redhat.io/ubi8/go-toolset@sha256:6740d72db4de99ecb4652cff89a239242afd150d6ccf6ed0ebff89ffcbbc649e sha256:67a2ae44e1bd87166e5c70f8147ccc9064ddfc8f43170bc92db9b12568cc7f73 
registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:67a2ae44e1bd87166e5c70f8147ccc9064ddfc8f43170bc92db9b12568cc7f73 sha256:67fee4b64b269f5666a1051d806635b675903ef56d07b7cc019d3d59ff1aa97c registry.access.redhat.com/ubi8/openjdk-11@sha256:67fee4b64b269f5666a1051d806635b675903ef56d07b7cc019d3d59ff1aa97c sha256:6a9e81b2eea2f32f2750909b6aa037c2c2e68be3bc9daf3c7a3163c9e1df379f registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:6a9e81b2eea2f32f2750909b6aa037c2c2e68be3bc9daf3c7a3163c9e1df379f sha256:6c009f430da02bdcff618a7dcd085d7d22547263eeebfb8d6377a4cf6f58769d registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:6c009f430da02bdcff618a7dcd085d7d22547263eeebfb8d6377a4cf6f58769d sha256:6cb572c7356c9d9ae6d2760491095de4b1797471ea97c50b330bc2ce1168da56 registry.access.redhat.com/ubi8/dotnet-90@sha256:6cb572c7356c9d9ae6d2760491095de4b1797471ea97c50b330bc2ce1168da56 sha256:6d07cbaef7869b2e0d878740ad685b150f3d8ab960544c881916a01f25f9b6ef registry.redhat.io/jboss-webserver-5/jws57-openjdk8-openshift-rhel8@sha256:6d07cbaef7869b2e0d878740ad685b150f3d8ab960544c881916a01f25f9b6ef sha256:6d8afb6d1fced4deee8de43b935e2bf5164c81bc26bee01da0fce69b74b63f83 registry.redhat.io/fuse7/fuse-eap-openshift-jdk11-rhel8@sha256:6d8afb6d1fced4deee8de43b935e2bf5164c81bc26bee01da0fce69b74b63f83 sha256:70a21b3f93c05843ce9d07f125b1464436caf01680bb733754a2a5df5bc3b11b registry.access.redhat.com/ubi8/dotnet-60@sha256:70a21b3f93c05843ce9d07f125b1464436caf01680bb733754a2a5df5bc3b11b sha256:7164a06e9ba98a3ce9991bd7019512488efe30895175bb463e255f00eb9421fd registry.access.redhat.com/ubi8/openjdk-8@sha256:7164a06e9ba98a3ce9991bd7019512488efe30895175bb463e255f00eb9421fd sha256:7201e059b92acc55fe9fe1cc390d44e92f0e2af297fbe52b3f1bb56327f59624 registry.redhat.io/ubi8/dotnet-80@sha256:7201e059b92acc55fe9fe1cc390d44e92f0e2af297fbe52b3f1bb56327f59624 sha256:739fac452e78a21a16b66e0451b85590b9e48ec7a1ed3887fbb9ed85cf564275 registry.access.redhat.com/ubi8/openjdk-17@sha256:739fac452e78a21a16b66e0451b85590b9e48ec7a1ed3887fbb9ed85cf564275 sha256:74051f86b00fb102e34276f03a310c16bc57b9c2a001a56ba66359e15ee48ba6 registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:74051f86b00fb102e34276f03a310c16bc57b9c2a001a56ba66359e15ee48ba6 sha256:74cc70f8d3698d41793d19b0e23b0a79448d02ada93f402a1714d26d164e4c1d registry.redhat.io/jboss-eap-7/eap74-openjdk11-openshift-rhel8@sha256:74cc70f8d3698d41793d19b0e23b0a79448d02ada93f402a1714d26d164e4c1d sha256:74efcab05b844a1c226bb18221a5309e7364b48d52757a809787e9b58e235ed9 registry.redhat.io/rhel8/postgresql-13@sha256:74efcab05b844a1c226bb18221a5309e7364b48d52757a809787e9b58e235ed9 sha256:75fbf4aa5c14bba44b5dfbf6673dc80ce35376f626df3a102a5a2edf8141cd34 registry.redhat.io/jboss-webserver-5/jws57-openjdk11-openshift-rhel8@sha256:75fbf4aa5c14bba44b5dfbf6673dc80ce35376f626df3a102a5a2edf8141cd34 sha256:7711108ef60ef6f0536bfa26914af2afaf6455ce6e4c4abd391e31a2d95d0178 registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:7711108ef60ef6f0536bfa26914af2afaf6455ce6e4c4abd391e31a2d95d0178 sha256:77e5675e066943925eb228f51434080f10bb0be323c9c55ac62d223a0dd1b250 registry.redhat.io/ubi9/ruby-33@sha256:77e5675e066943925eb228f51434080f10bb0be323c9c55ac62d223a0dd1b250 sha256:789f5edf1369a40bf56ca698eafee86b74a8d53b39d100bbb91279aaebceb6d5 registry.redhat.io/jboss-webserver-5/jws57-openjdk8-openshift-rhel8@sha256:789f5edf1369a40bf56ca698eafee86b74a8d53b39d100bbb91279aaebceb6d5 sha256:78af15475eac13d2ff439b33a9c3bdd39147858a824c420e8042fd5f35adce15 
registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:78af15475eac13d2ff439b33a9c3bdd39147858a824c420e8042fd5f35adce15 sha256:78bf175cecb15524b2ef81bff8cc11acdf7c0f74c08417f0e443483912e4878a registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:78bf175cecb15524b2ef81bff8cc11acdf7c0f74c08417f0e443483912e4878a sha256:7bcc365e0ba823ed020ee6e6c3e0c23be5871c8dea3f7f1a65029002c83f9e55 registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:7bcc365e0ba823ed020ee6e6c3e0c23be5871c8dea3f7f1a65029002c83f9e55 sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3 quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3 sha256:7de877b0e748cdb47cb702400f3ddaa3c3744a022887e2213c2bb27775ab4b25 registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:7de877b0e748cdb47cb702400f3ddaa3c3744a022887e2213c2bb27775ab4b25 sha256:7ef75cdbc399425105060771cb8e700198cc0bddcfb60bf4311bf87ea62fd440 registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:7ef75cdbc399425105060771cb8e700198cc0bddcfb60bf4311bf87ea62fd440 sha256:7ff7af22f08d9dc8043a73013d629ee03277ff18ad94001092de70fd5917e9e8 registry.redhat.io/ubi10/nodejs-22@sha256:7ff7af22f08d9dc8043a73013d629ee03277ff18ad94001092de70fd5917e9e8 sha256:8027301bb8716941e2a15b7d31b055ec7eba327ad3b7c72fb5accfa077a32521 registry.redhat.io/rhscl/postgresql-10-rhel7@sha256:8027301bb8716941e2a15b7d31b055ec7eba327ad3b7c72fb5accfa077a32521 sha256:81684e422367a075ac113e69ea11d8721416ce4bedea035e25313c5e726fd7d1 registry.access.redhat.com/ubi8/openjdk-8@sha256:81684e422367a075ac113e69ea11d8721416ce4bedea035e25313c5e726fd7d1 sha256:85093d0f55d06662420925f64e914ff05499c79c2ede3ef80085a44d40f16a80 registry.redhat.io/jboss-webserver-5/jws57-openjdk8-openshift-rhel8@sha256:85093d0f55d06662420925f64e914ff05499c79c2ede3ef80085a44d40f16a80 sha256:852d8ea448cfb93036fc5f1b69f58249bc2e4454d326bd927839c5de6ce50a7b registry.redhat.io/fuse7/fuse-eap-openshift@sha256:852d8ea448cfb93036fc5f1b69f58249bc2e4454d326bd927839c5de6ce50a7b sha256:865ce4a073a00133133ba2d375cb9529dab8d10cf2aebd5537e9028f21aa261b registry.redhat.io/ubi10/nginx-126@sha256:865ce4a073a00133133ba2d375cb9529dab8d10cf2aebd5537e9028f21aa261b sha256:868224c3b7c309b9e04003af70a5563af8e4c662f0c53f2a7606e0573c9fad85 registry.access.redhat.com/ubi8/openjdk-11@sha256:868224c3b7c309b9e04003af70a5563af8e4c662f0c53f2a7606e0573c9fad85 sha256:88751105dd023552164e3c312742986b011078becb28f1464f1524d134925d73 registry.access.redhat.com/ubi8/dotnet-90-runtime@sha256:88751105dd023552164e3c312742986b011078becb28f1464f1524d134925d73 sha256:887cc9e7fd3ce89adf6233aaf52b0243930e1a958190a09bf37c10f069890ee7 registry.redhat.io/ubi8/nodejs-22@sha256:887cc9e7fd3ce89adf6233aaf52b0243930e1a958190a09bf37c10f069890ee7 sha256:8968c86f5e2831796cf5f464c87a911b5513fd543b7f3485ccc497fe05ad6bca registry.redhat.io/fuse7/fuse-eap-openshift@sha256:8968c86f5e2831796cf5f464c87a911b5513fd543b7f3485ccc497fe05ad6bca sha256:8a5b580b76c2fc2dfe55d13bb0dd53e8c71d718fc1a3773264b1710f49060222 registry.access.redhat.com/ubi8/openjdk-8@sha256:8a5b580b76c2fc2dfe55d13bb0dd53e8c71d718fc1a3773264b1710f49060222 sha256:8a7d4c245418f8099293270f8bbcf7a4207839c4c4ef9974c2e16e303329edf3 registry.redhat.io/jboss-webserver-5/jws57-openjdk11-openshift-rhel8@sha256:8a7d4c245418f8099293270f8bbcf7a4207839c4c4ef9974c2e16e303329edf3 sha256:8abcc25fba7202b68818271353a9203677ac3c2d638dafc84e6b45e68d913f59 
registry.redhat.io/ubi8/openjdk-8@sha256:8abcc25fba7202b68818271353a9203677ac3c2d638dafc84e6b45e68d913f59 sha256:8be99c30a4e5b021129310847bddca64a93fb38b0c8dfeac482b4c28de062e5a registry.redhat.io/ubi8/openjdk-11@sha256:8be99c30a4e5b021129310847bddca64a93fb38b0c8dfeac482b4c28de062e5a sha256:8c148fc54caeb860262c3710675c97c58aba79af2d3c76de795b97143aae0e3f registry.redhat.io/fuse7/fuse-java-openshift@sha256:8c148fc54caeb860262c3710675c97c58aba79af2d3c76de795b97143aae0e3f sha256:8c1d5419ebfe1eb1a888c32a456f48ea01e1a6a33c7db59acf689ede1d944516 registry.redhat.io/ubi9/python-39@sha256:8c1d5419ebfe1eb1a888c32a456f48ea01e1a6a33c7db59acf689ede1d944516 sha256:8d57f273f8521c9b2d55756dbff05559184d1aeec46517e46c71de97cd72c12b registry.redhat.io/rhel8/postgresql-12@sha256:8d57f273f8521c9b2d55756dbff05559184d1aeec46517e46c71de97cd72c12b sha256:8e83f3ef3b5ad4bd7c4002d0201e4d5dd26a158c0be3ad29405ff4800d5661b8 registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:8e83f3ef3b5ad4bd7c4002d0201e4d5dd26a158c0be3ad29405ff4800d5661b8 sha256:9036a59a8275f9c205ef5fc674f38c0495275a1a7912029f9a784406bb00b1f5 registry.access.redhat.com/ubi8/openjdk-11@sha256:9036a59a8275f9c205ef5fc674f38c0495275a1a7912029f9a784406bb00b1f5 sha256:920ff7e5efc777cb523669c425fd7b553176c9f4b34a85ceddcb548c2ac5f78a registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:920ff7e5efc777cb523669c425fd7b553176c9f4b34a85ceddcb548c2ac5f78a sha256:936daac34be9105cb05ca1eb7bcf89b280df38060709cf581df03fb69362e4df registry.redhat.io/ubi8/nodejs-20-minimal@sha256:936daac34be9105cb05ca1eb7bcf89b280df38060709cf581df03fb69362e4df sha256:953aeb7c686ebe9359eb9e020aabaa011e47de9a0c38a3e97f85ff038abef5e6 registry.redhat.io/jboss-eap-7/eap-xp3-openjdk11-openshift-rhel8@sha256:953aeb7c686ebe9359eb9e020aabaa011e47de9a0c38a3e97f85ff038abef5e6 sha256:956ad570b06da524a856c6c2b421c8b4aab160fc4565cde798c72fa050c2dedf registry.redhat.io/fuse7/fuse-eap-openshift@sha256:956ad570b06da524a856c6c2b421c8b4aab160fc4565cde798c72fa050c2dedf sha256:963cb8b96a26857034d16753409e0b48eb8c7e2702ad97ea53136a705946e535 registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:963cb8b96a26857034d16753409e0b48eb8c7e2702ad97ea53136a705946e535 sha256:9a1ff2292e9e3aa41290373a931e9b52de2b206e4da35dc12dc553f7b0e58146 quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9a1ff2292e9e3aa41290373a931e9b52de2b206e4da35dc12dc553f7b0e58146 sha256:9ab26cb4005e9b60fd6349950957bbd0120efba216036da53c547c6f1c9e5e7f registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:9ab26cb4005e9b60fd6349950957bbd0120efba216036da53c547c6f1c9e5e7f sha256:9b5d2fc574a13613f18fa983ac2901593c1e812836e918095bc3d15b6cc4ba57 registry.redhat.io/ubi8/openjdk-17-runtime@sha256:9b5d2fc574a13613f18fa983ac2901593c1e812836e918095bc3d15b6cc4ba57 sha256:9c10ee657f8d4fc4cee2d6f3fca56a8ded4354b90beea00e8274d1927e0fe8c7 registry.redhat.io/jboss-webserver-5/jws57-openjdk8-openshift-rhel8@sha256:9c10ee657f8d4fc4cee2d6f3fca56a8ded4354b90beea00e8274d1927e0fe8c7 sha256:9c8685e9b35e0262af34b42288252f421e0791efd835b5673cf9d10c90863a36 registry.redhat.io/ubi9/httpd-24@sha256:9c8685e9b35e0262af34b42288252f421e0791efd835b5673cf9d10c90863a36 sha256:9d0ed8688df061f7555046a61cec60f909b325e77dee6aec9d2350f81efa0b46 registry.redhat.io/fuse7/fuse-eap-openshift@sha256:9d0ed8688df061f7555046a61cec60f909b325e77dee6aec9d2350f81efa0b46 sha256:9d759db3bb650e5367216ce261779c5a58693fc7ae10f21cd264011562bd746d registry.access.redhat.com/ubi8/openjdk-8@sha256:9d759db3bb650e5367216ce261779c5a58693fc7ae10f21cd264011562bd746d 
sha256:9d895b1202aaf35bbe7b432736307c965d9d84fd91c06e41f8ac21c7ea0590a0 registry.redhat.io/ubi8/ruby-33@sha256:9d895b1202aaf35bbe7b432736307c965d9d84fd91c06e41f8ac21c7ea0590a0 sha256:9dea3590288a2f7e58af534af889123e044b0e4e03d179664bf5ac2206e9e91d registry.redhat.io/rhel8/mariadb-105@sha256:9dea3590288a2f7e58af534af889123e044b0e4e03d179664bf5ac2206e9e91d sha256:a0a6db2dcdb3d49e36bd0665e3e00f242a690391700e42cab14e86b154152bfd registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:a0a6db2dcdb3d49e36bd0665e3e00f242a690391700e42cab14e86b154152bfd sha256:a1f3f9545a7657a88c4bddbc00f4df862b7658f247e195704b6d9f8a0249c9fa registry.redhat.io/ubi10/httpd-24@sha256:a1f3f9545a7657a88c4bddbc00f4df862b7658f247e195704b6d9f8a0249c9fa sha256:a3ca570ae1293a2d88bd995b972f67a784a416cd9916a3d2bef7b38e23b88df3 registry.redhat.io/ubi8/php-74@sha256:a3ca570ae1293a2d88bd995b972f67a784a416cd9916a3d2bef7b38e23b88df3 sha256:a4c6773a5eb5183f6dad3b029aa2cc4b6715797985dc53a3faf972007e7ad4d3 registry.redhat.io/rhel9/postgresql-15@sha256:a4c6773a5eb5183f6dad3b029aa2cc4b6715797985dc53a3faf972007e7ad4d3 sha256:a4dcf2213376727c3da1d10efbea52f20c74674bfb06643723f195a849171c10 registry.redhat.io/fuse7/fuse-karaf-openshift-rhel8@sha256:a4dcf2213376727c3da1d10efbea52f20c74674bfb06643723f195a849171c10 sha256:a4e8d81c9e54234f84072295425120e358752d58af8297ecfbae5d4ad01e6a2e registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:a4e8d81c9e54234f84072295425120e358752d58af8297ecfbae5d4ad01e6a2e sha256:a8e4081414cfa644e212ded354dfee12706e63afb19a27c0c0ae2c8c64e56ca6 registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:a8e4081414cfa644e212ded354dfee12706e63afb19a27c0c0ae2c8c64e56ca6 sha256:aa02a20c2edf83a009746b45a0fd2e0b4a2b224fdef1581046f6afef38c0bee2 registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:aa02a20c2edf83a009746b45a0fd2e0b4a2b224fdef1581046f6afef38c0bee2 sha256:aabd8f354cad901a56c029dbd62483262f4c435a329882ba5c3f7041c8cc04f8 registry.redhat.io/ubi9/php-83@sha256:aabd8f354cad901a56c029dbd62483262f4c435a329882ba5c3f7041c8cc04f8 sha256:abf2ce8a3db39549eb583fd38851e51a1d34c3448225d0cf1aede35e5e0cccfd registry.redhat.io/ubi8/perl-526@sha256:abf2ce8a3db39549eb583fd38851e51a1d34c3448225d0cf1aede35e5e0cccfd sha256:acc20d8b3eb50bef50abda0e44d46d353ba28be950b1685cc8053c977b31eaf6 registry.redhat.io/rhel9/mariadb-105@sha256:acc20d8b3eb50bef50abda0e44d46d353ba28be950b1685cc8053c977b31eaf6 sha256:adc5b484c12f915309a95acb71890e4a1a8148d5dadd6cc22d0794cdab81557b registry.redhat.io/fuse7/fuse-java-openshift@sha256:adc5b484c12f915309a95acb71890e4a1a8148d5dadd6cc22d0794cdab81557b sha256:ade3b81041a336ef1a37f3c52f85fc0b92bd62b76f304a86d262770110e3fbab registry.redhat.io/ubi8/nginx-122@sha256:ade3b81041a336ef1a37f3c52f85fc0b92bd62b76f304a86d262770110e3fbab sha256:ae7c07fccaaec3ad4a83a2309893b03e94010b2d046de8c38e3d5af45366f84c registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:ae7c07fccaaec3ad4a83a2309893b03e94010b2d046de8c38e3d5af45366f84c sha256:af6079ab6f381f2f8eb7175c9bacb93d0c72ff022e97f28520c97b1633b109e2 registry.redhat.io/ubi10/perl-540@sha256:af6079ab6f381f2f8eb7175c9bacb93d0c72ff022e97f28520c97b1633b109e2 sha256:af676d250bb4ced265fece19dbb847133717e18341777df3a57550f53f6207cb registry.redhat.io/rhel8/redis-6@sha256:af676d250bb4ced265fece19dbb847133717e18341777df3a57550f53f6207cb sha256:af9c08644ca057d83ef4b7d8de1489f01c5a52ff8670133b8a09162831b7fb34 registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:af9c08644ca057d83ef4b7d8de1489f01c5a52ff8670133b8a09162831b7fb34 
sha256:b053401886c06581d3c296855525cc13e0613100a596ed007bb69d5f8e972346 registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:b053401886c06581d3c296855525cc13e0613100a596ed007bb69d5f8e972346 sha256:b163564be6ed5b80816e61a4ee31e42f42dbbf345253daac10ecc9fadf31baa3 registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:b163564be6ed5b80816e61a4ee31e42f42dbbf345253daac10ecc9fadf31baa3 sha256:b25e2af772c13ae5ff3339bc4bbdf52c49011e750f40b37a0b736cb82768a349 registry.redhat.io/rhel8/httpd-24@sha256:b25e2af772c13ae5ff3339bc4bbdf52c49011e750f40b37a0b736cb82768a349 sha256:b4cb02a4e7cb915b6890d592ed5b4ab67bcef19bf855029c95231f51dd071352 registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:b4cb02a4e7cb915b6890d592ed5b4ab67bcef19bf855029c95231f51dd071352 sha256:b52d47d62be6b57d4af722f98b3434016c99c54e04574236b017924046d323c0 registry.redhat.io/jboss-eap-7/eap74-openjdk11-runtime-openshift-rhel8@sha256:b52d47d62be6b57d4af722f98b3434016c99c54e04574236b017924046d323c0 sha256:b77dec59a72e9b6323e6fa2617f588f07518f44d2e9f6aa8f2ccd83d90e40203 quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b77dec59a72e9b6323e6fa2617f588f07518f44d2e9f6aa8f2ccd83d90e40203 sha256:b80a514f136f738736d6bf654dc3258c13b04a819e001dd8a39ef2f7475fd9d9 registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:b80a514f136f738736d6bf654dc3258c13b04a819e001dd8a39ef2f7475fd9d9 sha256:b838fa18dab68d43a19f0c329c3643850691b8f9915823c4f8d25685eb293a11 registry.access.redhat.com/ubi8/openjdk-8@sha256:b838fa18dab68d43a19f0c329c3643850691b8f9915823c4f8d25685eb293a11 sha256:b85cbdbc289752c91ac7f468cffef916fe9ab01865f3e32cfcc44ccdd633b168 registry.access.redhat.com/ubi8/openjdk-11@sha256:b85cbdbc289752c91ac7f468cffef916fe9ab01865f3e32cfcc44ccdd633b168 sha256:b998c58c88dd98365531bacc631dc92deb73de17cd3b6f86466f421c409f8583 registry.redhat.io/ubi8/dotnet-80-runtime@sha256:b998c58c88dd98365531bacc631dc92deb73de17cd3b6f86466f421c409f8583 sha256:ba09ffc90313f71c748e38a6b7c68b20f4e42945eb88349a6596dea61c517637 registry.redhat.io/fuse7/fuse-eap-openshift-jdk8-rhel7@sha256:ba09ffc90313f71c748e38a6b7c68b20f4e42945eb88349a6596dea61c517637 sha256:ba0bb0b1b9bed00d24bd73b59b6a3f7a46714ba1c0e1b900572dc580eedde68c registry.redhat.io/fuse7/fuse-java-openshift@sha256:ba0bb0b1b9bed00d24bd73b59b6a3f7a46714ba1c0e1b900572dc580eedde68c sha256:babbc5613f8ad380b2b85564d74d1edfbb345a9481fa3c1891980bb75169c079 registry.redhat.io/jboss-eap-7/eap-xp4-openjdk11-openshift-rhel8@sha256:babbc5613f8ad380b2b85564d74d1edfbb345a9481fa3c1891980bb75169c079 sha256:bae3cc0e61e2d73c8dd1384cfa97c6e875ff4a5da7f6332075eb5cd95bc90a7c registry.redhat.io/ubi8/nodejs-22-minimal@sha256:bae3cc0e61e2d73c8dd1384cfa97c6e875ff4a5da7f6332075eb5cd95bc90a7c sha256:bc7fd6202c086ba5dec4dc27c888b96505ffbd37d1e124cd4abb72bd13a8f237 registry.redhat.io/fuse7/fuse-java-openshift-jdk11-rhel8@sha256:bc7fd6202c086ba5dec4dc27c888b96505ffbd37d1e124cd4abb72bd13a8f237 sha256:bcb0e15cc9d2d3449f0b1acac7b0275035a80e1b3b835391b5464f7bf4553b89 registry.access.redhat.com/ubi8/openjdk-17@sha256:bcb0e15cc9d2d3449f0b1acac7b0275035a80e1b3b835391b5464f7bf4553b89 sha256:be2c749e20118e80b11526eb52993e2055e035b211b2621dea9094dd4dcd9446 registry.redhat.io/rhel8/mysql-80@sha256:be2c749e20118e80b11526eb52993e2055e035b211b2621dea9094dd4dcd9446 sha256:be51ee43b1596078a17756f38a0017e9338c902f9094f1ad677844d165a02d43 registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:be51ee43b1596078a17756f38a0017e9338c902f9094f1ad677844d165a02d43 sha256:bf5e518dba2aa935829d9db88d933a264e54ffbfa80041b41287fd70c1c35ba5 
registry.access.redhat.com/ubi8/openjdk-8@sha256:bf5e518dba2aa935829d9db88d933a264e54ffbfa80041b41287fd70c1c35ba5 sha256:c1360b136eae3f3e6eab6e446d6d7b78c53e829a61e2a0b802d55c7ba134ca0c registry.redhat.io/ubi10/nodejs-22-minimal@sha256:c1360b136eae3f3e6eab6e446d6d7b78c53e829a61e2a0b802d55c7ba134ca0c sha256:c5f7af4c8188bf4619eeaa0f20094bbf5fbfbd824c973ee7da722a48d67300a9 registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:c5f7af4c8188bf4619eeaa0f20094bbf5fbfbd824c973ee7da722a48d67300a9 sha256:c6edc9920c6038890d77d1daa135b7ae4a5a7fe2213b168dbc12311de0445791 registry.redhat.io/rhscl/mysql-80-rhel7@sha256:c6edc9920c6038890d77d1daa135b7ae4a5a7fe2213b168dbc12311de0445791 sha256:c9134d992b1ee68be3450debd70f96b9b353fec15fca331afcf7adb26a7f2f09 registry.redhat.io/rhel9/redis-7@sha256:c9134d992b1ee68be3450debd70f96b9b353fec15fca331afcf7adb26a7f2f09 sha256:c93f8f7e2d9522be4952071044457efde0a6c6fd59bd9adcd01e07d164d8235c registry.redhat.io/fuse7/fuse-karaf-openshift-rhel8@sha256:c93f8f7e2d9522be4952071044457efde0a6c6fd59bd9adcd01e07d164d8235c sha256:caba895933209aa9a4f3121f9ec8e5e8013398ab4f72bd3ff255227aad8d2c3e registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:caba895933209aa9a4f3121f9ec8e5e8013398ab4f72bd3ff255227aad8d2c3e sha256:cbc31c1b0625cb01f2b67d83e7b375d08d6aa756f24cf32fc3e82f0b76a4c976 registry.redhat.io/rhscl/redis-6-rhel7@sha256:cbc31c1b0625cb01f2b67d83e7b375d08d6aa756f24cf32fc3e82f0b76a4c976 sha256:cc6290111b86982db2789ed392d004c2c48ff7e5407a82bbeaa6d9ceb8a7963f registry.redhat.io/fuse7/fuse-java-openshift-rhel8@sha256:cc6290111b86982db2789ed392d004c2c48ff7e5407a82bbeaa6d9ceb8a7963f sha256:ce5c0becf829aca80734b4caf3ab6b76cb00f7d78f4e39fb136636a764dea7f6 registry.access.redhat.com/ubi8/openjdk-11@sha256:ce5c0becf829aca80734b4caf3ab6b76cb00f7d78f4e39fb136636a764dea7f6 sha256:cf965175e27f89fecae4982ff88f15b5711b60af5215fd9ef5ff1b6f6ddf9bcd registry.redhat.io/ubi10/ruby-33@sha256:cf965175e27f89fecae4982ff88f15b5711b60af5215fd9ef5ff1b6f6ddf9bcd sha256:cfd8c4ac1c495b766dd3ff1a85c35afe092858f8f65b52a5b044811719482236 registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:cfd8c4ac1c495b766dd3ff1a85c35afe092858f8f65b52a5b044811719482236 sha256:d09ebdfdd48b8c132bc13df5e20ac3ca00a2a0a25df0b552a6ce575383d71165 registry.redhat.io/ubi8/python-311@sha256:d09ebdfdd48b8c132bc13df5e20ac3ca00a2a0a25df0b552a6ce575383d71165 sha256:d0b4c0b12d8dd76a31a67bd429ce78d327d1c4fcd4896dd77557ee484379defa registry.redhat.io/ubi9/php-80@sha256:d0b4c0b12d8dd76a31a67bd429ce78d327d1c4fcd4896dd77557ee484379defa sha256:d138037e44e951accd8941222908b6d2291c8b2dfd4fad49195f83d0c5e6e77f registry.redhat.io/rhel9/redis-6@sha256:d138037e44e951accd8941222908b6d2291c8b2dfd4fad49195f83d0c5e6e77f sha256:d186c94f8843f854d77b2b05d10efb0d272f88a4bf4f1d8ebe304428b9396392 registry.access.redhat.com/ubi8/openjdk-17@sha256:d186c94f8843f854d77b2b05d10efb0d272f88a4bf4f1d8ebe304428b9396392 sha256:d19dcf07e61e96eaff277f3f1b41a802aee2031c561d63012d99b1f2ed51467e registry.redhat.io/ubi9/nginx-120@sha256:d19dcf07e61e96eaff277f3f1b41a802aee2031c561d63012d99b1f2ed51467e sha256:d212c0ad7ec7b340beff1776c0216ea8c0aa66423538a84910c36e00db2366b5 registry.redhat.io/fuse7/fuse-karaf-openshift-rhel8@sha256:d212c0ad7ec7b340beff1776c0216ea8c0aa66423538a84910c36e00db2366b5 sha256:d2838047b28f1385d57abc68a907830d48df647d3a06bb6ab155567097d940c7 registry.redhat.io/ubi9/nginx-126@sha256:d2838047b28f1385d57abc68a907830d48df647d3a06bb6ab155567097d940c7 sha256:d2e75dac02681ed5aded89115b736ba5e83011294686cd6d04780aebffc0ff5d 
registry.redhat.io/fuse7/fuse-eap-openshift@sha256:d2e75dac02681ed5aded89115b736ba5e83011294686cd6d04780aebffc0ff5d sha256:d2f17aaf2f871fda5620466d69ac67b9c355c0bae5912a1dbef9a51ca8813e50 registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:d2f17aaf2f871fda5620466d69ac67b9c355c0bae5912a1dbef9a51ca8813e50 sha256:d4bd4b422ffcfd52334f42f372d511d68496991b47a24f7dacfb032edb250475 registry.redhat.io/ubi9/nodejs-22@sha256:d4bd4b422ffcfd52334f42f372d511d68496991b47a24f7dacfb032edb250475 sha256:d64489de1b4cb65c6d6de5add7ad6e9f4a6817c8e62987ad5859814085beac06 registry.redhat.io/fuse7/fuse-java-openshift-jdk11-rhel8@sha256:d64489de1b4cb65c6d6de5add7ad6e9f4a6817c8e62987ad5859814085beac06 sha256:d88de3935a4ad6a2a8a04a49733d1e7cba14688cd5c8081b78538a86fc499d5a registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:d88de3935a4ad6a2a8a04a49733d1e7cba14688cd5c8081b78538a86fc499d5a sha256:d8d642e25fc863ed0ee603addee008db975b1c6b73a02a1e5b4bc446fbf5ec76 registry.redhat.io/fuse7/fuse-eap-openshift-jdk8-rhel7@sha256:d8d642e25fc863ed0ee603addee008db975b1c6b73a02a1e5b4bc446fbf5ec76 sha256:da558fa9ad7c357ed794eb549ac12a799eab97951f2e3cbc9501e413a348119a registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:da558fa9ad7c357ed794eb549ac12a799eab97951f2e3cbc9501e413a348119a sha256:db3f5192237bfdab2355304f17916e09bc29d6d529fdec48b09a08290ae35905 registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:db3f5192237bfdab2355304f17916e09bc29d6d529fdec48b09a08290ae35905 sha256:dbe9905fe2b20ed30b0e2d64543016fa9c145eeb5a678f720ba9d2055f0c9f88 registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:dbe9905fe2b20ed30b0e2d64543016fa9c145eeb5a678f720ba9d2055f0c9f88 sha256:dc84fed0f6f40975a2277c126438c8aa15c70eeac75981dbaa4b6b853eff61a6 registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:dc84fed0f6f40975a2277c126438c8aa15c70eeac75981dbaa4b6b853eff61a6 sha256:de3e8e869ab92ab5ad19919034b845b3fb01c7e57d49caf5899b68c0a4461c1b registry.redhat.io/ubi8/nginx-124@sha256:de3e8e869ab92ab5ad19919034b845b3fb01c7e57d49caf5899b68c0a4461c1b sha256:deaaa8255efc84a6a7fd4d1b6e5593eaab6c2753e1f2f84a5b83d4e047f03f3f registry.redhat.io/jboss-eap-7/eap-xp3-openjdk11-runtime-openshift-rhel8@sha256:deaaa8255efc84a6a7fd4d1b6e5593eaab6c2753e1f2f84a5b83d4e047f03f3f sha256:df2b4b8e8f4b7b183c443653b6f11ac529d7c6421972078a464b765ab31b075d registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:df2b4b8e8f4b7b183c443653b6f11ac529d7c6421972078a464b765ab31b075d sha256:df8858f0c01ae1657a14234a94f6785cbb2fba7f12c9d0325f427a3f1284481b registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:df8858f0c01ae1657a14234a94f6785cbb2fba7f12c9d0325f427a3f1284481b sha256:e12d4107cfe12b7b9f3817bde90dcb07ff3ee7e3b6482120fec6d37583df727f registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:e12d4107cfe12b7b9f3817bde90dcb07ff3ee7e3b6482120fec6d37583df727f sha256:e241dd0049956f75f989687e80fafe7da2f2f25ef4ea762bdaabfe2161d20f64 registry.redhat.io/jboss-webserver-5/jws57-openjdk11-openshift-rhel8@sha256:e241dd0049956f75f989687e80fafe7da2f2f25ef4ea762bdaabfe2161d20f64 sha256:e3727c305a54ac48c07688089e18e9abbe4ce98712a7ce59aa4b9ad7b7bc6514 registry.redhat.io/ubi9/ruby-30@sha256:e3727c305a54ac48c07688089e18e9abbe4ce98712a7ce59aa4b9ad7b7bc6514 sha256:e379727c63710610a1d6843aac4fe3c9e5d81fc0e58c171e88c55da4be1499f0 registry.redhat.io/fuse7/fuse-java-openshift-rhel8@sha256:e379727c63710610a1d6843aac4fe3c9e5d81fc0e58c171e88c55da4be1499f0 sha256:e37aeaeb0159194a9855350e13e399470f39ce340d6381069933742990741fb8 
registry.access.redhat.com/ubi8/openjdk-17@sha256:e37aeaeb0159194a9855350e13e399470f39ce340d6381069933742990741fb8 sha256:e4223a60b887ec24cad7dd70fdb6c3f2c107fb7118331be6f45d626219cfe7f3 registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:e4223a60b887ec24cad7dd70fdb6c3f2c107fb7118331be6f45d626219cfe7f3 sha256:e4b1599ba6e88f6df7c4e67d6397371d61b6829d926411184e9855e71e840b8c registry.redhat.io/openjdk/openjdk-11-rhel7@sha256:e4b1599ba6e88f6df7c4e67d6397371d61b6829d926411184e9855e71e840b8c sha256:e4be2fb7216f432632819b2441df42a5a0063f7f473c2923ca6912b2d64b7494 registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:e4be2fb7216f432632819b2441df42a5a0063f7f473c2923ca6912b2d64b7494 sha256:e61420525f02c4e85ba9b79b9702880a11907d8ab79b9b0a36dbf559ce0f5234 registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:e61420525f02c4e85ba9b79b9702880a11907d8ab79b9b0a36dbf559ce0f5234 sha256:e6fc695cfd77ccff83ef22148c54f45ba8af41e2b69f6146d7ad588cb2aed780 registry.redhat.io/jboss-eap-7/eap74-openjdk8-runtime-openshift-rhel7@sha256:e6fc695cfd77ccff83ef22148c54f45ba8af41e2b69f6146d7ad588cb2aed780 sha256:e7713979a921ec8d2506fcb3fb3ee960fc757262f4567319ee5aa2b351d4f778 quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e7713979a921ec8d2506fcb3fb3ee960fc757262f4567319ee5aa2b351d4f778 sha256:e851770fd181ef49193111f7afcdbf872ad23f3a8234e0e07a742c4ca2882c3d registry.access.redhat.com/ubi8/openjdk-11@sha256:e851770fd181ef49193111f7afcdbf872ad23f3a8234e0e07a742c4ca2882c3d sha256:e90172ca0f09acf5db1721bd7df304dffd184e00145072132cb71c7f0797adf6 registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:e90172ca0f09acf5db1721bd7df304dffd184e00145072132cb71c7f0797adf6 sha256:ea065c1e423af8de839c84e1520131fe4ed4a1bda0ae8eeb078fc8a8368e9f0c registry.redhat.io/ubi9/nodejs-22-minimal@sha256:ea065c1e423af8de839c84e1520131fe4ed4a1bda0ae8eeb078fc8a8368e9f0c sha256:eaaf8ab6d318d72cc4e465609b213f4d9d9171f222f59ae012fa5b96fb3e4ea9 registry.redhat.io/rhscl/mariadb-103-rhel7@sha256:eaaf8ab6d318d72cc4e465609b213f4d9d9171f222f59ae012fa5b96fb3e4ea9 sha256:eab456afb39ed4607b2ee61c8c7635ab1c5ff8f8bddf7640c557e792504d545f registry.redhat.io/ocp-tools-4/jenkins-rhel8@sha256:eab456afb39ed4607b2ee61c8c7635ab1c5ff8f8bddf7640c557e792504d545f sha256:ebfca7a4e3506ee7f317acc7503ad46f2e1cf5605347a1b75fdd02bc77c7de02 registry.redhat.io/fuse7/fuse-eap-openshift-jdk11-rhel8@sha256:ebfca7a4e3506ee7f317acc7503ad46f2e1cf5605347a1b75fdd02bc77c7de02 sha256:ec1985bf5fca4d79054e4beadf7617c3b6400bad2325e47b00a90f7fe07540de registry.redhat.io/ubi8/ruby-25@sha256:ec1985bf5fca4d79054e4beadf7617c3b6400bad2325e47b00a90f7fe07540de sha256:ec784f172735873a5893504e07c57dcbd56b7b049a395c5629c6058dbfac21a3 registry.redhat.io/ubi8/dotnet-90@sha256:ec784f172735873a5893504e07c57dcbd56b7b049a395c5629c6058dbfac21a3 sha256:ed13779a6051e3b9588f5ebea6b66c0a2979512fdcc99bca1f910a577fb4c34a quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ed13779a6051e3b9588f5ebea6b66c0a2979512fdcc99bca1f910a577fb4c34a sha256:ed2da0eed3f495f5455f490cdf7f7943420f64b0cf541271a2d315a3f9e9744c registry.redhat.io/rhel8/postgresql-10@sha256:ed2da0eed3f495f5455f490cdf7f7943420f64b0cf541271a2d315a3f9e9744c sha256:ee797c115858fef35cad6ce8a13fc15b482d7672e37f485cd65579f009d51f0d registry.redhat.io/ubi8/dotnet-90-runtime@sha256:ee797c115858fef35cad6ce8a13fc15b482d7672e37f485cd65579f009d51f0d sha256:eeb0c539ee7ffbd2f1e6eb326204c6f69c554ac5acf0454e9d68d75ffe954f7c 
registry.redhat.io/jboss-webserver-5/jws57-openjdk11-openshift-rhel8@sha256:eeb0c539ee7ffbd2f1e6eb326204c6f69c554ac5acf0454e9d68d75ffe954f7c sha256:eed7e29bf583e4f01e170bb9f22f2a78098bf15243269b670c307caa6813b783 registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:eed7e29bf583e4f01e170bb9f22f2a78098bf15243269b670c307caa6813b783 sha256:f438230ed2c2e609d0d7dbc430ccf1e9bad2660e6410187fd6e9b14a2952e70b registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:f438230ed2c2e609d0d7dbc430ccf1e9bad2660e6410187fd6e9b14a2952e70b sha256:f4fecc3946301cef5130f3eaaa33d9c15426437d46efe70b507ac90f03364307 registry.redhat.io/ubi10/php-83@sha256:f4fecc3946301cef5130f3eaaa33d9c15426437d46efe70b507ac90f03364307 sha256:f74e72bc7bb13dec9f38ef9e00a8665a7c08a386176ca4b3c41075491e8d07e7 registry.redhat.io/fuse7/fuse-eap-openshift@sha256:f74e72bc7bb13dec9f38ef9e00a8665a7c08a386176ca4b3c41075491e8d07e7 sha256:f7ca08a8dda3610fcc10cc1fe5f5d0b9f8fc7a283b01975d0fe2c1e77ae06193 registry.access.redhat.com/ubi8/openjdk-8@sha256:f7ca08a8dda3610fcc10cc1fe5f5d0b9f8fc7a283b01975d0fe2c1e77ae06193 sha256:f7d4386680e3a44e3bf8bacc3ebfe3224232e44e4d1e2e7167aa1b4970f2866c registry.redhat.io/fuse7/fuse-eap-openshift-jdk8-rhel7@sha256:f7d4386680e3a44e3bf8bacc3ebfe3224232e44e4d1e2e7167aa1b4970f2866c sha256:f89a54e6d1340be8ddd84a602cb4f1f27c1983417f655941645bf11809d49f18 registry.access.redhat.com/ubi8/openjdk-17@sha256:f89a54e6d1340be8ddd84a602cb4f1f27c1983417f655941645bf11809d49f18 sha256:f953734d89252219c3dcd8f703ba8b58c9c8a0f5dfa9425c9e56ec0834f7d288 registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:f953734d89252219c3dcd8f703ba8b58c9c8a0f5dfa9425c9e56ec0834f7d288 sha256:fa9556628c15b8eb22cafccb737b3fbcecfd681a5c2cfea3302dd771c644a7db registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:fa9556628c15b8eb22cafccb737b3fbcecfd681a5c2cfea3302dd771c644a7db [INFO] oc get 'images' -oyaml apiVersion: v1 items: - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:54e56e6f85721741ee7bf0336de8ad3bf138a56769a6d0097b600a0e361be58d size: 39618910 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4f8ddd7f5a755f537dd9d5f553c8c78171dcf3018c5fc96676a07380d3e14e20 size: 1745 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:fa8e7ec856879c5c416f30aeaca26473064c725889c4a66f9f664909c5657afb size: 72401376 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 65ec992ef2e6 Image: 696547b1166cc2d13341e0f5b0c183f4736e71d62b55b12046160d5877f17c09 Labels: architecture: x86_64 build-date: 2022-04-29T13:49:43.606616 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: 
openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1651233090" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.12-1.1651233090 vcs-ref: d68a2b1f3342c920689ae6f7c0a9614570f9b9a0 vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 65ec992ef2e6 Image: sha256:1e817c050a87c981c80f34e27d52976449c423544defb888938b6d71aafd4fe4 Labels: architecture: x86_64 build-date: 2022-04-29T13:49:43.606616 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1651233090" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.12-1.1651233090 vcs-ref: d68a2b1f3342c920689ae6f7c0a9614570f9b9a0 vcs-type: git vendor: Red Hat, Inc. 
version: "1.12" User: "185" WorkingDir: /home/jboss Created: "2022-04-29T13:51:52Z" DockerVersion: 1.13.1 Id: sha256:3bf069ed2b338f38b5eed9edb7916876ed2deca8dbc13fcb8d61ef6ba0f588a8 Size: 112027067 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:00cf28cf9a6c427962f922855a6cc32692c760764ce2ce7411cf605dd510367f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:00cf28cf9a6c427962f922855a6cc32692c760764ce2ce7411cf605dd510367f resourceVersion: "14220" uid: bf9f1d58-cbe4-43b7-a7f4-69fd862c09f0 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c9281c141a1bfec06e291d2ad29bfdedfd10a99d583fc0f48d3c26723ebe0761 size: 75827357 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:31114e120ca0c7dc51e01721c5a689a614edb6c86de11301d503c72be1540c79 size: 1325 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:97908a410b902748f50f7a00125b2bd6762668e88bba5f4a0c0d4e6f916fd7fb size: 346120986 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/datagrid/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.2.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.2 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.2.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: c16126310035 Image: 3eb3eeda3399171b4c298813e2219138f8853ba30a4a7a1d3cb9eb441c9bdb1a Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T10:02:17.641107 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Red Hat 
JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.2.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.2.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.2.GA release: "2.1567588117" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.2-2.1567588117 vcs-ref: 67deb13da66750a5d3c41f77b006da42a9ee76e8 vcs-type: git vendor: Red Hat, Inc. version: "1.2" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.2.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.2 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.2.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: c16126310035 Image: sha256:99da8c1dc65f6d50835ebddf30111054b940f7a3f94b01821ab04539535961c5 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T10:02:17.641107 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.2.0 io.fabric8.s2i.version.jolokia: 
1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.2.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.2.GA release: "2.1567588117" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.2-2.1567588117 vcs-ref: 67deb13da66750a5d3c41f77b006da42a9ee76e8 vcs-type: git vendor: Red Hat, Inc. version: "1.2" User: "185" WorkingDir: /home/jboss Created: "2019-09-04T10:09:52Z" DockerVersion: 1.13.1 Id: sha256:9f07e18e8f8794425f581712f0daf8aa0d7fe4cc6298ce4c4bf2f05055637e02 Size: 421957660 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:01920073bbd480bd34e2d8e17dced64d342257fa9a263d1843edf1cc45a50a7c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:01920073bbd480bd34e2d8e17dced64d342257fa9a263d1843edf1cc45a50a7c resourceVersion: "13890" uid: ac550310-b854-44b6-b2eb-056ef12f8eff - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:79c20e727b94ea36ae8776eb9e4065b60dc1d396564a6a91ebb6ee334dfb5cea size: 79001473 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:526725917c850c5382c5cf83a3e8feebf44e29e34e293ba88a4336884a294be7 size: 18609572 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:029d17802350ff772a29106200a4b88238f0db6e3fec96c14435e92e18294a56 size: 154879763 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:18bc4f86cb290f4205bc684663bc9cea29e8cb1b9892289c6399f9c3138b0af8 size: 41134775 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - NODEJS_VER=20 - PHP_VERSION=8.0 - PHP_VER_SHORT=80 - NAME=php - SUMMARY=Platform for building and running PHP 8.0 applications - DESCRIPTION=PHP 8.0 available as container is a base platform for building and running various PHP 8.0 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. 
- PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ - APP_DATA=/opt/app-root/src - PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear - PHP_SYSCONF_PATH=/etc - PHP_HTTPD_CONF_FILE=php.conf - HTTPD_CONFIGURATION_PATH=/opt/app-root/etc/conf.d - HTTPD_MAIN_CONF_PATH=/etc/httpd/conf - HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d - HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d - HTTPD_VAR_RUN=/var/run/httpd - HTTPD_DATA_PATH=/var/www - HTTPD_DATA_ORIG_PATH=/var/www - HTTPD_VAR_PATH=/var ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2025-01-21T18:01:06 com.redhat.component: php-80-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: PHP 8.0 available as container is a base platform for building and running various PHP 8.0 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-php-container io.buildah.version: 1.33.8 io.k8s.description: PHP 8.0 available as container is a base platform for building and running various PHP 8.0 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. io.k8s.display-name: Apache 2.4 with PHP 8.0 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,php,php80,php-80 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/php-80 release: "157" summary: Platform for building and running PHP 8.0 applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/php-80/images/1-157 usage: s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=8.0/test/test-app ubi8/php-80 sample-server vcs-ref: ba2d688239bd5f982694de1f2fd86a2cf14db214 vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-01-21T18:04:06Z" Id: sha256:2569ec6fa768b179b3708810892f8d9f4170dea29a575168ed37d365254b55bc Size: 293651467 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/php-80@sha256:0194269c95cbc1aeb509611e283fd404791a0bc79911532bd9835bb51a19aff6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:0194269c95cbc1aeb509611e283fd404791a0bc79911532bd9835bb51a19aff6 resourceVersion: "14079" uid: b4e6b8df-6d14-4dbf-bdc9-c2051e527552 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a9e23b64ace00a199db21d302292b434e9d3956d79319d958ecc19603d00c946 size: 39622437 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:38b71301a1d9df24c98b5a5ee8515404f42c929003ad8b13ab83d2de7de34dec size: 1742 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b2f0e3786ba7d5fe750dc1adb8c654bc3a7e43ddc2980708ba1d93dfd56a796c size: 112815087 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 02d87ca75d31 Image: eacfec936445734e0a7266541a3bd3f8e2cdb579174f4655e197e023ea328f6d Labels: architecture: x86_64 build-date: 2022-03-28T09:44:20.823364 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running 
plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1648459552" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.11-1.1648459552 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: cabd719a48515652b21a9a98ca3094610b3df338 vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 02d87ca75d31 Image: sha256:3320c88e3477ed4089373445c70ee7d8549ad3af435409c0179ba14e4fe951bc Labels: architecture: x86_64 build-date: 2022-03-28T09:44:20.823364 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk 
org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1648459552" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.11-1.1648459552 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: cabd719a48515652b21a9a98ca3094610b3df338 vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss Created: "2022-03-28T09:50:43Z" DockerVersion: 1.13.1 Id: sha256:1ec3bc3a3fa2aa2b01e7b49d9c9bd98bfccccdbe8aec5b933b278a3ab58bdc0c Size: 152446782 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8@sha256:022488b1bf697b7dd8c393171a3247bef4ea545a9ab828501e72168f2aac9415 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:022488b1bf697b7dd8c393171a3247bef4ea545a9ab828501e72168f2aac9415 resourceVersion: "14208" uid: 5cf3489e-f50a-4641-a300-c6b1184e2b03 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d2bb55755c2edf5423e38bd42c48b277e0cb4c5c765218247001a8c8eb479a87 size: 215199578 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:24f3b28ce6cdd681f947607add74a19f36e917c4fd4c0c1267c344fb045af850 size: 23487344 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NODEJS_VER=20 - PHP_VERSION=8.2 - PHP_VER_SHORT=82 - NAME=php - SUMMARY=Platform for building and running PHP 8.2 applications - DESCRIPTION=PHP 8.2 available as container is a base platform for building and running various PHP 8.2 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. 
- PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ - APP_DATA=/opt/app-root/src - PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear - PHP_SYSCONF_PATH=/etc - PHP_HTTPD_CONF_FILE=php.conf - PHP_FPM_CONF_D_PATH=/etc/php-fpm.d - PHP_FPM_CONF_FILE=www.conf - PHP_FPM_RUN_DIR=/run/php-fpm - PHP_CLEAR_ENV=ON - PHP_MAIN_FPM_CONF_FILE=/etc/php-fpm.conf - PHP_FPM_LOG_PATH=/var/log/php-fpm - HTTPD_CONFIGURATION_PATH=/opt/app-root/etc/conf.d - HTTPD_MAIN_CONF_PATH=/etc/httpd/conf - HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d - HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d - HTTPD_VAR_RUN=/var/run/httpd - HTTPD_DATA_PATH=/var/www - HTTPD_DATA_ORIG_PATH=/var/www - HTTPD_VAR_PATH=/var ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T20:08:01Z" com.redhat.component: php-82-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: PHP 8.2 available as container is a base platform for building and running various PHP 8.2 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-php-container io.buildah.version: 1.41.4 io.k8s.description: PHP 8.2 available as container is a base platform for building and running various PHP 8.2 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. io.k8s.display-name: Apache 2.4 with PHP 8.2 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,php,php82,php-82 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/php-82 org.opencontainers.image.revision: 8877791cc654b87b090d1ccabc3aa73c5c972a9a release: "1760386053" summary: Platform for building and running PHP 8.2 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=8.2/test/test-app ubi9/php-82 sample-server vcs-ref: 8877791cc654b87b090d1ccabc3aa73c5c972a9a vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T20:08:08Z" Id: sha256:3703f0311b7409b93c12089ee53bbd06803e01f55ebfa8354791757c66ef2f90 Size: 335792412 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/php-82@sha256:02c1739b727e3b15a76cdd44f92cf91336d7dd34d5e830b2fe4ab3f2af48fe60 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:02c1739b727e3b15a76cdd44f92cf91336d7dd34d5e830b2fe4ab3f2af48fe60 resourceVersion: "14082" uid: cc0f4cab-c927-4b75-8fb3-5fd706390f0a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d2bb55755c2edf5423e38bd42c48b277e0cb4c5c765218247001a8c8eb479a87 size: 215199578 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:68214f4c07c8b187e0330525f5bde9ee8d07de0cbe382bc2a71142a1adf37630 size: 74581578 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el9 - NODEJS_VER=20 - PYTHON_VERSION=3.11 - PATH=/opt/app-root/src/.local/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PYTHONUNBUFFERED=1 - PYTHONIOENCODING=UTF-8 - LC_ALL=en_US.UTF-8 - LANG=en_US.UTF-8 - CNB_STACK_ID=com.redhat.stacks.ubi9-python-311 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - PIP_NO_CACHE_DIR=off - SUMMARY=Platform for building and running Python 3.11 applications - DESCRIPTION=Python 3.11 available as container is a base platform for building and running various Python 3.11 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. - BASH_ENV=/opt/app-root/bin/activate - ENV=/opt/app-root/bin/activate - PROMPT_COMMAND=. /opt/app-root/bin/activate ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-14T05:46:35Z" com.redhat.component: python-311-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Python 3.11 available as container is a base platform for building and running various Python 3.11 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. 
Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. distribution-scope: public io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi9-python-311 io.k8s.description: Python 3.11 available as container is a base platform for building and running various Python 3.11 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. io.k8s.display-name: Python 3.11 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,python,python311,python-311,rh-python311 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/python-311 org.opencontainers.image.revision: 84052fc1eca7c447b0e3054540aa8cc7f5e993ed release: "1760420762" summary: Platform for building and running Python 3.11 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-python-container.git --context-dir=3.11/test/setup-test-app/ ubi9/python-311 python-sample-app vcs-ref: 84052fc1eca7c447b0e3054540aa8cc7f5e993ed vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-14T05:46:50Z" Id: sha256:b709bd41f2bb4c7afdcce17e5fe447b290f07477f2af8d43b581c5ec25d3ea46 Size: 386885017 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/python-311@sha256:04b6d6c9d05857ca39c360caf8698f3081360eeffe85da2eaab3e7e91040030b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:06Z" name: sha256:04b6d6c9d05857ca39c360caf8698f3081360eeffe85da2eaab3e7e91040030b resourceVersion: "14126" uid: 174ca5bc-4bf3-4fc0-b62b-78b50cfbc29f - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d7c06497d5cebd39c0a4feb14981ec940b5c863e49903d320f630805b049cbff size: 39279912 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b212228af90322a6b2d2422f3c21f1c2e04cf4ed316c6e789eccc4b8fd5c37d1 size: 111170397 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - 
JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.14 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-02-07T17:22:26 com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "12.1675788288" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.14-12.1675788288 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: a305d5df96a43cd9cc90d723bcea4e824f917836 vcs-type: git vendor: Red Hat, Inc. 
version: "1.14" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-02-07T17:33:43Z" Id: sha256:eb612086819b66209ef77e4e424c38cf311c5490594dc6401da1c6ceeeb87c0f Size: 150480601 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11@sha256:0669a28577b41bb05c67492ef18a1d48a299ac54d1500df8f9f8f760ce4be24b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:0669a28577b41bb05c67492ef18a1d48a299ac54d1500df8f9f8f760ce4be24b resourceVersion: "14175" uid: b95fed3c-965d-4baf-853f-e2a4af3b3015 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c9281c141a1bfec06e291d2ad29bfdedfd10a99d583fc0f48d3c26723ebe0761 size: 75827357 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:31114e120ca0c7dc51e01721c5a689a614edb6c86de11301d503c72be1540c79 size: 1325 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ebb6f9f5a86f545f3089e00644d627ac4efa1caa2bf80524a27d2aabe13621e4 size: 107810095 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HOME=/home/jboss - JAVA_DATA_DIR=/deployments/data - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_HAWKULAR_MODULE=/opt/jboss/container/hawkular - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.6 - JOLOKIA_VERSION=1.5.0 - MAVEN_VERSION=3.5 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: c16126310035 Image: 9c8cdff623ea06fcf4498a5e32e90868a9d9a393a50762cdf05d3034d15cffbc Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T09:59:10.737369 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 
io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: 8080/tcp:webcache,8443/tcp:pcsync-https io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java name: redhat-openjdk-18/openjdk18-openshift org.concrt.version: 2.2.1 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "23.1567588116" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.6-23.1567588116 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 5bdb7e62aa6179193d5fe001006126c053d4f820 vcs-type: git vendor: Red Hat, Inc. version: "1.6" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HOME=/home/jboss - JAVA_DATA_DIR=/deployments/data - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_HAWKULAR_MODULE=/opt/jboss/container/hawkular - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.6 - JOLOKIA_VERSION=1.5.0 - MAVEN_VERSION=3.5 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: c16126310035 Image: sha256:a7c217f8b8ba716e0093809740b942db647b6af05dafa60bcb5185e7a2d02531 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T09:59:10.737369 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications 
io.openshift.expose-services: 8080/tcp:webcache,8443/tcp:pcsync-https io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java name: redhat-openjdk-18/openjdk18-openshift org.concrt.version: 2.2.1 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "23.1567588116" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.6-23.1567588116 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 5bdb7e62aa6179193d5fe001006126c053d4f820 vcs-type: git vendor: Red Hat, Inc. version: "1.6" User: "185" WorkingDir: /home/jboss Created: "2019-09-04T10:03:46Z" DockerVersion: 1.13.1 Id: sha256:74c8511ec481bb881c4c297ce6bd05fbf188587cadb9f7895c70bc0e3162b645 Size: 183646588 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:06bbbf9272d5c5161f444388593e9bd8db793d8a2d95a50b429b3c0301fafcdd kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:06bbbf9272d5c5161f444388593e9bd8db793d8a2d95a50b429b3c0301fafcdd resourceVersion: "14174" uid: 6726d73c-7d2d-4da4-821c-4a5727fc8a29 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2e449f0d8596b91719b366a7f134954cd2a03e99408e3899b004182af82a6979 size: 79829956 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7fff4c4748a270604546349c7386ae461ba01a71b193d651cfcb0abc5fa88a34 size: 7552327 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0b8fa8a9a8dc81f189373242af953709ea22ffa2b0a9c6d6070dc5027c12d432 size: 106012003 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:126a494c3c5f3513f4482bdcc5f0608a41c2f413c33b350e3ef2daef61cbe07c size: 164861497 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el7 - BASH_ENV=/opt/app-root/etc/scl_enable - ENV=/opt/app-root/etc/scl_enable - PROMPT_COMMAND=. /opt/app-root/etc/scl_enable - NODEJS_SCL=rh-nodejs14 - NAME=golang - VERSION=1.18.10 - SUMMARY=Platform for building and running Go applications - DESCRIPTION=Go Toolset available as a container is a base platform for building and running various Go applications and frameworks. Go is an easy to learn, powerful, statically typed language in the C/C++ tradition with garbage collection, concurrent programming support, and memory safety features. 
Labels: architecture: x86_64 build-date: 2023-04-12T16:30:01 com.redhat.component: go-toolset-container com.redhat.license_terms: https://www.redhat.com/agreements description: Go Toolset available as a container is a base platform for building and running various Go applications and frameworks. Go is an easy to learn, powerful, statically typed language in the C/C++ tradition with garbage collection, concurrent programming support, and memory safety features. distribution-scope: public io.buildah.version: 1.27.3 io.k8s.description: Go Toolset available as a container is a base platform for building and running various Go applications and frameworks. Go is an easy to learn, powerful, statically typed language in the C/C++ tradition with garbage collection, concurrent programming support, and memory safety features. io.k8s.display-name: Go 1.18.10 io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,golang,golang117,rh-golang117,go io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. name: devtools/go-toolset-rhel7 release: "6.1681314820" summary: Platform for building and running Go applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/devtools/go-toolset-rhel7/images/1.18.10-6.1681314820 vcs-ref: 10d1ace5725c33abb5ceaf9c297d54bd34a13de9 vcs-type: git vendor: Red Hat, Inc. version: 1.18.10 User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2023-04-12T16:32:45Z" Id: sha256:524410e529e4a936dfcc37a6f8fa90f068ff9b3419fe5e9a264e8e359c11531b Size: 358277196 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi7/go-toolset@sha256:07addbabcfd72212a82efce053a70362a06925ee1522c4dd783be878ffad46cb kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:07addbabcfd72212a82efce053a70362a06925ee1522c4dd783be878ffad46cb resourceVersion: "13443" uid: 1f245879-53c0-4fff-97bf-7ba4293f16b9 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:25c75c34b2e2b68ba9245d9cddeb6b8a0887371ed30744064f85241a75704d87 size: 79262296 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:67705065e025181e4faca8aabe1305bdd92f5bdf8a2b8009cdb69183ac2e2c47 size: 49851946 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9d458e2e81cb0fa811f569aaf711628309c0372c7d5eed4a8ea9ec96b4aeeb42 size: 9300456 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e556fa14a02c7e2f1c7f24af127ffe7b0d4375fe2f1f4622fab0817cfc8b3ae7 size: 1603140567 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - ART_BUILD_ENGINE=konflux - ART_BUILD_DEPS_METHOD=cachi2 - ART_BUILD_NETWORK=hermetic - ART_BUILD_DEPS_MODE=default - __doozer=merge - BUILD_RELEASE=202510211040.p2.ge238076.assembly.stream.el9 - BUILD_VERSION=v4.20.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=20 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.20.0-202510211040.p2.ge238076.assembly.stream.el9-e238076 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.20 - __doozer_key=ose-installer-artifacts - 
__doozer_uuid_tag=ose-installer-artifacts-rhel9-v4.20.0-20251021.105557 - __doozer_version=v4.20.0 - OS_GIT_COMMIT=e238076 - SOURCE_DATE_EPOCH=1760727874 - SOURCE_GIT_COMMIT=e23807689ec464da30e771dda70fd8989680a011 - SOURCE_GIT_TAG=v1.4.19-ec5-379-ge23807689e - SOURCE_GIT_URL=https://github.com/openshift/installer Labels: License: GPLv2+ architecture: x86_64 build-date: "2025-10-21T13:37:06Z" com.redhat.component: ose-installer-artifacts-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:openshift:4.20::el9 description: Empty distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Empty io.k8s.display-name: Empty io.openshift.build.commit.id: e23807689ec464da30e771dda70fd8989680a011 io.openshift.build.commit.url: https://github.com/openshift/installer/commit/e23807689ec464da30e771dda70fd8989680a011 io.openshift.build.source-location: https://github.com/openshift/installer io.openshift.expose-services: "" io.openshift.maintainer.component: Installer / openshift-installer io.openshift.maintainer.project: OCPBUGS io.openshift.release.operator: "true" io.openshift.tags: Empty maintainer: Red Hat, Inc. name: openshift/ose-installer-artifacts-rhel9 org.opencontainers.image.revision: 2b6efba3ac3cacf5a896b33a86cafef7789bb1a2 release: 202510211040.p2.ge238076.assembly.stream.el9 summary: Empty url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel-els/images/9.4-847.1719484506 vcs-ref: 2b6efba3ac3cacf5a896b33a86cafef7789bb1a2 vcs-type: git vendor: Red Hat, Inc. version: v4.20.0 ContainerConfig: {} Created: "2025-10-21T14:09:09Z" Id: sha256:d6c3fc36ab859b726e47238af5ca312464463e1d1c6ffcbf9071acbd87c2e82c Size: 1741576693 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0a1a889dcfb66dfe73d30f6a7a18dace8796e66e9f2203de97955500ad76f4aa kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:0a1a889dcfb66dfe73d30f6a7a18dace8796e66e9f2203de97955500ad76f4aa resourceVersion: "13330" uid: 5534c026-f3c7-42d5-ba54-0eac527dd513 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d69140bdce18c2f525b2ad0cc3998a1c6f2bc0a850353b7b7feac66eca1da526 size: 75854078 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a82dd37af30d5ff9e805ceea67ea615a17dfaafba3135b12e6b2dab29ee2cff2 size: 1264 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9912708161e166fd25db1f05e024296dd82854ab2c2fab9e91bb5d1bca8864e9 size: 273648845 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/datagrid/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - 
JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_DATAGRID_VERSION=7.3.0.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.0 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - JOLOKIA_VERSION=1.5.0 - LAUNCH_JBOSS_IN_BACKGROUND=true - MAVEN_VERSION=3.5 - PRODUCT_VERSION=7.3.0.GA - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: ed75d4430067 Image: 93e4722f3a6f12bdaefa2157c427c28d85451b6a6740d97925a9bc65f7581d5e Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-05-06T22:52:07.700549 com.redhat.build-host: cpt-0003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/licenses/eulas description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 2.2.7 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.concrt.version: 2.2.7 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.0.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.3.0.GA release: "3" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.0-3 vcs-ref: ced81ccfb5b1202379e1ef311e0128f62a7abfb7 vcs-type: git vendor: Red Hat, Inc. 
version: "1.0" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_DATAGRID_VERSION=7.3.0.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.0 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - JOLOKIA_VERSION=1.5.0 - LAUNCH_JBOSS_IN_BACKGROUND=true - MAVEN_VERSION=3.5 - PRODUCT_VERSION=7.3.0.GA - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: ed75d4430067 Image: sha256:a03a0d2d69476e9d4f4e83882afa8718fa902df9d3cfcc941f2df5c7e87528de Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-05-06T22:52:07.700549 com.redhat.build-host: cpt-0003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/licenses/eulas description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 2.2.7 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.concrt.version: 2.2.7 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.0.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.3.0.GA release: "3" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.0-3 vcs-ref: ced81ccfb5b1202379e1ef311e0128f62a7abfb7 vcs-type: git vendor: Red Hat, Inc. 
version: "1.0" User: "185" WorkingDir: /home/jboss Created: "2019-05-06T22:57:56Z" DockerVersion: 1.13.1 Id: sha256:d4b11285c6b753f4bc2bba205a56aeba8c069a95df61ac2c44be585077fde53b Size: 349512144 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:0a3a55052f6e8df1ea48de0c429c39e072b6aa8818250ccee634f96acacfd7c7 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:0a3a55052f6e8df1ea48de0c429c39e072b6aa8818250ccee634f96acacfd7c7 resourceVersion: "13883" uid: 0d0891fa-4574-4c67-830a-ced35b1e4273 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c26c53d1c36b15639046521ee8385d1ecf266b8bd2666a5386ac609586826fd size: 129289489 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el8 - NODEJS_VERSION=20 - NPM_RUN=start - NAME=nodejs - NPM_CONFIG_PREFIX=/opt/app-root/src/.npm-global - PATH=/opt/app-root/src/node_modules/.bin/:/opt/app-root/src/.npm-global/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - CNB_STACK_ID=com.redhat.stacks.ubi8-nodejs-20 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - SUMMARY=Platform for building and running Node.js 20 applications - DESCRIPTION=Node.js 20 available as container is a base platform for building and running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-28T04:27:47Z" com.redhat.component: nodejs-20-container com.redhat.deployments-dir: /opt/app-root/src com.redhat.dev-mode: DEV_MODE:false com.redhat.dev-mode.port: DEBUG_PORT:5858 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Node.js 20 available as container is a base platform for building and running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. 
distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-nodejs-container io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi8-nodejs-20 io.k8s.description: Node.js 20 available as container is a base platform for building and running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. io.k8s.display-name: Node.js 20 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nodejs,nodejs20 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/nodejs-20 org.opencontainers.image.revision: e3b2d882ca8bb840b7ff927ee91d2b24733e7502 release: "1761625603" summary: Platform for building and running Node.js 20 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi8/nodejs-20:latest vcs-ref: e3b2d882ca8bb840b7ff927ee91d2b24733e7502 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-28T04:28:15Z" Id: sha256:2262103dee868fb05af4d5744be2e5fb65d96cad5d8ce3698c67a036dc362ba8 Size: 224848286 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/nodejs-20@sha256:0ab9d7fe68c5aef87b591e40bd6be7479e913798348f86ac43d7c0357794bb3f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:0ab9d7fe68c5aef87b591e40bd6be7479e913798348f86ac43d7c0357794bb3f resourceVersion: "14088" uid: 39a20b22-012d-44a4-b840-4f90e25c794c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:55fe005a89f86f773a64202c819856af27f5435cd8e3c9af19672a0ca72c0a7c size: 130299748 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el9 - NODEJS_VERSION=20 - NPM_RUN=start - NAME=nodejs - NPM_CONFIG_PREFIX=/opt/app-root/src/.npm-global - PATH=/opt/app-root/src/node_modules/.bin/:/opt/app-root/src/.npm-global/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - CNB_STACK_ID=com.redhat.stacks.ubi9-nodejs-20 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - SUMMARY=Platform for building and running Node.js 20 applications - DESCRIPTION=Node.js 20 available as container is a base platform for building and running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. 
Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-28T04:29:50Z" com.redhat.component: nodejs-20-container com.redhat.deployments-dir: /opt/app-root/src com.redhat.dev-mode: DEV_MODE:false com.redhat.dev-mode.port: DEBUG_PORT:5858 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Node.js 20 available as container is a base platform for building and running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-nodejs-container io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi9-nodejs-20 io.k8s.description: Node.js 20 available as container is a base platform for building and running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. io.k8s.display-name: Node.js 20 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nodejs,nodejs20 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/nodejs-20 org.opencontainers.image.revision: ba51077abb8f36c029a729dfd10d03f5f4ff3590 release: "1761625725" summary: Platform for building and running Node.js 20 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi9/nodejs-20:latest vcs-ref: ba51077abb8f36c029a729dfd10d03f5f4ff3590 vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-28T04:30:05Z" Id: sha256:8685effefab381bd14d40df6c9c513c0833ddc7da8a897caf85baa4c62c45481 Size: 227400252 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/nodejs-20@sha256:0d5551af0b9188a88eb39bf9745f992b1ab2ce4839d4a0555b59c58b5f3f6412 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:0d5551af0b9188a88eb39bf9745f992b1ab2ce4839d4a0555b59c58b5f3f6412 resourceVersion: "14089" uid: f6343cc8-8498-4c90-aeda-b28646a4b369 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:06f86e50a0b74ff9eb161a7d781228877c90e8ff57e9689e8cb8b0f092a2a9f9 size: 39268171 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3ba3333d1709318bb6e11b393259e7fb8977b0afb1489f4f030ea640ecf428e3 size: 116813486 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.15 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-04-21T04:57:59 com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 4.3.0 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "1.1682053058" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: 
https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.15-1.1682053058 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 64e82bc4e12a24837e440904151526afb2a2fe1b vcs-type: git vendor: Red Hat, Inc. version: "1.15" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-04-21T05:10:17Z" Id: sha256:4d3db921ed0efa6fba3a14865cd9a17457693ad7d20c7701394c831358766acb Size: 156109729 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17@sha256:0eea1d20aaa26041edf26b925fb204d839e5b93122190191893a0299b2e1b589 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:0eea1d20aaa26041edf26b925fb204d839e5b93122190191893a0299b2e1b589 resourceVersion: "14146" uid: abfd7565-c806-4d69-95b6-f3115ee3f103 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:76608b6b9d54251299c5d3be69fdf53e05f97a3735bbcd5889c30ebb78608428 size: 75827462 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3c81a5d20855a6cef8b997d709410e047e2839b5ad113f4c34d25e9fae9e3beb size: 1266 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:cf313e71c9fd8209b9df2ee0fb7471c014b3ec6f7144546b88ad7c46b5fb2cd4 size: 3891763 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:07c69678314f4cb7384c115ffd5040765fe1fe42db1b8c789af11ce865771f7b size: 84375848 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7ed15802bf28dc9bb8b0cd6444082661b0afb1c78519d6b89ed3634a96db8e10 size: 26652483 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.2 - JOLOKIA_VERSION=1.5.0.redhat-1 - KARAF_FRAMEWORK_VERSION=4.2.0.fuse-720061-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 4072265dcbc2 Image: d8d8b46372545d04c96dc972b55419fc9c5a27e5ff7e9be0949c210eaf6628bc Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-04-17T17:07:52.793607 com.redhat.build-host: cpt-0002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/licenses/eulas description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.karaf: 4.2.0.fuse-720061-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications 
io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Dhiraj Bokde name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "10.1555516883" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.2-10.1555516883 vcs-ref: cd49f262c6da2881ddc7ebb46cd69009e472c78d vcs-type: git vendor: Red Hat, Inc. version: "1.2" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.2 - JOLOKIA_VERSION=1.5.0.redhat-1 - KARAF_FRAMEWORK_VERSION=4.2.0.fuse-720061-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 4072265dcbc2 Image: sha256:2f9245e0f02f784c280d2be34ec9e58fded9a51121ada54d4c9adebf87a9ec62 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-04-17T17:07:52.793607 com.redhat.build-host: cpt-0002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/licenses/eulas description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.karaf: 4.2.0.fuse-720061-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Dhiraj Bokde name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "10.1555516883" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.2-10.1555516883 vcs-ref: cd49f262c6da2881ddc7ebb46cd69009e472c78d vcs-type: git vendor: Red Hat, Inc. 
version: "1.2" User: "185" WorkingDir: /home/jboss Created: "2019-04-17T17:10:45Z" DockerVersion: 1.13.1 Id: sha256:47e204e5513257f9d649674aef3bf75227d79368ee6b4e08c8d3ea2757779d40 Size: 190755025 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:13577236b039ed11e9f1070f884e9836e731944575de2ee59b290b05e08ad5f8 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:13577236b039ed11e9f1070f884e9836e731944575de2ee59b290b05e08ad5f8 resourceVersion: "14026" uid: 35da11b0-1ace-4bac-80dc-3f20ac7ba064 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e74659e3e033616f4f0731f4c22814ff4543cb3c1b85a05f6484647b4fea7b3d size: 136155585 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2a2353c2e8f9aca8ce43a2cb751b126340cb9455e418eaa9cf3515fec24f2668 size: 5389944 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_HTTPS=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.17 - LANG=C.utf8 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift-jdk11-rhel8 - FUSE_KARAF_IMAGE_VERSION=1.12 - JOLOKIA_VERSION=1.7.2.redhat-00002 - KARAF_FRAMEWORK_VERSION=4.4.3.fuse-7_12_1-00009-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.18.0.redhat-00001 - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Labels: architecture: x86_64 build-date: 2024-05-23T17:52:18 com.redhat.component: fuse-karaf-openshift-jdk11-rhel-8-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 
io.fabric8.s2i.version.jolokia: 1.7.2.redhat-00002 io.fabric8.s2i.version.karaf: 4.4.3.fuse-7_12_1-00009-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.18.0.redhat-00001 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift-jdk11-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "20.1716485725" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift-jdk11-rhel8/images/1.12-20.1716485725 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: 6036739709302e2d07ffc82f65aac994e47173ee vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-05-23T17:54:46Z" Id: sha256:cba70185de70abde58315a01c3d4d07c680b12d031dfd1ff23d661e48c2ab63c Size: 180894665 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift-jdk11-rhel8@sha256:144941f6745ed291d11d94c66037cfbb1d7cd9cf28db4f2234d9265efe767eff kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:144941f6745ed291d11d94c66037cfbb1d7cd9cf28db4f2234d9265efe767eff resourceVersion: "13284" uid: aa56382f-fc51-47ca-aa84-32a6f9df3edb - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7f2c2c4492b6b2d181be862a0a1d1b6f6851cb07244efbcb43d44f9936aa78d5 size: 80005019 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ea092d7970b26c24007a670fc6d0810dbf9531dc0d3a9d6ea514134ba5686724 size: 7541063 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:512b052e86a3145118bb3f5b4c88a627e33696c374a01ea6363daebbb8868a65 size: 66269555 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - run-mysqld Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el7 - MYSQL_VERSION=10.5 - APP_DATA=/opt/app-root/src - HOME=/var/lib/mysql - SUMMARY=MariaDB 10.5 SQL database server - DESCRIPTION=MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. 
- CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql - MYSQL_PREFIX=/opt/rh/rh-mariadb105/root/usr - ENABLED_COLLECTIONS=rh-mariadb105 - BASH_ENV=/usr/share/container-scripts/mysql/scl_enable - ENV=/usr/share/container-scripts/mysql/scl_enable - PROMPT_COMMAND=. /usr/share/container-scripts/mysql/scl_enable ExposedPorts: 3306/tcp: {} Labels: architecture: x86_64 build-date: 2024-05-30T11:32:54 com.redhat.component: rh-mariadb105-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel description: MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. distribution-scope: public io.buildah.version: 1.29.0 io.k8s.description: MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. io.k8s.display-name: MariaDB 10.5 io.openshift.expose-services: 3306:mysql io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,mysql,mariadb,mariadb105,rh-mariadb105 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhscl/mariadb-105-rhel7 release: "117" summary: MariaDB 10.5 SQL database server url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/mariadb-105-rhel7/images/1-117 usage: docker run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhscl/mariadb-105-rhel7 vcs-ref: 27d1f72a7ae349bd764e4339e98760ecb4bc209e vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "27" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2024-05-30T11:35:39Z" Id: sha256:2ec1a3e868f0672eeea1d5d8087bf28ab4dd0922ed6880a95ace70d33206e465 Size: 153835055 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhscl/mariadb-105-rhel7@sha256:146789aaa36b11c2194c3a1cd1bbc9d56016a67cf2791401b59c339983dd2a5e kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:146789aaa36b11c2194c3a1cd1bbc9d56016a67cf2791401b59c339983dd2a5e resourceVersion: "13841" uid: 151bcab6-0eef-41a5-9792-4b7b9af235cf - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:06f86e50a0b74ff9eb161a7d781228877c90e8ff57e9689e8cb8b0f092a2a9f9 size: 39268171 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:122eac69054b22f81a29f37eb7effe0a3038861b977db932717c5e068f649107 size: 92495022 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.15 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-04-21T04:57:58 com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 4.3.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "1.1682053056" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.15-1.1682053056 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 6a5c5590c7d59e1b896774e585212805732b5471 vcs-type: git vendor: Red Hat, Inc. 
version: "1.15" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-04-21T05:07:45Z" Id: sha256:8fbbe930e704e3c67bcb081b63dcf705304e35f7a42a7a9dac8b169ce4c94aa6 Size: 131781843 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:14de89e89efc97aee3b50141108b7833708c3a93ad90bf89940025ab5267ba86 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:14de89e89efc97aee3b50141108b7833708c3a93ad90bf89940025ab5267ba86 resourceVersion: "14155" uid: b2044c96-0f73-410c-aebd-c11c8c3ad90c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:50be7bb6ce3ddb41606e1956ba5c61072699ac536980f260a0db6dc59c8013fe size: 39575081 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7646767ee9c1a95da81f72abc27df878a7d269206a1b13b6c9800ab249e506fc size: 91102728 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.23 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2025-10-22T20:10:44 com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.buildah.version: 1.33.12 io.cekit.version: 4.13.0.dev0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" org.opencontainers.image.documentation: https://rh-openjdk.github.io/redhat-openjdk-containers/ release: "3.1761163790" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.23-3.1761163790 usage: https://rh-openjdk.github.io/redhat-openjdk-containers/ vcs-ref: b40c568e2fa6d032648af2c70f3fe4f0cbf5ce66 vcs-type: git vendor: Red Hat, Inc. 
version: "1.23" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-10-22T20:21:26Z" Id: sha256:dd5977348fec2f471126802b174ae69c312e40255d812da77a5020200b13ccd5 Size: 130697700 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:15ca7a76fdcca80b800ea420857782badd4960d99d120322c255462c098ed641 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:15ca7a76fdcca80b800ea420857782badd4960d99d120322c255462c098ed641 resourceVersion: "13512" uid: 3642623d-aa7a-4ca4-877e-efc18b15ef52 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c8c24bf4ef435ee29d7495ca732a4d82374c1a11c25ca6aae12f997f45ca5a26 size: 39733597 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2444aca3edc3c9e28e56a5c4d7c88c3051c17434b6856023181a3334b6674430 size: 62785065 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - '"./${DOTNET_DEFAULT_CMD}"' Env: - container=oci - HOME=/opt/app-root - PATH=/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - DOTNET_APP_PATH=/opt/app-root/app - DOTNET_DATA_PATH=/opt/app-root/data - DOTNET_DEFAULT_CMD=default-cmd.sh - DOTNET_RUNNING_IN_CONTAINER=true - NUGET_XMLDOC_MODE=skip - ASPNETCORE_URLS=http://*:8080 - APP_UID=1001 - DOTNET_VERSION=8.0.21 - ASPNET_VERSION=8.0.21 ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T16:25:34Z" com.redhat.component: dotnet-80-runtime-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Platform for running .NET 8 applications distribution-scope: public dotnet_version: 8.0.21 io.buildah.version: 1.41.4 io.k8s.description: Platform for running .NET 8 applications io.k8s.display-name: .NET 8 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: runtime,.net,dotnet,dotnetcore,dotnet80-runtime io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. name: ubi8/dotnet-80-runtime org.opencontainers.image.revision: 4907080e4fc083e8b03727da3ea81a299a8b009c release: "1761063846" summary: .NET 8 runtime url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: 4907080e4fc083e8b03727da3ea81a299a8b009c vcs-type: git vendor: Red Hat, Inc. 
version: "8.0" User: "1001" WorkingDir: /opt/app-root/app ContainerConfig: {} Created: "2025-10-21T16:25:46Z" Id: sha256:9237965377fe470e6f8145631e1cb8b993b464406af3bca26d8da22d8f2bdf62 Size: 102531391 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/dotnet-80-runtime@sha256:16aacc57365922ce9329f1306153e444a02c4883b5b6ea648b0e47ef286df396 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:16aacc57365922ce9329f1306153e444a02c4883b5b6ea648b0e47ef286df396 resourceVersion: "13456" uid: b7e5d4d4-f9fd-476e-b4c8-fb6b70aeea80 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:06f86e50a0b74ff9eb161a7d781228877c90e8ff57e9689e8cb8b0f092a2a9f9 size: 39268171 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1500af7621666bdae42658c39c0dc66ea092cda488d5e24241f2d81d1ad8afe1 size: 166780168 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:52c18fec11548362c672912bbfced716a5d386ca9d5bb66f7203b4dfbb223037 size: 5921596 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_HTTPS=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.14 - LANG=C.utf8 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift-rhel8 - FUSE_JAVA_IMAGE_VERSION=1.11 - JOLOKIA_VERSION=1.7.1.redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i:/usr/local/s2i - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Labels: architecture: x86_64 build-date: 2023-06-19T14:50:39 com.redhat.component: fuse-java-openshift-rhel-8-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 
io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "51.1687184685" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift-rhel8/images/1.11-51.1687184685 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: ab24e36120b1d3ad6a0b7d193f09dba8b20892d2 vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-06-19T14:54:07Z" Id: sha256:5aceffb1d18a3fdef160339fe059108396ce0c9130fcdb5c3f35fe1e6253f0b0 Size: 212010284 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift-rhel8@sha256:1b7ca459c309d850b031f63618176b460fa348899201a6a82a5a42c75d09563d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:1b7ca459c309d850b031f63618176b460fa348899201a6a82a5a42c75d09563d resourceVersion: "14039" uid: 3ed941ea-b396-4615-8e42-3896877bff3b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:551849931ba0dec31d2e0b8b4490c168ccf5c5c75215fa094860547b6ae6a94e size: 33442256 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:dc337e84388ed5a4fa04bbbdbb0c9bf6ed33b1e3e615edce4e1529805a3832fe size: 25291412 dockerImageManifestMediaType: application/vnd.oci.image.manifest.v1+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Env: - container=oci - PYTHON_VERSION=3.12 - PYTHONUNBUFFERED=1 - PYTHONIOENCODING=UTF-8 - LC_ALL=en_US.UTF-8 - LANG=en_US.UTF-8 - CNB_STACK_ID=com.redhat.stacks.ubi10-python-312 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - PIP_NO_CACHE_DIR=off - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el10 - PATH=/opt/app-root/bin:/opt/app-root/src/bin:/opt/app-root/src/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - BASH_ENV=/opt/app-root/etc/scl_enable - ENV=/opt/app-root/etc/scl_enable - PROMPT_COMMAND=. /opt/app-root/etc/scl_enable - SUMMARY=Minimal platform for building and running Python 3.12 applications - DESCRIPTION=Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. 
ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-09-24T20:11:21Z" com.redhat.component: python-312-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/o:redhat:enterprise_linux:10.0 description: Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. distribution-scope: public io.buildah.version: 1.41.3 io.buildpacks.stack.id: com.redhat.stacks.ubi10-python-312-minimal io.k8s.description: Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. io.k8s.display-name: Python 3.12 io.openshift.expose-services: 8080:http io.openshift.tags: builder,python,python312,python-312,rh-python312 maintainer: SoftwareCollections.org name: ubi10/python-312-minimal org.opencontainers.image.revision: ce7da1608545e0c2787de380fac2ebcc3d170bb6 release: "1758744656" summary: Minimal platform for building and running Python 3.12 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-python-container.git --context-dir=3.12-minimal/test/setup-test-app/ ubi10/python-312-minimal python-sample-app vcs-ref: ce7da1608545e0c2787de380fac2ebcc3d170bb6 vcs-type: git vendor: Red Hat, Inc. 
version: "10.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-09-24T20:11:28Z" Id: sha256:36731e5a8253474ecf2e70403210051c451908672c289e5db52ecb29beb5aaa3 Size: 58746949 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi10/python-312-minimal@sha256:1ce541f489f72d4b354b96e9ad8b8f4e27099534d1cf5cebdfd505a1825f6545 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:06Z" name: sha256:1ce541f489f72d4b354b96e9ad8b8f4e27099534d1cf5cebdfd505a1825f6545 resourceVersion: "14127" uid: e302893c-9010-44e3-941e-4cf00971f37a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c43687042a41aad69fc526985ef2b82012c011db7e0e26faba4fc860ad32d88e size: 75837780 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2b7b014ba1b80abb29391141385bd32668571313647317d1d64d8b5cebb1f228 size: 1331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2825299765472d0b62c1ed19ebb564a8a191b88ce49639471a274d03e7f9151e size: 3910026 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5832c11acc9bb0420072ec62a6cdcea1d8226ed89e430e3086f81bc7866631c2 size: 84374210 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f1bb2f47b521fb59f8a4590286fa7203e8abd6f5e689f8e657e8b9b18e9874c6 size: 15586020 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.3 - JOLOKIA_VERSION=1.5.0.redhat-1 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 8ea581222c6a Image: 5b9d0512d6b123ac381febcad123629e032d0792908a1e96b7bc1608a38ac78e Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-06-28T22:26:13.092645 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Dhiraj Bokde name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 
1.8.0 release: "13.1561751841" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.3-13.1561751841 vcs-ref: 5ceb5b20cb9d869438898007c2b654565213a789 vcs-type: git vendor: Red Hat, Inc. version: "1.3" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.3 - JOLOKIA_VERSION=1.5.0.redhat-1 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 8ea581222c6a Image: sha256:c4ad52bee4ce91350bfeba9e154fb1ba8e54eaf2c4530a0867548fbbefce03a0 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-06-28T22:26:13.092645 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Dhiraj Bokde name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "13.1561751841" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.3-13.1561751841 vcs-ref: 5ceb5b20cb9d869438898007c2b654565213a789 vcs-type: git vendor: Red Hat, Inc. 
version: "1.3" User: "185" WorkingDir: /home/jboss Created: "2019-06-28T22:28:04Z" DockerVersion: 1.13.1 Id: sha256:7ef3a2c26dbf1c3ac74d9d42a26d1666fd31de7f8eed883a267c47d71090c4be Size: 179715363 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift@sha256:1d526f710f53ea2805441bbd04057242715d6fe2c91257b1ccd53c7a72c499be kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:1d526f710f53ea2805441bbd04057242715d6fe2c91257b1ccd53c7a72c499be resourceVersion: "14048" uid: 01866d35-623c-43e7-9b84-89f33728d0f0 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8e663e919f6cd0805d39d14202a5c0b789e7df3c3051c54170b428086a1c9a91 size: 76431158 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1e6175de2c956530fa18c8a30722bf828d70a720afa2f2e481bfb4c520963c91 size: 1550 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2f43ddde7a16cdb478bdd65bc11fa9d36ce8f9b4d79a9fd14fb55eca50896695 size: 114124591 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.1 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: f1d2c1bacaf9 Image: 52c600ad0bf93b882f403ca5caf69c0892efc98d71b70c520f10e623d9022e3e Labels: architecture: x86_64 build-date: 2021-07-19T14:11:58.537508 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and 
flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "14" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.1-14 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 0d5a34f0cee16a892d09e0256bfd9f447b72497b vcs-type: git vendor: Red Hat, Inc. version: "1.1" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.1 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: f1d2c1bacaf9 Image: sha256:78765e2d17dad7478c83077d01d1c5d50b6d9425f59e33991c0b2c553737dbe0 Labels: architecture: x86_64 build-date: 2021-07-19T14:11:58.537508 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "14" summary: Source To Image (S2I) image for Red Hat 
OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.1-14 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 0d5a34f0cee16a892d09e0256bfd9f447b72497b vcs-type: git vendor: Red Hat, Inc. version: "1.1" User: "185" WorkingDir: /home/jboss Created: "2021-07-19T14:17:41Z" DockerVersion: 1.13.1 Id: sha256:129324a49414cc20ac92ca68484b9fb50f7d17881f908602305671db24918672 Size: 190564655 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:1d68b58a73f4cf15fcd886ab39fddf18be923b52b24cb8ec3ab1da2d3e9bd5f6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:1d68b58a73f4cf15fcd886ab39fddf18be923b52b24cb8ec3ab1da2d3e9bd5f6 resourceVersion: "14098" uid: f984a47d-dab7-42c9-8ac4-1f8755d0d4c3 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:172ffdf04fe0ffd5a0117272da0333271a3c558dbeaf357e7412638d99a1e462 size: 154305621 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e88444eb2ec6d1c6fb00d9a743941e9840594479617d797c0b5cf81d8f4d5d5f size: 81543968 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el8 - NODEJS_VER=20 - PYTHON_VERSION=3.9 - PATH=/opt/app-root/src/.local/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PYTHONUNBUFFERED=1 - PYTHONIOENCODING=UTF-8 - LC_ALL=en_US.UTF-8 - LANG=en_US.UTF-8 - CNB_STACK_ID=com.redhat.stacks.ubi8-python-39 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - PIP_NO_CACHE_DIR=off - SUMMARY=Platform for building and running Python 3.9 applications - DESCRIPTION=Python 3.9 available as container is a base platform for building and running various Python 3.9 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. - BASH_ENV=/opt/app-root/bin/activate - ENV=/opt/app-root/bin/activate - PROMPT_COMMAND=. 
/opt/app-root/bin/activate ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-30T16:33:14Z" com.redhat.component: python-39-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Python 3.9 available as container is a base platform for building and running various Python 3.9 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. distribution-scope: public io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi8-python-39 io.k8s.description: Python 3.9 available as container is a base platform for building and running various Python 3.9 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. io.k8s.display-name: Python 3.9 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,python,python39,python-39,rh-python39 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/python-39 org.opencontainers.image.revision: 4600607c4ef2758fdb8cd425e7eb66493a22f2aa release: "1761841936" summary: Platform for building and running Python 3.9 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-python-container.git --context-dir=3.9/test/setup-test-app/ ubi8/python-39 python-sample-app vcs-ref: 4600607c4ef2758fdb8cd425e7eb66493a22f2aa vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-30T16:33:36Z" Id: sha256:d1d5d4d918f0e995fc6971ccc8da1a42ea259154c7ea1398224e5c88ddcb240f Size: 331411863 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/python-39@sha256:1ef62ca43bc7b5be382470644797926cecd419f1496a471cc230d71147b8f878 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:06Z" name: sha256:1ef62ca43bc7b5be382470644797926cecd419f1496a471cc230d71147b8f878 resourceVersion: "14132" uid: adddc388-175d-4aea-93bc-2b9b5b8cd8bd - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:872582724f337bcc41b829d3b854567e146ab62aa3c7de0b37e18617d38f5d08 size: 76246809 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b13ffc206103620a7d59e4f5b72279b53e317ade5d545a3daa06ab9bed270f92 size: 1409 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b7f1305ca252f66148776525dde2bb4df6c83494633a8164b3fc6b1560b711bf size: 4028980 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9fcb9eb95cb77715beb5cf6e769bfb055fe46ac0cad1cdf99d229bce80c5b3b9 size: 87034685 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ead4a319242f483b7e6020227d3351779b2a02250160e2c859c109d8acb2a139 size: 15179641 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.7 - JOLOKIA_VERSION=1.6.2.redhat-00002 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages - JAVACONFDIRS=/opt/rh/rh-maven35/root/etc/java - XDG_CONFIG_DIRS=/opt/rh/rh-maven35/root/etc/xdg:/etc/xdg - XDG_DATA_DIRS=/opt/rh/rh-maven35/root/usr/share:/usr/local/share:/usr/share ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: d0feb8001879 Image: feaf94cd74565b914b1ae1d2e0e5bf488105c208b19ba847f41daa74f1f41cba Labels: architecture: x86_64 build-date: 2020-11-04T17:18:27.721406 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i 
io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "12.1604505243" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.7-12.1604505243 vcs-ref: cc40f7e04d3a96ec8a71069f58939519a05f565c vcs-type: git vendor: Red Hat, Inc. version: "1.7" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.7 - JOLOKIA_VERSION=1.6.2.redhat-00002 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages - JAVACONFDIRS=/opt/rh/rh-maven35/root/etc/java - XDG_CONFIG_DIRS=/opt/rh/rh-maven35/root/etc/xdg:/etc/xdg - XDG_DATA_DIRS=/opt/rh/rh-maven35/root/usr/share:/usr/local/share:/usr/share ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: d0feb8001879 Image: sha256:7079b1364ee9644e1bec5a59428f3788bd245a8a94c11a95a5bb4486028c81ef Labels: architecture: x86_64 build-date: 2020-11-04T17:18:27.721406 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "12.1604505243" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.7-12.1604505243 vcs-ref: cc40f7e04d3a96ec8a71069f58939519a05f565c vcs-type: git vendor: Red Hat, Inc. 
version: "1.7" User: "185" WorkingDir: /home/jboss Created: "2020-11-04T17:19:51Z" DockerVersion: 1.13.1 Id: sha256:48fa17c7dad9bcbf3021e3923f9c1520a06be86b36b59630ce17a3f2cd7d6c51 Size: 182498026 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift@sha256:1fe8afafb4a8cfe086dd3c3f59fc717ccc8924e570deaed38f4751962aed4211 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:1fe8afafb4a8cfe086dd3c3f59fc717ccc8924e570deaed38f4751962aed4211 resourceVersion: "14052" uid: f0be263c-f364-4139-984b-43a8a4f93b6f - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a20dc09567a04bdff2ebfaa3d3917f64d7620555e6354d53b43dd7ebb0e0f575 size: 79751689 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:131f13a17b9e93af2594a586fed39af6ce5b77915f91df6b8a250f6117f20c95 size: 126692656 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.16 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-11-14T15:34:44 com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.9.1 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: 
/deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.16-1 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 26df1d32c23571c75bce3a5832334f8eb5dbcccc vcs-type: git vendor: Red Hat, Inc. version: "1.16" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-11-14T15:38:26Z" Id: sha256:c00ed3cbc5358621df245781025be71249f17c03105f030018ecf6de29e3f88e Size: 206474283 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:2254dc2f421f496b504aafbbd8ea37e660652c4b6b4f9a0681664b10873be7fe kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:2254dc2f421f496b504aafbbd8ea37e660652c4b6b4f9a0681664b10873be7fe resourceVersion: "14104" uid: 2127959d-590e-47a5-9946-89e7cef1237a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ac3ec768cc7844a4e7923545731691c54ac62eeac11245c532b348cc01cdbfd3 size: 109500783 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.18 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2024-01-18T20:31:06 com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.9.1 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" org.opencontainers.image.documentation: https://jboss-container-images.github.io/openjdk/ release: "2.1705602291" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.18-2.1705602291 usage: 
https://jboss-container-images.github.io/openjdk/ vcs-ref: db4b85aa377961470d01443597a69e4ef6daf294 vcs-type: git vendor: Red Hat, Inc. version: "1.18" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-01-18T20:39:24Z" Id: sha256:6f15e81bfb09e0e3d2a9d8e8cbb08622cf2bd19aa248e7f5abfd6bb3fbb36e37 Size: 148827594 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:229ee7b88c5f700c95d557d0b37b8f78dbb6b125b188c3bf050cfdb32aec7962 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:229ee7b88c5f700c95d557d0b37b8f78dbb6b125b188c3bf050cfdb32aec7962 resourceVersion: "14195" uid: 7e75e29d-702e-4451-9fe2-2c4c3e9fc86d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:378837c0e24ad4a2e33f0eb3d68dc0c31d9a7dbbd5357d4acafec1d3a7930602 size: 74923740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e17262bc23414bd3c0e9808ad7a87b055fe5afec386da42115a839ea2083d233 size: 1303 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8d9c78c7f9887170d08c57ec73b21e469b4120682a2e82883217535294878c5d size: 3805344 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f4350d5126d0895bb50c2c082a415ff417578d34508a0ef07ec20cebf661ebb7 size: 70368140 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:22d34e5ceb75822a5bc6be36447b7cee001ca3dbd61bae6a1e4fcd70db076460 size: 209163998 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0e3082a97b6f1819035738c7a146ca68a8f3652dbdd52eb05ec3b7f779e1b73e size: 268674990 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4380ed6ced5123a36dc514aa4180b10c1c9acca2ceedf4c5e0c2c7e8cee82188 size: 285285600 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss-eap-7/eap71-openshift - JBOSS_IMAGE_VERSION=1.3 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_EAP_VERSION=7.1.4.GA - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.1.4.GA - HTTPS_ENABLE_HTTP2=true - JOLOKIA_VERSION=1.5.0 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - MAVEN_VERSION=3.5 - WILDFLY_CAMEL_VERSION=5.2.0.fuse-710021-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 3183206101f3 Image: c449653522f4e40ec8b87d5a846191c1e1ecfd39d0ab78d36aef85c5068c10f0 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-10-17T22:42:58.963999 com.redhat.build-host: cpt-0001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 description: Platform for building and running Apache Camel applications on EAP 7.1 distribution-scope: public io.cekit.version: 2.1.4 io.fabric8.s2i.version.jolokia: 
1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Cloud Enablement Feedback name: fuse7/fuse-eap-openshift org.concrt.version: 1.4.1 org.jboss.product: eap org.jboss.product.eap.version: 7.1.4.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.1.4.GA release: "5.1539812399" summary: Platform for building and running Apache Camel applications on EAP 7.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.1-5.1539812399 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: afdbbdca5de2000dace048c344f9a15a962a393b vcs-type: git vendor: Red Hat, Inc. version: "1.1" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss-eap-7/eap71-openshift - JBOSS_IMAGE_VERSION=1.3 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_EAP_VERSION=7.1.4.GA - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.1.4.GA - HTTPS_ENABLE_HTTP2=true - JOLOKIA_VERSION=1.5.0 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - MAVEN_VERSION=3.5 - WILDFLY_CAMEL_VERSION=5.2.0.fuse-710021-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 3183206101f3 Image: sha256:4ab827020ad87739706046e64d592b99d5d8dc5c3899ded508ff1ac78aeeaf8e Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-10-17T22:42:58.963999 com.redhat.build-host: cpt-0001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 description: Platform for building and running Apache Camel applications on EAP 7.1 distribution-scope: public io.cekit.version: 2.1.4 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Cloud Enablement Feedback name: fuse7/fuse-eap-openshift org.concrt.version: 1.4.1 org.jboss.product: eap org.jboss.product.eap.version: 7.1.4.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.1.4.GA release: "5.1539812399" summary: Platform for building and running Apache Camel applications on EAP 7.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.1-5.1539812399 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: afdbbdca5de2000dace048c344f9a15a962a393b vcs-type: git vendor: Red Hat, Inc. 
version: "1.1" User: "185" WorkingDir: /home/jboss Created: "2018-10-17T22:43:58Z" DockerVersion: 1.13.1 Id: sha256:ba2baa466bb3166e9c5b51236146e10445f23586eb900902047a5b4477e5e3c2 Size: 912229825 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift@sha256:230cd475733320b85bef99f7634b0f73ba82e323865d1e46be6d76fe03c337e8 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:230cd475733320b85bef99f7634b0f73ba82e323865d1e46be6d76fe03c337e8 resourceVersion: "14020" uid: 6147b18d-26d6-4d23-864a-43490dec8c35 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:79c20e727b94ea36ae8776eb9e4065b60dc1d396564a6a91ebb6ee334dfb5cea size: 79001473 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ae4cda028ce1cdcaf1f079c166ecd8e396e8c387627cf1951367ee98807a5e27 size: 111642334 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - DEFAULT_ADMIN_USERNAME=eapadmin - HTTPS_ENABLE_HTTP2=true - JBOSS_HOME=/opt/eap - JBOSS_IMAGE_NAME=jboss-eap-7/eap-xp4-openjdk11-runtime-openshift-rhel8 - JBOSS_IMAGE_VERSION=4.0 - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api - LAUNCH_JBOSS_IN_BACKGROUND=true - MICROPROFILE_CONFIG_DIR_ORDINAL=500 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2025-02-05T10:36:53 com.redhat.component: jboss-eap-xp4-openjdk11-runtime-openshift-rhel8-container com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Enterprise Application Platform XP 4.0 OpenShift runtime image with OpenJDK 11 distribution-scope: public io.buildah.version: 1.33.8 io.cekit.version: 4.12.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.k8s.description: Base runtime image to run EAP XP server and application io.k8s.display-name: JBoss EAP XP runtime image io.openshift.expose-services: 8080:http io.openshift.tags: javaee,eap,eap7 maintainer: Red Hat name: jboss-eap-7/eap-xp4-openjdk11-runtime-openshift-rhel8 org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "89" summary: Red Hat JBoss Enterprise Application Platform XP 4.0 OpenShift runtime image with OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-eap-7/eap-xp4-openjdk11-runtime-openshift-rhel8/images/4.0-89 vcs-ref: 00387077df72f21059a527aa478178149208b1ba vcs-type: git vendor: Red Hat, Inc. 
version: "4.0" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-02-05T10:40:42Z" Id: sha256:d010d63fec4a6f088010579bd3f3b07052213204a69eb8122d96db754b17594b Size: 190667964 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-eap-7/eap-xp4-openjdk11-runtime-openshift-rhel8@sha256:2406759804b3f8c4ea6a6ba582a1ab82be48362a8a815a82bb4aa04bf81e86e3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:2406759804b3f8c4ea6a6ba582a1ab82be48362a8a815a82bb4aa04bf81e86e3 resourceVersion: "13319" uid: a33f1392-2eef-4f84-9e1d-9f0aadf8b716 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:90ce8aca36136fcb5647ba06362d28586593fd2bfa1e83535e2d9d530eb930f3 size: 79225652 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a62cdec22511d0921fcd3d4a71e091a8461e7c9eb7767793a61e511c0bbbd996 size: 121935734 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ae77b936d1da82edf080dfd85b164277a30b1325095a8ed7f70c85ce48b7c446 size: 34792954 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jws-5.7/tomcat/bin/launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - CATALINA_OPTS=-Djava.security.egd=file:/dev/./urandom - JBOSS_PRODUCT=webserver - JBOSS_WEBSERVER_VERSION=5.7.3 - JPDA_ADDRESS=8000 - JWS_HOME=/opt/jws-5.7/tomcat - PRODUCT_VERSION=5.7.3 - TOMCAT_VERSION=9.0.62 - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JWS_S2I_MODULE=/opt/jboss/container/jws/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - HOME=/home/jboss - AB_PROMETHEUS_ENABLE=True - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jws-jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9404 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_IMAGE_NAME=jboss-webserver-5/jws57-openjdk11-rhel8-openshift - JBOSS_IMAGE_VERSION=5.7.3 - STI_BUILDER=jee ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9404/tcp: {} Labels: architecture: x86_64 build-date: 2023-06-19T15:03:22 com.redhat.component: jboss-webserver-57-openjdk11-rhel8-openshift-container com.redhat.deployments-dir: /opt/jws-5.7/tomcat/webapps com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: JPDA_ADDRESS:8000 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK11 on UBI8 distribution-scope: public io.buildah.version: 
1.29.0 io.cekit.version: 4.6.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running web applications on JBoss Web Server 5.7 with OpenJDK11 - Tomcat v9 io.k8s.display-name: JBoss Web Server 5.7 OpenJDK11 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,tomcat9 maintainer: szappis@redhat.com name: jboss-webserver-5/jws57-openjdk11-rhel8-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/jws-5.7/tomcat/webapps org.jboss.product: webserver-tomcat9 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 5.7.3 org.jboss.product.webserver-tomcat9.version: 5.7.3 release: "2.1687186259" summary: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK11 on UBI8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-webserver-5/jws57-openjdk11-rhel8-openshift/images/5.7.3-2.1687186259 vcs-ref: 0ccb4d1a73fd7227e8ade7173e71dce4d74ddf2d vcs-type: git vendor: Red Hat, Inc. version: 5.7.3 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-06-19T15:09:29Z" Id: sha256:4a538bbff8f5f1e215af6c5879498d47452671f6a71a8c4ec4ddaf12fb9b7558 Size: 235996644 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-webserver-5/jws57-openjdk11-openshift-rhel8@sha256:25f6a298e4505c38e7220e8a654852de3822d40a99b5f47da657251f31c3ffc3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:25f6a298e4505c38e7220e8a654852de3822d40a99b5f47da657251f31c3ffc3 resourceVersion: "13701" uid: 5544c692-51d7-4cde-af1f-c403e45f2c0e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:78ba342552b07b7f64acdfee9b6cb3ae0a484e6bb39e5ee9d8fd99759eca0224 size: 39673781 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:23cac4d9b8155ea96002c5e7a388bc9b7ce57817342fc8ea166e7ba5865a38cd size: 62786710 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5b141278de81150c0fb400b1b75a39f53c238ed5165bfcd79aa5f22361cf2301 size: 150590071 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - /bin/bash Env: - container=oci - HOME=/opt/app-root - DOTNET_APP_PATH=/opt/app-root/app - DOTNET_DATA_PATH=/opt/app-root/data - DOTNET_DEFAULT_CMD=default-cmd.sh - DOTNET_RUNNING_IN_CONTAINER=true - NUGET_XMLDOC_MODE=skip - ASPNETCORE_URLS=http://*:8080 - APP_UID=1001 - DOTNET_VERSION=8.0.21 - ASPNET_VERSION=8.0.21 - PATH=/opt/app-root/src/.local/bin:/opt/app-root/src/bin:/opt/app-root/.dotnet/tools:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - STI_SCRIPTS_PATH=/usr/libexec/s2i - DOTNET_GENERATE_ASPNET_CERTIFICATE=false - DOTNET_NOLOGO=true - DOTNET_SDK_VERSION=8.0.121 - DOTNET_USE_POLLING_FILE_WATCHER=true ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-22T02:39:40Z" com.redhat.component: dotnet-80-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Platform for building and running .NET 8 
applications distribution-scope: public dotnet_version: 8.0.21 io.buildah.version: 1.41.4 io.k8s.description: Platform for building and running .NET 8 applications io.k8s.display-name: .NET 8 SDK io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,.net,dotnet,dotnetcore,dotnet-80 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. name: ubi8/dotnet-80 org.opencontainers.image.revision: 84854597b481c0a66f4f91103c2cd97c939bb22e release: "1761100714" sdk_version: 8.0.121 summary: .NET 8 SDK url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: 84854597b481c0a66f4f91103c2cd97c939bb22e vcs-type: git vendor: Red Hat, Inc. version: "8.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-22T02:39:56Z" Id: sha256:27a25c7f56d86d81fc6213242e2f31683cf6679777a750ea01f2f6a058a91f48 Size: 253069220 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/dotnet-80@sha256:26c16d866c58f4d47901f8a009d39b5f2ddc76a5d7b2cb5b0065ed8cd8eea1b6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:26c16d866c58f4d47901f8a009d39b5f2ddc76a5d7b2cb5b0065ed8cd8eea1b6 resourceVersion: "13397" uid: a8cf9840-1b68-4e97-adaa-1647f04f079d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:172ffdf04fe0ffd5a0117272da0333271a3c558dbeaf357e7412638d99a1e462 size: 154305621 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1238570471bcd397e18009d29d1dac73c8db95daf823622782d740ad3bd280dd size: 40584254 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - NODEJS_VER=20 - PHP_VERSION=8.2 - PHP_VER_SHORT=82 - NAME=php - SUMMARY=Platform for building and running PHP 8.2 applications - DESCRIPTION=PHP 8.2 available as container is a base platform for building and running various PHP 8.2 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. 
- PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ - APP_DATA=/opt/app-root/src - PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear - PHP_SYSCONF_PATH=/etc - PHP_HTTPD_CONF_FILE=php.conf - HTTPD_CONFIGURATION_PATH=/opt/app-root/etc/conf.d - HTTPD_MAIN_CONF_PATH=/etc/httpd/conf - HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d - HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d - HTTPD_VAR_RUN=/var/run/httpd - HTTPD_DATA_PATH=/var/www - HTTPD_DATA_ORIG_PATH=/var/www - HTTPD_VAR_PATH=/var ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-30T16:32:24Z" com.redhat.component: php-82-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: PHP 8.2 available as container is a base platform for building and running various PHP 8.2 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-php-container io.buildah.version: 1.41.4 io.k8s.description: PHP 8.2 available as container is a base platform for building and running various PHP 8.2 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. io.k8s.display-name: Apache 2.4 with PHP 8.2 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,php,php82,php-82 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/php-82 org.opencontainers.image.revision: e9685c355d0f2ed4ac052b288fede6fb199d1695 release: "1761841874" summary: Platform for building and running PHP 8.2 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=8.2/test/test-app ubi8/php-82 sample-server vcs-ref: e9685c355d0f2ed4ac052b288fede6fb199d1695 vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-30T16:32:39Z" Id: sha256:0fbefa5d8a27e793390446e325849dfc6ca1ac8276984c833fdf4448c87414de Size: 290452878 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/php-82@sha256:2788719b2da9f43a904d670b43cf29445a687a1ad6eb96e4a052e15cd3188a0f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:2788719b2da9f43a904d670b43cf29445a687a1ad6eb96e4a052e15cd3188a0f resourceVersion: "14081" uid: d9d7a912-d7ab-410c-a8bc-35cd9672ce70 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2399a674882f7d84f588927a959aeebea8b0332dcce4eac3cc1ae4ac5a8e54df size: 20385592 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NAME=nginx - NGINX_VERSION=1.24 - NGINX_SHORT_VER=124 - VERSION=0 - SUMMARY=Platform for running nginx 1.24 or building nginx-based application - DESCRIPTION=Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.24 daemon. The image can be used as a base image for other applications based on nginx 1.24 web server. Nginx server image can be extended using source-to-image tool. - NGINX_CONFIGURATION_PATH=/opt/app-root/etc/nginx.d - NGINX_CONF_PATH=/etc/nginx/nginx.conf - NGINX_DEFAULT_CONF_PATH=/opt/app-root/etc/nginx.default.d - NGINX_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/nginx - NGINX_APP_ROOT=/opt/app-root - NGINX_LOG_PATH=/var/log/nginx - NGINX_PERL_MODULE_PATH=/opt/app-root/etc/perl ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T20:11:23Z" com.redhat.component: nginx-124-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.24 daemon. The image can be used as a base image for other applications based on nginx 1.24 web server. Nginx server image can be extended using source-to-image tool. 
distribution-scope: public help: For more information visit https://github.com/sclorg/nginx-container io.buildah.version: 1.41.4 io.k8s.description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.24 daemon. The image can be used as a base image for other applications based on nginx 1.24 web server. Nginx server image can be extended using source-to-image tool. io.k8s.display-name: Nginx 1.24 io.openshift.expose-services: 8443:https io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nginx,nginx-124 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/nginx-124 org.opencontainers.image.revision: b3c974eee06912b47116c9135d3c8562e0e44ee4 release: "1760386249" summary: Platform for running nginx 1.24 or building nginx-based application url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi9/nginx-124:latest vcs-ref: b3c974eee06912b47116c9135d3c8562e0e44ee4 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T20:11:33Z" Id: sha256:541d6ee22bb238479aa8d473b97608f13deb3f1be08a60ed925b0a6aebcaf325 Size: 117486923 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/nginx-124@sha256:27901936ab8866d1d1293479bb3448cb3ff98cdcaa8242926a9c49896a864c2f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:27901936ab8866d1d1293479bb3448cb3ff98cdcaa8242926a9c49896a864c2f resourceVersion: "14059" uid: f15635b7-feaf-4d78-95fd-5112f7c378ed - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:72d37ae8760a66c6d3507cc766ab29e2e49082a565e2a531e4b0bea3c4385392 size: 79141222 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:710801b761b9b0683fc2c26ab4fe6d20214d236e60e23b1723bf37c30410d3a9 size: 17335836 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:deafbb8752779eee15f2f78eadddcff9f44c9abd53d93af6713bfe97fb5410ee size: 197753182 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:89357b5590a75c788c2e98c7c1c12be14ee0a3775ef687455f47a4a2b48d1363 size: 167451580 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NODEJS_VER=16 - NAME=golang - GO_MAJOR_VERSION=1 - GO_MINOR_VERSION=18 - GO_PATCH_VERSION=10 - CONTAINER_NAME=rhel9/go-toolset - VERSION=1.18.10 - SUMMARY=Platform for building and running Go Applications - DESCRIPTION=Go Toolset available as a container is a base platform for building and running various Go applications and frameworks. Go is an easy to learn, powerful, statically typed language in the C/C++ tradition with garbage collection, concurrent programming support, and memory safety features. 
Labels: architecture: x86_64 build-date: 2023-05-02T08:21:04 com.redhat.component: go-toolset-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Go Toolset available as a container is a base platform for building and running various Go applications and frameworks. Go is an easy to learn, powerful, statically typed language in the C/C++ tradition with garbage collection, concurrent programming support, and memory safety features. distribution-scope: public io.buildah.version: 1.27.3 io.k8s.description: Go Toolset available as a container is a base platform for building and running various Go applications and frameworks. Go is an easy to learn, powerful, statically typed language in the C/C++ tradition with garbage collection, concurrent programming support, and memory safety features. io.k8s.display-name: Go 1.18.10 io.openshift.expose-services: "" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,golang,golang118,rh-golang118,go io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. name: rhel9/go-toolset release: "4.1683015641" summary: Platform for building and running Go Applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel9/go-toolset/images/1.18.10-4.1683015641 vcs-ref: 7348cae7eba784f56a23908bc7a9104d0af9009c vcs-type: git vendor: Red Hat, Inc. version: 1.18.10 User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2023-05-02T08:23:45Z" Id: sha256:89dca1e330cacd3845f8dd96b7d8c422ad018cf8dc7b9b9c719f4702c26916de Size: 461702829 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/go-toolset@sha256:2861514e125903261aa0004883a7f7aeeb4c189b2d0d175372d1edc111942eda kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:2861514e125903261aa0004883a7f7aeeb4c189b2d0d175372d1edc111942eda resourceVersion: "13452" uid: 2d1723b9-e6b7-432a-8581-4db21e5284f5 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:76608b6b9d54251299c5d3be69fdf53e05f97a3735bbcd5889c30ebb78608428 size: 75827462 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3c81a5d20855a6cef8b997d709410e047e2839b5ad113f4c34d25e9fae9e3beb size: 1266 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:cf313e71c9fd8209b9df2ee0fb7471c014b3ec6f7144546b88ad7c46b5fb2cd4 size: 3891763 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:07c69678314f4cb7384c115ffd5040765fe1fe42db1b8c789af11ce865771f7b size: 84375848 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e7c59289a7545b67b0e12162f8ce6b637cabaf93171e0d7e1bf3e98d286456c7 size: 26655623 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.2 - JOLOKIA_VERSION=1.5.0.redhat-1 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - 
AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 4072265dcbc2 Image: dc97831ef9bf7072787e2aa389524b0307d3fa3f8034d1c21c67d82b14f771e2 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-04-17T17:00:50.328059 com.redhat.build-host: cpt-0010.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/licenses/eulas description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Dhiraj Bokde name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "12.1555516864" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.2-12.1555516864 vcs-ref: 6bdabdeb4498074b6702f3c010f198d7176593da vcs-type: git vendor: Red Hat, Inc. version: "1.2" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.2 - JOLOKIA_VERSION=1.5.0.redhat-1 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 4072265dcbc2 Image: sha256:46a2220b289e3c56f193a39c4b8510370f60e67ebf43a746f5f040e2dbf4f8c2 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-04-17T17:00:50.328059 com.redhat.build-host: cpt-0010.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/licenses/eulas description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Dhiraj Bokde name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: 
/deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "12.1555516864" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.2-12.1555516864 vcs-ref: 6bdabdeb4498074b6702f3c010f198d7176593da vcs-type: git vendor: Red Hat, Inc. version: "1.2" User: "185" WorkingDir: /home/jboss Created: "2019-04-17T17:03:28Z" DockerVersion: 1.13.1 Id: sha256:62f2af090f787d63bf85381a756334bfda633d175937b2dba90742cb6e16de95 Size: 190757902 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift@sha256:29d831cb1f5fe839ab6e8c59d57f695094938a9f97e071bdbc854e54a90ecb94 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:29d831cb1f5fe839ab6e8c59d57f695094938a9f97e071bdbc854e54a90ecb94 resourceVersion: "14046" uid: ea5c0a9d-7d69-493b-aee0-439ade3631f0 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d246e53da6241a619b7dcea7d4403071b9e1961797aa4f6c766786e29732651c size: 76526594 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:91bd2c541a17a588359eb054815718e41f871d03b4d4daf7b3584b25fbdcbb67 size: 1563 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:118a6721a11912d1a6b148b280cbac16e4d55fbafb7d347ac5ccf1cec9abbe66 size: 109507768 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: f18f459d223a Image: e5bb351eb440daf702d40be0e683db2a675d02bb2a892d56046ba4d19228f8c5 Labels: architecture: x86_64 build-date: 2021-12-02T07:23:04.378078 com.redhat.build-host: 
cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1638429538" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.10-1.1638429538 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 0041074ffb8ad7c1f24a784a9a16da09ec7f0493 vcs-type: git vendor: Red Hat, Inc. version: "1.10" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: f18f459d223a Image: sha256:2c422c19e229e6ceeec8689dbdc20af3334ba5413a394d9ef3ba7403ba366341 Labels: architecture: x86_64 build-date: 2021-12-02T07:23:04.378078 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 
io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1638429538" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.10-1.1638429538 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 0041074ffb8ad7c1f24a784a9a16da09ec7f0493 vcs-type: git vendor: Red Hat, Inc. version: "1.10" User: "185" WorkingDir: /home/jboss Created: "2021-12-02T07:26:57Z" DockerVersion: 1.13.1 Id: sha256:c282d2158da6fc28f5d2cf159a8d1788ac3e822c43a06cec4108f89cbaa99696 Size: 186043467 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:2ae058ee7239213fb495491112be8cc7e6d6661864fd399deb27f23f50f05eb4 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:2ae058ee7239213fb495491112be8cc7e6d6661864fd399deb27f23f50f05eb4 resourceVersion: "14161" uid: e3bffec7-fa9d-4100-b35e-ce3f1f7b4e39 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0866a63d5aaea50186eee6e8cf59eadea0b79fdfc3fa6c6ef75080baf6e6b9f9 size: 20330989 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NAME=nginx - NGINX_VERSION=1.22 - NGINX_SHORT_VER=122 - VERSION=0 - SUMMARY=Platform for running nginx 1.22 or building nginx-based application - DESCRIPTION=Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.22 daemon. The image can be used as a base image for other applications based on nginx 1.22 web server. Nginx server image can be extended using source-to-image tool. 
- NGINX_CONFIGURATION_PATH=/opt/app-root/etc/nginx.d - NGINX_CONF_PATH=/etc/nginx/nginx.conf - NGINX_DEFAULT_CONF_PATH=/opt/app-root/etc/nginx.default.d - NGINX_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/nginx - NGINX_APP_ROOT=/opt/app-root - NGINX_LOG_PATH=/var/log/nginx - NGINX_PERL_MODULE_PATH=/opt/app-root/etc/perl ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-28T04:21:38Z" com.redhat.component: nginx-122-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.22 daemon. The image can be used as a base image for other applications based on nginx 1.22 web server. Nginx server image can be extended using source-to-image tool. distribution-scope: public help: For more information visit https://github.com/sclorg/nginx-container io.buildah.version: 1.41.4 io.k8s.description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.22 daemon. The image can be used as a base image for other applications based on nginx 1.22 web server. Nginx server image can be extended using source-to-image tool. io.k8s.display-name: Nginx 1.22 io.openshift.expose-services: 8443:https io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nginx,nginx-122 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/nginx-122 org.opencontainers.image.revision: f59505117b0a342c3c7dd28004ca2c60276eb0cf release: "1761625269" summary: Platform for running nginx 1.22 or building nginx-based application url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi9/nginx-122:latest vcs-ref: f59505117b0a342c3c7dd28004ca2c60276eb0cf vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-28T04:21:47Z" Id: sha256:90463b415e63367ba5747d0b326e43e474360aa78e9e0f22c47e6fa82d3e94e3 Size: 117432320 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/nginx-122@sha256:2c7e510949ed2c19504d0aaed1ad891f8aa03cd649d04359cc6a2cdffc40b594 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:2c7e510949ed2c19504d0aaed1ad891f8aa03cd649d04359cc6a2cdffc40b594 resourceVersion: "14057" uid: 1a08a60e-52d4-459f-bcfd-ebc4dce9e0ed - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4752687a61a97d6f352ae62c381c87564bcb2f5b6523a05510ca1fb60d640216 size: 36442442 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0344366a246a0f7590c2bae4536c01f15f20c6d802b4654ce96ac81047bc23f3 size: 1740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:90be64a48170f43260bea55930b0ebe9f7bd5fc847a09b4d1022a95b20d8b854 size: 72355508 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: fd16d6f2e774 Image: d86420a5475900b43b2abf25ea61375bf6fef38baf6569782ed7241ce5e8e232 Labels: architecture: x86_64 build-date: 2022-06-15T16:29:14.850840 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1655306436" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.13-1.1655306436 vcs-ref: 3d6b138d69cf66fef47fe6e8c74e3c8145acfa38 vcs-type: git vendor: Red Hat, Inc. 
version: "1.13" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: fd16d6f2e774 Image: sha256:bf001a5d4970d8ad95fc54d04f68a812c5e43305dd8a41bd336e0d06a39ef3e1 Labels: architecture: x86_64 build-date: 2022-06-15T16:29:14.850840 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1655306436" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.13-1.1655306436 vcs-ref: 3d6b138d69cf66fef47fe6e8c74e3c8145acfa38 vcs-type: git vendor: Red Hat, Inc. 
version: "1.13" User: "185" WorkingDir: /home/jboss Created: "2022-06-15T16:31:58Z" DockerVersion: 1.13.1 Id: sha256:63ed77f7c4776e00061be61cb217a0aa2ec33e971df94b5c8fea0f4710c1ec1a Size: 108804727 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:2cee344e4cfcfdc9a117fd82baa6f2d5daa7eeed450e02cd5d5554b424410439 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:2cee344e4cfcfdc9a117fd82baa6f2d5daa7eeed450e02cd5d5554b424410439 resourceVersion: "14221" uid: dea4bfef-b23a-4e11-9250-c76e7423ec89 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e60f3410b0ae270c41e17f13f49e1e97f743d222162f589c4e4d5a3f51da928f size: 88591273 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-mysqld Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - MYSQL_VERSION=8.0 - APP_DATA=/opt/app-root/src - HOME=/var/lib/mysql - SUMMARY=MySQL 8.0 SQL database server - DESCRIPTION=MySQL is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MySQL mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MySQL databases on behalf of the clients. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql - MYSQL_PREFIX=/usr ExposedPorts: 3306/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T20:15:54Z" com.redhat.component: mysql-80-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: MySQL is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MySQL mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MySQL databases on behalf of the clients. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: MySQL is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MySQL mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MySQL databases on behalf of the clients. 
io.k8s.display-name: MySQL 8.0 io.openshift.expose-services: 3306:mysql io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,mysql,mysql80,mysql-80 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel9/mysql-80 org.opencontainers.image.revision: c1fa8a6addd490e169d843595ca59aa902d7a7b7 release: "1760386499" summary: MySQL 8.0 SQL database server url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhel9/mysql-80 vcs-ref: c1fa8a6addd490e169d843595ca59aa902d7a7b7 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "27" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T20:16:07Z" Id: sha256:e0d69a306b0f28204964dc50d393acbd1df568fe83fdc5df511f21fb309312f8 Size: 185690619 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel9/mysql-80@sha256:2ec1207cc75b74c26998f7c3386199a30213b37a05fab6e41a713d4a74bc4d5a kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:2ec1207cc75b74c26998f7c3386199a30213b37a05fab6e41a713d4a74bc4d5a resourceVersion: "13787" uid: 24cfde81-bbba-45da-9912-82e2ad1337bb - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5329d7039f252afc1c5d69521ef7e674f71c36b50db99b369cbb52aa9e0a6782 size: 39330100 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e2cf0521dced3a2dcaf10c83994ba00f009244b0515623829202b8f8c28a2b1c size: 107338232 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.16 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-07-19T16:04:12 com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To 
Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "2" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.16-2 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: 1e1e4cf1bf2d68ad0e1a2803e94e4627d30a2f8c vcs-type: git vendor: Red Hat, Inc. version: "1.16" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-07-19T16:21:30Z" Id: sha256:36812abee8139cf91d6567b616299fbe6e03508b736605cdbf0d9634293865d3 Size: 146697982 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8@sha256:2f59ad75b66a3169b0b03032afb09aa3cfa531dbd844e3d3a562246e7d09c282 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:2f59ad75b66a3169b0b03032afb09aa3cfa531dbd844e3d3a562246e7d09c282 resourceVersion: "14213" uid: 2f98c6a6-a4cd-4c75-afa2-886bf1854ac1 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:90ce8aca36136fcb5647ba06362d28586593fd2bfa1e83535e2d9d530eb930f3 size: 79225652 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:12926432467a23e8b5478a1586ea3142a76e3fc5a16ea1903bdee9d0af226065 size: 103110488 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:12bea5e832dcaf62a54ba28101cce9d90c254ae13f2c8480c105791c93ac43c4 size: 30963473 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jws-5.7/tomcat/bin/launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - CATALINA_OPTS=-Djava.security.egd=file:/dev/./urandom - JBOSS_PRODUCT=webserver - JBOSS_WEBSERVER_VERSION=5.7.3 - JPDA_ADDRESS=8000 - JWS_HOME=/opt/jws-5.7/tomcat - PRODUCT_VERSION=5.7.3 - TOMCAT_VERSION=9.0.62 - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - 
JBOSS_CONTAINER_JWS_S2I_MODULE=/opt/jboss/container/jws/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - HOME=/home/jboss - AB_PROMETHEUS_ENABLE=True - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jws-jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9404 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_IMAGE_NAME=jboss-webserver-5/jws57-openjdk8-rhel8-openshift - JBOSS_IMAGE_VERSION=5.7.3 - STI_BUILDER=jee ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9404/tcp: {} Labels: architecture: x86_64 build-date: 2023-06-19T15:02:36 com.redhat.component: jboss-webserver-57-openjdk8-rhel8-openshift-container com.redhat.deployments-dir: /opt/jws-5.7/tomcat/webapps com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: JPDA_ADDRESS:8000 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK8 on UBI8 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.6.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running web applications on JBoss Web Server 5.7 with OpenJDK8 - Tomcat v9 io.k8s.display-name: JBoss Web Server 5.7 OpenJDK8 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,tomcat9 maintainer: szappis@redhat.com name: jboss-webserver-5/jws57-openjdk8-rhel8-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/jws-5.7/tomcat/webapps org.jboss.product: webserver-tomcat9 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 5.7.3 org.jboss.product.webserver-tomcat9.version: 5.7.3 release: "2.1687186197" summary: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK8 on UBI8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-webserver-5/jws57-openjdk8-rhel8-openshift/images/5.7.3-2.1687186197 vcs-ref: 465f6bbc1badd3c93f9f9273410f1b4369279836 vcs-type: git vendor: Red Hat, Inc. 
version: 5.7.3 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-06-19T15:08:24Z" Id: sha256:350073fc9719547602f4d809e8e73b708f320110ed17949d71357e6292e390b4 Size: 213341876 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-webserver-5/jws57-openjdk8-openshift-rhel8@sha256:301b093ae4fbc18f7d9d11b803d9da4220dad4556202a8de2f04377ff87c2f4d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:301b093ae4fbc18f7d9d11b803d9da4220dad4556202a8de2f04377ff87c2f4d resourceVersion: "13798" uid: 93b45e02-ada6-4d84-bd22-b2b2e708dd5a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7f2c2c4492b6b2d181be862a0a1d1b6f6851cb07244efbcb43d44f9936aa78d5 size: 80005019 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ea092d7970b26c24007a670fc6d0810dbf9531dc0d3a9d6ea514134ba5686724 size: 7541063 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c9f34f3d634490c688b24ea6308ba4eb38d98227c030f87fb6ba3fe5bf68fc86 size: 46253494 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - run-postgresql Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el7 - POSTGRESQL_VERSION=12 - POSTGRESQL_PREV_VERSION=10 - HOME=/var/lib/pgsql - PGUSER=postgres - APP_DATA=/opt/app-root - SUMMARY=PostgreSQL is an advanced Object-Relational database management system - DESCRIPTION=PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql - ENABLED_COLLECTIONS=rh-postgresql12 - BASH_ENV=/usr/share/container-scripts/postgresql/scl_enable - ENV=/usr/share/container-scripts/postgresql/scl_enable - PROMPT_COMMAND=. /usr/share/container-scripts/postgresql/scl_enable ExposedPorts: 5432/tcp: {} Labels: architecture: x86_64 build-date: 2024-05-30T09:34:16 com.redhat.component: rh-postgresql12-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. distribution-scope: public io.buildah.version: 1.29.0 io.k8s.description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. 
io.k8s.display-name: PostgreSQL 12 io.openshift.expose-services: 5432:postgresql io.openshift.s2i.assemble-user: "26" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,postgresql,postgresql12,rh-postgresql12 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhscl/postgresql-12-rhel7 release: "145" summary: PostgreSQL is an advanced Object-Relational database management system url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/postgresql-12-rhel7/images/1-145 usage: podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 rhscl/postgresql-12-rhel7 vcs-ref: 06913d0f260f4515284a2ab0a659f00217a432c8 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "26" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2024-05-30T09:38:03Z" Id: sha256:faaa9b1718aa7e90b9c101aa719c0207b4a98c6f3192002b40c0613af3d7e8cd Size: 133819444 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhscl/postgresql-12-rhel7@sha256:306ce79b5647eb627aebbd6a9c956f9cd09c24ef7b12e42c1de6ba89f8fd8121 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:306ce79b5647eb627aebbd6a9c956f9cd09c24ef7b12e42c1de6ba89f8fd8121 resourceVersion: "14108" uid: 9d994295-a05c-4ae6-9f25-ae34cf9967d5 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:017f542c0c5a83cf71321d76c7233cc3782038061e250a1be7e1be92bd81fe44 size: 88623070 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.17 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-10-23T16:06:08 com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "5" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: 
https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.17-5 vcs-ref: 70947114745f06cb153005c197bcd8390045485b vcs-type: git vendor: Red Hat, Inc. version: "1.17" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-10-23T16:11:55Z" Id: sha256:157e77d936fa1ffb2b7f7aa71eda401f997796cfe8ac5c6cd22d5aeaeeb1b332 Size: 127949437 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:32a5e806bd88b40568d46864fd313541498e38fabfc5afb5f3bdfe052c4b4c5f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:32a5e806bd88b40568d46864fd313541498e38fabfc5afb5f3bdfe052c4b4c5f resourceVersion: "14194" uid: 3a8629b7-e384-45ac-964d-2d9d6249d589 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c85ac87d44df4b64d7c273886fc5aed55a28422df33dcb641884ffa419db218 size: 76240885 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:51e9f237b750efcda2d5755785cdb8dd080d51585ae35d368e4f9b29a11b1994 size: 1329 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4339109fed706e213373c80a8e81481dbf63a9d30505dad20fe3801c024fa46a size: 347600654 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/datagrid/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.5.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.5 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.5.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: 534540aef153 Image: cbb843e4683a7d56a7505fdc2c5659f13c18a3244c9ce0378a34100ff5d8baf5 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-03-17T16:44:39.221740 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments 
com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.5.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.5.GA release: "2.1584463358" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.5-2.1584463358 vcs-ref: 740a8016ebd909ab092589f95cfebf5c15d5a281 vcs-type: git vendor: Red Hat, Inc. version: "1.5" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.5.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.5 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.5.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: 534540aef153 Image: sha256:9ffa9b1b6afd72decedb408ebf5c42ab908cd441c1f9885d9de45873a34c1822 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-03-17T16:44:39.221740 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: 
https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.5.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.5.GA release: "2.1584463358" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.5-2.1584463358 vcs-ref: 740a8016ebd909ab092589f95cfebf5c15d5a281 vcs-type: git vendor: Red Hat, Inc. version: "1.5" User: "185" WorkingDir: /home/jboss Created: "2020-03-17T16:52:06Z" DockerVersion: 1.13.1 Id: sha256:48e4640bc12c43aa7c07c38fb8f76bcd23721951c41d5826fbebef9fc3f464b2 Size: 423850861 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:3375ec169d274278da56d1401c5c1285f7d2812ea0bde2ac9ad9652b69f80893 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:3375ec169d274278da56d1401c5c1285f7d2812ea0bde2ac9ad9652b69f80893 resourceVersion: "13900" uid: 870da698-d7e4-4311-9632-5d1a1ee331f4 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4752687a61a97d6f352ae62c381c87564bcb2f5b6523a05510ca1fb60d640216 size: 36442442 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0344366a246a0f7590c2bae4536c01f15f20c6d802b4654ce96ac81047bc23f3 size: 1740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2f6d0806a79dff81a406906c6f9c757a398407c0d3345fabdfe3b238afee6da4 size: 86586578 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: fd16d6f2e774 Image: 271110da4538991404447fa5678366b7a276fb55f85af624d8c1e0292bcdd43f Labels: architecture: x86_64 build-date: 2022-06-15T16:15:55.785399 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com 
com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1655306368" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.13-1.1655306368 vcs-ref: 7f244524449d43f35d1370cc10ec0cc48f521a42 vcs-type: git vendor: Red Hat, Inc. version: "1.13" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: fd16d6f2e774 Image: sha256:afcad67a9a05ee0f28dc9c20da6d4a5a35712622f382a00156a653eaebdecf37 Labels: architecture: x86_64 build-date: 2022-06-15T16:15:55.785399 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1655306368" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.13-1.1655306368 vcs-ref: 7f244524449d43f35d1370cc10ec0cc48f521a42 vcs-type: git vendor: Red Hat, Inc. 
version: "1.13" User: "185" WorkingDir: /home/jboss Created: "2022-06-15T16:18:26Z" DockerVersion: 1.13.1 Id: sha256:89061b4069d7f30c7432668e8ae7e27b17596a63354bd9bc2ac2586501c51b66 Size: 123035783 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:33d4dff40514e91d86b42e90b24b09a5ca770d9f67657c936363d348cd33d188 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:33d4dff40514e91d86b42e90b24b09a5ca770d9f67657c936363d348cd33d188 resourceVersion: "14190" uid: 4203b0fa-0475-47a7-8d7d-e1a43e1782c9 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:172ffdf04fe0ffd5a0117272da0333271a3c558dbeaf357e7412638d99a1e462 size: 154305621 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b294370e16a52df0c9f043fce2048ca6d059e5f4a9ea1f432b28ab32b456c1ce size: 65687544 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el8 - NODEJS_VER=20 - PYTHON_VERSION=3.6 - PATH=/opt/app-root/src/.local/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PYTHONUNBUFFERED=1 - PYTHONIOENCODING=UTF-8 - LC_ALL=en_US.UTF-8 - LANG=en_US.UTF-8 - CNB_STACK_ID=com.redhat.stacks.ubi8-python-36 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - PIP_NO_CACHE_DIR=off - SUMMARY=Platform for building and running Python 3.6 applications - DESCRIPTION=Python 3.6 available as container is a base platform for building and running various Python 3.6 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. - BASH_ENV=/opt/app-root/bin/activate - ENV=/opt/app-root/bin/activate - PROMPT_COMMAND=. /opt/app-root/bin/activate ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-30T16:35:12Z" com.redhat.component: python-36-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Python 3.6 available as container is a base platform for building and running various Python 3.6 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. 
Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. distribution-scope: public io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi8-python-36 io.k8s.description: Python 3.6 available as container is a base platform for building and running various Python 3.6 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. io.k8s.display-name: Python 3.6 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,python,python36,python-36,rh-python36 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/python-36 org.opencontainers.image.revision: 997d83720c24e6dfa4402f799676970ab2b723dd release: "1761842005" summary: Platform for building and running Python 3.6 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-python-container.git --context-dir=3.6/test/setup-test-app/ ubi8/python-36 python-sample-app vcs-ref: 997d83720c24e6dfa4402f799676970ab2b723dd vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-30T16:35:51Z" Id: sha256:648f1759d6cad13606fe9d58c8fa3cbf6c51ea5bd361812a63d06906259a7249 Size: 315555214 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/python-36@sha256:38427fd30565b66ec512fb8d86bf442a7ac4a100d44332e8c42b472fdf821db0 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:06Z" name: sha256:38427fd30565b66ec512fb8d86bf442a7ac4a100d44332e8c42b472fdf821db0 resourceVersion: "14131" uid: f961a18a-2cb6-4557-96e3-f81acf4320c8 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:25c75c34b2e2b68ba9245d9cddeb6b8a0887371ed30744064f85241a75704d87 size: 79262296 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:67705065e025181e4faca8aabe1305bdd92f5bdf8a2b8009cdb69183ac2e2c47 size: 49851946 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9d458e2e81cb0fa811f569aaf711628309c0372c7d5eed4a8ea9ec96b4aeeb42 size: 9300456 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3019826b26b93fdb39b6e29614bc6b4d1ab879c596261851db4ff70706fa6c55 size: 183535774 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - ART_BUILD_ENGINE=konflux - ART_BUILD_DEPS_METHOD=cachi2 - ART_BUILD_NETWORK=hermetic - ART_BUILD_DEPS_MODE=default - __doozer=merge - BUILD_RELEASE=202510212154.p2.g7f1d6f8.assembly.stream.el9 - BUILD_VERSION=v4.20.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=20 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - 
OS_GIT_VERSION=4.20.0-202510212154.p2.g7f1d6f8.assembly.stream.el9-7f1d6f8 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.20 - __doozer_key=ose-tools - __doozer_uuid_tag=ose-tools-rhel9-v4.20.0-20251021.223340 - __doozer_version=v4.20.0 - OS_GIT_COMMIT=7f1d6f8 - SOURCE_DATE_EPOCH=1761075552 - SOURCE_GIT_COMMIT=7f1d6f88cb0c8ed5c877fc0ae2bd99298c6339f2 - SOURCE_GIT_TAG=openshift-clients-4.12.0-202208031327-1168-g7f1d6f88c - SOURCE_GIT_URL=https://github.com/openshift/oc Labels: License: GPLv2+ architecture: x86_64 build-date: "2025-10-21T22:55:56Z" com.redhat.component: ose-tools-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:openshift:4.20::el9 description: Empty distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Contains debugging and diagnostic tools for use with an OpenShift cluster. io.k8s.display-name: OpenShift Tools io.openshift.build.commit.id: 7f1d6f88cb0c8ed5c877fc0ae2bd99298c6339f2 io.openshift.build.commit.url: https://github.com/openshift/oc/commit/7f1d6f88cb0c8ed5c877fc0ae2bd99298c6339f2 io.openshift.build.source-location: https://github.com/openshift/oc io.openshift.build.versions: kubectl=1.33.3 io.openshift.expose-services: "" io.openshift.maintainer.component: oc io.openshift.maintainer.project: OCPBUGS io.openshift.tags: openshift,tools maintainer: Red Hat, Inc. name: openshift/ose-tools-rhel9 org.opencontainers.image.revision: cf8659ff1b7103f7d4b367026e60dea4406e5268 release: 202510212154.p2.g7f1d6f8.assembly.stream.el9 summary: Empty url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel-els/images/9.4-847.1719484506 vcs-ref: cf8659ff1b7103f7d4b367026e60dea4406e5268 vcs-type: git vendor: Red Hat, Inc. 
version: v4.20.0 ContainerConfig: {} Created: "2025-10-21T22:57:41Z" Id: sha256:519ab5d05ba33f2164e24bf3505c43498624ee893f155583311878d37d373eb5 Size: 321972411 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3843651d85087f9f19c0047f3b0c09e41f241946867d4a78acfda37ca0a405e2 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:3843651d85087f9f19c0047f3b0c09e41f241946867d4a78acfda37ca0a405e2 resourceVersion: "14074" uid: 17754b38-b940-477b-b53e-c957d23bc2bf - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:54e56e6f85721741ee7bf0336de8ad3bf138a56769a6d0097b600a0e361be58d size: 39618910 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4f8ddd7f5a755f537dd9d5f553c8c78171dcf3018c5fc96676a07380d3e14e20 size: 1745 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9790122be13ebce9b9c16d544b7f391cbf2d381d0efc0add7c4c8c0b554125f3 size: 86444530 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 65ec992ef2e6 Image: 9d2d5144cc6c24a3d835c1b484e9e2c5906e05640072f4f72264bf657785a6ac Labels: architecture: x86_64 build-date: 2022-04-29T13:47:33.189049 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "1.1651233097" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.12-1.1651233097 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 8b193c120d3f6f9df9d2db10d4ec77a45fff797d vcs-type: git vendor: Red Hat, Inc. 
version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 65ec992ef2e6 Image: sha256:1b39c85695f2e9da2622310ed29696aaa33005dfbf72493fab250f6d7936eeba Labels: architecture: x86_64 build-date: 2022-04-29T13:47:33.189049 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "1.1651233097" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.12-1.1651233097 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 8b193c120d3f6f9df9d2db10d4ec77a45fff797d vcs-type: git vendor: Red Hat, Inc. 
version: "1.12" User: "185" WorkingDir: /home/jboss Created: "2022-04-29T13:50:43Z" DockerVersion: 1.13.1 Id: sha256:a7113ef97be034e38d3fe27aee31482811b54df0a5eaf4106b0bff4adb1be47c Size: 126070475 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:38c7e4f7dea04bb536f05d78e0107ebc2a3607cf030db7f5c249f13ce1f52d59 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:38c7e4f7dea04bb536f05d78e0107ebc2a3607cf030db7f5c249f13ce1f52d59 resourceVersion: "14152" uid: 2f982ef4-69b5-4ff2-8c9f-1dcbe88d5a6d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5329d7039f252afc1c5d69521ef7e674f71c36b50db99b369cbb52aa9e0a6782 size: 39330100 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c29bbc660b4ec7e40639e8e74734c70db36cc6379574ca7c1a7abe3503a6d280 size: 115296436 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.16 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-07-19T16:14:53 com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "2" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: 
https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.16-2 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: f3b78885ddc4a0f11fad92c8c52c4e8366510b9d vcs-type: git vendor: Red Hat, Inc. version: "1.16" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-07-19T16:24:50Z" Id: sha256:6da9e746c5efe391addc022b93883e9f7eb997b21c7ca21634a7646b6823c9ec Size: 154654163 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17@sha256:3b94ccfa422b8ba0014302a3cfc6916b69f0f5a9dfd757b6704049834d4ff0ae kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:3b94ccfa422b8ba0014302a3cfc6916b69f0f5a9dfd757b6704049834d4ff0ae resourceVersion: "14147" uid: 9f9ee43e-e235-4660-b0b7-107507748ea6 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c7965aa7086045a59bdb113a1fb8a19d7ccf7af4133e59af8ecefd39cda8e0b1 size: 78964242 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0a47b24a64cfa083053cbc2215f10f9a84ef11dd992d4c2755f3f91e8ab9a38f size: 68231466 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:71ab0d8c6a4c41db5c7a2cde1f284080513b081bf45af5891eb8dd5a8cc4c373 size: 12192264 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3b84f6150455cc1a21301aa835b6c307f678241265c88e36cca2517be9a87a94 size: 61654235 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8de98ffb6690c14b53771251ef29ad27e82ec78db7f7e888c008552c0736f774 size: 508507060 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: OpenShift Developer Services Config: Entrypoint: - /usr/bin/go-init - -main - /usr/libexec/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - __doozer=merge - BUILD_RELEASE=202509140700.p0.gd192e90.assembly.stream.el8 - BUILD_VERSION=v4.13.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=13 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.13.0-202509140700.p0.gd192e90.assembly.stream.el8-d192e90 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.13 - __doozer_key=openshift-enterprise-cli - __doozer_version=v4.13.0 - OS_GIT_COMMIT=d192e90 - SOURCE_DATE_EPOCH=1712056277 - SOURCE_GIT_COMMIT=d192e901ece237d9ae1580d73e78f423ec2ef322 - SOURCE_GIT_TAG=openshift-clients-4.13.0-202304190216-95-gd192e901e - SOURCE_GIT_URL=https://github.com/openshift/oc - ART_BUILD_ENGINE=brew - ART_BUILD_DEPS_METHOD=cachito - ART_BUILD_NETWORK=internal-only - JENKINS_VERSION=2 - HOME=/var/lib/jenkins - JENKINS_HOME=/var/lib/jenkins - JENKINS_UC=https://updates.jenkins.io - OPENSHIFT_JENKINS_IMAGE_VERSION=4.13 - LANG=en_US.UTF-8 - LC_ALL=en_US.UTF-8 - INSTALL_JENKINS_VIA_RPMS=true ExposedPorts: 8080/tcp: {} 50000/tcp: {} Labels: License: GPLv2+ architecture: x86_64 build-date: 2025-09-22T10:45:44 com.redhat.component: openshift-jenkins-2-container com.redhat.license_terms: https://www.redhat.com/agreements description: Jenkins is a continuous integration server distribution-scope: public io.buildah.version: 1.33.12 io.jenkins.version: 2.516.3 io.k8s.description: Jenkins is a continuous 
integration server io.k8s.display-name: Jenkins 2 io.openshift.build.commit.id: cfb3a904331d93327ba0eb7145b78ac0b24f4675 io.openshift.build.commit.url: https://github.com/openshift/jenkins/commit/cfb3a904331d93327ba0eb7145b78ac0b24f4675 io.openshift.build.source-location: https://github.com/openshift/jenkins io.openshift.expose-services: 8080:http io.openshift.maintainer.component: Jenkins io.openshift.maintainer.product: OpenShift Container Platform io.openshift.maintainer.project: OCPBUGS io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: jenkins,jenkins2,ci maintainer: openshift-dev-services+jenkins@redhat.com name: openshift/ose-jenkins release: "1758537892" summary: Provides the latest release of Red Hat Universal Base Image 8. url: https://access.redhat.com/containers/#/registry.access.redhat.com/openshift/ose-jenkins/images/v4.13.0-1758537892 vcs-ref: 976802c3a15462ab81a28c7cd01c1c6765ddf8ed vcs-type: git vendor: Red Hat, Inc. version: v4.13.0 User: "1001" Volumes: /var/lib/jenkins: {} ContainerConfig: {} Created: "2025-09-22T10:52:31Z" Id: sha256:24a08856ee5258c2a25c61362db60a3dffec61997756f9b58e2c3cf2d25b84e3 Size: 729584584 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ocp-tools-4/jenkins-rhel8@sha256:3bc55cb1bafdd281a784ec7950c8e95914079522f152f642e8172869e83b4585 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:3bc55cb1bafdd281a784ec7950c8e95914079522f152f642e8172869e83b4585 resourceVersion: "13766" uid: 757352d1-87d2-430a-b4ae-a5a6f3f8c55e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:54e56e6f85721741ee7bf0336de8ad3bf138a56769a6d0097b600a0e361be58d size: 39618910 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4f8ddd7f5a755f537dd9d5f553c8c78171dcf3018c5fc96676a07380d3e14e20 size: 1745 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:bd7fede8be96f90f2657b88f88354c8b6589694c0ca7eb8800babca6e24674cb size: 117829840 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - 
JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 65ec992ef2e6 Image: 5a26ca0d70ec13dbe4df66eba9eb35d58c0d38101682fc806ddf63a443024230 Labels: architecture: x86_64 build-date: 2022-04-29T13:51:35.051397 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1651233100" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.12-1.1651233100 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: ef89a1b14e17c770cc25cefddf1a67b084eaaa66 vcs-type: git vendor: Red Hat, Inc. 
version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 65ec992ef2e6 Image: sha256:c43c95bfb42baf8a0ecfe2a8bc338bd99e53ffd1f85a0d4a7cc7b366d1ea08f0 Labels: architecture: x86_64 build-date: 2022-04-29T13:51:35.051397 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1651233100" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.12-1.1651233100 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: ef89a1b14e17c770cc25cefddf1a67b084eaaa66 vcs-type: git vendor: Red Hat, Inc. 
version: "1.12" User: "185" WorkingDir: /home/jboss Created: "2022-04-29T13:58:02Z" DockerVersion: 1.13.1 Id: sha256:e229a9be4f0b1494f655306d43c244853bd14d6515c50ff30085756fae147a68 Size: 157457992 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11@sha256:3f00540ce2a3a01d2a147a7d73825fe78697be213a050bd09edae36266d6bc40 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:3f00540ce2a3a01d2a147a7d73825fe78697be213a050bd09edae36266d6bc40 resourceVersion: "14171" uid: 42db77f0-1f4f-4bf9-ba10-e58291d46e26 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:50be7bb6ce3ddb41606e1956ba5c61072699ac536980f260a0db6dc59c8013fe size: 39575081 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:58d74dec22f4fd2877a82b6d78f2c1b8ec40c5c6afab78d4d7daebc426ffe62b size: 114687323 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.23 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2025-10-22T20:10:51 com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.buildah.version: 1.33.12 io.cekit.version: 4.13.0.dev0 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" org.opencontainers.image.documentation: https://rh-openjdk.github.io/redhat-openjdk-containers/ release: "3.1761163783" summary: 
Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.23-3.1761163783 usage: https://rh-openjdk.github.io/redhat-openjdk-containers/ vcs-ref: 24cbf05a8339c91f2ea7bd92a14704b97044859f vcs-type: git vendor: Red Hat, Inc. version: "1.23" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-10-22T20:25:12Z" Id: sha256:f560c666bc9eb16c34ffe6c73e4e99e56fa382a946dc391c63800af46241fc8c Size: 154290830 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/openjdk-17@sha256:3f01daca201d91f4989f8ffe80625d2e08fc0e69f241a7359d30c15cc7c9a419 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:3f01daca201d91f4989f8ffe80625d2e08fc0e69f241a7359d30c15cc7c9a419 resourceVersion: "13688" uid: 72240a14-c318-4519-ba30-66cb5dc4e5d5 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2920d84eafa0cf94806ab58f0a2124f7b2d35bcbb06fc89a9106dcc28efe397a size: 39653524 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0393e1c5dfe9ede3e742e787ae5d00503a4f0687aa66415c3ca87cac6e87880d size: 40503436 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - container=oci - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - NPM_RUN=start - PLATFORM=el9 - NODEJS_VERSION=20 - NAME=nodejs - SUMMARY=Minimal image for running Node.js 20 applications - DESCRIPTION=Node.js 20 available as container is a base platform for running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. - NPM_CONFIG_PREFIX=/opt/app-root/src/.npm-global - PATH=/opt/app-root/src/node_modules/.bin/:/opt/app-root/src/.npm-global/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-15T16:15:05Z" com.redhat.component: nodejs-20-minimal-container com.redhat.deployments-dir: /opt/app-root/src com.redhat.dev-mode: DEV_MODE:false com.redhat.dev-mode.port: DEBUG_PORT:5858 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Node.js 20 available as container is a base platform for running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-nodejs-container io.buildah.version: 1.41.4 io.k8s.description: Node.js 20 available as container is a base platform for running various Node.js 20 applications and frameworks. 
Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. io.k8s.display-name: Node.js 20 Micro io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nodejs,nodejs20 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/nodejs-20-minimal org.opencontainers.image.revision: e92372fde5f0369030c2af49d3c12ebf7f5f53d3 release: "1760544727" summary: Minimal image for running Node.js 20 applications url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: e92372fde5f0369030c2af49d3c12ebf7f5f53d3 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-15T16:15:10Z" Id: sha256:bb62c40b1e77fe2abab3e1b355e8f93cb953ba9c9cb33e3d9e29b7e5b409dfce Size: 80169352 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/nodejs-20-minimal@sha256:3fdd215cd56a7bfe500482c73271dc1e79bf90457a0f245e6f50d3d63ca4da7c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:3fdd215cd56a7bfe500482c73271dc1e79bf90457a0f245e6f50d3d63ca4da7c resourceVersion: "14087" uid: ac0e9a45-8974-4c05-ad10-b1eb08b70882 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7f2c2c4492b6b2d181be862a0a1d1b6f6851cb07244efbcb43d44f9936aa78d5 size: 80005019 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3fcbead5564d28a05c64093d61810ddf9336fd6a7d3132cbde762bd287b05b30 size: 423922375 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. 
Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - EAP_FULL_GROUPID=org.jboss.eap - JBOSS_EAP_VERSION=7.4.18 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.4.18 - WILDFLY_VERSION=7.4.18.GA-redhat-00001 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.9.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - S2I_FP_VERSION=23.0.0.Final - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - 
JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap74-openjdk8-openshift-rhel7 - JBOSS_IMAGE_VERSION=7.4.18 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2024-07-22T13:35:54 com.redhat.component: jboss-eap-74-openjdk8-builder-openshift-rhel7-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Enterprise Application Platform 7.4 OpenShift container image. distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running JavaEE applications on JBoss EAP 7.4 io.k8s.display-name: JBoss EAP 7.4 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7 maintainer: Red Hat name: jboss-eap-7/eap74-openjdk8-openshift-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.4.18 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.4.18 release: "2" summary: Red Hat JBoss Enterprise Application Platform 7.4 OpenShift container image. url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-eap-7/eap74-openjdk8-openshift-rhel7/images/7.4.18-2 vcs-ref: 3989b0b160e6978695f83ec2fb79ba404572528d vcs-type: git vendor: Red Hat, Inc. version: 7.4.18 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-07-22T13:40:44Z" Id: sha256:852a703631f7a7cd4d48ed06fa933279c4436435a08416469777e96ff8ceccd0 Size: 503974300 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-eap-7/eap74-openjdk8-openshift-rhel7@sha256:403c4481d100edee6355e3defc17cec304db6f510b0c953c298e171b16dbd8bf kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:403c4481d100edee6355e3defc17cec304db6f510b0c953c298e171b16dbd8bf resourceVersion: "13500" uid: e7960d75-9c0b-4384-a64b-92649412f6d9 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d2bb55755c2edf5423e38bd42c48b277e0cb4c5c765218247001a8c8eb479a87 size: 215199578 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4bf8bbfa97006b1ddb3d00e28ee8e476a94d23ad0c9b7d27dfbab02d3fa59957 size: 69975754 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el9 - NODEJS_VER=20 - 
PYTHON_VERSION=3.12 - PATH=/opt/app-root/src/.local/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PYTHONUNBUFFERED=1 - PYTHONIOENCODING=UTF-8 - LC_ALL=en_US.UTF-8 - LANG=en_US.UTF-8 - CNB_STACK_ID=com.redhat.stacks.ubi9-python-312 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - PIP_NO_CACHE_DIR=off - SUMMARY=Platform for building and running Python 3.12 applications - DESCRIPTION=Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. - BASH_ENV=/opt/app-root/bin/activate - ENV=/opt/app-root/bin/activate - PROMPT_COMMAND=. /opt/app-root/bin/activate ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T16:22:08Z" com.redhat.component: python-312-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. distribution-scope: public io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi9-python-312 io.k8s.description: Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. io.k8s.display-name: Python 3.12 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,python,python312,python-312,rh-python312 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/python-312 org.opencontainers.image.revision: 65d9db1c2b55904cec2bc20b7fe5c31bab89e7e1 release: "1760372467" summary: Platform for building and running Python 3.12 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-python-container.git --context-dir=3.12/test/setup-test-app/ ubi9/python-312 python-sample-app vcs-ref: 65d9db1c2b55904cec2bc20b7fe5c31bab89e7e1 vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T16:22:20Z" Id: sha256:a1ed2585f89a627604d7b2487507afa4d4c6fcd6f144fc604233c5fea7f33f31 Size: 382279194 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/python-312@sha256:4100f9633dd7533a6ab847ea2b666de21bbb84baf070945a3c142cb019cd9a5f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:06Z" name: sha256:4100f9633dd7533a6ab847ea2b666de21bbb84baf070945a3c142cb019cd9a5f resourceVersion: "14130" uid: a30c750f-4ce0-4f1f-9f4d-02e82f499e25 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:690c847f419672788dca52f9eb17b10133919a0aae947934f7bdf5ccf30f1546 size: 79990748 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8b0190b73e2fbfc495259da2dd7cb3dd11858057a17aba8e69b494f184278222 size: 113428012 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.17 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2024-06-04T14:54:34 com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.10.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: 
redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "3.1717512819" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.17-3.1717512819 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: d57f866d5459ae47560699f643a117753a2f1ee0 vcs-type: git vendor: Red Hat, Inc. version: "1.17" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-06-04T15:04:31Z" Id: sha256:e48dd217b99967a40735d2ac4dc2d1f1b438fe009280a869de143e47e6930d13 Size: 193448361 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/redhat-openjdk-18/openjdk18-openshift@sha256:421d1f6a10e263677b7687ccea8e4a59058e2e3c80585505eec9a9c2e6f9f40e kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:421d1f6a10e263677b7687ccea8e4a59058e2e3c80585505eec9a9c2e6f9f40e resourceVersion: "13683" uid: 595390c4-28a1-4cfc-ab89-d5c0808a02df - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5329d7039f252afc1c5d69521ef7e674f71c36b50db99b369cbb52aa9e0a6782 size: 39330100 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:70fe1bf188594ef42f2faa617005d3f397a7a395c9b41f0ebe25140adfb323a1 size: 111342558 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.16 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-07-20T20:11:28 com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image 
for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "3" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.16-3 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: 63a002ff432d7760adc5486db1a121a78718a340 vcs-type: git vendor: Red Hat, Inc. version: "1.16" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-07-20T20:24:29Z" Id: sha256:998e41ed2c35c256fabfb4884b115e3b8e6f22458afd8a6fbf9e512f3a5fb100 Size: 150702816 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11@sha256:425e2c7c355bea32be238aa2c7bdd363b6ab3709412bdf095efe28a8f6c07d84 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:425e2c7c355bea32be238aa2c7bdd363b6ab3709412bdf095efe28a8f6c07d84 resourceVersion: "14179" uid: 9f266725-ba2e-4032-961a-d8bd33261941 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2a99c93da16827d9a6254f86f495d2c72c62a916f9c398577577221d35d2c790 size: 39641757 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4418ace46c3dd933f98d83f357f31048e72d5db3d97bccfdb0acef769ee8234f size: 1743 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9675ea79d90c914f2530be70a2c90072eed62580297ca69aa1ab9d21290a3555 size: 87219093 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: be4e58a52d40 Image: 4f8b58094a49e52735fd2f81b53d0eb1e7a19e33777cba5893dbc7d69fdcfdec Labels: architecture: x86_64 build-date: 2021-12-01T18:39:55.002944 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications 
io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "10.1638383033" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.10-10.1638383033 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 5dbba0eecb1056908b8875ef8df210953c55c03a vcs-type: git vendor: Red Hat, Inc. version: "1.10" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: be4e58a52d40 Image: sha256:dc41aba32c4b97f20cbe61c6a4ccb9212445c80f4d02a97e0a18a2f65e8b779f Labels: architecture: x86_64 build-date: 2021-12-01T18:39:55.002944 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "10.1638383033" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.10-10.1638383033 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 5dbba0eecb1056908b8875ef8df210953c55c03a vcs-type: git vendor: Red Hat, Inc. 
version: "1.10" User: "185" WorkingDir: /home/jboss Created: "2021-12-01T18:42:25Z" DockerVersion: 1.13.1 Id: sha256:2ad55ed2b7c7daf918966cd955c79dd2b1eb534bffe90c8988c11bc90fca73f7 Size: 126867370 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:431753c8a6a8541fdc0edd3385b2c765925d244fdd2347d2baa61303789696be kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:431753c8a6a8541fdc0edd3385b2c765925d244fdd2347d2baa61303789696be resourceVersion: "14187" uid: cba71974-f4f8-4e06-887f-08f392eda4e3 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1f1202c893ce2775c72b2a3f42ac33b25231d16ca978244bb0c6d1453dc1f39e size: 76250035 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:32be9843afa050552a66345576a59497ba7c81c272aa895d67e6e349841714da size: 1320 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9f74ee800fdb2242405ca2ee7e74f612973956d1725766a1f2199339f92b8381 size: 4013823 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c47adc077129316d733e991a2da2c4bf7ec3d93b7836e0b97ddc5885f0e512ba size: 85699059 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:030ef875c16299c58b1fe1f5232cae9d08cbfc35d1e357ee72bfc8190cda2f40 size: 15165108 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.6 - JOLOKIA_VERSION=1.6.2.redhat-00002 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages - JAVACONFDIRS=/opt/rh/rh-maven35/root/etc/java - XDG_CONFIG_DIRS=/opt/rh/rh-maven35/root/etc/xdg:/etc/xdg - XDG_DATA_DIRS=/opt/rh/rh-maven35/root/usr/share:/usr/local/share:/usr/share ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 08c71bdb7e2a Image: 3ee01967bae94329abd65082510b9c33d2958d82a6e7964fa249518274fddf84 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-04-21T12:04:49.688935 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for 
OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "20.1587470195" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.6-20.1587470195 vcs-ref: 31565b6a7a80bedca64476301fd32f1edaa04da4 vcs-type: git vendor: Red Hat, Inc. version: "1.6" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.6 - JOLOKIA_VERSION=1.6.2.redhat-00002 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages - JAVACONFDIRS=/opt/rh/rh-maven35/root/etc/java - XDG_CONFIG_DIRS=/opt/rh/rh-maven35/root/etc/xdg:/etc/xdg - XDG_DATA_DIRS=/opt/rh/rh-maven35/root/usr/share:/usr/local/share:/usr/share ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 08c71bdb7e2a Image: sha256:38e2a78a809f259293280b80b098cf5674b594a54ea377d70e58df22ab789e97 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-04-21T12:04:49.688935 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "20.1587470195" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.6-20.1587470195 vcs-ref: 31565b6a7a80bedca64476301fd32f1edaa04da4 vcs-type: git vendor: Red Hat, Inc. 
version: "1.6" User: "185" WorkingDir: /home/jboss Created: "2020-04-21T12:06:38Z" DockerVersion: 1.13.1 Id: sha256:e8dc9fce7fca406094a2871389ad7734d45e6c15fe7b5fb59e9508f86de065f7 Size: 181136027 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift@sha256:4361e4a098acb1d1cbd79b6fb1e67a891949e65cdc40e286a8e5da9bfb7fa332 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:4361e4a098acb1d1cbd79b6fb1e67a891949e65cdc40e286a8e5da9bfb7fa332 resourceVersion: "14051" uid: 0fae2fb1-814e-4e34-bf2b-f33dbaf23a78 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:25c75c34b2e2b68ba9245d9cddeb6b8a0887371ed30744064f85241a75704d87 size: 79262296 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:67705065e025181e4faca8aabe1305bdd92f5bdf8a2b8009cdb69183ac2e2c47 size: 49851946 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9d458e2e81cb0fa811f569aaf711628309c0372c7d5eed4a8ea9ec96b4aeeb42 size: 9300456 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:55f517bff7f6fc3f62bf9d37135053370f0c7192b76017b1280f3ee23f51a8e3 size: 1071942759 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - ART_BUILD_ENGINE=konflux - ART_BUILD_DEPS_METHOD=cachi2 - ART_BUILD_NETWORK=hermetic - ART_BUILD_DEPS_MODE=default - __doozer=merge - BUILD_RELEASE=202510220756.p2.g9d55fd1.assembly.stream.el9 - BUILD_VERSION=v4.20.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=20 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.20.0-202510220756.p2.g9d55fd1.assembly.stream.el9-9d55fd1 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.20 - __doozer_key=driver-toolkit - __doozer_uuid_tag=driver-toolkit-rhel9-v4.20.0-20251022.081026 - __doozer_version=v4.20.0 - OS_GIT_COMMIT=9d55fd1 - SOURCE_DATE_EPOCH=1750755771 - SOURCE_GIT_COMMIT=9d55fd1aaba05830f857132bd149ee3cf18cc20f - SOURCE_GIT_TAG=9d55fd1a - SOURCE_GIT_URL=https://github.com/openshift/driver-toolkit Labels: License: GPLv2+ architecture: x86_64 build-date: "2025-10-22T08:48:37Z" com.redhat.component: driver-toolkit-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:openshift:4.20::el9 description: Empty distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: driver-toolkit is a container with the kernel packages necessary for building driver containers for deploying kernel modules/drivers on OpenShift io.k8s.display-name: Empty io.openshift.build.commit.id: 9d55fd1aaba05830f857132bd149ee3cf18cc20f io.openshift.build.commit.url: https://github.com/openshift/driver-toolkit/commit/9d55fd1aaba05830f857132bd149ee3cf18cc20f io.openshift.build.source-location: https://github.com/openshift/driver-toolkit io.openshift.expose-services: "" io.openshift.maintainer.component: Driver Toolkit io.openshift.maintainer.project: OCPBUGS io.openshift.release.operator: "true" io.openshift.tags: Empty maintainer: Red Hat, Inc. 
name: openshift/driver-toolkit-rhel9 org.opencontainers.image.revision: a0aa3a715de00eeed02beef413c6c06bdfadfdc1 release: 202510220756.p2.g9d55fd1.assembly.stream.el9 summary: Empty url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel-els/images/9.4-847.1719484506 vcs-ref: a0aa3a715de00eeed02beef413c6c06bdfadfdc1 vcs-type: git vendor: Red Hat, Inc. version: v4.20.0 ContainerConfig: {} Created: "2025-10-22T08:51:02Z" Id: sha256:58d01ed48d76969a67687d2971bcffcf254217244cf137ba6dd2849ebaa02f82 Size: 1210383461 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4396f6b4629ba45fe23c13c91aaa64427e957b15841bc65c84537763f00bcbe0 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:4396f6b4629ba45fe23c13c91aaa64427e957b15841bc65c84537763f00bcbe0 resourceVersion: "13296" uid: 50657682-4f9c-487f-b1a3-f356f1e7bd0f - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d2bb55755c2edf5423e38bd42c48b277e0cb4c5c765218247001a8c8eb479a87 size: 215199578 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:434c740598f75b4980d8de08b0ef259c5a66bbef05e784da9731b3fea33ef719 size: 22371746 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NODEJS_VER=20 - PERL_VERSION=5.32 - PERL_SHORT_VER=532 - NAME=perl - SUMMARY=Platform for building and running Perl 5.32 applications - DESCRIPTION=Perl 5.32 available as container is a base platform for building and running various Perl 5.32 applications and frameworks. Perl is a high-level programming language with roots in C, sed, awk and shell scripting. Perl is good at handling processes and files, and is especially good at handling text. Perl's hallmarks are practicality and efficiency. While it is used to do a lot of different things, Perl's most common applications are system administration utilities and web programming. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T20:08:27Z" com.redhat.component: perl-532-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Perl 5.32 available as container is a base platform for building and running various Perl 5.32 applications and frameworks. Perl is a high-level programming language with roots in C, sed, awk and shell scripting. Perl is good at handling processes and files, and is especially good at handling text. Perl's hallmarks are practicality and efficiency. 
While it is used to do a lot of different things, Perl's most common applications are system administration utilities and web programming. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-perl-container io.buildah.version: 1.41.4 io.k8s.description: Perl 5.32 available as container is a base platform for building and running various Perl 5.32 applications and frameworks. Perl is a high-level programming language with roots in C, sed, awk and shell scripting. Perl is good at handling processes and files, and is especially good at handling text. Perl's hallmarks are practicality and efficiency. While it is used to do a lot of different things, Perl's most common applications are system administration utilities and web programming. io.k8s.display-name: Apache 2.4 with mod_fcgid and Perl 5.32 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,perl,perl532,perl-532 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/perl-532 org.opencontainers.image.revision: 4c47d79a2b9652e6646d3826a4c4d9f235a10690 release: "1760386076" summary: Platform for building and running Perl 5.32 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi9/perl-532:latest vcs-ref: 4c47d79a2b9652e6646d3826a4c4d9f235a10690 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T20:08:36Z" Id: sha256:0979d51cefe37e42dbf0c650cb7f4e88307219bc6fecc6882132b6560a22f474 Size: 334674324 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/perl-532@sha256:44f6258460b45f78d229e9a3865e7481285d268460760710f69f951ed2b41aa5 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:44f6258460b45f78d229e9a3865e7481285d268460760710f69f951ed2b41aa5 resourceVersion: "14013" uid: e0ecbf42-634d-4282-89da-f506c4eb4299 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:157428bbe422689f642c824a512700c63a930f9e9d7da34290fd559f624363cb size: 114679860 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - 
JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.17 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-10-23T16:06:10 com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "4" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.17-4 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 0903a764fc8f6f41c6003906542e9f7dbe6b0f7b vcs-type: git vendor: Red Hat, Inc. version: "1.17" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-10-23T16:17:40Z" Id: sha256:ae3c380c4feff75a6e10386f81da0fee2c54364dc1d28920e1b266c91fc83ae1 Size: 154015376 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17@sha256:46a4e73ddb085d1f36b39903ea13ba307bb958789707e9afde048764b3e3cae2 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:46a4e73ddb085d1f36b39903ea13ba307bb958789707e9afde048764b3e3cae2 resourceVersion: "14148" uid: d5c31426-7e47-4218-9057-501bc26d8a68 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2a99c93da16827d9a6254f86f495d2c72c62a916f9c398577577221d35d2c790 size: 39641757 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4418ace46c3dd933f98d83f357f31048e72d5db3d97bccfdb0acef769ee8234f size: 1743 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d0e25578e51bf59d53b2a82277e41fed5111f2c676c3a545ff1c9b5ffbddeb8f size: 113478789 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - 
AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: be4e58a52d40 Image: 78ddbf525e8ed616920530e5e572cb514ac20237e5d502abf94d099cfbe256b3 Labels: architecture: x86_64 build-date: 2021-12-01T18:40:35.357924 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "10.1638383025" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.10-10.1638383025 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: fbc47d72a59516ad145cb5349a99a7b0a9deb333 vcs-type: git vendor: Red Hat, Inc. 
version: "1.10" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: be4e58a52d40 Image: sha256:f80060f8ab16e6d8d497c985ba706ceb278a075a28bcfc8e1f25d02fdeb08f03 Labels: architecture: x86_64 build-date: 2021-12-01T18:40:35.357924 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "10.1638383025" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.10-10.1638383025 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: fbc47d72a59516ad145cb5349a99a7b0a9deb333 vcs-type: git vendor: Red Hat, Inc. 
version: "1.10" User: "185" WorkingDir: /home/jboss Created: "2021-12-01T18:46:56Z" DockerVersion: 1.13.1 Id: sha256:20c41960b22f85bc6433730098313516c0bcce9cfc7214cd69d892e8b938f055 Size: 153129803 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8@sha256:496e23be70520863bce6f7cdc54d280aca2c133d06e992795c4dcbde1a9dd1ab kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:496e23be70520863bce6f7cdc54d280aca2c133d06e992795c4dcbde1a9dd1ab resourceVersion: "14207" uid: b1ed8e10-f98c-4f33-a79a-e0de1166ed4f - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d246e53da6241a619b7dcea7d4403071b9e1961797aa4f6c766786e29732651c size: 76526594 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:91bd2c541a17a588359eb054815718e41f871d03b4d4daf7b3584b25fbdcbb67 size: 1563 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:be722ba66cc6549b4778e0f8d184a93fb0da47bf39c1046b0c45c163502b7cfe size: 353009795 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/datagrid/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.8.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.8 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.8.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: f18f459d223a Image: ef2286cf701720e28215d073370ae5e7984c40bfcfeceef7ecaa7fb529d58f67 Labels: architecture: x86_64 build-date: 2021-12-02T07:22:02.281837 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 
3.7.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.8.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.8.GA release: "6.1638429593" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.8-6.1638429593 vcs-ref: 9152a4fabf772370752a00282ad5f14ba0008d3f vcs-type: git vendor: Red Hat, Inc. version: "1.8" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.8.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.8 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.8.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: f18f459d223a Image: sha256:241bc1bd19857024762f24ec6b5c747d4f82a6f313970d36395cb60618b075a0 Labels: architecture: x86_64 build-date: 2021-12-02T07:22:02.281837 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.7.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. 
io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.8.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.8.GA release: "6.1638429593" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.8-6.1638429593 vcs-ref: 9152a4fabf772370752a00282ad5f14ba0008d3f vcs-type: git vendor: Red Hat, Inc. version: "1.8" User: "185" WorkingDir: /home/jboss Created: "2021-12-02T07:28:37Z" DockerVersion: 1.13.1 Id: sha256:22a6577c0ae43c83f3e93bbe1495ba06066c164d9ad80b47295e322b28cd888a Size: 429545765 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:4a69ef73f4b6afb2819630baf5d169a038ed4def330a44534926aa933cbbd7e5 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:4a69ef73f4b6afb2819630baf5d169a038ed4def330a44534926aa933cbbd7e5 resourceVersion: "13911" uid: 866ae5bf-1523-4f3e-8dea-f0262de38c6c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c3056c1dbb67649cd282429a7b39aaaae9866a9298cb78c197a3b4e099a75a85 size: 46059398 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-postgresql Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - POSTGRESQL_VERSION=13 - POSTGRESQL_PREV_VERSION=12 - HOME=/var/lib/pgsql - PGUSER=postgres - APP_DATA=/opt/app-root - SUMMARY=PostgreSQL is an advanced Object-Relational database management system - DESCRIPTION=PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql - ENABLED_COLLECTIONS= ExposedPorts: 5432/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T16:12:36Z" com.redhat.component: postgresql-13-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. 
distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. io.k8s.display-name: PostgreSQL 13 io.openshift.expose-services: 5432:postgresql io.openshift.s2i.assemble-user: "26" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,postgresql,postgresql13,postgresql-13 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel9/postgresql-13 org.opencontainers.image.revision: 2b3e00a78de726dde88775e8037dd6ec891171d1 release: "1760371920" summary: PostgreSQL is an advanced Object-Relational database management system url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 rhel9/postgresql-13 vcs-ref: 2b3e00a78de726dde88775e8037dd6ec891171d1 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "26" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T16:12:49Z" Id: sha256:b1c48bb0da2b9f7532acc54cfc1f18109e5a86ac90b03c8a9dfbd48bccc9d813 Size: 143159246 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel9/postgresql-13@sha256:4b4e59365194340a2a68d6720b35f4f3434c75c7d504a9e785fb056f7e4212e7 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:4b4e59365194340a2a68d6720b35f4f3434c75c7d504a9e785fb056f7e4212e7 resourceVersion: "14111" uid: ab4c3940-25ed-4a3a-8912-fd8f916c0e9c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:eca9236fb686825c1ec7ba1f1b339f6300ed2d4fffdf50611dde66cb8f6eeaa9 size: 140240268 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/java-21 - JAVA_VENDOR=openjdk - JAVA_VERSION=21 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-21 - JBOSS_IMAGE_VERSION=1.18 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2024-01-17T20:01:48 com.redhat.component: 
openjdk-21-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 21 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.9.1 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-21 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "21" org.jboss.product.version: "21" org.opencontainers.image.documentation: https://jboss-container-images.github.io/openjdk/ release: "3.1705519633" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 21 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-21/images/1.18-3.1705519633 usage: https://jboss-container-images.github.io/openjdk/ vcs-ref: c5efa867ee7f0301fb355289f8e839c9236007d8 vcs-type: git vendor: Red Hat, Inc. version: "1.18" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-01-17T20:10:45Z" Id: sha256:6cc5e74a214a5396231402ca4386dceb067e112b8aad022961c5b51ded628172 Size: 179574802 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-21@sha256:4f35566977c35306a8f2102841ceb7fa10a6d9ac47c079131caed5655140f9b2 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:4f35566977c35306a8f2102841ceb7fa10a6d9ac47c079131caed5655140f9b2 resourceVersion: "14119" uid: d84c85f2-1c44-4035-abf9-69109c8327f0 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:872582724f337bcc41b829d3b854567e146ab62aa3c7de0b37e18617d38f5d08 size: 76246809 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b13ffc206103620a7d59e4f5b72279b53e317ade5d545a3daa06ab9bed270f92 size: 1409 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b7f1305ca252f66148776525dde2bb4df6c83494633a8164b3fc6b1560b711bf size: 4028980 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9fcb9eb95cb77715beb5cf6e769bfb055fe46ac0cad1cdf99d229bce80c5b3b9 size: 87034685 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4f6270eaa927580862840be94cdc8174a19ee9fc4aeb6d17580fde4dce18649d size: 15185511 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.7 - JOLOKIA_VERSION=1.6.2.redhat-00002 - KARAF_FRAMEWORK_VERSION=4.2.6.fuse-770019-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - 
'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: d0feb8001879 Image: 0d972d0393973e02536414b0b3b2347554ccb2035aa6cbf9b95c47d962077977 Labels: architecture: x86_64 build-date: 2020-11-04T17:19:28.608741 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.karaf: 4.2.6.fuse-770019-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "12.1604505258" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.7-12.1604505258 vcs-ref: c426cd20318ca9f4d60d8cf5d41f62dd13250608 vcs-type: git vendor: Red Hat, Inc. 
version: "1.7" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.7 - JOLOKIA_VERSION=1.6.2.redhat-00002 - KARAF_FRAMEWORK_VERSION=4.2.6.fuse-770019-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: d0feb8001879 Image: sha256:3fd95b4d7581b6cfedace98588d128d9cd07e9b32593c6d9cf3eadb7ad56d0eb Labels: architecture: x86_64 build-date: 2020-11-04T17:19:28.608741 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.karaf: 4.2.6.fuse-770019-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "12.1604505258" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.7-12.1604505258 vcs-ref: c426cd20318ca9f4d60d8cf5d41f62dd13250608 vcs-type: git vendor: Red Hat, Inc. 
version: "1.7" User: "185" WorkingDir: /home/jboss Created: "2020-11-04T17:20:49Z" DockerVersion: 1.13.1 Id: sha256:7a51cb9d7ec1e5fde71dfa74773808c3f6238be8cb3d570ee1bee6356211bb2e Size: 182503791 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:4ffd7ccbe8ff0aa2e09e1c8a72410aadc721ed3ed227890f03ce3f8aa2b33700 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:4ffd7ccbe8ff0aa2e09e1c8a72410aadc721ed3ed227890f03ce3f8aa2b33700 resourceVersion: "14038" uid: 1c2abfe7-041c-48f8-b17b-38f1de62fafa - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:97da74cc6d8fa5d1634eb1760fd1da5c6048619c264c23e62d75f3bf6b8ef5c4 size: 79524639 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d8190195889efb5333eeec18af9b6c82313edd4db62989bd3a357caca4f13f0e size: 1438 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:37fcebd665b9bf280b3a7b7fc8cbbdd35c40de9fde97eec88a9efbb1a416cf0f size: 31542956 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:03bf2f9ff79ce68fdf647999d3c96dd98a59121fae75dd2c1dcce34e3e159eeb size: 13107144 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2b42f43a3d9df8228ab00afc8ece1dbfafae24fbd2b3ea72b6234bb68dc2c1bf size: 59202343 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6a236d3f7133294b18fc16ae91db25789f7bc787026b7c8a9652066b26396ff7 size: 251378864 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: OpenShift Developer Services Config: Entrypoint: - /usr/bin/go-init - -main - /usr/local/bin/run-jnlp-client Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - __doozer=merge - BUILD_RELEASE=202306070816.p0.g05d83ef.assembly.stream - BUILD_VERSION=v4.13.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=13 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.13.0-202306070816.p0.g05d83ef.assembly.stream-05d83ef - SOURCE_GIT_TREE_STATE=clean - OS_GIT_COMMIT=05d83ef - SOURCE_DATE_EPOCH=1685556672 - SOURCE_GIT_COMMIT=05d83eff7e17160e679898a2a5cd6019ec252c49 - SOURCE_GIT_TAG=openshift-clients-4.13.0-202304190216-4-g05d83eff7 - SOURCE_GIT_URL=https://github.com/openshift/oc - HOME=/home/jenkins - LANG=en_US.UTF-8 - LC_ALL=en_US.UTF-8 Labels: License: GPLv2+ architecture: x86_64 build-date: 2023-06-13T18:31:11 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jenkins-agent-base-rhel8-container com.redhat.license_terms: https://www.redhat.com/agreements description: The jenkins agent base image is intended to be built on top of, to add your own tools that your jenkins job needs. The agent base image includes all the jenkins logic to operate as a agent, so users just have to yum install any additional packages their specific jenkins job will need distribution-scope: public io.buildah.version: 1.29.0 io.k8s.description: The jenkins agent base image is intended to be built on top of, to add your own tools that your jenkins job needs. 
The agent base image includes all the jenkins logic to operate as a agent, so users just have to yum install any additional packages their specific jenkins job will need io.k8s.display-name: Jenkins Agent Base io.openshift.build.commit.id: 418b910a5af2d9a46c4259fbdbe9a851f6a39820 io.openshift.build.commit.url: https://github.com/openshift/jenkins/commit/418b910a5af2d9a46c4259fbdbe9a851f6a39820 io.openshift.build.source-location: https://github.com/openshift/jenkins io.openshift.expose-services: "" io.openshift.maintainer.component: Jenkins io.openshift.maintainer.product: OpenShift Container Platform io.openshift.maintainer.project: OCPBUGS io.openshift.tags: openshift,jenkins,agent maintainer: openshift-dev-services+jenkins@redhat.com name: openshift/jenkins-agent-base release: "1686680363" summary: Provides the latest release of the Red Hat Extended Life Base Image. url: https://access.redhat.com/containers/#/registry.access.redhat.com/openshift/jenkins-agent-base/images/v4.13.0-1686680363 vcs-ref: 512bc80b8c0842a55ce67759deb32d87dcc499ff vcs-type: git vendor: Red Hat, Inc. version: v4.13.0 User: root ContainerConfig: {} Created: "2023-06-13T18:37:00Z" Id: sha256:5e70ac6eee70fc29d831dd15aea72b5db1b18ca661116dad79ba8d51ed00a6ab Size: 434784606 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ocp-tools-4/jenkins-agent-base-rhel8@sha256:50729a37cfd9eeb05865038eef01b6a2baa92e2f12fe429de3f43d85ef8824b5 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:50729a37cfd9eeb05865038eef01b6a2baa92e2f12fe429de3f43d85ef8824b5 resourceVersion: "13668" uid: 8e158c55-8622-4d24-8ed6-b0c27c96743a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:172ffdf04fe0ffd5a0117272da0333271a3c558dbeaf357e7412638d99a1e462 size: 154305621 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:929840606b28e3c864a81c9f400404f190b84f76c4de3b364adb059c3051d45f size: 88849944 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el8 - NODEJS_VER=20 - PYTHON_VERSION=3.12 - PATH=/opt/app-root/src/.local/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PYTHONUNBUFFERED=1 - PYTHONIOENCODING=UTF-8 - LC_ALL=en_US.UTF-8 - LANG=en_US.UTF-8 - CNB_STACK_ID=com.redhat.stacks.ubi8-python-312 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - PIP_NO_CACHE_DIR=off - SUMMARY=Platform for building and running Python 3.12 applications - DESCRIPTION=Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. 
It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. - BASH_ENV=/opt/app-root/bin/activate - ENV=/opt/app-root/bin/activate - PROMPT_COMMAND=. /opt/app-root/bin/activate ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-30T16:33:11Z" com.redhat.component: python-312-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. distribution-scope: public io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi8-python-312 io.k8s.description: Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. io.k8s.display-name: Python 3.12 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,python,python311,python-312,rh-python312 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/python-312 org.opencontainers.image.revision: 3f3c9bd8006baf6d2d2b8f43234653883eb93b13 release: "1761841938" summary: Platform for building and running Python 3.12 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-python-container.git --context-dir=3.12/test/setup-test-app/ ubi8/python-312 python-sample-app vcs-ref: 3f3c9bd8006baf6d2d2b8f43234653883eb93b13 vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-30T16:33:31Z" Id: sha256:1c22cc5c552c6e25081f04f207f1639163bf34ef4e249760f046eb408ba44808 Size: 338717870 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/python-312@sha256:50752ad94cb38e775d2a65fdc43c3249d6d24d87d15411af7b1525aa8a934277 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:06Z" name: sha256:50752ad94cb38e775d2a65fdc43c3249d6d24d87d15411af7b1525aa8a934277 resourceVersion: "14129" uid: 3d1d0144-5ac3-47c2-8d4f-50c4f59ab83c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:87df8a98f4ba365b04e9b199d1b37f1cb531f7ad3ba45202e39a6c7677f01ff0 size: 99998283 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-postgresql Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - POSTGRESQL_VERSION=15 - POSTGRESQL_PREV_VERSION=13 - HOME=/var/lib/pgsql - PGUSER=postgres - APP_DATA=/opt/app-root - SUMMARY=PostgreSQL is an advanced Object-Relational database management system - DESCRIPTION=PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql - ENABLED_COLLECTIONS= ExposedPorts: 5432/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T16:15:24Z" com.redhat.component: postgresql-15-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. 
io.k8s.display-name: PostgreSQL 15 io.openshift.expose-services: 5432:postgresql io.openshift.s2i.assemble-user: "26" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,postgresql,postgresql15,postgresql-15 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel8/postgresql-15 org.opencontainers.image.revision: 8c7bbd9aca352297ce1a0fb7876f79290eb33c43 release: "1761063286" summary: PostgreSQL is an advanced Object-Relational database management system url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 rhel8/postgresql-15 vcs-ref: 8c7bbd9aca352297ce1a0fb7876f79290eb33c43 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "26" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T16:15:43Z" Id: sha256:69739bf27af2b807dfc7e81309c24ede17165663d8c58a866cc71a1aba1befb2 Size: 195556514 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel8/postgresql-15@sha256:510b3f197ad39f82b828cc8be16e12a193d4dcd6ea27af01ce10c71b87c5cbfc kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:510b3f197ad39f82b828cc8be16e12a193d4dcd6ea27af01ce10c71b87c5cbfc resourceVersion: "14113" uid: 03042aae-f0de-4b07-80e6-e2b829c2f5da - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c7965aa7086045a59bdb113a1fb8a19d7ccf7af4133e59af8ecefd39cda8e0b1 size: 78964242 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0a47b24a64cfa083053cbc2215f10f9a84ef11dd992d4c2755f3f91e8ab9a38f size: 68231466 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:71ab0d8c6a4c41db5c7a2cde1f284080513b081bf45af5891eb8dd5a8cc4c373 size: 12192264 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3b84f6150455cc1a21301aa835b6c307f678241265c88e36cca2517be9a87a94 size: 61654235 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:344f7237a2476a01eacbac1d548100b6222edea8b86f3e38ca1dc6011103d82d size: 198980913 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: OpenShift Developer Services Config: Entrypoint: - /usr/bin/go-init - -main - /usr/local/bin/run-jnlp-client Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - __doozer=merge - BUILD_RELEASE=202509140700.p0.gd192e90.assembly.stream.el8 - BUILD_VERSION=v4.13.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=13 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.13.0-202509140700.p0.gd192e90.assembly.stream.el8-d192e90 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.13 - __doozer_key=openshift-enterprise-cli - __doozer_version=v4.13.0 - OS_GIT_COMMIT=d192e90 - SOURCE_DATE_EPOCH=1712056277 - SOURCE_GIT_COMMIT=d192e901ece237d9ae1580d73e78f423ec2ef322 - SOURCE_GIT_TAG=openshift-clients-4.13.0-202304190216-95-gd192e901e - SOURCE_GIT_URL=https://github.com/openshift/oc - ART_BUILD_ENGINE=brew - ART_BUILD_DEPS_METHOD=cachito - ART_BUILD_NETWORK=internal-only - HOME=/home/jenkins - LANG=en_US.UTF-8 - LC_ALL=en_US.UTF-8 Labels: License: GPLv2+ architecture: x86_64 build-date: 
2025-09-22T10:03:18 com.redhat.component: jenkins-agent-base-rhel8-container com.redhat.license_terms: https://www.redhat.com/agreements description: The jenkins agent base image is intended to be built on top of, to add your own tools that your jenkins job needs. The agent base image includes all the jenkins logic to operate as a agent, so users just have to yum install any additional packages their specific jenkins job will need distribution-scope: public io.buildah.version: 1.33.12 io.k8s.description: The jenkins agent base image is intended to be built on top of, to add your own tools that your jenkins job needs. The agent base image includes all the jenkins logic to operate as a agent, so users just have to yum install any additional packages their specific jenkins job will need io.k8s.display-name: Jenkins Agent Base io.openshift.build.commit.id: cfb3a904331d93327ba0eb7145b78ac0b24f4675 io.openshift.build.commit.url: https://github.com/openshift/jenkins/commit/cfb3a904331d93327ba0eb7145b78ac0b24f4675 io.openshift.build.source-location: https://github.com/openshift/jenkins io.openshift.expose-services: "" io.openshift.maintainer.component: Jenkins io.openshift.maintainer.product: OpenShift Container Platform io.openshift.maintainer.project: OCPBUGS io.openshift.tags: openshift,jenkins,agent maintainer: openshift-dev-services+jenkins@redhat.com name: openshift/jenkins-agent-base release: "1758535340" summary: Provides the latest release of Red Hat Universal Base Image 8. url: https://access.redhat.com/containers/#/registry.access.redhat.com/openshift/jenkins-agent-base/images/v4.13.0-1758535340 vcs-ref: 2d2a4f0dd7c20792a4308ed677a2e415846c1091 vcs-type: git vendor: Red Hat, Inc. version: v4.13.0 User: root ContainerConfig: {} Created: "2025-09-22T10:08:52Z" Id: sha256:8d2f1d46a14c4db10c03f38ddee5649dfc8eb652a20d0816f94dc5c315b77db8 Size: 420056604 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ocp-tools-4/jenkins-agent-base-rhel8@sha256:52f9b4df3f7833876ee502a6bff2539491db07e060b213b6a0a8fda0c4a881c1 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:52f9b4df3f7833876ee502a6bff2539491db07e060b213b6a0a8fda0c4a881c1 resourceVersion: "13674" uid: 23b166ba-7a04-42b8-ab48-7e0032844987 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c9281c141a1bfec06e291d2ad29bfdedfd10a99d583fc0f48d3c26723ebe0761 size: 75827357 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:31114e120ca0c7dc51e01721c5a689a614edb6c86de11301d503c72be1540c79 size: 1325 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:733e0a770c8de64c133a2b22ac6258c124dcaab4c6efadbb1db3f5612206533b size: 436562513 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:333560b12f367c723c14d593bc2fe88f5b982bb4faee46e3108cea7a146cd741 size: 332935784 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - 
AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - DEFAULT_ADMIN_USERNAME=eapadmin - HOME=/home/jboss - HTTPS_ENABLE_HTTP2=true - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - JBOSS_CONTAINER_HAWKULAR_MODULE=/opt/jboss/container/hawkular - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_EAP_VERSION=7.2.1.GA - JBOSS_HOME=/opt/eap - JBOSS_IMAGE_NAME=jboss-eap-7/eap72-openshift - JBOSS_IMAGE_VERSION=1.0 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=eap - JOLOKIA_VERSION=1.5.0 - LAUNCH_JBOSS_IN_BACKGROUND=true - MAVEN_VERSION=3.5 - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - PRODUCT_VERSION=7.2.1.GA - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - WILDFLY_CAMEL_VERSION=5.3.0.fuse-740022-redhat-00002 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: c16126310035 Image: f5cd6e5c804e4cb7375b71e518aa9db68198247c7882137f1b9354c495c590d9 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T11:56:11.942792 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Camel applications on EAP 7.2.1 distribution-scope: public io.cekit.version: 2.2.7 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.2.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Otavio Piske name: fuse7/fuse-eap-openshift org.concrt.version: 2.2.7 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.2.1.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.2.1.GA release: "5.1567588144" summary: Platform for building and running Apache Camel applications on EAP 7.2.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.4-5.1567588144 vcs-ref: fa998a5261ed25ce1445504d742098c8caf3bf10 vcs-type: git vendor: Red Hat, Inc. 
version: "1.4" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - DEFAULT_ADMIN_USERNAME=eapadmin - HOME=/home/jboss - HTTPS_ENABLE_HTTP2=true - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - JBOSS_CONTAINER_HAWKULAR_MODULE=/opt/jboss/container/hawkular - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_EAP_VERSION=7.2.1.GA - JBOSS_HOME=/opt/eap - JBOSS_IMAGE_NAME=jboss-eap-7/eap72-openshift - JBOSS_IMAGE_VERSION=1.0 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=eap - JOLOKIA_VERSION=1.5.0 - LAUNCH_JBOSS_IN_BACKGROUND=true - MAVEN_VERSION=3.5 - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - PRODUCT_VERSION=7.2.1.GA - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - WILDFLY_CAMEL_VERSION=5.3.0.fuse-740022-redhat-00002 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: c16126310035 Image: sha256:08d78aaef734f47c91cb4bf8d424275566304a5769258da5c4ae88fb0f886a1e Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T11:56:11.942792 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Camel applications on EAP 7.2.1 distribution-scope: public io.cekit.version: 2.2.7 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.2.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Otavio Piske name: fuse7/fuse-eap-openshift org.concrt.version: 2.2.7 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.2.1.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.2.1.GA release: "5.1567588144" summary: Platform for building and running Apache Camel applications on EAP 7.2.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.4-5.1567588144 vcs-ref: 
fa998a5261ed25ce1445504d742098c8caf3bf10 vcs-type: git vendor: Red Hat, Inc. version: "1.4" User: "185" WorkingDir: /home/jboss Created: "2019-09-04T11:58:53Z" DockerVersion: 1.13.1 Id: sha256:eadfb5a143ec4bd3de7f37e16d2603a590eaccde0b5aadca389811a958c2ab5b Size: 845335492 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift@sha256:5443b2d3e19c8f540cbe133113a7a4479f3ad98caa1a2a5e6ac48acbe4914b39 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:5443b2d3e19c8f540cbe133113a7a4479f3ad98caa1a2a5e6ac48acbe4914b39 resourceVersion: "14034" uid: ae14503f-4686-46f2-8383-c3e4ee1556d7 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2920d84eafa0cf94806ab58f0a2124f7b2d35bcbb06fc89a9106dcc28efe397a size: 39653524 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:bb4ec6fa4af1ca6bd41e3cd542ef1eb3cf64d0a2d7dc5663106de7eb5e2c4b4a size: 25436791 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Env: - container=oci - PYTHON_VERSION=3.12 - PYTHONUNBUFFERED=1 - PYTHONIOENCODING=UTF-8 - LC_ALL=en_US.UTF-8 - LANG=en_US.UTF-8 - CNB_STACK_ID=com.redhat.stacks.ubi9-python-312 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - PIP_NO_CACHE_DIR=off - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el9 - PATH=/opt/app-root/bin:/opt/app-root/src/bin:/opt/app-root/src/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - BASH_ENV=/opt/app-root/etc/scl_enable - ENV=/opt/app-root/etc/scl_enable - PROMPT_COMMAND=. /opt/app-root/etc/scl_enable - SUMMARY=Minimal platform for building and running Python 3.12 applications - DESCRIPTION=Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-15T09:56:13Z" com.redhat.component: python-312-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. distribution-scope: public io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi9-python-312-minimal io.k8s.description: Python 3.12 available as container is a base platform for building and running various Python 3.12 applications and frameworks. 
Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. io.k8s.display-name: Python 3.12 io.openshift.expose-services: 8080:http io.openshift.tags: builder,python,python312,python-312,rh-python312 maintainer: SoftwareCollections.org name: ubi9/python-312-minimal org.opencontainers.image.revision: 3b0ddde7d89fee50cf4760c3de6d4f53add14f48 release: "1760522101" summary: Minimal platform for building and running Python 3.12 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-python-container.git --context-dir=3.12-minimal/test/setup-test-app/ ubi9/python-312-minimal python-sample-app vcs-ref: 3b0ddde7d89fee50cf4760c3de6d4f53add14f48 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-15T09:56:19Z" Id: sha256:7c51211cb73e7d77a0a72a56e48f2ea637bfb5beb68f83b39f0ee9d64659268b Size: 65103547 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/python-312-minimal@sha256:547a068fddf44318e62b26caa267375dc77acc10ae5dcdd0869a1d60f8b93d5d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:06Z" name: sha256:547a068fddf44318e62b26caa267375dc77acc10ae5dcdd0869a1d60f8b93d5d resourceVersion: "14128" uid: 830d4c8c-c877-4bec-9091-d9cd057b55c7 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a6577091999bb0bb54af7b808b41c58aa9a79a61f12310b734f235b548159d75 size: 39651444 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a12747800228fe5e4dadcb597f69381b522d98a33585b5bb636e10b3f0355e9f size: 540283081 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JBOSS_HOME=/opt/eap - HOME=/home/jboss - LD_PRELOAD=libnss_wrapper.so - MAVEN_OPTS=-Duser.home= - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - CONFIG_ADJUSTMENT_MODE=xml_cli - DELETE_BUILD_ARTIFACTS=true - EAP_FULL_GROUPID=org.jboss.eap - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - 
GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - GALLEON_VERSION=4.2.8.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - JBOSS_EAP_VERSION=7.4.6 - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - JBOSS_PRODUCT=sso - JOLOKIA_VERSION=1.7.1 - LAUNCH_JBOSS_IN_BACKGROUND=true - MAVEN_VERSION=3.6 - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - OFFLINER_VERSION=1.6 - PRODUCT_VERSION=7.5.3.GA - S2I_COPY_SERVER=true - S2I_FP_VERSION=23.0.0.Final - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - SSO_FORCE_LEGACY_SECURITY=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - WILDFLY_VERSION=7.4.6.GA-redhat-00002 - JBOSS_IMAGE_NAME=rh-sso-7/sso75-openshift-rhel8 - JBOSS_IMAGE_VERSION=7.5 - JBOSS_SSO_VERSION=7.5.3.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2022-10-28T16:22:34 com.redhat.component: redhat-sso-7-sso75-openshift-rhel8-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat Single Sign-On 7.5 on OpenJDK OpenShift container image, based on the Red Hat Universal Base Image 8 Minimal container image distribution-scope: public io.buildah.version: 1.26.2 io.cekit.version: 4.3.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for running Red Hat SSO io.k8s.display-name: Red Hat SSO 7.5 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: sso,sso75,keycloak maintainer: Red Hat, Inc. 
name: rh-sso-7/sso75-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.product: sso org.jboss.product.eap.version: 7.4.6 org.jboss.product.openjdk.version: "11" org.jboss.product.sso.version: 7.5.3.GA org.jboss.product.version: 7.5.3.GA release: "35" summary: Red Hat Single Sign-On 7.5 on OpenJDK OpenShift container image, based on the Red Hat Universal Base Image 8 Minimal container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/rh-sso-7/sso75-openshift-rhel8/images/7.5-35 vcs-ref: 841bdb7ce652ae6cd6faa6e8a1a379b3f9e6aa32 vcs-type: git vendor: Red Hat, Inc. version: "7.5" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2022-10-28T16:35:01Z" Id: sha256:89016e63254832f6f151dc964d0eb52dfbd4b33d4a033cc97475231cfcff3604 Size: 579966123 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rh-sso-7/sso75-openshift-rhel8@sha256:5522021e9081fa0f0163f75afedb9efaaad25c2a1dde6ce0fab3142ddcc7dd60 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:5522021e9081fa0f0163f75afedb9efaaad25c2a1dde6ce0fab3142ddcc7dd60 resourceVersion: "14063" uid: 14bfd18e-6c24-48ac-81ca-1884af8b352b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c43687042a41aad69fc526985ef2b82012c011db7e0e26faba4fc860ad32d88e size: 75837780 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2b7b014ba1b80abb29391141385bd32668571313647317d1d64d8b5cebb1f228 size: 1331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2825299765472d0b62c1ed19ebb564a8a191b88ce49639471a274d03e7f9151e size: 3910026 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8df146e29e789eb3e8bec37172cca00cda60cf40f6924dda00379b283e2ce6db size: 85123374 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ace684c254cd984313cffa37b07e1ef036d8620ecf0c845567b758ffb58214db size: 22761222 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.4 - JOLOKIA_VERSION=1.5.0.redhat-1 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages - JAVACONFDIRS=/opt/rh/rh-maven35/root/etc/java - XDG_CONFIG_DIRS=/opt/rh/rh-maven35/root/etc/xdg:/etc/xdg - XDG_DATA_DIRS=/opt/rh/rh-maven35/root/usr/share:/usr/local/share:/usr/share ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 8ea581222c6a Image: a18474b8598a16e4515539f2ad4d40406409de5a319c983d2a9ee84509bdef23 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T11:27:17.557941 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container 
com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "14.1567588136" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.4-14.1567588136 vcs-ref: ea10f76c372c8fc587f741263211a3612179c660 vcs-type: git vendor: Red Hat, Inc. version: "1.4" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.4 - JOLOKIA_VERSION=1.5.0.redhat-1 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages - JAVACONFDIRS=/opt/rh/rh-maven35/root/etc/java - XDG_CONFIG_DIRS=/opt/rh/rh-maven35/root/etc/xdg:/etc/xdg - XDG_DATA_DIRS=/opt/rh/rh-maven35/root/usr/share:/usr/local/share:/usr/share ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 8ea581222c6a Image: sha256:5e742973cb785b1573db78aa7928f46cdfacb4efa8f39ea4da2c78d68150e73a Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T11:27:17.557941 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 
1.8.0 org.jboss.product.version: 1.8.0 release: "14.1567588136" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.4-14.1567588136 vcs-ref: ea10f76c372c8fc587f741263211a3612179c660 vcs-type: git vendor: Red Hat, Inc. version: "1.4" User: "185" WorkingDir: /home/jboss Created: "2019-09-04T11:29:40Z" DockerVersion: 1.13.1 Id: sha256:ad4d6e5244e83de4ddfb147e5bf12e47ad08f7a4a77e878141eafed93b3b4b32 Size: 187640400 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift@sha256:5555a6031cbb8eb09cfeb73cacaabefd5a5824637b047af69b981bc66bdd8b3c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:5555a6031cbb8eb09cfeb73cacaabefd5a5824637b047af69b981bc66bdd8b3c resourceVersion: "14049" uid: d28d4c4e-f905-48bd-a8e7-16c2d7a9f256 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:79cf9a52657973381735680ff3e176fd5bc82924252f003178f258d16506319f size: 115252902 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=21 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-21-runtime - JBOSS_IMAGE_VERSION=1.18 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2024-01-17T20:02:32 com.redhat.component: openjdk-21-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 21 runtime distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.9.1 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-21-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "21" org.jboss.product.version: "21" org.opencontainers.image.documentation: https://jboss-container-images.github.io/openjdk/ release: "2.1705519635" summary: Image for Red Hat OpenShift providing OpenJDK 21 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-21-runtime/images/1.18-2.1705519635 usage: https://jboss-container-images.github.io/openjdk/ vcs-ref: a6fac39569ba28195a4fbf27c1df65148afee408 vcs-type: git vendor: Red Hat, Inc. 
version: "1.18" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-01-17T20:08:14Z" Id: sha256:196ba0ddcfa8f047fa7275966236356c2cf75d54b3ede1697f61a6ca3b5b533c Size: 154579690 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-21-runtime@sha256:55dc61c31ea50a8f7a45e993a9b3220097974948b5cd1ab3f317e7702e8cb6fc kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:55dc61c31ea50a8f7a45e993a9b3220097974948b5cd1ab3f317e7702e8cb6fc resourceVersion: "14123" uid: 3e7ee87f-253d-4093-bc3d-9fe0bad95d0d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a318ad6eeafd4f939586b6d0ca41adb994bcd581c9e548681ee3fc6071b1ba1b size: 39707507 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:04d49ef24fbfa7ac7d8ff5bfdf2b15330de0988960e0c494c0d08a13e8af7e8b size: 577875855 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JBOSS_HOME=/opt/eap - HOME=/home/jboss - LD_PRELOAD=libnss_wrapper.so - MAVEN_OPTS=-Duser.home= - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - CONFIG_ADJUSTMENT_MODE=xml_cli - DELETE_BUILD_ARTIFACTS=true - EAP_FULL_GROUPID=org.jboss.eap - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - GALLEON_VERSION=4.2.9.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - HTTPS_ENABLE_HTTP2=true - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - 
JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - JBOSS_EAP_VERSION=7.4.23 - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - JBOSS_PRODUCT=sso - JOLOKIA_VERSION=1.7.1 - LAUNCH_JBOSS_IN_BACKGROUND=true - MAVEN_VERSION=3.6 - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - OFFLINER_VERSION=1.6 - PRODUCT_VERSION=7.6.12.GA - S2I_COPY_SERVER=true - S2I_FP_VERSION=23.0.0.Final - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - SSO_FORCE_LEGACY_SECURITY=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - WILDFLY_VERSION=7.4.23.GA-redhat-00002 - JBOSS_IMAGE_NAME=rh-sso-7/sso76-openshift-rhel8 - JBOSS_IMAGE_VERSION=7.6 - JBOSS_SSO_VERSION=7.6.12.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2025-09-23T16:53:13 com.redhat.component: redhat-sso-7-sso76-openshift-rhel8-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat Single Sign-On 7.6 on OpenJDK OpenShift container image, based on the Red Hat Universal Base Image 8 Minimal container image distribution-scope: public io.buildah.version: 1.33.12 io.cekit.version: 4.10.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for running Red Hat SSO io.k8s.display-name: Red Hat SSO 7.6 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: sso,sso76,keycloak maintainer: Red Hat, Inc. name: rh-sso-7/sso76-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.product: sso org.jboss.product.eap.version: 7.4.23 org.jboss.product.openjdk.version: "11.0" org.jboss.product.sso.version: 7.6.12.GA org.jboss.product.version: 7.6.12.GA release: "71" summary: Red Hat Single Sign-On 7.6 on OpenJDK OpenShift container image, based on the Red Hat Universal Base Image 8 Minimal container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/rh-sso-7/sso76-openshift-rhel8/images/7.6-71 vcs-ref: a91ff864f43f537dcd775df2e17dfa60b463fa96 vcs-type: git vendor: Red Hat, Inc. 
version: "7.6" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-09-23T17:17:04Z" Id: sha256:9ac7f79e942bc8bc3a75bde325ddbbcfadf868774495fe95f235225777aad101 Size: 617614450 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rh-sso-7/sso76-openshift-rhel8@sha256:561ade81cb6455e0de35573d7ca68ac3f043c86385aac9e274dd53a7a75d3c16 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:561ade81cb6455e0de35573d7ca68ac3f043c86385aac9e274dd53a7a75d3c16 resourceVersion: "14067" uid: a350e0de-fad8-40db-a67d-021be325b2d9 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:50be7bb6ce3ddb41606e1956ba5c61072699ac536980f260a0db6dc59c8013fe size: 39575081 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2df43de46d33e2fb030fde7397d323fd018dcc6279e8821ff18a51738eb71cd7 size: 81897201 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.23 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2025-11-25T10:28:24 com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.buildah.version: 1.33.12 io.cekit.version: 4.13.0.dev0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 org.opencontainers.image.documentation: https://rh-openjdk.github.io/redhat-openjdk-containers/ release: "3.1764066421" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.23-3.1764066421 usage: https://rh-openjdk.github.io/redhat-openjdk-containers/ vcs-ref: 1525ac5b44f35db161f3d3efa207ccd05b700efa vcs-type: git vendor: Red Hat, Inc. 
version: "1.23" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-11-25T10:40:22Z" Id: sha256:7f52c54192ccabfbf2e7e4d19561a61e22387830ae44dce3c2ad1ffdf837935c Size: 121492188 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/openjdk-8-runtime@sha256:57ab1f0ad24e02143978fc79c5219a02c4d6a5a27225ee5454c85a47839b6ddc kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-12-08T17:53:29Z" name: sha256:57ab1f0ad24e02143978fc79c5219a02c4d6a5a27225ee5454c85a47839b6ddc resourceVersion: "40381" uid: 9f33c55f-7d1e-41c7-9f15-eb8bf3482392 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:378837c0e24ad4a2e33f0eb3d68dc0c31d9a7dbbd5357d4acafec1d3a7930602 size: 74923740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e17262bc23414bd3c0e9808ad7a87b055fe5afec386da42115a839ea2083d233 size: 1303 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0eeb656bc1e64b6c3ba63f2fa9450feaef3c60159d48eb2171ad1f25f5e655d9 size: 3805266 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:77187f02cbeff1e8de9cdd8e850f300e7267ddf19991dcbc588a498c14df3ff0 size: 70351735 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:863fb864126eaa9a42cc66096894ac7c91c5c82c467d86e149894bf874155679 size: 15177175 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.0 - JOLOKIA_VERSION=1.5.0.redhat-1 - PROMETHEUS_JMX_EXPORTER_VERSION=0.10.0.redhat-2 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 3183206101f3 Image: 3320f15730e3d95d98b084a00d040f8e97053ec1aa0858da207a0f4b332871b8 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-08-01T18:00:39.090809 com.redhat.build-host: osbs-cpt-004.ocp.osbs.upshift.eng.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.0.0 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.10.0.redhat-2 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Cloud Enablement Feedback name: fuse7/fuse-java-openshift org.concrt.version: 2.0.0 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "11.1533127955" summary: Build and run 
Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.0-11.1533127955 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: 93a32d6c79061022f016ecba4d6766fb0c10f876 vcs-type: git vendor: Red Hat, Inc. version: "1.0" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.0 - JOLOKIA_VERSION=1.5.0.redhat-1 - PROMETHEUS_JMX_EXPORTER_VERSION=0.10.0.redhat-2 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 3183206101f3 Image: sha256:297cabf44d7b083ba0895af2398a0e0204de7212a1e4cff8835c5c348ed7520c Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-08-01T18:00:39.090809 com.redhat.build-host: osbs-cpt-004.ocp.osbs.upshift.eng.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.0.0 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.10.0.redhat-2 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Cloud Enablement Feedback name: fuse7/fuse-java-openshift org.concrt.version: 2.0.0 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "11.1533127955" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.0-11.1533127955 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: 93a32d6c79061022f016ecba4d6766fb0c10f876 vcs-type: git vendor: Red Hat, Inc. 
version: "1.0" User: "185" WorkingDir: /home/jboss Created: "2018-08-01T18:02:34Z" DockerVersion: 1.12.6 Id: sha256:f01a081e1d1c2fd6682dab80abcf8a08ad66206d7d58a9033f3d53c51d95d636 Size: 164265393 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift@sha256:57fa0cb158aa31193908df27fc707afcfdd4bdaf93b3286f5602d5f804e1927f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:57fa0cb158aa31193908df27fc707afcfdd4bdaf93b3286f5602d5f804e1927f resourceVersion: "14028" uid: c3c45f2c-8c8f-43bc-9c10-9f9b52dcf396 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:06f86e50a0b74ff9eb161a7d781228877c90e8ff57e9689e8cb8b0f092a2a9f9 size: 39268171 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6b42020daf7b58dee6d307d4868ab617031169687edc44eed7641b881391e6b6 size: 76864770 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.15 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-04-25T05:06:59 com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 4.3.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1682399166" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.15-1.1682399166 vcs-ref: 490ce28b7e2fbd1f744e85d04aadcc31d252440e vcs-type: git vendor: Red Hat, Inc. 
version: "1.15" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-04-25T05:16:48Z" Id: sha256:8b45a4a7e12236549e0e21ab9913daea195db2f5426b8e2898655db015957b9c Size: 116151330 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:59b88fb0c467ca43bf3c1af6bfd8777577638dd8079f995cdb20b6f4e20ce0b6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:59b88fb0c467ca43bf3c1af6bfd8777577638dd8079f995cdb20b6f4e20ce0b6 resourceVersion: "14223" uid: 4b41ce42-01e5-4c1e-a5e7-e496e22fa4de - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c43687042a41aad69fc526985ef2b82012c011db7e0e26faba4fc860ad32d88e size: 75837780 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2b7b014ba1b80abb29391141385bd32668571313647317d1d64d8b5cebb1f228 size: 1331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:81b4c0b347a47353299773adfb6985fc16c599160cb67f748d2cd1196a3da72c size: 435771809 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:bb83fc4990dbb7c52c1ae523f75fc498e1a353edd0a9f26adb59ec0fe198c823 size: 323115251 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - DEFAULT_ADMIN_USERNAME=eapadmin - HOME=/home/jboss - HTTPS_ENABLE_HTTP2=true - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - JBOSS_CONTAINER_HAWKULAR_MODULE=/opt/jboss/container/hawkular - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_EAP_VERSION=7.2.1.GA - JBOSS_HOME=/opt/eap - JBOSS_IMAGE_NAME=jboss-eap-7/eap72-openshift - JBOSS_IMAGE_VERSION=1.0 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=eap - JOLOKIA_VERSION=1.5.0 - LAUNCH_JBOSS_IN_BACKGROUND=true - MAVEN_VERSION=3.5 - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - PRODUCT_VERSION=7.2.1.GA - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - WILDFLY_CAMEL_VERSION=5.3.0.fuse-731003-redhat-00002 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 8ea581222c6a Image: 4ac3e6c8c275deed8e72190fc5aad1fa217c585d9c895d304f98b67c688d37b3 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-06-28T17:23:13.179952 
com.redhat.build-host: cpt-1004.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Camel applications on EAP 7.1 distribution-scope: public io.cekit.version: 2.2.7 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.2 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse name: fuse7/fuse-eap-openshift org.concrt.version: 2.2.7 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.2.1.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.2.1.GA release: "10.1561731101" summary: Platform for building and running Apache Camel applications on EAP 7.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.3-10.1561731101 vcs-ref: 25f87039bfbc4b14263a1663da9e7f41f1eb13fd vcs-type: git vendor: Red Hat, Inc. version: "1.3" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - DEFAULT_ADMIN_USERNAME=eapadmin - HOME=/home/jboss - HTTPS_ENABLE_HTTP2=true - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - JBOSS_CONTAINER_HAWKULAR_MODULE=/opt/jboss/container/hawkular - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_EAP_VERSION=7.2.1.GA - JBOSS_HOME=/opt/eap - JBOSS_IMAGE_NAME=jboss-eap-7/eap72-openshift - JBOSS_IMAGE_VERSION=1.0 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=eap - JOLOKIA_VERSION=1.5.0 - LAUNCH_JBOSS_IN_BACKGROUND=true - MAVEN_VERSION=3.5 - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - PRODUCT_VERSION=7.2.1.GA - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - WILDFLY_CAMEL_VERSION=5.3.0.fuse-731003-redhat-00002 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 8ea581222c6a Image: sha256:5945902096c4dc3ab71ca6faf376b478cb5c2afbb8f71fc551044b083292b3e0 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 
2019-06-28T17:23:13.179952 com.redhat.build-host: cpt-1004.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Camel applications on EAP 7.1 distribution-scope: public io.cekit.version: 2.2.7 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.2 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse name: fuse7/fuse-eap-openshift org.concrt.version: 2.2.7 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.2.1.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.2.1.GA release: "10.1561731101" summary: Platform for building and running Apache Camel applications on EAP 7.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.3-10.1561731101 vcs-ref: 25f87039bfbc4b14263a1663da9e7f41f1eb13fd vcs-type: git vendor: Red Hat, Inc. version: "1.3" User: "185" WorkingDir: /home/jboss Created: "2019-06-28T17:25:09Z" DockerVersion: 1.13.1 Id: sha256:28c2475c2961874ea89fb3681e4afd8a2271f55297255ff6ce2fd10ceeedc112 Size: 834734581 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift@sha256:5bb11da5abfe6a1bc937f0439f08fa27efc96c438106b6a545be62672a39fc26 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:5bb11da5abfe6a1bc937f0439f08fa27efc96c438106b6a545be62672a39fc26 resourceVersion: "14032" uid: 8d55fdb6-f208-48c2-ab13-a0de64de975a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:90a82be6f2a43e7a2a943502b2f22b6bbca066d9427526f93f733c2a492596ed size: 77837369 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8bd6f3302451da24fd3b0df386f34bc127935a1e20aa71c61819bc742f2e1336 size: 17705582 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:682ed892da8c2b5be3d68dc4a07c0019f6a8d414e80f7847c561405c5fd1ff20 size: 98454985 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-mysqld Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - MYSQL_VERSION=10.3 - APP_DATA=/opt/app-root/src - HOME=/var/lib/mysql - SUMMARY=MariaDB 10.3 SQL database server - DESCRIPTION=MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. 
- CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql - MYSQL_PREFIX=/usr ExposedPorts: 3306/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-14T06:12:29Z" com.redhat.component: mariadb-103-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. io.k8s.display-name: MariaDB 10.3 io.openshift.expose-services: 3306:mysql io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,mysql,mariadb,mariadb103,mariadb-103 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel8/mariadb-103 org.opencontainers.image.revision: 985d3420c59d9f45e30b2a3743794fb2047f5e92 release: "1760422313" summary: MariaDB 10.3 SQL database server url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhel8/mariadb-103 vcs-ref: 985d3420c59d9f45e30b2a3743794fb2047f5e92 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "27" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-14T06:12:47Z" Id: sha256:e8b408fb20efc01a03a2ce947a68ba0994abc106571e326fc86fe7095e9625ed Size: 194013667 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel8/mariadb-103@sha256:5c16087bf81b2285e8815970b44385f7c98612fd6eb1af23b6d89db86004efa3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:5c16087bf81b2285e8815970b44385f7c98612fd6eb1af23b6d89db86004efa3 resourceVersion: "13839" uid: 06f91639-e590-4d7c-9a04-c127a004a83f - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:25c75c34b2e2b68ba9245d9cddeb6b8a0887371ed30744064f85241a75704d87 size: 79262296 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:67705065e025181e4faca8aabe1305bdd92f5bdf8a2b8009cdb69183ac2e2c47 size: 49851946 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9d458e2e81cb0fa811f569aaf711628309c0372c7d5eed4a8ea9ec96b4aeeb42 size: 9300456 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b954cb90d4571440ed86627198be2d74d7c3d264fe72e0af0f35f40f0da99ea8 size: 75745362 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - ART_BUILD_ENGINE=konflux - ART_BUILD_DEPS_METHOD=cachi2 - ART_BUILD_NETWORK=hermetic - ART_BUILD_DEPS_MODE=default - __doozer=merge - 
BUILD_RELEASE=202510212154.p2.g7f1d6f8.assembly.stream.el9 - BUILD_VERSION=v4.20.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=20 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.20.0-202510212154.p2.g7f1d6f8.assembly.stream.el9-7f1d6f8 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.20 - __doozer_key=openshift-enterprise-cli - __doozer_uuid_tag=ose-cli-rhel9-v4.20.0-20251021.223340 - __doozer_version=v4.20.0 - OS_GIT_COMMIT=7f1d6f8 - SOURCE_DATE_EPOCH=1761075552 - SOURCE_GIT_COMMIT=7f1d6f88cb0c8ed5c877fc0ae2bd99298c6339f2 - SOURCE_GIT_TAG=openshift-clients-4.12.0-202208031327-1168-g7f1d6f88c - SOURCE_GIT_URL=https://github.com/openshift/oc Labels: License: GPLv2+ architecture: x86_64 build-date: "2025-10-21T22:51:33Z" com.redhat.component: openshift-enterprise-cli-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:openshift:4.20::el9 description: Empty distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: OpenShift is a platform for developing, building, and deploying containerized applications. io.k8s.display-name: OpenShift Client io.openshift.build.commit.id: 7f1d6f88cb0c8ed5c877fc0ae2bd99298c6339f2 io.openshift.build.commit.url: https://github.com/openshift/oc/commit/7f1d6f88cb0c8ed5c877fc0ae2bd99298c6339f2 io.openshift.build.source-location: https://github.com/openshift/oc io.openshift.build.versions: kubectl=1.33.3 io.openshift.expose-services: "" io.openshift.maintainer.component: oc io.openshift.maintainer.project: OCPBUGS io.openshift.tags: openshift,cli maintainer: Red Hat, Inc. name: openshift/ose-cli-rhel9 org.opencontainers.image.revision: 8374844a9ef7a43b422ac806f4844ef29b0bae98 release: 202510212154.p2.g7f1d6f8.assembly.stream.el9 summary: Empty url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel-els/images/9.4-847.1719484506 vcs-ref: 8374844a9ef7a43b422ac806f4844ef29b0bae98 vcs-type: git vendor: Red Hat, Inc. 
version: v4.20.0 ContainerConfig: {} Created: "2025-10-21T22:53:05Z" Id: sha256:d200844501bef80d82a3a592b66ff17cee232d887c785e2e4530312e8fe3be0a Size: 214181073 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e resourceVersion: "13294" uid: 55a5d521-6f4b-43f5-bf81-5da11eade304 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:de1a4526c3d6cdf0e04f24b1888f5ef31425209f1c26e5a6ae7694cdad6e8688 size: 78973191 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:68e455cb0aea90f85be197ceadef7a56ca5a4d7bf6761a3a58b0ab36a65f770e size: 74468695 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - '"./${DOTNET_DEFAULT_CMD}"' Entrypoint: - container-entrypoint Env: - container=oci - HOME=/opt/app-root - PATH=/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - DOTNET_APP_PATH=/opt/app-root/app - DOTNET_DATA_PATH=/opt/app-root/data - DOTNET_DEFAULT_CMD=default-cmd.sh - DOTNET_CORE_VERSION=6.0 - DOTNET_FRAMEWORK=net6.0 - DOTNET_RUNNING_IN_CONTAINER=true - DOTNET_SSL_CERT_DIR=/opt/app-root/ssl_dir - NUGET_XMLDOC_MODE=skip - ASPNETCORE_URLS=http://*:8080 - CONTAINER_SCRIPTS_PATH=/opt/app-root ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: 2024-11-13T22:14:36 com.redhat.component: dotnet-60-runtime-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Platform for running .NET 6 applications distribution-scope: public io.buildah.version: 1.33.8 io.k8s.description: Platform for running .NET 6 applications io.k8s.display-name: .NET 6 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: runtime,.net,dotnet,dotnetcore,dotnet60-runtime io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. name: ubi8/dotnet-60-runtime release: "56" summary: Provides the latest release of Red Hat Universal Base Image 8. url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/dotnet-60-runtime/images/6.0-56 vcs-ref: 052346f7c37602fffdeac6e8baa9163154cc2f35 vcs-type: git vendor: Red Hat, Inc. 
version: "6.0" User: "1001" WorkingDir: /opt/app-root/app ContainerConfig: {} Created: "2024-11-13T22:17:33Z" Id: sha256:617cbf0cc01d4c323d6ae1f01a1c706aa077d9a3e992e7a729d973ab5da75fdd Size: 153454771 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/dotnet-60-runtime@sha256:5dfcc5b000a1fab4be66bbd43e4db44b61176e2bcba9c24f6fe887dea9b7fd49 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:5dfcc5b000a1fab4be66bbd43e4db44b61176e2bcba9c24f6fe887dea9b7fd49 resourceVersion: "13454" uid: 635f30cb-762a-4a51-8cd8-3bab12f6564e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:25c75c34b2e2b68ba9245d9cddeb6b8a0887371ed30744064f85241a75704d87 size: 79262296 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:67705065e025181e4faca8aabe1305bdd92f5bdf8a2b8009cdb69183ac2e2c47 size: 49851946 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9d458e2e81cb0fa811f569aaf711628309c0372c7d5eed4a8ea9ec96b4aeeb42 size: 9300456 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3019826b26b93fdb39b6e29614bc6b4d1ab879c596261851db4ff70706fa6c55 size: 183535774 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d93f73221d8935bd85cec6a9afac79e6b0af9c30fcfb56cd340fe1774720bb23 size: 319801066 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - ART_BUILD_ENGINE=konflux - ART_BUILD_DEPS_METHOD=cachi2 - ART_BUILD_NETWORK=open - ART_BUILD_DEPS_MODE=default - __doozer=merge - BUILD_RELEASE=202510212154.p2.g26d0917.assembly.stream.el9 - BUILD_VERSION=v4.20.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=20 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.20.0-202510212154.p2.g26d0917.assembly.stream.el9-26d0917 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.20 - __doozer_key=ose-network-tools - __doozer_uuid_tag=network-tools-rhel9-v4.20.0-20251021.223340 - __doozer_version=v4.20.0 - OS_GIT_COMMIT=26d0917 - SOURCE_DATE_EPOCH=1756190912 - SOURCE_GIT_COMMIT=26d09174cbd92386469e777e3bf49bfa95d035d5 - SOURCE_GIT_TAG=26d09174 - SOURCE_GIT_URL=https://github.com/openshift/network-tools Labels: License: GPLv2+ architecture: x86_64 build-date: "2025-10-21T23:43:54Z" com.redhat.component: ose-network-tools-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:openshift:4.20::el9 description: Empty distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Empty io.k8s.display-name: Empty io.openshift.build.commit.id: 26d09174cbd92386469e777e3bf49bfa95d035d5 io.openshift.build.commit.url: https://github.com/openshift/network-tools/commit/26d09174cbd92386469e777e3bf49bfa95d035d5 io.openshift.build.source-location: https://github.com/openshift/network-tools io.openshift.build.versions: kubectl=1.33.3 io.openshift.expose-services: "" io.openshift.maintainer.component: Networking / network-tools io.openshift.maintainer.project: OCPBUGS io.openshift.tags: Empty maintainer: Red Hat, Inc. 
name: openshift/network-tools-rhel9 org.opencontainers.image.revision: 360618f7e1f1a5ed03b403962ae30851067beb72 release: 202510212154.p2.g26d0917.assembly.stream.el9 summary: Empty url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel-els/images/9.4-847.1719484506 vcs-ref: 360618f7e1f1a5ed03b403962ae30851067beb72 vcs-type: git vendor: Red Hat, Inc. version: v4.20.0 User: "0" ContainerConfig: {} Created: "2025-10-21T23:46:38Z" Id: sha256:889090cd25ede277ac9e464ae06bc072929108423054c02ff986650c20008cca Size: 641778969 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5e4e0fd08883744f35560eac43b8120f6324d9b488eb7a7716955fb98ddbace5 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:5e4e0fd08883744f35560eac43b8120f6324d9b488eb7a7716955fb98ddbace5 resourceVersion: "13797" uid: 6f863fe7-2ea9-4b38-acac-0633054ada6b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7f2c2c4492b6b2d181be862a0a1d1b6f6851cb07244efbcb43d44f9936aa78d5 size: 80005019 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ea092d7970b26c24007a670fc6d0810dbf9531dc0d3a9d6ea514134ba5686724 size: 7541063 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4804e0fbd1e621e9faf332a12d9b81e87fba75f564744cad8c51ce03630375bc size: 62136250 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - run-postgresql Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el7 - POSTGRESQL_VERSION=13 - POSTGRESQL_PREV_VERSION=12 - HOME=/var/lib/pgsql - PGUSER=postgres - APP_DATA=/opt/app-root - SUMMARY=PostgreSQL is an advanced Object-Relational database management system - DESCRIPTION=PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql - ENABLED_COLLECTIONS=rh-postgresql13 - BASH_ENV=/usr/share/container-scripts/postgresql/scl_enable - ENV=/usr/share/container-scripts/postgresql/scl_enable - PROMPT_COMMAND=. /usr/share/container-scripts/postgresql/scl_enable ExposedPorts: 5432/tcp: {} Labels: architecture: x86_64 build-date: 2024-05-30T09:38:47 com.redhat.component: rh-postgresql13-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. distribution-scope: public io.buildah.version: 1.29.0 io.k8s.description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. 
io.k8s.display-name: PostgreSQL 13 io.openshift.expose-services: 5432:postgresql io.openshift.s2i.assemble-user: "26" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,postgresql,postgresql13,rh-postgresql13 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhscl/postgresql-13-rhel7 release: "112" summary: PostgreSQL is an advanced Object-Relational database management system url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/postgresql-13-rhel7/images/1-112 usage: podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 rhscl/postgresql-13-rhel7 vcs-ref: 650a3080cdadfaf127db12676d7367028832a281 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "26" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2024-05-30T09:43:55Z" Id: sha256:d4bb2842be9d4c229a7b92c310850541e9c521e58968d69364012dd9eb361a05 Size: 149702202 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhscl/postgresql-13-rhel7@sha256:5f474ef095d7b7aabc5b1c60818201aca66343856fb67eb93751f3b4a82d391b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:5f474ef095d7b7aabc5b1c60818201aca66343856fb67eb93751f3b4a82d391b resourceVersion: "14110" uid: 0ae99d61-0321-4a7e-a2d5-22217f3ca632 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c9281c141a1bfec06e291d2ad29bfdedfd10a99d583fc0f48d3c26723ebe0761 size: 75827357 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:31114e120ca0c7dc51e01721c5a689a614edb6c86de11301d503c72be1540c79 size: 1325 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:64d1ef3b7da93a80b0f0dbb170bd0dae897197330eeca5d4b28b32406ce05bf5 size: 113496170 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HOME=/home/jboss - JAVA_DATA_DIR=/deployments/data - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_HAWKULAR_MODULE=/opt/jboss/container/hawkular - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - 
JBOSS_IMAGE_VERSION=1.0 - JOLOKIA_VERSION=1.5.0 - MAVEN_VERSION=3.5 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: c16126310035 Image: 0cb8f43f3c0ee74902bca4db6b407206e5e33ab673b54ce6b6a02e87931cd1d8 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T10:14:06.277221 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: 8080/tcp:webcache,8443/tcp:pcsync-https io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java name: openjdk/openjdk-11-rhel7 org.concrt.version: 2.2.1 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: "11.0" release: "16.1567588131" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.0-16.1567588131 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 1adfa44bec655763e1d25adea89281cc970ac1b2 vcs-type: git vendor: Red Hat, Inc. 
version: "1.0" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HOME=/home/jboss - JAVA_DATA_DIR=/deployments/data - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_HAWKULAR_MODULE=/opt/jboss/container/hawkular - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.0 - JOLOKIA_VERSION=1.5.0 - MAVEN_VERSION=3.5 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: c16126310035 Image: sha256:29789ea7e724a2cbf6d875cdd47f0ba1965698730e597d32b5d08d4a662b8c08 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T10:14:06.277221 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: 8080/tcp:webcache,8443/tcp:pcsync-https io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java name: openjdk/openjdk-11-rhel7 org.concrt.version: 2.2.1 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: "11.0" release: "16.1567588131" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.0-16.1567588131 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 1adfa44bec655763e1d25adea89281cc970ac1b2 vcs-type: git vendor: Red Hat, Inc. 
version: "1.0" User: "185" WorkingDir: /home/jboss Created: "2019-09-04T10:19:20Z" DockerVersion: 1.13.1 Id: sha256:781deed5dc9a9b694bf99bc690876b040c3cd3a21cbbf91dc8d4e334774c8e58 Size: 189332535 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:5fb3543c0d42146f0506c1ea4d09575131da6a2f27885729b7cfce13a0fa90e3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:5fb3543c0d42146f0506c1ea4d09575131da6a2f27885729b7cfce13a0fa90e3 resourceVersion: "14097" uid: 3b4a180b-d365-4db2-8ac1-d5ec58452ee0 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5329d7039f252afc1c5d69521ef7e674f71c36b50db99b369cbb52aa9e0a6782 size: 39330100 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a39b89b913c41f6eef52956f4f2a7454ea948eb923171e858702e4eb01636fe2 size: 75754606 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.16 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-07-19T16:06:41 com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "2" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.16-2 vcs-ref: 5c6db76f65c8629dda7e9b07fe613296f7db4ae0 vcs-type: git vendor: Red Hat, Inc. 
version: "1.16" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-07-19T16:18:04Z" Id: sha256:d983d5dc836b5f862aa15132c161d29daa4fc81c170c35d054a505911678e37b Size: 115103153 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:603d10af5e3476add5b5726fdef893033869ae89824ee43949a46c9f004ef65d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:603d10af5e3476add5b5726fdef893033869ae89824ee43949a46c9f004ef65d resourceVersion: "14224" uid: 6d7ab759-df27-48ff-87a7-702d6adec1de - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0a017d456fb3a760722ba4895579d8a412aec74e61d6805b04df6527b70fce6b size: 80807726 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:80a2bcb42ca25702f1e5b3b71dd25c6882ae0a2a03bb87d0b76c579cef9806a4 size: 1607 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4cef03da710b75dc7d3790dd132e70e8e0bdf5ec986c9ea019a18a41dd2a556b size: 115626485 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.14 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2022-10-17T14:03:16 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.buildah.version: 1.26.2 io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java 
Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "4" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.14-4 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 8b58570eb3bf3ee0233b3b719f570669f44b48d8 vcs-type: git vendor: Red Hat, Inc. version: "1.14" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2022-10-17T14:10:23Z" Id: sha256:0ad46880060555ce33df90b0701e88a92f6888e0d4d2f09bfaf6f82800c4ad1e Size: 196461538 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:61555b923dabe4ff734279ed1bdb9eb6d450c760e1cc04463cf88608ac8d1338 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:61555b923dabe4ff734279ed1bdb9eb6d450c760e1cc04463cf88608ac8d1338 resourceVersion: "14102" uid: aedba0b1-0031-4f3f-a57a-a4f36f6e5fad - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a9e23b64ace00a199db21d302292b434e9d3956d79319d958ecc19603d00c946 size: 39622437 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:38b71301a1d9df24c98b5a5ee8515404f42c929003ad8b13ab83d2de7de34dec size: 1742 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:21e3f6d4169e6fb80dc9f9f7e9d5c229320a19d8cd688742ed2dc8fdb9aacaf4 size: 86648421 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 02d87ca75d31 Image: 005148a7d0b230be58796f80ebff36dc276416b308d0616f226f95e91477533e Labels: architecture: x86_64 build-date: 2022-03-28T09:43:33.279257 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "2.1648459559" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: 
https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.11-2.1648459559 vcs-ref: 61c1c4ec845dbdfee56c0e6e9c9371fb43f7d2c0 vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 02d87ca75d31 Image: sha256:67b9e1b4ea15320d4b79b92b6652184e99f02cc13236284b8281be687d280f2b Labels: architecture: x86_64 build-date: 2022-03-28T09:43:33.279257 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "2.1648459559" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.11-2.1648459559 vcs-ref: 61c1c4ec845dbdfee56c0e6e9c9371fb43f7d2c0 vcs-type: git vendor: Red Hat, Inc. 
version: "1.11" User: "185" WorkingDir: /home/jboss Created: "2022-03-28T09:46:42Z" DockerVersion: 1.13.1 Id: sha256:baa381a6295925b5de5c23b06999adf864e8b06511271e0bb45ef0d7e6fd60f8 Size: 126277133 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:64acf3403b5c2c85f7a28f326c63f1312b568db059c66d90b34e3c59fde3a74b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:64acf3403b5c2c85f7a28f326c63f1312b568db059c66d90b34e3c59fde3a74b resourceVersion: "14188" uid: 09192df6-f4d0-4934-853e-44944f0e9dea - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:158b4527561fa6bd9dc89217fff5b1f4cce16fdc5a5aef36345db0554ba996fc size: 39501292 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a3ba00ce78fe80837f49d37f5f538d9f7dc9eb8b1627350041496a99028cdf26 size: 1751 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:96ed3211a1fdf39a2b337f485d061e7858eff93691c264d7dc88e82ca16d1a0d size: 117629593 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.3 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: fa28dc65189d Image: 58729790e9a33cac990813c816c66f9894b930d910ff5a9e0d3770d68f22acf8 Labels: architecture: x86_64 build-date: 2021-07-23T17:36:41.656022 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running 
plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "18" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.3-18 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: b751d29a1fc06c8bc7945e965162ca258d9a155a vcs-type: git vendor: Red Hat, Inc. version: "1.3" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.3 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: fa28dc65189d Image: sha256:7f1090033a054029fe4cd7d9ccd90cf8702821e13718db2ba4613346d7600290 Labels: architecture: x86_64 build-date: 2021-07-23T17:36:41.656022 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 
"11" org.jboss.product.version: "11" release: "18" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.3-18 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: b751d29a1fc06c8bc7945e965162ca258d9a155a vcs-type: git vendor: Red Hat, Inc. version: "1.3" User: "185" WorkingDir: /home/jboss Created: "2021-07-23T17:38:58Z" DockerVersion: 1.13.1 Id: sha256:5df8a34ef16500f3fcff12d1c388dfafacad19f5ad3fb2d827c25f8ee663060a Size: 157140080 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11@sha256:663eb81388ae8f824e7920c272f6d2e2274cf6c140d61416607261cdce9d50e2 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:663eb81388ae8f824e7920c272f6d2e2274cf6c140d61416607261cdce9d50e2 resourceVersion: "14184" uid: fe7e0a19-edb4-4f7e-8550-11ca1b5ae4f3 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6208c5a2e205726f3a2cd42a392c5e4f05256850d13197a711000c4021ede87b size: 79073674 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:87b6121ef647e82c2efa8e6489d94c7668d88af38c138236592c6675acdf055a size: 18346487 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f6efb7833548fe17294f057c70215a3789c7ac86e39f698f00c4e7a895ccadf3 size: 148568415 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4186a94a1e5b175d7bc3dd68bc28daf97c822f6e56c9d8aee432af1508f245e7 size: 168253660 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - NODEJS_VER=14 - NAME=golang - GO_MAJOR_VERSION=1 - GO_MINOR_VERSION=18 - GO_PATCH_VERSION=10 - CONTAINER_NAME=rhel8/go-toolset - VERSION=1.18.10 - SUMMARY=Platform for building and running Go Applications - DESCRIPTION=Go Toolset available as a container is a base platform for building and running various Go applications and frameworks. Go is an easy to learn, powerful, statically typed language in the C/C++ tradition with garbage collection, concurrent programming support, and memory safety features. Labels: architecture: x86_64 build-date: 2023-05-02T08:02:23 com.redhat.component: go-toolset-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Go Toolset available as a container is a base platform for building and running various Go applications and frameworks. Go is an easy to learn, powerful, statically typed language in the C/C++ tradition with garbage collection, concurrent programming support, and memory safety features. distribution-scope: public io.buildah.version: 1.27.3 io.k8s.description: Go Toolset available as a container is a base platform for building and running various Go applications and frameworks. 
Go is an easy to learn, powerful, statically typed language in the C/C++ tradition with garbage collection, concurrent programming support, and memory safety features. io.k8s.display-name: Go 1.18.10 io.openshift.expose-services: "" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,golang,golang118,rh-golang118,go io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. name: rhel8/go-toolset release: "1.1683014505" summary: Platform for building and running Go Applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel8/go-toolset/images/1.18.10-1.1683014505 vcs-ref: 0a0f3d90d55f2c2ce687a113e6b0ec4f6d5385f4 vcs-type: git vendor: Red Hat, Inc. version: 1.18.10 User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2023-05-02T08:06:40Z" Id: sha256:7deee1f302eac4ab5424e1c47cf2c33f8ae965cd09f9d9f476576eaba2171202 Size: 414263381 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/go-toolset@sha256:6740d72db4de99ecb4652cff89a239242afd150d6ccf6ed0ebff89ffcbbc649e kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:6740d72db4de99ecb4652cff89a239242afd150d6ccf6ed0ebff89ffcbbc649e resourceVersion: "13447" uid: 51a3b05c-be7b-4696-ad15-1680689f6a3b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:872582724f337bcc41b829d3b854567e146ab62aa3c7de0b37e18617d38f5d08 size: 76246809 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b13ffc206103620a7d59e4f5b72279b53e317ade5d545a3daa06ab9bed270f92 size: 1409 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:600592def98ffdb45f9711ecab0f64a77461f9a274b30fc4fdaa94c1c595c4d6 size: 351208290 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/datagrid/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.7.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.7 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.7.GA 
ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: d0feb8001879 Image: 626700f3921fe594404fa4af91e1b528fc969cd693d12e85de97b25dff2abbef Labels: architecture: x86_64 build-date: 2020-11-04T17:56:28.528192 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.7.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.7.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.7.GA release: "1.1604508637" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.7-1.1604508637 vcs-ref: 96354c3fa6be20ad2282ccf66c3c9e0f116e4a31 vcs-type: git vendor: Red Hat, Inc. version: "1.7" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.7.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.7 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.7.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: d0feb8001879 Image: sha256:4659d5d8283f992a052a294cdafac078e42c8a28bd3b97020f8e4dacf085e879 Labels: architecture: 
x86_64 build-date: 2020-11-04T17:56:28.528192 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.7.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.7.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.7.GA release: "1.1604508637" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.7-1.1604508637 vcs-ref: 96354c3fa6be20ad2282ccf66c3c9e0f116e4a31 vcs-type: git vendor: Red Hat, Inc. version: "1.7" User: "185" WorkingDir: /home/jboss Created: "2020-11-04T18:03:48Z" DockerVersion: 1.13.1 Id: sha256:f93539c39983467734ce536a25b5e7488c2de0c0739baaafabc32330f51f02ae Size: 427464320 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:67a2ae44e1bd87166e5c70f8147ccc9064ddfc8f43170bc92db9b12568cc7f73 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:67a2ae44e1bd87166e5c70f8147ccc9064ddfc8f43170bc92db9b12568cc7f73 resourceVersion: "13906" uid: 05b083f9-217a-4112-9ae0-51187e0d1a41 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4009473c181390dadf086f42ecb4b10fb87f5e79de8d0195f6c22239985b2da0 size: 111002424 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - 
JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.17 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-10-24T10:41:27 com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "9" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.17-9 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: 7fe2971fe71fcf186dd65f26dc8d2ccacc59a7fa vcs-type: git vendor: Red Hat, Inc. 
version: "1.17" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-10-24T10:48:08Z" Id: sha256:e043ac429b3b2377a20fccf384174860e43b30861514516c11461dd90459b337 Size: 150340465 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11@sha256:67fee4b64b269f5666a1051d806635b675903ef56d07b7cc019d3d59ff1aa97c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:67fee4b64b269f5666a1051d806635b675903ef56d07b7cc019d3d59ff1aa97c resourceVersion: "14181" uid: 055ae67e-9e44-4bb7-9f37-bfeca8b3b1dd - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a9e23b64ace00a199db21d302292b434e9d3956d79319d958ecc19603d00c946 size: 39622437 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:38b71301a1d9df24c98b5a5ee8515404f42c929003ad8b13ab83d2de7de34dec size: 1742 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:aec435fea4b5bc7075142be90e1a5f25f18ee0a579b426c353fa9023c57b7c42 size: 72381581 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 02d87ca75d31 Image: 7ece4ac7875a3b0fb858ee67afd96da4b03a11167a1b855ada8f9d7b947ad905 Labels: architecture: x86_64 build-date: 2022-03-28T09:54:50.779196 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1648459718" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.11-1.1648459718 vcs-ref: bd1e0ec3ad9d61f6e620f6e77bf65901cbf284c8 vcs-type: git vendor: Red Hat, Inc. 
version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 02d87ca75d31 Image: sha256:8bafa44b4af8bd05c194dc9e8cd756975bd19c786601a0575013b07e30d3618d Labels: architecture: x86_64 build-date: 2022-03-28T09:54:50.779196 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1648459718" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.11-1.1648459718 vcs-ref: bd1e0ec3ad9d61f6e620f6e77bf65901cbf284c8 vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss Created: "2022-03-28T09:56:57Z" DockerVersion: 1.13.1 Id: sha256:a8eff7d388f5bfe9a30050edd235326bd2f1832f142734ee37a4ba89988475f5 Size: 112010305 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:6a9e81b2eea2f32f2750909b6aa037c2c2e68be3bc9daf3c7a3163c9e1df379f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:6a9e81b2eea2f32f2750909b6aa037c2c2e68be3bc9daf3c7a3163c9e1df379f resourceVersion: "14219" uid: d1d21823-9515-4065-9fd7-ba877d3be5d8 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e0f71f706c2a1ff9efee4025e27d4dfd4f328190f31d16e11ef3283bc16d6842 size: 74922253 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:121ab4741000471a7e2ddc687dedb440060bf9845ca415a45e99e361706f1098 size: 1249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f3e8100df18b6435295877c8636d35e1b4dda1bec0feb4b4fb0e29524cd2a6f3 size: 3814449 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:442397635a987d4e69150861559f0a2fa5f70e76527f0b3b8455a9230fd7ebdb size: 70318532 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:05be2d56722fd9e005e3b97068207ca9e063318577c1c8dbd30fb5616495c81b size: 26047198 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift 
- JBOSS_IMAGE_VERSION=1.3 - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JAVA_DATA_DIR=/deployments/data - JOLOKIA_VERSION=1.5.0 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - MAVEN_VERSION=3.5 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 275866c14a28 Image: 14d1397b80b912c40d9db41a956ab5c37241bb9d4c6a1a02dbb62173b373c795 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-05-25T09:36:46.776111 com.redhat.build-host: ip-10-29-120-249.ec2.internal com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Cloud Enablement Feedback name: redhat-openjdk-18/openjdk18-openshift org.concrt.version: 1.4.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "9" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.3-9 vcs-ref: 95a196009d4a0578f7322e736a14b855022025c0 vcs-type: git vendor: Red Hat, Inc. version: "1.3" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.3 - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JAVA_DATA_DIR=/deployments/data - JOLOKIA_VERSION=1.5.0 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - MAVEN_VERSION=3.5 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 275866c14a28 Image: sha256:4f8141e8a5d50794ca5e21fa94ba3e97d1922230c9ffb34102d9959550293802 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-05-25T09:36:46.776111 com.redhat.build-host: ip-10-29-120-249.ec2.internal com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Cloud Enablement Feedback name: redhat-openjdk-18/openjdk18-openshift org.concrt.version: 1.4.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "9" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: 
https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.3-9 vcs-ref: 95a196009d4a0578f7322e736a14b855022025c0 vcs-type: git vendor: Red Hat, Inc. version: "1.3" User: "185" WorkingDir: /home/jboss Created: "2018-05-25T09:51:14Z" DockerVersion: 1.12.6 Id: sha256:3bb0d233f67591f36ffbecaef447c0733e1ce84b2e7cb69ede1767f50b38293b Size: 175109052 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:6c009f430da02bdcff618a7dcd085d7d22547263eeebfb8d6377a4cf6f58769d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:6c009f430da02bdcff618a7dcd085d7d22547263eeebfb8d6377a4cf6f58769d resourceVersion: "14168" uid: 3ffa58f9-ccc8-4cb2-8f62-8ce29833df4c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c8c24bf4ef435ee29d7495ca732a4d82374c1a11c25ca6aae12f997f45ca5a26 size: 39733597 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9abb5e74a5f821b8cab5d489b7a3ebe6bad4b3ee1eb3e7748583419f1ec6c43a size: 63943566 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8c8f181aaa5f9c2fdbb82bd10bfed7e2ecd90e482daf03b5a72a2df7d5e2858a size: 146125818 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - /bin/bash Env: - container=oci - HOME=/opt/app-root - DOTNET_APP_PATH=/opt/app-root/app - DOTNET_DATA_PATH=/opt/app-root/data - DOTNET_DEFAULT_CMD=default-cmd.sh - DOTNET_RUNNING_IN_CONTAINER=true - NUGET_XMLDOC_MODE=skip - ASPNETCORE_URLS=http://*:8080 - APP_UID=1001 - DOTNET_VERSION=9.0.10 - ASPNET_VERSION=9.0.10 - PATH=/opt/app-root/src/.local/bin:/opt/app-root/src/bin:/opt/app-root/.dotnet/tools:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - STI_SCRIPTS_PATH=/usr/libexec/s2i - DOTNET_GENERATE_ASPNET_CERTIFICATE=false - DOTNET_NOLOGO=true - DOTNET_SDK_VERSION=9.0.111 - DOTNET_USE_POLLING_FILE_WATCHER=true ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-23T04:19:56Z" com.redhat.component: dotnet-90-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Platform for building and running .NET 9 applications distribution-scope: public dotnet_version: 9.0.10 io.buildah.version: 1.41.4 io.k8s.description: Platform for building and running .NET 9 applications io.k8s.display-name: .NET 9 SDK io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,.net,dotnet,dotnetcore,dotnet-90 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. name: ubi8/dotnet-90 org.opencontainers.image.revision: 99c3e369a8ad3eea3249fd405db03b23afc1b3bd release: "1761193002" sdk_version: 9.0.111 summary: .NET 9 SDK url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: 99c3e369a8ad3eea3249fd405db03b23afc1b3bd vcs-type: git vendor: Red Hat, Inc. 
version: "9.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-23T04:20:12Z" Id: sha256:2686d48a4222af19efa514806c5fe13f7d33d461c5e6d6889122b321227f0ddb Size: 249821433 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/dotnet-90@sha256:6cb572c7356c9d9ae6d2760491095de4b1797471ea97c50b330bc2ce1168da56 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:6cb572c7356c9d9ae6d2760491095de4b1797471ea97c50b330bc2ce1168da56 resourceVersion: "13402" uid: 62a8cd5d-bf08-4641-86b1-9e43a65f1c40 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6208c5a2e205726f3a2cd42a392c5e4f05256850d13197a711000c4021ede87b size: 79073674 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a9114bbce1cf696d619180f9f7ff70c43b64cad2c61fee8484baf4937c0719df size: 103042110 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:63199b580b1b4782ed98f87da9c93405ef9adf963b642eb676dac6b4f9e69790 size: 30522080 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jws-5.7/tomcat/bin/launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - CATALINA_OPTS=-Djava.security.egd=file:/dev/./urandom - JBOSS_PRODUCT=webserver - JBOSS_WEBSERVER_VERSION=5.7.2 - JPDA_ADDRESS=8000 - JWS_HOME=/opt/jws-5.7/tomcat - PRODUCT_VERSION=5.7.2 - TOMCAT_VERSION=9.0.62 - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JWS_S2I_MODULE=/opt/jboss/container/jws/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - HOME=/home/jboss - AB_PROMETHEUS_ENABLE=True - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jws-jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9404 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_IMAGE_NAME=jboss-webserver-5/jws57-openjdk8-rhel8-openshift - JBOSS_IMAGE_VERSION=5.7.2 - STI_BUILDER=jee ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9404/tcp: {} Labels: architecture: x86_64 build-date: 2023-04-26T09:25:21 com.redhat.component: jboss-webserver-57-openjdk8-rhel8-openshift-container com.redhat.deployments-dir: /opt/jws-5.7/tomcat/webapps com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: JPDA_ADDRESS:8000 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK8 on UBI8 distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 
4.6.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running web applications on JBoss Web Server 5.7 with OpenJDK8 - Tomcat v9 io.k8s.display-name: JBoss Web Server 5.7 OpenJDK8 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,tomcat9 maintainer: szappis@redhat.com name: jboss-webserver-5/jws57-openjdk8-rhel8-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/jws-5.7/tomcat/webapps org.jboss.product: webserver-tomcat9 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 5.7.2 org.jboss.product.webserver-tomcat9.version: 5.7.2 release: "8" summary: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK8 on UBI8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-webserver-5/jws57-openjdk8-rhel8-openshift/images/5.7.2-8 vcs-ref: 61b26d085598c85070ec2d9b38a315c0b8f991fb vcs-type: git vendor: Red Hat, Inc. version: 5.7.2 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-04-26T09:28:59Z" Id: sha256:ae400f247fd0a84dec56c018fb71e765bcb9f0492b8b61457133e03fcba53636 Size: 212680271 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-webserver-5/jws57-openjdk8-openshift-rhel8@sha256:6d07cbaef7869b2e0d878740ad685b150f3d8ab960544c881916a01f25f9b6ef kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:6d07cbaef7869b2e0d878740ad685b150f3d8ab960544c881916a01f25f9b6ef resourceVersion: "13795" uid: 6c26885c-8c78-4c4c-b700-cbfa43605156 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:94343313ec1512ab02267e4bc3ce09eecb01fda5bf26c56e2f028ecc72e80b18 size: 79299514 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:daff7315ef7e4097ed9e5313418c762a6b7c3c33f6f8a6ce3c192659e1eb808a size: 467456758 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8a2ce3ce10b696b49ce3aab124b031a01fc32a4eb797cbf85957057bbfec603c size: 639203159 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - EAP_FULL_GROUPID=org.jboss.eap - JBOSS_EAP_VERSION=7.4.13 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.4.13 - WILDFLY_VERSION=7.4.13.GA-redhat-00001 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - 
JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.9.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - S2I_FP_VERSION=23.0.0.Final - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap74-openjdk11-openshift-rhel8 - JBOSS_IMAGE_VERSION=7.4.13 - JOLOKIA_VERSION=1.7.2.redhat-00002 - WILDFLY_CAMEL_VERSION=5.10.0.fuse-7_12_1-00009-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2024-01-18T23:04:42 com.redhat.component: fuse-eap-openshift-jdk11-rhel-8-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Camel applications on EAP 7.4 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.7.2.redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.4 
io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Thomas Diesler name: fuse7/fuse-eap-openshift-jdk11-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.4.13 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 7.4.13 release: "23.1705611192" summary: Platform for building and running Apache Camel applications on EAP 7.4 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift-jdk11-rhel8/images/1.12-23.1705611192 vcs-ref: fb4db3d2f131e744a18bd1182cb386356aa87e41 vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-01-18T23:09:14Z" Id: sha256:654f54c42824dc58d4369bb62b36219bc334a80410802bc3ac4739c751af4205 Size: 1186013410 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift-jdk11-rhel8@sha256:6d8afb6d1fced4deee8de43b935e2bf5164c81bc26bee01da0fce69b74b63f83 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:6d8afb6d1fced4deee8de43b935e2bf5164c81bc26bee01da0fce69b74b63f83 resourceVersion: "13333" uid: aa9e5690-5ac7-4e2a-a81f-47ec07fcfadc - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:de1a4526c3d6cdf0e04f24b1888f5ef31425209f1c26e5a6ae7694cdad6e8688 size: 78973191 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:68e455cb0aea90f85be197ceadef7a56ca5a4d7bf6761a3a58b0ab36a65f770e size: 74468695 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c9ea21ed8be2224d8905e2b61fcfa7b7c3b28de2e57b2a57e1ed64106a67ab99 size: 166776237 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - /usr/libexec/s2i/usage Entrypoint: - container-entrypoint Env: - container=oci - HOME=/opt/app-root - DOTNET_APP_PATH=/opt/app-root/app - DOTNET_DATA_PATH=/opt/app-root/data - DOTNET_DEFAULT_CMD=default-cmd.sh - DOTNET_CORE_VERSION=6.0 - DOTNET_FRAMEWORK=net6.0 - DOTNET_RUNNING_IN_CONTAINER=true - DOTNET_SSL_CERT_DIR=/opt/app-root/ssl_dir - NUGET_XMLDOC_MODE=skip - ASPNETCORE_URLS=http://*:8080 - CONTAINER_SCRIPTS_PATH=/opt/app-root - PATH=/opt/app-root/src/.local/bin:/opt/app-root/src/bin:/opt/app-root/node_modules/.bin:/opt/app-root/.dotnet/tools/:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - STI_SCRIPTS_PATH=/usr/libexec/s2i - DOTNET_GENERATE_ASPNET_CERTIFICATE=false - DOTNET_NOLOGO=true - DOTNET_USE_POLLING_FILE_WATCHER=true ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: 2024-11-13T23:08:15 com.redhat.component: dotnet-60-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Platform for building and running .NET 6 applications distribution-scope: public io.buildah.version: 1.33.8 io.k8s.description: Platform for building and running .NET 6 applications io.k8s.display-name: .NET 6 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: 
builder,.net,dotnet,dotnetcore,dotnet-60 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. name: ubi8/dotnet-60 release: "56" summary: Provides the latest release of Red Hat Universal Base Image 8. url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/dotnet-60/images/6.0-56 vcs-ref: e084a6c82df0a7d768ee24821f4c3d6df23989e0 vcs-type: git vendor: Red Hat, Inc. version: "6.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2024-11-13T23:11:22Z" Id: sha256:a33af805134171f684198c9cc51cafc4e503192a39ee8cf8ef8c31fcf5fbcf87 Size: 320236550 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/dotnet-60@sha256:70a21b3f93c05843ce9d07f125b1464436caf01680bb733754a2a5df5bc3b11b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:70a21b3f93c05843ce9d07f125b1464436caf01680bb733754a2a5df5bc3b11b resourceVersion: "13393" uid: ec9c2072-4809-495e-b58b-3ba6a04f1496 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:54e56e6f85721741ee7bf0336de8ad3bf138a56769a6d0097b600a0e361be58d size: 39618910 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4f8ddd7f5a755f537dd9d5f553c8c78171dcf3018c5fc96676a07380d3e14e20 size: 1745 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b77215bc44b3aef7ed84f29a90a5958ecd050911e76c25976762aba8350c33b7 size: 112841081 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 65ec992ef2e6 Image: 55dda0fa5938fcaf1dba5c82789815d9089c5b6f00c535de4b25d4e4f9e2e9b2 Labels: architecture: x86_64 build-date: 2022-04-29T13:50:53.410788 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com 
com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1651233087" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.12-1.1651233087 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: 3f38ac3e8fb9711c4f4ef88bbefcd7a2e4f23641 vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 65ec992ef2e6 Image: sha256:27fa7839928d26abab22bf783e14f02e4a8e92b190cf567a166dac2fe3c7e0ea Labels: architecture: x86_64 build-date: 2022-04-29T13:50:53.410788 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" 
io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1651233087" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.12-1.1651233087 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: 3f38ac3e8fb9711c4f4ef88bbefcd7a2e4f23641 vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss Created: "2022-04-29T13:57:11Z" DockerVersion: 1.13.1 Id: sha256:b50aec0cad0399dde1d973e4b5d7572f5b6af9b3f8ff45952765178971535697 Size: 152469251 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8@sha256:7164a06e9ba98a3ce9991bd7019512488efe30895175bb463e255f00eb9421fd kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:7164a06e9ba98a3ce9991bd7019512488efe30895175bb463e255f00eb9421fd resourceVersion: "14209" uid: fb60b5bf-a201-4453-a30d-bf8b2ae8d39b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c8c24bf4ef435ee29d7495ca732a4d82374c1a11c25ca6aae12f997f45ca5a26 size: 39733597 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a393588d7ce4791b5a481ad21808aca7eb0f342431a9f36b67ad14421c801862 size: 62821119 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4add4c8630229b186f76faa6d9b5e02453219ca65f300c550e54bae7eeaca9d1 size: 150291145 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - /bin/bash Env: - container=oci - HOME=/opt/app-root - DOTNET_APP_PATH=/opt/app-root/app - DOTNET_DATA_PATH=/opt/app-root/data - DOTNET_DEFAULT_CMD=default-cmd.sh - DOTNET_RUNNING_IN_CONTAINER=true - NUGET_XMLDOC_MODE=skip - ASPNETCORE_URLS=http://*:8080 - APP_UID=1001 - DOTNET_VERSION=8.0.22 - ASPNET_VERSION=8.0.22 - PATH=/opt/app-root/src/.local/bin:/opt/app-root/src/bin:/opt/app-root/.dotnet/tools:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - STI_SCRIPTS_PATH=/usr/libexec/s2i - DOTNET_GENERATE_ASPNET_CERTIFICATE=false - DOTNET_NOLOGO=true - DOTNET_SDK_VERSION=8.0.122 - DOTNET_USE_POLLING_FILE_WATCHER=true ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-12-02T13:31:27Z" com.redhat.component: dotnet-80-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Platform for building and running .NET 8 applications distribution-scope: public dotnet_version: 8.0.22 io.buildah.version: 1.41.4 io.k8s.description: Platform for building and running .NET 8 applications io.k8s.display-name: .NET 8 SDK io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: 
image:///usr/libexec/s2i io.openshift.tags: builder,.net,dotnet,dotnetcore,dotnet-80 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. name: ubi8/dotnet-80 org.opencontainers.image.created: "2025-12-02T13:31:27Z" org.opencontainers.image.revision: b1b4a6d9f69a6d37c7648ae3425bebcec9ae436b release: "1764682204" sdk_version: 8.0.122 summary: .NET 8 SDK url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: b1b4a6d9f69a6d37c7648ae3425bebcec9ae436b vcs-type: git vendor: Red Hat, Inc. version: "8.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-12-02T13:31:43Z" Id: sha256:bf642b10d531dd915ecb5053613dfcf62fb78dab5c90f8b19c06dd0dbabc0fa0 Size: 252865117 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/dotnet-80@sha256:7201e059b92acc55fe9fe1cc390d44e92f0e2af297fbe52b3f1bb56327f59624 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-12-08T17:53:27Z" name: sha256:7201e059b92acc55fe9fe1cc390d44e92f0e2af297fbe52b3f1bb56327f59624 resourceVersion: "40304" uid: bfb25c68-1a70-4062-87f8-2bf2cc0cf97b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d7c06497d5cebd39c0a4feb14981ec940b5c863e49903d320f630805b049cbff size: 39279912 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4a641477fd623eb5d13b9745c982b80afe91edd23076b1d351e94399b0d062c1 size: 115112568 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.14 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-02-07T17:20:49 com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 3.11.0 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications 
io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "10.1675788279" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.14-10.1675788279 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 26c4e5bd1ffe379b617c1bc35be67a640fe496ec vcs-type: git vendor: Red Hat, Inc. version: "1.14" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-02-07T17:33:34Z" Id: sha256:69b336536858bca7041cca9615f8e4cff18576dcad0bbfdea40760fb0034702c Size: 154420768 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17@sha256:739fac452e78a21a16b66e0451b85590b9e48ec7a1ed3887fbb9ed85cf564275 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:739fac452e78a21a16b66e0451b85590b9e48ec7a1ed3887fbb9ed85cf564275 resourceVersion: "14145" uid: f3d8c053-1aff-49dd-bff0-be93acb9743f - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:54e56e6f85721741ee7bf0336de8ad3bf138a56769a6d0097b600a0e361be58d size: 39618910 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4f8ddd7f5a755f537dd9d5f553c8c78171dcf3018c5fc96676a07380d3e14e20 size: 1745 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ab02516cb47ff2ec425e12a109cf913f14e3444fc733c19d241f3e28cc7080db size: 86708756 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 65ec992ef2e6 Image: d6c30419e2f08a4cc97f60318cd1cdfb9a334ff8b6e15ba8e3fcb122721fd52f Labels: architecture: x86_64 build-date: 2022-04-29T13:51:37.153929 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java 
maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1651233103" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.12-1.1651233103 vcs-ref: 178c0c78042b665fdd32e3eed34c52377f6ff4a3 vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 65ec992ef2e6 Image: sha256:cfcfa013cb87d2a2715ac503134cef699c6178f084eb8d7c78258670e1ae426c Labels: architecture: x86_64 build-date: 2022-04-29T13:51:37.153929 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1651233103" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.12-1.1651233103 vcs-ref: 178c0c78042b665fdd32e3eed34c52377f6ff4a3 vcs-type: git vendor: Red Hat, Inc. 
version: "1.12" User: "185" WorkingDir: /home/jboss Created: "2022-04-29T13:54:24Z" DockerVersion: 1.13.1 Id: sha256:943deef450894b400c5db2d98b4f1c84ffcdc3e8ed20effa3b3e99edeb8a53f3 Size: 126334433 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:74051f86b00fb102e34276f03a310c16bc57b9c2a001a56ba66359e15ee48ba6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:74051f86b00fb102e34276f03a310c16bc57b9c2a001a56ba66359e15ee48ba6 resourceVersion: "14189" uid: d819ce7d-f27c-42b8-b12e-06a5e4ea72ac - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a06cfa6e5ed77521218eaa75d023f86e156295cb20de1bda73e67b69c667962c size: 77840791 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:94130d9f48556208ec760273e324c3e5fdd13869d82c8eb8df8f97e1abe31641 size: 465790491 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - EAP_FULL_GROUPID=org.jboss.eap - JBOSS_EAP_VERSION=7.4.23 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.4.23 - WILDFLY_VERSION=7.4.23.GA-redhat-00002 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.9.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - 
GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - S2I_FP_VERSION=23.0.0.Final - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap74-openjdk11-openshift-rhel8 - JBOSS_IMAGE_VERSION=7.4.23 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2025-07-28T22:21:52 com.redhat.component: jboss-eap-74-openjdk11-builder-openshift-rhel8-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Enterprise Application Platform 7.4 OpenShift container image. distribution-scope: public io.buildah.version: 1.33.12 io.cekit.version: 4.12.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running JavaEE applications on JBoss EAP 7.4 io.k8s.display-name: JBoss EAP 7.4 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7 maintainer: Red Hat name: jboss-eap-7/eap74-openjdk11-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.4.23 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 7.4.23 release: "5.1753741010" summary: Red Hat JBoss Enterprise Application Platform 7.4 OpenShift container image. url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-eap-7/eap74-openjdk11-openshift-rhel8/images/7.4.23-5.1753741010 vcs-ref: f1c3b1ac77f5103c962f07b0411ad9b14b5925b0 vcs-type: git vendor: Red Hat, Inc. 
version: 7.4.23 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-07-28T22:37:24Z" Id: sha256:f7f88d0ce38bf0f9c50ab92bd4fef694432cff97270032406cbbc75db4447773 Size: 543694450 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-eap-7/eap74-openjdk11-openshift-rhel8@sha256:74cc70f8d3698d41793d19b0e23b0a79448d02ada93f402a1714d26d164e4c1d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:74cc70f8d3698d41793d19b0e23b0a79448d02ada93f402a1714d26d164e4c1d resourceVersion: "13358" uid: fde9ef5a-2c55-47e3-bba7-49b7fa5a3ed0 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3146793ed37b0ecdd0f68824544cb4162400480c9a0d06ae7fc5bca6ac6a3a0d size: 98978618 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-postgresql Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - POSTGRESQL_VERSION=13 - POSTGRESQL_PREV_VERSION=12 - HOME=/var/lib/pgsql - PGUSER=postgres - APP_DATA=/opt/app-root - SUMMARY=PostgreSQL is an advanced Object-Relational database management system - DESCRIPTION=PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql - ENABLED_COLLECTIONS= ExposedPorts: 5432/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T16:15:38Z" com.redhat.component: postgresql-13-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. 
io.k8s.display-name: PostgreSQL 13 io.openshift.expose-services: 5432:postgresql io.openshift.s2i.assemble-user: "26" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,postgresql,postgresql13,postgresql-13 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel8/postgresql-13 org.opencontainers.image.revision: b12316bd67aee5e7048fa1b520505cbcf075a896 release: "1761063302" summary: PostgreSQL is an advanced Object-Relational database management system url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 rhel8/postgresql-13 vcs-ref: b12316bd67aee5e7048fa1b520505cbcf075a896 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "26" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T16:16:02Z" Id: sha256:2bb99a08b727d648d6ab6ade091e688ddf21681276dadff4410126185aeb21a6 Size: 194536851 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel8/postgresql-13@sha256:74efcab05b844a1c226bb18221a5309e7364b48d52757a809787e9b58e235ed9 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:74efcab05b844a1c226bb18221a5309e7364b48d52757a809787e9b58e235ed9 resourceVersion: "13981" uid: 2b488403-6dd0-4606-9ad2-b4f3c4903c9a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d66a972c08964d487e0b43062210ac00ce3dca3fe569ac665c4324ea6522507d size: 78753765 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:43c5ecd7e21e1320202951bf3c997c21d9536185b960a2cfe5263395e5aee621 size: 122268092 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:72e0ec2e9be9fab697250e486dda80e2b674ceabe4e0306176a289b8eb1130e4 size: 31500769 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jws-5.7/tomcat/bin/launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - CATALINA_OPTS=-Djava.security.egd=file:/dev/./urandom - JBOSS_PRODUCT=webserver - JBOSS_WEBSERVER_VERSION=5.7.8 - JPDA_ADDRESS=8000 - JWS_HOME=/opt/jws-5.7/tomcat - PRODUCT_VERSION=5.7.8 - TOMCAT_VERSION=9.0.62 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JWS_S2I_MODULE=/opt/jboss/container/jws/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - HOME=/home/jboss - AB_PROMETHEUS_ENABLE=True - 
AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jws-jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9404 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_IMAGE_NAME=jboss-webserver-5/jws57-openjdk11-rhel8-openshift - JBOSS_IMAGE_VERSION=5.7.8 - STI_BUILDER=jee ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9404/tcp: {} Labels: architecture: x86_64 build-date: 2024-03-05T15:41:58 com.redhat.component: jboss-webserver-57-openjdk11-rhel8-openshift-container com.redhat.deployments-dir: /opt/jws-5.7/tomcat/webapps com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: JPDA_ADDRESS:8000 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK11 on UBI8 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.9.1 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running web applications on JBoss Web Server 5.7 with OpenJDK11 - Tomcat v9 io.k8s.display-name: JBoss Web Server 5.7 OpenJDK11 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,tomcat9 maintainer: szappis@redhat.com name: jboss-webserver-5/jws57-openjdk11-rhel8-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/jws-5.7/tomcat/webapps org.jboss.product: webserver-tomcat9 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 5.7.8 org.jboss.product.webserver-tomcat9.version: 5.7.8 release: "4" summary: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK11 on UBI8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-webserver-5/jws57-openjdk11-rhel8-openshift/images/5.7.8-4 vcs-ref: 0a0a7d4d45f027ae0d48f69dd994aa4164eee2e0 vcs-type: git vendor: Red Hat, Inc. 
version: 5.7.8 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-03-05T15:47:14Z" Id: sha256:05ba63e3251af34924497f68fbfdec042537af397df7633333ceb3f6822497ab Size: 232565770 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-webserver-5/jws57-openjdk11-openshift-rhel8@sha256:75fbf4aa5c14bba44b5dfbf6673dc80ce35376f626df3a102a5a2edf8141cd34 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:75fbf4aa5c14bba44b5dfbf6673dc80ce35376f626df3a102a5a2edf8141cd34 resourceVersion: "13703" uid: a14d22af-3cd6-4a35-b19b-940a3c56bfce - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d7c06497d5cebd39c0a4feb14981ec940b5c863e49903d320f630805b049cbff size: 39279912 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0840bf80f1ca1e24ec1664dbf77ee26aec2ed283560b6935dfdc1fd1d4155021 size: 88372182 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.14 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-02-07T17:35:49 com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "12.1675788327" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.14-12.1675788327 vcs-ref: 1d6d54438b510cefbe66061c3cf846f0f071658b vcs-type: git vendor: Red Hat, Inc. 
version: "1.14" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-02-07T17:41:33Z" Id: sha256:b5eea4dda264f007084103bfd6966b49856638cc9c434984d3e616fa45fdbb35 Size: 127670678 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:7711108ef60ef6f0536bfa26914af2afaf6455ce6e4c4abd391e31a2d95d0178 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:7711108ef60ef6f0536bfa26914af2afaf6455ce6e4c4abd391e31a2d95d0178 resourceVersion: "14191" uid: a34d1dc9-cfc3-47ed-8f52-a8c0a704b833 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d2bb55755c2edf5423e38bd42c48b277e0cb4c5c765218247001a8c8eb479a87 size: 215199578 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5fb1024fc39d4712fd639f83cdc55f79884635ea0dbffd5155e3e3d690e6f92c size: 18822709 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NODEJS_VER=20 - RUBY_MAJOR_VERSION=3 - RUBY_MINOR_VERSION=3 - RUBY_VERSION=3.3 - RUBY_SCL_NAME_VERSION=33 - RUBY_SCL=ruby-33 - IMAGE_NAME=ubi9/ruby-33 - SUMMARY=Platform for building and running Ruby 3.3 applications - DESCRIPTION=Ruby 3.3 available as container is a base platform for building and running various Ruby 3.3 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T16:11:32Z" com.redhat.component: ruby-33-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Ruby 3.3 available as container is a base platform for building and running various Ruby 3.3 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Ruby 3.3 available as container is a base platform for building and running various Ruby 3.3 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. 
io.k8s.display-name: Ruby 3.3 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,ruby,ruby33,ruby-33 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/ruby-33 org.opencontainers.image.revision: bfe1bee9763f056da6f889c824ccac428e9eecb7 release: "1760371851" summary: Platform for building and running Ruby 3.3 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-ruby-container.git --context-dir=3.3/test/puma-test-app/ ubi9/ruby-33 ruby-sample-app vcs-ref: bfe1bee9763f056da6f889c824ccac428e9eecb7 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T16:11:41Z" Id: sha256:352be925d76d335387ae901dbd4998706c3a84d3ede978998c8447b5501a47af Size: 331124694 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/ruby-33@sha256:77e5675e066943925eb228f51434080f10bb0be323c9c55ac62d223a0dd1b250 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:77e5675e066943925eb228f51434080f10bb0be323c9c55ac62d223a0dd1b250 resourceVersion: "14120" uid: 5c79851a-1cb5-4c26-920d-2358164f4610 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d66a972c08964d487e0b43062210ac00ce3dca3fe569ac665c4324ea6522507d size: 78753765 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5e0f5766bf222b480ac1f5f2b7f1cf48286aca5e7f3b5c54d15b8bf4b3e50abd size: 103172532 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7bac85c1acdb755667bc9c14ad5d1eae07632d73ebe3c41efebfaccaba397485 size: 30943484 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jws-5.7/tomcat/bin/launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - CATALINA_OPTS=-Djava.security.egd=file:/dev/./urandom - JBOSS_PRODUCT=webserver - JBOSS_WEBSERVER_VERSION=5.7.8 - JPDA_ADDRESS=8000 - JWS_HOME=/opt/jws-5.7/tomcat - PRODUCT_VERSION=5.7.8 - TOMCAT_VERSION=9.0.62 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JWS_S2I_MODULE=/opt/jboss/container/jws/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - HOME=/home/jboss - AB_PROMETHEUS_ENABLE=True - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jws-jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9404 - 
JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_IMAGE_NAME=jboss-webserver-5/jws57-openjdk8-rhel8-openshift - JBOSS_IMAGE_VERSION=5.7.8 - STI_BUILDER=jee ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9404/tcp: {} Labels: architecture: x86_64 build-date: 2024-03-05T15:41:20 com.redhat.component: jboss-webserver-57-openjdk8-rhel8-openshift-container com.redhat.deployments-dir: /opt/jws-5.7/tomcat/webapps com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: JPDA_ADDRESS:8000 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK8 on UBI8 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.10.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running web applications on JBoss Web Server 5.7 with OpenJDK8 - Tomcat v9 io.k8s.display-name: JBoss Web Server 5.7 OpenJDK8 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,tomcat9 maintainer: szappis@redhat.com name: jboss-webserver-5/jws57-openjdk8-rhel8-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/jws-5.7/tomcat/webapps org.jboss.product: webserver-tomcat9 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 5.7.8 org.jboss.product.webserver-tomcat9.version: 5.7.8 release: "4" summary: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK8 on UBI8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-webserver-5/jws57-openjdk8-rhel8-openshift/images/5.7.8-4 vcs-ref: 5a9d2a110fb72b7fb28733440d129547f74a34f4 vcs-type: git vendor: Red Hat, Inc. 
version: 5.7.8 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-03-05T15:46:35Z" Id: sha256:1c36d88704e8e6f9cf66cfb9048f0c4d784d560ff92362e2522c1ad2cdae7930 Size: 212912911 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-webserver-5/jws57-openjdk8-openshift-rhel8@sha256:789f5edf1369a40bf56ca698eafee86b74a8d53b39d100bbb91279aaebceb6d5 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:789f5edf1369a40bf56ca698eafee86b74a8d53b39d100bbb91279aaebceb6d5 resourceVersion: "13801" uid: 253ef7bf-ce48-4c49-9f50-234cef50e904 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:378837c0e24ad4a2e33f0eb3d68dc0c31d9a7dbbd5357d4acafec1d3a7930602 size: 74923740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e17262bc23414bd3c0e9808ad7a87b055fe5afec386da42115a839ea2083d233 size: 1303 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8d9c78c7f9887170d08c57ec73b21e469b4120682a2e82883217535294878c5d size: 3805344 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f4350d5126d0895bb50c2c082a415ff417578d34508a0ef07ec20cebf661ebb7 size: 70368140 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ff675fb12750095d377db23232ae2b63df8026ecf3008fcbe5c82431773e573a size: 25576914 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.5 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JAVA_DATA_DIR=/deployments/data - JOLOKIA_VERSION=1.5.0 - MAVEN_VERSION=3.5 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 3183206101f3 Image: 874cb60ae76db201cf859a058c3682666664a852a1234bb6c551a151a030aa05 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-10-17T22:10:52.566133 com.redhat.build-host: cpt-0007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 2.1.2 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: 8778/tcp:uec,8080/tcp:webcache,8443/tcp:pcsync-https io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Cloud Enablement Feedback name: redhat-openjdk-18/openjdk18-openshift org.concrt.version: 2.1.2 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "14.1539812388" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: 
https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.5-14.1539812388 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 5d5b141069a0b956c0382d59a8384857da9fc950 vcs-type: git vendor: Red Hat, Inc. version: "1.5" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.5 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JAVA_DATA_DIR=/deployments/data - JOLOKIA_VERSION=1.5.0 - MAVEN_VERSION=3.5 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 3183206101f3 Image: sha256:c35044a707a9fd042b68f5fd91151e355200f2a945dec8dcb029feadf696e5fc Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-10-17T22:10:52.566133 com.redhat.build-host: cpt-0007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 2.1.2 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: 8778/tcp:uec,8080/tcp:webcache,8443/tcp:pcsync-https io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Cloud Enablement Feedback name: redhat-openjdk-18/openjdk18-openshift org.concrt.version: 2.1.2 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "14.1539812388" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.5-14.1539812388 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 5d5b141069a0b956c0382d59a8384857da9fc950 vcs-type: git vendor: Red Hat, Inc. 
version: "1.5" User: "185" WorkingDir: /home/jboss Created: "2018-10-17T22:12:34Z" DockerVersion: 1.13.1 Id: sha256:c1bf72469139cd567ebc2b432f68e99a592e29bc7fc688ae81e846be9a21c816 Size: 174681381 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:78af15475eac13d2ff439b33a9c3bdd39147858a824c420e8042fd5f35adce15 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:78af15475eac13d2ff439b33a9c3bdd39147858a824c420e8042fd5f35adce15 resourceVersion: "14172" uid: e9d068b1-637b-4a07-889e-2f54e1e97425 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:96965a3a84248c364364702c0fb90543e329f86044b3394f97701f25b516b9ee size: 39507581 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4d0d850cd4adc37289686142206a183ccbd4e286765ce8fc9890539bbfd38827 size: 1735 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7504d0b31a9a6b70806a2c804c147b554524faabd19ae0d3172c75879fbb3f52 size: 88388953 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.9 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: e321cccbfab0 Image: 436032b41e1c9d4a06283e12624febac8c9e4dfc8068b9abe86406401a0bfb9a Labels: architecture: x86_64 build-date: 2021-07-21T09:25:08.039193 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "3" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.9-3 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 2c06861d0e0374d2c9493eaf59b2b8e129264d9d vcs-type: git vendor: Red Hat, Inc. 
version: "1.9" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.9 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: e321cccbfab0 Image: sha256:c2baa0a125c0b2910a423e9850f74f32c39a6178a19aa9ee1446e8f83c4e54ed Labels: architecture: x86_64 build-date: 2021-07-21T09:25:08.039193 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "3" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.9-3 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 2c06861d0e0374d2c9493eaf59b2b8e129264d9d vcs-type: git vendor: Red Hat, Inc. 
version: "1.9" User: "185" WorkingDir: /home/jboss Created: "2021-07-21T09:27:21Z" DockerVersion: 1.13.1 Id: sha256:8bc1ee1eb4c673b5aa3d2beb7c0de560dfe0a3e2672b88cb61bd098f79201a6d Size: 127903000 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:78bf175cecb15524b2ef81bff8cc11acdf7c0f74c08417f0e443483912e4878a kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:78bf175cecb15524b2ef81bff8cc11acdf7c0f74c08417f0e443483912e4878a resourceVersion: "14196" uid: e608e06b-96d0-4f23-96d7-962bb76c0a70 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2a99c93da16827d9a6254f86f495d2c72c62a916f9c398577577221d35d2c790 size: 39641757 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4418ace46c3dd933f98d83f357f31048e72d5db3d97bccfdb0acef769ee8234f size: 1743 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3b94dc93b5dd50b77a3571bf8c83cd098425eaf04c3a86cf65eb823be8fe1a6e size: 73021285 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: be4e58a52d40 Image: cd6a2b0b52646e801ccf83a59221a56ae40c43593e0f88fbe0cc10162b2276bd Labels: architecture: x86_64 build-date: 2021-12-01T18:44:09.391033 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "11.1638383197" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.10-11.1638383197 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: c0151768fb10aa1bee0151ab5cbd5b9cbe107bea vcs-type: git vendor: Red Hat, Inc. 
version: "1.10" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: be4e58a52d40 Image: sha256:537a71ebf8fe34479f79da33ceb0ffec71d10f7a412d859327dcfec33a3ffe09 Labels: architecture: x86_64 build-date: 2021-12-01T18:44:09.391033 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "11.1638383197" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.10-11.1638383197 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: c0151768fb10aa1bee0151ab5cbd5b9cbe107bea vcs-type: git vendor: Red Hat, Inc. 
version: "1.10" User: "185" WorkingDir: /home/jboss Created: "2021-12-01T18:46:28Z" DockerVersion: 1.13.1 Id: sha256:8a91374c6932c030f3ef070f4ef29aca06e60fc619ab61302e9d5b9829839071 Size: 112669576 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:7bcc365e0ba823ed020ee6e6c3e0c23be5871c8dea3f7f1a65029002c83f9e55 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:7bcc365e0ba823ed020ee6e6c3e0c23be5871c8dea3f7f1a65029002c83f9e55 resourceVersion: "14218" uid: 8879dcf4-7ccd-4b5e-9ef1-830d167d00cd - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:25c75c34b2e2b68ba9245d9cddeb6b8a0887371ed30744064f85241a75704d87 size: 79262296 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:67705065e025181e4faca8aabe1305bdd92f5bdf8a2b8009cdb69183ac2e2c47 size: 49851946 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9d458e2e81cb0fa811f569aaf711628309c0372c7d5eed4a8ea9ec96b4aeeb42 size: 9300456 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b954cb90d4571440ed86627198be2d74d7c3d264fe72e0af0f35f40f0da99ea8 size: 75745362 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a473864e64707b90812a20a3cfe329daa0a7ec4fb2659d24094a29fc050bbacb size: 954091093 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - ART_BUILD_ENGINE=konflux - ART_BUILD_DEPS_METHOD=cachi2 - ART_BUILD_NETWORK=hermetic - ART_BUILD_DEPS_MODE=default - __doozer=merge - BUILD_RELEASE=202510212154.p2.g7f1d6f8.assembly.stream.el9 - BUILD_VERSION=v4.20.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=20 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.20.0-202510212154.p2.g7f1d6f8.assembly.stream.el9-7f1d6f8 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.20 - __doozer_key=ose-cli-artifacts - __doozer_uuid_tag=ose-cli-artifacts-rhel9-v4.20.0-20251021.223340 - __doozer_version=v4.20.0 - OS_GIT_COMMIT=7f1d6f8 - SOURCE_DATE_EPOCH=1761075552 - SOURCE_GIT_COMMIT=7f1d6f88cb0c8ed5c877fc0ae2bd99298c6339f2 - SOURCE_GIT_TAG=openshift-clients-4.12.0-202208031327-1168-g7f1d6f88c - SOURCE_GIT_URL=https://github.com/openshift/oc Labels: License: GPLv2+ architecture: x86_64 build-date: "2025-10-21T23:17:58Z" com.redhat.component: ose-cli-artifacts-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:openshift:4.20::el9 description: Empty distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: OpenShift is a platform for developing, building, and deploying containerized applications. 
io.k8s.display-name: OpenShift Clients io.openshift.build.commit.id: 7f1d6f88cb0c8ed5c877fc0ae2bd99298c6339f2 io.openshift.build.commit.url: https://github.com/openshift/oc/commit/7f1d6f88cb0c8ed5c877fc0ae2bd99298c6339f2 io.openshift.build.source-location: https://github.com/openshift/oc io.openshift.build.versions: kubectl=1.33.3 io.openshift.expose-services: "" io.openshift.maintainer.component: oc io.openshift.maintainer.project: OCPBUGS io.openshift.tags: openshift,cli maintainer: Red Hat, Inc. name: openshift/ose-cli-artifacts-rhel9 org.opencontainers.image.revision: 1b5c23575fef47279ac307305b759a33fe1233c4 release: 202510212154.p2.g7f1d6f8.assembly.stream.el9 summary: Empty url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel-els/images/9.4-847.1719484506 vcs-ref: 1b5c23575fef47279ac307305b759a33fe1233c4 vcs-type: git vendor: Red Hat, Inc. version: v4.20.0 ContainerConfig: {} Created: "2025-10-21T23:28:28Z" Id: sha256:c4de69b5cefe6e6f8d32565b3bce1d50c55aca367aa292e87ea165eb663a5925 Size: 1168278638 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3 resourceVersion: "13317" uid: 85fcad32-bf2c-42b5-b77f-af290bc0e371 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d246e53da6241a619b7dcea7d4403071b9e1961797aa4f6c766786e29732651c size: 76526594 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:91bd2c541a17a588359eb054815718e41f871d03b4d4daf7b3584b25fbdcbb67 size: 1563 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:aab734dcdabc81999b756ecd16b1b1c04fb52722314e2d5ff6e17b96d8a23fe3 size: 114160840 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - 
NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: f18f459d223a Image: 12bca108ee5d8cf1072eff2df3a23c55ee6c997a880cc49d70e95c3e094b5c99 Labels: architecture: x86_64 build-date: 2021-12-02T07:25:12.616806 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1638429558" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.10-1.1638429558 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: a137d0be0569c27ef415a990f8d2270938cd0663 vcs-type: git vendor: Red Hat, Inc. version: "1.10" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: f18f459d223a Image: sha256:882a9c75ba965d44352aee03e17d3041bc59013a73b8dda503ce95a086505e4e Labels: architecture: x86_64 build-date: 2021-12-02T07:25:12.616806 com.redhat.build-host: 
cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1638429558" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.10-1.1638429558 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: a137d0be0569c27ef415a990f8d2270938cd0663 vcs-type: git vendor: Red Hat, Inc. version: "1.10" User: "185" WorkingDir: /home/jboss Created: "2021-12-02T07:44:18Z" DockerVersion: 1.13.1 Id: sha256:d496146df3a8259eca744feff56d02f72d80ca71e536df20954c7135b61dbc44 Size: 190696399 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:7de877b0e748cdb47cb702400f3ddaa3c3744a022887e2213c2bb27775ab4b25 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:7de877b0e748cdb47cb702400f3ddaa3c3744a022887e2213c2bb27775ab4b25 resourceVersion: "14099" uid: 6526584f-3a2f-4e0f-b172-575b87183894 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:96965a3a84248c364364702c0fb90543e329f86044b3394f97701f25b516b9ee size: 39507581 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4d0d850cd4adc37289686142206a183ccbd4e286765ce8fc9890539bbfd38827 size: 1735 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc3f90f22ede9c1de11eddbc3e136e684bf80148929c8428bd14a39577420b6c size: 74106555 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.9 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: e321cccbfab0 Image: a7a02e661dc58c86283a3a273e95b435b00b501b9ceeb06834521db56c8d66bf Labels: architecture: x86_64 build-date: 2021-07-21T09:24:53.073347 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 
runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "3" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.9-3 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 27c8267ebad228142ab22492448953134db1c957 vcs-type: git vendor: Red Hat, Inc. version: "1.9" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.9 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: e321cccbfab0 Image: sha256:a3622fbebff55a0e4b6d12bbfd88c33e6cc9d2a5654b4545f7eb1df41f299d28 Labels: architecture: x86_64 build-date: 2021-07-21T09:24:53.073347 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "3" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.9-3 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 27c8267ebad228142ab22492448953134db1c957 vcs-type: git vendor: Red Hat, Inc. 
version: "1.9" User: "185" WorkingDir: /home/jboss Created: "2021-07-21T09:26:56Z" DockerVersion: 1.13.1 Id: sha256:ab46d56801e32ef028fb5ba139a6df1998650ad83bf1065611d48343056292ee Size: 113620622 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:7ef75cdbc399425105060771cb8e700198cc0bddcfb60bf4311bf87ea62fd440 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:7ef75cdbc399425105060771cb8e700198cc0bddcfb60bf4311bf87ea62fd440 resourceVersion: "14227" uid: fd8232eb-2bea-4f5f-8076-07ad88a6c9c2 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:af063699af1c142fce6707dc9306d122355e61bd23ded0d18f8a4ecfbf3aa89a size: 78847792 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:c81d6d556e6e3a4255dd2709ce18578bfbbf3eed10a4efb966bf99ab69c79e05 size: 9405288 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:40ebe11cc2334d0d9d9ed39393e6d921c4529efb2eb74cbbe16b6bae313b2061 size: 152207353 dockerImageManifestMediaType: application/vnd.oci.image.manifest.v1+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - VERSION=10 - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el10 - NODEJS_VERSION=22 - NPM_RUN=start - NAME=nodejs - NPM_CONFIG_PREFIX=/opt/app-root/src/.npm-global - PATH=/opt/app-root/src/node_modules/.bin/:/opt/app-root/src/.npm-global/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - CNB_STACK_ID=com.redhat.stacks.c10s-nodejs-22 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - SUMMARY=Platform for building and running Node.js 22 applications - DESCRIPTION=Node.js 22 available as container is a base platform for building and running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-28T04:23:44Z" com.redhat.component: nodejs-22-container com.redhat.deployments-dir: /opt/app-root/src com.redhat.dev-mode: DEV_MODE:false com.redhat.dev-mode.port: DEBUG_PORT:5858 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/o:redhat:enterprise_linux:10.0 description: Node.js 22 available as container is a base platform for building and running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. 
distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-nodejs-container io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.c10s-nodejs-22 io.k8s.description: Node.js 22 available as container is a base platform for building and running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. io.k8s.display-name: Node.js 22 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nodejs,nodejs22 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi10/nodejs-22 org.opencontainers.image.revision: 35fce3d2788a2879aeaa4c3d1bcc7362c248149b release: "1761625375" summary: Platform for building and running Node.js 22 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi10/nodejs-22 vcs-ref: 35fce3d2788a2879aeaa4c3d1bcc7362c248149b vcs-type: git vendor: Red Hat, Inc. version: "10.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-28T04:23:59Z" Id: sha256:e54e0543361777408e7cdfbdde86102ebffe0e6e33a6148cf17cc3f8d2549d13 Size: 240477435 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi10/nodejs-22@sha256:7ff7af22f08d9dc8043a73013d629ee03277ff18ad94001092de70fd5917e9e8 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:7ff7af22f08d9dc8043a73013d629ee03277ff18ad94001092de70fd5917e9e8 resourceVersion: "14093" uid: 52b4be48-4fa0-42e2-9970-89b63e3ed21a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7f2c2c4492b6b2d181be862a0a1d1b6f6851cb07244efbcb43d44f9936aa78d5 size: 80005019 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ea092d7970b26c24007a670fc6d0810dbf9531dc0d3a9d6ea514134ba5686724 size: 7541063 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:df8f2f39be965b80a9b8ae52de53e3a71a587009acd1b972541eb0b74424f50b size: 35281086 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - run-postgresql Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el7 - POSTGRESQL_VERSION=10 - POSTGRESQL_PREV_VERSION=9.6 - HOME=/var/lib/pgsql - PGUSER=postgres - APP_DATA=/opt/app-root - SUMMARY=PostgreSQL is an advanced Object-Relational database management system - DESCRIPTION=PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. 
- CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql - ENABLED_COLLECTIONS=rh-postgresql10 - BASH_ENV=/usr/share/container-scripts/postgresql/scl_enable - ENV=/usr/share/container-scripts/postgresql/scl_enable - PROMPT_COMMAND=. /usr/share/container-scripts/postgresql/scl_enable ExposedPorts: 5432/tcp: {} Labels: architecture: x86_64 build-date: 2024-05-30T09:35:40 com.redhat.component: rh-postgresql10-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. distribution-scope: public io.buildah.version: 1.29.0 io.k8s.description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. io.k8s.display-name: PostgreSQL 10 io.openshift.expose-services: 5432:postgresql io.openshift.s2i.assemble-user: "26" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,postgresql,postgresql10,rh-postgresql10 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhscl/postgresql-10-rhel7 release: "185" summary: PostgreSQL is an advanced Object-Relational database management system url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/postgresql-10-rhel7/images/1-185 usage: podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 rhscl/postgresql-10-rhel7 vcs-ref: f48185d986ac9bcb68b79dd9c3fd23967e61fba4 vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "26" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2024-05-30T09:37:58Z" Id: sha256:79eaf6f17e462f0bb3abad43b13072b170e7310a619dd31eee96cddd7b6c3cec Size: 122846968 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhscl/postgresql-10-rhel7@sha256:8027301bb8716941e2a15b7d31b055ec7eba327ad3b7c72fb5accfa077a32521 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:8027301bb8716941e2a15b7d31b055ec7eba327ad3b7c72fb5accfa077a32521 resourceVersion: "14106" uid: 733693e3-1efa-487a-aea8-fab62cd50b4d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4752687a61a97d6f352ae62c381c87564bcb2f5b6523a05510ca1fb60d640216 size: 36442442 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0344366a246a0f7590c2bae4536c01f15f20c6d802b4654ce96ac81047bc23f3 size: 1740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:27394721a6937e88a8f0b4e64ba92f04c32d0f91637b25dbbbd382880c255f36 size: 113467165 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: fd16d6f2e774 Image: b24fd32b5f44d139cf40f237958aa6f4a340ca563e7e9c56101bab0b4b9f496d Labels: architecture: x86_64 build-date: 2022-06-15T16:29:25.438581 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building 
and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1655306434" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.13-1.1655306434 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: b311957b2aafa86184f328b45510b317e72ea9c1 vcs-type: git vendor: Red Hat, Inc. version: "1.13" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: fd16d6f2e774 Image: sha256:a82928d8da6c9096e81ebf31be43aa30c85918e46cb0c6e3e6e6ad3f3f75ca53 Labels: architecture: x86_64 build-date: 2022-06-15T16:29:25.438581 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: 
openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1655306434" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.13-1.1655306434 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: b311957b2aafa86184f328b45510b317e72ea9c1 vcs-type: git vendor: Red Hat, Inc. version: "1.13" User: "185" WorkingDir: /home/jboss Created: "2022-06-15T16:35:36Z" DockerVersion: 1.13.1 Id: sha256:f6e320209fcf654664094417273c9275e710e28a7e0393b6ff3c5fac472d7627 Size: 149918863 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8@sha256:81684e422367a075ac113e69ea11d8721416ce4bedea035e25313c5e726fd7d1 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:81684e422367a075ac113e69ea11d8721416ce4bedea035e25313c5e726fd7d1 resourceVersion: "14210" uid: c5be21e6-0bd2-469b-88db-f486faa5f05c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c4877503c8d2f934dcdfd76623f2b9935529fe73a1432cae4abba022c6951afd size: 79158758 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7c48fd933e32825452e3b72ff9e56a3d4db20281e05205aa4de1a44101718288 size: 102994046 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e98abb245aac78ad91bd13290e6bd1e669049593c7ed8324739f01e747f3b5aa size: 29967136 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jws-5.7/tomcat/bin/launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - CATALINA_OPTS=-Djava.security.egd=file:/dev/./urandom - JBOSS_PRODUCT=webserver - JBOSS_WEBSERVER_VERSION=5.7.1 - JPDA_ADDRESS=8000 - JWS_HOME=/opt/jws-5.7/tomcat - PRODUCT_VERSION=5.7.1 - TOMCAT_VERSION=9.0.62 - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JWS_S2I_MODULE=/opt/jboss/container/jws/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss-webserver-5/jws57-openjdk8-rhel8-openshift - JBOSS_IMAGE_VERSION=5.7.1 - STI_BUILDER=jee ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-03-22T11:42:21 com.redhat.component: jboss-webserver-57-openjdk8-rhel8-openshift-container com.redhat.deployments-dir: /opt/jws-5.7/tomcat/webapps com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: 
JPDA_ADDRESS:8000 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK8 on UBI8 distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 4.3.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running web applications on JBoss Web Server 5.7 with OpenJDK8 - Tomcat v9 io.k8s.display-name: JBoss Web Server 5.7 OpenJDK8 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,tomcat9 maintainer: szappis@redhat.com name: jboss-webserver-5/jws57-openjdk8-rhel8-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/jws-5.7/tomcat/webapps org.jboss.product: webserver-tomcat9 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 5.7.1 org.jboss.product.webserver-tomcat9.version: 5.7.1 release: "2.1679484388" summary: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK8 on UBI8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-webserver-5/jws57-openjdk8-rhel8-openshift/images/5.7.1-2.1679484388 vcs-ref: b76974c8ca96f84dd046416e215d6bb6c6143809 vcs-type: git vendor: Red Hat, Inc. version: 5.7.1 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-03-22T11:46:08Z" Id: sha256:534ebd82522d920ad16a57815a20458bd5c8f1287794e1ed4f5c6bfc437d1b91 Size: 212161123 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-webserver-5/jws57-openjdk8-openshift-rhel8@sha256:85093d0f55d06662420925f64e914ff05499c79c2ede3ef80085a44d40f16a80 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:85093d0f55d06662420925f64e914ff05499c79c2ede3ef80085a44d40f16a80 resourceVersion: "13791" uid: bb3a31c3-826f-44da-915e-cd276c53411e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:fc5b206e9329a1674dd9e8efbee45c9be28d0d0dcbabba3c6bb67a2f22cfcf2a size: 76240726 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e7021e0589e97471d99c4265b7c8e64da328e48f116b5f260353b2e0a2adb373 size: 1744 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:aef76d9b756f4b41636967f6f95dc208dc083ff9330fa218bd2e2b066d48d9bd size: 369657334 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:437304b712427332554361693d6f14f04b53117528365b179b460ea3c0d731b4 size: 387995280 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_EAP_VERSION=7.3.2 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.2 - WILDFLY_VERSION=7.3.2.GA-redhat-00002 - SSO_FORCE_LEGACY_SECURITY=true - 
JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.4.Final - GALLEON_WILDFLY_VERSION=4.2.7.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - S2I_FP_VERSION=18.0.0.Final - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api,com.sun.crypto.provider - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap73-openjdk8-openshift-rhel7 - JBOSS_IMAGE_VERSION=7.3.2 - WILDFLY_CAMEL_VERSION=5.6.0.fuse-780027-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 51124b2d4953 Image: 4a3600a70fd852a914595cce9bbd668dda44064bebf0ef91972d53757a29ba17 Labels: architecture: x86_64 build-date: 2021-06-22T10:26:50.499540 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: 
/opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Camel applications on EAP 7.3 distribution-scope: public io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.3 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Otavio Piske name: fuse7/fuse-eap-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.3.2 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.3.2 release: "17" summary: Platform for building and running Apache Camel applications on EAP 7.3 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.8-17 vcs-ref: 3c5cf944b8a38ac0fbf6837278aba31d5155a185 vcs-type: git vendor: Red Hat, Inc. version: "1.8" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_EAP_VERSION=7.3.2 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.2 - WILDFLY_VERSION=7.3.2.GA-redhat-00002 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.4.Final - GALLEON_WILDFLY_VERSION=4.2.7.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - 
GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - S2I_FP_VERSION=18.0.0.Final - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api,com.sun.crypto.provider - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap73-openjdk8-openshift-rhel7 - JBOSS_IMAGE_VERSION=7.3.2 - WILDFLY_CAMEL_VERSION=5.6.0.fuse-780027-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 51124b2d4953 Image: sha256:4232cd4533a083abe38562a5778f59b40681c639a4716f8da57b7fa9c6dfb9e6 Labels: architecture: x86_64 build-date: 2021-06-22T10:26:50.499540 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Camel applications on EAP 7.3 distribution-scope: public io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.3 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Otavio Piske name: fuse7/fuse-eap-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.3.2 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.3.2 release: "17" summary: Platform for building and running Apache Camel applications on EAP 7.3 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.8-17 vcs-ref: 3c5cf944b8a38ac0fbf6837278aba31d5155a185 vcs-type: git vendor: Red Hat, Inc. 
version: "1.8" User: "185" WorkingDir: /home/jboss Created: "2021-06-22T10:28:43Z" DockerVersion: 1.13.1 Id: sha256:7fd513295ba07faabb1b854647ddc5efd7aaf5e1da154b62a76706281cbe93ad Size: 833907243 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift@sha256:852d8ea448cfb93036fc5f1b69f58249bc2e4454d326bd927839c5de6ce50a7b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:852d8ea448cfb93036fc5f1b69f58249bc2e4454d326bd927839c5de6ce50a7b resourceVersion: "14045" uid: b639ff9d-e3cc-4164-9bba-2068a0077977 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:af063699af1c142fce6707dc9306d122355e61bd23ded0d18f8a4ecfbf3aa89a size: 78847792 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:c81d6d556e6e3a4255dd2709ce18578bfbbf3eed10a4efb966bf99ab69c79e05 size: 9405288 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:17685c6e7b61aa5529bde52c46f996d37a3a1e7b434b243f0a93af071b73c3b0 size: 14548139 dockerImageManifestMediaType: application/vnd.oci.image.manifest.v1+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el10 - NAME=nginx - NGINX_VERSION=1.26 - NGINX_SHORT_VER=126 - VERSION=0 - SUMMARY=Platform for running nginx 1.26 or building nginx-based application - DESCRIPTION=Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.26 daemon. The image can be used as a base image for other applications based on nginx 1.26 web server. Nginx server image can be extended using source-to-image tool. - NGINX_CONFIGURATION_PATH=/opt/app-root/etc/nginx.d - NGINX_CONF_PATH=/etc/nginx/nginx.conf - NGINX_DEFAULT_CONF_PATH=/opt/app-root/etc/nginx.default.d - NGINX_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/nginx - NGINX_APP_ROOT=/opt/app-root - NGINX_LOG_PATH=/var/log/nginx - NGINX_PERL_MODULE_PATH=/opt/app-root/etc/perl ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-16T00:15:38Z" com.redhat.component: nginx-126-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/o:redhat:enterprise_linux:10.0 description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.26 daemon. The image can be used as a base image for other applications based on nginx 1.26 web server. Nginx server image can be extended using source-to-image tool. 
distribution-scope: public help: For more information visit https://github.com/sclorg/nginx-container io.buildah.version: 1.41.4 io.k8s.description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.26 daemon. The image can be used as a base image for other applications based on nginx 1.26 web server. Nginx server image can be extended using source-to-image tool. io.k8s.display-name: Nginx 1.26 io.openshift.expose-services: 8443:https io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nginx,nginx-126 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi10/nginx-126 org.opencontainers.image.revision: 5f49473c03c662a2dd2cda010888ba458eaf82ea release: "1760573692" summary: Platform for running nginx 1.26 or building nginx-based application url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi10/nginx-126 vcs-ref: 5f49473c03c662a2dd2cda010888ba458eaf82ea vcs-type: git vendor: Red Hat, Inc. version: "10.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-16T00:15:44Z" Id: sha256:ee41d8c7a2e378523c7ec25c7706336823e6c11bc957f3b0b1b78f6ac79f748a Size: 102818888 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi10/nginx-126@sha256:865ce4a073a00133133ba2d375cb9529dab8d10cf2aebd5537e9028f21aa261b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:865ce4a073a00133133ba2d375cb9529dab8d10cf2aebd5537e9028f21aa261b resourceVersion: "14060" uid: 7fd1d5e1-c258-47bd-939d-51195fcb43f2 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4752687a61a97d6f352ae62c381c87564bcb2f5b6523a05510ca1fb60d640216 size: 36442442 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0344366a246a0f7590c2bae4536c01f15f20c6d802b4654ce96ac81047bc23f3 size: 1740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2d0ad29676d908739bbe514232a9d435a6846938a597d376bb8323e0c52c5fc2 size: 117783682 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - 
JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: fd16d6f2e774 Image: e68803b1f8baa603307a9ff6909df95f45ccb9d227d163263fb1da2f6e141c32 Labels: architecture: x86_64 build-date: 2022-06-15T16:19:30.415682 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1655306377" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.13-1.1655306377 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: 5c420ef50250635153dd2f037402814b3555412f vcs-type: git vendor: Red Hat, Inc. 
version: "1.13" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: fd16d6f2e774 Image: sha256:1e9249edf3c6ae61ab3dc9e75321bb5fb114f71fbdbd5876ab64a745811d9872 Labels: architecture: x86_64 build-date: 2022-06-15T16:19:30.415682 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1655306377" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.13-1.1655306377 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: 5c420ef50250635153dd2f037402814b3555412f vcs-type: git vendor: Red Hat, Inc. 
version: "1.13" User: "185" WorkingDir: /home/jboss Created: "2022-06-15T16:26:00Z" DockerVersion: 1.13.1 Id: sha256:f2fb2918903b2a27c34df5bc33958a39d01fc478e6940bde6fbb918cb841ee33 Size: 154235364 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11@sha256:868224c3b7c309b9e04003af70a5563af8e4c662f0c53f2a7606e0573c9fad85 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:868224c3b7c309b9e04003af70a5563af8e4c662f0c53f2a7606e0573c9fad85 resourceVersion: "14173" uid: 8673a3ac-628a-461d-969d-81452ebfec3e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c8c24bf4ef435ee29d7495ca732a4d82374c1a11c25ca6aae12f997f45ca5a26 size: 39733597 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9abb5e74a5f821b8cab5d489b7a3ebe6bad4b3ee1eb3e7748583419f1ec6c43a size: 63943566 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - '"./${DOTNET_DEFAULT_CMD}"' Env: - container=oci - HOME=/opt/app-root - PATH=/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - DOTNET_APP_PATH=/opt/app-root/app - DOTNET_DATA_PATH=/opt/app-root/data - DOTNET_DEFAULT_CMD=default-cmd.sh - DOTNET_RUNNING_IN_CONTAINER=true - NUGET_XMLDOC_MODE=skip - ASPNETCORE_URLS=http://*:8080 - APP_UID=1001 - DOTNET_VERSION=9.0.10 - ASPNET_VERSION=9.0.10 ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T16:23:40Z" com.redhat.component: dotnet-90-runtime-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Platform for running .NET 9 applications distribution-scope: public dotnet_version: 9.0.10 io.buildah.version: 1.41.4 io.k8s.description: Platform for running .NET 9 applications io.k8s.display-name: .NET 9 io.openshift.expose-services: 8080:http io.openshift.tags: runtime,.net,dotnet,dotnetcore,dotnet90-runtime maintainer: Red Hat, Inc. name: ubi8/dotnet-90-runtime org.opencontainers.image.revision: bb2560ddf8f7bde1e795ce741e9021d87dbf007b release: "1761063774" summary: .NET 9 runtime url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: bb2560ddf8f7bde1e795ce741e9021d87dbf007b vcs-type: git vendor: Red Hat, Inc. 
version: "9.0" User: "1001" WorkingDir: /opt/app-root/app ContainerConfig: {} Created: "2025-10-21T16:23:47Z" Id: sha256:9f658b1b18972ec44a4e3b7cebb13ec240b200dd4cd04b05e5c481dd291f837d Size: 103689289 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/dotnet-90-runtime@sha256:88751105dd023552164e3c312742986b011078becb28f1464f1524d134925d73 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:88751105dd023552164e3c312742986b011078becb28f1464f1524d134925d73 resourceVersion: "13461" uid: 53c20b81-5b20-4ae5-8583-23b0e91ec178 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:cbec86182abf32d32e3a23824ea1b33138ab11eacd2791db4a9cd206f29762b4 size: 204149931 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el8 - NODEJS_VERSION=22 - NPM_RUN=start - NAME=nodejs - NPM_CONFIG_PREFIX=/opt/app-root/src/.npm-global - PATH=/opt/app-root/src/node_modules/.bin/:/opt/app-root/src/.npm-global/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - CNB_STACK_ID=com.redhat.stacks.ubi8-nodejs-22 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - SUMMARY=Platform for building and running Node.js 22 applications - DESCRIPTION=Node.js 22 available as container is a base platform for building and running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. - BASH_ENV=/opt/rh/gcc-toolset-13/enable - ENV=/opt/rh/gcc-toolset-13/enable - PROMPT_COMMAND=. /opt/rh/gcc-toolset-13/enable ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T16:22:36Z" com.redhat.component: nodejs-22-container com.redhat.deployments-dir: /opt/app-root/src com.redhat.dev-mode: DEV_MODE:false com.redhat.dev-mode.port: DEBUG_PORT:5858 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Node.js 22 available as container is a base platform for building and running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. 
distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-nodejs-container io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi8-nodejs-22 io.k8s.description: Node.js 22 available as container is a base platform for building and running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. io.k8s.display-name: Node.js 22 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nodejs,nodejs22 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/nodejs-22 org.opencontainers.image.revision: 49b8b2f2a34142b3b914203f829d6e96d9a2683d release: "1761063689" summary: Platform for building and running Node.js 22 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi8/nodejs-22:latest vcs-ref: 49b8b2f2a34142b3b914203f829d6e96d9a2683d vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T16:23:03Z" Id: sha256:db248d0ec5a7ea1a2f95f44b96f93595609a7215d8e02010960c8bc6562fd830 Size: 299709077 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/nodejs-22@sha256:887cc9e7fd3ce89adf6233aaf52b0243930e1a958190a09bf37c10f069890ee7 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:887cc9e7fd3ce89adf6233aaf52b0243930e1a958190a09bf37c10f069890ee7 resourceVersion: "14094" uid: b882c3ce-1465-4d5c-9580-7c78039a4b2e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ec0a4551131fd4f14d0a75627716987d815e6a7d000c6aec0ad2250db63285fb size: 76260862 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:448f7cafed668a949885b3817a3154f2d7f933119c6bc15f497e5889ba562000 size: 1319 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:09046bcf6b9ce63012e726b1303b9bfe5d8267a8d69bc652cfe15ce360906ae8 size: 358721836 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:959b6ec1c4635f73fc3e1116039ec4924ad272a0bca2250c54b93048fcf3b878 size: 348675297 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_EAP_VERSION=7.2.8.GA - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.2.8.GA - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - 
S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap72-openshift - JBOSS_IMAGE_VERSION=1.2 - WILDFLY_CAMEL_VERSION=5.4.0.fuse-760021-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: eec8c28ca87f Image: 1dec826b36b51f1cb896136514b1350aed992762fe4e2b1e18822e01a27d3bb5 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-06-11T14:36:50.138967 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Camel applications on EAP 7.2.1 distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.2.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Otavio Piske name: fuse7/fuse-eap-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.2.8.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.2.8.GA release: "23.1591885742" summary: Platform for building and running Apache Camel applications on EAP 7.2.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.6-23.1591885742 vcs-ref: 6b471823b8f4348a63e3ce19cb5b05b4c46f6d2f vcs-type: git vendor: Red Hat, Inc. 
version: "1.6" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_EAP_VERSION=7.2.8.GA - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.2.8.GA - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap72-openshift - JBOSS_IMAGE_VERSION=1.2 - WILDFLY_CAMEL_VERSION=5.4.0.fuse-760021-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: eec8c28ca87f Image: sha256:d178cb6268264a07bb6fccc8a95c20057b8eb16e5717e1e8885533aeba109d9b Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-06-11T14:36:50.138967 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Camel applications on EAP 7.2.1 distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.2.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Otavio Piske name: fuse7/fuse-eap-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.2.8.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.2.8.GA release: "23.1591885742" summary: Platform for building and running Apache Camel applications on EAP 7.2.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.6-23.1591885742 vcs-ref: 6b471823b8f4348a63e3ce19cb5b05b4c46f6d2f vcs-type: git vendor: Red Hat, Inc. 
version: "1.6" User: "185" WorkingDir: /home/jboss Created: "2020-06-11T14:37:49Z" DockerVersion: 1.13.1 Id: sha256:a539fa5a2747ab4170a2965b45d87714801a81010359cb90a8bae7b1841b706f Size: 783667654 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift@sha256:8968c86f5e2831796cf5f464c87a911b5513fd543b7f3485ccc497fe05ad6bca kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:8968c86f5e2831796cf5f464c87a911b5513fd543b7f3485ccc497fe05ad6bca resourceVersion: "14040" uid: 6410eb36-632b-47c9-9d9d-7e7fe7ac6d73 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:06f86e50a0b74ff9eb161a7d781228877c90e8ff57e9689e8cb8b0f092a2a9f9 size: 39268171 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b7df3aef01631b78d685e3ac9288f9670fc617b99af7198c052e2ad54085150e size: 108996511 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.15 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-04-25T05:07:18 com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 4.3.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk 
org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1682399183" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.15-1.1682399183 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: abf4e05b84271b5340aee7845f3c982034b4b70a vcs-type: git vendor: Red Hat, Inc. version: "1.15" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-04-25T05:22:09Z" Id: sha256:df2c1c9fe6c03102b3715d957eba61d386cba34ba89ffa1283f7664a807ed12e Size: 148294269 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8@sha256:8a5b580b76c2fc2dfe55d13bb0dd53e8c71d718fc1a3773264b1710f49060222 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:8a5b580b76c2fc2dfe55d13bb0dd53e8c71d718fc1a3773264b1710f49060222 resourceVersion: "14212" uid: 08988084-56c5-426d-b5cf-4e08844cd751 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8abc860612807800afc77e8631e844c183642c3fd7bf28098329a9a471c51bfa size: 79390915 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:97b035c29708db1135c3cf7073001a81c20971955c0c36ac92e2ffd4f9171c2d size: 119428849 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:12311e71d6e48d14a6017415e1f8be8ac187a74a92fc102876c1728de7ea0468 size: 33661931 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jws-5.7/tomcat/bin/launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - CATALINA_OPTS=-Djava.security.egd=file:/dev/./urandom - JBOSS_PRODUCT=webserver - JBOSS_WEBSERVER_VERSION=5.7.0 - JPDA_ADDRESS=8000 - JWS_HOME=/opt/jws-5.7/tomcat - PRODUCT_VERSION=5.7.0 - TOMCAT_VERSION=9.0.62 - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JWS_S2I_MODULE=/opt/jboss/container/jws/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss-webserver-5/jws57-openjdk11-rhel8-openshift - JBOSS_IMAGE_VERSION=5.7.0 - STI_BUILDER=jee ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2022-10-28T10:50:40 com.redhat.component: jboss-webserver-57-openjdk11-rhel8-openshift-container com.redhat.deployments-dir: /opt/jws-5.7/tomcat/webapps com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: 
JPDA_ADDRESS:8000 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK11 on UBI8 distribution-scope: public io.buildah.version: 1.26.2 io.cekit.version: 4.3.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running web applications on JBoss Web Server 5.7 with OpenJDK11 - Tomcat v9 io.k8s.display-name: JBoss Web Server 5.7 OpenJDK11 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,tomcat9 maintainer: szappis@redhat.com name: jboss-webserver-5/jws57-openjdk11-rhel8-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/jws-5.7/tomcat/webapps org.jboss.product: webserver-tomcat9 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 5.7.0 org.jboss.product.webserver-tomcat9.version: 5.7.0 release: "6" summary: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK11 on UBI8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-webserver-5/jws57-openjdk11-rhel8-openshift/images/5.7.0-6 vcs-ref: cbf826cef3c5ebd698d9a52a2819dad1f0228bf4 vcs-type: git vendor: Red Hat, Inc. version: 5.7.0 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2022-10-28T10:54:50Z" Id: sha256:8b30e9c7537de1962f24c17565a9f8b9ff19a7a06f3f31671926bd9fac428a1c Size: 232522643 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-webserver-5/jws57-openjdk11-openshift-rhel8@sha256:8a7d4c245418f8099293270f8bbcf7a4207839c4c4ef9974c2e16e303329edf3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:8a7d4c245418f8099293270f8bbcf7a4207839c4c4ef9974c2e16e303329edf3 resourceVersion: "13695" uid: cd973a16-ec29-4198-90d1-efc075a01254 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:50be7bb6ce3ddb41606e1956ba5c61072699ac536980f260a0db6dc59c8013fe size: 39575081 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ff358ef63d0b0476b2197592c761610d777f3f52226ba4e1db7df1aedc6de3c7 size: 107181264 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - 
JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.23 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2025-10-23T20:03:09 com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.buildah.version: 1.33.12 io.cekit.version: 4.13.0.dev0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 org.opencontainers.image.documentation: https://rh-openjdk.github.io/redhat-openjdk-containers/ release: "3.1761249764" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.23-3.1761249764 usage: https://rh-openjdk.github.io/redhat-openjdk-containers/ vcs-ref: 085819642ed3bd1514fc178d5c27c6ca99781b74 vcs-type: git vendor: Red Hat, Inc. 
version: "1.23" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-10-23T20:17:06Z" Id: sha256:4c55761852e50dbdc4c9f62ac6d4322e7b375dcc3cbf0efee03fe015f8039003 Size: 146786806 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/openjdk-8@sha256:8abcc25fba7202b68818271353a9203677ac3c2d638dafc84e6b45e68d913f59 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:8abcc25fba7202b68818271353a9203677ac3c2d638dafc84e6b45e68d913f59 resourceVersion: "13690" uid: f16c313c-5584-4753-96b7-006072345b71 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e0348fdb2685077d22116d294a90a253709aba78815882a57fcc536b22dcae2f size: 39488293 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:50d776090f4e8d167cbe918c0da58f7b67533ab58d59ffa6acb6f2fad084834a size: 113694045 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.21 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2025-01-08T12:07:12 com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.buildah.version: 1.33.8 io.cekit.version: 4.13.0.dev0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments 
org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" org.opencontainers.image.documentation: https://rh-openjdk.github.io/redhat-openjdk-containers/ release: "1.1736337912" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.21-1.1736337912 usage: https://rh-openjdk.github.io/redhat-openjdk-containers/ vcs-ref: 1a9ebd45b5208ff39ad07a2699430945b7d00eda vcs-type: git vendor: Red Hat, Inc. version: "1.21" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-01-08T12:35:33Z" Id: sha256:0c46377f1021ce6db9f2457907fe43fd1001b2ecc1ae2ac8ad2e699377813dea Size: 153213042 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/openjdk-11@sha256:8be99c30a4e5b021129310847bddca64a93fb38b0c8dfeac482b4c28de062e5a kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:8be99c30a4e5b021129310847bddca64a93fb38b0c8dfeac482b4c28de062e5a resourceVersion: "13686" uid: 003d986f-aa2d-4ef7-b224-a906805a94a9 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:96bd051f1942fe5ab409f17e3e423d97832439ba0ff2a8e505a039d6d08bfb73 size: 76410645 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:159a7b5d1b30a534b218f68e14a0f86ec7e6968fbf582c899d60754f2a063f20 size: 1500 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0e3a6ea4497c7683ea0b09b9b426edaf96e9dea0e3a448d48bc893b77b850738 size: 109409861 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b8a8fa65e643e0f369106eb809d0471ed5214056191a5a4c573ad4ef8bf1b649 size: 17127007 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2.redhat-00002 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - 
JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.8 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.8 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages - JAVACONFDIRS=/opt/rh/rh-maven35/root/etc/java - XDG_CONFIG_DIRS=/opt/rh/rh-maven35/root/etc/xdg:/etc/xdg - XDG_DATA_DIRS=/opt/rh/rh-maven35/root/usr/share:/usr/local/share:/usr/share ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Hostname: b621bb5542e9 Image: 6cd30300db17eeb6079dde6b2884979aaaf45b4caecd6f6fd1b0032c05d7d11e Labels: architecture: x86_64 build-date: 2021-06-22T13:46:32.233791 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "19" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.8-19 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 9bc77b209025a4153be16c8f1c2aa12e43d1f0a2 vcs-type: git vendor: Red Hat, Inc. 
version: "1.8" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2.redhat-00002 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.8 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.8 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages - JAVACONFDIRS=/opt/rh/rh-maven35/root/etc/java - XDG_CONFIG_DIRS=/opt/rh/rh-maven35/root/etc/xdg:/etc/xdg - XDG_DATA_DIRS=/opt/rh/rh-maven35/root/usr/share:/usr/local/share:/usr/share ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Hostname: b621bb5542e9 Image: sha256:edc6088e532f9ac489c5c9b4cb0c73b67c6ce16ee4447d7ee63c0c249f453e55 Labels: architecture: x86_64 build-date: 2021-06-22T13:46:32.233791 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "19" summary: Build and run Spring Boot-based integration applications url: 
https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.8-19 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 9bc77b209025a4153be16c8f1c2aa12e43d1f0a2 vcs-type: git vendor: Red Hat, Inc. version: "1.8" User: "185" WorkingDir: /home/jboss Created: "2021-06-22T13:49:14Z" DockerVersion: 1.13.1 Id: sha256:4c5c8b73561437cf5c6e0f629b05b150ef1815ed011d8663088ee856b751ed1f Size: 202957959 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift@sha256:8c148fc54caeb860262c3710675c97c58aba79af2d3c76de795b97143aae0e3f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:8c148fc54caeb860262c3710675c97c58aba79af2d3c76de795b97143aae0e3f resourceVersion: "14053" uid: 10d1343c-b57f-45f7-ab3f-beac30bd545b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d2bb55755c2edf5423e38bd42c48b277e0cb4c5c765218247001a8c8eb479a87 size: 215199578 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:fbacd34c462506669dc03b1b82d7b82d81294c00bba26f1d3768de23c66b1bf9 size: 48612301 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el9 - NODEJS_VER=20 - PYTHON_VERSION=3.9 - PATH=/opt/app-root/src/.local/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PYTHONUNBUFFERED=1 - PYTHONIOENCODING=UTF-8 - LC_ALL=en_US.UTF-8 - LANG=en_US.UTF-8 - CNB_STACK_ID=com.redhat.stacks.ubi9-python-39 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - PIP_NO_CACHE_DIR=off - SUMMARY=Platform for building and running Python 3.9 applications - DESCRIPTION=Python 3.9 available as container is a base platform for building and running various Python 3.9 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. - BASH_ENV=/opt/app-root/bin/activate - ENV=/opt/app-root/bin/activate - PROMPT_COMMAND=. /opt/app-root/bin/activate ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T16:22:20Z" com.redhat.component: python-39-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Python 3.9 available as container is a base platform for building and running various Python 3.9 applications and frameworks. 
Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. distribution-scope: public io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi9-python-39 io.k8s.description: Python 3.9 available as container is a base platform for building and running various Python 3.9 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. io.k8s.display-name: Python 3.9 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,python,python39,python-39,rh-python39 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/python-39 org.opencontainers.image.revision: 8a625fa3d73e8e925536aa57b3b34af04b6d61a1 release: "1760372481" summary: Platform for building and running Python 3.9 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-python-container.git --context-dir=3.9/test/setup-test-app/ ubi9/python-39 python-sample-app vcs-ref: 8a625fa3d73e8e925536aa57b3b34af04b6d61a1 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T16:22:34Z" Id: sha256:b544bb26771b4c0ca36a51ef3de16ab2b3a0d13c93bdc2a318509cb56032a0e9 Size: 360915719 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/python-39@sha256:8c1d5419ebfe1eb1a888c32a456f48ea01e1a6a33c7db59acf689ede1d944516 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:06Z" name: sha256:8c1d5419ebfe1eb1a888c32a456f48ea01e1a6a33c7db59acf689ede1d944516 resourceVersion: "14133" uid: bf56d930-176a-4e8d-b394-4bad482d912a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:235e42580a3cfee97f71e29c6ec70496e11ed2f12a7aa75234398ebbc328e586 size: 98096183 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-postgresql Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - POSTGRESQL_VERSION=12 - POSTGRESQL_PREV_VERSION=10 - HOME=/var/lib/pgsql - PGUSER=postgres - APP_DATA=/opt/app-root - SUMMARY=PostgreSQL is an advanced Object-Relational database 
management system - DESCRIPTION=PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql - ENABLED_COLLECTIONS= ExposedPorts: 5432/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T16:20:00Z" com.redhat.component: postgresql-12-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. io.k8s.display-name: PostgreSQL 12 io.openshift.expose-services: 5432:postgresql io.openshift.s2i.assemble-user: "26" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,postgresql,postgresql12,postgresql-12 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel8/postgresql-12 org.opencontainers.image.revision: 34b340acc69e166be1ace1971cec764be1bd32be release: "1761063442" summary: PostgreSQL is an advanced Object-Relational database management system url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 rhel8/postgresql-12 vcs-ref: 34b340acc69e166be1ace1971cec764be1bd32be vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "26" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T16:20:21Z" Id: sha256:63ac874262365667e82296277bcd9ddba79c4ee9183b83a242c06a8ac51fe98e Size: 193654414 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel8/postgresql-12@sha256:8d57f273f8521c9b2d55756dbff05559184d1aeec46517e46c71de97cd72c12b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:8d57f273f8521c9b2d55756dbff05559184d1aeec46517e46c71de97cd72c12b resourceVersion: "14109" uid: 92336955-41f7-4aad-adcd-63211f4ccf0b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:96bd051f1942fe5ab409f17e3e423d97832439ba0ff2a8e505a039d6d08bfb73 size: 76410645 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:159a7b5d1b30a534b218f68e14a0f86ec7e6968fbf582c899d60754f2a063f20 size: 1500 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0e3a6ea4497c7683ea0b09b9b426edaf96e9dea0e3a448d48bc893b77b850738 size: 109409861 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:eb9c9fe2d77eb7fc3c382d2a817ad73a42753bbb3264cde397ca1956461082dd size: 17129421 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2.redhat-00002 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.8 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.8 - KARAF_FRAMEWORK_VERSION=4.2.9.fuse-780023-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Hostname: b621bb5542e9 Image: c7de340e3efa553fdb8638489a9e9a3e20caaf712f7045da505cdd7c40524367 Labels: 
architecture: x86_64 build-date: 2021-06-22T13:20:43.822235 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.karaf: 4.2.9.fuse-780023-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "27" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.8-27 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 8cd8b16f18f4e66c80db9c8e34f4a25931beecac vcs-type: git vendor: Red Hat, Inc. version: "1.8" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2.redhat-00002 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.8 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.8 - KARAF_FRAMEWORK_VERSION=4.2.9.fuse-780023-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - 
'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Hostname: b621bb5542e9 Image: sha256:4ce058a449ae714e130b49bd15101cac9e7ed5e19995086476c9f04930b3bf4f Labels: architecture: x86_64 build-date: 2021-06-22T13:20:43.822235 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.karaf: 4.2.9.fuse-780023-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "27" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.8-27 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 8cd8b16f18f4e66c80db9c8e34f4a25931beecac vcs-type: git vendor: Red Hat, Inc. 
version: "1.8" User: "185" WorkingDir: /home/jboss Created: "2021-06-22T13:23:40Z" DockerVersion: 1.13.1 Id: sha256:05b513e1291369e7ba146b899e91248c77c5c60f0f0d7de69c6847f937e38541 Size: 202960338 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:8e83f3ef3b5ad4bd7c4002d0201e4d5dd26a158c0be3ad29405ff4800d5661b8 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:8e83f3ef3b5ad4bd7c4002d0201e4d5dd26a158c0be3ad29405ff4800d5661b8 resourceVersion: "14041" uid: 031bb92a-af53-44a5-a92c-789a8cecf141 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:06f86e50a0b74ff9eb161a7d781228877c90e8ff57e9689e8cb8b0f092a2a9f9 size: 39268171 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d09aca24592b99820eb623c3a56914ab82562e5a4e37aa67ece0402d832e3100 size: 112844388 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.15 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-04-21T06:45:36 com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 4.3.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk 
org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1682059493" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.15-1.1682059493 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: 90e40029d38fdb9478a55b716811d1cb08fd31f8 vcs-type: git vendor: Red Hat, Inc. version: "1.15" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-04-21T07:01:06Z" Id: sha256:d1ce871371c268991ea2f4c4dd5b5dcd972f9a68bc55f48b320afe6fa43482b9 Size: 152142633 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11@sha256:9036a59a8275f9c205ef5fc674f38c0495275a1a7912029f9a784406bb00b1f5 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:9036a59a8275f9c205ef5fc674f38c0495275a1a7912029f9a784406bb00b1f5 resourceVersion: "14177" uid: 6bd04940-d553-41ba-aa4c-5bdc1794ee94 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5329d7039f252afc1c5d69521ef7e674f71c36b50db99b369cbb52aa9e0a6782 size: 39330100 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4010c146a5d3c1fc55941f0f2d867dcec12af55b5f2f76ef470fb867848424a5 size: 88987292 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.16 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-07-20T20:08:20 com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "3" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.16-3 vcs-ref: eeccc001a658b49635214b48ff00acf4fa195ada vcs-type: git vendor: Red Hat, Inc. 
version: "1.16" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-07-20T20:13:11Z" Id: sha256:b3ab963ea6f591c8d7d66719238097970502eabdc5e76ce15ee35ad31e2515f4 Size: 128335849 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:920ff7e5efc777cb523669c425fd7b553176c9f4b34a85ceddcb548c2ac5f78a kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:920ff7e5efc777cb523669c425fd7b553176c9f4b34a85ceddcb548c2ac5f78a resourceVersion: "14193" uid: 5b0435d0-2cb9-43a5-938e-f599b45d67f4 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c8c24bf4ef435ee29d7495ca732a4d82374c1a11c25ca6aae12f997f45ca5a26 size: 39733597 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:49a47137b1a2136617a3a472bc16821c293335abafc0672f42561e1bf16a0f88 size: 39940782 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - container=oci - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - NPM_RUN=start - PLATFORM=el8 - NODEJS_VERSION=20 - NAME=nodejs - SUMMARY=Minimal image for running Node.js 20 applications - DESCRIPTION=Node.js 20 available as container is a base platform for running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. - NPM_CONFIG_PREFIX=/opt/app-root/src/.npm-global - PATH=/opt/app-root/src/node_modules/.bin/:/opt/app-root/src/.npm-global/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T16:14:58Z" com.redhat.component: nodejs-20-minimal-container com.redhat.deployments-dir: /opt/app-root/src com.redhat.dev-mode: DEV_MODE:false com.redhat.dev-mode.port: DEBUG_PORT:5858 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Node.js 20 available as container is a base platform for running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-nodejs-container io.buildah.version: 1.41.4 io.k8s.description: Node.js 20 available as container is a base platform for running various Node.js 20 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. 
io.k8s.display-name: Node.js 20 Minimal io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nodejs,nodejs20 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/nodejs-20-minimal org.opencontainers.image.revision: cb238afef55d7c411b9322efeba590433e424d52 release: "1761063274" summary: Minimal image for running Node.js 20 applications url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: cb238afef55d7c411b9322efeba590433e424d52 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T16:15:09Z" Id: sha256:6b7383f3a11cf290881ca9dacadc5025b578a8432b13b182037e69e040134b6c Size: 79686598 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/nodejs-20-minimal@sha256:936daac34be9105cb05ca1eb7bcf89b280df38060709cf581df03fb69362e4df kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:936daac34be9105cb05ca1eb7bcf89b280df38060709cf581df03fb69362e4df resourceVersion: "14086" uid: 2f90468a-f59e-48f7-9592-6949468df17b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:db0f4cd412505c5cc2f31cf3c65db80f84d8656c4bfa9ef627a6f532c0459fc4 size: 78359137 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7e3624512448126fd29504b9af9bc034538918c54f0988fb08c03ff7a3a9a4cb size: 1789 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:14f31f43ca6d86bcd2e6a968f079af4a3c7e5bde11c32fe252cff5e99d41364b size: 450111139 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_EAP_VERSION=7.4.6 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap-xp - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=3.0.0 - WILDFLY_VERSION=3.0.0.GA-redhat-00016 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.8.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - 
JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - S2I_FP_VERSION=3.0.0.Final - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap-xp3-openjdk11-openshift-rhel8 - JBOSS_IMAGE_VERSION=3.0 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 7323ecf6dc56 Image: 7c29609a8c38f5c5df600979d42b6face2facfbe29a9e30d7462cac65d5d08f5 Labels: architecture: x86_64 build-date: 2022-08-03T11:41:48.394081 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-eap-xp3-openjdk11-builder-openshift-rhel8-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Enterprise Application Platform XP 3.0 OpenShift container image. 
distribution-scope: public io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running JavaEE applications on JBoss EAP XP 3.0 io.k8s.display-name: JBoss EAP XP io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,eap-xp maintainer: Red Hat name: jboss-eap-7/eap-xp3-openjdk11-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap-xp org.jboss.product.eap.version: 7.4.6 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 3.0.0 release: "21" summary: Red Hat JBoss Enterprise Application Platform XP 3.0 OpenShift container image. url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-eap-7/eap-xp3-openjdk11-openshift-rhel8/images/3.0-21 vcs-ref: cfdfab38abf90a24b6ae340095d579fae34ac19f vcs-type: git vendor: Red Hat, Inc. version: "3.0" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_EAP_VERSION=7.4.6 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap-xp - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=3.0.0 - WILDFLY_VERSION=3.0.0.GA-redhat-00016 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.8.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - 
GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - S2I_FP_VERSION=3.0.0.Final - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap-xp3-openjdk11-openshift-rhel8 - JBOSS_IMAGE_VERSION=3.0 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 7323ecf6dc56 Image: sha256:87b6aca14aa56e8872822e964f37b69a378849208dd9e7f9eed9f16a10ca0c34 Labels: architecture: x86_64 build-date: 2022-08-03T11:41:48.394081 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-eap-xp3-openjdk11-builder-openshift-rhel8-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Enterprise Application Platform XP 3.0 OpenShift container image. distribution-scope: public io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running JavaEE applications on JBoss EAP XP 3.0 io.k8s.display-name: JBoss EAP XP io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,eap-xp maintainer: Red Hat name: jboss-eap-7/eap-xp3-openjdk11-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap-xp org.jboss.product.eap.version: 7.4.6 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 3.0.0 release: "21" summary: Red Hat JBoss Enterprise Application Platform XP 3.0 OpenShift container image. url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-eap-7/eap-xp3-openjdk11-openshift-rhel8/images/3.0-21 vcs-ref: cfdfab38abf90a24b6ae340095d579fae34ac19f vcs-type: git vendor: Red Hat, Inc. 
version: "3.0" User: "185" WorkingDir: /home/jboss Created: "2022-08-03T11:47:40Z" DockerVersion: 1.13.1 Id: sha256:651ba477f1729377820e3c4d37b16cb7c970b4c616387639d7a2c3e75604addb Size: 528484179 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-eap-7/eap-xp3-openjdk11-openshift-rhel8@sha256:953aeb7c686ebe9359eb9e020aabaa011e47de9a0c38a3e97f85ff038abef5e6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:953aeb7c686ebe9359eb9e020aabaa011e47de9a0c38a3e97f85ff038abef5e6 resourceVersion: "13382" uid: e2937c86-2422-4b3e-bd8c-6be83ebe66c1 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:34cbb242d65bd3b5ea98fd0bf5be8ce2b2316994693b130adb043cd6537ee9ca size: 76239722 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0a4b6512aa42577405f0f324407ee3140e668e9bb470c3fb472e11266482468f size: 1414 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:43af5cb8726181f66a714e7b2e0fedeaafc94f41d9da7b8bbaf60cccbd282947 size: 360161439 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9189bb850e4ef7fc92a22a5e0d669a118f5ec3e16ec679c116d63a3af3840f7b size: 362224783 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_EAP_VERSION=7.2.8.GA - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.2.8.GA - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap72-openshift - JBOSS_IMAGE_VERSION=1.2 - WILDFLY_CAMEL_VERSION=5.5.0.fuse-770010-redhat-00003 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: e1afd63c27cb Image: cd014bc1a847cd3fed2f90a3fbc523a426e4ec7008cebec062413b1810264433 Labels: architecture: x86_64 build-date: 2020-11-04T18:20:14.968019 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: 
fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Camel applications on EAP 7.2.1 distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.2.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Otavio Piske name: fuse7/fuse-eap-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.2.8.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.2.8.GA release: "12.1604512956" summary: Platform for building and running Apache Camel applications on EAP 7.2.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.7-12.1604512956 vcs-ref: eefa649e2597e8e14192fabab6d39c751a947fa7 vcs-type: git vendor: Red Hat, Inc. version: "1.7" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_EAP_VERSION=7.2.8.GA - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.2.8.GA - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap72-openshift - JBOSS_IMAGE_VERSION=1.2 - WILDFLY_CAMEL_VERSION=5.5.0.fuse-770010-redhat-00003 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: e1afd63c27cb Image: sha256:285de3f29c8aa04b61c47d78bb30fda0cef3f92acb0e7130ab1d8e555aa70467 Labels: architecture: x86_64 build-date: 2020-11-04T18:20:14.968019 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true 
com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Camel applications on EAP 7.2.1 distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.2.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Otavio Piske name: fuse7/fuse-eap-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.2.8.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.2.8.GA release: "12.1604512956" summary: Platform for building and running Apache Camel applications on EAP 7.2.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.7-12.1604512956 vcs-ref: eefa649e2597e8e14192fabab6d39c751a947fa7 vcs-type: git vendor: Red Hat, Inc. version: "1.7" User: "185" WorkingDir: /home/jboss Created: "2020-11-04T18:21:27Z" DockerVersion: 1.13.1 Id: sha256:be40aa00260f66d90539c0d1cd5026c62b7911c1c7342db50c7580e98e0a828f Size: 798635518 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift@sha256:956ad570b06da524a856c6c2b421c8b4aab160fc4565cde798c72fa050c2dedf kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:956ad570b06da524a856c6c2b421c8b4aab160fc4565cde798c72fa050c2dedf resourceVersion: "14043" uid: 5cb6f520-db7b-4ee2-ab5f-b41168cc7821 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:50be7bb6ce3ddb41606e1956ba5c61072699ac536980f260a0db6dc59c8013fe size: 39575081 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:911b8848d090c0978905e7ddcc09c6d1b296d99a98f1d5c74c4d20e96b994577 size: 75953565 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.23 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2025-10-23T20:03:09 com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.buildah.version: 1.33.12 io.cekit.version: 4.13.0.dev0 io.k8s.description: 
Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 org.opencontainers.image.documentation: https://rh-openjdk.github.io/redhat-openjdk-containers/ release: "3.1761249763" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.23-3.1761249763 usage: https://rh-openjdk.github.io/redhat-openjdk-containers/ vcs-ref: 1525ac5b44f35db161f3d3efa207ccd05b700efa vcs-type: git vendor: Red Hat, Inc. version: "1.23" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-10-23T20:12:43Z" Id: sha256:3f0bcc6c0d5b6748f2ad906c60fff669c4098b54a58af97082bb036d4ac4deb7 Size: 115548551 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:963cb8b96a26857034d16753409e0b48eb8c7e2702ad97ea53136a705946e535 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:963cb8b96a26857034d16753409e0b48eb8c7e2702ad97ea53136a705946e535 resourceVersion: "13514" uid: bd8ab23f-c9ac-4f68-bb3e-1c6155e0217d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:25c75c34b2e2b68ba9245d9cddeb6b8a0887371ed30744064f85241a75704d87 size: 79262296 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:67705065e025181e4faca8aabe1305bdd92f5bdf8a2b8009cdb69183ac2e2c47 size: 49851946 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9d458e2e81cb0fa811f569aaf711628309c0372c7d5eed4a8ea9ec96b4aeeb42 size: 9300456 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3019826b26b93fdb39b6e29614bc6b4d1ab879c596261851db4ff70706fa6c55 size: 183535774 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:24ecfccd693adf38a95b31fa28a1832484a4b78bf63d1e7ca71d60123c17ead0 size: 127962077 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - ART_BUILD_ENGINE=konflux - ART_BUILD_DEPS_METHOD=cachi2 - ART_BUILD_NETWORK=hermetic - ART_BUILD_DEPS_MODE=default - __doozer=merge - BUILD_RELEASE=202510212154.p2.g69ff479.assembly.stream.el9 - BUILD_VERSION=v4.20.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=20 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.20.0-202510212154.p2.g69ff479.assembly.stream.el9-69ff479 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.20 - __doozer_key=openshift-enterprise-tests - __doozer_uuid_tag=ose-tests-rhel9-v4.20.0-20251021.223340 - __doozer_version=v4.20.0 - OS_GIT_COMMIT=69ff479 - SOURCE_DATE_EPOCH=1760012512 - SOURCE_GIT_COMMIT=69ff479373a432c4c48b76ff5ae1c5c56b9ffc5b - SOURCE_GIT_TAG=v4.1.0-9941-g69ff479373 - SOURCE_GIT_URL=https://github.com/openshift/origin Labels: License: GPLv2+ architecture: x86_64 build-date: "2025-10-21T23:23:44Z" com.redhat.component: openshift-enterprise-tests-container com.redhat.license_terms: 
https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:openshift:4.20::el9 description: Empty distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: OpenShift is a platform for developing, building, and deploying containerized applications. io.k8s.display-name: OpenShift End-to-End Tests io.openshift.build.commit.id: 69ff479373a432c4c48b76ff5ae1c5c56b9ffc5b io.openshift.build.commit.url: https://github.com/openshift/origin/commit/69ff479373a432c4c48b76ff5ae1c5c56b9ffc5b io.openshift.build.source-location: https://github.com/openshift/origin io.openshift.build.versions: kubernetes-tests=1.33.4 io.openshift.expose-services: "" io.openshift.maintainer.component: Test Framework io.openshift.maintainer.project: OCPBUGS io.openshift.release.operator: "true" io.openshift.tags: openshift,tests,e2e maintainer: Red Hat, Inc. name: openshift/ose-tests-rhel9 org.opencontainers.image.revision: cc08071546e345c534a6083c6b2ca879e49007a9 release: 202510212154.p2.g69ff479.assembly.stream.el9 summary: Empty url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel-els/images/9.4-847.1719484506 vcs-ref: cc08071546e345c534a6083c6b2ca879e49007a9 vcs-type: git vendor: Red Hat, Inc. version: v4.20.0 ContainerConfig: {} Created: "2025-10-21T23:26:44Z" Id: sha256:8a04a9c6687001c900f85968c7ab65743914c5abff0bbd6e76241d46cad011fc Size: 449939838 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9a1ff2292e9e3aa41290373a931e9b52de2b206e4da35dc12dc553f7b0e58146 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:9a1ff2292e9e3aa41290373a931e9b52de2b206e4da35dc12dc553f7b0e58146 resourceVersion: "14076" uid: 4105d75c-fd5f-4a23-8837-1febdd98746d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a20dc09567a04bdff2ebfaa3d3917f64d7620555e6354d53b43dd7ebb0e0f575 size: 79751689 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ac59b0c289e29a4926bf37ba68fd96d88bfb088939fb2141d2fd7663f7a43a65 size: 119102707 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. 
Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.15 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-10-17T21:52:36 com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "10" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.15-10 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: bc2e8cbed5194676f5e562e97468861dc04c055f vcs-type: git vendor: Red Hat, Inc. 
version: "1.15" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-10-17T22:20:18Z" Id: sha256:e9847fcee45301c4851138e5224a14e27a2c94844d178c65dd2784d4cdb5628c Size: 198884342 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:9ab26cb4005e9b60fd6349950957bbd0120efba216036da53c547c6f1c9e5e7f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:9ab26cb4005e9b60fd6349950957bbd0120efba216036da53c547c6f1c9e5e7f resourceVersion: "14103" uid: 68b084a9-f039-4e0a-a93d-800f75ea0732 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:50be7bb6ce3ddb41606e1956ba5c61072699ac536980f260a0db6dc59c8013fe size: 39575081 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9fda820972db65f84f083456d1ef6c072602f72f8849f448f7cb5b051e38af20 size: 97135313 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.23 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2025-11-25T10:27:55 com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.buildah.version: 1.33.12 io.cekit.version: 4.13.0.dev0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" org.opencontainers.image.documentation: https://rh-openjdk.github.io/redhat-openjdk-containers/ release: "3.1764066412" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.23-3.1764066412 usage: https://rh-openjdk.github.io/redhat-openjdk-containers/ vcs-ref: b40c568e2fa6d032648af2c70f3fe4f0cbf5ce66 vcs-type: git vendor: Red Hat, Inc. 
version: "1.23" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-11-25T10:40:37Z" Id: sha256:aa8e84022629e81f8870b1c37d3fc34cd9cea85a45ddc5634740763624f3c364 Size: 136730282 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/openjdk-17-runtime@sha256:9b5d2fc574a13613f18fa983ac2901593c1e812836e918095bc3d15b6cc4ba57 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-12-08T17:53:29Z" name: sha256:9b5d2fc574a13613f18fa983ac2901593c1e812836e918095bc3d15b6cc4ba57 resourceVersion: "40380" uid: c9d6df32-eb20-4093-9c07-78adee59265a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8abc860612807800afc77e8631e844c183642c3fd7bf28098329a9a471c51bfa size: 79390915 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:646e25f094bfdf115abaa765c7e87d0695757439d2d01c5a96950d1a8230095c size: 102110606 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9f629aedc79c0f56aca3877c00f3d459e5c4e1cd8c2462b3b2f50d374f5051da size: 29827212 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jws-5.7/tomcat/bin/launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - CATALINA_OPTS=-Djava.security.egd=file:/dev/./urandom - JBOSS_PRODUCT=webserver - JBOSS_WEBSERVER_VERSION=5.7.0 - JPDA_ADDRESS=8000 - JWS_HOME=/opt/jws-5.7/tomcat - PRODUCT_VERSION=5.7.0 - TOMCAT_VERSION=9.0.62 - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JWS_S2I_MODULE=/opt/jboss/container/jws/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss-webserver-5/jws57-openjdk8-rhel8-openshift - JBOSS_IMAGE_VERSION=5.7.0 - STI_BUILDER=jee ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2022-10-28T09:24:39 com.redhat.component: jboss-webserver-57-openjdk8-rhel8-openshift-container com.redhat.deployments-dir: /opt/jws-5.7/tomcat/webapps com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: JPDA_ADDRESS:8000 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK8 on UBI8 distribution-scope: public io.buildah.version: 1.26.2 io.cekit.version: 4.3.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running web applications on JBoss Web Server 5.7 with OpenJDK8 - Tomcat v9 io.k8s.display-name: JBoss Web Server 5.7 
OpenJDK8 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,tomcat9 maintainer: szappis@redhat.com name: jboss-webserver-5/jws57-openjdk8-rhel8-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/jws-5.7/tomcat/webapps org.jboss.product: webserver-tomcat9 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 5.7.0 org.jboss.product.webserver-tomcat9.version: 5.7.0 release: "6" summary: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK8 on UBI8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-webserver-5/jws57-openjdk8-rhel8-openshift/images/5.7.0-6 vcs-ref: 30c25e5f7426300944d59bdf2441b77a306eec9a vcs-type: git vendor: Red Hat, Inc. version: 5.7.0 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2022-10-28T09:28:29Z" Id: sha256:fc147f625fd89d8e9a8e60e53d35939809477d4254f43722faafecc1ee2ae2bb Size: 211369649 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-webserver-5/jws57-openjdk8-openshift-rhel8@sha256:9c10ee657f8d4fc4cee2d6f3fca56a8ded4354b90beea00e8274d1927e0fe8c7 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:9c10ee657f8d4fc4cee2d6f3fca56a8ded4354b90beea00e8274d1927e0fe8c7 resourceVersion: "13788" uid: f265bf7a-7a2e-4fcb-87bb-715647526958 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e2305491eb6ffd1d8265760823bba728f83c81461eff41237c57e74c5fb2e7a3 size: 11193462 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/bin/run-httpd Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - HTTPD_VERSION=2.4 - HTTPD_SHORT_VERSION=24 - NAME=httpd - SUMMARY=Platform for running Apache httpd 2.4 or building httpd-based application - DESCRIPTION=Apache httpd 2.4 available as container, is a powerful, efficient, and extensible web server. Apache supports a variety of features, many implemented as compiled modules which extend the core functionality. These can range from server-side programming language support to authentication schemes. Virtual hosting allows one Apache installation to serve many different Web sites. 
- HTTPD_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/httpd/ - HTTPD_APP_ROOT=/opt/app-root - HTTPD_CONFIGURATION_PATH=/opt/app-root/etc/httpd.d - HTTPD_MAIN_CONF_PATH=/etc/httpd/conf - HTTPD_MAIN_CONF_MODULES_D_PATH=/etc/httpd/conf.modules.d - HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d - HTTPD_TLS_CERT_PATH=/etc/httpd/tls - HTTPD_VAR_RUN=/var/run/httpd - HTTPD_DATA_PATH=/var/www - HTTPD_DATA_ORIG_PATH=/var/www - HTTPD_LOG_PATH=/var/log/httpd ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T20:11:21Z" com.redhat.component: httpd-24-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Apache httpd 2.4 available as container, is a powerful, efficient, and extensible web server. Apache supports a variety of features, many implemented as compiled modules which extend the core functionality. These can range from server-side programming language support to authentication schemes. Virtual hosting allows one Apache installation to serve many different Web sites. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Apache httpd 2.4 available as container, is a powerful, efficient, and extensible web server. Apache supports a variety of features, many implemented as compiled modules which extend the core functionality. These can range from server-side programming language support to authentication schemes. Virtual hosting allows one Apache installation to serve many different Web sites. io.k8s.display-name: Apache httpd 2.4 io.openshift.expose-services: 8080:http,8443:https io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,httpd,httpd-24 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel9/httpd-24 org.opencontainers.image.revision: 03c614bc240603ef13f05d4366c5027b8f79a40b release: "1760386250" summary: Platform for running Apache httpd 2.4 or building httpd-based application url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/httpd-container.git --context-dir=examples/sample-test-app/ rhel9/httpd-24 sample-server vcs-ref: 03c614bc240603ef13f05d4366c5027b8f79a40b vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T20:11:25Z" Id: sha256:3a17e28bd30d0ed0fd57d89760e591d16f93c412a8166f5560d4596b9909a74e Size: 108293615 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/httpd-24@sha256:9c8685e9b35e0262af34b42288252f421e0791efd835b5673cf9d10c90863a36 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:9c8685e9b35e0262af34b42288252f421e0791efd835b5673cf9d10c90863a36 resourceVersion: "13589" uid: 7ccc37db-beff-432b-8ea3-e1379779e1e7 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c9ff3e9281bcbcadd57f37cc0e47a4081cc20a091749d7a33d56496a60a2c1be size: 76240719 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f897b9608c98d944929bd778316439ac000b43d974c70efb678187e436f095fa size: 1320 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:84d82243c4d526fb64212dbf143c4bda2e708fdfaf270853a2d588c185b1bbfe size: 358310228 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2ac19899f11cfecf13fcc6858cec9aa84a088eebd01245dc25cd9c9ffc9efc32 size: 351119588 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_EAP_VERSION=7.2.7.GA - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.2.7.GA - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap72-openshift - JBOSS_IMAGE_VERSION=1.2 - WILDFLY_CAMEL_VERSION=5.3.0.fuse-750026-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 7158789667d6 Image: 791bedb1a4c1b02af8001a2b4aaae305444773523d7b83cb8a99e9e34d69e5c1 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-03-17T17:08:43.683394 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com 
com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Camel applications on EAP 7.2.1 distribution-scope: public io.cekit.version: 3.2.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.2.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Otavio Piske name: fuse7/fuse-eap-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.2.7.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.2.7.GA release: "12.1584463377" summary: Platform for building and running Apache Camel applications on EAP 7.2.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.5-12.1584463377 vcs-ref: 1e76ab152c0fb1ff811ef366b74092352dc2fe65 vcs-type: git vendor: Red Hat, Inc. version: "1.5" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_EAP_VERSION=7.2.7.GA - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.2.7.GA - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap72-openshift - JBOSS_IMAGE_VERSION=1.2 - WILDFLY_CAMEL_VERSION=5.3.0.fuse-750026-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 7158789667d6 Image: sha256:382de91c612b07160f62b17aaaa4cc84d1015b829d0f886d824e2be05515cfdd Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-03-17T17:08:43.683394 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: 
fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Camel applications on EAP 7.2.1 distribution-scope: public io.cekit.version: 3.2.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.2.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Otavio Piske name: fuse7/fuse-eap-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.2.7.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.2.7.GA release: "12.1584463377" summary: Platform for building and running Apache Camel applications on EAP 7.2.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.5-12.1584463377 vcs-ref: 1e76ab152c0fb1ff811ef366b74092352dc2fe65 vcs-type: git vendor: Red Hat, Inc. version: "1.5" User: "185" WorkingDir: /home/jboss Created: "2020-03-17T17:10:28Z" DockerVersion: 1.13.1 Id: sha256:dd66efe4ea6ed2453c3eaad23b8fd09f7ac35203cefee8e997c4b718a2ff129d Size: 785680195 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift@sha256:9d0ed8688df061f7555046a61cec60f909b325e77dee6aec9d2350f81efa0b46 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:9d0ed8688df061f7555046a61cec60f909b325e77dee6aec9d2350f81efa0b46 resourceVersion: "14037" uid: cdf30894-7900-4273-b02a-9792d5df0697 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:96cf2b875e2a36c8825aadbf02bb8c31fe8e9de2f715f939a18f669de38ee76c size: 106992460 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - 
JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.17 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-10-23T16:01:28 com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "4" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.17-4 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: 733fe9b2adb674b4b549a51a35f6a4a6a38ad037 vcs-type: git vendor: Red Hat, Inc. 
version: "1.17" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-10-23T16:17:36Z" Id: sha256:35d26bac52276039582d07b462c291fea0355814ab44f5b68fd3df8c060562aa Size: 146329998 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8@sha256:9d759db3bb650e5367216ce261779c5a58693fc7ae10f21cd264011562bd746d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:9d759db3bb650e5367216ce261779c5a58693fc7ae10f21cd264011562bd746d resourceVersion: "14214" uid: 212b2942-dfd8-4665-8e18-ff37bdc44f6a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:172ffdf04fe0ffd5a0117272da0333271a3c558dbeaf357e7412638d99a1e462 size: 154305621 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:52f2e6ec5ef8a4dbdb3c4274ef76ba2037ebbd724573f2057856b01a2a7ea1b8 size: 19585623 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - NODEJS_VER=20 - RUBY_MAJOR_VERSION=3 - RUBY_MINOR_VERSION=3 - RUBY_VERSION=3.3 - RUBY_SCL_NAME_VERSION=33 - RUBY_SCL=ruby-33 - IMAGE_NAME=ubi8/ruby-33 - SUMMARY=Platform for building and running Ruby 3.3 applications - DESCRIPTION=Ruby 3.3 available as container is a base platform for building and running various Ruby 3.3 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-30T12:10:39Z" com.redhat.component: ruby-33-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Ruby 3.3 available as container is a base platform for building and running various Ruby 3.3 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Ruby 3.3 available as container is a base platform for building and running various Ruby 3.3 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. 
io.k8s.display-name: Ruby 3.3 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,ruby,ruby33,ruby-33 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/ruby-33 org.opencontainers.image.revision: ef7b2eae3d26a61613cc71dd1291080399bce4be release: "1761826171" summary: Platform for building and running Ruby 3.3 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-ruby-container.git --context-dir=3.3/test/puma-test-app/ ubi8/ruby-33 ruby-sample-app vcs-ref: ef7b2eae3d26a61613cc71dd1291080399bce4be vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-30T12:10:48Z" Id: sha256:c2496e7ea1b21694f5bcf9dd6902807d8f852f4eef30466504885ea8f1e2286b Size: 269452030 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/ruby-33@sha256:9d895b1202aaf35bbe7b432736307c965d9d84fd91c06e41f8ac21c7ea0590a0 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:9d895b1202aaf35bbe7b432736307c965d9d84fd91c06e41f8ac21c7ea0590a0 resourceVersion: "14118" uid: 5f5938c0-5510-4115-93d9-a072382b27a1 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d9e82098d9baea2e101f08de3cadfb5eb9155ed9b0c3cc58d0dd84acd4f8f6ec size: 103715605 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-mysqld Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - MYSQL_VERSION=10.5 - APP_DATA=/opt/app-root/src - HOME=/var/lib/mysql - SUMMARY=MariaDB 10.5 SQL database server - DESCRIPTION=MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql - MYSQL_PREFIX=/usr ExposedPorts: 3306/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T20:14:59Z" com.redhat.component: mariadb-105-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. 
distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. io.k8s.display-name: MariaDB 10.5 io.openshift.expose-services: 3306:mysql io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,mysql,mariadb,mariadb105,mariadb-105 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel8/mariadb-105 org.opencontainers.image.revision: fd3cb58cc55ba53455c39286fd22514602d64ae0 release: "1761077645" summary: MariaDB 10.5 SQL database server url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhel8/mariadb-105 vcs-ref: fd3cb58cc55ba53455c39286fd22514602d64ae0 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "27" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T20:15:20Z" Id: sha256:62490f22b0ec3d319419337e5fb84c1841bfd3501b23fe94782ac7dde285c879 Size: 199273271 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel8/mariadb-105@sha256:9dea3590288a2f7e58af534af889123e044b0e4e03d179664bf5ac2206e9e91d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:9dea3590288a2f7e58af534af889123e044b0e4e03d179664bf5ac2206e9e91d resourceVersion: "13846" uid: 3bed6e22-edbc-44db-8b1c-41b88432cadc - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a20dc09567a04bdff2ebfaa3d3917f64d7620555e6354d53b43dd7ebb0e0f575 size: 79751689 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6ff043a38df4eb0e1a08336063c455b1d291b3d5e449b30447ea6ddbb53c6019 size: 116748485 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. 
Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.15 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-10-17T22:18:35 com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "8" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.15-8 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: b16c13bab105cc94b77b4618298049fffd555436 vcs-type: git vendor: Red Hat, Inc. 
version: "1.15" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-10-17T22:25:03Z" Id: sha256:15dc0efab317ab7c66332e12636ebf6db08387dc5c8e3f2860fe5e8f39fa6973 Size: 196529781 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:a0a6db2dcdb3d49e36bd0665e3e00f242a690391700e42cab14e86b154152bfd kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:a0a6db2dcdb3d49e36bd0665e3e00f242a690391700e42cab14e86b154152bfd resourceVersion: "14165" uid: fced16ec-331b-4ac2-bd70-ffe48bb75529 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:af063699af1c142fce6707dc9306d122355e61bd23ded0d18f8a4ecfbf3aa89a size: 78847792 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:c81d6d556e6e3a4255dd2709ce18578bfbbf3eed10a4efb966bf99ab69c79e05 size: 9405288 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:59dd25a7c2c2bfb972a6bfb598baa03168f94c7c6baba45e3938b157001c799a size: 10317076 dockerImageManifestMediaType: application/vnd.oci.image.manifest.v1+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/bin/run-httpd Entrypoint: - container-entrypoint Env: - container=oci - VERSION=10 - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el10 - HTTPD_VERSION=2.4 - HTTPD_SHORT_VERSION=24 - NAME=httpd - ARCH=x86_64 - SUMMARY=Platform for running Apache httpd 2.4 or building httpd-based application - DESCRIPTION=Apache httpd 2.4 available as container, is a powerful, efficient, and extensible web server. Apache supports a variety of features, many implemented as compiled modules which extend the core functionality. These can range from server-side programming language support to authentication schemes. Virtual hosting allows one Apache installation to serve many different Web sites. - HTTPD_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/httpd/ - HTTPD_APP_ROOT=/opt/app-root - HTTPD_CONFIGURATION_PATH=/opt/app-root/etc/httpd.d - HTTPD_MAIN_CONF_PATH=/etc/httpd/conf - HTTPD_MAIN_CONF_MODULES_D_PATH=/etc/httpd/conf.modules.d - HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d - HTTPD_TLS_CERT_PATH=/etc/httpd/tls - HTTPD_VAR_RUN=/var/run/httpd - HTTPD_DATA_PATH=/var/www - HTTPD_DATA_ORIG_PATH=/var/www - HTTPD_LOG_PATH=/var/log/httpd ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-16T00:10:06Z" com.redhat.component: httpd-24-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/o:redhat:enterprise_linux:10.0 description: Apache httpd 2.4 available as container, is a powerful, efficient, and extensible web server. Apache supports a variety of features, many implemented as compiled modules which extend the core functionality. These can range from server-side programming language support to authentication schemes. Virtual hosting allows one Apache installation to serve many different Web sites. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Apache httpd 2.4 available as container, is a powerful, efficient, and extensible web server. 
Apache supports a variety of features, many implemented as compiled modules which extend the core functionality. These can range from server-side programming language support to authentication schemes. Virtual hosting allows one Apache installation to serve many different Web sites. io.k8s.display-name: Apache httpd 2.4 io.openshift.expose-services: 8080:http,8443:https io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,httpd,httpd-24 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi10/httpd-24 org.opencontainers.image.revision: 449b2e89f3530eb9c28bedbaa094dc0fad934fc5 release: "1760573367" summary: Platform for running Apache httpd 2.4 or building httpd-based application url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/httpd-container.git --context-dir=examples/sample-test-app/ ubi10/httpd-24 sample-server vcs-ref: 449b2e89f3530eb9c28bedbaa094dc0fad934fc5 vcs-type: git vendor: Red Hat, Inc. version: "10.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-16T00:10:14Z" Id: sha256:f3b8e2913562adc779d57d8404ac62844571fbea11c93544ec50d7906429e362 Size: 98586659 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi10/httpd-24@sha256:a1f3f9545a7657a88c4bddbc00f4df862b7658f247e195704b6d9f8a0249c9fa kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:a1f3f9545a7657a88c4bddbc00f4df862b7658f247e195704b6d9f8a0249c9fa resourceVersion: "13584" uid: ae481df1-77ed-4c22-836f-881bf7d76399 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:172ffdf04fe0ffd5a0117272da0333271a3c558dbeaf357e7412638d99a1e462 size: 154305621 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9b71c3290efcf757caf7bdafce1c7203a98ff3556a9e4141b456c5914839cb07 size: 38715659 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - NODEJS_VER=20 - PHP_VERSION=7.4 - PHP_VER_SHORT=74 - NAME=php - SUMMARY=Platform for building and running PHP 7.4 applications - DESCRIPTION=PHP 7.4 available as container is a base platform for building and running various PHP 7.4 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. 
- PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ - APP_DATA=/opt/app-root/src - PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear - PHP_SYSCONF_PATH=/etc - PHP_HTTPD_CONF_FILE=php.conf - HTTPD_CONFIGURATION_PATH=/opt/app-root/etc/conf.d - HTTPD_MAIN_CONF_PATH=/etc/httpd/conf - HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d - HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d - HTTPD_VAR_RUN=/var/run/httpd - HTTPD_DATA_PATH=/var/www - HTTPD_DATA_ORIG_PATH=/var/www - HTTPD_VAR_PATH=/var ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-30T16:32:28Z" com.redhat.component: php-74-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: PHP 7.4 available as container is a base platform for building and running various PHP 7.4 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-php-container io.buildah.version: 1.41.4 io.k8s.description: PHP 7.4 available as container is a base platform for building and running various PHP 7.4 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. io.k8s.display-name: Apache 2.4 with PHP 7.4 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,php,php74,php-74 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/php-74 org.opencontainers.image.revision: 430df8993713269f2565e87e177403da7a38aea7 release: "1761841890" summary: Platform for building and running PHP 7.4 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=7.4/test/test-app ubi8/php-74 sample-server vcs-ref: 430df8993713269f2565e87e177403da7a38aea7 vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-30T16:32:43Z" Id: sha256:f3770d7717e39e6d29572469c3ffcff31832649a905cc7347243ca102890af27 Size: 288584302 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/php-74@sha256:a3ca570ae1293a2d88bd995b972f67a784a416cd9916a3d2bef7b38e23b88df3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:a3ca570ae1293a2d88bd995b972f67a784a416cd9916a3d2bef7b38e23b88df3 resourceVersion: "14078" uid: 00420108-d6e4-4b70-88fd-a833c793ebf3 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f0eb64f6b4304af6cc936a29678a7b7703a81003803268163e95597bbd6aa023 size: 46988995 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-postgresql Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - POSTGRESQL_VERSION=15 - POSTGRESQL_PREV_VERSION=13 - HOME=/var/lib/pgsql - PGUSER=postgres - APP_DATA=/opt/app-root - SUMMARY=PostgreSQL is an advanced Object-Relational database management system - DESCRIPTION=PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql - ENABLED_COLLECTIONS= ExposedPorts: 5432/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T16:13:57Z" com.redhat.component: postgresql-15-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. 
io.k8s.display-name: PostgreSQL 15 io.openshift.expose-services: 5432:postgresql io.openshift.s2i.assemble-user: "26" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,postgresql,postgresql15,postgresql-15 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel9/postgresql-15 org.opencontainers.image.revision: c6a2ac2bc62d533e26f617990996bf2e1cbf83d0 release: "1760372009" summary: PostgreSQL is an advanced Object-Relational database management system url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 rhel9/postgresql-15 vcs-ref: c6a2ac2bc62d533e26f617990996bf2e1cbf83d0 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "26" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T16:14:11Z" Id: sha256:b861c547e3bac8eb09c55dd74c4b91a3d83364cd101b057d35bf3b45c04cb3a4 Size: 144088905 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel9/postgresql-15@sha256:a4c6773a5eb5183f6dad3b029aa2cc4b6715797985dc53a3faf972007e7ad4d3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:a4c6773a5eb5183f6dad3b029aa2cc4b6715797985dc53a3faf972007e7ad4d3 resourceVersion: "14115" uid: af85429e-2044-4c84-a5ed-7d86c3b5d2ed - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7ef1e3a3a6838c7a9be47c4fdc5a8b177583baa77397397e76933831c0379d45 size: 132132261 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d6210f4b9831987f0f0ee1b4d658e8745e16f3185aeb15918185be6e5e30e813 size: 5397755 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_HTTPS=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.17 - LANG=C.utf8 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift-rhel8 - FUSE_KARAF_IMAGE_VERSION=1.12 - JOLOKIA_VERSION=1.7.2.redhat-00002 - 
KARAF_FRAMEWORK_VERSION=4.4.3.fuse-7_12_1-00009-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.18.0.redhat-00001 - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Labels: architecture: x86_64 build-date: 2024-05-23T17:52:21 com.redhat.component: fuse-karaf-openshift-rhel-8-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.jolokia: 1.7.2.redhat-00002 io.fabric8.s2i.version.karaf: 4.4.3.fuse-7_12_1-00009-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.18.0.redhat-00001 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "24.1716486067" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift-rhel8/images/1.12-24.1716486067 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: 9b5c4d1d209ef01d3ac0c65027716152267938bd vcs-type: git vendor: Red Hat, Inc. 
version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-05-23T17:55:01Z" Id: sha256:c9d720d89bdd36cd4657a20663839a59e89f88ab2269907c99b839928aa33aab Size: 176878546 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift-rhel8@sha256:a4dcf2213376727c3da1d10efbea52f20c74674bfb06643723f195a849171c10 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:a4dcf2213376727c3da1d10efbea52f20c74674bfb06643723f195a849171c10 resourceVersion: "14023" uid: d51abb6e-117c-46e4-85c1-bd8935cfd3ef - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:00f17e0b37b0515380a4aece3cb72086c0356fc780ef4526f75476bea36a2c8b size: 76243402 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:305d73a95c8fece2b53a34e040df1c97eb6b7f7cc4e0a7933465f0b7325e3d72 size: 1329 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:37de28a834ce4d54511a99e150f4c70ff0754aabff9c9f0b28fdee25deca1ad0 size: 347613312 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/datagrid/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.4.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.4 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.4.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: 9d3d66a8bfcc Image: 7eb94d3c73ed835c09c1b8813a8f6e987fa5fe5529b3ec4b22fb0df3397d9a50 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-01-16T13:46:51.487692 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Red Hat 
JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.5.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.4.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.4.GA release: "1.1579182050" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.4-1.1579182050 vcs-ref: c9b538c57579aed85692246a7d8bc2bab8b0be56 vcs-type: git vendor: Red Hat, Inc. version: "1.4" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.4.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.4 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.4.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: 9d3d66a8bfcc Image: sha256:a9d14d8a70648d60c0f677ee633180046573d5ce8d1e897855fee8da99ddac13 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-01-16T13:46:51.487692 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.5.0 io.fabric8.s2i.version.jolokia: 
1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.4.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.4.GA release: "1.1579182050" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.4-1.1579182050 vcs-ref: c9b538c57579aed85692246a7d8bc2bab8b0be56 vcs-type: git vendor: Red Hat, Inc. version: "1.4" User: "185" WorkingDir: /home/jboss Created: "2020-01-16T13:54:25Z" DockerVersion: 1.13.1 Id: sha256:19452076ffee7516280344d1889aeffb4d3208391ee4b2ffa937214e0a090f94 Size: 423866036 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:a4e8d81c9e54234f84072295425120e358752d58af8297ecfbae5d4ad01e6a2e kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:a4e8d81c9e54234f84072295425120e358752d58af8297ecfbae5d4ad01e6a2e resourceVersion: "13895" uid: 32f3c3bc-0386-4373-a81f-7bff34c7d66b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a9e23b64ace00a199db21d302292b434e9d3956d79319d958ecc19603d00c946 size: 39622437 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:38b71301a1d9df24c98b5a5ee8515404f42c929003ad8b13ab83d2de7de34dec size: 1742 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1c2420d17c6f75270607383f1a32012b5819be784c002fa6a756b3824a069450 size: 86385743 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 02d87ca75d31 Image: 1e3b9ba31f7cfb0e7e3e77274b62a3a343bcc69a6e7329fa417b8b66608d60ca Labels: architecture: x86_64 build-date: 2022-03-28T09:59:21.117794 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat 
OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "2.1648459728" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.11-2.1648459728 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 0fb451aea3eff8370b37e5f3d692a298a0bc0499 vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 02d87ca75d31 Image: sha256:53c2cd8aa6cf62155798343d7f9f6d5e7288e3c1f88e332e6eeb78682793d35a Labels: architecture: x86_64 build-date: 2022-03-28T09:59:21.117794 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "2.1648459728" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.11-2.1648459728 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 0fb451aea3eff8370b37e5f3d692a298a0bc0499 vcs-type: git vendor: Red Hat, Inc. 
version: "1.11" User: "185" WorkingDir: /home/jboss Created: "2022-03-28T10:01:39Z" DockerVersion: 1.13.1 Id: sha256:cc5623b2d45529c4ea9e9fe64dc1c252a12b09410c1e582132f75b108cb56bfa Size: 126014723 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:a8e4081414cfa644e212ded354dfee12706e63afb19a27c0c0ae2c8c64e56ca6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:a8e4081414cfa644e212ded354dfee12706e63afb19a27c0c0ae2c8c64e56ca6 resourceVersion: "14151" uid: ee1360fe-9f4c-450b-98aa-5d7a39a889b7 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d7c06497d5cebd39c0a4feb14981ec940b5c863e49903d320f630805b049cbff size: 39279912 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ea34baa9442f6f18b965e373ba72ccd388af746f26cd13a57d4b0f3978d70252 size: 75251776 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.14 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-02-07T17:35:05 com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "12.1675788326" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.14-12.1675788326 vcs-ref: 3c231ab5dfb5c3160b156c67544b0a2f09a46c7e vcs-type: git vendor: Red Hat, Inc. 
version: "1.14" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-02-07T17:42:27Z" Id: sha256:67d8a30d80c9ce332312a73ec1cc72ce355ee66bbd300f57cfd8d914fc047022 Size: 114550290 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:aa02a20c2edf83a009746b45a0fd2e0b4a2b224fdef1581046f6afef38c0bee2 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:aa02a20c2edf83a009746b45a0fd2e0b4a2b224fdef1581046f6afef38c0bee2 resourceVersion: "14222" uid: 7df72731-2440-4a30-b660-688bfe476159 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d2bb55755c2edf5423e38bd42c48b277e0cb4c5c765218247001a8c8eb479a87 size: 215199578 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:54d004b54a5584b6a6762c182b47e78caec9c907cc299fb8025d94f7450a1acb size: 25767662 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NODEJS_VER=20 - PHP_VERSION=8.3 - PHP_VER_SHORT=83 - NAME=php - SUMMARY=Platform for building and running PHP 8.3 applications - DESCRIPTION=PHP 8.3 available as container is a base platform for building and running various PHP 8.3 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. 
- PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ - APP_DATA=/opt/app-root/src - PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear - PHP_SYSCONF_PATH=/etc - PHP_HTTPD_CONF_FILE=php.conf - PHP_FPM_CONF_D_PATH=/etc/php-fpm.d - PHP_FPM_CONF_FILE=www.conf - PHP_FPM_RUN_DIR=/run/php-fpm - PHP_CLEAR_ENV=ON - PHP_MAIN_FPM_CONF_FILE=/etc/php-fpm.conf - PHP_FPM_LOG_PATH=/var/log/php-fpm - HTTPD_CONFIGURATION_PATH=/opt/app-root/etc/conf.d - HTTPD_MAIN_CONF_PATH=/etc/httpd/conf - HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d - HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d - HTTPD_VAR_RUN=/var/run/httpd - HTTPD_DATA_PATH=/var/www - HTTPD_DATA_ORIG_PATH=/var/www - HTTPD_VAR_PATH=/var ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T20:08:14Z" com.redhat.component: php-83-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: PHP 8.3 available as container is a base platform for building and running various PHP 8.3 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-php-container io.buildah.version: 1.41.4 io.k8s.description: PHP 8.3 available as container is a base platform for building and running various PHP 8.3 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. io.k8s.display-name: Apache 2.4 with PHP 8.3 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,php,php83,php-83 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/php-83 org.opencontainers.image.revision: ff65cb718897c8cbf24d68ffe1e50cf086618888 release: "1760386068" summary: Platform for building and running PHP 8.3 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=8.3/test/test-app ubi9/php-83 sample-server vcs-ref: ff65cb718897c8cbf24d68ffe1e50cf086618888 vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T20:08:26Z" Id: sha256:4ca50a412a6c1eadd443bf5ee00cca9c5a9750f4d4cc2c0b5221c8dbd61c1813 Size: 338073105 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/php-83@sha256:aabd8f354cad901a56c029dbd62483262f4c435a329882ba5c3f7041c8cc04f8 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:aabd8f354cad901a56c029dbd62483262f4c435a329882ba5c3f7041c8cc04f8 resourceVersion: "14084" uid: 36de5d4e-6134-42ad-a084-82d3a9283898 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:172ffdf04fe0ffd5a0117272da0333271a3c558dbeaf357e7412638d99a1e462 size: 154305621 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e8ae870cf5fcc3cd0cba15ea38346df88f604a80291c11db01739e63b9ccc818 size: 19569683 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - NODEJS_VER=20 - PERL_VERSION=5.26 - PERL_SHORT_VER=526 - NAME=perl - SUMMARY=Platform for building and running Perl 5.26 applications - DESCRIPTION=Perl 5.26 available as container is a base platform for building and running various Perl 5.26 applications and frameworks. Perl is a high-level programming language with roots in C, sed, awk and shell scripting. Perl is good at handling processes and files, and is especially good at handling text. Perl's hallmarks are practicality and efficiency. While it is used to do a lot of different things, Perl's most common applications are system administration utilities and web programming. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-30T16:32:44Z" com.redhat.component: perl-526-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Perl 5.26 available as container is a base platform for building and running various Perl 5.26 applications and frameworks. Perl is a high-level programming language with roots in C, sed, awk and shell scripting. Perl is good at handling processes and files, and is especially good at handling text. Perl's hallmarks are practicality and efficiency. While it is used to do a lot of different things, Perl's most common applications are system administration utilities and web programming. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-perl-container io.buildah.version: 1.41.4 io.k8s.description: Perl 5.26 available as container is a base platform for building and running various Perl 5.26 applications and frameworks. 
Perl is a high-level programming language with roots in C, sed, awk and shell scripting. Perl is good at handling processes and files, and is especially good at handling text. Perl's hallmarks are practicality and efficiency. While it is used to do a lot of different things, Perl's most common applications are system administration utilities and web programming. io.k8s.display-name: Apache 2.4 with mod_fcgid and Perl 5.26 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,perl,perl526,perl-526 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/perl-526 org.opencontainers.image.revision: 64887b602551cd69e73f7bb2ec62867ee1860c3d release: "1761841872" summary: Platform for building and running Perl 5.26 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi8/perl-526:latest vcs-ref: 64887b602551cd69e73f7bb2ec62867ee1860c3d vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-30T16:33:04Z" Id: sha256:58a046cf8359a65ef865458801bb5894f3140b951e0095fbb78e06ea795ed2d5 Size: 269436903 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/perl-526@sha256:abf2ce8a3db39549eb583fd38851e51a1d34c3448225d0cf1aede35e5e0cccfd kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:abf2ce8a3db39549eb583fd38851e51a1d34c3448225d0cf1aede35e5e0cccfd resourceVersion: "14012" uid: f2c63986-c86c-4b6b-9fe1-9288c71e3494 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ff7f9283a88b236942df38c7ee59d7ff36fff7c14ad453263361be85c2f83924 size: 51636332 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-mysqld Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - MYSQL_VERSION=10.5 - MYSQL_SHORT_VERSION=105 - VERSION=10.5 - APP_DATA=/opt/app-root/src - HOME=/var/lib/mysql - NAME=mariadb - SUMMARY=MariaDB 10.5 SQL database server - DESCRIPTION=MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. 
- CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql - MYSQL_PREFIX=/usr ExposedPorts: 3306/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-14T05:46:43Z" com.redhat.component: mariadb-105-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. io.k8s.display-name: MariaDB 10.5 io.openshift.expose-services: 3306:mysql io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,mysql,mariadb,mariadb105,mariadb-105 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel9/mariadb-105 org.opencontainers.image.revision: da3a493ec7749f7b71dafb4985b134df39a6550b release: "1760420779" summary: MariaDB 10.5 SQL database server url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhel9/mariadb-105 vcs-ref: da3a493ec7749f7b71dafb4985b134df39a6550b vcs-type: git vendor: Red Hat, Inc. version: "1" User: "27" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-14T05:46:50Z" Id: sha256:214233ff6a1c53648e90d99cf1de3973c486c2f461e61da35dd3ac61df8a738f Size: 148736029 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel9/mariadb-105@sha256:acc20d8b3eb50bef50abda0e44d46d353ba28be950b1685cc8053c977b31eaf6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:acc20d8b3eb50bef50abda0e44d46d353ba28be950b1685cc8053c977b31eaf6 resourceVersion: "13851" uid: 4e33f54a-e28c-4224-b81f-ae19257725b1 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c85ac87d44df4b64d7c273886fc5aed55a28422df33dcb641884ffa419db218 size: 76240885 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:51e9f237b750efcda2d5755785cdb8dd080d51585ae35d368e4f9b29a11b1994 size: 1329 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5ccfe6e48f4f71d761b34c61586ac1808cca10bf7e543a3666b802f38625c5a9 size: 4013312 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35875365be086462ee5d275b62cfc13046029a9a084880c18583b932a5b23632 size: 85346475 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:95ba73f94fc8f4c85e9b37f9b95f4f15fda80446024f985171f454b03e194462 size: 15177610 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - 
PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.5 - JOLOKIA_VERSION=1.6.2.redhat-00002 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages - JAVACONFDIRS=/opt/rh/rh-maven35/root/etc/java - XDG_CONFIG_DIRS=/opt/rh/rh-maven35/root/etc/xdg:/etc/xdg - XDG_DATA_DIRS=/opt/rh/rh-maven35/root/usr/share:/usr/local/share:/usr/share ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 534540aef153 Image: 5f54774f24df8f7779413d55677257eb8d1f2c57e1ddbd52056bb5d13db9a8b3 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-01-27T12:37:34.107857 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "19.1580118028" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.5-19.1580118028 vcs-ref: 26b81f6597d0da63d9f8d1bc0d8f4cd10566a149 vcs-type: git vendor: Red Hat, Inc. 
version: "1.5" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.5 - JOLOKIA_VERSION=1.6.2.redhat-00002 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages - JAVACONFDIRS=/opt/rh/rh-maven35/root/etc/java - XDG_CONFIG_DIRS=/opt/rh/rh-maven35/root/etc/xdg:/etc/xdg - XDG_DATA_DIRS=/opt/rh/rh-maven35/root/usr/share:/usr/local/share:/usr/share ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 534540aef153 Image: sha256:991a18ae1ad710759516c2ab8d5e2f09b20b442cb0ad47ea82db9d1b46455987 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-01-27T12:37:34.107857 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "19.1580118028" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.5-19.1580118028 vcs-ref: 26b81f6597d0da63d9f8d1bc0d8f4cd10566a149 vcs-type: git vendor: Red Hat, Inc. 
version: "1.5" User: "185" WorkingDir: /home/jboss Created: "2020-01-27T12:39:42Z" DockerVersion: 1.13.1 Id: sha256:8af4fe9149f32a43d83ebf4f38a44447324e8bf9509bcffb9c6540c19c5af26d Size: 180786295 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift@sha256:adc5b484c12f915309a95acb71890e4a1a8148d5dadd6cc22d0794cdab81557b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:adc5b484c12f915309a95acb71890e4a1a8148d5dadd6cc22d0794cdab81557b resourceVersion: "14050" uid: 7c1e6225-b5e2-4f1b-821f-c12d1a208467 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:59f5134b8f53d5010d7921666aac69cac4622f8da09fe053bd167d1d0245b1e6 size: 69305150 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - NAME=nginx - NGINX_VERSION=1.22 - NGINX_SHORT_VER=122 - VERSION=0 - SUMMARY=Platform for running nginx 1.22 or building nginx-based application - DESCRIPTION=Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.22 daemon. The image can be used as a base image for other applications based on nginx 1.22 web server. Nginx server image can be extended using source-to-image tool. - NGINX_CONFIGURATION_PATH=/opt/app-root/etc/nginx.d - NGINX_CONF_PATH=/etc/nginx/nginx.conf - NGINX_DEFAULT_CONF_PATH=/opt/app-root/etc/nginx.default.d - NGINX_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/nginx - NGINX_APP_ROOT=/opt/app-root - NGINX_LOG_PATH=/var/log/nginx - NGINX_PERL_MODULE_PATH=/opt/app-root/etc/perl ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T20:13:40Z" com.redhat.component: nginx-122-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.22 daemon. The image can be used as a base image for other applications based on nginx 1.22 web server. Nginx server image can be extended using source-to-image tool. 
distribution-scope: public help: For more information visit https://github.com/sclorg/nginx-container io.buildah.version: 1.41.4 io.k8s.description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.22 daemon. The image can be used as a base image for other applications based on nginx 1.22 web server. Nginx server image can be extended using source-to-image tool. io.k8s.display-name: Nginx 1.22 io.openshift.expose-services: 8443:https io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nginx,nginx-122 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/nginx-122 org.opencontainers.image.revision: 59f8de1161ae9544103554406bee3514cad28e67 release: "1761077586" summary: Platform for running nginx 1.22 or building nginx-based application url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi8/nginx-122:latest vcs-ref: 59f8de1161ae9544103554406bee3514cad28e67 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T20:13:59Z" Id: sha256:eab4b30b2ffa298e34fba65ad21d6c5088a8e3712036e47f9c9d58aa4fcb4441 Size: 164864869 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/nginx-122@sha256:ade3b81041a336ef1a37f3c52f85fc0b92bd62b76f304a86d262770110e3fbab kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:ade3b81041a336ef1a37f3c52f85fc0b92bd62b76f304a86d262770110e3fbab resourceVersion: "14056" uid: 370a2e2b-9302-4661-b3e6-cc68a75ead42 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c43687042a41aad69fc526985ef2b82012c011db7e0e26faba4fc860ad32d88e size: 75837780 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2b7b014ba1b80abb29391141385bd32668571313647317d1d64d8b5cebb1f228 size: 1331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5dcc89faa7e0fc98d4ed9da03b2e3498f33372bace47228179ed794bc10ed474 size: 273870240 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/datagrid/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - 
JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_DATAGRID_VERSION=7.3.1.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.1 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - JOLOKIA_VERSION=1.5.0 - LAUNCH_JBOSS_IN_BACKGROUND=true - MAVEN_VERSION=3.5 - PRODUCT_VERSION=7.3.1.GA - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: 8ea581222c6a Image: f68aa65ca7049d78d71065e100f917db5ba6547f5bb153c50eadd1db2813ea52 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-06-28T16:19:09.007404 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 2.2.7 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.concrt.version: 2.2.7 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.1.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.3.1.GA release: "1.1561730992" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.1-1.1561730992 vcs-ref: 6d070f0036864b7609858c5bb9549f4d7c78d827 vcs-type: git vendor: Red Hat, Inc. 
version: "1.1" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_DATAGRID_VERSION=7.3.1.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.1 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - JOLOKIA_VERSION=1.5.0 - LAUNCH_JBOSS_IN_BACKGROUND=true - MAVEN_VERSION=3.5 - PRODUCT_VERSION=7.3.1.GA - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: 8ea581222c6a Image: sha256:b2a19f9545b2b0b3129158e46b3cdbfc293ad8546ed037ab87d11f79e8c4ec6e Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-06-28T16:19:09.007404 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 2.2.7 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.concrt.version: 2.2.7 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.1.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.3.1.GA release: "1.1561730992" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.1-1.1561730992 vcs-ref: 6d070f0036864b7609858c5bb9549f4d7c78d827 vcs-type: git vendor: Red Hat, Inc. 
version: "1.1" User: "185" WorkingDir: /home/jboss Created: "2019-06-28T16:24:24Z" DockerVersion: 1.13.1 Id: sha256:611247135e7a777add961048b86506b83c6794ccaf096ae8e39b4440178af485 Size: 349717412 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:ae7c07fccaaec3ad4a83a2309893b03e94010b2d046de8c38e3d5af45366f84c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:ae7c07fccaaec3ad4a83a2309893b03e94010b2d046de8c38e3d5af45366f84c resourceVersion: "13886" uid: ec5f5192-a8fb-4f15-9ebf-1f03c7234733 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:af063699af1c142fce6707dc9306d122355e61bd23ded0d18f8a4ecfbf3aa89a size: 78847792 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:c81d6d556e6e3a4255dd2709ce18578bfbbf3eed10a4efb966bf99ab69c79e05 size: 9405288 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:d9e405c2a1a004feffa67c8e53dc0965b77b9c67d91626366ee9e53dd24e3de4 size: 199332407 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:7a55cac5354fba5a1f03b73b3034cf29119ae01c24d39e990271dcfbba9988e8 size: 27645571 dockerImageManifestMediaType: application/vnd.oci.image.manifest.v1+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - VERSION=10 - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el10 - NODEJS_VER=22 - PERL_VERSION=5.40 - PERL_SHORT_VER=540 - NAME=perl - SUMMARY=Platform for building and running Perl 5.40 applications - DESCRIPTION=Perl 5.40 available as container is a base platform for building and running various Perl 5.40 applications and frameworks. Perl is a high-level programming language with roots in C, sed, awk and shell scripting. Perl is good at handling processes and files, and is especially good at handling text. Perl's hallmarks are practicality and efficiency. While it is used to do a lot of different things, Perl's most common applications are system administration utilities and web programming. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-29T04:09:14Z" com.redhat.component: perl-540-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/o:redhat:enterprise_linux:10.0 description: Perl 5.40 available as container is a base platform for building and running various Perl 5.40 applications and frameworks. Perl is a high-level programming language with roots in C, sed, awk and shell scripting. Perl is good at handling processes and files, and is especially good at handling text. Perl's hallmarks are practicality and efficiency. While it is used to do a lot of different things, Perl's most common applications are system administration utilities and web programming. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-perl-container io.buildah.version: 1.41.4 io.k8s.description: Perl 5.40 available as container is a base platform for building and running various Perl 5.40 applications and frameworks. 
Perl is a high-level programming language with roots in C, sed, awk and shell scripting. Perl is good at handling processes and files, and is especially good at handling text. Perl's hallmarks are practicality and efficiency. While it is used to do a lot of different things, Perl's most common applications are system administration utilities and web programming. io.k8s.display-name: Apache 2.4 with mod_fcgid and Perl 5.40 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,perl,perl540,perl-540 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi10/perl-540 org.opencontainers.image.revision: 7d7b85e40a84102741c1b0a2a39e850e64c82377 release: "1761710868" summary: Platform for building and running Perl 5.40 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi10/perl-540 vcs-ref: 7d7b85e40a84102741c1b0a2a39e850e64c82377 vcs-type: git vendor: Red Hat, Inc. version: "10.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-29T04:09:21Z" Id: sha256:51774f0232204c84d437a2dc118f8021a5c917d49efd196372a749b65cc09106 Size: 315250419 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi10/perl-540@sha256:af6079ab6f381f2f8eb7175c9bacb93d0c72ff022e97f28520c97b1633b109e2 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:af6079ab6f381f2f8eb7175c9bacb93d0c72ff022e97f28520c97b1633b109e2 resourceVersion: "14014" uid: 725cfa36-aa71-4a1c-a1d1-afc4035d2d46 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:bbdea0ea4cfcb830755d69ff087eb173b3cd6ce711fc4266094beb10942aa814 size: 7511919 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-redis Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - REDIS_VERSION=6 - HOME=/var/lib/redis - SUMMARY=Redis in-memory data structure store, used as database, cache and message broker - DESCRIPTION=Redis 6 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. 
- CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/redis - REDIS_PREFIX=/usr - REDIS_CONF=/etc/redis.conf ExposedPorts: 6379/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-29T20:21:51Z" com.redhat.component: redis-6-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Redis 6 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Redis 6 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. io.k8s.display-name: Redis 6 io.openshift.expose-services: 6379:redis io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,redis,redis6,redis-6 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel8/redis-6 org.opencontainers.image.revision: 80c0f0e5463c2391b7f40881d425988852ba24af release: "1761769259" summary: Redis in-memory data structure store, used as database, cache and message broker url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d --name redis_database -p 6379:6379 rhel8/redis-6 vcs-ref: 80c0f0e5463c2391b7f40881d425988852ba24af vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" Volumes: /var/lib/redis/data: {} WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-29T20:21:58Z" Id: sha256:540836c38bf0d9bee27b45bb7e26784cb5ccea590191ede24b07ea46a6fa3256 Size: 103070929 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel8/redis-6@sha256:af676d250bb4ced265fece19dbb847133717e18341777df3a57550f53f6207cb kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:af676d250bb4ced265fece19dbb847133717e18341777df3a57550f53f6207cb resourceVersion: "14070" uid: 21bfeb15-362c-46c3-ba5b-5d13ac999927 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:751bf1af528874dba437014af54078013e46b2eca91e82aab200d452b0165af9 size: 76529550 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:71bd5e95c80acea5839e4c515a585f43158bffd718c2be1795b4825b043975a3 size: 1565 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e702d958f3035edf60c80c93b45f1a170c2160ae9a580d59267584434c1ae1a2 size: 114191748 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.11 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 1c8f39c8f8b8 Image: e4929ae9a5fbdb97b858381fba014fcb191cf1723a49766f613f834e7f121f31 Labels: architecture: x86_64 build-date: 2022-03-28T13:32:20.449707 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java 
applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1648472919" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.11-1.1648472919 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: a224a5bd86d644ba003811652b2ace5d3fc217aa vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.11 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 1c8f39c8f8b8 Image: sha256:f699d571b436296fdb1b633cec0b4061b1782920263560dbfe8d653ff1f8fa7d Labels: architecture: x86_64 build-date: 2022-03-28T13:32:20.449707 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: 
"1.1648472919" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.11-1.1648472919 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: a224a5bd86d644ba003811652b2ace5d3fc217aa vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss Created: "2022-03-28T13:36:44Z" DockerVersion: 1.13.1 Id: sha256:016ab3c8b22634c1850dd1dd0edb10113e0a08de60126436768f03f4f0fffcc0 Size: 190730265 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:af9c08644ca057d83ef4b7d8de1489f01c5a52ff8670133b8a09162831b7fb34 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:af9c08644ca057d83ef4b7d8de1489f01c5a52ff8670133b8a09162831b7fb34 resourceVersion: "14100" uid: 87da1a13-79ff-4530-a66a-2f95be7ce08e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:71104d043b69aa31878ef66c6d1f40dbbd9f37c12bdec9e915d592a38c07903d size: 76550480 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:fde28b70fde89c673900ca961a8c0abf6a0e6c405de070ed53aba447d6678093 size: 1565 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ad3a5abe8fcd7caca242109ea38318432cbaff8fb6b8e3cd1962d631c7b1e9be size: 130952044 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.12 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 57b2f60fd930 Image: ecfd0fb8d1633ddd2d2d1149949d32265f14f0b9674fbcf25dc55b0a5afb77bf Labels: architecture: x86_64 build-date: 2022-08-24T22:41:49.577295 com.redhat.build-host: 
cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1661378019" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.12-1.1661378019 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: ca8b57a053f33ff9aa8c3ba9e045a18f62d9a21d vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.12 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 57b2f60fd930 Image: sha256:a361be97d9ff5e76f35f122a97cfbb500e5789603df2b3758a107cbb37519442 Labels: architecture: x86_64 build-date: 2022-08-24T22:41:49.577295 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and 
running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1661378019" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.12-1.1661378019 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: ca8b57a053f33ff9aa8c3ba9e045a18f62d9a21d vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss Created: "2022-08-24T22:47:13Z" DockerVersion: 1.13.1 Id: sha256:a338d26dd42dd15057389d95bb79f53dc9a51880e38a8237ce6b6b680c85fd3c Size: 207511491 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/openjdk/openjdk-11-rhel7@sha256:b053401886c06581d3c296855525cc13e0613100a596ed007bb69d5f8e972346 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:b053401886c06581d3c296855525cc13e0613100a596ed007bb69d5f8e972346 resourceVersion: "14101" uid: d1ec9b2e-83c5-4a70-acec-c69083610127 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:06f86e50a0b74ff9eb161a7d781228877c90e8ff57e9689e8cb8b0f092a2a9f9 size: 39268171 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d4bfe8be4e0654a9ecfb806061b5a762d86293abd82aa45c817cb070ddb5e085 size: 90090661 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.15 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-04-21T06:46:01 com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 4.3.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "1.1682059521" summary: Image for Red Hat 
OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.15-1.1682059521 vcs-ref: 0fbf133aeed4118c485646cc3101c8b3df8c301d vcs-type: git vendor: Red Hat, Inc. version: "1.15" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-04-21T06:58:19Z" Id: sha256:cb4d904bda259d7a9bc535393ce1a24ce92ce04a61cf11723ea85cfbf58e27d9 Size: 129377201 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:b163564be6ed5b80816e61a4ee31e42f42dbbf345253daac10ecc9fadf31baa3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:b163564be6ed5b80816e61a4ee31e42f42dbbf345253daac10ecc9fadf31baa3 resourceVersion: "14192" uid: b6e6b291-0209-471e-9c56-ae8b05771378 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c253a5224a10e39430451b18c897d0006e39de12e5730a7f76402fa6e252a839 size: 72342076 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/bin/run-httpd Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - HTTPD_VERSION=2.4 - SUMMARY=Platform for running Apache httpd 2.4 or building httpd-based application - DESCRIPTION=Apache httpd 2.4 available as container, is a powerful, efficient, and extensible web server. Apache supports a variety of features, many implemented as compiled modules which extend the core functionality. These can range from server-side programming language support to authentication schemes. Virtual hosting allows one Apache installation to serve many different Web sites. - HTTPD_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/httpd/ - HTTPD_APP_ROOT=/opt/app-root - HTTPD_CONFIGURATION_PATH=/opt/app-root/etc/httpd.d - HTTPD_MAIN_CONF_PATH=/etc/httpd/conf - HTTPD_MAIN_CONF_MODULES_D_PATH=/etc/httpd/conf.modules.d - HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d - HTTPD_TLS_CERT_PATH=/etc/httpd/tls - HTTPD_VAR_RUN=/var/run/httpd - HTTPD_DATA_PATH=/var/www - HTTPD_DATA_ORIG_PATH=/var/www - HTTPD_LOG_PATH=/var/log/httpd ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T20:13:41Z" com.redhat.component: httpd-24-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Apache httpd 2.4 available as container, is a powerful, efficient, and extensible web server. Apache supports a variety of features, many implemented as compiled modules which extend the core functionality. These can range from server-side programming language support to authentication schemes. 
Virtual hosting allows one Apache installation to serve many different Web sites. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Apache httpd 2.4 available as container, is a powerful, efficient, and extensible web server. Apache supports a variety of features, many implemented as compiled modules which extend the core functionality. These can range from server-side programming language support to authentication schemes. Virtual hosting allows one Apache installation to serve many different Web sites. io.k8s.display-name: Apache httpd 2.4 io.openshift.expose-services: 8080:http,8443:https io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,httpd,httpd-24 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel8/httpd-24 org.opencontainers.image.revision: f57227a4ba2b888b2f3cb78ff9c0a1f9650ed11a release: "1761077590" summary: Platform for running Apache httpd 2.4 or building httpd-based application url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/httpd-container.git --context-dir=examples/sample-test-app/ rhel8/httpd-24 sample-server vcs-ref: f57227a4ba2b888b2f3cb78ff9c0a1f9650ed11a vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T20:13:57Z" Id: sha256:c2f9d24c33cf2adf540b309202af42f8b7f5631abbb56edbbe4533eef37313ae Size: 167900505 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel8/httpd-24@sha256:b25e2af772c13ae5ff3339bc4bbdf52c49011e750f40b37a0b736cb82768a349 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:b25e2af772c13ae5ff3339bc4bbdf52c49011e750f40b37a0b736cb82768a349 resourceVersion: "13579" uid: f8686563-e875-4328-964e-1eb1909f8321 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:71104d043b69aa31878ef66c6d1f40dbbd9f37c12bdec9e915d592a38c07903d size: 76550480 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:fde28b70fde89c673900ca961a8c0abf6a0e6c405de070ed53aba447d6678093 size: 1565 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3b35f0ac7e52b492dabd9f9844ea2b5437163636fa07884dfc6ac4dfce4ae715 size: 128628837 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - 
JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.12 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 57b2f60fd930 Image: 3b64936c5fae0217c844f01b11192af76cc16ce72fc9a8b9cec7606beb2fc986 Labels: architecture: x86_64 build-date: 2022-08-24T22:40:56.439104 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1661378017" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.12-1.1661378017 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 023752341349e641b29886492a623cc8af26b7a7 vcs-type: git vendor: Red Hat, Inc. 
version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.12 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 57b2f60fd930 Image: sha256:9d606ca24547a4b4bc19bc738b94db15ca6739c9be9e617db37d2636e5eb07de Labels: architecture: x86_64 build-date: 2022-08-24T22:40:56.439104 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1661378017" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.12-1.1661378017 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 023752341349e641b29886492a623cc8af26b7a7 vcs-type: git vendor: Red Hat, Inc. 
version: "1.12" User: "185" WorkingDir: /home/jboss Created: "2022-08-24T22:45:36Z" DockerVersion: 1.13.1 Id: sha256:b412ff26e5487d30ee0d188ee5dc3c748a635a378edcea4c834d0535d78d9b6c Size: 205188424 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:b4cb02a4e7cb915b6890d592ed5b4ab67bcef19bf855029c95231f51dd071352 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:b4cb02a4e7cb915b6890d592ed5b4ab67bcef19bf855029c95231f51dd071352 resourceVersion: "14163" uid: f15648c0-6467-4dec-ab74-722969ad7e9a - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a06cfa6e5ed77521218eaa75d023f86e156295cb20de1bda73e67b69c667962c size: 77840791 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a8ed13ed8366237556c3d4c3e812ce913366275ddc02b9daa458e7bb3aea46a4 size: 112317807 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - DEFAULT_ADMIN_USERNAME=eapadmin - HTTPS_ENABLE_HTTP2=true - JBOSS_HOME=/opt/eap - JBOSS_IMAGE_NAME=jboss-eap-7/eap74-openjdk11-runtime-openshift-rhel8 - JBOSS_IMAGE_VERSION=7.4.23 - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api - LAUNCH_JBOSS_IN_BACKGROUND=true - SSO_FORCE_LEGACY_SECURITY=true ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2025-07-28T22:21:45 com.redhat.component: jboss-eap-74-openjdk11-runtime-openshift-rhel8-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.license_terms: https://www.redhat.com/agreements description: The JBoss EAP 7.4 OpenJDK 11 runtime image distribution-scope: public io.buildah.version: 1.33.12 io.cekit.version: 4.12.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.k8s.description: Base image to run an EAP server and application io.k8s.display-name: JBoss EAP runtime image io.openshift.expose-services: 8080:http io.openshift.tags: javaee,eap,eap7 maintainer: Red Hat name: jboss-eap-7/eap74-openjdk11-runtime-openshift-rhel8 org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "5.1753741009" summary: The JBoss EAP 7.4 OpenJDK 11 runtime image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-eap-7/eap74-openjdk11-runtime-openshift-rhel8/images/7.4.23-5.1753741009 vcs-ref: 01ae679e0f6a42f995f880902de2d245eae7d26f vcs-type: git vendor: Red Hat, Inc. 
version: 7.4.23 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-07-28T22:29:23Z" Id: sha256:aaef104173d392640a16093b0375ab1848ab77c390f057d4432e3cf13ecc14bb Size: 190181406 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-eap-7/eap74-openjdk11-runtime-openshift-rhel8@sha256:b52d47d62be6b57d4af722f98b3434016c99c54e04574236b017924046d323c0 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:b52d47d62be6b57d4af722f98b3434016c99c54e04574236b017924046d323c0 resourceVersion: "13298" uid: f420861d-0e16-4593-b55e-442c437ac95d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:25c75c34b2e2b68ba9245d9cddeb6b8a0887371ed30744064f85241a75704d87 size: 79262296 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:67705065e025181e4faca8aabe1305bdd92f5bdf8a2b8009cdb69183ac2e2c47 size: 49851946 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9d458e2e81cb0fa811f569aaf711628309c0372c7d5eed4a8ea9ec96b4aeeb42 size: 9300456 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:603a40d66abe93fd4805fe8f433e5c21fcc2671bde3798f573282e03296d55d3 size: 37049867 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Entrypoint: - /usr/bin/oauth-proxy Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - ART_BUILD_ENGINE=konflux - ART_BUILD_DEPS_METHOD=cachi2 - ART_BUILD_NETWORK=hermetic - ART_BUILD_DEPS_MODE=default - __doozer=merge - BUILD_RELEASE=202510211040.p2.g6649cb8.assembly.stream.el9 - BUILD_VERSION=v4.20.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=20 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.20.0-202510211040.p2.g6649cb8.assembly.stream.el9-6649cb8 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.20 - __doozer_key=golang-github-openshift-oauth-proxy - __doozer_uuid_tag=ose-oauth-proxy-rhel9-v4.20.0-20251021.105557 - __doozer_version=v4.20.0 - OS_GIT_COMMIT=6649cb8 - SOURCE_DATE_EPOCH=1756110665 - SOURCE_GIT_COMMIT=6649cb8a7c4dd7d1c4e03c9b0bc7594089fba991 - SOURCE_GIT_TAG=v4.4-imagestream-1-137-g6649cb8a - SOURCE_GIT_URL=https://github.com/openshift/oauth-proxy Labels: License: GPLv2+ architecture: x86_64 build-date: "2025-10-21T12:59:58Z" com.redhat.component: golang-github-openshift-oauth-proxy-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:openshift:4.20::el9 description: Empty distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: OpenShift OAuth Proxy. io.k8s.display-name: OpenShift OAuth Proxy io.openshift.build.commit.id: 6649cb8a7c4dd7d1c4e03c9b0bc7594089fba991 io.openshift.build.commit.url: https://github.com/openshift/oauth-proxy/commit/6649cb8a7c4dd7d1c4e03c9b0bc7594089fba991 io.openshift.build.source-location: https://github.com/openshift/oauth-proxy io.openshift.expose-services: "" io.openshift.maintainer.component: apiserver-auth io.openshift.maintainer.project: OCPBUGS io.openshift.tags: oauth maintainer: Red Hat, Inc. 
name: openshift/ose-oauth-proxy-rhel9 org.opencontainers.image.revision: 48d86f0ebc64bbddc42cb2f3953cd22a470f09ed release: 202510211040.p2.g6649cb8.assembly.stream.el9 summary: Empty url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel-els/images/9.4-847.1719484506 vcs-ref: 48d86f0ebc64bbddc42cb2f3953cd22a470f09ed vcs-type: git vendor: Red Hat, Inc. version: v4.20.0 ContainerConfig: {} Created: "2025-10-21T13:00:46Z" Id: sha256:8667c6123d18f3974181dd6bcc4d9a6de908c9f1c9028e62699dc33d19f53304 Size: 175485249 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b77dec59a72e9b6323e6fa2617f588f07518f44d2e9f6aa8f2ccd83d90e40203 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:b77dec59a72e9b6323e6fa2617f588f07518f44d2e9f6aa8f2ccd83d90e40203 resourceVersion: "13845" uid: f6d233f9-1923-4eb6-927d-f2ea2cb4ef30 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ea87c7dcd2a872d002f8496004b71abd95b626a9b357c9ecd052d57fab6205b8 size: 96274787 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.18 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2024-01-18T10:15:53 com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.9.1 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 org.opencontainers.image.documentation: https://jboss-container-images.github.io/openjdk/ release: "2.1705572797" summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.18-2.1705572797 usage: https://jboss-container-images.github.io/openjdk/ vcs-ref: b3fd77ac1425a4ff4908049b21b743ab30e063aa vcs-type: git vendor: Red Hat, Inc. 
version: "1.18" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-01-18T10:37:55Z" Id: sha256:2aa213d5224d35c38242ad04f0e1ec5a61a81cd04bc5aaa7f4454122a201e804 Size: 135601584 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:b80a514f136f738736d6bf654dc3258c13b04a819e001dd8a39ef2f7475fd9d9 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:b80a514f136f738736d6bf654dc3258c13b04a819e001dd8a39ef2f7475fd9d9 resourceVersion: "14226" uid: d4d0a17b-169a-48a3-a329-966713d880b0 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d7c06497d5cebd39c0a4feb14981ec940b5c863e49903d320f630805b049cbff size: 39279912 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8f863159a00163b597034b832591613ce6016e7029a6351f07d1a500e9cc4b28 size: 107301065 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.14 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-02-07T17:21:35 com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk 
org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "14.1675788284" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.14-14.1675788284 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: 8e04a5356a8315ea64fd704987c0014137fb0559 vcs-type: git vendor: Red Hat, Inc. version: "1.14" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-02-07T17:33:06Z" Id: sha256:079f46516ee586b6f5e921c55135ece0457d66cfe60c206cc886c134e7db796f Size: 146610789 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8@sha256:b838fa18dab68d43a19f0c329c3643850691b8f9915823c4f8d25685eb293a11 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:b838fa18dab68d43a19f0c329c3643850691b8f9915823c4f8d25685eb293a11 resourceVersion: "14211" uid: 8d91fe9b-87fd-49b6-a5bc-c76a0adee45b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:00002eebe0f599331474f07547167ad83154a7a902f19a133af9a6f5e08a1dfa size: 131864212 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.18 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2024-01-18T20:22:45 com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.buildah.version: 1.29.0 
io.cekit.version: 4.9.1 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" org.opencontainers.image.documentation: https://jboss-container-images.github.io/openjdk/ release: "2.1705602259" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.18-2.1705602259 usage: https://jboss-container-images.github.io/openjdk/ vcs-ref: ca7ddac9f7758dacb1a7a347130d7d8d6ad50922 vcs-type: git vendor: Red Hat, Inc. version: "1.18" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-01-18T20:37:30Z" Id: sha256:17d970f7d5f789087e305ec68b8146195107db18aca710ae8c7de6ac91051504 Size: 171202432 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11@sha256:b85cbdbc289752c91ac7f468cffef916fe9ab01865f3e32cfcc44ccdd633b168 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:b85cbdbc289752c91ac7f468cffef916fe9ab01865f3e32cfcc44ccdd633b168 resourceVersion: "14182" uid: 8c17324b-f890-40a9-b05a-d79a008ebd91 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:daf29424ccde8c00ce2d4a5419f0e0ab1f25086d4fff2ab3760095371c742680 size: 39754259 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:84d37627e4bba0f01c788eafc7c723c1f627da4553b0218b0fd8b4d6a7f947ec size: 62830946 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - '"./${DOTNET_DEFAULT_CMD}"' Env: - container=oci - HOME=/opt/app-root - PATH=/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - DOTNET_APP_PATH=/opt/app-root/app - DOTNET_DATA_PATH=/opt/app-root/data - DOTNET_DEFAULT_CMD=default-cmd.sh - DOTNET_RUNNING_IN_CONTAINER=true - NUGET_XMLDOC_MODE=skip - ASPNETCORE_URLS=http://*:8080 - APP_UID=1001 - DOTNET_VERSION=8.0.22 - ASPNET_VERSION=8.0.22 ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-12-02T13:30:28Z" com.redhat.component: dotnet-80-runtime-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Platform for running .NET 8 applications distribution-scope: public dotnet_version: 8.0.22 io.buildah.version: 1.41.4 io.k8s.description: Platform for running .NET 8 applications io.k8s.display-name: .NET 8 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: runtime,.net,dotnet,dotnetcore,dotnet80-runtime io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. 
name: ubi8/dotnet-80-runtime org.opencontainers.image.created: "2025-12-02T13:30:28Z" org.opencontainers.image.revision: 77738c1f6e91ee24a008ab9d47f062bd401cf98f release: "1764682148" summary: .NET 8 runtime url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: 77738c1f6e91ee24a008ab9d47f062bd401cf98f vcs-type: git vendor: Red Hat, Inc. version: "8.0" User: "1001" WorkingDir: /opt/app-root/app ContainerConfig: {} Created: "2025-12-02T13:30:38Z" Id: sha256:71db85ab9b48fca2b839daf41e2f94eee174d8b7caee633ac38ce3923ff4876b Size: 102598542 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/dotnet-80-runtime@sha256:b998c58c88dd98365531bacc631dc92deb73de17cd3b6f86466f421c409f8583 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-12-08T17:53:27Z" name: sha256:b998c58c88dd98365531bacc631dc92deb73de17cd3b6f86466f421c409f8583 resourceVersion: "40309" uid: b66fb3da-6de9-4fad-8119-f41f845e3b1c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8aa7c030671f84c4f190806311febadc9d9ba286d9992325e2dca46970eca591 size: 79774934 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:66741bf06f08d8eb2f9c74cd995a0da2dafe94e6a770a4d7cdaf66b3d7cd9036 size: 422978473 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:34a590fa7e41e139b2ea3b81c70b50da6c11fbd3ab3a414038e98d20ce38db26 size: 1153561491 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - EAP_FULL_GROUPID=org.jboss.eap - JBOSS_EAP_VERSION=7.4.13 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.4.13 - WILDFLY_VERSION=7.4.13.GA-redhat-00001 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.9.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - 
TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - S2I_FP_VERSION=23.0.0.Final - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap74-openjdk8-openshift-rhel7 - JBOSS_IMAGE_VERSION=7.4.13 - LANG=C.utf8 - JOLOKIA_VERSION=1.7.2.redhat-00002 - WILDFLY_CAMEL_VERSION=5.10.0.fuse-7_12_1-00009-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-11-07T10:26:31 com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Camel applications on EAP 7.4 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.7.2.redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.4 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Thomas Diesler name: fuse7/fuse-eap-openshift-jdk8-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.4.13 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.4.13 release: "33" summary: Platform for building and running Apache Camel applications on EAP 7.4 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift-jdk8-rhel7/images/1.12-33 vcs-ref: 
b52098af4a5dd2f87cb51b4f0fac33d2ec7f0c68 vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-11-07T10:30:23Z" Id: sha256:fc5c81041b0b8b9b3b0e5511873397a44b77ae5f0ac04cc9f693e17ec66d233c Size: 1656368397 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift-jdk8-rhel7@sha256:ba09ffc90313f71c748e38a6b7c68b20f4e42945eb88349a6596dea61c517637 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:ba09ffc90313f71c748e38a6b7c68b20f4e42945eb88349a6596dea61c517637 resourceVersion: "14025" uid: b30f4618-e380-4e41-8f7c-d14b4ac292d1 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:378837c0e24ad4a2e33f0eb3d68dc0c31d9a7dbbd5357d4acafec1d3a7930602 size: 74923740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e17262bc23414bd3c0e9808ad7a87b055fe5afec386da42115a839ea2083d233 size: 1303 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8d9c78c7f9887170d08c57ec73b21e469b4120682a2e82883217535294878c5d size: 3805344 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f4350d5126d0895bb50c2c082a415ff417578d34508a0ef07ec20cebf661ebb7 size: 70368140 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f14ceb8ccc89fe79eddcedf96ffffcc68e8b376a7bfee81da406c8825cabb254 size: 30912001 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.1 - JOLOKIA_VERSION=1.5.0.redhat-1 - PROMETHEUS_JMX_EXPORTER_VERSION=0.10.0.redhat-2 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 3183206101f3 Image: 6d7d8134d45d36ea1654ff54f10affaef7a20322ab846455d3f6881dd0890204 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-10-17T21:54:48.549067 com.redhat.build-host: cpt-0009.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.1.4 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.10.0.redhat-2 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Cloud Enablement Feedback name: fuse7/fuse-java-openshift org.concrt.version: 2.1.4 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 
org.jboss.product.version: 1.8.0 release: "4.1539812368" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.1-4.1539812368 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: 68c37e19a2e9f85e503652418dfa19c2d2f71349 vcs-type: git vendor: Red Hat, Inc. version: "1.1" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift - FUSE_JAVA_IMAGE_VERSION=1.1 - JOLOKIA_VERSION=1.5.0.redhat-1 - PROMETHEUS_JMX_EXPORTER_VERSION=0.10.0.redhat-2 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 3183206101f3 Image: sha256:27fd2d68b6c16f4c29e862ed42b06417d370f20e4a4e96b9d8a7271f3fe0db51 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-10-17T21:54:48.549067 com.redhat.build-host: cpt-0009.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 2.1.4 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.10.0.redhat-2 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Cloud Enablement Feedback name: fuse7/fuse-java-openshift org.concrt.version: 2.1.4 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "4.1539812368" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift/images/1.1-4.1539812368 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: 68c37e19a2e9f85e503652418dfa19c2d2f71349 vcs-type: git vendor: Red Hat, Inc. 
version: "1.1" User: "185" WorkingDir: /home/jboss Created: "2018-10-17T21:57:10Z" DockerVersion: 1.13.1 Id: sha256:1398678780b6a8488a7407b7fc5f2c6a784b4212b682a3fe2911125ed95fb147 Size: 180016684 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift@sha256:ba0bb0b1b9bed00d24bd73b59b6a3f7a46714ba1c0e1b900572dc580eedde68c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:ba0bb0b1b9bed00d24bd73b59b6a3f7a46714ba1c0e1b900572dc580eedde68c resourceVersion: "14031" uid: 4db4407d-e010-4d0d-9812-5be98937692d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:79c20e727b94ea36ae8776eb9e4065b60dc1d396564a6a91ebb6ee334dfb5cea size: 79001473 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:bf8eef1d7ebdf53d35a677908dd07dc73e7afa302fa4b3375a7729d92640097b size: 470350057 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_EAP_VERSION=7.4.21 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap-xp - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=4.0.2 - WILDFLY_VERSION=4.0.2.GA-redhat-00010 - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.9.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server 
- GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - S2I_FP_VERSION=4.0.0.Final - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - OFFLINER_VERSION=1.6 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap-xp4-openjdk11-openshift-rhel8 - JBOSS_IMAGE_VERSION=4.0 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2025-02-05T10:36:55 com.redhat.component: jboss-eap-xp4-openjdk11-builder-openshift-rhel8-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Enterprise Application Platform XP 4.0 OpenShift container image. distribution-scope: public io.buildah.version: 1.33.8 io.cekit.version: 4.12.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running JavaEE applications on JBoss EAP XP 4.0 io.k8s.display-name: JBoss EAP XP io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,eap-xp maintainer: Red Hat name: jboss-eap-7/eap-xp4-openjdk11-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap-xp org.jboss.product.eap.version: 7.4.21 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 4.0.2 release: "83" summary: Red Hat JBoss Enterprise Application Platform XP 4.0 OpenShift container image. url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-eap-7/eap-xp4-openjdk11-openshift-rhel8/images/4.0-83 vcs-ref: abc2234f37ee2dac02e16a7e65775cba8bbf8e40 vcs-type: git vendor: Red Hat, Inc. 
version: "4.0" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-02-05T10:46:22Z" Id: sha256:8230c513a7a2ac6b05fc08213144ce749631e57ee94bb872d761fcff7d65ab36 Size: 549416678 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-eap-7/eap-xp4-openjdk11-openshift-rhel8@sha256:babbc5613f8ad380b2b85564d74d1edfbb345a9481fa3c1891980bb75169c079 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:babbc5613f8ad380b2b85564d74d1edfbb345a9481fa3c1891980bb75169c079 resourceVersion: "13346" uid: 292eb814-18c6-45d9-bc75-750692de4cf9 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c8c24bf4ef435ee29d7495ca732a4d82374c1a11c25ca6aae12f997f45ca5a26 size: 39733597 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b3f8e70833f9e7ef0aa29948526b214496524ca42b8de4326fe5720275dbc259 size: 49612025 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - container=oci - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - NPM_RUN=start - PLATFORM=el8 - NODEJS_VERSION=22 - NAME=nodejs - SUMMARY=Minimal image for running Node.js 22 applications - DESCRIPTION=Node.js 22 available as container is a base platform for running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. - NPM_CONFIG_PREFIX=/opt/app-root/src/.npm-global - PATH=/opt/app-root/src/node_modules/.bin/:/opt/app-root/src/.npm-global/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T16:11:37Z" com.redhat.component: nodejs-22-minimal-container com.redhat.deployments-dir: /opt/app-root/src com.redhat.dev-mode: DEV_MODE:false com.redhat.dev-mode.port: DEBUG_PORT:5858 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Node.js 22 available as container is a base platform for running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-nodejs-container io.buildah.version: 1.41.4 io.k8s.description: Node.js 22 available as container is a base platform for running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. 
io.k8s.display-name: Node.js 22 Minimal io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nodejs,nodejs22 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/nodejs-22-minimal org.opencontainers.image.revision: 036610bcf0884b09cd66a4cf998af0d7d63e7842 release: "1761063072" summary: Minimal image for running Node.js 22 applications url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: 036610bcf0884b09cd66a4cf998af0d7d63e7842 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T16:11:48Z" Id: sha256:1adff3a9f4ac4cf97c5cded69314721442c670cd69767d11a303d2ac50fa6ed6 Size: 89357843 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/nodejs-22-minimal@sha256:bae3cc0e61e2d73c8dd1384cfa97c6e875ff4a5da7f6332075eb5cd95bc90a7c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:bae3cc0e61e2d73c8dd1384cfa97c6e875ff4a5da7f6332075eb5cd95bc90a7c resourceVersion: "14091" uid: 849487c3-a5cc-4fdc-9d17-1232da4612df - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:06f86e50a0b74ff9eb161a7d781228877c90e8ff57e9689e8cb8b0f092a2a9f9 size: 39268171 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a33df5fac14bd7628194e14e695a825432b6aa4d9c795c83e3229ea9ef2f6d44 size: 170473264 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a44a7d80cb9334975310d04287283937a5384129d34cb2196e8da1289234ed03 size: 5896388 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_HTTPS=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.14 - LANG=C.utf8 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift-jdk11-rhel8 - FUSE_JAVA_IMAGE_VERSION=1.11 - JOLOKIA_VERSION=1.7.1.redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i:/usr/local/s2i - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8080/tcp: {} 
8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Labels: architecture: x86_64 build-date: 2023-06-19T14:53:12 com.redhat.component: fuse-java-openshift-jdk11-rhel-8-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift-jdk11-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "46.1687184935" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift-jdk11-rhel8/images/1.11-46.1687184935 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: 3c42e10f87d022df6d119889367e08bf6ca02870 vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-06-19T14:55:47Z" Id: sha256:ca576f3f73d2ec54d1a5a8c8d11da3a2ac3b3f92106caa72a9648a0eac042579 Size: 215678711 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift-jdk11-rhel8@sha256:bc7fd6202c086ba5dec4dc27c888b96505ffbd37d1e124cd4abb72bd13a8f237 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:bc7fd6202c086ba5dec4dc27c888b96505ffbd37d1e124cd4abb72bd13a8f237 resourceVersion: "13357" uid: 483c08ce-77e4-479b-99d9-dca42620545c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a450a68cb6f27c66e8e7eb103f60d8c45ece038e05a5296aded6ab51b9792a0b size: 135499428 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - 
JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.18 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2024-01-18T10:37:37 com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.9.1 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" org.opencontainers.image.documentation: https://jboss-container-images.github.io/openjdk/ release: "2.1705573234" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.18-2.1705573234 usage: https://jboss-container-images.github.io/openjdk/ vcs-ref: 3e770bd714724a77d062c8bd4113bbc2a69f1024 vcs-type: git vendor: Red Hat, Inc. 
version: "1.18" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-01-18T10:49:24Z" Id: sha256:a46c965d1c38df2fa4417ddccafa0c1d63e071ffc1892c44be25e35b793c7dde Size: 174835100 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17@sha256:bcb0e15cc9d2d3449f0b1acac7b0275035a80e1b3b835391b5464f7bf4553b89 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:bcb0e15cc9d2d3449f0b1acac7b0275035a80e1b3b835391b5464f7bf4553b89 resourceVersion: "14149" uid: 782357b6-85ce-40c7-89a5-3bbba40c1af4 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5396332cda6c9dfb73ed1971c8a7f1db24cb7f627c2533281db28851b94d1aae size: 142828242 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-mysqld Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - MYSQL_VERSION=8.0 - APP_DATA=/opt/app-root/src - HOME=/var/lib/mysql - SUMMARY=MySQL 8.0 SQL database server - DESCRIPTION=MySQL is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MySQL mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MySQL databases on behalf of the clients. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql - MYSQL_PREFIX=/usr ExposedPorts: 3306/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T20:14:21Z" com.redhat.component: mysql-80-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: MySQL is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MySQL mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MySQL databases on behalf of the clients. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: MySQL is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MySQL mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MySQL databases on behalf of the clients. 
io.k8s.display-name: MySQL 8.0 io.openshift.expose-services: 3306:mysql io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,mysql,mysql80,mysql-80 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel8/mysql-80 org.opencontainers.image.revision: 8d1c59058e8eb28303db2dc941e91618c91d5078 release: "1761077583" summary: MySQL 8.0 SQL database server url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhel8/mysql-80 vcs-ref: 8d1c59058e8eb28303db2dc941e91618c91d5078 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "27" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T20:14:47Z" Id: sha256:749185756a4fa7efa62e641706ba2af7f38fae9706189ba10ee5fbf9dd431c40 Size: 238386021 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel8/mysql-80@sha256:be2c749e20118e80b11526eb52993e2055e035b211b2621dea9094dd4dcd9446 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:be2c749e20118e80b11526eb52993e2055e035b211b2621dea9094dd4dcd9446 resourceVersion: "13782" uid: b0f29749-c87b-4d06-add2-1f714e217a16 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:378837c0e24ad4a2e33f0eb3d68dc0c31d9a7dbbd5357d4acafec1d3a7930602 size: 74923740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e17262bc23414bd3c0e9808ad7a87b055fe5afec386da42115a839ea2083d233 size: 1303 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0eeb656bc1e64b6c3ba63f2fa9450feaef3c60159d48eb2171ad1f25f5e655d9 size: 3805266 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:77187f02cbeff1e8de9cdd8e850f300e7267ddf19991dcbc588a498c14df3ff0 size: 70351735 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e5ce1c514681e586df1d1bc14ebc3e37b9ef2164202b54267be700e3fd445fa1 size: 15174769 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Dhiraj Bokde Config: Cmd: - usage Env: - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.0 - JOLOKIA_VERSION=1.5.0.redhat-1 - KARAF_FRAMEWORK_VERSION=4.2.0.fuse-000280-redhat-2 - PROMETHEUS_JMX_EXPORTER_VERSION=0.10.0.redhat-2 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 3183206101f3 Image: 3320f15730e3d95d98b084a00d040f8e97053ec1aa0858da207a0f4b332871b8 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-08-01T18:19:12.507455 com.redhat.build-host: osbs-cpt-003.ocp.osbs.upshift.eng.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.0.0 
io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.karaf: 4.2.0.fuse-000280-redhat-2 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.10.0.redhat-2 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Cloud Enablement Feedback name: fuse7/fuse-karaf-openshift org.concrt.version: 2.0.0 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "13.1533127989" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.0-13.1533127989 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: 01db3ab4a812607ef299e01de1ad666c75d170be vcs-type: git vendor: Red Hat, Inc. version: "1.0" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.0 - JOLOKIA_VERSION=1.5.0.redhat-1 - KARAF_FRAMEWORK_VERSION=4.2.0.fuse-000280-redhat-2 - PROMETHEUS_JMX_EXPORTER_VERSION=0.10.0.redhat-2 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 3183206101f3 Image: sha256:d36ff9014d3950ec0cd231dffbb7b8506923bad04203505d38e32cdd5891593d Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-08-01T18:19:12.507455 com.redhat.build-host: osbs-cpt-003.ocp.osbs.upshift.eng.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.0.0 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.karaf: 4.2.0.fuse-000280-redhat-2 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.10.0.redhat-2 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Cloud Enablement Feedback name: fuse7/fuse-karaf-openshift org.concrt.version: 2.0.0 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "13.1533127989" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.0-13.1533127989 usage: This image is very generic and does not serve a single use case. 
Use it as a base to build your own images. vcs-ref: 01db3ab4a812607ef299e01de1ad666c75d170be vcs-type: git vendor: Red Hat, Inc. version: "1.0" User: "185" WorkingDir: /home/jboss Created: "2018-08-01T18:20:49Z" DockerVersion: 1.12.6 Id: sha256:02c9b9ef185aade797ca6141ca66a53afa7f6bf70ee0ec336643d0470ebc3042 Size: 164263281 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:be51ee43b1596078a17756f38a0017e9338c902f9094f1ad677844d165a02d43 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:be51ee43b1596078a17756f38a0017e9338c902f9094f1ad677844d165a02d43 resourceVersion: "14016" uid: 8d1449f9-2c88-4129-94ba-f2c60f8f4a40 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:881beec6738623be08c01402ad3ff01db770251227a38829fa2a65f00db2624f size: 127909585 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.18 - LANG=C.utf8 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2024-01-18T10:15:55 com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.9.1 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.8" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: 
image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 org.opencontainers.image.documentation: https://jboss-container-images.github.io/openjdk/ release: "2.1705572794" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.18-2.1705572794 usage: https://jboss-container-images.github.io/openjdk/ vcs-ref: 48715c823f569ad02c9840e572698f88fe5be83e vcs-type: git vendor: Red Hat, Inc. version: "1.18" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-01-18T10:42:18Z" Id: sha256:eb4b532f0cd1dd337cbb3e6cfc2adbc4b92e3af4974b973f2365e8f6065e6978 Size: 167247302 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8@sha256:bf5e518dba2aa935829d9db88d933a264e54ffbfa80041b41287fd70c1c35ba5 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:bf5e518dba2aa935829d9db88d933a264e54ffbfa80041b41287fd70c1c35ba5 resourceVersion: "14215" uid: ff70890b-bc5d-4ac0-9429-9800e042cdfe - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:551849931ba0dec31d2e0b8b4490c168ccf5c5c75215fa094860547b6ae6a94e size: 33442256 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:6db543cc86de251a80f1ff8eb92ee1e9db423350ed5389a1a80bac5820af205e size: 49344347 dockerImageManifestMediaType: application/vnd.oci.image.manifest.v1+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - container=oci - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - NPM_RUN=start - PLATFORM=el10 - NODEJS_VERSION=22 - NAME=nodejs - SUMMARY=Minimal image for running Node.js 22 applications - DESCRIPTION=Node.js 22 available as container is a base platform for running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. - NPM_CONFIG_PREFIX=/opt/app-root/src/.npm-global - PATH=/opt/app-root/src/node_modules/.bin/:/opt/app-root/src/.npm-global/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-09-24T20:12:57Z" com.redhat.component: nodejs-22-minimal-container com.redhat.deployments-dir: /opt/app-root/src com.redhat.dev-mode: DEV_MODE:false com.redhat.dev-mode.port: DEBUG_PORT:5858 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/o:redhat:enterprise_linux:10.0 description: Node.js 22 available as container is a base platform for running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. 
distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-nodejs-container io.buildah.version: 1.41.3 io.k8s.description: Node.js 22 available as container is a base platform for running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. io.k8s.display-name: Node.js 22 Micro io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nodejs,nodejs22 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/nodejs-22-minimal org.opencontainers.image.revision: dc5f2425bbb71d01f4e07ce058f5ec5140ce7bed release: "1758744753" summary: Minimal image for running Node.js 22 applications url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: dc5f2425bbb71d01f4e07ce058f5ec5140ce7bed vcs-type: git vendor: Red Hat, Inc. version: "10.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-09-24T20:13:01Z" Id: sha256:39375a53734dc6b989c9dee3d76dc6865790d8bfc8837bc79e27ba77a791ae50 Size: 82798859 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi10/nodejs-22-minimal@sha256:c1360b136eae3f3e6eab6e446d6d7b78c53e829a61e2a0b802d55c7ba134ca0c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:c1360b136eae3f3e6eab6e446d6d7b78c53e829a61e2a0b802d55c7ba134ca0c resourceVersion: "14090" uid: 977c08b5-54a9-4387-a5e2-34a829a67e86 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1f1202c893ce2775c72b2a3f42ac33b25231d16ca978244bb0c6d1453dc1f39e size: 76250035 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:32be9843afa050552a66345576a59497ba7c81c272aa895d67e6e349841714da size: 1320 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9f74ee800fdb2242405ca2ee7e74f612973956d1725766a1f2199339f92b8381 size: 4013823 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c47adc077129316d733e991a2da2c4bf7ec3d93b7836e0b97ddc5885f0e512ba size: 85699059 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e829e5fa91fb38993da6e85432f8dab8bfec2f2414833e0fa76b718d00de53ad size: 15166148 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.6 - JOLOKIA_VERSION=1.6.2.redhat-00002 - KARAF_FRAMEWORK_VERSION=4.2.6.fuse-760032-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages 
ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 08c71bdb7e2a Image: 36850ef4cd3173c72395f1aac96aa90a2c066aa2c0f88fc824d7763162843bd5 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-04-21T12:13:12.350188 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.karaf: 4.2.6.fuse-760032-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "18.1587470206" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.6-18.1587470206 vcs-ref: e20dbf2d77a8e6138f22685c91d477de5da0647b vcs-type: git vendor: Red Hat, Inc. version: "1.6" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.6 - JOLOKIA_VERSION=1.6.2.redhat-00002 - KARAF_FRAMEWORK_VERSION=4.2.6.fuse-760032-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 08c71bdb7e2a Image: sha256:e9d11a3997dd125da1a40ea98732a32a3bfb4a01bf6e49f6cca9bff8a0d935de Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-04-21T12:13:12.350188 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.karaf: 4.2.6.fuse-760032-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 
io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "18.1587470206" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.6-18.1587470206 vcs-ref: e20dbf2d77a8e6138f22685c91d477de5da0647b vcs-type: git vendor: Red Hat, Inc. version: "1.6" User: "185" WorkingDir: /home/jboss Created: "2020-04-21T12:15:11Z" DockerVersion: 1.13.1 Id: sha256:42fef5b8761737fa5ed970d007f015bf7d0ae6a33a9f3c9f960618fd09c7ca6e Size: 181136962 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:c5f7af4c8188bf4619eeaa0f20094bbf5fbfbd824c973ee7da722a48d67300a9 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:c5f7af4c8188bf4619eeaa0f20094bbf5fbfbd824c973ee7da722a48d67300a9 resourceVersion: "14036" uid: 78809bc7-f2ae-4929-8bf5-4d9ea2b1f3e1 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7f2c2c4492b6b2d181be862a0a1d1b6f6851cb07244efbcb43d44f9936aa78d5 size: 80005019 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ea092d7970b26c24007a670fc6d0810dbf9531dc0d3a9d6ea514134ba5686724 size: 7541063 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d67269c45d19cfd63d3f53259d7d133172dc412b125cb18bca8908ec6076db2f size: 98202646 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - run-mysqld Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el7 - MYSQL_VERSION=8.0 - APP_DATA=/opt/app-root/src - HOME=/var/lib/mysql - SUMMARY=MySQL 8.0 SQL database server - DESCRIPTION=MySQL is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MySQL mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MySQL databases on behalf of the clients. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql - MYSQL_PREFIX=/opt/rh/rh-mysql80/root/usr - ENABLED_COLLECTIONS=rh-mysql80 - BASH_ENV=/usr/share/container-scripts/mysql/scl_enable - ENV=/usr/share/container-scripts/mysql/scl_enable - PROMPT_COMMAND=. /usr/share/container-scripts/mysql/scl_enable ExposedPorts: 3306/tcp: {} Labels: architecture: x86_64 build-date: 2024-05-30T09:34:55 com.redhat.component: rh-mysql80-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel description: MySQL is a multi-user, multi-threaded SQL database server. 
The container image provides a containerized packaging of the MySQL mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MySQL databases on behalf of the clients. distribution-scope: public io.buildah.version: 1.29.0 io.k8s.description: MySQL is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MySQL mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MySQL databases on behalf of the clients. io.k8s.display-name: MySQL 8.0 io.openshift.expose-services: 3306:mysql io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,mysql,mysql80,rh-mysql80 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhscl/mysql-80-rhel7 release: "169" summary: MySQL 8.0 SQL database server url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/mysql-80-rhel7/images/8.0-169 usage: docker run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhscl/mysql-80-rhel7 vcs-ref: ac5e8692458a3f710e4eb220961251f49bd6aaed vcs-type: git vendor: Red Hat, Inc. version: "8.0" User: "27" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2024-05-30T09:38:25Z" Id: sha256:2249d8cbe7e4d466373623d1225bab06fa658fb88b54665be29d6c947a59ff8a Size: 185768272 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhscl/mysql-80-rhel7@sha256:c6edc9920c6038890d77d1daa135b7ae4a5a7fe2213b168dbc12311de0445791 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:c6edc9920c6038890d77d1daa135b7ae4a5a7fe2213b168dbc12311de0445791 resourceVersion: "13781" uid: 92efcc5c-c595-4fca-ba33-ba866e2bd0eb - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b10a2aeb4cf985c4be41816387a0bd677b81d034bb43163651162de8ea98c654 size: 7732234 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-redis Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - REDIS_VERSION=7 - HOME=/var/lib/redis - SUMMARY=Redis in-memory data structure store, used as database, cache and message broker - DESCRIPTION=Redis 7 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. 
In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/redis - REDIS_PREFIX=/usr - REDIS_CONF=/etc/redis/redis.conf ExposedPorts: 6379/tcp: {} Labels: architecture: x86_64 build-date: "2025-11-03T00:14:42Z" com.redhat.component: redis-7-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Redis 7 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Redis 7 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. io.k8s.display-name: Redis 7 io.openshift.expose-services: 6379:redis io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,redis,redis7,redis-7 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel9/redis-7 org.opencontainers.image.revision: 330dc8fd9187f2fe8a1334ee7a7aabea889f86e8 release: "1762128860" summary: Redis in-memory data structure store, used as database, cache and message broker url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d --name redis_database -p 6379:6379 rhel9/redis-7 vcs-ref: 330dc8fd9187f2fe8a1334ee7a7aabea889f86e8 vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" Volumes: /var/lib/redis/data: {} WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-11-03T00:14:53Z" Id: sha256:eab2e91ca850a59599f1c0ace5a0f8e9d2637f02512fd5a15135d6914623649a Size: 104833093 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel9/redis-7@sha256:c9134d992b1ee68be3450debd70f96b9b353fec15fca331afcf7adb26a7f2f09 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:c9134d992b1ee68be3450debd70f96b9b353fec15fca331afcf7adb26a7f2f09 resourceVersion: "14072" uid: ce22dffc-b4b1-4593-9b40-86bc4c8cc43f - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:06f86e50a0b74ff9eb161a7d781228877c90e8ff57e9689e8cb8b0f092a2a9f9 size: 39268171 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1500af7621666bdae42658c39c0dc66ea092cda488d5e24241f2d81d1ad8afe1 size: 166780168 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a83f74eef045dca9d4bd2b650fda62543999849f80d4ab4a8bcda33a19998746 size: 5902520 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_HTTPS=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.14 - LANG=C.utf8 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift-rhel8 - FUSE_KARAF_IMAGE_VERSION=1.11 - JOLOKIA_VERSION=1.7.1.redhat-00001 - KARAF_FRAMEWORK_VERSION=4.2.15.fuse-7_11_1-00017-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Labels: architecture: x86_64 build-date: 2023-06-19T14:50:43 com.redhat.component: fuse-karaf-openshift-rhel-8-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 3.11.0 
io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.karaf: 4.2.15.fuse-7_11_1-00017-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "37.1687184687" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift-rhel8/images/1.11-37.1687184687 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: 01516c4a1897f4dc6761d0738897971a9dca1d74 vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-06-19T14:54:20Z" Id: sha256:c3b90c523bb27ad0b6651583718da08fb31a7310141f311e65a7a250fc39f5e8 Size: 211991770 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift-rhel8@sha256:c93f8f7e2d9522be4952071044457efde0a6c6fd59bd9adcd01e07d164d8235c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:c93f8f7e2d9522be4952071044457efde0a6c6fd59bd9adcd01e07d164d8235c resourceVersion: "14021" uid: 771e734c-1830-46d1-8a11-c153ab58ad9c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:00f17e0b37b0515380a4aece3cb72086c0356fc780ef4526f75476bea36a2c8b size: 76243402 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:305d73a95c8fece2b53a34e040df1c97eb6b7f7cc4e0a7933465f0b7325e3d72 size: 1329 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6b6fe63f06045c5597f7f548d6d4a7f27873232f1df1329a8777854db2d53877 size: 96601064 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true 
- AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.7 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 9d3d66a8bfcc Image: 6f334e5707aa88cd74baf5f1191a984b6ab9652184956460a93fa570097dec2e Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-12-10T17:45:20.904669 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 3.5.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "5.1575996300" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.7-5.1575996300 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: c47a11f2fd7bf261b6db74ff9bc60b96e7b1b340 vcs-type: git vendor: Red Hat, Inc. 
version: "1.7" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.7 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 9d3d66a8bfcc Image: sha256:314dd01fece012f6910bfedbd9c1144669c4c26706e28fe98a2550a030a28dba Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-12-10T17:45:20.904669 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 3.5.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "5.1575996300" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.7-5.1575996300 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: c47a11f2fd7bf261b6db74ff9bc60b96e7b1b340 vcs-type: git vendor: Red Hat, Inc. 
version: "1.7" User: "185" WorkingDir: /home/jboss Created: "2019-12-10T17:50:03Z" DockerVersion: 1.13.1 Id: sha256:cac978dfd8347708b1aeb7a2e4b0200b78e6838a6920fabfbf2f26a608c58d6d Size: 172853383 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:caba895933209aa9a4f3121f9ec8e5e8013398ab4f72bd3ff255227aad8d2c3e kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:caba895933209aa9a4f3121f9ec8e5e8013398ab4f72bd3ff255227aad8d2c3e resourceVersion: "14176" uid: 3c74d758-90f3-4950-ae45-073e3886b95b - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7f2c2c4492b6b2d181be862a0a1d1b6f6851cb07244efbcb43d44f9936aa78d5 size: 80005019 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ea092d7970b26c24007a670fc6d0810dbf9531dc0d3a9d6ea514134ba5686724 size: 7541063 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:eea016ca58c4fab7ad66cb94edce0f1dac9aa75ca3c0870b686985ef13b3f139 size: 8177824 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - run-redis Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el7 - REDIS_VERSION=6 - HOME=/var/lib/redis - SUMMARY=Redis in-memory data structure store, used as database, cache and message broker - DESCRIPTION=Redis 6 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/redis - REDIS_PREFIX=/opt/rh/rh-redis6/root/usr - ENABLED_COLLECTIONS=rh-redis6 - REDIS_CONF=/etc/redis.conf - BASH_ENV=/usr/share/container-scripts/redis/scl_enable - ENV=/usr/share/container-scripts/redis/scl_enable - PROMPT_COMMAND=. /usr/share/container-scripts/redis/scl_enable ExposedPorts: 6379/tcp: {} Labels: architecture: x86_64 build-date: 2024-05-30T09:35:38 com.redhat.component: rh-redis6-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel description: Redis 6 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. 
In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. distribution-scope: public io.buildah.version: 1.29.0 io.k8s.description: Redis 6 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. io.k8s.display-name: Redis 6 io.openshift.expose-services: 6379:redis io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,redis,redis6,rh-redis6 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhscl/redis-6-rhel7 release: "71" summary: Redis in-memory data structure store, used as database, cache and message broker url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/redis-6-rhel7/images/6-71 usage: podman run -d --name redis_database -p 6379:6379 rhscl/redis-6-rhel7 vcs-ref: 586a73398776013846e6cc6c4bc2660c113c7b3d vcs-type: git vendor: Red Hat, Inc. version: "6" User: "1001" Volumes: /var/lib/redis/data: {} WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2024-05-30T09:37:23Z" Id: sha256:fa2824ab06c9166639a8c0857f4c15dd3bb3f441376297cd7f8b354482534319 Size: 95744613 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhscl/redis-6-rhel7@sha256:cbc31c1b0625cb01f2b67d83e7b375d08d6aa756f24cf32fc3e82f0b76a4c976 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:cbc31c1b0625cb01f2b67d83e7b375d08d6aa756f24cf32fc3e82f0b76a4c976 resourceVersion: "14069" uid: 4f504656-8e3f-4bfa-bebb-e95ee26d88e5 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:bd3de4798eadff692d5419649a6ba8eca9914d6fdb51d150d669643645898104 size: 36581100 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8c19addc28dd2002288fa9a90333e5048784868aeb080286355c2d6731715d4a size: 1745 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:59f7a530b4030b4e0863fd682a8037648ffae5246a8b56b236c01e25f9526ec0 size: 177830421 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:63f28e9cbf4e4f467de5e09f5230baac60604301b4c85c45f67ceca80b7acb8a size: 796867 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml 
- JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1.redhat-00001 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift-rhel8 - FUSE_JAVA_IMAGE_VERSION=1.10 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Hostname: 877ccd18b302 Image: 4d7821d2c772ca5c7991d6ee11d1adffdb058f4d96a7eb435266bced2399fe5d Labels: architecture: x86_64 build-date: 2022-06-15T19:29:11.010231 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-rhel-8-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "25.1655314763" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift-rhel8/images/1.10-25.1655314763 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: 43a7424b0412e1f017e02872d779b2560450cd15 vcs-type: git vendor: Red Hat, Inc. 
version: "1.10" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1.redhat-00001 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift-rhel8 - FUSE_JAVA_IMAGE_VERSION=1.10 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Hostname: 877ccd18b302 Image: sha256:a4774b2e9b5c153470c333e2ddea57230634857bb59f65e069a0d065251f9813 Labels: architecture: x86_64 build-date: 2022-06-15T19:29:11.010231 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-java-openshift-rhel-8-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "25.1655314763" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift-rhel8/images/1.10-25.1655314763 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: 43a7424b0412e1f017e02872d779b2560450cd15 vcs-type: git vendor: Red Hat, Inc. 
version: "1.10" User: "185" WorkingDir: /home/jboss Created: "2022-06-15T19:29:48Z" DockerVersion: 1.13.1 Id: sha256:8245537b67f8092bc57dc6cd9d563b8f43213fcaec511a21c2c9c663c0b820bd Size: 215218543 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift-rhel8@sha256:cc6290111b86982db2789ed392d004c2c48ff7e5407a82bbeaa6d9ceb8a7963f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:cc6290111b86982db2789ed392d004c2c48ff7e5407a82bbeaa6d9ceb8a7963f resourceVersion: "14035" uid: 9637a78d-2d44-4cce-9c75-a90ad47179d6 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a9e23b64ace00a199db21d302292b434e9d3956d79319d958ecc19603d00c946 size: 39622437 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:38b71301a1d9df24c98b5a5ee8515404f42c929003ad8b13ab83d2de7de34dec size: 1742 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:439121684ff1f1dbfe8da567fa15a71a40d2149fd6fbc110f6445a3e55145b5d size: 117810118 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 02d87ca75d31 Image: 5a41a6a038b563327a70e646ddb8b620257e97900589766d84e857455f8bf41f Labels: architecture: x86_64 build-date: 2022-03-28T09:45:03.388815 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building 
and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "2.1648459569" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.11-2.1648459569 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: cc09248411a10e536e6b541f2ce7f8d409fef523 vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 02d87ca75d31 Image: sha256:39e6e04e3d15579f8e1b73a1e60ef2000d912c47d499a9394b1fa0167544d464 Labels: architecture: x86_64 build-date: 2022-03-28T09:45:03.388815 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: 
openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "2.1648459569" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.11-2.1648459569 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: cc09248411a10e536e6b541f2ce7f8d409fef523 vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss Created: "2022-03-28T09:51:56Z" DockerVersion: 1.13.1 Id: sha256:90a04ce690841235ca437be771fce7a4ea51a6511aae3906adb68827b860fdd1 Size: 157441797 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11@sha256:ce5c0becf829aca80734b4caf3ab6b76cb00f7d78f4e39fb136636a764dea7f6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:ce5c0becf829aca80734b4caf3ab6b76cb00f7d78f4e39fb136636a764dea7f6 resourceVersion: "14169" uid: e38dae74-8a02-4a53-a92a-7e2b3b0c694e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:af063699af1c142fce6707dc9306d122355e61bd23ded0d18f8a4ecfbf3aa89a size: 78847792 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:c81d6d556e6e3a4255dd2709ce18578bfbbf3eed10a4efb966bf99ab69c79e05 size: 9405288 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:d9e405c2a1a004feffa67c8e53dc0965b77b9c67d91626366ee9e53dd24e3de4 size: 199332407 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:e7ee0b12d498d3af9dcfe16241baaf8e8d0b79d5cb9f8826d719189092b982ff size: 18647985 dockerImageManifestMediaType: application/vnd.oci.image.manifest.v1+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - VERSION=10 - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el10 - NODEJS_VER=22 - NAME=s2i-base - RUBY_MAJOR_VERSION=3 - RUBY_MINOR_VERSION=3 - RUBY_VERSION=3.3 - RUBY_SCL_NAME_VERSION=33 - RUBY_SCL=ruby-33 - IMAGE_NAME=ubi10/ruby-33 - SUMMARY=Platform for building and running Ruby 3.3 applications - DESCRIPTION=Ruby 3.3 available as container is a base platform for building and running various Ruby 3.3 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-29T04:09:10Z" com.redhat.component: ruby-33-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/o:redhat:enterprise_linux:10.0 description: Ruby 3.3 available as container is a base platform for building and running various Ruby 3.3 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). 
It is simple, straight-forward, and extensible. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Ruby 3.3 available as container is a base platform for building and running various Ruby 3.3 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. io.k8s.display-name: Ruby 3.3 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,ruby,ruby33,ruby-33 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi10/ruby-33 org.opencontainers.image.revision: 0099448b57ea01623d6738ce02d9a166c76a667a release: "1761710926" summary: Platform for building and running Ruby 3.3 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-ruby-container.git --context-dir=3.3/test/puma-test-app/ ubi10/ruby-33 ruby-sample-app vcs-ref: 0099448b57ea01623d6738ce02d9a166c76a667a vcs-type: git vendor: Red Hat, Inc. version: "10.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-29T04:09:16Z" Id: sha256:162811fbac5572f40d84e45492457a1c1b9386344f8ec3777f395585120be2c1 Size: 306252278 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi10/ruby-33@sha256:cf965175e27f89fecae4982ff88f15b5711b60af5215fd9ef5ff1b6f6ddf9bcd kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:cf965175e27f89fecae4982ff88f15b5711b60af5215fd9ef5ff1b6f6ddf9bcd resourceVersion: "14116" uid: 984ffe39-fff8-459d-9605-5bc0b41dc54d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:17942523bc4bb2db6eb9f7519db38bbb70e47356d3f0ae0f15b967c0628234c6 size: 76229428 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4c98734f24339b059854b6f7ad77928ffb6b84756ecd4eeec4a15870b082d906 size: 1283 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:bf6c4b8e57dfb8d977021b349c8198d68a042c99fc7a4d799519890dcff81b63 size: 343983498 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/datagrid/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - 
JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.3.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.3 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.3.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: bd4e8b9572dd Image: ba7679b1847ecccb4610537e6ead4e8bb449e2901673ab3576edf0df635d1c25 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-10-16T15:04:47.036161 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.4.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.3.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.3.GA release: "2.1571238163" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.3-2.1571238163 vcs-ref: 78674a771ad66e4d799081bee305010e5d2d6609 vcs-type: git vendor: Red Hat, Inc. 
version: "1.3" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.3.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.3 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.3.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: bd4e8b9572dd Image: sha256:505111cf48572d6f44fd5851cfd6de30f45dae1089a4a09cc1ac49fa56aa2420 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-10-16T15:04:47.036161 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.4.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.3.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.3.GA release: "2.1571238163" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.3-2.1571238163 vcs-ref: 78674a771ad66e4d799081bee305010e5d2d6609 vcs-type: git vendor: Red Hat, Inc. 
version: "1.3" User: "185" WorkingDir: /home/jboss Created: "2019-10-16T15:13:42Z" DockerVersion: 1.13.1 Id: sha256:8676dc6ddc5c3a1757d7d50d5d1e6b1e30e405212f01f066e8437a9943f67901 Size: 420222201 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:cfd8c4ac1c495b766dd3ff1a85c35afe092858f8f65b52a5b044811719482236 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:cfd8c4ac1c495b766dd3ff1a85c35afe092858f8f65b52a5b044811719482236 resourceVersion: "13893" uid: 28662931-803e-40b9-b644-869ecb63eb63 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:172ffdf04fe0ffd5a0117272da0333271a3c558dbeaf357e7412638d99a1e462 size: 154305621 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3f5bd5395b121000ac160ee0f28db065b18174ed7001d8c8dca68dbb9c3fc98a size: 93224067 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el8 - NODEJS_VER=20 - PYTHON_VERSION=3.11 - PATH=/opt/app-root/src/.local/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PYTHONUNBUFFERED=1 - PYTHONIOENCODING=UTF-8 - LC_ALL=en_US.UTF-8 - LANG=en_US.UTF-8 - CNB_STACK_ID=com.redhat.stacks.ubi8-python-311 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - PIP_NO_CACHE_DIR=off - SUMMARY=Platform for building and running Python 3.11 applications - DESCRIPTION=Python 3.11 available as container is a base platform for building and running various Python 3.11 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. - BASH_ENV=/opt/app-root/bin/activate - ENV=/opt/app-root/bin/activate - PROMPT_COMMAND=. /opt/app-root/bin/activate ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-30T16:35:42Z" com.redhat.component: python-311-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Python 3.11 available as container is a base platform for building and running various Python 3.11 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. 
Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. distribution-scope: public io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi8-python-311 io.k8s.description: Python 3.11 available as container is a base platform for building and running various Python 3.11 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. io.k8s.display-name: Python 3.11 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,python,python311,python-311,rh-python311 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/python-311 org.opencontainers.image.revision: 86f6ba63ace0416eedaf4a37645e9cc96c402bf8 release: "1761841987" summary: Platform for building and running Python 3.11 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-python-container.git --context-dir=3.11/test/setup-test-app/ ubi8/python-311 python-sample-app vcs-ref: 86f6ba63ace0416eedaf4a37645e9cc96c402bf8 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-30T16:36:04Z" Id: sha256:c52cfb0a5dc8339fd4fa9096e42836c347a839d38e76925f284eabe3e7f6b763 Size: 343091990 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/python-311@sha256:d09ebdfdd48b8c132bc13df5e20ac3ca00a2a0a25df0b552a6ce575383d71165 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:06Z" name: sha256:d09ebdfdd48b8c132bc13df5e20ac3ca00a2a0a25df0b552a6ce575383d71165 resourceVersion: "14125" uid: 0e8cb77d-1577-451b-95ac-9ba729abc531 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d2bb55755c2edf5423e38bd42c48b277e0cb4c5c765218247001a8c8eb479a87 size: 215199578 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:82c85555fc90687a7373da5a5f6c9d13ace683105995255164339610dc2b5731 size: 22521219 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NODEJS_VER=20 - PHP_VERSION=8.0 - PHP_VER_SHORT=80 - NAME=php - SUMMARY=Platform for building and running PHP 8.0 applications - 
DESCRIPTION=PHP 8.0 available as container is a base platform for building and running various PHP 8.0 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. - PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ - APP_DATA=/opt/app-root/src - PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear - PHP_SYSCONF_PATH=/etc - PHP_HTTPD_CONF_FILE=php.conf - PHP_FPM_CONF_D_PATH=/etc/php-fpm.d - PHP_FPM_CONF_FILE=www.conf - PHP_FPM_RUN_DIR=/run/php-fpm - PHP_CLEAR_ENV=ON - PHP_MAIN_FPM_CONF_FILE=/etc/php-fpm.conf - PHP_FPM_LOG_PATH=/var/log/php-fpm - HTTPD_CONFIGURATION_PATH=/opt/app-root/etc/conf.d - HTTPD_MAIN_CONF_PATH=/etc/httpd/conf - HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d - HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d - HTTPD_VAR_RUN=/var/run/httpd - HTTPD_DATA_PATH=/var/www - HTTPD_DATA_ORIG_PATH=/var/www - HTTPD_VAR_PATH=/var ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-14T05:43:23Z" com.redhat.component: php-80-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: PHP 8.0 available as container is a base platform for building and running various PHP 8.0 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-php-container io.buildah.version: 1.41.4 io.k8s.description: PHP 8.0 available as container is a base platform for building and running various PHP 8.0 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. io.k8s.display-name: Apache 2.4 with PHP 8.0 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,php,php80,php-80 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/php-80 org.opencontainers.image.revision: c62fb82ae700fbde24b80ce66c6ea1613291b2a0 release: "1760420580" summary: Platform for building and running PHP 8.0 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=8.0/test/test-app ubi9/php-80 sample-server vcs-ref: c62fb82ae700fbde24b80ce66c6ea1613291b2a0 vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-14T05:43:29Z" Id: sha256:648f601acba4f5b492725482d61cabf7b14a4b775cfa1edd9e4c86f6a33e9075 Size: 334826231 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/php-80@sha256:d0b4c0b12d8dd76a31a67bd429ce78d327d1c4fcd4896dd77557ee484379defa kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:d0b4c0b12d8dd76a31a67bd429ce78d327d1c4fcd4896dd77557ee484379defa resourceVersion: "14080" uid: 4579b568-c9bb-4bc3-ba6c-5cba65557c3d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:36fc70f9a3e5e588e4b861b4e8a25ce702f902f79945e8608dc289fc8a829b74 size: 7299368 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-redis Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - REDIS_VERSION=6 - HOME=/var/lib/redis - SUMMARY=Redis in-memory data structure store, used as database, cache and message broker - DESCRIPTION=Redis 6 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/redis - REDIS_PREFIX=/usr - REDIS_CONF=/etc/redis/redis.conf ExposedPorts: 6379/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-29T20:19:08Z" com.redhat.component: redis-6-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Redis 6 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. 
distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Redis 6 available as container, is an advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. You can run atomic operations on these types, like appending to a string; incrementing the value in a hash; pushing to a list; computing set intersection, union and difference; or getting the member with highest ranking in a sorted set. In order to achieve its outstanding performance, Redis works with an in-memory dataset. Depending on your use case, you can persist it either by dumping the dataset to disk every once in a while, or by appending each command to a log. io.k8s.display-name: Redis 6 io.openshift.expose-services: 6379:redis io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,redis,redis6,redis-6 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel9/redis-6 org.opencontainers.image.revision: 7741b3b4d51d1c99b4a6715c14ba99227121ed55 release: "1761769097" summary: Redis in-memory data structure store, used as database, cache and message broker url: https://catalog.redhat.com/en/search?searchType=containers usage: podman run -d --name redis_database -p 6379:6379 rhel9/redis-6 vcs-ref: 7741b3b4d51d1c99b4a6715c14ba99227121ed55 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" Volumes: /var/lib/redis/data: {} WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-29T20:19:10Z" Id: sha256:fcaea22f5bf4c9321db7b2cf866a03114b45cad202af5f812d3144d3ad020918 Size: 104399952 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel9/redis-6@sha256:d138037e44e951accd8941222908b6d2291c8b2dfd4fad49195f83d0c5e6e77f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:d138037e44e951accd8941222908b6d2291c8b2dfd4fad49195f83d0c5e6e77f resourceVersion: "14071" uid: 598ac33f-35b7-4783-95eb-818c5e0f20fe - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a9e23b64ace00a199db21d302292b434e9d3956d79319d958ecc19603d00c946 size: 39622437 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:38b71301a1d9df24c98b5a5ee8515404f42c929003ad8b13ab83d2de7de34dec size: 1742 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f7b4b66acf4505d5bd283601f810ded19c2f40df11bd59f44fc824c1c5895f79 size: 108674770 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - 
JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 02d87ca75d31 Image: 6f6b991d3055d8eb98f901f1714f9845c029d05fe23f80f12d7fe50d486e9d83 Labels: architecture: x86_64 build-date: 2022-03-28T09:59:20.384463 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "2.1648459725" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.11-2.1648459725 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 21ecff68424e72e8c23c4ee3c91afee2eff02ab2 vcs-type: git vendor: Red Hat, Inc. 
version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.11 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 02d87ca75d31 Image: sha256:758aee90e87fb437215e51e1410e455fd310f123412653c7194ecc3d722fc7f3 Labels: architecture: x86_64 build-date: 2022-03-28T09:59:20.384463 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "2.1648459725" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.11-2.1648459725 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 21ecff68424e72e8c23c4ee3c91afee2eff02ab2 vcs-type: git vendor: Red Hat, Inc. 
version: "1.11" User: "185" WorkingDir: /home/jboss Created: "2022-03-28T10:04:36Z" DockerVersion: 1.13.1 Id: sha256:b8a60b7b036137f59ef1ae634e92cbb6cdb714d8b4bcfe51977b76f5ab05fa7e Size: 148305773 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17@sha256:d186c94f8843f854d77b2b05d10efb0d272f88a4bf4f1d8ebe304428b9396392 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:d186c94f8843f854d77b2b05d10efb0d272f88a4bf4f1d8ebe304428b9396392 resourceVersion: "14142" uid: 928e1d6f-216d-4e57-8c3f-4cc78d0209e5 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d586dbf955a96ae6f2e98084e23c2221bba37c37d96406cb7eaab67a00e9cd27 size: 37762932 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NAME=nginx - NGINX_VERSION=1.20 - NGINX_SHORT_VER=120 - VERSION=0 - SUMMARY=Platform for running nginx 1.20 or building nginx-based application - DESCRIPTION=Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.20 daemon. The image can be used as a base image for other applications based on nginx 1.20 web server. Nginx server image can be extended using source-to-image tool. - NGINX_CONFIGURATION_PATH=/opt/app-root/etc/nginx.d - NGINX_CONF_PATH=/etc/nginx/nginx.conf - NGINX_DEFAULT_CONF_PATH=/opt/app-root/etc/nginx.default.d - NGINX_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/nginx - NGINX_APP_ROOT=/opt/app-root - NGINX_LOG_PATH=/var/log/nginx - NGINX_PERL_MODULE_PATH=/opt/app-root/etc/perl ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-28T04:21:26Z" com.redhat.component: nginx-120-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.20 daemon. The image can be used as a base image for other applications based on nginx 1.20 web server. Nginx server image can be extended using source-to-image tool. 
distribution-scope: public help: For more information visit https://github.com/sclorg/nginx-container io.buildah.version: 1.41.4 io.k8s.description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.20 daemon. The image can be used as a base image for other applications based on nginx 1.20 web server. Nginx server image can be extended using source-to-image tool. io.k8s.display-name: Nginx 1.20 io.openshift.expose-services: 8443:https io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nginx,nginx-120 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/nginx-120 org.opencontainers.image.revision: e317f97237142a78b31e5928e3eac651abc6e2e3 release: "1761625252" summary: Platform for running nginx 1.20 or building nginx-based application url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi9/nginx-120:latest vcs-ref: e317f97237142a78b31e5928e3eac651abc6e2e3 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-28T04:21:33Z" Id: sha256:32f9d8bbd3e177aa319aa13820e12eb60f31cbc006ab9fb1cdbe9026c1507bce Size: 134864201 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/nginx-120@sha256:d19dcf07e61e96eaff277f3f1b41a802aee2031c561d63012d99b1f2ed51467e kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:d19dcf07e61e96eaff277f3f1b41a802aee2031c561d63012d99b1f2ed51467e resourceVersion: "14055" uid: 58f58ce0-33c1-40e6-82e9-203f17ada00e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:bd3de4798eadff692d5419649a6ba8eca9914d6fdb51d150d669643645898104 size: 36581100 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8c19addc28dd2002288fa9a90333e5048784868aeb080286355c2d6731715d4a size: 1745 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:59f7a530b4030b4e0863fd682a8037648ffae5246a8b56b236c01e25f9526ec0 size: 177830421 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:57792bf6bcdf0f6f01f10a3a49f34f0c26792350151e7cd45b2a833c4fbcfb7f size: 794525 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1.redhat-00001 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - 
S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift-rhel8 - FUSE_KARAF_IMAGE_VERSION=1.10 - KARAF_FRAMEWORK_VERSION=4.2.12.fuse-7_10_2-00002-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Hostname: 877ccd18b302 Image: b0c0979fe11582f33509094a2436aa5f81c7a37444cfb3e268f9b721463aa1e9 Labels: architecture: x86_64 build-date: 2022-06-15T18:47:17.496427 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-rhel-8-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.karaf: 4.2.12.fuse-7_10_2-00002-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "26.1655314764" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift-rhel8/images/1.10-26.1655314764 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: d9a489d2f70213966675d5a84d68c22707386be8 vcs-type: git vendor: Red Hat, Inc. 
version: "1.10" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1.redhat-00001 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift-rhel8 - FUSE_KARAF_IMAGE_VERSION=1.10 - KARAF_FRAMEWORK_VERSION=4.2.12.fuse-7_10_2-00002-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Hostname: 877ccd18b302 Image: sha256:0b9bf9c45a550a1bee13e260db6a1b9c2085682d834d26a5c68144950baabcd7 Labels: architecture: x86_64 build-date: 2022-06-15T18:47:17.496427 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-rhel-8-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.karaf: 4.2.12.fuse-7_10_2-00002-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "26.1655314764" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift-rhel8/images/1.10-26.1655314764 usage: 
https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: d9a489d2f70213966675d5a84d68c22707386be8 vcs-type: git vendor: Red Hat, Inc. version: "1.10" User: "185" WorkingDir: /home/jboss Created: "2022-06-15T18:48:01Z" DockerVersion: 1.13.1 Id: sha256:93093892a482ac1f257f578d028aa1f9d1777a8d33cf930ed19c85878d140435 Size: 215216562 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift-rhel8@sha256:d212c0ad7ec7b340beff1776c0216ea8c0aa66423538a84910c36e00db2366b5 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:d212c0ad7ec7b340beff1776c0216ea8c0aa66423538a84910c36e00db2366b5 resourceVersion: "14019" uid: 39bfb7ec-b9b6-420e-97f3-af659b115a13 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a5be390fa01f3f78ea6913ede564517ee9621c40efa71a3466f9f9e72d79a5fd size: 20421583 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NAME=nginx - NGINX_VERSION=1.26 - NGINX_SHORT_VER=126 - VERSION=0 - SUMMARY=Platform for running nginx 1.26 or building nginx-based application - DESCRIPTION=Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.26 daemon. The image can be used as a base image for other applications based on nginx 1.26 web server. Nginx server image can be extended using source-to-image tool. - NGINX_CONFIGURATION_PATH=/opt/app-root/etc/nginx.d - NGINX_CONF_PATH=/etc/nginx/nginx.conf - NGINX_DEFAULT_CONF_PATH=/opt/app-root/etc/nginx.default.d - NGINX_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/nginx - NGINX_APP_ROOT=/opt/app-root - NGINX_LOG_PATH=/var/log/nginx - NGINX_PERL_MODULE_PATH=/opt/app-root/etc/perl ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T20:11:35Z" com.redhat.component: nginx-126-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.26 daemon. The image can be used as a base image for other applications based on nginx 1.26 web server. Nginx server image can be extended using source-to-image tool. 
distribution-scope: public help: For more information visit https://github.com/sclorg/nginx-container io.buildah.version: 1.41.4 io.k8s.description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.26 daemon. The image can be used as a base image for other applications based on nginx 1.26 web server. Nginx server image can be extended using source-to-image tool. io.k8s.display-name: Nginx 1.26 io.openshift.expose-services: 8443:https io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nginx,nginx-126 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/nginx-126 org.opencontainers.image.revision: 5f8ce4a574aabfe3f73e75e8f4b33038def1e044 release: "1760386259" summary: Platform for running nginx 1.26 or building nginx-based application url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi9/nginx-126:latest vcs-ref: 5f8ce4a574aabfe3f73e75e8f4b33038def1e044 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T20:11:50Z" Id: sha256:52b3fb383d8c89dd21390d27bce5bbbd8b629eff408b6069f3f10b74db7736c8 Size: 117522913 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/nginx-126@sha256:d2838047b28f1385d57abc68a907830d48df647d3a06bb6ab155567097d940c7 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:d2838047b28f1385d57abc68a907830d48df647d3a06bb6ab155567097d940c7 resourceVersion: "14061" uid: d04255f3-cd9f-4933-b59c-538573e3966e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:378837c0e24ad4a2e33f0eb3d68dc0c31d9a7dbbd5357d4acafec1d3a7930602 size: 74923740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e17262bc23414bd3c0e9808ad7a87b055fe5afec386da42115a839ea2083d233 size: 1303 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0eeb656bc1e64b6c3ba63f2fa9450feaef3c60159d48eb2171ad1f25f5e655d9 size: 3805266 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:77187f02cbeff1e8de9cdd8e850f300e7267ddf19991dcbc588a498c14df3ff0 size: 70351735 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:08665aa8b0b6412d51fa56dad388c147dd7625d1bfd2b39a7b6e26e58f1b17e0 size: 208478086 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:330e257ad115763faa220fe3358d9d1606b01628ecfa750111a16677ca14eff8 size: 268095081 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:cd1e6817f6d19716cffe8dca60922c869613a8bfbeee9e77817f5ef2a5849e01 size: 285771422 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss-eap-7/eap71-openshift - JBOSS_IMAGE_VERSION=1.3 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - LAUNCH_JBOSS_IN_BACKGROUND=true - JBOSS_PRODUCT=eap - JBOSS_EAP_VERSION=7.1.3.GA - 
PRODUCT_VERSION=7.1.3.GA - JBOSS_HOME=/opt/eap - HTTPS_ENABLE_HTTP2=true - JOLOKIA_VERSION=1.5.0 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - MAVEN_VERSION=3.5 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 3183206101f3 Image: 4ad69aa23cfb9b892e7aa2c52e1c86d14f17b78daf50fa008d176947f0c2d68e Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-08-01T21:57:33.376903 com.redhat.build-host: osbs-cpt-001.ocp.osbs.upshift.eng.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 description: Platform for building and running Apache Camel applications on EAP 7.1 distribution-scope: public io.cekit.version: 2.0.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Cloud Enablement Feedback name: fuse7/fuse-eap-openshift org.concrt.version: 1.4.1 org.jboss.product: eap org.jboss.product.eap.version: 7.1.3.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.1.3.GA release: "15.1533128084" summary: Platform for building and running Apache Camel applications on EAP 7.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.0-15.1533128084 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: 13dc6bd54a7966b1a10762193c0f65690991c351 vcs-type: git vendor: Red Hat, Inc. 
version: "1.0" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss-eap-7/eap71-openshift - JBOSS_IMAGE_VERSION=1.3 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - LAUNCH_JBOSS_IN_BACKGROUND=true - JBOSS_PRODUCT=eap - JBOSS_EAP_VERSION=7.1.3.GA - PRODUCT_VERSION=7.1.3.GA - JBOSS_HOME=/opt/eap - HTTPS_ENABLE_HTTP2=true - JOLOKIA_VERSION=1.5.0 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - DEFAULT_ADMIN_USERNAME=eapadmin - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - MAVEN_VERSION=3.5 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 3183206101f3 Image: sha256:4ef9eecb5632e9bf83b9d40c895b92f3013f73f90e1b64fede0acdf97e79e06f Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-08-01T21:57:33.376903 com.redhat.build-host: osbs-cpt-001.ocp.osbs.upshift.eng.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 description: Platform for building and running Apache Camel applications on EAP 7.1 distribution-scope: public io.cekit.version: 2.0.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Cloud Enablement Feedback name: fuse7/fuse-eap-openshift org.concrt.version: 1.4.1 org.jboss.product: eap org.jboss.product.eap.version: 7.1.3.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.1.3.GA release: "15.1533128084" summary: Platform for building and running Apache Camel applications on EAP 7.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.0-15.1533128084 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: 13dc6bd54a7966b1a10762193c0f65690991c351 vcs-type: git vendor: Red Hat, Inc. 
version: "1.0" User: "185" WorkingDir: /home/jboss Created: "2018-08-01T21:58:39Z" DockerVersion: 1.12.6 Id: sha256:b50c5d57a003d8a0559558bdb58c1fd491474308d8ede5e0349ff59e7d922435 Size: 911433251 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift@sha256:d2e75dac02681ed5aded89115b736ba5e83011294686cd6d04780aebffc0ff5d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:d2e75dac02681ed5aded89115b736ba5e83011294686cd6d04780aebffc0ff5d resourceVersion: "14018" uid: 34bc2e21-d4d9-400e-8c71-6df54bf63884 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4752687a61a97d6f352ae62c381c87564bcb2f5b6523a05510ca1fb60d640216 size: 36442442 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0344366a246a0f7590c2bae4536c01f15f20c6d802b4654ce96ac81047bc23f3 size: 1740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1df969f83266d1d6babc933f3905ecf8ed6121632b2291d337ec8825c3287228 size: 86345800 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: fd16d6f2e774 Image: ed43a9acbbcac68ad808bc163372b006bb9b47bffe2498c136a3469958f9e1ed Labels: architecture: x86_64 build-date: 2022-06-15T16:16:56.687745 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "1.1655306380" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.13-1.1655306380 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 9a9c6a4fdb98eb95ee6b0c854104d865f0f7ccea vcs-type: git vendor: Red Hat, Inc. 
version: "1.13" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: fd16d6f2e774 Image: sha256:01b3c3845ea792b83a73e0ca7c751d608c72213b4e8a2d6e3028d46fe877e3ef Labels: architecture: x86_64 build-date: 2022-06-15T16:16:56.687745 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "1.1655306380" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.13-1.1655306380 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 9a9c6a4fdb98eb95ee6b0c854104d865f0f7ccea vcs-type: git vendor: Red Hat, Inc. 
version: "1.13" User: "185" WorkingDir: /home/jboss Created: "2022-06-15T16:19:38Z" DockerVersion: 1.13.1 Id: sha256:eadf411b954405e4febecea4f7e9b7e2da59a2f9f33f8cfebe4c11522ade1c23 Size: 122795273 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:d2f17aaf2f871fda5620466d69ac67b9c355c0bae5912a1dbef9a51ca8813e50 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:d2f17aaf2f871fda5620466d69ac67b9c355c0bae5912a1dbef9a51ca8813e50 resourceVersion: "14153" uid: 34350304-813c-4ea9-905e-e269ac2d99f5 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f91ed22a103a436900108dad5caaa0ee9f83605296df3d5164ec58bc5ab37d7b size: 140292386 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PLATFORM=el9 - NODEJS_VERSION=22 - NPM_RUN=start - NAME=nodejs - NPM_CONFIG_PREFIX=/opt/app-root/src/.npm-global - PATH=/opt/app-root/src/node_modules/.bin/:/opt/app-root/src/.npm-global/bin/:/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - CNB_STACK_ID=com.redhat.stacks.ubi9-nodejs-20 - CNB_USER_ID=1001 - CNB_GROUP_ID=0 - SUMMARY=Platform for building and running Node.js 22 applications - DESCRIPTION=Node.js 22 available as container is a base platform for building and running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-28T04:26:51Z" com.redhat.component: nodejs-22-container com.redhat.deployments-dir: /opt/app-root/src com.redhat.dev-mode: DEV_MODE:false com.redhat.dev-mode.port: DEBUG_PORT:5858 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Node.js 22 available as container is a base platform for building and running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. 
distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-nodejs-container io.buildah.version: 1.41.4 io.buildpacks.stack.id: com.redhat.stacks.ubi9-nodejs-20 io.k8s.description: Node.js 22 available as container is a base platform for building and running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. io.k8s.display-name: Node.js 22 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nodejs,nodejs22 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/nodejs-22 org.opencontainers.image.revision: 1c0398f9866ff799beb429e1f088ff3bd30e0431 release: "1761625559" summary: Platform for building and running Node.js 22 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi9/nodejs-22:latest vcs-ref: 1c0398f9866ff799beb429e1f088ff3bd30e0431 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-28T04:27:05Z" Id: sha256:7e94f6aafe4801710b4ecc6c784eb2bd38100ac8930520765981c0c3321b44c7 Size: 237392889 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/nodejs-22@sha256:d4bd4b422ffcfd52334f42f372d511d68496991b47a24f7dacfb032edb250475 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:d4bd4b422ffcfd52334f42f372d511d68496991b47a24f7dacfb032edb250475 resourceVersion: "14095" uid: c66ff5a6-e757-4c4c-875f-8f6e57a9b299 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e74659e3e033616f4f0731f4c22814ff4543cb3c1b85a05f6484647b4fea7b3d size: 136155585 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d4ef397ac9c5a3062631f4563f08fe2a5f2f2f63c78082be9a57a2961ca9f577 size: 233168554 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_HTTPS=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - 
JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.17 - LANG=C.utf8 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift-jdk11-rhel8 - FUSE_JAVA_IMAGE_VERSION=1.12 - JOLOKIA_VERSION=1.7.2.redhat-00002 - PROMETHEUS_JMX_EXPORTER_VERSION=0.18.0.redhat-00001 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i:/usr/local/s2i - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Labels: architecture: x86_64 build-date: 2024-05-23T17:52:16 com.redhat.component: fuse-java-openshift-jdk11-rhel-8-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.jolokia: 1.7.2.redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.18.0.redhat-00001 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift-jdk11-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "18.1716485727" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift-jdk11-rhel8/images/1.12-18.1716485727 usage: https://access.redhat.com/documentation/en-us/openjdk/11/html/using_openjdk_11_source-to-image_for_openshift/index vcs-ref: 980be2285f68b4a7eb9a182c894bd0624ae8ba9b vcs-type: git vendor: Red Hat, Inc. 
version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-05-23T17:55:58Z" Id: sha256:2611e0e61605a190ac6fd2304ab7713a3961e12d9909766fb43e7eab4790894b Size: 408672834 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift-jdk11-rhel8@sha256:d64489de1b4cb65c6d6de5add7ad6e9f4a6817c8e62987ad5859814085beac06 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:d64489de1b4cb65c6d6de5add7ad6e9f4a6817c8e62987ad5859814085beac06 resourceVersion: "13361" uid: e4845dec-de0f-4be3-97f2-ae50522c6ab7 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c43687042a41aad69fc526985ef2b82012c011db7e0e26faba4fc860ad32d88e size: 75837780 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2b7b014ba1b80abb29391141385bd32668571313647317d1d64d8b5cebb1f228 size: 1331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2825299765472d0b62c1ed19ebb564a8a191b88ce49639471a274d03e7f9151e size: 3910026 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8df146e29e789eb3e8bec37172cca00cda60cf40f6924dda00379b283e2ce6db size: 85123374 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:850e610772239a316cb32b95f0db47b4f86d97f53de39c62741f70e618799232 size: 22750438 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.4 - JOLOKIA_VERSION=1.5.0.redhat-1 - KARAF_FRAMEWORK_VERSION=4.2.0.fuse-740027-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 8ea581222c6a Image: bb51c0358eb39adbae046487f6e4c23a7787741b6006759fd05f7cb8caacd754 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T11:36:32.262209 com.redhat.build-host: cpt-1004.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.karaf: 4.2.0.fuse-740027-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: 
image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "5.1567588143" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.4-5.1567588143 vcs-ref: 33e2d89f70a9779f7acf9bafde91ce363a7147ea vcs-type: git vendor: Red Hat, Inc. version: "1.4" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.4 - JOLOKIA_VERSION=1.5.0.redhat-1 - KARAF_FRAMEWORK_VERSION=4.2.0.fuse-740027-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 8ea581222c6a Image: sha256:4cc8443c7956b69a425ddc4f570ac545138e4dfb4863964a1c5d3107c15a2cb1 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-09-04T11:36:32.262209 com.redhat.build-host: cpt-1004.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.karaf: 4.2.0.fuse-740027-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "5.1567588143" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.4-5.1567588143 vcs-ref: 33e2d89f70a9779f7acf9bafde91ce363a7147ea vcs-type: git vendor: Red Hat, Inc. 
version: "1.4" User: "185" WorkingDir: /home/jboss Created: "2019-09-04T11:39:05Z" DockerVersion: 1.13.1 Id: sha256:b493d8f46bf13e5356aab08bd2bfeda4e457fe06d5605cea87cc49fc9f6dd58f Size: 187629507 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:d88de3935a4ad6a2a8a04a49733d1e7cba14688cd5c8081b78538a86fc499d5a kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:d88de3935a4ad6a2a8a04a49733d1e7cba14688cd5c8081b78538a86fc499d5a resourceVersion: "14030" uid: 1b652439-0c90-45a6-a6ce-dd36fab1f991 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:fb81340b71482eacd09e8c4908880e35352653ddb5edb14e1fd8b12c06bdac9e size: 76548605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:76e5e9028347afe28f3682b2d663ac154c0c9523fe03874b18ad8ad5c6e931f9 size: 1817 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:29c0ad2914112972d23ebf1b10ae9c04c800344caa8294ab7911d249923c6f4e size: 400776328 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0a4a72eedab58ee3cfdd8432e34b85da0be50f6c39e649617934b755313a52e5 size: 632585890 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - EAP_FULL_GROUPID=org.jboss.eap - JBOSS_EAP_VERSION=7.4.2 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.4.2 - WILDFLY_VERSION=7.4.2.GA-redhat-00002 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.8.Final - GALLEON_WILDFLY_VERSION=5.1.2.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - 
GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - S2I_FP_VERSION=23.0.0.Final - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap74-openjdk8-openshift-rhel7 - JBOSS_IMAGE_VERSION=7.4.2 - WILDFLY_CAMEL_VERSION=5.8.0.fuse-7_10_2-00001-redhat-00005 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: be161c538685 Image: f15e1a7ef28f62202923f0c4571a60a22e4773abe33e14c68c5fa30644dfc779 Labels: architecture: x86_64 build-date: 2022-06-15T16:48:32.571018 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Camel applications on EAP 7.4 distribution-scope: public io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.4 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Thomas Diesler name: fuse7/fuse-eap-openshift-jdk8-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.4.2 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.4.2 release: "31.1655306691" summary: Platform for building and running Apache Camel applications on EAP 7.4 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift-jdk8-rhel7/images/1.10-31.1655306691 vcs-ref: 0e0c78a6b40a9b0c638b08375587fcb368bbb7de vcs-type: git vendor: Red Hat, Inc. 
version: "1.10" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - EAP_FULL_GROUPID=org.jboss.eap - JBOSS_EAP_VERSION=7.4.2 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.4.2 - WILDFLY_VERSION=7.4.2.GA-redhat-00002 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.8.Final - GALLEON_WILDFLY_VERSION=5.1.2.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - S2I_FP_VERSION=23.0.0.Final - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - 
AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap74-openjdk8-openshift-rhel7 - JBOSS_IMAGE_VERSION=7.4.2 - WILDFLY_CAMEL_VERSION=5.8.0.fuse-7_10_2-00001-redhat-00005 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: be161c538685 Image: sha256:d51a7aa81bc636826a578f96ce9d30132ce93881c49d988dc90ce013a20de91b Labels: architecture: x86_64 build-date: 2022-06-15T16:48:32.571018 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Camel applications on EAP 7.4 distribution-scope: public io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.4 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Thomas Diesler name: fuse7/fuse-eap-openshift-jdk8-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.4.2 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.4.2 release: "31.1655306691" summary: Platform for building and running Apache Camel applications on EAP 7.4 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift-jdk8-rhel7/images/1.10-31.1655306691 vcs-ref: 0e0c78a6b40a9b0c638b08375587fcb368bbb7de vcs-type: git vendor: Red Hat, Inc. 
version: "1.10" User: "185" WorkingDir: /home/jboss Created: "2022-06-15T16:50:19Z" DockerVersion: 1.13.1 Id: sha256:641183ac81d50ec5ed2f3940403ff4023133e0dac0cb762fcd9d2772b12de418 Size: 1109925063 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift-jdk8-rhel7@sha256:d8d642e25fc863ed0ee603addee008db975b1c6b73a02a1e5b4bc446fbf5ec76 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:d8d642e25fc863ed0ee603addee008db975b1c6b73a02a1e5b4bc446fbf5ec76 resourceVersion: "14022" uid: f9f16912-cbbf-496f-a92b-2567b64a5176 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:34cbb242d65bd3b5ea98fd0bf5be8ce2b2316994693b130adb043cd6537ee9ca size: 76239722 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0a4b6512aa42577405f0f324407ee3140e668e9bb470c3fb472e11266482468f size: 1414 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3822f045e5c5f05ab31bc7884c10f4b349f1e7f53f6cab701196045f2b1acac1 size: 349609792 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/datagrid/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.6.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.6 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.6.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: e1afd63c27cb Image: 51ef416e309aac0c4832a4ec44b115ed1b6e1a681e77c41ee7901080de344d84 Labels: architecture: x86_64 build-date: 2020-08-05T02:32:18.482432 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public 
io.cekit.version: 3.7.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.6.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.6.GA release: "3" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.6-3 vcs-ref: ad8cdc7342bc6d7fe3e4da0467c51cd59818b54e vcs-type: git vendor: Red Hat, Inc. version: "1.6" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_EAP_S2I_MODULE=/opt/jboss/container/eap/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.5.0 - AB_PROMETHEUS_ENABLE=false - AB_PROMETHEUS_JMX_EXPORTER_PORT=9779 - DEFAULT_ADMIN_USERNAME=jdgadmin - JBOSS_DATAGRID_VERSION=7.3.6.GA - JBOSS_HOME=/opt/datagrid - JBOSS_IMAGE_NAME=jboss-datagrid-7/datagrid73-openshift - JBOSS_IMAGE_VERSION=1.6 - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JBOSS_PRODUCT=datagrid - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.3.6.GA ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} 11211/tcp: {} 11222/tcp: {} 11333/tcp: {} Hostname: e1afd63c27cb Image: sha256:1d86aa65659094520db0c6fa32cff6b7742a5611f61932da1b198aa3fa46e45b Labels: architecture: x86_64 build-date: 2020-08-05T02:32:18.482432 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-datagrid-7-datagrid73-openshift-container com.redhat.deployments-dir: /opt/datagrid/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Data Grid 7.3 for OpenShift container image distribution-scope: public io.cekit.version: 3.7.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Provides a scalable in-memory distributed database designed for fast access to large volumes of data. 
io.k8s.display-name: JBoss Data Grid 7.3 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: datagrid,java,jboss,xpaas name: jboss-datagrid-7/datagrid73-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/datagrid/standalone/deployments org.jboss.product: datagrid org.jboss.product.datagrid.version: 7.3.6.GA org.jboss.product.openjdk.version: "11.0" org.jboss.product.version: 7.3.6.GA release: "3" summary: Red Hat JBoss Data Grid 7.3 for OpenShift container image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-datagrid-7/datagrid73-openshift/images/1.6-3 vcs-ref: ad8cdc7342bc6d7fe3e4da0467c51cd59818b54e vcs-type: git vendor: Red Hat, Inc. version: "1.6" User: "185" WorkingDir: /home/jboss Created: "2020-08-05T02:40:36Z" DockerVersion: 1.13.1 Id: sha256:7c793a770769ee3c6fc74a6d6c8c3d590a24674a554cc3ec01fda70f1f6722d6 Size: 425858697 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-datagrid-7/datagrid73-openshift@sha256:da558fa9ad7c357ed794eb549ac12a799eab97951f2e3cbc9501e413a348119a kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:02Z" name: sha256:da558fa9ad7c357ed794eb549ac12a799eab97951f2e3cbc9501e413a348119a resourceVersion: "13902" uid: e45bd5c2-e7fb-45d6-9ac9-52821f410679 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:751bf1af528874dba437014af54078013e46b2eca91e82aab200d452b0165af9 size: 76529550 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:71bd5e95c80acea5839e4c515a585f43158bffd718c2be1795b4825b043975a3 size: 1565 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1340551e53a0ce649afcd25813bdc14222bc37d0bb3e7b829da3b7db1038c58e size: 109759662 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - 
JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.11 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 1c8f39c8f8b8 Image: dc17dbe64962e44e928d2232fe07e31b03e3521d15d6085ac633c36bd193653a Labels: architecture: x86_64 build-date: 2022-03-28T13:32:16.229370 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1648472891" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.11-1.1648472891 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 94c118a151e3ff78308dff9b2e00f847fa40acec vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.11 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 1c8f39c8f8b8 Image: sha256:c7e9b52fd53b0088d71ec229796dec7e6340022b55033abbeaf5a95514234e89 Labels: architecture: x86_64 build-date: 
2022-03-28T13:32:16.229370 com.redhat.build-host: cpt-1005.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1.1648472891" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.11-1.1648472891 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 94c118a151e3ff78308dff9b2e00f847fa40acec vcs-type: git vendor: Red Hat, Inc. version: "1.11" User: "185" WorkingDir: /home/jboss Created: "2022-03-28T13:36:36Z" DockerVersion: 1.13.1 Id: sha256:4e13cb8b2a169481a53ce6cc4e1e12434dac7d7171285331fbb914c88931f146 Size: 186298319 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:db3f5192237bfdab2355304f17916e09bc29d6d529fdec48b09a08290ae35905 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:db3f5192237bfdab2355304f17916e09bc29d6d529fdec48b09a08290ae35905 resourceVersion: "14162" uid: 25dfdf2e-0d42-48c5-a49b-3a7a2d54cc05 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8e663e919f6cd0805d39d14202a5c0b789e7df3c3051c54170b428086a1c9a91 size: 76431158 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1e6175de2c956530fa18c8a30722bf828d70a720afa2f2e481bfb4c520963c91 size: 1550 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b82cf656d66b57b9fb0c59738c187b4e18d6d595000ca270a8a82ceed87a4333 size: 109471667 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - 
JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: f1d2c1bacaf9 Image: e79f508f51bdd8e69bb312a4312de33c167d592a105ed97de8c7d0772cbff6fa Labels: architecture: x86_64 build-date: 2021-07-21T13:17:35.687663 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "30.1626872912" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.8-30.1626872912 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 9deec894b992116cb4d7a0a9f656b48f14f63ef5 vcs-type: git vendor: Red Hat, Inc. 
version: "1.8" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: f1d2c1bacaf9 Image: sha256:f041e64b091906d78a657370488791753e15dc123ba389392358b74cd1151ed6 Labels: architecture: x86_64 build-date: 2021-07-21T13:17:35.687663 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.cekit.version: 3.6.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "30.1626872912" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.8-30.1626872912 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 9deec894b992116cb4d7a0a9f656b48f14f63ef5 vcs-type: git vendor: Red Hat, Inc. 
version: "1.8" User: "185" WorkingDir: /home/jboss Created: "2021-07-21T13:24:07Z" DockerVersion: 1.13.1 Id: sha256:11c20bcfd2f3b136893b899f7f1d71bc0434ea3a3bcec6b25c08ae4a7af220ac Size: 185911913 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:dbe9905fe2b20ed30b0e2d64543016fa9c145eeb5a678f720ba9d2055f0c9f88 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:dbe9905fe2b20ed30b0e2d64543016fa9c145eeb5a678f720ba9d2055f0c9f88 resourceVersion: "14178" uid: 297f13a5-f23e-47f3-bfc6-b46db51a7722 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:378837c0e24ad4a2e33f0eb3d68dc0c31d9a7dbbd5357d4acafec1d3a7930602 size: 74923740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e17262bc23414bd3c0e9808ad7a87b055fe5afec386da42115a839ea2083d233 size: 1303 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0eeb656bc1e64b6c3ba63f2fa9450feaef3c60159d48eb2171ad1f25f5e655d9 size: 3805266 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:77187f02cbeff1e8de9cdd8e850f300e7267ddf19991dcbc588a498c14df3ff0 size: 70351735 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c74ad84907a7cc736fea212bfcfff402c892a0a34e31f31b117ce8de7ed7f84d size: 25587180 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.4 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JAVA_DATA_DIR=/deployments/data - JOLOKIA_VERSION=1.5.0 - MAVEN_VERSION=3.5 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 3183206101f3 Image: 3320f15730e3d95d98b084a00d040f8e97053ec1aa0858da207a0f4b332871b8 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-08-01T18:24:20.705731 com.redhat.build-host: osbs-cpt-001.ocp.osbs.upshift.eng.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 2.0.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Cloud Enablement Feedback name: redhat-openjdk-18/openjdk18-openshift org.concrt.version: 2.0.0 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "6.1533127995" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: 
https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.4-6.1533127995 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: f9975670f0ca1779afb4d9c0b36a388af39d00ae vcs-type: git vendor: Red Hat, Inc. version: "1.4" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.4 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JAVA_DATA_DIR=/deployments/data - JOLOKIA_VERSION=1.5.0 - MAVEN_VERSION=3.5 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 3183206101f3 Image: sha256:6a0570f2e2c7fcfbcce59d977caa7b10135131c3b13eb44225b6266bb1860eaf Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-08-01T18:24:20.705731 com.redhat.build-host: osbs-cpt-001.ocp.osbs.upshift.eng.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 2.0.0 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Cloud Enablement Feedback name: redhat-openjdk-18/openjdk18-openshift org.concrt.version: 2.0.0 org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "6.1533127995" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.4-6.1533127995 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: f9975670f0ca1779afb4d9c0b36a388af39d00ae vcs-type: git vendor: Red Hat, Inc. 
version: "1.4" User: "185" WorkingDir: /home/jboss Created: "2018-08-01T18:26:00Z" DockerVersion: 1.12.6 Id: sha256:7b46b9b6f72aec4ee9638e2e13c508df2e7db860a6eaa9475a6fbb7cb42f2928 Size: 174675038 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:dc84fed0f6f40975a2277c126438c8aa15c70eeac75981dbaa4b6b853eff61a6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:dc84fed0f6f40975a2277c126438c8aa15c70eeac75981dbaa4b6b853eff61a6 resourceVersion: "14170" uid: aba4a7f3-49f7-4123-a54a-edb01fe0daf1 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f84507046cf9a3a75793faf6f9259d563ae08337fd9eba430318be22110ab718 size: 69296290 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - NAME=nginx - NGINX_VERSION=1.24 - NGINX_SHORT_VER=124 - VERSION=0 - SUMMARY=Platform for running nginx 1.24 or building nginx-based application - DESCRIPTION=Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.24 daemon. The image can be used as a base image for other applications based on nginx 1.24 web server. Nginx server image can be extended using source-to-image tool. - NGINX_CONFIGURATION_PATH=/opt/app-root/etc/nginx.d - NGINX_CONF_PATH=/etc/nginx/nginx.conf - NGINX_DEFAULT_CONF_PATH=/opt/app-root/etc/nginx.default.d - NGINX_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/nginx - NGINX_APP_ROOT=/opt/app-root - NGINX_LOG_PATH=/var/log/nginx - NGINX_PERL_MODULE_PATH=/opt/app-root/etc/perl ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-21T20:12:54Z" com.redhat.component: nginx-124-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.24 daemon. The image can be used as a base image for other applications based on nginx 1.24 web server. Nginx server image can be extended using source-to-image tool. 
distribution-scope: public help: For more information visit https://github.com/sclorg/nginx-container io.buildah.version: 1.41.4 io.k8s.description: Nginx is a web server and a reverse proxy server for HTTP, SMTP, POP3 and IMAP protocols, with a strong focus on high concurrency, performance and low memory usage. The container image provides a containerized packaging of the nginx 1.24 daemon. The image can be used as a base image for other applications based on nginx 1.24 web server. Nginx server image can be extended using source-to-image tool. io.k8s.display-name: Nginx 1.24 io.openshift.expose-services: 8443:https io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nginx,nginx-124 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/nginx-124 org.opencontainers.image.revision: fdf2a6fd7c37b9afe85e8fb9c97cce8d17a5b913 release: "1761077540" summary: Platform for running nginx 1.24 or building nginx-based application url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build ubi8/nginx-124:latest vcs-ref: fdf2a6fd7c37b9afe85e8fb9c97cce8d17a5b913 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-21T20:13:13Z" Id: sha256:ad8014cfc43d3e0063b5ee0ebcbb4d2e54b1de52bd97cc0e0b517c1a11f43036 Size: 164856009 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/nginx-124@sha256:de3e8e869ab92ab5ad19919034b845b3fb01c7e57d49caf5899b68c0a4461c1b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:de3e8e869ab92ab5ad19919034b845b3fb01c7e57d49caf5899b68c0a4461c1b resourceVersion: "14058" uid: 650d1e06-01ef-49e5-b649-45fcab3c2f89 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:db0f4cd412505c5cc2f31cf3c65db80f84d8656c4bfa9ef627a6f532c0459fc4 size: 78359137 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7e3624512448126fd29504b9af9bc034538918c54f0988fb08c03ff7a3a9a4cb size: 1789 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b9404c765c209f7b6c036000e21903a15cf69485d699f09967f0aba458381d8b size: 106246762 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - DEFAULT_ADMIN_USERNAME=eapadmin - HTTPS_ENABLE_HTTP2=true - JBOSS_HOME=/opt/eap - JBOSS_IMAGE_NAME=jboss-eap-7/eap-xp3-openjdk11-runtime-openshift-rhel8 - JBOSS_IMAGE_VERSION=3.0 - 
JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api - LAUNCH_JBOSS_IN_BACKGROUND=true - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - SSO_FORCE_LEGACY_SECURITY=true ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 7323ecf6dc56 Image: 9cb50e53a79e8f0f3435761b450f6db4429fad23b42f97ddbf63a46cd062be91 Labels: architecture: x86_64 build-date: 2022-08-03T11:41:15.058748 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-eap-xp3-openjdk11-runtime-openshift-rhel8-container com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Enterprise Application Platform XP 3.0 OpenShift runtime image with OpenJDK 11 distribution-scope: public io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.k8s.description: Base runtime image to run EAP XP server and application io.k8s.display-name: JBoss EAP XP runtime image io.openshift.expose-services: 8080:http io.openshift.tags: javaee,eap,eap7 maintainer: Red Hat name: jboss-eap-7/eap-xp3-openjdk11-runtime-openshift-rhel8 org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "20" summary: Red Hat JBoss Enterprise Application Platform XP 3.0 OpenShift runtime image with OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-eap-7/eap-xp3-openjdk11-runtime-openshift-rhel8/images/3.0-20 vcs-ref: 0abf15128641d27740a7ef71f0a5f1207b0218ef vcs-type: git vendor: Red Hat, Inc. version: "3.0" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - DEFAULT_ADMIN_USERNAME=eapadmin - HTTPS_ENABLE_HTTP2=true - JBOSS_HOME=/opt/eap - JBOSS_IMAGE_NAME=jboss-eap-7/eap-xp3-openjdk11-runtime-openshift-rhel8 - JBOSS_IMAGE_VERSION=3.0 - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api - LAUNCH_JBOSS_IN_BACKGROUND=true - MICROPROFILE_CONFIG_DIR_ORDINAL=500 - SSO_FORCE_LEGACY_SECURITY=true ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: 7323ecf6dc56 Image: sha256:1fb08a410caa49e5f8b17f4230704c6ace838c69797493c2c113c595b1799de6 Labels: architecture: x86_64 build-date: 2022-08-03T11:41:15.058748 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: jboss-eap-xp3-openjdk11-runtime-openshift-rhel8-container com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Enterprise Application Platform XP 3.0 OpenShift runtime image with OpenJDK 11 distribution-scope: public io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.k8s.description: Base runtime image to run EAP XP server and application io.k8s.display-name: JBoss EAP XP runtime image io.openshift.expose-services: 8080:http 
io.openshift.tags: javaee,eap,eap7 maintainer: Red Hat name: jboss-eap-7/eap-xp3-openjdk11-runtime-openshift-rhel8 org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "20" summary: Red Hat JBoss Enterprise Application Platform XP 3.0 OpenShift runtime image with OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-eap-7/eap-xp3-openjdk11-runtime-openshift-rhel8/images/3.0-20 vcs-ref: 0abf15128641d27740a7ef71f0a5f1207b0218ef vcs-type: git vendor: Red Hat, Inc. version: "3.0" User: "185" WorkingDir: /home/jboss Created: "2022-08-03T11:43:29Z" DockerVersion: 1.13.1 Id: sha256:21dea2b05afe0affbdf8aedb6249b73b1f1f903144eebb827c027f094e9ad094 Size: 184613957 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-eap-7/eap-xp3-openjdk11-runtime-openshift-rhel8@sha256:deaaa8255efc84a6a7fd4d1b6e5593eaab6c2753e1f2f84a5b83d4e047f03f3f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:deaaa8255efc84a6a7fd4d1b6e5593eaab6c2753e1f2f84a5b83d4e047f03f3f resourceVersion: "13413" uid: 2fa11596-7557-458c-9302-01f9be7cb829 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c85ac87d44df4b64d7c273886fc5aed55a28422df33dcb641884ffa419db218 size: 76240885 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:51e9f237b750efcda2d5755785cdb8dd080d51585ae35d368e4f9b29a11b1994 size: 1329 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5ccfe6e48f4f71d761b34c61586ac1808cca10bf7e543a3666b802f38625c5a9 size: 4013312 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35875365be086462ee5d275b62cfc13046029a9a084880c18583b932a5b23632 size: 85346475 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3a3fb8d8a857d4b21711172a18b68ff8739599f21ba0264b9caad55100e36571 size: 15178667 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.5 - JOLOKIA_VERSION=1.6.2.redhat-00002 - KARAF_FRAMEWORK_VERSION=4.2.6.fuse-750016-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 534540aef153 Image: 4ceea0094875275ec73d3687995a675070b48d2d5714ed5d7e33145a368afa76 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-01-27T12:38:41.799969 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: 
https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.karaf: 4.2.6.fuse-750016-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "14.1580118141" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.5-14.1580118141 vcs-ref: 415e7fbfd545dfa7a134c6e095a5c34f2a0a22a2 vcs-type: git vendor: Red Hat, Inc. version: "1.5" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/opt/rh/rh-maven35/root/usr/bin:/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.5 - JOLOKIA_VERSION=1.6.2.redhat-00002 - KARAF_FRAMEWORK_VERSION=4.2.6.fuse-750016-redhat-00001 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - 'MANPATH=/opt/rh/rh-maven35/root/usr/share/man:' - PYTHONPATH=/opt/rh/rh-maven35/root/usr/lib/python2.7/site-packages ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 534540aef153 Image: sha256:681bf5e54f14d808c4a60d22bc9a135c1a01f8f73761ed5d29d9cda095ba5533 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2020-01-27T12:38:41.799969 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.6.2.redhat-00002 io.fabric8.s2i.version.karaf: 4.2.6.fuse-750016-redhat-00001 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Otavio Piske name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "14.1580118141" summary: Platform for building 
and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.5-14.1580118141 vcs-ref: 415e7fbfd545dfa7a134c6e095a5c34f2a0a22a2 vcs-type: git vendor: Red Hat, Inc. version: "1.5" User: "185" WorkingDir: /home/jboss Created: "2020-01-27T12:40:18Z" DockerVersion: 1.13.1 Id: sha256:85deacd9f44c18327242bef91783c68732fe6a82ccefdc70f9d5f957422c94e9 Size: 180787247 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:df2b4b8e8f4b7b183c443653b6f11ac529d7c6421972078a464b765ab31b075d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:df2b4b8e8f4b7b183c443653b6f11ac529d7c6421972078a464b765ab31b075d resourceVersion: "14033" uid: 481f6e27-da3d-46aa-a43e-0b070be924d5 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e0348fdb2685077d22116d294a90a253709aba78815882a57fcc536b22dcae2f size: 39488293 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:689ca74a7d3bce7261a934f55c5f2cb819b684beaf0330ead965091221c3f4e2 size: 91664214 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-11-runtime - JBOSS_IMAGE_VERSION=1.21 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2025-01-08T12:07:07 com.redhat.component: openjdk-11-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 11 runtime distribution-scope: public io.buildah.version: 1.33.8 io.cekit.version: 4.13.0.dev0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" org.opencontainers.image.documentation: https://rh-openjdk.github.io/redhat-openjdk-containers/ release: "1.1736337918" summary: Image for Red Hat OpenShift providing OpenJDK 11 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11-runtime/images/1.21-1.1736337918 usage: https://rh-openjdk.github.io/redhat-openjdk-containers/ vcs-ref: c7e21327e7823f0c54fbed52da81fa426d3d6d59 vcs-type: git vendor: Red Hat, Inc. 
version: "1.21" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2025-01-08T12:28:07Z" Id: sha256:92f5ca828ad33dd55c896ad2b4d3820979cf4f14353f219d2f7e6ddc4eac59e5 Size: 131172143 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11-runtime@sha256:df8858f0c01ae1657a14234a94f6785cbb2fba7f12c9d0325f427a3f1284481b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:df8858f0c01ae1657a14234a94f6785cbb2fba7f12c9d0325f427a3f1284481b resourceVersion: "13508" uid: 5cfd3195-ac03-4606-ab75-79c8fb14618d - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c43687042a41aad69fc526985ef2b82012c011db7e0e26faba4fc860ad32d88e size: 75837780 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2b7b014ba1b80abb29391141385bd32668571313647317d1d64d8b5cebb1f228 size: 1331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2825299765472d0b62c1ed19ebb564a8a191b88ce49639471a274d03e7f9151e size: 3910026 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5832c11acc9bb0420072ec62a6cdcea1d8226ed89e430e3086f81bc7866631c2 size: 84374210 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:08887a20d77482db42bf0b3302e0fb5080b0e292086c3ad3bc440a46a0847049 size: 15577091 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - usage Env: - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.3 - JOLOKIA_VERSION=1.5.0.redhat-1 - KARAF_FRAMEWORK_VERSION=4.2.0.fuse-731003-redhat-00003 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 8ea581222c6a Image: 6b6ffd776185a38822468b3667a44cab0e1f21f13a14cda9078cbfff54fcec6c Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-06-28T22:30:18.045503 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.karaf: 4.2.0.fuse-731003-redhat-00003 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Dhiraj Bokde name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: 
/deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "10.1561752280" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.3-10.1561752280 vcs-ref: 8f1e45d934761ae48ada4113b985e8b907552e74 vcs-type: git vendor: Red Hat, Inc. version: "1.3" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.3 - JOLOKIA_VERSION=1.5.0.redhat-1 - KARAF_FRAMEWORK_VERSION=4.2.0.fuse-731003-redhat-00003 - PROMETHEUS_JMX_EXPORTER_VERSION=0.3.1.redhat-00006 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 8ea581222c6a Image: sha256:ff5f5f580c891a6d4f7513093335bc83fa2be4316bc21bbe8b85e4df301532d8 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-06-28T22:30:18.045503 com.redhat.build-host: cpt-1008.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.2.1 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.karaf: 4.2.0.fuse-731003-redhat-00003 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.3.1.redhat-00006 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Dhiraj Bokde name: fuse7/fuse-karaf-openshift org.concrt.version: 2.2.1 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "10.1561752280" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.3-10.1561752280 vcs-ref: 8f1e45d934761ae48ada4113b985e8b907552e74 vcs-type: git vendor: Red Hat, Inc. 
version: "1.3" User: "185" WorkingDir: /home/jboss Created: "2019-06-28T22:32:10Z" DockerVersion: 1.13.1 Id: sha256:2d5c68055f82485218766edee2968eae15cef0423947a341f7ead7a75e601153 Size: 179706701 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:e12d4107cfe12b7b9f3817bde90dcb07ff3ee7e3b6482120fec6d37583df727f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:e12d4107cfe12b7b9f3817bde90dcb07ff3ee7e3b6482120fec6d37583df727f resourceVersion: "14029" uid: a03e254d-ddab-41cc-872a-fb1821948c4c - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6208c5a2e205726f3a2cd42a392c5e4f05256850d13197a711000c4021ede87b size: 79073674 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7a18d7eaea2d291c5d580d166e4ceaf15ac961db4ea2abe773a353e088a1b74b size: 121891551 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:69ddf622f141ad37579e8ee0b486f8a4289b4406915985db7082014ec65d31c7 size: 34326547 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jws-5.7/tomcat/bin/launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - CATALINA_OPTS=-Djava.security.egd=file:/dev/./urandom - JBOSS_PRODUCT=webserver - JBOSS_WEBSERVER_VERSION=5.7.2 - JPDA_ADDRESS=8000 - JWS_HOME=/opt/jws-5.7/tomcat - PRODUCT_VERSION=5.7.2 - TOMCAT_VERSION=9.0.62 - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JWS_S2I_MODULE=/opt/jboss/container/jws/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - HOME=/home/jboss - AB_PROMETHEUS_ENABLE=True - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jws-jmx-exporter-config.yaml - AB_PROMETHEUS_JMX_EXPORTER_PORT=9404 - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_IMAGE_NAME=jboss-webserver-5/jws57-openjdk11-rhel8-openshift - JBOSS_IMAGE_VERSION=5.7.2 - STI_BUILDER=jee ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9404/tcp: {} Labels: architecture: x86_64 build-date: 2023-04-26T09:26:53 com.redhat.component: jboss-webserver-57-openjdk11-rhel8-openshift-container com.redhat.deployments-dir: /opt/jws-5.7/tomcat/webapps com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: JPDA_ADDRESS:8000 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK11 on UBI8 distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 4.6.0 
io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running web applications on JBoss Web Server 5.7 with OpenJDK11 - Tomcat v9 io.k8s.display-name: JBoss Web Server 5.7 OpenJDK11 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,tomcat9 maintainer: szappis@redhat.com name: jboss-webserver-5/jws57-openjdk11-rhel8-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/jws-5.7/tomcat/webapps org.jboss.product: webserver-tomcat9 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 5.7.2 org.jboss.product.webserver-tomcat9.version: 5.7.2 release: "8" summary: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK11 on UBI8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-webserver-5/jws57-openjdk11-rhel8-openshift/images/5.7.2-8 vcs-ref: 768e16aeb3ed190e276d819840a2de074fda37d1 vcs-type: git vendor: Red Hat, Inc. version: 5.7.2 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-04-26T09:30:29Z" Id: sha256:7a6b7b88ed8e89e32fc56e35f07608e2357d6fa3e3a996bec2f65c63fb271151 Size: 235334209 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-webserver-5/jws57-openjdk11-openshift-rhel8@sha256:e241dd0049956f75f989687e80fafe7da2f2f25ef4ea762bdaabfe2161d20f64 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:e241dd0049956f75f989687e80fafe7da2f2f25ef4ea762bdaabfe2161d20f64 resourceVersion: "13699" uid: 938832de-81fc-41f0-9e3f-c5bd5b711fdb - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:35d6a5cce6a146ea7213377edde7d1d27f34dda18ce9fac98ea2ab40a6b110f6 size: 79593305 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26e557f159f6398eefc1611d2b3bd0fe06872b96a779bcf01a3acaa819938c56 size: 17490249 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d2bb55755c2edf5423e38bd42c48b277e0cb4c5c765218247001a8c8eb479a87 size: 215199578 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e6bff1e34aa6582524f2497681213cc7697e9596d432796d3d5f29b24248cd58 size: 17064811 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el9 - NODEJS_VER=20 - RUBY_MAJOR_VERSION=3 - RUBY_MINOR_VERSION=0 - RUBY_VERSION=3.0 - RUBY_SCL_NAME_VERSION=30 - RUBY_SCL=ruby-30 - IMAGE_NAME=ubi9/ruby-30 - SUMMARY=Platform for building and running Ruby 3.0 applications - DESCRIPTION=Ruby 3.0 available as container is a base platform for building and running various Ruby 3.0 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). 
It is simple, straight-forward, and extensible. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-13T16:12:15Z" com.redhat.component: ruby-30-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Ruby 3.0 available as container is a base platform for building and running various Ruby 3.0 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Ruby 3.0 available as container is a base platform for building and running various Ruby 3.0 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. io.k8s.display-name: Ruby 3.0 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,ruby,ruby30,ruby-30 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/ruby-30 org.opencontainers.image.revision: a15c9c337bac22b7a02fde2215d14046d2418bca release: "1760371911" summary: Platform for building and running Ruby 3.0 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-ruby-container.git --context-dir=3.0/test/puma-test-app/ ubi9/ruby-30 ruby-sample-app vcs-ref: a15c9c337bac22b7a02fde2215d14046d2418bca vcs-type: git vendor: Red Hat, Inc. 
version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-13T16:12:20Z" Id: sha256:55edf0b42b55dab2ccf52b5e19e10c4821f47fa94fff9fb5b3a9c72a8c7540be Size: 329366717 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/ruby-30@sha256:e3727c305a54ac48c07688089e18e9abbe4ce98712a7ce59aa4b9ad7b7bc6514 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:e3727c305a54ac48c07688089e18e9abbe4ce98712a7ce59aa4b9ad7b7bc6514 resourceVersion: "14114" uid: 3876e657-5ceb-484c-8532-50efc059eea8 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7ef1e3a3a6838c7a9be47c4fdc5a8b177583baa77397397e76933831c0379d45 size: 132132261 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:17659dbf6db2c126d42ce17461e7ebffa0681e9c07bb5ac44f7f70ad4c05bf17 size: 5393909 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_HTTPS=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JBOSS_CONTAINER_MAVEN_38_MODULE=/opt/jboss/container/maven/38/ - MAVEN_VERSION=3.8 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar quarkus-app - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.17 - LANG=C.utf8 - FUSE_JAVA_IMAGE_NAME=fuse7/fuse-java-openshift-rhel8 - FUSE_JAVA_IMAGE_VERSION=1.12 - JOLOKIA_VERSION=1.7.2.redhat-00002 - PROMETHEUS_JMX_EXPORTER_VERSION=0.18.0.redhat-00001 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i:/usr/local/s2i - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true - JAVA_DATA_DIR=/deployments/data ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} 9779/tcp: {} Labels: architecture: x86_64 build-date: 2024-05-23T17:52:19 com.redhat.component: fuse-java-openshift-rhel-8-container com.redhat.deployments-dir: /deployments com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 com.redhat.license_terms: https://www.redhat.com/agreements description: Build and run Spring Boot-based integration applications distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.fabric8.s2i.version.jolokia: 1.7.2.redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 
io.fabric8.s2i.version.prometheus.jmx_exporter: 0.18.0.redhat-00001 io.k8s.description: Build and run Spring Boot-based integration applications io.k8s.display-name: Fuse for OpenShift io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,fuse maintainer: Otavio Piske name: fuse7/fuse-java-openshift-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "26.1716486065" summary: Build and run Spring Boot-based integration applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-java-openshift-rhel8/images/1.12-26.1716486065 usage: https://access.redhat.com/documentation/en-us/openjdk/8/html/using_openjdk_8_source-to-image_for_openshift/index vcs-ref: c71a1efc48cdb3fbbd3afac4577b3dcc6b4e847c vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-05-23T17:54:51Z" Id: sha256:740a5d928f171dde6da2bd1563c71946f07c90b21bf32ab1917e2c0476e29a0b Size: 176874147 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-java-openshift-rhel8@sha256:e379727c63710610a1d6843aac4fe3c9e5d81fc0e58c171e88c55da4be1499f0 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:e379727c63710610a1d6843aac4fe3c9e5d81fc0e58c171e88c55da4be1499f0 resourceVersion: "14042" uid: 6d304100-713b-48f1-bbe4-edd5ebbb8511 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:54e56e6f85721741ee7bf0336de8ad3bf138a56769a6d0097b600a0e361be58d size: 39618910 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4f8ddd7f5a755f537dd9d5f553c8c78171dcf3018c5fc96676a07380d3e14e20 size: 1745 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6a26daebeda0b951f71cb9428ea142d5b29f8b9d34ade08ee7c9f323b43a580f size: 108734553 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - 
JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 65ec992ef2e6 Image: 9123ac2298437939745a0cdb21826da267ffacb7eb36c5ab1b10e081dbb4b39c Labels: architecture: x86_64 build-date: 2022-04-29T13:52:42.413180 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "1.1651233093" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.12-1.1651233093 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 934846907ba3af890fb4de2c384c9f21d3f7ab29 vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.12 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: 65ec992ef2e6 Image: sha256:04a95b886edf0abd76bd342b161b53d93e066e3e55ed9fa2a5a82087584a3ba4 Labels: architecture: x86_64 build-date: 2022-04-29T13:52:42.413180 com.redhat.build-host: cpt-1003.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and 
running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "1.1651233093" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.12-1.1651233093 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 934846907ba3af890fb4de2c384c9f21d3f7ab29 vcs-type: git vendor: Red Hat, Inc. version: "1.12" User: "185" WorkingDir: /home/jboss Created: "2022-04-29T13:58:21Z" DockerVersion: 1.13.1 Id: sha256:de955e624ee3104841bbab009a43a1e1e9a30a38bbfa5ea966f1c2bbc33578bf Size: 148362031 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17@sha256:e37aeaeb0159194a9855350e13e399470f39ce340d6381069933742990741fb8 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:e37aeaeb0159194a9855350e13e399470f39ce340d6381069933742990741fb8 resourceVersion: "14143" uid: d23b1f49-1ae8-42be-ac53-93628316081f - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0e6a27b858072801aa26e46f548f8e4a9d823aa2ded217561242f1aaa50912c8 size: 111652172 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GECOS=JBoss user - HOME=/home/jboss - UID=185 - USER=jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.18 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2024-01-18T10:36:52 com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.9.1 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" 
org.opencontainers.image.documentation: https://jboss-container-images.github.io/openjdk/ release: "2.1705573231" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.18-2.1705573231 usage: https://jboss-container-images.github.io/openjdk/ vcs-ref: 12aac04fffa038f171574a2c3f057a2c253f5c27 vcs-type: git vendor: Red Hat, Inc. version: "1.18" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-01-18T10:45:46Z" Id: sha256:f2df3bbcae9065c5976d45b3e1c4e5717d8f3bc38910ce02f04b2c40d3239e6b Size: 150978954 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:e4223a60b887ec24cad7dd70fdb6c3f2c107fb7118331be6f45d626219cfe7f3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:e4223a60b887ec24cad7dd70fdb6c3f2c107fb7118331be6f45d626219cfe7f3 resourceVersion: "14158" uid: d1835de4-11db-461a-8299-335d906df746 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:690c847f419672788dca52f9eb17b10133919a0aae947934f7bdf5ccf30f1546 size: 79990748 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0f257b509d08e949ce3d20cf20f1a21b7ec8a6b4b6edfef6a67a23d11b8da3ef size: 115756384 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=openjdk/openjdk-11-rhel7 - JBOSS_IMAGE_VERSION=1.17 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2024-06-04T14:59:17 com.redhat.component: openjdk-11-rhel7-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.10.0 
io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: openjdk/openjdk-11-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "3.1717512827" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7/images/1.17-3.1717512827 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 354a181e3ce930de9fb8a9a34e74118bfa657ad7 vcs-type: git vendor: Red Hat, Inc. version: "1.17" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-06-04T15:08:18Z" Id: sha256:cdc667485ef8fdf408862233fc5f22556665356272a83f2784dd5c28e24c28a5 Size: 195777053 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/openjdk/openjdk-11-rhel7@sha256:e4b1599ba6e88f6df7c4e67d6397371d61b6829d926411184e9855e71e840b8c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:e4b1599ba6e88f6df7c4e67d6397371d61b6829d926411184e9855e71e840b8c resourceVersion: "13678" uid: 10f5e7a9-9ded-439f-ac6d-43587d9d20f2 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d7c06497d5cebd39c0a4feb14981ec940b5c863e49903d320f630805b049cbff size: 39279912 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:26a68d65f3a58a10359cbec9abc880891c92df5adc10551e0830c2c517b60167 size: 90728260 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.14 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-02-07T17:21:37 com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 3.11.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk 
org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "9.1675788286" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.14-9.1675788286 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 043854ec5a8a145469c0504968ee45e7c1566392 vcs-type: git vendor: Red Hat, Inc. version: "1.14" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-02-07T17:29:14Z" Id: sha256:ea9bcdde5d9e1ec04f89e02ad1d7dc59bec3711e572b80cc43a63f4190f2d320 Size: 130027029 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:e4be2fb7216f432632819b2441df42a5a0063f7f473c2923ca6912b2d64b7494 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:e4be2fb7216f432632819b2441df42a5a0063f7f473c2923ca6912b2d64b7494 resourceVersion: "14154" uid: 070441fd-b49d-421d-8cc4-529daeb2a7ef - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:378837c0e24ad4a2e33f0eb3d68dc0c31d9a7dbbd5357d4acafec1d3a7930602 size: 74923740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e17262bc23414bd3c0e9808ad7a87b055fe5afec386da42115a839ea2083d233 size: 1303 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8d9c78c7f9887170d08c57ec73b21e469b4120682a2e82883217535294878c5d size: 3805344 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:f4350d5126d0895bb50c2c082a415ff417578d34508a0ef07ec20cebf661ebb7 size: 70368140 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:79cc30b4ba3e86f08bd7ddc3a33d86bf36534d8ccb32b1c8c1f66a57b1170bb9 size: 30906970 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Dhiraj Bokde Config: Cmd: - usage Env: - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.1 - JOLOKIA_VERSION=1.5.0.redhat-1 - KARAF_FRAMEWORK_VERSION=4.2.0.fuse-710024-redhat-00002 - PROMETHEUS_JMX_EXPORTER_VERSION=0.10.0.redhat-2 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 3183206101f3 Image: 043f153b9d5b36ae5f5dd5da8923edab1bd13d357c8909bf2e808da6932c5cb5 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-10-17T22:07:46.745472 com.redhat.build-host: cpt-0006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.1.4 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.karaf: 4.2.0.fuse-710024-redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 
io.fabric8.s2i.version.prometheus.jmx_exporter: 0.10.0.redhat-2 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Cloud Enablement Feedback name: fuse7/fuse-karaf-openshift org.concrt.version: 2.1.4 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "4.1539812382" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.1-4.1539812382 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: b1de626b2e1de9de9af75367b70862009e65df91 vcs-type: git vendor: Red Hat, Inc. version: "1.1" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/s2i:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss/openjdk18-rhel7 - JBOSS_IMAGE_VERSION=1.1 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - FUSE_KARAF_IMAGE_NAME=fuse7/fuse-karaf-openshift - FUSE_KARAF_IMAGE_VERSION=1.1 - JOLOKIA_VERSION=1.5.0.redhat-1 - KARAF_FRAMEWORK_VERSION=4.2.0.fuse-710024-redhat-00002 - PROMETHEUS_JMX_EXPORTER_VERSION=0.10.0.redhat-2 - AB_JOLOKIA_PASSWORD_RANDOM=true - AB_JOLOKIA_AUTH_OPENSHIFT=true ExposedPorts: 8778/tcp: {} 9779/tcp: {} Hostname: 3183206101f3 Image: sha256:18998c62224921fd54ef88babf0d5a0fc13c00ff60dab143bc946fb958892bde Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2018-10-17T22:07:46.745472 com.redhat.build-host: cpt-0006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-karaf-openshift-container com.redhat.deployments-dir: /deployments/karaf com.redhat.dev-mode: JAVA_DEBUG:false com.redhat.dev-mode.port: JAVA_DEBUG_PORT:5005 description: Platform for building and running Apache Karaf OSGi applications distribution-scope: public io.cekit.version: 2.1.4 io.fabric8.s2i.version.jolokia: 1.5.0.redhat-1 io.fabric8.s2i.version.karaf: 4.2.0.fuse-710024-redhat-00002 io.fabric8.s2i.version.maven: 3.3.3-1.el7 io.fabric8.s2i.version.prometheus.jmx_exporter: 0.10.0.redhat-2 io.k8s.description: Platform for building and running Apache Karaf OSGi applications io.k8s.display-name: Fuse for OpenShift - Karaf based io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,karaf maintainer: Cloud Enablement Feedback name: fuse7/fuse-karaf-openshift org.concrt.version: 2.1.4 org.jboss.deployments-dir: /deployments/karaf org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "4.1539812382" summary: Platform for building and running Apache Karaf OSGi applications url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-karaf-openshift/images/1.1-4.1539812382 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: b1de626b2e1de9de9af75367b70862009e65df91 vcs-type: git vendor: Red Hat, Inc. 
version: "1.1" User: "185" WorkingDir: /home/jboss Created: "2018-10-17T22:10:08Z" DockerVersion: 1.13.1 Id: sha256:e2a9658ebf12193de8f236125832ea2941b95ea3188bfbb788bbb346351a6ee6 Size: 180011963 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-karaf-openshift@sha256:e61420525f02c4e85ba9b79b9702880a11907d8ab79b9b0a36dbf559ce0f5234 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:e61420525f02c4e85ba9b79b9702880a11907d8ab79b9b0a36dbf559ce0f5234 resourceVersion: "14017" uid: fa7da5ba-8f2c-46eb-a3d3-06f48b8f006e - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7f2c2c4492b6b2d181be862a0a1d1b6f6851cb07244efbcb43d44f9936aa78d5 size: 80005019 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:abbe4d8a684d1158a98a0d568e683035c9379b514c6e27cfb85fdb989e849368 size: 91217246 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - /bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - DEFAULT_ADMIN_USERNAME=eapadmin - HTTPS_ENABLE_HTTP2=true - JBOSS_HOME=/opt/eap - JBOSS_IMAGE_NAME=jboss-eap-7/eap74-openjdk8-runtime-openshift-rhel7 - JBOSS_IMAGE_VERSION=7.4.18 - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - LAUNCH_JBOSS_IN_BACKGROUND=true - SSO_FORCE_LEGACY_SECURITY=true ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2024-07-22T13:35:03 com.redhat.component: jboss-eap-74-openjdk8-runtime-openshift-rhel7-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.license_terms: https://www.redhat.com/agreements description: The JBoss EAP 7.4 OpenJDK 8 runtime image distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.k8s.description: Base image to run an EAP server and application io.k8s.display-name: JBoss EAP runtime image io.openshift.expose-services: 8080:http io.openshift.tags: javaee,eap,eap7 maintainer: Red Hat name: jboss-eap-7/eap74-openjdk8-runtime-openshift-rhel7 org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "6" summary: The JBoss EAP 7.4 OpenJDK 8 runtime image url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-eap-7/eap74-openjdk8-runtime-openshift-rhel7/images/7.4.18-6 vcs-ref: 3bd793c5d56a86086de93246a63bfc3312df588a vcs-type: git vendor: Red Hat, Inc. 
version: 7.4.18 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2024-07-22T13:38:30Z" Id: sha256:055765bee165f34aea152152acabd2b49594742c636a426afe0f9d851122dd36 Size: 171242646 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-eap-7/eap74-openjdk8-runtime-openshift-rhel7@sha256:e6fc695cfd77ccff83ef22148c54f45ba8af41e2b69f6146d7ad588cb2aed780 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:e6fc695cfd77ccff83ef22148c54f45ba8af41e2b69f6146d7ad588cb2aed780 resourceVersion: "13313" uid: 1c4ee5c6-f94d-4f62-97ea-20a36d0ac4da - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:25c75c34b2e2b68ba9245d9cddeb6b8a0887371ed30744064f85241a75704d87 size: 79262296 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:67705065e025181e4faca8aabe1305bdd92f5bdf8a2b8009cdb69183ac2e2c47 size: 49851946 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9d458e2e81cb0fa811f569aaf711628309c0372c7d5eed4a8ea9ec96b4aeeb42 size: 9300456 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:65b1f5b0fed902160219dd5c084e53de4f052d43d177a5ae3a3549fb62bb85cb size: 415348847 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Entrypoint: - /bin/openshift-install Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - ART_BUILD_ENGINE=konflux - ART_BUILD_DEPS_METHOD=cachi2 - ART_BUILD_NETWORK=open - ART_BUILD_DEPS_MODE=default - __doozer=merge - BUILD_RELEASE=202510211040.p2.ge238076.assembly.stream.el9 - BUILD_VERSION=v4.20.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=20 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.20.0-202510211040.p2.ge238076.assembly.stream.el9-e238076 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.20 - __doozer_key=ose-installer - __doozer_uuid_tag=ose-installer-rhel9-v4.20.0-20251021.105557 - __doozer_version=v4.20.0 - OS_GIT_COMMIT=e238076 - SOURCE_DATE_EPOCH=1760727874 - SOURCE_GIT_COMMIT=e23807689ec464da30e771dda70fd8989680a011 - SOURCE_GIT_TAG=v1.4.19-ec5-379-ge23807689e - SOURCE_GIT_URL=https://github.com/openshift/installer - HOME=/output Labels: License: GPLv2+ architecture: x86_64 build-date: "2025-10-21T13:33:19Z" com.redhat.component: ose-installer-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:openshift:4.20::el9 description: Empty distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Empty io.k8s.display-name: Empty io.openshift.build.commit.id: e23807689ec464da30e771dda70fd8989680a011 io.openshift.build.commit.url: https://github.com/openshift/installer/commit/e23807689ec464da30e771dda70fd8989680a011 io.openshift.build.source-location: https://github.com/openshift/installer io.openshift.expose-services: "" io.openshift.maintainer.component: Installer / openshift-installer io.openshift.maintainer.project: OCPBUGS io.openshift.release.operator: "true" io.openshift.tags: Empty maintainer: Red Hat, Inc. 
name: openshift/ose-installer-rhel9 org.opencontainers.image.revision: 79818eff324aee33104736839fba6775d39383d7 release: 202510211040.p2.ge238076.assembly.stream.el9 summary: Empty url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel-els/images/9.4-847.1719484506 vcs-ref: 79818eff324aee33104736839fba6775d39383d7 vcs-type: git vendor: Red Hat, Inc. version: v4.20.0 User: 1000:1000 WorkingDir: /output ContainerConfig: {} Created: "2025-10-21T13:43:59Z" Id: sha256:b437aabdb1940863e4510026463d0fcfe0e88f534ebaeedbc663115821d29154 Size: 553786370 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e7713979a921ec8d2506fcb3fb3ee960fc757262f4567319ee5aa2b351d4f778 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:e7713979a921ec8d2506fcb3fb3ee960fc757262f4567319ee5aa2b351d4f778 resourceVersion: "13292" uid: fd93268b-d615-4f70-8cdc-993cadb95c44 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2a99c93da16827d9a6254f86f495d2c72c62a916f9c398577577221d35d2c790 size: 39641757 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4418ace46c3dd933f98d83f357f31048e72d5db3d97bccfdb0acef769ee8234f size: 1743 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:21d80832abff381642c2152c7d9ae05bc0d2683be5f6cfbe25024738f7a5895b size: 118407698 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: be4e58a52d40 Image: 3eec95fdbe14c33d646a38ecfa0ffcd9b21f02758ebff6139972f2996b4d504a Labels: architecture: x86_64 build-date: 2021-12-01T18:42:06.107591 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-ubi8-container 
com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "10.1638383051" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.10-10.1638383051 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: ca0a104dbc88f2b53c59a11da2aacb90f0bb479c vcs-type: git vendor: Red Hat, Inc. version: "1.10" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-11 - JBOSS_IMAGE_VERSION=1.10 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: be4e58a52d40 Image: sha256:e87ffd7792f5de35a5622f520bc825e692d51d8c063da8385687889c098f5717 Labels: architecture: x86_64 build-date: 2021-12-01T18:42:06.107591 com.redhat.build-host: cpt-1006.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-11-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain 
Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-11 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "11" org.jboss.product.version: "11" release: "10.1638383051" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 11 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-11/images/1.10-10.1638383051 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: ca0a104dbc88f2b53c59a11da2aacb90f0bb479c vcs-type: git vendor: Red Hat, Inc. version: "1.10" User: "185" WorkingDir: /home/jboss Created: "2021-12-01T18:48:13Z" DockerVersion: 1.13.1 Id: sha256:26261c21856ce658c807d2a7172be92faf7469efc753dd69fac16cc856fb9471 Size: 158058692 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-11@sha256:e851770fd181ef49193111f7afcdbf872ad23f3a8234e0e07a742c4ca2882c3d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:e851770fd181ef49193111f7afcdbf872ad23f3a8234e0e07a742c4ca2882c3d resourceVersion: "14167" uid: ab005a4b-51b2-4b40-afd6-324daede91e9 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a20dc09567a04bdff2ebfaa3d3917f64d7620555e6354d53b43dd7ebb0e0f575 size: 79751689 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e7b8202be3fe050cb969cfab5cfb888dadab37a8fe818411e9674df89e0a989c size: 124303924 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. 
Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.16 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-11-14T15:35:29 com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.9.1 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "1" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.16-1 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 3fe6df1e2c390588459d91f828a74bf0500b82cc vcs-type: git vendor: Red Hat, Inc. 
version: "1.16" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-11-14T15:38:35Z" Id: sha256:0dbe7f254fb89345741da46333fda218550459fc3991c019639585c84cf22e70 Size: 204085222 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:e90172ca0f09acf5db1721bd7df304dffd184e00145072132cb71c7f0797adf6 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:e90172ca0f09acf5db1721bd7df304dffd184e00145072132cb71c7f0797adf6 resourceVersion: "14166" uid: 59fccd7d-2b98-4fe1-aa40-6dd676ee3101 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2920d84eafa0cf94806ab58f0a2124f7b2d35bcbb06fc89a9106dcc28efe397a size: 39653524 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7180a091b58fc553a5990486ab5028d0fe8361e9c62ca537c2324fe3997d339a size: 50614128 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - container=oci - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - NPM_RUN=start - PLATFORM=el9 - NODEJS_VERSION=22 - NAME=nodejs - SUMMARY=Minimal image for running Node.js 22 applications - DESCRIPTION=Node.js 22 available as container is a base platform for running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. - NPM_CONFIG_PREFIX=/opt/app-root/src/.npm-global - PATH=/opt/app-root/src/node_modules/.bin/:/opt/app-root/src/.npm-global/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-15T16:11:24Z" com.redhat.component: nodejs-22-minimal-container com.redhat.deployments-dir: /opt/app-root/src com.redhat.dev-mode: DEV_MODE:false com.redhat.dev-mode.port: DEBUG_PORT:5858 com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:9::appstream description: Node.js 22 available as container is a base platform for running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-nodejs-container io.buildah.version: 1.41.4 io.k8s.description: Node.js 22 available as container is a base platform for running various Node.js 22 applications and frameworks. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and efficient, perfect for data-intensive real-time applications that run across distributed devices. 
io.k8s.display-name: Node.js 22 Micro io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,nodejs,nodejs22 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi9/nodejs-22-minimal org.opencontainers.image.revision: e19300a1ca03c44fa04c9dfd76d31f292aee7cca release: "1760544659" summary: Minimal image for running Node.js 22 applications url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: e19300a1ca03c44fa04c9dfd76d31f292aee7cca vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-15T16:11:29Z" Id: sha256:2ac495ca952b9d2ee4ad236f7d5d31cd3b3641e1aaa01a4d4c5bb039a7cb82ca Size: 90280042 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi9/nodejs-22-minimal@sha256:ea065c1e423af8de839c84e1520131fe4ed4a1bda0ae8eeb078fc8a8368e9f0c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:ea065c1e423af8de839c84e1520131fe4ed4a1bda0ae8eeb078fc8a8368e9f0c resourceVersion: "14092" uid: da460b6b-80e6-44b6-ac2e-2f0d3e699938 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d43c95783c3d99a3c275f4c278f8d68a1dfda166c399fd55aee8c1dce7d76611 size: 79767891 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:e53ac5fae1ac340de75c4c78c6eea9df409b45b2ffee95cd8085a8ed3b9cbf6c size: 7515417 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0df98180b96394ec85a9f587d6e3304fa7628ec4a3029269cb44f30b4dd38a5a size: 63419156 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - run-mysqld Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el7 - MYSQL_VERSION=10.3 - APP_DATA=/opt/app-root/src - HOME=/var/lib/mysql - SUMMARY=MariaDB 10.3 SQL database server - DESCRIPTION=MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql - MYSQL_PREFIX=/opt/rh/rh-mariadb103/root/usr - ENABLED_COLLECTIONS=rh-mariadb103 - BASH_ENV=/usr/share/container-scripts/mysql/scl_enable - ENV=/usr/share/container-scripts/mysql/scl_enable - PROMPT_COMMAND=. /usr/share/container-scripts/mysql/scl_enable ExposedPorts: 3306/tcp: {} Labels: architecture: x86_64 build-date: 2023-07-19T11:53:14 com.redhat.component: rh-mariadb103-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel description: MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. 
The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. distribution-scope: public io.buildah.version: 1.29.0 io.k8s.description: MariaDB is a multi-user, multi-threaded SQL database server. The container image provides a containerized packaging of the MariaDB mysqld daemon and client application. The mysqld server daemon accepts connections from clients and provides access to content from MariaDB databases on behalf of the clients. io.k8s.display-name: MariaDB 10.3 io.openshift.expose-services: 3306:mysql io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,mysql,mariadb,mariadb103,rh-mariadb103 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhscl/mariadb-103-rhel7 release: "157" summary: MariaDB 10.3 SQL database server url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/mariadb-103-rhel7/images/1-157 usage: docker run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhscl/mariadb-103-rhel7 vcs-ref: b2fd6429b719ad746d34aa403ec724eab594969d vcs-type: git vendor: Red Hat, Inc. version: "1" User: "27" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2023-07-19T11:57:03Z" Id: sha256:b76c7d766cfbe3c5a9dd260aab10d3ad34c4f6e9f5eda825ea41bba9fe9709ca Size: 150721951 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhscl/mariadb-103-rhel7@sha256:eaaf8ab6d318d72cc4e465609b213f4d9d9171f222f59ae012fa5b96fb3e4ea9 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:eaaf8ab6d318d72cc4e465609b213f4d9d9171f222f59ae012fa5b96fb3e4ea9 resourceVersion: "13836" uid: ef06e2e6-4dab-4aae-8ea3-5c1e7479a976 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:97da74cc6d8fa5d1634eb1760fd1da5c6048619c264c23e62d75f3bf6b8ef5c4 size: 79524639 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d8190195889efb5333eeec18af9b6c82313edd4db62989bd3a357caca4f13f0e size: 1438 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:37fcebd665b9bf280b3a7b7fc8cbbdd35c40de9fde97eec88a9efbb1a416cf0f size: 31542956 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:03bf2f9ff79ce68fdf647999d3c96dd98a59121fae75dd2c1dcce34e3e159eeb size: 13107144 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:2b42f43a3d9df8228ab00afc8ece1dbfafae24fbd2b3ea72b6234bb68dc2c1bf size: 59202343 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:fe12f4f7de241e6f0f92729bb06b5400f8936a47d5bbaa4b521d4b656ad61ae1 size: 589260181 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: OpenShift Developer Services Config: Entrypoint: - /usr/bin/go-init - -main - /usr/libexec/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - __doozer=merge - BUILD_RELEASE=202306070816.p0.g05d83ef.assembly.stream - BUILD_VERSION=v4.13.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=13 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.13.0-202306070816.p0.g05d83ef.assembly.stream-05d83ef - SOURCE_GIT_TREE_STATE=clean - OS_GIT_COMMIT=05d83ef - 
SOURCE_DATE_EPOCH=1685556672 - SOURCE_GIT_COMMIT=05d83eff7e17160e679898a2a5cd6019ec252c49 - SOURCE_GIT_TAG=openshift-clients-4.13.0-202304190216-4-g05d83eff7 - SOURCE_GIT_URL=https://github.com/openshift/oc - JENKINS_VERSION=2 - HOME=/var/lib/jenkins - JENKINS_HOME=/var/lib/jenkins - JENKINS_UC=https://updates.jenkins.io - OPENSHIFT_JENKINS_IMAGE_VERSION=4.13 - LANG=en_US.UTF-8 - LC_ALL=en_US.UTF-8 - INSTALL_JENKINS_VIA_RPMS=true ExposedPorts: 8080/tcp: {} 50000/tcp: {} Labels: License: GPLv2+ architecture: x86_64 build-date: 2023-06-13T18:51:10 com.redhat.build-host: cpt-1001.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openshift-jenkins-2-container com.redhat.license_terms: https://www.redhat.com/agreements description: Jenkins is a continuous integration server distribution-scope: public io.buildah.version: 1.29.0 io.jenkins.version: 2.401.1 io.k8s.description: Jenkins is a continuous integration server io.k8s.display-name: Jenkins 2 io.openshift.build.commit.id: 418b910a5af2d9a46c4259fbdbe9a851f6a39820 io.openshift.build.commit.url: https://github.com/openshift/jenkins/commit/418b910a5af2d9a46c4259fbdbe9a851f6a39820 io.openshift.build.source-location: https://github.com/openshift/jenkins io.openshift.expose-services: 8080:http io.openshift.maintainer.component: Jenkins io.openshift.maintainer.product: OpenShift Container Platform io.openshift.maintainer.project: OCPBUGS io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: jenkins,jenkins2,ci maintainer: openshift-dev-services+jenkins@redhat.com name: openshift/ose-jenkins release: "1686682222" summary: Provides the latest release of the Red Hat Extended Life Base Image. url: https://access.redhat.com/containers/#/registry.access.redhat.com/openshift/ose-jenkins/images/v4.13.0-1686682222 vcs-ref: 938ba00e0b9ebc6013b8da4c8466e78680c8908c vcs-type: git vendor: Red Hat, Inc. 
version: v4.13.0 User: "1001" Volumes: /var/lib/jenkins: {} ContainerConfig: {} Created: "2023-06-13T18:58:20Z" Id: sha256:fbe7cc900bb77ed63ee417950f83153544d870fa450890667cedb5492b9be228 Size: 772667538 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ocp-tools-4/jenkins-rhel8@sha256:eab456afb39ed4607b2ee61c8c7635ab1c5ff8f8bddf7640c557e792504d545f kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:01Z" name: sha256:eab456afb39ed4607b2ee61c8c7635ab1c5ff8f8bddf7640c557e792504d545f resourceVersion: "13760" uid: 054532a0-f182-42d2-bc10-27b80a53a3cc - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:969b2129f884091816f6451dc7954be84cf70867f64c7f3448a4d7045c405fed size: 79215420 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:3b71c1233e1e26d1d75f29179b892cb4d3f5b87540f0d4f377bc9c8b040e77fa size: 468504636 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:29f302bc38599f9c2abd333785f5a01382dd7b3008e8430d3f6c619488e03b3c size: 627309857 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - EAP_FULL_GROUPID=org.jboss.eap - JBOSS_EAP_VERSION=7.4.7 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.4.7 - WILDFLY_VERSION=7.4.7.GA-redhat-00003 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.8.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - 
GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - S2I_FP_VERSION=23.0.0.Final - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap74-openjdk11-openshift-rhel8 - JBOSS_IMAGE_VERSION=7.4.7 - WILDFLY_CAMEL_VERSION=5.9.0.fuse-7_11_1-00014-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-06-19T15:17:59 com.redhat.component: fuse-eap-openshift-jdk11-rhel-8-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Camel applications on EAP 7.4 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.4 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Thomas Diesler name: fuse7/fuse-eap-openshift-jdk11-rhel8 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.4.7 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 7.4.7 release: "28.1687187287" summary: Platform for building and running Apache Camel applications on EAP 7.4 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift-jdk11-rhel8/images/1.11-28.1687187287 vcs-ref: f45d02aaf2c4c13f2a24ced8fbda98fe24a4db35 vcs-type: git vendor: Red Hat, Inc. 
version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-06-19T15:22:01Z" Id: sha256:436462f5a261850bb0367468bcbe4ac7f6bceeae89b9620f660aa184b40d7936 Size: 1175083214 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift-jdk11-rhel8@sha256:ebfca7a4e3506ee7f317acc7503ad46f2e1cf5605347a1b75fdd02bc77c7de02 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:10:59Z" name: sha256:ebfca7a4e3506ee7f317acc7503ad46f2e1cf5605347a1b75fdd02bc77c7de02 resourceVersion: "13331" uid: 0efd9cf8-b97b-4567-9c02-7e3328dfb8eb - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:6c502b378234995dc83300d3008b7d19853c50e1b89dc9c0d5917de51b589452 size: 77837331 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:01326448827ea7f69fcd30abaf1d354438c9c1dc2cc2d6529832431a7dae1afb size: 17704605 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:172ffdf04fe0ffd5a0117272da0333271a3c558dbeaf357e7412638d99a1e462 size: 154305621 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9a64d33ea5fbab73d73042b20b6c62bf0e07762623ac1ec63cb42bed2f673f58 size: 17205967 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - NODEJS_VER=20 - RUBY_MAJOR_VERSION=2 - RUBY_MINOR_VERSION=5 - RUBY_VERSION=2.5 - RUBY_SCL_NAME_VERSION=25 - RUBY_SCL=ruby-25 - IMAGE_NAME=ubi8/ruby-25 - SUMMARY=Platform for building and running Ruby 2.5 applications - DESCRIPTION=Ruby 2.5 available as container is a base platform for building and running various Ruby 2.5 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-30T12:10:33Z" com.redhat.component: ruby-25-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Ruby 2.5 available as container is a base platform for building and running various Ruby 2.5 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Ruby 2.5 available as container is a base platform for building and running various Ruby 2.5 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. 
io.k8s.display-name: Ruby 2.5 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,ruby,ruby25,ruby-25 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi8/ruby-25 org.opencontainers.image.revision: 324e7447cf2a64a60ebd3933f0a913d135a35dc9 release: "1761826163" summary: Platform for building and running Ruby 2.5 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-ruby-container.git --context-dir=2.5/test/puma-test-app/ ubi8/ruby-25 ruby-sample-app vcs-ref: 324e7447cf2a64a60ebd3933f0a913d135a35dc9 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-30T12:10:44Z" Id: sha256:4719337b9191f74d84f235858b5f264d4f259e64364228aaef2e7a26260a8c2a Size: 267072353 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/ruby-25@sha256:ec1985bf5fca4d79054e4beadf7617c3b6400bad2325e47b00a90f7fe07540de kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:ec1985bf5fca4d79054e4beadf7617c3b6400bad2325e47b00a90f7fe07540de resourceVersion: "14112" uid: 709a2874-5dbf-45be-97ed-64598de48114 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c8c24bf4ef435ee29d7495ca732a4d82374c1a11c25ca6aae12f997f45ca5a26 size: 39733597 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:ad40b487e8868da46f3b7e7132544b5ef3e0b50704629e039a8128a1cc36078d size: 63940431 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:60b639def07c06efcb08c22201a35f1f049f35e81ac616f57ddfb8548fbcd0d1 size: 146167365 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - /bin/bash Env: - container=oci - HOME=/opt/app-root - DOTNET_APP_PATH=/opt/app-root/app - DOTNET_DATA_PATH=/opt/app-root/data - DOTNET_DEFAULT_CMD=default-cmd.sh - DOTNET_RUNNING_IN_CONTAINER=true - NUGET_XMLDOC_MODE=skip - ASPNETCORE_URLS=http://*:8080 - APP_UID=1001 - DOTNET_VERSION=9.0.11 - ASPNET_VERSION=9.0.11 - PATH=/opt/app-root/src/.local/bin:/opt/app-root/src/bin:/opt/app-root/.dotnet/tools:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - STI_SCRIPTS_PATH=/usr/libexec/s2i - DOTNET_GENERATE_ASPNET_CERTIFICATE=false - DOTNET_NOLOGO=true - DOTNET_SDK_VERSION=9.0.112 - DOTNET_USE_POLLING_FILE_WATCHER=true ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-12-02T13:26:21Z" com.redhat.component: dotnet-90-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Platform for building and running .NET 9 applications distribution-scope: public dotnet_version: 9.0.11 io.buildah.version: 1.41.4 io.k8s.description: Platform for building and running .NET 9 applications io.k8s.display-name: .NET 9 SDK io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,.net,dotnet,dotnetcore,dotnet-90 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: Red Hat, Inc. 
name: ubi8/dotnet-90 org.opencontainers.image.created: "2025-12-02T13:26:21Z" org.opencontainers.image.revision: c3c19ed75250cb6c67f90925540f8ae29e952ff1 release: "1764681922" sdk_version: 9.0.112 summary: .NET 9 SDK url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: c3c19ed75250cb6c67f90925540f8ae29e952ff1 vcs-type: git vendor: Red Hat, Inc. version: "9.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-12-02T13:26:38Z" Id: sha256:bbd3fbbe4fd412ea9ee27d8d5db2239a0d2e7dc8e60d848dd6ed8e7cf61e7604 Size: 249860453 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/dotnet-90@sha256:ec784f172735873a5893504e07c57dcbd56b7b049a395c5629c6058dbfac21a3 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-12-08T17:53:27Z" name: sha256:ec784f172735873a5893504e07c57dcbd56b7b049a395c5629c6058dbfac21a3 resourceVersion: "40305" uid: 3458052a-cf24-4059-b729-4b4e138e5ef0 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:25c75c34b2e2b68ba9245d9cddeb6b8a0887371ed30744064f85241a75704d87 size: 79262296 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:67705065e025181e4faca8aabe1305bdd92f5bdf8a2b8009cdb69183ac2e2c47 size: 49851946 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9d458e2e81cb0fa811f569aaf711628309c0372c7d5eed4a8ea9ec96b4aeeb42 size: 9300456 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:b954cb90d4571440ed86627198be2d74d7c3d264fe72e0af0f35f40f0da99ea8 size: 75745362 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:92c7fb0cb7f3bfe054a3c805669529daa76dec4e9e05fcf4a097579c78dd575c size: 6212869 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/bash Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - GODEBUG=x509ignoreCN=0,madvdontneed=1 - ART_BUILD_ENGINE=konflux - ART_BUILD_DEPS_METHOD=cachi2 - ART_BUILD_NETWORK=hermetic - ART_BUILD_DEPS_MODE=default - __doozer=merge - BUILD_RELEASE=202510212154.p2.gf0c6474.assembly.stream.el9 - BUILD_VERSION=v4.20.0 - OS_GIT_MAJOR=4 - OS_GIT_MINOR=20 - OS_GIT_PATCH=0 - OS_GIT_TREE_STATE=clean - OS_GIT_VERSION=4.20.0-202510212154.p2.gf0c6474.assembly.stream.el9-f0c6474 - SOURCE_GIT_TREE_STATE=clean - __doozer_group=openshift-4.20 - __doozer_key=ose-must-gather - __doozer_uuid_tag=ose-must-gather-rhel9-v4.20.0-20251021.223340 - __doozer_version=v4.20.0 - OS_GIT_COMMIT=f0c6474 - SOURCE_DATE_EPOCH=1755014140 - SOURCE_GIT_COMMIT=f0c64742c79afce5e367ca2c7b830ad448abe1df - SOURCE_GIT_TAG=f0c64742 - SOURCE_GIT_URL=https://github.com/openshift/must-gather Labels: License: GPLv2+ architecture: x86_64 build-date: "2025-10-21T23:11:28Z" com.redhat.component: ose-must-gather-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:openshift:4.20::el9 description: Empty distribution-scope: public io.buildah.version: 1.41.4 io.k8s.description: Empty io.k8s.display-name: Empty io.openshift.build.commit.id: f0c64742c79afce5e367ca2c7b830ad448abe1df io.openshift.build.commit.url: https://github.com/openshift/must-gather/commit/f0c64742c79afce5e367ca2c7b830ad448abe1df io.openshift.build.source-location: 
https://github.com/openshift/must-gather io.openshift.build.versions: kubectl=1.33.3 io.openshift.expose-services: "" io.openshift.maintainer.component: oc io.openshift.maintainer.project: OCPBUGS io.openshift.tags: Empty maintainer: Red Hat, Inc. name: openshift/ose-must-gather-rhel9 org.opencontainers.image.revision: be5bc98f6ab4fd760cb24ef79b794f829a6cca0d release: 202510212154.p2.gf0c6474.assembly.stream.el9 summary: Empty url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel-els/images/9.4-847.1719484506 vcs-ref: be5bc98f6ab4fd760cb24ef79b794f829a6cca0d vcs-type: git vendor: Red Hat, Inc. version: v4.20.0 ContainerConfig: {} Created: "2025-10-21T23:11:33Z" Id: sha256:59d431a1f16db8ec0cac568e88b46898f9ce3ecdcb913aafcd9c7cc9f60ab2bd Size: 220397991 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ed13779a6051e3b9588f5ebea6b66c0a2979512fdcc99bca1f910a577fb4c34a kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:ed13779a6051e3b9588f5ebea6b66c0a2979512fdcc99bca1f910a577fb4c34a resourceVersion: "13671" uid: 31dd898c-aba6-4eb1-aeca-092ef136ec86 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c7965aa7086045a59bdb113a1fb8a19d7ccf7af4133e59af8ecefd39cda8e0b1 size: 78964242 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:629f5551eb35561edd4fb4df147019d1b61fecb5cff49308880466f66c7c3287 size: 18497941 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:401a443a6b88e0eaecefc030affbc1dd5b32713ba8253d2bb6553083ea3f0b4e size: 78897336 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - run-postgresql Entrypoint: - container-entrypoint Env: - container=oci - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el8 - POSTGRESQL_VERSION=10 - POSTGRESQL_PREV_VERSION=9.6 - HOME=/var/lib/pgsql - PGUSER=postgres - APP_DATA=/opt/app-root - SUMMARY=PostgreSQL is an advanced Object-Relational database management system - DESCRIPTION=PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql - ENABLED_COLLECTIONS= ExposedPorts: 5432/tcp: {} Labels: architecture: x86_64 build-date: 2024-06-05T11:48:51 com.redhat.component: postgresql-10-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. distribution-scope: public io.buildah.version: 1.29.0 io.k8s.description: PostgreSQL is an advanced Object-Relational database management system (DBMS). The image contains the client and server programs that you'll need to create, run, maintain and access a PostgreSQL DBMS server. 
io.k8s.display-name: PostgreSQL 10 io.openshift.expose-services: 5432:postgresql io.openshift.s2i.assemble-user: "26" io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: database,postgresql,postgresql10,postgresql-10 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: rhel8/postgresql-10 release: "245.1717586538" summary: PostgreSQL is an advanced Object-Relational database management system url: https://access.redhat.com/containers/#/registry.access.redhat.com/rhel8/postgresql-10/images/1-245.1717586538 usage: podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 rhel8/postgresql-10 vcs-ref: 1f9e79b1dd5ff57cc1d06493e1c759c06caebd31 vcs-type: git vendor: Red Hat, Inc. version: "1" User: "26" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2024-06-05T11:51:40Z" Id: sha256:28986f9837d55db9e0d95235c90362d2f8d13120e6445c32ffee00a74f6a5309 Size: 176378368 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/rhel8/postgresql-10@sha256:ed2da0eed3f495f5455f490cdf7f7943420f64b0cf541271a2d315a3f9e9744c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:05Z" name: sha256:ed2da0eed3f495f5455f490cdf7f7943420f64b0cf541271a2d315a3f9e9744c resourceVersion: "14107" uid: 2c14eb43-97be-4531-906c-a55d7d1032b6 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:aa3221337c7c31adc56e55ab17a4d7f84e98319cb67a11d54a165c161f8ed0e5 size: 39754072 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:69e55fe1182ec4bc6209d03b88dd3cdc9e2bfbcd4aa5a8b4b704b2021c349ead size: 63934471 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - '"./${DOTNET_DEFAULT_CMD}"' Env: - container=oci - HOME=/opt/app-root - PATH=/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - DOTNET_APP_PATH=/opt/app-root/app - DOTNET_DATA_PATH=/opt/app-root/data - DOTNET_DEFAULT_CMD=default-cmd.sh - DOTNET_RUNNING_IN_CONTAINER=true - NUGET_XMLDOC_MODE=skip - ASPNETCORE_URLS=http://*:8080 - APP_UID=1001 - DOTNET_VERSION=9.0.11 - ASPNET_VERSION=9.0.11 ExposedPorts: 8080/tcp: {} Labels: architecture: x86_64 build-date: "2025-11-20T16:29:09Z" com.redhat.component: dotnet-90-runtime-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/a:redhat:enterprise_linux:8::appstream description: Platform for running .NET 9 applications distribution-scope: public dotnet_version: 9.0.11 io.buildah.version: 1.41.4 io.k8s.description: Platform for running .NET 9 applications io.k8s.display-name: .NET 9 io.openshift.expose-services: 8080:http io.openshift.tags: runtime,.net,dotnet,dotnetcore,dotnet90-runtime maintainer: Red Hat, Inc. name: ubi8/dotnet-90-runtime org.opencontainers.image.revision: d108b33559fea5de8d297d419942ba308f1aa402 release: "1763655979" summary: .NET 9 runtime url: https://catalog.redhat.com/en/search?searchType=containers vcs-ref: d108b33559fea5de8d297d419942ba308f1aa402 vcs-type: git vendor: Red Hat, Inc. 
version: "9.0" User: "1001" WorkingDir: /opt/app-root/app ContainerConfig: {} Created: "2025-11-20T16:29:18Z" Id: sha256:6b1be0f9cba7cc70f335727ac246c44494eb40bccc30130be79a1a54e9713803 Size: 103701099 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi8/dotnet-90-runtime@sha256:ee797c115858fef35cad6ce8a13fc15b482d7672e37f485cd65579f009d51f0d kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-12-08T17:53:27Z" name: sha256:ee797c115858fef35cad6ce8a13fc15b482d7672e37f485cd65579f009d51f0d resourceVersion: "40310" uid: 06cb1e25-ae81-4cd8-ba4f-70e47f956714 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c4877503c8d2f934dcdfd76623f2b9935529fe73a1432cae4abba022c6951afd size: 79158758 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:c6ace093db543ef6bb833893c286f6ea30dbca731056cc2609a543935b53b61b size: 121874290 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a56226cc9e15515d8100879ba29506372250c387adb71cc3d5b382242374aa59 size: 33823585 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jws-5.7/tomcat/bin/launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - JAVA_HOME=/usr/lib/jvm/java-11 - JAVA_VENDOR=openjdk - JAVA_VERSION=11 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - CATALINA_OPTS=-Djava.security.egd=file:/dev/./urandom - JBOSS_PRODUCT=webserver - JBOSS_WEBSERVER_VERSION=5.7.1 - JPDA_ADDRESS=8000 - JWS_HOME=/opt/jws-5.7/tomcat - PRODUCT_VERSION=5.7.1 - TOMCAT_VERSION=9.0.62 - JBOSS_CONTAINER_MAVEN_35_MODULE=/opt/jboss/container/maven/35/ - MAVEN_VERSION=3.5 - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JBOSS_CONTAINER_JWS_S2I_MODULE=/opt/jboss/container/jws/s2i - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss-webserver-5/jws57-openjdk11-rhel8-openshift - JBOSS_IMAGE_VERSION=5.7.1 - STI_BUILDER=jee ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-03-22T11:54:19 com.redhat.component: jboss-webserver-57-openjdk11-rhel8-openshift-container com.redhat.deployments-dir: /opt/jws-5.7/tomcat/webapps com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: JPDA_ADDRESS:8000 com.redhat.license_terms: https://www.redhat.com/agreements description: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK11 on UBI8 distribution-scope: public io.buildah.version: 1.27.3 io.cekit.version: 4.3.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running web applications on JBoss Web Server 5.7 with OpenJDK11 - Tomcat v9 io.k8s.display-name: JBoss Web Server 
5.7 OpenJDK11 io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java,tomcat9 maintainer: szappis@redhat.com name: jboss-webserver-5/jws57-openjdk11-rhel8-openshift org.jboss.container.deployments-dir: /deployments org.jboss.deployments-dir: /opt/jws-5.7/tomcat/webapps org.jboss.product: webserver-tomcat9 org.jboss.product.openjdk.version: "11" org.jboss.product.version: 5.7.1 org.jboss.product.webserver-tomcat9.version: 5.7.1 release: "2.1679484703" summary: Red Hat JBoss Web Server 5.7 - Tomcat 9 OpenShift container image with OpenJDK11 on UBI8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/jboss-webserver-5/jws57-openjdk11-rhel8-openshift/images/5.7.1-2.1679484703 vcs-ref: 8863cf5802c30ccddffcde5d418af282f73367b8 vcs-type: git vendor: Red Hat, Inc. version: 5.7.1 User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-03-22T11:57:08Z" Id: sha256:f60910cd13151fcb24fa8547d4b08aad0c9df0b11a1770d7e72b320d400bb2f7 Size: 234897858 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/jboss-webserver-5/jws57-openjdk11-openshift-rhel8@sha256:eeb0c539ee7ffbd2f1e6eb326204c6f69c554ac5acf0454e9d68d75ffe954f7c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:00Z" name: sha256:eeb0c539ee7ffbd2f1e6eb326204c6f69c554ac5acf0454e9d68d75ffe954f7c resourceVersion: "13697" uid: 3277721d-50b7-44ee-986e-5fb133460c02 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:47a1453ca54b1db59a81a44dc7ad1efc177b68a6e40a98e297c059f754e6356f size: 75360688 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-8-runtime - JBOSS_IMAGE_VERSION=1.17 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-10-23T16:02:59 com.redhat.component: openjdk-8-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "4" 
summary: Image for Red Hat OpenShift providing OpenJDK 1.8 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8-runtime/images/1.17-4 vcs-ref: 1e1eb748239d1923f81e45eaa6e9f67b93293ccb vcs-type: git vendor: Red Hat, Inc. version: "1.17" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-10-23T16:12:48Z" Id: sha256:11f24744085d08e760dc2d1ee7519ee89aea128730ebebb300ab20a05fa6a9ae Size: 114687050 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8-runtime@sha256:eed7e29bf583e4f01e170bb9f22f2a78098bf15243269b670c307caa6813b783 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:eed7e29bf583e4f01e170bb9f22f2a78098bf15243269b670c307caa6813b783 resourceVersion: "14225" uid: bff4fa90-26bc-465d-be84-75085ea9bf96 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5329d7039f252afc1c5d69521ef7e674f71c36b50db99b369cbb52aa9e0a6782 size: 39330100 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:d8b515f03629826cbe0e8d4333e6c211a736172af3d5a42f4837356bc5cccc68 size: 91380360 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.16 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-07-19T16:18:52 com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "2" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.16-2 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 3aa842acd0430ce1e0a6ba3219d036dd6ec6337b vcs-type: git vendor: Red Hat, Inc. 
version: "1.16" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-07-19T16:25:51Z" Id: sha256:88d418a60a56957c2c9b225d1b28de2cc6aeab1986f6cb6f01497eb1953612ef Size: 130729177 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:f438230ed2c2e609d0d7dbc430ccf1e9bad2660e6410187fd6e9b14a2952e70b kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:f438230ed2c2e609d0d7dbc430ccf1e9bad2660e6410187fd6e9b14a2952e70b resourceVersion: "14156" uid: 86f38bb8-6ae4-4753-bd9d-03187c290fea - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:af063699af1c142fce6707dc9306d122355e61bd23ded0d18f8a4ecfbf3aa89a size: 78847792 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:c81d6d556e6e3a4255dd2709ce18578bfbbf3eed10a4efb966bf99ab69c79e05 size: 9405288 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:d9e405c2a1a004feffa67c8e53dc0965b77b9c67d91626366ee9e53dd24e3de4 size: 199332407 - mediaType: application/vnd.oci.image.layer.v1.tar+gzip name: sha256:37aac202078354aebdb36c3ecb09116035479525ca00deb05fe74fdfd585060e size: 25713137 dockerImageManifestMediaType: application/vnd.oci.image.manifest.v1+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /bin/sh - -c - $STI_SCRIPTS_PATH/usage Entrypoint: - container-entrypoint Env: - container=oci - VERSION=10 - STI_SCRIPTS_URL=image:///usr/libexec/s2i - STI_SCRIPTS_PATH=/usr/libexec/s2i - APP_ROOT=/opt/app-root - HOME=/opt/app-root/src - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PLATFORM=el10 - NODEJS_VER=22 - PHP_VERSION=8.3 - PHP_VER_SHORT=83 - NAME=php - SUMMARY=Platform for building and running PHP 8.3 applications - DESCRIPTION=PHP 8.3 available as container is a base platform for building and running various PHP 8.3 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. 
- PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ - APP_DATA=/opt/app-root/src - PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear - PHP_SYSCONF_PATH=/etc - PHP_HTTPD_CONF_FILE=php.conf - PHP_FPM_CONF_D_PATH=/etc/php-fpm.d - PHP_FPM_CONF_FILE=www.conf - PHP_FPM_RUN_DIR=/run/php-fpm - PHP_MAIN_FPM_CONF_FILE=/etc/php-fpm.conf - PHP_FPM_LOG_PATH=/var/log/php-fpm - HTTPD_CONFIGURATION_PATH=/opt/app-root/etc/conf.d - HTTPD_MAIN_CONF_PATH=/etc/httpd/conf - HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d - HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d - HTTPD_VAR_RUN=/var/run/httpd - HTTPD_DATA_PATH=/var/www - HTTPD_DATA_ORIG_PATH=/var/www - HTTPD_VAR_PATH=/var ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: "2025-10-29T04:08:18Z" com.redhat.component: php-83-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI cpe: cpe:/o:redhat:enterprise_linux:10.0 description: PHP 8.3 available as container is a base platform for building and running various PHP 8.3 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. distribution-scope: public help: For more information visit https://github.com/sclorg/s2i-php-container io.buildah.version: 1.41.4 io.k8s.description: PHP 8.3 available as container is a base platform for building and running various PHP 8.3 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding is probably as a replacement for CGI scripts. io.k8s.display-name: Apache 2.4 with PHP 8.3 io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/libexec/s2i io.openshift.tags: builder,php,php83,php-83 io.s2i.scripts-url: image:///usr/libexec/s2i maintainer: SoftwareCollections.org name: ubi10/php-83 org.opencontainers.image.revision: 8ded5302552207bde31b13b6c323e5b6d584dc8d release: "1761710868" summary: Platform for building and running PHP 8.3 applications url: https://catalog.redhat.com/en/search?searchType=containers usage: s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=8.3/test/test-app ubi10/php-83 sample-server vcs-ref: 8ded5302552207bde31b13b6c323e5b6d584dc8d vcs-type: git vendor: Red Hat, Inc. 
version: "10.0" User: "1001" WorkingDir: /opt/app-root/src ContainerConfig: {} Created: "2025-10-29T04:08:25Z" Id: sha256:df662744b3599c199c217d916b2a4f5afda6970d90d1ac07c202f0251a8a0417 Size: 313320303 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/ubi10/php-83@sha256:f4fecc3946301cef5130f3eaaa33d9c15426437d46efe70b507ac90f03364307 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:04Z" name: sha256:f4fecc3946301cef5130f3eaaa33d9c15426437d46efe70b507ac90f03364307 resourceVersion: "14083" uid: e4dff983-c1a7-4e9b-aab3-21631ae67699 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:256c6fd715f2ea75bdb9f2447d80820d93b9cd01f2aec8167c0717c1800549b1 size: 75829357 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:aa58f5fa817d02a915c95c39e12d6b15ffc8c346799058bb158d0cdf120e00c1 size: 1306 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:42e80fd47395a049eb10df53b857280d5965de326d0bc3df09c0d37875fa94e4 size: 3898037 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9bdac4114b88f8681c1643e16b330112d01c5b71ba72c7f54254e683d919cd48 size: 84354649 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7f37d65200f0aab4e6f5c0df8d6ae10e729ccca9bcf5e27525157d61842a105e size: 209220958 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:70657bf8cc141aa7c7f0fb3566a5b3d8564ac595018edd6cbd27e573327a829a size: 269185973 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:95cb831de8d57784b131903926fdfb1c94c336c41c3f942c2bd5240421a602a1 size: 306234365 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss-eap-7/eap71-openshift - JBOSS_IMAGE_VERSION=1.3 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_EAP_VERSION=7.1.5.GA - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.1.5.GA - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - DEFAULT_ADMIN_USERNAME=eapadmin - HTTPS_ENABLE_HTTP2=true - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JOLOKIA_VERSION=1.5.0 - MAVEN_VERSION=3.5 - WILDFLY_CAMEL_VERSION=5.2.0.fuse-720023-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: f8f52fae6eae Image: 78b7765799c1c765b05c95e3acba83e0cd5081f43cb103e11d79f2a875c3bb82 Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-04-09T18:05:48.549573 com.redhat.build-host: cpt-0012.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 description: Platform for building and running Apache Camel applications on EAP 7.1 distribution-scope: public io.cekit.version: 2.2.5 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.1 
io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Cloud Enablement Feedback name: fuse7/fuse-eap-openshift org.concrt.version: 2.2.5 org.jboss.product: eap org.jboss.product.eap.version: 7.1.5.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.1.5.GA release: "4.1554788912" summary: Platform for building and running Apache Camel applications on EAP 7.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.2-4.1554788912 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: a754f58b06f9194e7a8f10b049868bd6fac80ac7 vcs-type: git vendor: Red Hat, Inc. version: "1.2" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JBOSS_IMAGE_NAME=jboss-eap-7/eap71-openshift - JBOSS_IMAGE_VERSION=1.3 - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_EAP_VERSION=7.1.5.GA - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.1.5.GA - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - DEFAULT_ADMIN_USERNAME=eapadmin - HTTPS_ENABLE_HTTP2=true - JBOSS_MODULES_SYSTEM_PKGS=org.jboss.logmanager,jdk.nashorn.api - JOLOKIA_VERSION=1.5.0 - MAVEN_VERSION=3.5 - WILDFLY_CAMEL_VERSION=5.2.0.fuse-720023-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: f8f52fae6eae Image: sha256:92e39fef0f39140a6b78b1b623ace311310899682484124361485fdfb523198c Labels: architecture: x86_64 authoritative-source-url: registry.access.redhat.com build-date: 2019-04-09T18:05:48.549573 com.redhat.build-host: cpt-0012.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 description: Platform for building and running Apache Camel applications on EAP 7.1 distribution-scope: public io.cekit.version: 2.2.5 io.fabric8.s2i.version.jolokia: 1.5.0-redhat-1 io.fabric8.s2i.version.maven: "3.5" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.1 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Cloud Enablement Feedback name: fuse7/fuse-eap-openshift org.concrt.version: 2.2.5 org.jboss.product: eap org.jboss.product.eap.version: 7.1.5.GA org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.1.5.GA release: "4.1554788912" summary: Platform for building and running Apache Camel applications on EAP 7.1 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift/images/1.2-4.1554788912 usage: This image is very generic and does not serve a single use case. Use it as a base to build your own images. vcs-ref: a754f58b06f9194e7a8f10b049868bd6fac80ac7 vcs-type: git vendor: Red Hat, Inc. 
version: "1.2" User: "185" WorkingDir: /home/jboss Created: "2019-04-09T18:08:29Z" DockerVersion: 1.13.1 Id: sha256:2db07314cd40f1cfde19fa979f6b970b84d1a7dc418b057d09900b3511a370b9 Size: 948731354 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift@sha256:f74e72bc7bb13dec9f38ef9e00a8665a7c08a386176ca4b3c41075491e8d07e7 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:f74e72bc7bb13dec9f38ef9e00a8665a7c08a386176ca4b3c41075491e8d07e7 resourceVersion: "14027" uid: 7f39cbd5-17f0-4532-8b12-35dd7c7f7359 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:158b4527561fa6bd9dc89217fff5b1f4cce16fdc5a5aef36345db0554ba996fc size: 39501292 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:a3ba00ce78fe80837f49d37f5f538d9f7dc9eb8b1627350041496a99028cdf26 size: 1751 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1701390f3ce7d7dd4e2c941918153066ba87cefb617a6760b39db8ab5dbc2f05 size: 112626814 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.3 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: fa28dc65189d Image: 7ddccef463e2d1c41d67c0b509163c8026ed422eeb86fb251a7a38f0aaa24687 Labels: architecture: x86_64 build-date: 2021-07-23T17:36:40.565755 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain 
Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "15" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.3-15 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: fa3b855666b09a7f693162b72d089b43e2a493f0 vcs-type: git vendor: Red Hat, Inc. version: "1.3" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-8 - JBOSS_IMAGE_VERSION=1.3 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Hostname: fa28dc65189d Image: sha256:9f574112daf9dd02a36a7941d215ed2ec978a741e40d952f4ae6a202ce3265d1 Labels: architecture: x86_64 build-date: 2021-07-23T17:36:40.565755 com.redhat.build-host: cpt-1007.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-8-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-8 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 
1.8.0 org.jboss.product.version: 1.8.0 release: "15" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 1.8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-8/images/1.3-15 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: fa3b855666b09a7f693162b72d089b43e2a493f0 vcs-type: git vendor: Red Hat, Inc. version: "1.3" User: "185" WorkingDir: /home/jboss Created: "2021-07-23T17:38:32Z" DockerVersion: 1.13.1 Id: sha256:6e220244fc89ed9fed3120a4ffbd12c0556e49457829fc46f24bd7079ffd21e3 Size: 152137321 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-8@sha256:f7ca08a8dda3610fcc10cc1fe5f5d0b9f8fc7a283b01975d0fe2c1e77ae06193 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:09Z" name: sha256:f7ca08a8dda3610fcc10cc1fe5f5d0b9f8fc7a283b01975d0fe2c1e77ae06193 resourceVersion: "14216" uid: 6194a097-3738-45f0-81b0-064869fa98c5 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:5ad1bac7764c70a157f75114885b9f1f9e6c5931b4e440f6fae93b0fa0af5a91 size: 79790422 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:7c8c73b6c7403cefa799c92d3a77d9fbbed6514c744e5e32dd77a9d8c79aecb1 size: 428797001 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:9c8f327aa720dabe8098b92fc3a7cbbf7d7869869a36c6fe79f49963ac848ee2 size: 1098367117 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. 
Config: Cmd: - /opt/eap/bin/openshift-launch.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - EAP_FULL_GROUPID=org.jboss.eap - JBOSS_EAP_VERSION=7.4.7 - JBOSS_HOME=/opt/eap - JBOSS_PRODUCT=eap - LAUNCH_JBOSS_IN_BACKGROUND=true - PRODUCT_VERSION=7.4.7 - WILDFLY_VERSION=7.4.7.GA-redhat-00003 - SSO_FORCE_LEGACY_SECURITY=true - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - S2I_SOURCE_DEPLOYMENTS_FILTER=*.war *.ear *.rar *.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/galleon-m2-repository - GALLEON_MAVEN_BUILD_IMG_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/build-image-settings.xml - GALLEON_MAVEN_SETTINGS_XML=/opt/jboss/container/wildfly/s2i/galleon/settings.xml - GALLEON_VERSION=4.2.8.Final - GALLEON_WILDFLY_VERSION=5.2.6.Final - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_DIR=/opt/jboss/container/wildfly/s2i/galleon - JBOSS_CONTAINER_WILDFLY_S2I_GALLEON_PROVISION=/opt/jboss/container/wildfly/s2i/galleon/provisioning/generic_provisioning - JBOSS_CONTAINER_WILDFLY_S2I_MODULE=/opt/jboss/container/wildfly/s2i - S2I_COPY_SERVER=true - TMP_GALLEON_LOCAL_MAVEN_REPO=/opt/jboss/container/wildfly/s2i/galleon/tmp-galleon-m2-repository - WILDFLY_S2I_OUTPUT_DIR=/s2i-output - DELETE_BUILD_ARTIFACTS=true - GALLEON_BUILD_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_DEFAULT_FAT_SERVER=/opt/jboss/container/eap/galleon/definitions/fat-default-server - GALLEON_DEFAULT_SERVER=/opt/jboss/container/eap/galleon/definitions/slim-default-server - GALLEON_DEFINITIONS=/opt/jboss/container/eap/galleon/definitions - GALLEON_FP_COMMON_PKG_NAME=eap.s2i.common - GALLEON_FP_PATH=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack - GALLEON_MAVEN_REPO_HOOK_SCRIPT=/opt/jboss/container/eap/galleon/patching.sh - GALLEON_PROVISON_FP_MAVEN_ARGS_APPEND=-Dcom.redhat.xpaas.repo.jbossorg - GALLEON_S2I_FP_ARTIFACT_ID=eap-s2i-galleon-pack - GALLEON_S2I_FP_GROUP_ID=org.jboss.eap.galleon.s2i - GALLEON_S2I_PRODUCER_NAME=eap-s2i - JBOSS_CONTAINER_EAP_GALLEON_FP_PACKAGES=/opt/jboss/container/eap/galleon/eap-s2i-galleon-pack/src/main/resources/packages - OFFLINER_URLS=--url https://repo1.maven.org/maven2/ --url https://repository.jboss.org/nexus/content/groups/public/ --url https://maven.repository.redhat.com/ga/ - S2I_FP_VERSION=23.0.0.Final - WILDFLY_DIST_MAVEN_LOCATION=https://repository.jboss.org/nexus/content/groups/public/org/wildfly/wildfly-dist - DEFAULT_ADMIN_USERNAME=eapadmin - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - JBOSS_MODULES_SYSTEM_PKGS=jdk.nashorn.api,com.sun.crypto.provider - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.7.1 - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - 
JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - OFFLINER_VERSION=1.6 - AB_PROMETHEUS_JMX_EXPORTER_PORT=9799 - HTTPS_ENABLE_HTTP2=true - JBOSS_IMAGE_NAME=jboss-eap-7/eap74-openjdk8-openshift-rhel7 - JBOSS_IMAGE_VERSION=7.4.7 - LANG=C.utf8 - WILDFLY_CAMEL_VERSION=5.9.0.fuse-7_11_1-00014-redhat-00001 ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2023-06-20T12:51:18 com.redhat.component: fuse-eap-openshift-container com.redhat.deployments-dir: /opt/eap/standalone/deployments com.redhat.dev-mode: DEBUG:true com.redhat.dev-mode.port: DEBUG_PORT:8787 com.redhat.license_terms: https://www.redhat.com/agreements description: Platform for building and running Apache Camel applications on EAP 7.4 distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 3.2.1 io.fabric8.s2i.version.jolokia: 1.7.1.redhat-00001 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running Apache Camel applications on EAP 7.4 io.k8s.display-name: Fuse for OpenShift - EAP based io.openshift.expose-services: 8080:http io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,javaee,eap,eap7,fuse maintainer: Thomas Diesler name: fuse7/fuse-eap-openshift-jdk8-rhel7 org.jboss.container.deployments-dir: /deployments org.jboss.product: eap org.jboss.product.eap.version: 7.4.7 org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 7.4.7 release: "35.1687265402" summary: Platform for building and running Apache Camel applications on EAP 7.4 url: https://access.redhat.com/containers/#/registry.access.redhat.com/fuse7/fuse-eap-openshift-jdk8-rhel7/images/1.11-35.1687265402 vcs-ref: fb59c4aa5b79f841b05c9da17745ccf540244d31 vcs-type: git vendor: Red Hat, Inc. 
version: "1.11" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-06-20T12:54:43Z" Id: sha256:efb5ba3eaa6f2b624e612e278f56f5578588f34b7282eaa87eeef69a0a11ce16 Size: 1607007164 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.redhat.io/fuse7/fuse-eap-openshift-jdk8-rhel7@sha256:f7d4386680e3a44e3bf8bacc3ebfe3224232e44e4d1e2e7167aa1b4970f2866c kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:03Z" name: sha256:f7d4386680e3a44e3bf8bacc3ebfe3224232e44e4d1e2e7167aa1b4970f2866c resourceVersion: "14024" uid: d68b0529-2c81-4723-91f0-58c7d1914d1f - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:4752687a61a97d6f352ae62c381c87564bcb2f5b6523a05510ca1fb60d640216 size: 36442442 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0344366a246a0f7590c2bae4536c01f15f20c6d802b4654ce96ac81047bc23f3 size: 1740 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:1e443c4e5fd4cedbd0a83cfd29362c370b2a7f9713a6af5bfd1d824275aef75c size: 108563166 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /usr/local/s2i/run Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: fd16d6f2e774 Image: 539deff60bbd7cdfcae72170baecc0d4ae6e920a39f8afbc3bd80b305ae42fee Labels: architecture: x86_64 build-date: 2022-06-15T16:27:27.479076 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 
org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "1.1655306439" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.13-1.1655306439 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 8734c13ed58f0a634230e9ea70b053f2aceaeab6 vcs-type: git vendor: Red Hat, Inc. version: "1.13" User: "185" WorkingDir: /home/jboss ContainerConfig: Cmd: - /bin/sh - -c - '#(nop) ' - USER [185] Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-17 - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=ubi8/openjdk-17 - JBOSS_IMAGE_VERSION=1.13 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Hostname: fd16d6f2e774 Image: sha256:b2b94c298f51760d3b97a2751b6b6961368cbddd87f237ea04fb6596edfbcac7 Labels: architecture: x86_64 build-date: 2022-06-15T16:27:27.479076 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: openjdk-17-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 distribution-scope: public io.cekit.version: 3.11.0 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17 org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "1.1655306439" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 17 url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17/images/1.13-1.1655306439 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 8734c13ed58f0a634230e9ea70b053f2aceaeab6 vcs-type: git vendor: Red Hat, Inc. 
version: "1.13" User: "185" WorkingDir: /home/jboss Created: "2022-06-15T16:32:40Z" DockerVersion: 1.13.1 Id: sha256:60d04f65477a71cf8fb3347dc0e7bf18ba670ac421920938944e912414f329fb Size: 145014172 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17@sha256:f89a54e6d1340be8ddd84a602cb4f1f27c1983417f655941645bf11809d49f18 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:f89a54e6d1340be8ddd84a602cb4f1f27c1983417f655941645bf11809d49f18 resourceVersion: "14144" uid: 0beba5fd-c4ef-4ae5-b3d8-b9ac7dad4238 - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:dc35b837139a95d1b9f7f7b0435a024a74ab972416bdc248f3f608c9f917a753 size: 39307955 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:8876cf1d7108be79268dd431b736ebe667a42066087a2f59dcbe11f09ed0e453 size: 90699505 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Config: Cmd: - /opt/jboss/container/java/run/run-java.sh Env: - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/jre - JAVA_VENDOR=openjdk - JAVA_VERSION=17 - JBOSS_CONTAINER_OPENJDK_JRE_MODULE=/opt/jboss/container/openjdk/jre - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_IMAGE_NAME=ubi8/openjdk-17-runtime - JBOSS_IMAGE_VERSION=1.17 - LANG=C.utf8 ExposedPorts: 8080/tcp: {} 8443/tcp: {} Labels: architecture: x86_64 build-date: 2023-10-23T16:08:26 com.redhat.component: openjdk-17-runtime-ubi8-container com.redhat.license_terms: https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI description: Image for Red Hat OpenShift providing OpenJDK 17 runtime distribution-scope: public io.buildah.version: 1.29.0 io.cekit.version: 4.7.0 io.k8s.description: Platform for running plain Java applications (fat-jar and flat classpath) io.k8s.display-name: Java Applications io.openshift.expose-services: "" io.openshift.tags: java maintainer: Red Hat OpenJDK name: ubi8/openjdk-17-runtime org.jboss.product: openjdk org.jboss.product.openjdk.version: "17" org.jboss.product.version: "17" release: "4" summary: Image for Red Hat OpenShift providing OpenJDK 17 runtime url: https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/openjdk-17-runtime/images/1.17-4 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 3889748da09fe5896b09a6b79b9d3fb95244e361 vcs-type: git vendor: Red Hat, Inc. 
version: "1.17" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2023-10-23T16:15:41Z" Id: sha256:1d67330fe844c4813d1cb372e06f8c06b1741fa2b77e6592f8390bfd0fe8fa89 Size: 130026126 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/ubi8/openjdk-17-runtime@sha256:f953734d89252219c3dcd8f703ba8b58c9c8a0f5dfa9425c9e56ec0834f7d288 kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:07Z" name: sha256:f953734d89252219c3dcd8f703ba8b58c9c8a0f5dfa9425c9e56ec0834f7d288 resourceVersion: "14157" uid: 6c1a3719-2b73-40f4-8bc8-d8978bf485fa - apiVersion: image.openshift.io/v1 dockerImageLayers: - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:0a017d456fb3a760722ba4895579d8a412aec74e61d6805b04df6527b70fce6b size: 80807726 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:80a2bcb42ca25702f1e5b3b71dd25c6882ae0a2a03bb87d0b76c579cef9806a4 size: 1607 - mediaType: application/vnd.docker.image.rootfs.diff.tar.gzip name: sha256:bbea88449c03fd029a5d4e6bf0457f140aacd45f0639d581045c0d1dea2efc9f size: 113937969 dockerImageManifestMediaType: application/vnd.docker.distribution.manifest.v2+json dockerImageMetadata: Architecture: amd64 Author: Red Hat, Inc. Config: Cmd: - /usr/local/s2i/run Env: - container=oci - HOME=/home/jboss - JAVA_HOME=/usr/lib/jvm/java-1.8.0 - JAVA_VENDOR=openjdk - JAVA_VERSION=1.8.0 - JBOSS_CONTAINER_OPENJDK_JDK_MODULE=/opt/jboss/container/openjdk/jdk - AB_PROMETHEUS_JMX_EXPORTER_CONFIG=/opt/jboss/container/prometheus/etc/jmx-exporter-config.yaml - JBOSS_CONTAINER_PROMETHEUS_MODULE=/opt/jboss/container/prometheus - AB_JOLOKIA_AUTH_OPENSHIFT=true - AB_JOLOKIA_HTTPS=true - AB_JOLOKIA_PASSWORD_RANDOM=true - JBOSS_CONTAINER_JOLOKIA_MODULE=/opt/jboss/container/jolokia - JOLOKIA_VERSION=1.6.2 - LD_PRELOAD=libnss_wrapper.so - NSS_WRAPPER_GROUP=/etc/group - NSS_WRAPPER_PASSWD=/home/jboss/passwd - S2I_SOURCE_DEPLOYMENTS_FILTER=*.jar - JBOSS_CONTAINER_S2I_CORE_MODULE=/opt/jboss/container/s2i/core/ - JBOSS_CONTAINER_JAVA_PROXY_MODULE=/opt/jboss/container/java/proxy - JBOSS_CONTAINER_JAVA_JVM_MODULE=/opt/jboss/container/java/jvm - JBOSS_CONTAINER_MAVEN_36_MODULE=/opt/jboss/container/maven/36/ - MAVEN_VERSION=3.6 - JBOSS_CONTAINER_UTIL_LOGGING_MODULE=/opt/jboss/container/util/logging/ - JBOSS_CONTAINER_MAVEN_DEFAULT_MODULE=/opt/jboss/container/maven/default/ - JBOSS_CONTAINER_MAVEN_S2I_MODULE=/opt/jboss/container/maven/s2i - JAVA_DATA_DIR=/deployments/data - JBOSS_CONTAINER_JAVA_RUN_MODULE=/opt/jboss/container/java/run - JBOSS_CONTAINER_JAVA_S2I_MODULE=/opt/jboss/container/java/s2i - JBOSS_IMAGE_NAME=redhat-openjdk-18/openjdk18-openshift - JBOSS_IMAGE_VERSION=1.14 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/s2i ExposedPorts: 8080/tcp: {} 8443/tcp: {} 8778/tcp: {} Labels: architecture: x86_64 build-date: 2022-10-17T13:55:04 com.redhat.build-host: cpt-1002.osbs.prod.upshift.rdu2.redhat.com com.redhat.component: redhat-openjdk-18-openjdk18-openshift-container com.redhat.license_terms: https://www.redhat.com/agreements description: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 distribution-scope: public io.buildah.version: 1.26.2 io.cekit.version: 3.11.0 io.fabric8.s2i.version.jolokia: 1.6.2-redhat-00002 io.fabric8.s2i.version.maven: "3.6" io.k8s.description: Platform for building and running plain Java applications (fat-jar and flat 
classpath) io.k8s.display-name: Java Applications io.openshift.s2i.destination: /tmp io.openshift.s2i.scripts-url: image:///usr/local/s2i io.openshift.tags: builder,java maintainer: Red Hat OpenJDK name: redhat-openjdk-18/openjdk18-openshift org.jboss.container.deployments-dir: /deployments org.jboss.product: openjdk org.jboss.product.openjdk.version: 1.8.0 org.jboss.product.version: 1.8.0 release: "3" summary: Source To Image (S2I) image for Red Hat OpenShift providing OpenJDK 8 url: https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift/images/1.14-3 usage: https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_java_s2i_for_openshift/ vcs-ref: 7f66438357437cedfc17471d30cc6ae81cdc7e47 vcs-type: git vendor: Red Hat, Inc. version: "1.14" User: "185" WorkingDir: /home/jboss ContainerConfig: {} Created: "2022-10-17T14:04:57Z" Id: sha256:c3bd9b0150b83dc99fe250e474b9cc3e60efa871da8cd960e3f93f3fbed7f207 Size: 194772699 apiVersion: image.openshift.io/1.0 kind: DockerImage dockerImageMetadataVersion: "1.0" dockerImageReference: registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift@sha256:fa9556628c15b8eb22cafccb737b3fbcecfd681a5c2cfea3302dd771c644a7db kind: Image metadata: annotations: image.openshift.io/dockerLayersOrder: ascending creationTimestamp: "2025-11-02T08:11:08Z" name: sha256:fa9556628c15b8eb22cafccb737b3fbcecfd681a5c2cfea3302dd771c644a7db resourceVersion: "14164" uid: 81d5a322-e2ef-4baa-999c-09f91b848a3f kind: List metadata: resourceVersion: "" home/zuul/zuul-output/logs/controller/post_oc_get_imagestream.log0000644000175000017500000000030215115611240024626 0ustar zuulzuul*** [INFO] Showing oc get 'imagestream' No resources found in service-telemetry namespace. 
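A minimal sketch, under assumptions, of the kind of collection loop these post_oc_get_*.log files suggest: each resource type is first listed in table form and then dumped again as YAML into the same log. The resource names and namespace are taken from the surrounding output; the loop itself and its structure are illustrative assumptions, not the CI job's actual script, and only standard oc get options are used.
# Illustrative only: the resource list and namespace come from the logs above;
# the loop is an assumed shape for how these post_oc_get_*.log files are gathered.
for resource in imagestream pods; do
    echo "*** [INFO] Showing oc get '${resource}'"
    oc -n service-telemetry get "${resource}"
    echo "[INFO] oc get '${resource}' -oyaml"
    oc -n service-telemetry get "${resource}" -oyaml
done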
[INFO] oc get 'imagestream' -oyaml apiVersion: v1 items: [] kind: List metadata: resourceVersion: "" home/zuul/zuul-output/logs/controller/post_oc_get_pods.log0000644000175000017500000072215515115611240023316 0ustar zuulzuul*** [INFO] Showing oc get 'pods' NAME READY STATUS RESTARTS AGE 36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq 0/1 Completed 0 5m41s alertmanager-default-0 3/3 Running 0 4m17s curl 0/1 Completed 0 2m54s default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk 2/2 Running 2 (3m30s ago) 3m47s default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v 3/3 Running 2 (3m31s ago) 4m default-cloud1-coll-event-smartgateway-d956b4648-jwkwn 2/2 Running 2 (3m31s ago) 3m48s default-cloud1-coll-meter-smartgateway-787645d794-4zrzx 3/3 Running 2 (3m31s ago) 4m2s default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp 3/3 Running 2 (3m31s ago) 3m56s default-interconnect-55bf8d5cb-rwr2k 1/1 Running 0 3m34s default-snmp-webhook-6774d8dfbc-75fxn 1/1 Running 0 4m20s elastic-operator-c9c86658-4qchz 1/1 Running 0 6m52s elasticsearch-es-default-0 1/1 Running 0 6m26s f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx 0/1 Completed 0 5m40s infrawatch-operators-tv99j 1/1 Running 0 5m56s interconnect-operator-78b9bd8798-456sz 1/1 Running 0 5m29s prometheus-default-0 3/3 Running 0 4m31s qdr-test 1/1 Running 0 3m3s service-telemetry-operator-79647f8775-zs8hl 1/1 Running 0 5m27s smart-gateway-operator-5cd794ff55-w8r45 1/1 Running 0 5m26s stf-smoketest-smoke1-pbhxq 0/2 Completed 0 2m54s [INFO] oc get 'pods' -oyaml apiVersion: v1 items: - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.61/23"],"mac_address":"0a:58:0a:d9:00:3d","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.61/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.61" ], "mac": "0a:58:0a:d9:00:3d", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:56:27Z" generateName: 36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73- generation: 1 labels: batch.kubernetes.io/controller-uid: 0bf0ca7a-b827-4094-8a72-9ae29534ef04 batch.kubernetes.io/job-name: 36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 controller-uid: 0bf0ca7a-b827-4094-8a72-9ae29534ef04 job-name: 36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 olm.managed: "true" operatorframework.io/bundle-unpack-ref: 36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 name: 36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq namespace: service-telemetry ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: 36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 uid: 0bf0ca7a-b827-4094-8a72-9ae29534ef04 resourceVersion: "43712" uid: 8dfcd1bd-ac9d-4eba-b160-b7f4335fb440 spec: containers: - command: - opm - alpha - bundle - extract - -m - /bundle/ - -n - service-telemetry - -c - 36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 - -z env: - name: CONTAINER_IMAGE value: 
quay.io/infrawatch-operators/smart-gateway-operator-bundle@sha256:95f4acd07c67b9549025ac335762aa8685da9f3917e9160ab6bcb66a946cdf5d image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47 imagePullPolicy: IfNotPresent name: extract resources: requests: cpu: 10m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /bundle name: bundle - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-4zj26 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: default-dockercfg-t7fjv initContainers: - command: - /bin/cp - -Rv - /bin/cpb - /util/cpb image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f imagePullPolicy: IfNotPresent name: util resources: requests: cpu: 10m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /util name: util - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-4zj26 readOnly: true - command: - /util/cpb - /bundle image: quay.io/infrawatch-operators/smart-gateway-operator-bundle@sha256:95f4acd07c67b9549025ac335762aa8685da9f3917e9160ab6bcb66a946cdf5d imagePullPolicy: IfNotPresent name: pull resources: requests: cpu: 10m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /bundle name: bundle - mountPath: /util name: util - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-4zj26 readOnly: true nodeName: crc nodeSelector: kubernetes.io/os: linux preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - key: kubernetes.io/arch operator: Equal value: amd64 - key: kubernetes.io/arch operator: Equal value: arm64 - key: kubernetes.io/arch operator: Equal value: ppc64le - key: kubernetes.io/arch operator: Equal value: s390x - effect: NoSchedule key: node-role.kubernetes.io/master operator: Exists - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 120 - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 120 - effect: NoSchedule key: node.kubernetes.io/memory-pressure operator: Exists volumes: - emptyDir: {} name: bundle - emptyDir: {} name: util - name: kube-api-access-4zj26 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: 
conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:35Z" status: "False" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:33Z" reason: PodCompleted status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:28Z" reason: PodCompleted status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:28Z" reason: PodCompleted status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:27Z" status: "True" type: PodScheduled containerStatuses: - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://d3e8561202cc89d83ca353a282a58fe1a93cb345811d80c4f8d79bbece0f3150 image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47 imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47 lastState: {} name: extract ready: false resources: requests: cpu: 10m memory: 50Mi restartCount: 0 started: false state: terminated: containerID: cri-o://d3e8561202cc89d83ca353a282a58fe1a93cb345811d80c4f8d79bbece0f3150 exitCode: 0 finishedAt: "2025-12-08T17:56:33Z" reason: Completed startedAt: "2025-12-08T17:56:33Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /bundle name: bundle - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-4zj26 readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 initContainerStatuses: - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://4c35d8974d5888bf524dbe5eefde9e191c58b3803db6ffa3939b4ce04352185f image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f lastState: {} name: util ready: true resources: requests: cpu: 10m memory: 50Mi restartCount: 0 started: false state: terminated: containerID: cri-o://4c35d8974d5888bf524dbe5eefde9e191c58b3803db6ffa3939b4ce04352185f exitCode: 0 finishedAt: "2025-12-08T17:56:30Z" reason: Completed startedAt: "2025-12-08T17:56:30Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /util name: util - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-4zj26 readOnly: true recursiveReadOnly: Disabled - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://0ca132d5c8c9872037343085b432294d20a42d1355b62c8997516c468986533d image: quay.io/infrawatch-operators/smart-gateway-operator-bundle@sha256:95f4acd07c67b9549025ac335762aa8685da9f3917e9160ab6bcb66a946cdf5d imageID: quay.io/infrawatch-operators/smart-gateway-operator-bundle@sha256:95f4acd07c67b9549025ac335762aa8685da9f3917e9160ab6bcb66a946cdf5d lastState: {} name: pull ready: true resources: requests: cpu: 10m memory: 50Mi restartCount: 0 started: false state: terminated: containerID: cri-o://0ca132d5c8c9872037343085b432294d20a42d1355b62c8997516c468986533d exitCode: 0 finishedAt: "2025-12-08T17:56:32Z" reason: Completed startedAt: "2025-12-08T17:56:32Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /bundle name: bundle - mountPath: /util name: util - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-4zj26 readOnly: true 
recursiveReadOnly: Disabled phase: Succeeded podIP: 10.217.0.61 podIPs: - ip: 10.217.0.61 qosClass: Burstable startTime: "2025-12-08T17:56:28Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.76/23"],"mac_address":"0a:58:0a:d9:00:4c","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.76/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.76" ], "mac": "0a:58:0a:d9:00:4c", "default": true, "dns": {} }] kubectl.kubernetes.io/default-container: alertmanager openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:57:51Z" generateName: alertmanager-default- generation: 1 labels: alertmanager: default app.kubernetes.io/instance: default app.kubernetes.io/managed-by: prometheus-operator app.kubernetes.io/name: alertmanager app.kubernetes.io/version: 0.28.1 apps.kubernetes.io/pod-index: "0" controller-revision-hash: alertmanager-default-698b6c7ddf statefulset.kubernetes.io/pod-name: alertmanager-default-0 name: alertmanager-default-0 namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: StatefulSet name: alertmanager-default uid: e4c0643f-fe1c-4159-a1ad-162fd447b579 resourceVersion: "45438" uid: 81e17e77-b0f9-4df6-8c85-e06d1fd7a46a spec: containers: - args: - --config.file=/etc/alertmanager/config_out/alertmanager.env.yaml - --storage.path=/alertmanager - --data.retention=120h - --cluster.listen-address= - --web.listen-address=127.0.0.1:9093 - --web.route-prefix=/ - --cluster.label=service-telemetry/default - --cluster.peer=alertmanager-default-0.alertmanager-operated:9094 - --cluster.reconnect-timeout=5m - --web.config.file=/etc/alertmanager/web_config/web-config.yaml env: - name: POD_IP valueFrom: fieldRef: apiVersion: v1 fieldPath: status.podIP image: quay.io/prometheus/alertmanager:latest imagePullPolicy: Always name: alertmanager ports: - containerPort: 9094 name: mesh-tcp protocol: TCP - containerPort: 9094 name: mesh-udp protocol: UDP resources: requests: memory: 200Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /etc/alertmanager/config name: config-volume - mountPath: /etc/alertmanager/config_out name: config-out readOnly: true - mountPath: /etc/alertmanager/certs name: tls-assets readOnly: true - mountPath: /alertmanager name: alertmanager-default-db subPath: alertmanager-db - mountPath: /etc/alertmanager/secrets/default-alertmanager-proxy-tls name: secret-default-alertmanager-proxy-tls readOnly: true - mountPath: /etc/alertmanager/secrets/default-session-secret name: secret-default-session-secret readOnly: true - mountPath: /etc/alertmanager/web_config/web-config.yaml name: web-config readOnly: true subPath: web-config.yaml - mountPath: /etc/alertmanager/cluster_tls_config/cluster-tls-config.yaml name: cluster-tls-config readOnly: true subPath: cluster-tls-config.yaml - mountPath: 
/var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-vwmm9 readOnly: true - args: - --listen-address=localhost:8080 - --web-config-file=/etc/alertmanager/web_config/web-config.yaml - --reload-url=http://localhost:9093/-/reload - --config-file=/etc/alertmanager/config/alertmanager.yaml.gz - --config-envsubst-file=/etc/alertmanager/config_out/alertmanager.env.yaml - --watched-dir=/etc/alertmanager/config - --watched-dir=/etc/alertmanager/secrets/default-alertmanager-proxy-tls - --watched-dir=/etc/alertmanager/secrets/default-session-secret command: - /bin/prometheus-config-reloader env: - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: SHARD value: "-1" image: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 imagePullPolicy: IfNotPresent name: config-reloader resources: limits: cpu: 10m memory: 50Mi requests: cpu: 10m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /etc/alertmanager/config name: config-volume readOnly: true - mountPath: /etc/alertmanager/config_out name: config-out - mountPath: /etc/alertmanager/secrets/default-alertmanager-proxy-tls name: secret-default-alertmanager-proxy-tls readOnly: true - mountPath: /etc/alertmanager/secrets/default-session-secret name: secret-default-session-secret readOnly: true - mountPath: /etc/alertmanager/web_config/web-config.yaml name: web-config readOnly: true subPath: web-config.yaml - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-vwmm9 readOnly: true - args: - -https-address=:9095 - -tls-cert=/etc/tls/private/tls.crt - -tls-key=/etc/tls/private/tls.key - -upstream=http://localhost:9093/ - -cookie-secret-file=/etc/proxy/secrets/session_secret - -openshift-service-account=alertmanager-stf - '-openshift-sar={"namespace":"service-telemetry", "resource": "alertmanagers", "resourceAPIGroup":"monitoring.rhobs", "verb":"get"}' - '-openshift-delegate-urls={"/": {"namespace":"service-telemetry", "resource": "alertmanagers", "group":"monitoring.rhobs", "verb":"get"}}' image: quay.io/openshift/origin-oauth-proxy:latest imagePullPolicy: Always name: oauth-proxy ports: - containerPort: 9095 name: https protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /etc/tls/private name: secret-default-alertmanager-proxy-tls - mountPath: /etc/proxy/secrets name: secret-default-session-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-vwmm9 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true hostname: alertmanager-default-0 imagePullSecrets: - name: alertmanager-stf-dockercfg-5zfwx initContainers: - args: - --watch-interval=0 - --listen-address=:8081 - --config-file=/etc/alertmanager/config/alertmanager.yaml.gz - --config-envsubst-file=/etc/alertmanager/config_out/alertmanager.env.yaml - --watched-dir=/etc/alertmanager/config - --watched-dir=/etc/alertmanager/secrets/default-alertmanager-proxy-tls - --watched-dir=/etc/alertmanager/secrets/default-session-secret command: - 
/bin/prometheus-config-reloader env: - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: SHARD value: "-1" image: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 imagePullPolicy: IfNotPresent name: init-config-reloader ports: - containerPort: 8081 name: reloader-init protocol: TCP resources: limits: cpu: 10m memory: 50Mi requests: cpu: 10m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /etc/alertmanager/config name: config-volume readOnly: true - mountPath: /etc/alertmanager/config_out name: config-out - mountPath: /etc/alertmanager/secrets/default-alertmanager-proxy-tls name: secret-default-alertmanager-proxy-tls readOnly: true - mountPath: /etc/alertmanager/secrets/default-session-secret name: secret-default-session-secret readOnly: true - mountPath: /etc/alertmanager/web_config/web-config.yaml name: web-config readOnly: true subPath: web-config.yaml - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-vwmm9 readOnly: true nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: alertmanager-stf serviceAccountName: alertmanager-stf subdomain: alertmanager-operated terminationGracePeriodSeconds: 120 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 - effect: NoSchedule key: node.kubernetes.io/memory-pressure operator: Exists volumes: - name: alertmanager-default-db persistentVolumeClaim: claimName: alertmanager-default-db-alertmanager-default-0 - name: config-volume secret: defaultMode: 420 secretName: alertmanager-default-generated - name: tls-assets projected: defaultMode: 420 sources: - secret: name: alertmanager-default-tls-assets-0 - emptyDir: medium: Memory name: config-out - name: secret-default-alertmanager-proxy-tls secret: defaultMode: 420 secretName: default-alertmanager-proxy-tls - name: secret-default-session-secret secret: defaultMode: 420 secretName: default-session-secret - name: web-config secret: defaultMode: 420 secretName: alertmanager-default-web-config - name: cluster-tls-config secret: defaultMode: 420 secretName: alertmanager-default-cluster-tls-config - name: kube-api-access-vwmm9 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:59Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:06Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:14Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:14Z" 
status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:52Z" status: "True" type: PodScheduled containerStatuses: - allocatedResources: memory: 200Mi containerID: cri-o://47b56057ab69d50eaee94d5ad589d1657980422aeed4786992d93858dfda7b5e image: quay.io/prometheus/alertmanager:latest imageID: quay.io/prometheus/alertmanager@sha256:86ed3780fa25d23de5110c97a63a3061e7841cef87bf5183568bc97437764af2 lastState: {} name: alertmanager ready: true resources: requests: memory: 200Mi restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:11Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/alertmanager/config name: config-volume - mountPath: /etc/alertmanager/config_out name: config-out readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/alertmanager/certs name: tls-assets readOnly: true recursiveReadOnly: Disabled - mountPath: /alertmanager name: alertmanager-default-db - mountPath: /etc/alertmanager/secrets/default-alertmanager-proxy-tls name: secret-default-alertmanager-proxy-tls readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/alertmanager/secrets/default-session-secret name: secret-default-session-secret readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/alertmanager/web_config/web-config.yaml name: web-config readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/alertmanager/cluster_tls_config/cluster-tls-config.yaml name: cluster-tls-config readOnly: true recursiveReadOnly: Disabled - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-vwmm9 readOnly: true recursiveReadOnly: Disabled - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://d503ea6f67263f1580abd070342e448288584249bd193a8e3ee37d94da7770df image: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 imageID: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 lastState: {} name: config-reloader ready: true resources: limits: cpu: 10m memory: 50Mi requests: cpu: 10m memory: 50Mi restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:13Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/alertmanager/config name: config-volume readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/alertmanager/config_out name: config-out - mountPath: /etc/alertmanager/secrets/default-alertmanager-proxy-tls name: secret-default-alertmanager-proxy-tls readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/alertmanager/secrets/default-session-secret name: secret-default-session-secret readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/alertmanager/web_config/web-config.yaml name: web-config readOnly: true recursiveReadOnly: Disabled - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-vwmm9 readOnly: true recursiveReadOnly: Disabled - containerID: cri-o://d964899c9cadbc945842e48de7dab037aa53cab0c73225b3e9ef0722ad7962e1 image: quay.io/openshift/origin-oauth-proxy:latest imageID: quay.io/openshift/origin-oauth-proxy@sha256:c740bf089d5a81db4715efa881fd7f706407de56bf9727bfe47c0d45cfc5834e lastState: {} name: oauth-proxy ready: true resources: {} restartCount: 0 started: true state: running: startedAt: 
"2025-12-08T17:58:13Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/tls/private name: secret-default-alertmanager-proxy-tls - mountPath: /etc/proxy/secrets name: secret-default-session-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-vwmm9 readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 initContainerStatuses: - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://e1ca3709454dde0f975cd625db200e6158016352dea0eb93866c2ae48719f8b2 image: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 imageID: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 lastState: {} name: init-config-reloader ready: true resources: limits: cpu: 10m memory: 50Mi requests: cpu: 10m memory: 50Mi restartCount: 0 started: false state: terminated: containerID: cri-o://e1ca3709454dde0f975cd625db200e6158016352dea0eb93866c2ae48719f8b2 exitCode: 0 finishedAt: "2025-12-08T17:58:06Z" reason: Completed startedAt: "2025-12-08T17:57:59Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/alertmanager/config name: config-volume readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/alertmanager/config_out name: config-out - mountPath: /etc/alertmanager/secrets/default-alertmanager-proxy-tls name: secret-default-alertmanager-proxy-tls readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/alertmanager/secrets/default-session-secret name: secret-default-session-secret readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/alertmanager/web_config/web-config.yaml name: web-config readOnly: true recursiveReadOnly: Disabled - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-vwmm9 readOnly: true recursiveReadOnly: Disabled phase: Running podIP: 10.217.0.76 podIPs: - ip: 10.217.0.76 qosClass: Burstable startTime: "2025-12-08T17:57:52Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.85/23"],"mac_address":"0a:58:0a:d9:00:55","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.85/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.85" ], "mac": "0a:58:0a:d9:00:55", "default": true, "dns": {} }] openshift.io/scc: anyuid security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:59:14Z" generation: 1 labels: run: curl name: curl namespace: service-telemetry resourceVersion: "45976" uid: f1d063fa-3d6b-49c3-aa66-288dd70351b0 spec: containers: - args: - sh - -c - 'curl -v -k -H "Content-Type: application/json" -H "Authorization: Bearer 
eyJhbGciOiJSUzI1NiIsImtpZCI6ImFWMzc3cFlVaXZjX05walVUUlY4bWtJNUZSTTlyVFplaEIwRnBldjhZamsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTc2NTIyMDM1NCwiaWF0IjoxNzY1MjE2NzU0LCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMiLCJqdGkiOiJmMTVjZjE3NC1hMDQxLTRlNzMtODUzNS0yZDU4NDRiYjU4YWUiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6InNlcnZpY2UtdGVsZW1ldHJ5Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6InByb21ldGhldXMtc3RmIiwidWlkIjoiNzc0ZGE4MzQtODY3YS00N2UzLWE1MmMtYmZmMzRhMzlmM2Q4In19LCJuYmYiOjE3NjUyMTY3NTQsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpzZXJ2aWNlLXRlbGVtZXRyeTpwcm9tZXRoZXVzLXN0ZiJ9.QPxiNrDZ6fnSoQLCE8bQx8WL4s7PmGgB7XIKsKPknC05ykNETWp6bPJsBPq61zCjGdy6eJ46AJc6HCGNnk-gPMjPUgIyWR9icaKf9L_cs_4KKs85QH7dsHDN9BaEjXbvXvEAXqGY0KKreFa6Bk3zR1j4BwU62kNQycIvo2VxaMwp8JM2mmFzbeSZc_mjJ9o19jVi5kKf9JvXgNtcU0bxldSStVu3CfBbjMZZRWFiwYt91-gUV4cK2E8IgKa77YFywucGhWMYXq2aDeasrjBF9yMoDizucMWlJZamREkzvVfVmccTozpAYDpct-z153dobmP3fnZyqC_pWhmpS6QkggnRqvmo5MgxeFOi0Cm6q06ZmEERA6PvMW9QrSYasxXXvkDyznNGrGP7DhtVCgB8q3veU9K2zQheEAdS9SeyqnECbofewDowxMkjEqhA4L2BenBuQPg_lnPHGcVCcHQxoe7znTWw8SzXp1fBnG3Fmt0et8PRS4IbL8ds5GiSdZf4o4dQA2OXzkCgIvLTyNfxNCUlt-4Kgf6Sb7k7FmeT4U76TVwneAiAIFhlxbYtfLpHwwqcd36w4kFSOXc6haPWzhEryjX3jRIPk5ID6TbdV0n6Kp23woEQzh2B0Dn9MXC9wZC49U_RfOxL6uQLRnzfKQRkafI6tzgrBb1A3X_H8_A" -d ''[{"status":"firing","labels":{"alertname":"smoketest","severity":"warning"},"startsAt":"2025-12-08T17:59:14+00:00"}]'' https://default-alertmanager-proxy:9095/api/v2/alerts' image: quay.io/infrawatch/busyboxplus:curl imagePullPolicy: IfNotPresent name: curl resources: {} securityContext: capabilities: drop: - MKNOD terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-5rv8s readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: default-dockercfg-t7fjv nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: seLinuxOptions: level: s0:c26,c10 serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: kube-api-access-5rv8s projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:24Z" status: "False" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:14Z" reason: PodCompleted status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:14Z" reason: PodCompleted status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:14Z" reason: PodCompleted status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:14Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://a7928a57f2a8dbff9fceeb51195188b3a2cd0237b3fbfeaf0fd5213020d1106a image: quay.io/infrawatch/busyboxplus:curl imageID: 
quay.io/infrawatch/busyboxplus@sha256:66f2d29e9735508135deb16917cdee84957a0e891d035f14b6557df277d10afc lastState: {} name: curl ready: false resources: {} restartCount: 0 started: false state: terminated: containerID: cri-o://a7928a57f2a8dbff9fceeb51195188b3a2cd0237b3fbfeaf0fd5213020d1106a exitCode: 0 finishedAt: "2025-12-08T17:59:16Z" reason: Completed startedAt: "2025-12-08T17:59:16Z" user: linux: gid: 0 supplementalGroups: - 0 - 10 uid: 0 volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-5rv8s readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Succeeded podIP: 10.217.0.85 podIPs: - ip: 10.217.0.85 qosClass: BestEffort startTime: "2025-12-08T17:59:14Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.81/23"],"mac_address":"0a:58:0a:d9:00:51","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.81/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.81" ], "mac": "0a:58:0a:d9:00:51", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:58:21Z" generateName: default-cloud1-ceil-event-smartgateway-65cf5f4bb8- generation: 1 labels: app: smart-gateway pod-template-hash: 65cf5f4bb8 sg-config-resource-name: default-cloud1-ceil-event-sg-core-configmap-b8c2b65kt9 smart-gateway: default-cloud1-ceil-event name: default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: default-cloud1-ceil-event-smartgateway-65cf5f4bb8 uid: 71c3802b-5a3e-447b-9c26-759090b5719d resourceVersion: "45837" uid: 35c3d7e4-3ad4-4184-a22e-86654ad7867b spec: containers: - args: - --amqp_url - amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/anycast/ceilometer/cloud1-event.sample - --block - --stat_period - "60" - --rbc - "15000" - --count - "0" - --gw_unix - /tmp/smartgateway - --rbs - "16384" image: quay.io/infrawatch/sg-bridge:latest imagePullPolicy: Always name: bridge resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-svplm readOnly: true - args: - -config - /etc/sg-core/sg-core.conf.yaml env: - name: MY_POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name image: quay.io/infrawatch/sg-core:latest imagePullPolicy: Always name: sg-core resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /etc/sg-core/ name: sg-core-config readOnly: true - mountPath: /config/certs name: elastic-certs - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: 
kube-api-access-svplm readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: smart-gateway-dockercfg-vjrnk nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: smart-gateway serviceAccountName: smart-gateway terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: socket-dir - configMap: defaultMode: 420 name: default-cloud1-ceil-event-sg-core-configmap name: sg-core-config - name: elastic-certs secret: defaultMode: 420 secretName: elasticsearch-es-cert - name: session-secret secret: defaultMode: 420 secretName: smart-gateway-session-secret - name: kube-api-access-svplm projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:36Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:21Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:55Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:55Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:21Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://02776249f57ab371a5a6aab418d5768f7763e3f5df82b007a5de74a7b3f43508 image: quay.io/infrawatch/sg-bridge:latest imageID: quay.io/infrawatch/sg-bridge@sha256:fd353c2f7c6038b4e7e5575704facafdb7841eefbcd4afb8d65fd198dde4f92b lastState: terminated: containerID: cri-o://7c20eecf4a107ff956d250900c9a4c7fdcedf6cb6ad2e39b1a081f57fd9d46ee exitCode: 0 finishedAt: "2025-12-08T17:58:38Z" reason: Completed startedAt: "2025-12-08T17:58:37Z" name: bridge ready: true resources: {} restartCount: 2 started: true state: running: startedAt: "2025-12-08T17:58:55Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-svplm readOnly: true recursiveReadOnly: Disabled - containerID: cri-o://0e6cedcd862cd28b09ea5cf8da497c04c433583582df96033357f6d68f4581de image: quay.io/infrawatch/sg-core:latest imageID: quay.io/infrawatch/sg-core@sha256:71043bdeb285c5d38bda71d8b3782b6c179468ebde984d439c866607dafa6dc0 lastState: {} name: sg-core ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:35Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /etc/sg-core/ name: sg-core-config readOnly: true recursiveReadOnly: Disabled - mountPath: /config/certs name: elastic-certs - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-svplm readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: 
Running podIP: 10.217.0.81 podIPs: - ip: 10.217.0.81 qosClass: BestEffort startTime: "2025-12-08T17:58:21Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.78/23"],"mac_address":"0a:58:0a:d9:00:4e","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.78/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.78" ], "mac": "0a:58:0a:d9:00:4e", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:58:08Z" generateName: default-cloud1-ceil-meter-smartgateway-545b564d9f- generation: 1 labels: app: smart-gateway pod-template-hash: 545b564d9f sg-config-resource-name: default-cloud1-ceil-meter-sg-core-configmap-4km8f4fkch smart-gateway: default-cloud1-ceil-meter name: default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: default-cloud1-ceil-meter-smartgateway-545b564d9f uid: df434c64-8152-466d-af86-52f310d64c04 resourceVersion: "45802" uid: ef58ecee-c967-4d4f-946b-8c8123a73084 spec: containers: - args: - -https-address=:8083 - -tls-cert=/etc/tls/private/tls.crt - -tls-key=/etc/tls/private/tls.key - -cookie-secret-file=/etc/proxy/secrets/session_secret - -openshift-service-account=smart-gateway - -upstream=http://localhost:8081/ - '-openshift-delegate-urls={"/": {"namespace": "service-telemetry", "resource": "smartgateways", "group": "smartgateway.infra.watch", "verb": "get"}}' image: quay.io/openshift/origin-oauth-proxy:latest imagePullPolicy: Always name: oauth-proxy ports: - containerPort: 8083 name: https protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /etc/tls/private name: default-cloud1-ceil-meter-proxy-tls - mountPath: /etc/proxy/secrets name: session-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-wlrqv readOnly: true - args: - --amqp_url - amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/anycast/ceilometer/cloud1-metering.sample - --block - --stat_period - "60" - --rbc - "15000" - --count - "0" - --gw_unix - /tmp/smartgateway - --rbs - "16384" image: quay.io/infrawatch/sg-bridge:latest imagePullPolicy: Always name: bridge resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-wlrqv readOnly: true - args: - -config - /etc/sg-core/sg-core.conf.yaml env: - name: MY_POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name image: quay.io/infrawatch/sg-core:latest imagePullPolicy: Always name: sg-core ports: - containerPort: 8083 name: prom-https protocol: TCP resources: {} securityContext: 
allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /etc/sg-core/ name: sg-core-config readOnly: true - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-wlrqv readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: smart-gateway-dockercfg-vjrnk nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: smart-gateway serviceAccountName: smart-gateway terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: default-cloud1-ceil-meter-proxy-tls secret: defaultMode: 420 secretName: default-cloud1-ceil-meter-proxy-tls - emptyDir: {} name: socket-dir - configMap: defaultMode: 420 name: default-cloud1-ceil-meter-sg-core-configmap name: sg-core-config - name: session-secret secret: defaultMode: 420 secretName: smart-gateway-session-secret - name: kube-api-access-wlrqv projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:35Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:08Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:51Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:51Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:08Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://93e1626c71822f2aa5375e8f2926ab72e61f3eea1f82c2156c274c2af81adc13 image: quay.io/infrawatch/sg-bridge:latest imageID: quay.io/infrawatch/sg-bridge@sha256:fd353c2f7c6038b4e7e5575704facafdb7841eefbcd4afb8d65fd198dde4f92b lastState: terminated: containerID: cri-o://4aa1569377aee5369fcf43349de769f0f5fad3e99ebbedfb73df81ee6b8c54f6 exitCode: 0 finishedAt: "2025-12-08T17:58:37Z" reason: Completed startedAt: "2025-12-08T17:58:35Z" name: bridge ready: true resources: {} restartCount: 2 started: true state: running: startedAt: "2025-12-08T17:58:51Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-wlrqv readOnly: true recursiveReadOnly: Disabled - containerID: cri-o://7907b31ed60ebeb22abca5fbcdacad14c616f75c8d4c5a868fd6cb4f261929a5 image: quay.io/openshift/origin-oauth-proxy:latest imageID: quay.io/openshift/origin-oauth-proxy@sha256:c740bf089d5a81db4715efa881fd7f706407de56bf9727bfe47c0d45cfc5834e lastState: {} name: oauth-proxy ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:12Z" user: linux: gid: 0 supplementalGroups: - 
0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/tls/private name: default-cloud1-ceil-meter-proxy-tls - mountPath: /etc/proxy/secrets name: session-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-wlrqv readOnly: true recursiveReadOnly: Disabled - containerID: cri-o://14f9abfc42b74a6f617ce3b9fb37ab19ecb2e7388bb02af946de77d393bdb9b8 image: quay.io/infrawatch/sg-core:latest imageID: quay.io/infrawatch/sg-core@sha256:71043bdeb285c5d38bda71d8b3782b6c179468ebde984d439c866607dafa6dc0 lastState: {} name: sg-core ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:34Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /etc/sg-core/ name: sg-core-config readOnly: true recursiveReadOnly: Disabled - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-wlrqv readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.78 podIPs: - ip: 10.217.0.78 qosClass: BestEffort startTime: "2025-12-08T17:58:08Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.80/23"],"mac_address":"0a:58:0a:d9:00:50","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.80/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.80" ], "mac": "0a:58:0a:d9:00:50", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:58:20Z" generateName: default-cloud1-coll-event-smartgateway-d956b4648- generation: 1 labels: app: smart-gateway pod-template-hash: d956b4648 sg-config-resource-name: default-cloud1-coll-event-sg-core-configmap-mgfbh457cb smart-gateway: default-cloud1-coll-event name: default-cloud1-coll-event-smartgateway-d956b4648-jwkwn namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: default-cloud1-coll-event-smartgateway-d956b4648 uid: 08afce78-a47e-4785-a9a7-2433f15a73a4 resourceVersion: "45818" uid: 8ecda967-3335-4158-839b-9b4048b8f049 spec: containers: - args: - --amqp_url - amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/collectd/cloud1-notify - --block - --stat_period - "60" - --rbc - "15000" - --count - "0" - --gw_unix - /tmp/smartgateway - --rbs - "16384" image: quay.io/infrawatch/sg-bridge:latest imagePullPolicy: Always name: bridge resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-2bx5s readOnly: true - args: - -config - /etc/sg-core/sg-core.conf.yaml env: - name: MY_POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name image: quay.io/infrawatch/sg-core:latest imagePullPolicy: Always name: sg-core resources: {} securityContext: 
allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /etc/sg-core/ name: sg-core-config readOnly: true - mountPath: /config/certs name: elastic-certs - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-2bx5s readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: smart-gateway-dockercfg-vjrnk nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: smart-gateway serviceAccountName: smart-gateway terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: socket-dir - configMap: defaultMode: 420 name: default-cloud1-coll-event-sg-core-configmap name: sg-core-config - name: elastic-certs secret: defaultMode: 420 secretName: elasticsearch-es-cert - name: session-secret secret: defaultMode: 420 secretName: smart-gateway-session-secret - name: kube-api-access-2bx5s projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:35Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:20Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:53Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:53Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:20Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://fc718c9ed77354160036f95cf499ca1e883fd2d0775d91649095085d642e4d8d image: quay.io/infrawatch/sg-bridge:latest imageID: quay.io/infrawatch/sg-bridge@sha256:fd353c2f7c6038b4e7e5575704facafdb7841eefbcd4afb8d65fd198dde4f92b lastState: terminated: containerID: cri-o://31feeea8562884312474580d6cb82af6829f58ad5da6bd017e3939a9787a0721 exitCode: 0 finishedAt: "2025-12-08T17:58:37Z" reason: Completed startedAt: "2025-12-08T17:58:36Z" name: bridge ready: true resources: {} restartCount: 2 started: true state: running: startedAt: "2025-12-08T17:58:53Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-2bx5s readOnly: true recursiveReadOnly: Disabled - containerID: cri-o://269101b95debae1db230c25bfa8ad7cd5db3eaa90da0c90ea3fda968096073bb image: quay.io/infrawatch/sg-core:latest imageID: quay.io/infrawatch/sg-core@sha256:71043bdeb285c5d38bda71d8b3782b6c179468ebde984d439c866607dafa6dc0 lastState: {} name: sg-core ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:34Z" user: linux: gid: 0 supplementalGroups: - 0 - 
1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /etc/sg-core/ name: sg-core-config readOnly: true recursiveReadOnly: Disabled - mountPath: /config/certs name: elastic-certs - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-2bx5s readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.80 podIPs: - ip: 10.217.0.80 qosClass: BestEffort startTime: "2025-12-08T17:58:20Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.77/23"],"mac_address":"0a:58:0a:d9:00:4d","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.77/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.77" ], "mac": "0a:58:0a:d9:00:4d", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:58:06Z" generateName: default-cloud1-coll-meter-smartgateway-787645d794- generation: 1 labels: app: smart-gateway pod-template-hash: 787645d794 sg-config-resource-name: default-cloud1-coll-meter-sg-core-configmap-t727t56tgd smart-gateway: default-cloud1-coll-meter name: default-cloud1-coll-meter-smartgateway-787645d794-4zrzx namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: default-cloud1-coll-meter-smartgateway-787645d794 uid: 46c56519-f815-408d-97f7-b3e5c2505719 resourceVersion: "45789" uid: 0e2a1994-199f-4b38-903b-cba9061dfcad spec: containers: - args: - -https-address=:8083 - -tls-cert=/etc/tls/private/tls.crt - -tls-key=/etc/tls/private/tls.key - -cookie-secret-file=/etc/proxy/secrets/session_secret - -openshift-service-account=smart-gateway - -upstream=http://localhost:8081/ - '-openshift-delegate-urls={"/": {"namespace": "service-telemetry", "resource": "smartgateways", "group": "smartgateway.infra.watch", "verb": "get"}}' image: quay.io/openshift/origin-oauth-proxy:latest imagePullPolicy: Always name: oauth-proxy ports: - containerPort: 8083 name: https protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /etc/tls/private name: default-cloud1-coll-meter-proxy-tls - mountPath: /etc/proxy/secrets name: session-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-2828m readOnly: true - args: - --amqp_url - amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/collectd/cloud1-telemetry - --block - --stat_period - "60" - --rbc - "15000" - --count - "0" - --gw_unix - /tmp/smartgateway - --rbs - "16384" image: quay.io/infrawatch/sg-bridge:latest imagePullPolicy: Always name: bridge resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: 
/var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-2828m readOnly: true - args: - -config - /etc/sg-core/sg-core.conf.yaml env: - name: MY_POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name image: quay.io/infrawatch/sg-core:latest imagePullPolicy: Always name: sg-core ports: - containerPort: 8083 name: prom-https protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /etc/sg-core/ name: sg-core-config readOnly: true - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-2828m readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: smart-gateway-dockercfg-vjrnk nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: smart-gateway serviceAccountName: smart-gateway terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: default-cloud1-coll-meter-proxy-tls secret: defaultMode: 420 secretName: default-cloud1-coll-meter-proxy-tls - emptyDir: {} name: socket-dir - configMap: defaultMode: 420 name: default-cloud1-coll-meter-sg-core-configmap name: sg-core-config - name: session-secret secret: defaultMode: 420 secretName: smart-gateway-session-secret - name: kube-api-access-2828m projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:34Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:06Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:50Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:50Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:06Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://34f95e7ed1db53f5b91cb626986d526a33e7174895dadbad5b4413da20e01b34 image: quay.io/infrawatch/sg-bridge:latest imageID: quay.io/infrawatch/sg-bridge@sha256:fd353c2f7c6038b4e7e5575704facafdb7841eefbcd4afb8d65fd198dde4f92b lastState: terminated: containerID: cri-o://ccb2417346bcabce07b84d961e110a73d232ebc2d4fce2978412cecb91e94e38 exitCode: 0 finishedAt: "2025-12-08T17:58:37Z" reason: Completed startedAt: "2025-12-08T17:58:36Z" name: bridge ready: true resources: {} restartCount: 2 started: true state: running: startedAt: "2025-12-08T17:58:49Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-2828m readOnly: true recursiveReadOnly: Disabled - containerID: 
cri-o://5c3e98bcff2f87a945c98b381b0613e2a751f0d75e63eb280a27b4ce39b93dc8 image: quay.io/openshift/origin-oauth-proxy:latest imageID: quay.io/openshift/origin-oauth-proxy@sha256:c740bf089d5a81db4715efa881fd7f706407de56bf9727bfe47c0d45cfc5834e lastState: {} name: oauth-proxy ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:11Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/tls/private name: default-cloud1-coll-meter-proxy-tls - mountPath: /etc/proxy/secrets name: session-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-2828m readOnly: true recursiveReadOnly: Disabled - containerID: cri-o://8382b9a6b877c27bee6c98b5eee4b8a13ff89186f914673d42e5f5f7a058c90c image: quay.io/infrawatch/sg-core:latest imageID: quay.io/infrawatch/sg-core@sha256:71043bdeb285c5d38bda71d8b3782b6c179468ebde984d439c866607dafa6dc0 lastState: {} name: sg-core ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:34Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /etc/sg-core/ name: sg-core-config readOnly: true recursiveReadOnly: Disabled - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-2828m readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.77 podIPs: - ip: 10.217.0.77 qosClass: BestEffort startTime: "2025-12-08T17:58:06Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.79/23"],"mac_address":"0a:58:0a:d9:00:4f","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.79/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.79" ], "mac": "0a:58:0a:d9:00:4f", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:58:12Z" generateName: default-cloud1-sens-meter-smartgateway-66d5b7c5fc- generation: 1 labels: app: smart-gateway pod-template-hash: 66d5b7c5fc sg-config-resource-name: default-cloud1-sens-meter-sg-core-configmap-gckfgcf96t smart-gateway: default-cloud1-sens-meter name: default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: default-cloud1-sens-meter-smartgateway-66d5b7c5fc uid: f6684724-947f-4871-9658-18fbfd7def1f resourceVersion: "45827" uid: f486b0de-c62f-46a2-8649-dca61a92506c spec: containers: - args: - -https-address=:8083 - -tls-cert=/etc/tls/private/tls.crt - -tls-key=/etc/tls/private/tls.key - -cookie-secret-file=/etc/proxy/secrets/session_secret - -openshift-service-account=smart-gateway - -upstream=http://localhost:8081/ - '-openshift-delegate-urls={"/": {"namespace": "service-telemetry", "resource": "smartgateways", "group": "smartgateway.infra.watch", "verb": "get"}}' image: quay.io/openshift/origin-oauth-proxy:latest imagePullPolicy: Always name: oauth-proxy ports: - 
containerPort: 8083 name: https protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /etc/tls/private name: default-cloud1-sens-meter-proxy-tls - mountPath: /etc/proxy/secrets name: session-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-ndth6 readOnly: true - args: - --amqp_url - amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/sensubility/cloud1-telemetry - --block - --stat_period - "60" - --rbc - "15000" - --count - "0" - --gw_unix - /tmp/smartgateway - --rbs - "65535" image: quay.io/infrawatch/sg-bridge:latest imagePullPolicy: Always name: bridge resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-ndth6 readOnly: true - args: - -config - /etc/sg-core/sg-core.conf.yaml env: - name: MY_POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name image: quay.io/infrawatch/sg-core:latest imagePullPolicy: Always name: sg-core ports: - containerPort: 8083 name: prom-https protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /etc/sg-core/ name: sg-core-config readOnly: true - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-ndth6 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: smart-gateway-dockercfg-vjrnk nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: smart-gateway serviceAccountName: smart-gateway terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: default-cloud1-sens-meter-proxy-tls secret: defaultMode: 420 secretName: default-cloud1-sens-meter-proxy-tls - emptyDir: {} name: socket-dir - configMap: defaultMode: 420 name: default-cloud1-sens-meter-sg-core-configmap name: sg-core-config - name: session-secret secret: defaultMode: 420 secretName: smart-gateway-session-secret - name: kube-api-access-ndth6 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:35Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:13Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:54Z" status: "True" type: Ready - 
lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:54Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:12Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://7aecb59e0b625816d2d03f468469d78fc98b90b9fb9fcc62c5181c80da4dd465 image: quay.io/infrawatch/sg-bridge:latest imageID: quay.io/infrawatch/sg-bridge@sha256:fd353c2f7c6038b4e7e5575704facafdb7841eefbcd4afb8d65fd198dde4f92b lastState: terminated: containerID: cri-o://22081593e01430234cfb5dbf7c618890033249b7e77d517269dcbe6686c62ed4 exitCode: 0 finishedAt: "2025-12-08T17:58:37Z" reason: Completed startedAt: "2025-12-08T17:58:35Z" name: bridge ready: true resources: {} restartCount: 2 started: true state: running: startedAt: "2025-12-08T17:58:53Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-ndth6 readOnly: true recursiveReadOnly: Disabled - containerID: cri-o://b93d45fd80d9854c47e6371943e22c048604e896742cde2d1e57590f24e5ab49 image: quay.io/openshift/origin-oauth-proxy:latest imageID: quay.io/openshift/origin-oauth-proxy@sha256:c740bf089d5a81db4715efa881fd7f706407de56bf9727bfe47c0d45cfc5834e lastState: {} name: oauth-proxy ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:20Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/tls/private name: default-cloud1-sens-meter-proxy-tls - mountPath: /etc/proxy/secrets name: session-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-ndth6 readOnly: true recursiveReadOnly: Disabled - containerID: cri-o://5fbe44206cb5eaed099304c50789cc259f31beb0bc469c4c34ef038e6233d43d image: quay.io/infrawatch/sg-core:latest imageID: quay.io/infrawatch/sg-core@sha256:71043bdeb285c5d38bda71d8b3782b6c179468ebde984d439c866607dafa6dc0 lastState: {} name: sg-core ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:34Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp name: socket-dir - mountPath: /etc/sg-core/ name: sg-core-config readOnly: true recursiveReadOnly: Disabled - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-ndth6 readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.79 podIPs: - ip: 10.217.0.79 qosClass: BestEffort startTime: "2025-12-08T17:58:13Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.82/23"],"mac_address":"0a:58:0a:d9:00:52","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.82/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.82" ], "mac": "0a:58:0a:d9:00:52", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 prometheus.io/port: "8888" prometheus.io/scrape: "true" seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:58:34Z" generateName: 
default-interconnect-55bf8d5cb- generation: 1 labels: application: default-interconnect com.company: Red_Hat interconnect_cr: default-interconnect pod-template-hash: 55bf8d5cb rht.comp: Interconnect rht.comp_t: application rht.comp_ver: "1.10" rht.prod_name: Red_Hat_Integration rht.prod_ver: 2021.Q4 name: default-interconnect-55bf8d5cb-rwr2k namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: default-interconnect-55bf8d5cb uid: d9c87381-9697-4964-8b68-40cbab3a00ca resourceVersion: "45677" uid: d839602b-f183-45c8-af76-72a0d292aa33 spec: affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchExpressions: - key: application operator: In values: - default-interconnect topologyKey: kubernetes.io/hostname containers: - env: - name: APPLICATION_NAME value: default-interconnect - name: QDROUTERD_CONF value: |2+ router { mode: interior id: ${HOSTNAME} } listener { host: 127.0.0.1 port: 5672 role: normal } listener { name: health-and-stats port: 8888 http: true healthz: true metrics: true websockets: false httpRootDir: invalid } listener { role: inter-router port: 55671 saslMechanisms: EXTERNAL authenticatePeer: true sslProfile: inter-router } listener { role: edge port: 5671 saslMechanisms: PLAIN authenticatePeer: true sslProfile: openstack } listener { role: edge port: 5673 linkCapacity: 25000 } sslProfile { name: openstack certFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials/tls.crt privateKeyFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials/tls.key caCertFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca/tls.crt } sslProfile { name: inter-router certFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials/tls.crt privateKeyFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials/tls.key caCertFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca/tls.crt } address { prefix: closest distribution: closest } address { prefix: multicast distribution: multicast } address { prefix: unicast distribution: closest } address { prefix: exclusive distribution: closest } address { prefix: broadcast distribution: multicast } address { prefix: collectd distribution: multicast } address { prefix: ceilometer distribution: multicast } - name: QDROUTERD_AUTO_CREATE_SASLDB_SOURCE value: /etc/qpid-dispatch/sasl-users/ - name: QDROUTERD_AUTO_CREATE_SASLDB_PATH value: /tmp/qdrouterd.sasldb - name: POD_COUNT value: "1" - name: POD_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: POD_IP valueFrom: fieldRef: apiVersion: v1 fieldPath: status.podIP - name: QDROUTERD_AUTO_MESH_DISCOVERY value: QUERY image: registry.redhat.io/amq7/amq-interconnect@sha256:31d87473fa684178a694f9ee331d3c80f2653f9533cb65c2a325752166a077e9 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 3 httpGet: path: /healthz port: 8888 scheme: HTTP initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 name: default-interconnect ports: - containerPort: 5672 name: port-5672 protocol: TCP - containerPort: 55671 name: port-55671 protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: 
/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials name: default-interconnect-openstack-credentials - mountPath: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca name: default-interconnect-openstack-ca - mountPath: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials name: default-interconnect-inter-router-credentials - mountPath: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca name: default-interconnect-inter-router-ca - mountPath: /etc/qpid-dispatch/sasl-users name: sasl-users - mountPath: /etc/sasl2 name: sasl-config - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-t47sc readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: default-interconnect-dockercfg-nxt7g nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: default-interconnect serviceAccountName: default-interconnect terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: default-interconnect-openstack-credentials secret: defaultMode: 420 secretName: default-interconnect-openstack-credentials - name: default-interconnect-openstack-ca secret: defaultMode: 420 secretName: default-interconnect-openstack-ca - name: default-interconnect-inter-router-credentials secret: defaultMode: 420 secretName: default-interconnect-inter-router-credentials - name: default-interconnect-inter-router-ca secret: defaultMode: 420 secretName: default-interconnect-inter-router-ca - name: sasl-users secret: defaultMode: 420 secretName: default-interconnect-users - configMap: defaultMode: 420 name: default-interconnect-sasl-config name: sasl-config - name: kube-api-access-t47sc projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:36Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:34Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:36Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:36Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:34Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://4ed1d85bf0b758a1fda41070aaab1f2a6869067e761846e0bd5bb9fb92174804 image: registry.redhat.io/amq7/amq-interconnect@sha256:31d87473fa684178a694f9ee331d3c80f2653f9533cb65c2a325752166a077e9 imageID: registry.redhat.io/amq7/amq-interconnect@sha256:06ac9dd1544cec3885a8362c7685d6c5bb22aab0e46d476480b463b699ebc192 lastState: {} name: default-interconnect ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:35Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 
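# Reference note on the default-interconnect pod captured above: QDROUTERD_CONF defines
# a TLS-protected inter-router listener on 55671, a TLS edge listener on 5671 for
# OpenStack clients (SASL PLAIN), a plain edge listener on 5673 (linkCapacity 25000)
# that the in-cluster sg-bridge sidecars dial, and a health/metrics listener on 8888;
# the collectd and ceilometer address prefixes are distributed as multicast.
#
# The smart gateway pods captured earlier pair sg-bridge and sg-core around a shared
# emptyDir so the bridge can hand messages to sg-core over a unix socket
# (--gw_unix /tmp/smartgateway). A minimal sketch of that shared-socket pattern,
# with a hypothetical pod name and omitting the sg-core ConfigMap mount and the
# oauth-proxy sidecar that the real operator-generated pods carry:
apiVersion: v1
kind: Pod
metadata:
  name: shared-socket-sketch        # hypothetical example name, not an operator-generated pod
spec:
  volumes:
  - name: socket-dir
    emptyDir: {}                    # same shared scratch volume as the captured pods
  containers:
  - name: bridge                    # creates /tmp/smartgateway and writes AMQP payloads to it
    image: quay.io/infrawatch/sg-bridge:latest
    args:
    - --amqp_url
    - amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/collectd/cloud1-telemetry
    - --gw_unix
    - /tmp/smartgateway
    volumeMounts:
    - name: socket-dir
      mountPath: /tmp
  - name: sg-core                   # reads the same socket through the shared mount
    image: quay.io/infrawatch/sg-core:latest
    args:
    - -config
    - /etc/sg-core/sg-core.conf.yaml
    volumeMounts:
    - name: socket-dir
      mountPath: /tmp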
volumeMounts: - mountPath: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials name: default-interconnect-openstack-credentials - mountPath: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca name: default-interconnect-openstack-ca - mountPath: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials name: default-interconnect-inter-router-credentials - mountPath: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca name: default-interconnect-inter-router-ca - mountPath: /etc/qpid-dispatch/sasl-users name: sasl-users - mountPath: /etc/sasl2 name: sasl-config - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-t47sc readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.82 podIPs: - ip: 10.217.0.82 qosClass: BestEffort startTime: "2025-12-08T17:58:34Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.75/23"],"mac_address":"0a:58:0a:d9:00:4b","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.75/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.75" ], "mac": "0a:58:0a:d9:00:4b", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:57:48Z" generateName: default-snmp-webhook-6774d8dfbc- generation: 1 labels: app: default-snmp-webhook pod-template-hash: 6774d8dfbc name: default-snmp-webhook-6774d8dfbc-75fxn namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: default-snmp-webhook-6774d8dfbc uid: de46a844-246e-442c-927b-08ae891166af resourceVersion: "45230" uid: 37bee34a-f42e-4493-85f3-7f5e5cbd7301 spec: containers: - env: - name: SNMP_COMMUNITY value: public - name: SNMP_RETRIES value: "5" - name: SNMP_HOST value: 192.168.24.254 - name: SNMP_PORT value: "162" - name: SNMP_TIMEOUT value: "1" - name: ALERT_OID_LABEL value: oid - name: TRAP_OID_PREFIX value: 1.3.6.1.4.1.50495.15 - name: TRAP_DEFAULT_OID value: 1.3.6.1.4.1.50495.15.1.2.1 - name: TRAP_DEFAULT_SEVERITY image: quay.io/infrawatch/prometheus-webhook-snmp:latest imagePullPolicy: Always name: prometheus-webhook-snmp ports: - containerPort: 9099 protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-l59v2 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: default-dockercfg-t7fjv nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready 
operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: kube-api-access-l59v2 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:58Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:48Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:58Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:58Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:48Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://fc7a9b49acda648c324702fc105887c467f79d40143309a6e1da85462c175c0c image: quay.io/infrawatch/prometheus-webhook-snmp:latest imageID: quay.io/infrawatch/prometheus-webhook-snmp@sha256:63b0cdb1f049f50911195944e2d17dd412b5b5975c4e4731c3ae8b30deb53ab0 lastState: {} name: prometheus-webhook-snmp ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:57:58Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-l59v2 readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.75 podIPs: - ip: 10.217.0.75 qosClass: BestEffort startTime: "2025-12-08T17:57:48Z" - apiVersion: v1 kind: Pod metadata: annotations: alm-examples: |- [ { "apiVersion": "elasticsearch.k8s.elastic.co/v1", "kind": "Elasticsearch", "metadata": { "name": "elasticsearch-sample" }, "spec": { "version": "9.2.0", "nodeSets": [ { "name": "default", "config": { "node.roles": ["master", "data"], "node.attr.attr_name": "attr_value", "node.store.allow_mmap": false }, "podTemplate": { "metadata": { "labels": { "foo": "bar" } }, "spec": { "containers": [ { "name": "elasticsearch", "resources": { "requests": { "memory": "4Gi", "cpu": 1 }, "limits": { "memory": "4Gi", "cpu": 2 } } } ] } }, "count": 3 } ] } }, { "apiVersion": "kibana.k8s.elastic.co/v1", "kind": "Kibana", "metadata": { "name": "kibana-sample" }, "spec": { "version": "9.2.0", "count": 1, "elasticsearchRef": { "name": "elasticsearch-sample" }, "podTemplate": { "metadata": { "labels": { "foo": "bar" } }, "spec": { "containers": [ { "name": "kibana", "resources": { "requests": { "memory": "1Gi", "cpu": 0.5 }, "limits": { "memory": "2Gi", "cpu": 2 } } } ] } } } }, { "apiVersion": "apm.k8s.elastic.co/v1", "kind": "ApmServer", "metadata": { "name": "apmserver-sample" }, "spec": { "version": "9.2.0", "count": 1, "elasticsearchRef": { "name": "elasticsearch-sample" } } }, { "apiVersion": "enterprisesearch.k8s.elastic.co/v1", "kind": "EnterpriseSearch", "metadata": { "name": "ent-sample" }, "spec": { "version": "9.2.0", "config": { "ent_search.external_url": "https://localhost:3002" }, "count": 1, "elasticsearchRef": { "name": "elasticsearch-sample" } } }, { "apiVersion": "beat.k8s.elastic.co/v1beta1", "kind": "Beat", "metadata": { "name": "heartbeat-sample" }, "spec": { "type": "heartbeat", 
"version": "9.2.0", "elasticsearchRef": { "name": "elasticsearch-sample" }, "config": { "heartbeat.monitors": [ { "type": "tcp", "schedule": "@every 5s", "hosts": [ "elasticsearch-sample-es-http.default.svc:9200" ] } ] }, "deployment": { "replicas": 1, "podTemplate": { "spec": { "securityContext": { "runAsUser": 0 } } } } } }, { "apiVersion": "agent.k8s.elastic.co/v1alpha1", "kind": "Agent", "metadata": { "name": "agent-sample" }, "spec": { "version": "9.2.0", "elasticsearchRefs": [ { "name": "elasticsearch-sample" } ], "daemonSet": {}, "config": { "inputs": [ { "name": "system-1", "revision": 1, "type": "system/metrics", "use_output": "default", "meta": { "package": { "name": "system", "version": "0.9.1" } }, "data_stream": { "namespace": "default" }, "streams": [ { "id": "system/metrics-system.cpu", "data_stream": { "dataset": "system.cpu", "type": "metrics" }, "metricsets": [ "cpu" ], "cpu.metrics": [ "percentages", "normalized_percentages" ], "period": "10s" } ] } ] } } }, { "apiVersion": "maps.k8s.elastic.co/v1alpha1", "kind": "ElasticMapsServer", "metadata": { "name": "ems-sample" }, "spec": { "version": "9.2.0", "count": 1, "elasticsearchRef": { "name": "elasticsearch-sample" } } }, { "apiVersion": "logstash.k8s.elastic.co/v1alpha1", "kind": "Logstash", "metadata" : { "name": "logstash-sample" }, "spec": { "version": "9.2.0", "count": 1 } } ] capabilities: Deep Insights categories: Database certified: "false" co.elastic.logs/raw: '[{"type":"filestream","enabled":true,"id":"eck-container-logs-${data.kubernetes.container.id}","paths":["/var/log/containers/*${data.kubernetes.container.id}.log"],"parsers":[{"container":{}},{"ndjson":{"keys_under_root":true}}],"prospector.scanner.symlinks":true,"processors":[{"convert":{"mode":"rename","ignore_missing":true,"fields":[{"from":"error","to":"_error"}]}},{"convert":{"mode":"rename","ignore_missing":true,"fields":[{"from":"_error","to":"error.message"}]}},{"convert":{"mode":"rename","ignore_missing":true,"fields":[{"from":"source","to":"_source"}]}},{"convert":{"mode":"rename","ignore_missing":true,"fields":[{"from":"_source","to":"event.source"}]}}]}]' containerImage: registry.connect.redhat.com/elastic/eck-operator@sha256:28925fffef8f7c920b2510810cbcfc0f3dadab5f8a80b01fd5ae500e5c070105 createdAt: "2025-10-31 20:52:32" description: Run Elasticsearch, Kibana, APM Server, Beats, Enterprise Search, Elastic Agent, Elastic Maps Server and Logstash on Kubernetes and OpenShift features.operators.openshift.io/disconnected: "false" features.operators.openshift.io/fips-compliant: "false" features.operators.openshift.io/proxy-aware: "false" features.operators.openshift.io/tls-profiles: "false" features.operators.openshift.io/token-auth-aws: "false" features.operators.openshift.io/token-auth-azure: "false" features.operators.openshift.io/token-auth-gcp: "false" k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.50/23"],"mac_address":"0a:58:0a:d9:00:32","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.50/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.50" ], "mac": "0a:58:0a:d9:00:32", "default": true, "dns": {} }] olm.operatorGroup: service-telemetry-operator-group olm.operatorNamespace: service-telemetry olm.targetNamespaces: 
service-telemetry olmcahash: 5abaf59fa8e56f8a010159d4e4cd22b1bebc48aac7401d970c570f452b357052 openshift.io/scc: restricted-v2 operatorframework.io/properties: '{"properties":[{"type":"olm.gvk","value":{"group":"agent.k8s.elastic.co","kind":"Agent","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"apm.k8s.elastic.co","kind":"ApmServer","version":"v1"}},{"type":"olm.gvk","value":{"group":"apm.k8s.elastic.co","kind":"ApmServer","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"apm.k8s.elastic.co","kind":"ApmServer","version":"v1beta1"}},{"type":"olm.gvk","value":{"group":"autoscaling.k8s.elastic.co","kind":"ElasticsearchAutoscaler","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"beat.k8s.elastic.co","kind":"Beat","version":"v1beta1"}},{"type":"olm.gvk","value":{"group":"elasticsearch.k8s.elastic.co","kind":"Elasticsearch","version":"v1"}},{"type":"olm.gvk","value":{"group":"elasticsearch.k8s.elastic.co","kind":"Elasticsearch","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"elasticsearch.k8s.elastic.co","kind":"Elasticsearch","version":"v1beta1"}},{"type":"olm.gvk","value":{"group":"enterprisesearch.k8s.elastic.co","kind":"EnterpriseSearch","version":"v1"}},{"type":"olm.gvk","value":{"group":"enterprisesearch.k8s.elastic.co","kind":"EnterpriseSearch","version":"v1beta1"}},{"type":"olm.gvk","value":{"group":"kibana.k8s.elastic.co","kind":"Kibana","version":"v1"}},{"type":"olm.gvk","value":{"group":"kibana.k8s.elastic.co","kind":"Kibana","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"kibana.k8s.elastic.co","kind":"Kibana","version":"v1beta1"}},{"type":"olm.gvk","value":{"group":"logstash.k8s.elastic.co","kind":"Logstash","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"maps.k8s.elastic.co","kind":"ElasticMapsServer","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"stackconfigpolicy.k8s.elastic.co","kind":"StackConfigPolicy","version":"v1alpha1"}},{"type":"olm.package","value":{"packageName":"elasticsearch-eck-operator-certified","version":"3.2.0"}}]}' operators.openshift.io/valid-subscription: Elastic Basic license repository: https://github.com/elastic/cloud-on-k8s seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user support: elastic.co creationTimestamp: "2025-12-08T17:55:16Z" generateName: elastic-operator-c9c86658- generation: 1 labels: control-plane: elastic-operator pod-template-hash: c9c86658 name: elastic-operator-c9c86658-4qchz namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: elastic-operator-c9c86658 uid: b48daabf-c1dc-4ab1-bb80-c1e3360c27ed resourceVersion: "42664" uid: 1899106f-2682-474e-ad41-4dd00dbc7d4b spec: containers: - args: - manager - --config=/conf/eck.yaml - --manage-webhook-certs=false - --enable-webhook - --ubi-only - --distribution-channel=certified-operators env: - name: NAMESPACES valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.annotations['olm.targetNamespaces'] - name: OPERATOR_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.annotations['olm.operatorNamespace'] - name: OPERATOR_IMAGE value: registry.connect.redhat.com/elastic/eck-operator@sha256:28925fffef8f7c920b2510810cbcfc0f3dadab5f8a80b01fd5ae500e5c070105 - name: OPERATOR_CONDITION_NAME value: elasticsearch-eck-operator-certified.v3.2.0 image: registry.connect.redhat.com/elastic/eck-operator@sha256:28925fffef8f7c920b2510810cbcfc0f3dadab5f8a80b01fd5ae500e5c070105 
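# Reference note: the elastic-operator pod above runs the certified ECK operator
# (elasticsearch-eck-operator-certified.v3.2.0) with --ubi-only and
# --distribution-channel=certified-operators, watching only the namespaces injected
# by OLM through the olm.targetNamespaces annotation (service-telemetry here).
# A minimal Elasticsearch custom resource in the shape shown by the alm-examples
# annotation, trimmed to one node and pinned to the 7.17.20 build that is actually
# deployed below (the sample annotation uses version 9.2.0 and count 3):
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: elasticsearch               # matches the captured cluster-name label
spec:
  version: 7.17.20
  nodeSets:
  - name: default
    count: 1
    config:
      node.store.allow_mmap: false  # as in the operator's sample CR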
imagePullPolicy: IfNotPresent name: manager ports: - containerPort: 9443 name: https-webhook protocol: TCP resources: limits: cpu: "1" memory: 1Gi requests: cpu: 100m memory: 150Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /apiserver.local.config/certificates name: apiservice-cert - mountPath: /tmp/k8s-webhook-server/serving-certs name: webhook-cert - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-swd2b readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: elastic-operator-dockercfg-2vdv5 nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: elastic-operator serviceAccountName: elastic-operator terminationGracePeriodSeconds: 10 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 - effect: NoSchedule key: node.kubernetes.io/memory-pressure operator: Exists volumes: - name: apiservice-cert secret: defaultMode: 420 items: - key: tls.crt path: apiserver.crt - key: tls.key path: apiserver.key secretName: elastic-operator-service-cert - name: webhook-cert secret: defaultMode: 420 items: - key: tls.crt path: tls.crt - key: tls.key path: tls.key secretName: elastic-operator-service-cert - name: kube-api-access-swd2b projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:55:41Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:55:16Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:55:41Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:55:41Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:55:16Z" status: "True" type: PodScheduled containerStatuses: - allocatedResources: cpu: 100m memory: 150Mi containerID: cri-o://d23e5d578858b72e0366caf0026d96ebb7932ce9d7016adb967d25f47ad151dc image: registry.connect.redhat.com/elastic/eck-operator@sha256:28925fffef8f7c920b2510810cbcfc0f3dadab5f8a80b01fd5ae500e5c070105 imageID: registry.connect.redhat.com/elastic/eck-operator@sha256:28925fffef8f7c920b2510810cbcfc0f3dadab5f8a80b01fd5ae500e5c070105 lastState: {} name: manager ready: true resources: limits: cpu: "1" memory: 1Gi requests: cpu: 100m memory: 150Mi restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:55:40Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /apiserver.local.config/certificates name: apiservice-cert - mountPath: /tmp/k8s-webhook-server/serving-certs name: webhook-cert - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-swd2b readOnly: true recursiveReadOnly: Disabled 
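# Reference note: unlike the BestEffort telemetry pods, the elastic-operator manager
# container declares explicit resources (so the pod is Burstable) and serves its
# admission webhook from certificates in the elastic-operator-service-cert secret
# mounted at /tmp/k8s-webhook-server/serving-certs. The captured resource block,
# restated as clean YAML:
resources:
  requests:
    cpu: 100m
    memory: 150Mi
  limits:
    cpu: "1"
    memory: 1Gi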
hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.50 podIPs: - ip: 10.217.0.50 qosClass: Burstable startTime: "2025-12-08T17:55:16Z" - apiVersion: v1 kind: Pod metadata: annotations: co.elastic.logs/module: elasticsearch elasticsearch.k8s.elastic.co/config-hash: "513175922" k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.53/23"],"mac_address":"0a:58:0a:d9:00:35","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.53/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.53" ], "mac": "0a:58:0a:d9:00:35", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 policy.k8s.elastic.co/elasticsearch-config-mounts-hash: "" seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user update.k8s.elastic.co/timestamp: "2025-12-08T17:55:58.209440458Z" creationTimestamp: "2025-12-08T17:55:42Z" generateName: elasticsearch-es-default- generation: 1 labels: apps.kubernetes.io/pod-index: "0" common.k8s.elastic.co/type: elasticsearch controller-revision-hash: elasticsearch-es-default-9774f4d96 elasticsearch.k8s.elastic.co/cluster-name: elasticsearch elasticsearch.k8s.elastic.co/http-scheme: https elasticsearch.k8s.elastic.co/node-data: "true" elasticsearch.k8s.elastic.co/node-data_cold: "false" elasticsearch.k8s.elastic.co/node-data_content: "false" elasticsearch.k8s.elastic.co/node-data_frozen: "false" elasticsearch.k8s.elastic.co/node-data_hot: "false" elasticsearch.k8s.elastic.co/node-data_warm: "false" elasticsearch.k8s.elastic.co/node-ingest: "true" elasticsearch.k8s.elastic.co/node-master: "true" elasticsearch.k8s.elastic.co/node-ml: "false" elasticsearch.k8s.elastic.co/node-remote_cluster_client: "false" elasticsearch.k8s.elastic.co/node-transform: "false" elasticsearch.k8s.elastic.co/node-voting_only: "false" elasticsearch.k8s.elastic.co/statefulset-name: elasticsearch-es-default elasticsearch.k8s.elastic.co/version: 7.17.20 statefulset.kubernetes.io/pod-name: elasticsearch-es-default-0 tuned.openshift.io/elasticsearch: elasticsearch name: elasticsearch-es-default-0 namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: StatefulSet name: elasticsearch-es-default uid: c20609bb-b109-4c12-9856-c9099a3911eb resourceVersion: "43522" uid: 72b61c1d-040f-465f-bea8-e024f5879f98 spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchLabels: elasticsearch.k8s.elastic.co/cluster-name: elasticsearch topologyKey: kubernetes.io/hostname weight: 100 automountServiceAccountToken: false containers: - env: - name: POD_IP valueFrom: fieldRef: apiVersion: v1 fieldPath: status.podIP - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: NODE_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: spec.nodeName - name: NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: HEADLESS_SERVICE_NAME value: elasticsearch-es-default - name: PROBE_PASSWORD_PATH value: /mnt/elastic-internal/pod-mounted-users/elastic-internal-probe - name: PROBE_USERNAME value: elastic-internal-probe - name: READINESS_PROBE_PROTOCOL value: https - name: 
NSS_SDB_USE_CACHE value: "no" image: registry.connect.redhat.com/elastic/elasticsearch:7.17.20 imagePullPolicy: IfNotPresent lifecycle: preStop: exec: command: - bash - -c - /mnt/elastic-internal/scripts/pre-stop-hook-script.sh name: elasticsearch ports: - containerPort: 9200 name: https protocol: TCP - containerPort: 9300 name: transport protocol: TCP readinessProbe: exec: command: - bash - -c - /mnt/elastic-internal/scripts/readiness-probe-script.sh failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 5 successThreshold: 1 timeoutSeconds: 5 resources: limits: cpu: "2" memory: 2Gi requests: cpu: "1" memory: 2Gi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL privileged: false readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /mnt/elastic-internal/downward-api name: downward-api readOnly: true - mountPath: /usr/share/elasticsearch/bin name: elastic-internal-elasticsearch-bin-local - mountPath: /mnt/elastic-internal/elasticsearch-config name: elastic-internal-elasticsearch-config readOnly: true - mountPath: /usr/share/elasticsearch/config name: elastic-internal-elasticsearch-config-local - mountPath: /usr/share/elasticsearch/plugins name: elastic-internal-elasticsearch-plugins-local - mountPath: /usr/share/elasticsearch/config/http-certs name: elastic-internal-http-certificates readOnly: true - mountPath: /mnt/elastic-internal/pod-mounted-users name: elastic-internal-probe-user readOnly: true - mountPath: /usr/share/elasticsearch/config/transport-remote-certs/ name: elastic-internal-remote-certificate-authorities readOnly: true - mountPath: /mnt/elastic-internal/scripts name: elastic-internal-scripts readOnly: true - mountPath: /usr/share/elasticsearch/config/transport-certs name: elastic-internal-transport-certificates readOnly: true - mountPath: /mnt/elastic-internal/unicast-hosts name: elastic-internal-unicast-hosts readOnly: true - mountPath: /mnt/elastic-internal/xpack-file-realm name: elastic-internal-xpack-file-realm readOnly: true - mountPath: /usr/share/elasticsearch/data name: elasticsearch-data - mountPath: /usr/share/elasticsearch/logs name: elasticsearch-logs - mountPath: /tmp name: tmp-volume dnsPolicy: ClusterFirst enableServiceLinks: true hostname: elasticsearch-es-default-0 imagePullSecrets: - name: default-dockercfg-t7fjv initContainers: - command: - bash - -c - /mnt/elastic-internal/scripts/prepare-fs.sh env: - name: POD_IP valueFrom: fieldRef: apiVersion: v1 fieldPath: status.podIP - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: NODE_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: spec.nodeName - name: NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: HEADLESS_SERVICE_NAME value: elasticsearch-es-default - name: PROBE_PASSWORD_PATH value: /mnt/elastic-internal/pod-mounted-users/elastic-internal-probe - name: PROBE_USERNAME value: elastic-internal-probe - name: READINESS_PROBE_PROTOCOL value: https - name: NSS_SDB_USE_CACHE value: "no" image: registry.connect.redhat.com/elastic/elasticsearch:7.17.20 imagePullPolicy: IfNotPresent name: elastic-internal-init-filesystem resources: limits: cpu: 100m memory: 50Mi requests: cpu: 100m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL privileged: false readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log 
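# Reference note: the elasticsearch-es-default-0 pod runs ECK's filesystem-preparation
# init container (and a suspend init container, shown just below) before the
# elasticsearch container, and gates readiness on the operator-provided script.
# The captured readiness probe, restated as clean YAML:
readinessProbe:
  exec:
    command:
    - bash
    - -c
    - /mnt/elastic-internal/scripts/readiness-probe-script.sh
  initialDelaySeconds: 10
  periodSeconds: 5
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 3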
terminationMessagePolicy: File volumeMounts: - mountPath: /mnt/elastic-internal/downward-api name: downward-api readOnly: true - mountPath: /mnt/elastic-internal/elasticsearch-bin-local name: elastic-internal-elasticsearch-bin-local - mountPath: /mnt/elastic-internal/elasticsearch-config name: elastic-internal-elasticsearch-config readOnly: true - mountPath: /mnt/elastic-internal/elasticsearch-config-local name: elastic-internal-elasticsearch-config-local - mountPath: /mnt/elastic-internal/elasticsearch-plugins-local name: elastic-internal-elasticsearch-plugins-local - mountPath: /usr/share/elasticsearch/config/http-certs name: elastic-internal-http-certificates readOnly: true - mountPath: /mnt/elastic-internal/pod-mounted-users name: elastic-internal-probe-user readOnly: true - mountPath: /usr/share/elasticsearch/config/transport-remote-certs/ name: elastic-internal-remote-certificate-authorities readOnly: true - mountPath: /mnt/elastic-internal/scripts name: elastic-internal-scripts readOnly: true - mountPath: /mnt/elastic-internal/transport-certificates name: elastic-internal-transport-certificates readOnly: true - mountPath: /mnt/elastic-internal/unicast-hosts name: elastic-internal-unicast-hosts readOnly: true - mountPath: /mnt/elastic-internal/xpack-file-realm name: elastic-internal-xpack-file-realm readOnly: true - mountPath: /usr/share/elasticsearch/data name: elasticsearch-data - mountPath: /usr/share/elasticsearch/logs name: elasticsearch-logs - mountPath: /tmp name: tmp-volume - command: - bash - -c - /mnt/elastic-internal/scripts/suspend.sh env: - name: POD_IP valueFrom: fieldRef: apiVersion: v1 fieldPath: status.podIP - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: NODE_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: spec.nodeName - name: NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: HEADLESS_SERVICE_NAME value: elasticsearch-es-default - name: PROBE_PASSWORD_PATH value: /mnt/elastic-internal/pod-mounted-users/elastic-internal-probe - name: PROBE_USERNAME value: elastic-internal-probe - name: READINESS_PROBE_PROTOCOL value: https - name: NSS_SDB_USE_CACHE value: "no" image: registry.connect.redhat.com/elastic/elasticsearch:7.17.20 imagePullPolicy: IfNotPresent name: elastic-internal-suspend resources: limits: cpu: "2" memory: 2Gi requests: cpu: "1" memory: 2Gi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL privileged: false readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /mnt/elastic-internal/downward-api name: downward-api readOnly: true - mountPath: /usr/share/elasticsearch/bin name: elastic-internal-elasticsearch-bin-local - mountPath: /mnt/elastic-internal/elasticsearch-config name: elastic-internal-elasticsearch-config readOnly: true - mountPath: /usr/share/elasticsearch/config name: elastic-internal-elasticsearch-config-local - mountPath: /usr/share/elasticsearch/plugins name: elastic-internal-elasticsearch-plugins-local - mountPath: /usr/share/elasticsearch/config/http-certs name: elastic-internal-http-certificates readOnly: true - mountPath: /mnt/elastic-internal/pod-mounted-users name: elastic-internal-probe-user readOnly: true - mountPath: /usr/share/elasticsearch/config/transport-remote-certs/ name: elastic-internal-remote-certificate-authorities readOnly: true - mountPath: /mnt/elastic-internal/scripts name: elastic-internal-scripts 
readOnly: true - mountPath: /usr/share/elasticsearch/config/transport-certs name: elastic-internal-transport-certificates readOnly: true - mountPath: /mnt/elastic-internal/unicast-hosts name: elastic-internal-unicast-hosts readOnly: true - mountPath: /mnt/elastic-internal/xpack-file-realm name: elastic-internal-xpack-file-realm readOnly: true - mountPath: /usr/share/elasticsearch/data name: elasticsearch-data - mountPath: /usr/share/elasticsearch/logs name: elasticsearch-logs - mountPath: /tmp name: tmp-volume nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: default serviceAccountName: default subdomain: elasticsearch-es-default terminationGracePeriodSeconds: 180 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 - effect: NoSchedule key: node.kubernetes.io/memory-pressure operator: Exists volumes: - downwardAPI: defaultMode: 420 items: - fieldRef: apiVersion: v1 fieldPath: metadata.labels path: labels name: downward-api - emptyDir: {} name: elastic-internal-elasticsearch-bin-local - name: elastic-internal-elasticsearch-config secret: defaultMode: 420 optional: false secretName: elasticsearch-es-default-es-config - emptyDir: {} name: elastic-internal-elasticsearch-config-local - emptyDir: {} name: elastic-internal-elasticsearch-plugins-local - name: elastic-internal-http-certificates secret: defaultMode: 420 optional: false secretName: elasticsearch-es-http-certs-internal - name: elastic-internal-probe-user secret: defaultMode: 420 items: - key: elastic-internal-probe path: elastic-internal-probe - key: elastic-internal-pre-stop path: elastic-internal-pre-stop optional: false secretName: elasticsearch-es-internal-users - name: elastic-internal-remote-certificate-authorities secret: defaultMode: 420 optional: false secretName: elasticsearch-es-remote-ca - configMap: defaultMode: 493 name: elasticsearch-es-scripts optional: false name: elastic-internal-scripts - name: elastic-internal-transport-certificates secret: defaultMode: 420 optional: false secretName: elasticsearch-es-default-es-transport-certs - configMap: defaultMode: 420 name: elasticsearch-es-unicast-hosts optional: false name: elastic-internal-unicast-hosts - name: elastic-internal-xpack-file-realm secret: defaultMode: 420 optional: false secretName: elasticsearch-es-xpack-file-realm - emptyDir: {} name: elasticsearch-data - emptyDir: {} name: elasticsearch-logs - emptyDir: {} name: tmp-volume status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:55:57Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:00Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:18Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:18Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:55:42Z" status: "True" type: PodScheduled containerStatuses: - allocatedResources: cpu: "1" memory: 2Gi containerID: cri-o://3476d8e324616206c8c9516ffdb12ae160415d08919044454d72bb5d30b56124 image: registry.connect.redhat.com/elastic/elasticsearch:7.17.20 imageID: 
registry.connect.redhat.com/elastic/elasticsearch@sha256:d2ba643f52fa7935d0428814d23aaef06cde96830a8cd59ddb2e078749b98856 lastState: {} name: elasticsearch ready: true resources: limits: cpu: "2" memory: 2Gi requests: cpu: "1" memory: 2Gi restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:56:01Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /mnt/elastic-internal/downward-api name: downward-api readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/bin name: elastic-internal-elasticsearch-bin-local - mountPath: /mnt/elastic-internal/elasticsearch-config name: elastic-internal-elasticsearch-config readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/config name: elastic-internal-elasticsearch-config-local - mountPath: /usr/share/elasticsearch/plugins name: elastic-internal-elasticsearch-plugins-local - mountPath: /usr/share/elasticsearch/config/http-certs name: elastic-internal-http-certificates readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/pod-mounted-users name: elastic-internal-probe-user readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/config/transport-remote-certs/ name: elastic-internal-remote-certificate-authorities readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/scripts name: elastic-internal-scripts readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/config/transport-certs name: elastic-internal-transport-certificates readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/unicast-hosts name: elastic-internal-unicast-hosts readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/xpack-file-realm name: elastic-internal-xpack-file-realm readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/data name: elasticsearch-data - mountPath: /usr/share/elasticsearch/logs name: elasticsearch-logs - mountPath: /tmp name: tmp-volume hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 initContainerStatuses: - allocatedResources: cpu: 100m memory: 50Mi containerID: cri-o://d4b6d296db8cd435cf0480348ffac62552cdd1707b75d603ad65c90c27b92bdb image: registry.connect.redhat.com/elastic/elasticsearch:7.17.20 imageID: registry.connect.redhat.com/elastic/elasticsearch@sha256:d2ba643f52fa7935d0428814d23aaef06cde96830a8cd59ddb2e078749b98856 lastState: {} name: elastic-internal-init-filesystem ready: true resources: limits: cpu: 100m memory: 50Mi requests: cpu: 100m memory: 50Mi restartCount: 0 started: false state: terminated: containerID: cri-o://d4b6d296db8cd435cf0480348ffac62552cdd1707b75d603ad65c90c27b92bdb exitCode: 0 finishedAt: "2025-12-08T17:55:59Z" reason: Completed startedAt: "2025-12-08T17:55:57Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /mnt/elastic-internal/downward-api name: downward-api readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/elasticsearch-bin-local name: elastic-internal-elasticsearch-bin-local - mountPath: /mnt/elastic-internal/elasticsearch-config name: elastic-internal-elasticsearch-config readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/elasticsearch-config-local name: elastic-internal-elasticsearch-config-local - mountPath: /mnt/elastic-internal/elasticsearch-plugins-local name: elastic-internal-elasticsearch-plugins-local - mountPath: 
/usr/share/elasticsearch/config/http-certs name: elastic-internal-http-certificates readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/pod-mounted-users name: elastic-internal-probe-user readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/config/transport-remote-certs/ name: elastic-internal-remote-certificate-authorities readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/scripts name: elastic-internal-scripts readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/transport-certificates name: elastic-internal-transport-certificates readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/unicast-hosts name: elastic-internal-unicast-hosts readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/xpack-file-realm name: elastic-internal-xpack-file-realm readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/data name: elasticsearch-data - mountPath: /usr/share/elasticsearch/logs name: elasticsearch-logs - mountPath: /tmp name: tmp-volume - allocatedResources: cpu: "1" memory: 2Gi containerID: cri-o://00e2823aa99de0262bb709a524b98c6b94dfff12d05a2b9ba23f536a7d2aa62d image: registry.connect.redhat.com/elastic/elasticsearch:7.17.20 imageID: registry.connect.redhat.com/elastic/elasticsearch@sha256:d2ba643f52fa7935d0428814d23aaef06cde96830a8cd59ddb2e078749b98856 lastState: {} name: elastic-internal-suspend ready: true resources: limits: cpu: "2" memory: 2Gi requests: cpu: "1" memory: 2Gi restartCount: 0 started: false state: terminated: containerID: cri-o://00e2823aa99de0262bb709a524b98c6b94dfff12d05a2b9ba23f536a7d2aa62d exitCode: 0 finishedAt: "2025-12-08T17:56:00Z" reason: Completed startedAt: "2025-12-08T17:56:00Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /mnt/elastic-internal/downward-api name: downward-api readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/bin name: elastic-internal-elasticsearch-bin-local - mountPath: /mnt/elastic-internal/elasticsearch-config name: elastic-internal-elasticsearch-config readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/config name: elastic-internal-elasticsearch-config-local - mountPath: /usr/share/elasticsearch/plugins name: elastic-internal-elasticsearch-plugins-local - mountPath: /usr/share/elasticsearch/config/http-certs name: elastic-internal-http-certificates readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/pod-mounted-users name: elastic-internal-probe-user readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/config/transport-remote-certs/ name: elastic-internal-remote-certificate-authorities readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/scripts name: elastic-internal-scripts readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/config/transport-certs name: elastic-internal-transport-certificates readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/unicast-hosts name: elastic-internal-unicast-hosts readOnly: true recursiveReadOnly: Disabled - mountPath: /mnt/elastic-internal/xpack-file-realm name: elastic-internal-xpack-file-realm readOnly: true recursiveReadOnly: Disabled - mountPath: /usr/share/elasticsearch/data name: elasticsearch-data - mountPath: /usr/share/elasticsearch/logs name: elasticsearch-logs - mountPath: /tmp 
name: tmp-volume phase: Running podIP: 10.217.0.53 podIPs: - ip: 10.217.0.53 qosClass: Burstable startTime: "2025-12-08T17:55:42Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.66/23"],"mac_address":"0a:58:0a:d9:00:42","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.66/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.66" ], "mac": "0a:58:0a:d9:00:42", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:56:28Z" generateName: f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199- generation: 1 labels: batch.kubernetes.io/controller-uid: f1a99e81-3d6a-46e7-93df-17d6d3d195dd batch.kubernetes.io/job-name: f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 controller-uid: f1a99e81-3d6a-46e7-93df-17d6d3d195dd job-name: f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 olm.managed: "true" operatorframework.io/bundle-unpack-ref: f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 name: f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx namespace: service-telemetry ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 uid: f1a99e81-3d6a-46e7-93df-17d6d3d195dd resourceVersion: "43716" uid: f97402a7-57a3-4f4a-af9f-478d646d2cbc spec: containers: - command: - opm - alpha - bundle - extract - -m - /bundle/ - -n - service-telemetry - -c - f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 - -z env: - name: CONTAINER_IMAGE value: quay.io/infrawatch-operators/service-telemetry-operator-bundle@sha256:7aa358e814a4bc2836a723364515ae600d4d5f45afb491456b63283a34b1178d image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47 imagePullPolicy: IfNotPresent name: extract resources: requests: cpu: 10m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /bundle name: bundle - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-46hts readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: default-dockercfg-t7fjv initContainers: - command: - /bin/cp - -Rv - /bin/cpb - /util/cpb image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f imagePullPolicy: IfNotPresent name: util resources: requests: cpu: 10m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /util name: util - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-46hts readOnly: true 
- command: - /util/cpb - /bundle image: quay.io/infrawatch-operators/service-telemetry-operator-bundle@sha256:7aa358e814a4bc2836a723364515ae600d4d5f45afb491456b63283a34b1178d imagePullPolicy: IfNotPresent name: pull resources: requests: cpu: 10m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /bundle name: bundle - mountPath: /util name: util - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-46hts readOnly: true nodeName: crc nodeSelector: kubernetes.io/os: linux preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - key: kubernetes.io/arch operator: Equal value: amd64 - key: kubernetes.io/arch operator: Equal value: arm64 - key: kubernetes.io/arch operator: Equal value: ppc64le - key: kubernetes.io/arch operator: Equal value: s390x - effect: NoSchedule key: node-role.kubernetes.io/master operator: Exists - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 120 - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 120 - effect: NoSchedule key: node.kubernetes.io/memory-pressure operator: Exists volumes: - emptyDir: {} name: bundle - emptyDir: {} name: util - name: kube-api-access-46hts projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:35Z" status: "False" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:33Z" reason: PodCompleted status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:29Z" reason: PodCompleted status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:29Z" reason: PodCompleted status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:28Z" status: "True" type: PodScheduled containerStatuses: - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://87fea37bc9cf6f903e07e23f2df1da34cc7a8ef0682d180e4755d99a1b948e15 image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47 imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47 lastState: {} name: extract ready: false resources: requests: cpu: 10m memory: 50Mi restartCount: 0 started: false state: terminated: containerID: cri-o://87fea37bc9cf6f903e07e23f2df1da34cc7a8ef0682d180e4755d99a1b948e15 exitCode: 0 finishedAt: "2025-12-08T17:56:33Z" reason: Completed startedAt: "2025-12-08T17:56:33Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /bundle name: bundle - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: 
kube-api-access-46hts readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 initContainerStatuses: - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://b4f8f2eceaa49eb50c4831feb67ddacd68cb76a38dd21c782141f9b5dde7d0fc image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f lastState: {} name: util ready: true resources: requests: cpu: 10m memory: 50Mi restartCount: 0 started: false state: terminated: containerID: cri-o://b4f8f2eceaa49eb50c4831feb67ddacd68cb76a38dd21c782141f9b5dde7d0fc exitCode: 0 finishedAt: "2025-12-08T17:56:30Z" reason: Completed startedAt: "2025-12-08T17:56:30Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /util name: util - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-46hts readOnly: true recursiveReadOnly: Disabled - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://05a0f73f3535c0d5c2a0ebe8006219a712dfb9e9efa1cdc79f315fd1e3633fee image: quay.io/infrawatch-operators/service-telemetry-operator-bundle@sha256:7aa358e814a4bc2836a723364515ae600d4d5f45afb491456b63283a34b1178d imageID: quay.io/infrawatch-operators/service-telemetry-operator-bundle@sha256:7aa358e814a4bc2836a723364515ae600d4d5f45afb491456b63283a34b1178d lastState: {} name: pull ready: true resources: requests: cpu: 10m memory: 50Mi restartCount: 0 started: false state: terminated: containerID: cri-o://05a0f73f3535c0d5c2a0ebe8006219a712dfb9e9efa1cdc79f315fd1e3633fee exitCode: 0 finishedAt: "2025-12-08T17:56:32Z" reason: Completed startedAt: "2025-12-08T17:56:32Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /bundle name: bundle - mountPath: /util name: util - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-46hts readOnly: true recursiveReadOnly: Disabled phase: Succeeded podIP: 10.217.0.66 podIPs: - ip: 10.217.0.66 qosClass: Burstable startTime: "2025-12-08T17:56:29Z" - apiVersion: v1 kind: Pod metadata: annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: "true" k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.60/23"],"mac_address":"0a:58:0a:d9:00:3c","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.60/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.60" ], "mac": "0a:58:0a:d9:00:3c", "default": true, "dns": {} }] openshift.io/scc: anyuid security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:56:12Z" generateName: infrawatch-operators- generation: 1 labels: olm.catalogSource: infrawatch-operators olm.managed: "true" olm.pod-spec-hash: 8bi0SO0H2bQCFFkn5w6nAsNYnvndz6AVYxKKZw name: infrawatch-operators-tv99j namespace: service-telemetry ownerReferences: - apiVersion: operators.coreos.com/v1alpha1 blockOwnerDeletion: false controller: true kind: CatalogSource name: infrawatch-operators uid: c3af8943-d5c5-4768-9a5d-ec7e7c876a75 resourceVersion: "43561" uid: 020b4835-c362-478d-b714-bb42757ae9e2 spec: containers: - image: 
quay.io/infrawatch-operators/infrawatch-catalog:nightly imagePullPolicy: Always livenessProbe: exec: command: - grpc_health_probe - -addr=:50051 failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 name: registry-server ports: - containerPort: 50051 name: grpc protocol: TCP readinessProbe: exec: command: - grpc_health_probe - -addr=:50051 failureThreshold: 3 initialDelaySeconds: 5 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 resources: requests: cpu: 10m memory: 50Mi securityContext: capabilities: drop: - MKNOD readOnlyRootFilesystem: false startupProbe: exec: command: - grpc_health_probe - -addr=:50051 failureThreshold: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-rkfp7 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: infrawatch-operators-dockercfg-bcx4t nodeName: crc nodeSelector: kubernetes.io/os: linux preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: seLinuxOptions: level: s0:c26,c10 serviceAccount: infrawatch-operators serviceAccountName: infrawatch-operators terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 - effect: NoSchedule key: node.kubernetes.io/memory-pressure operator: Exists volumes: - name: kube-api-access-rkfp7 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:15Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:12Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:24Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:24Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:12Z" status: "True" type: PodScheduled containerStatuses: - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://7da1c86e59effcad5e9275193a79e0174074938b2d66280a112539c2a4e2f482 image: quay.io/infrawatch-operators/infrawatch-catalog:nightly imageID: quay.io/infrawatch-operators/infrawatch-catalog@sha256:9dced5b9c55addbc568f20aa309ec46af4534f941ff6828cc825c0620894d648 lastState: {} name: registry-server ready: true resources: requests: cpu: 10m memory: 50Mi restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:56:14Z" user: linux: gid: 0 supplementalGroups: - 0 - 1 - 2 - 3 - 4 - 6 - 10 - 11 - 20 - 26 - 27 uid: 0 volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-rkfp7 readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.60 podIPs: - ip: 10.217.0.60 qosClass: Burstable startTime: "2025-12-08T17:56:12Z" - apiVersion: v1 kind: Pod metadata: annotations: 
alm-examples: '[{"apiVersion":"interconnectedcloud.github.io/v1alpha1","kind":"Interconnect","metadata":{"name":"amq-interconnect"},"spec":{"deploymentPlan":{"size":2,"role":"interior","placement":"Any"}}}]' capabilities: Basic Install categories: Networking, Streaming & Messaging certified: "false" containerImage: registry.redhat.io/amq7/amq-interconnect-operator@sha256:a8b621237c872ded2a1d1d948fbebd693429e4a1ced1d7922406241a078d3d43 createdAt: "2019-06-28T22:00:00Z" description: Layer 7 Networking k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.70/23"],"mac_address":"0a:58:0a:d9:00:46","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.70/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.70" ], "mac": "0a:58:0a:d9:00:46", "default": true, "dns": {} }] olm.operatorGroup: service-telemetry-operator-group olm.operatorNamespace: service-telemetry olm.targetNamespaces: service-telemetry openshift.io/scc: restricted-v2 operatorframework.io/properties: '{"properties":[{"type":"olm.gvk","value":{"group":"interconnectedcloud.github.io","kind":"Interconnect","version":"v1alpha1"}},{"type":"olm.package","value":{"packageName":"amq7-interconnect-operator","version":"1.10.20"}}]}' operators.openshift.io/valid-subscription: '["Red Hat Integration", "Red Hat AMQ"]' repository: https://github.com/interconnectedcloud/qdr-operator seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user support: Red Hat creationTimestamp: "2025-12-08T17:56:39Z" generateName: interconnect-operator-78b9bd8798- generation: 1 labels: com.company: Red_Hat name: interconnect-operator pod-template-hash: 78b9bd8798 rht.comp: Interconnect rht.comp_t: application rht.comp_ver: "1.10" rht.prod_name: Red_Hat_Integration rht.prod_ver: 2022.Q2 name: interconnect-operator-78b9bd8798-456sz namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: interconnect-operator-78b9bd8798 uid: e201ee8a-c306-415f-88f4-e6bb33430460 resourceVersion: "44605" uid: 871b0dde-aad5-4e54-bd14-1c4bc8779b60 spec: containers: - command: - qdr-operator env: - name: WATCH_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: OPERATOR_NAME value: qdr-operator - name: RELATED_IMAGE_QDROUTERD_IMAGE value: registry.redhat.io/amq7/amq-interconnect@sha256:31d87473fa684178a694f9ee331d3c80f2653f9533cb65c2a325752166a077e9 - name: OPERATOR_CONDITION_NAME value: amq7-interconnect-operator.v1.10.20 image: registry.redhat.io/amq7/amq-interconnect-operator@sha256:a8b621237c872ded2a1d1d948fbebd693429e4a1ced1d7922406241a078d3d43 imagePullPolicy: Always name: interconnect-operator ports: - containerPort: 60000 name: metrics protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-l9wvz readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - 
name: interconnect-operator-dockercfg-xvhmb nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: interconnect-operator serviceAccountName: interconnect-operator terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: kube-api-access-l9wvz projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:06Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:39Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:06Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:06Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:39Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://78c7a6304373b9d8bbf8ce68e516149981c19313797eb0d7d1947051ee876f85 image: registry.redhat.io/amq7/amq-interconnect-operator@sha256:a8b621237c872ded2a1d1d948fbebd693429e4a1ced1d7922406241a078d3d43 imageID: registry.redhat.io/amq7/amq-interconnect-operator@sha256:a8b621237c872ded2a1d1d948fbebd693429e4a1ced1d7922406241a078d3d43 lastState: {} name: interconnect-operator ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:57:06Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-l9wvz readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.70 podIPs: - ip: 10.217.0.70 qosClass: BestEffort startTime: "2025-12-08T17:56:39Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.74/23"],"mac_address":"0a:58:0a:d9:00:4a","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.74/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.74" ], "mac": "0a:58:0a:d9:00:4a", "default": true, "dns": {} }] kubectl.kubernetes.io/default-container: prometheus openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:57:37Z" generateName: prometheus-default- generation: 1 labels: app.kubernetes.io/instance: default app.kubernetes.io/managed-by: prometheus-operator app.kubernetes.io/name: prometheus app.kubernetes.io/version: 3.6.0 apps.kubernetes.io/pod-index: "0" controller-revision-hash: 
prometheus-default-5f8884584d operator.prometheus.io/name: default operator.prometheus.io/shard: "0" prometheus: default statefulset.kubernetes.io/pod-name: prometheus-default-0 name: prometheus-default-0 namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: StatefulSet name: prometheus-default uid: 8bc8dd81-665a-4d11-bb42-7c006159f90e resourceVersion: "45558" uid: 3d62a6f6-b57c-48e0-9279-d8dadd01a921 spec: automountServiceAccountToken: true containers: - args: - --config.file=/etc/prometheus/config_out/prometheus.env.yaml - --web.enable-lifecycle - --web.route-prefix=/ - --web.listen-address=127.0.0.1:9090 - --storage.tsdb.retention.time=24h - --storage.tsdb.path=/prometheus - --web.config.file=/etc/prometheus/web_config/web-config.yaml image: quay.io/prometheus/prometheus:latest imagePullPolicy: Always livenessProbe: exec: command: - sh - -c - if [ -x "$(command -v curl)" ]; then exec curl --fail http://localhost:9090/-/healthy; elif [ -x "$(command -v wget)" ]; then exec wget -q -O /dev/null http://localhost:9090/-/healthy; else exit 1; fi failureThreshold: 6 periodSeconds: 5 successThreshold: 1 timeoutSeconds: 3 name: prometheus readinessProbe: exec: command: - sh - -c - if [ -x "$(command -v curl)" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x "$(command -v wget)" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi failureThreshold: 3 periodSeconds: 5 successThreshold: 1 timeoutSeconds: 3 resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 startupProbe: exec: command: - sh - -c - if [ -x "$(command -v curl)" ]; then exec curl --fail http://localhost:9090/-/ready; elif [ -x "$(command -v wget)" ]; then exec wget -q -O /dev/null http://localhost:9090/-/ready; else exit 1; fi failureThreshold: 60 periodSeconds: 15 successThreshold: 1 timeoutSeconds: 3 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /etc/prometheus/config_out name: config-out readOnly: true - mountPath: /etc/prometheus/certs name: tls-assets readOnly: true - mountPath: /prometheus name: prometheus-default-db subPath: prometheus-db - mountPath: /etc/prometheus/secrets/default-prometheus-proxy-tls name: secret-default-prometheus-proxy-tls readOnly: true - mountPath: /etc/prometheus/secrets/default-session-secret name: secret-default-session-secret readOnly: true - mountPath: /etc/prometheus/configmaps/serving-certs-ca-bundle name: configmap-serving-certs-ca-bundle readOnly: true - mountPath: /etc/prometheus/rules/prometheus-default-rulefiles-0 name: prometheus-default-rulefiles-0 - mountPath: /etc/prometheus/web_config/web-config.yaml name: web-config readOnly: true subPath: web-config.yaml - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-plr5j readOnly: true - args: - --listen-address=localhost:8080 - --reload-url=http://localhost:9090/-/reload - --config-file=/etc/prometheus/config/prometheus.yaml.gz - --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml - --watched-dir=/etc/prometheus/rules/prometheus-default-rulefiles-0 command: - /bin/prometheus-config-reloader env: - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: SHARD value: "0" image: 
registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 imagePullPolicy: IfNotPresent name: config-reloader resources: limits: cpu: 10m memory: 50Mi requests: cpu: 10m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /etc/prometheus/config name: config - mountPath: /etc/prometheus/config_out name: config-out - mountPath: /etc/prometheus/rules/prometheus-default-rulefiles-0 name: prometheus-default-rulefiles-0 - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-plr5j readOnly: true - args: - -https-address=:9092 - -tls-cert=/etc/tls/private/tls.crt - -tls-key=/etc/tls/private/tls.key - -upstream=http://localhost:9090/ - -cookie-secret-file=/etc/proxy/secrets/session_secret - -openshift-service-account=prometheus-stf - '-openshift-sar={"namespace":"service-telemetry","resource": "prometheuses", "resourceAPIGroup":"monitoring.rhobs", "verb":"get"}' - '-openshift-delegate-urls={"/":{"namespace":"service-telemetry","resource": "prometheuses", "group":"monitoring.rhobs", "verb":"get"}}' image: quay.io/openshift/origin-oauth-proxy:latest imagePullPolicy: Always name: oauth-proxy ports: - containerPort: 9092 name: https protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /etc/tls/private name: secret-default-prometheus-proxy-tls - mountPath: /etc/proxy/secrets name: secret-default-session-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-plr5j readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true hostname: prometheus-default-0 imagePullSecrets: - name: prometheus-stf-dockercfg-p6qm4 initContainers: - args: - --watch-interval=0 - --listen-address=:8081 - --config-file=/etc/prometheus/config/prometheus.yaml.gz - --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml - --watched-dir=/etc/prometheus/rules/prometheus-default-rulefiles-0 command: - /bin/prometheus-config-reloader env: - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: SHARD value: "0" image: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 imagePullPolicy: IfNotPresent name: init-config-reloader ports: - containerPort: 8081 name: reloader-init protocol: TCP resources: limits: cpu: 10m memory: 50Mi requests: cpu: 10m memory: 50Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /etc/prometheus/config name: config - mountPath: /etc/prometheus/config_out name: config-out - mountPath: /etc/prometheus/rules/prometheus-default-rulefiles-0 name: prometheus-default-rulefiles-0 - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-plr5j readOnly: true nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 
restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: prometheus-stf serviceAccountName: prometheus-stf shareProcessNamespace: false subdomain: prometheus-operated terminationGracePeriodSeconds: 600 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 - effect: NoSchedule key: node.kubernetes.io/memory-pressure operator: Exists volumes: - name: prometheus-default-db persistentVolumeClaim: claimName: prometheus-default-db-prometheus-default-0 - name: config secret: defaultMode: 420 secretName: prometheus-default - name: tls-assets projected: defaultMode: 420 sources: - secret: name: prometheus-default-tls-assets-0 - emptyDir: medium: Memory name: config-out - name: secret-default-prometheus-proxy-tls secret: defaultMode: 420 secretName: default-prometheus-proxy-tls - name: secret-default-session-secret secret: defaultMode: 420 secretName: default-session-secret - configMap: defaultMode: 420 name: serving-certs-ca-bundle name: configmap-serving-certs-ca-bundle - configMap: defaultMode: 420 name: prometheus-default-rulefiles-0 name: prometheus-default-rulefiles-0 - name: web-config secret: defaultMode: 420 secretName: prometheus-default-web-config - name: kube-api-access-plr5j projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:44Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:51Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:26Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:58:26Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:38Z" status: "True" type: PodScheduled containerStatuses: - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://ee81b895036c6012904a6833b72ac5621ad0a9e15989493babe55f30e2423c84 image: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 imageID: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 lastState: {} name: config-reloader ready: true resources: limits: cpu: 10m memory: 50Mi requests: cpu: 10m memory: 50Mi restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:03Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/prometheus/config name: config - mountPath: /etc/prometheus/config_out name: config-out - mountPath: /etc/prometheus/rules/prometheus-default-rulefiles-0 name: prometheus-default-rulefiles-0 - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-plr5j readOnly: true recursiveReadOnly: Disabled - containerID: 
cri-o://d347fa72e7ecd1e64dc39248e8ab08b737fbc63513146e89720dfbcf3e2e96ef image: quay.io/openshift/origin-oauth-proxy:latest imageID: quay.io/openshift/origin-oauth-proxy@sha256:c740bf089d5a81db4715efa881fd7f706407de56bf9727bfe47c0d45cfc5834e lastState: {} name: oauth-proxy ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:10Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/tls/private name: secret-default-prometheus-proxy-tls - mountPath: /etc/proxy/secrets name: secret-default-session-secret - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-plr5j readOnly: true recursiveReadOnly: Disabled - containerID: cri-o://e92a60bad24bc4c1c9b1c7ed70cfe2f487db1e9b5df9925a0d905ec0c54aa1f6 image: quay.io/prometheus/prometheus:latest imageID: quay.io/prometheus/prometheus@sha256:67abe619da66a6b14cc0e185205f93a29d5b03cedd1aaa06930745b21f95a374 lastState: {} name: prometheus ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:58:01Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/prometheus/config_out name: config-out readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/prometheus/certs name: tls-assets readOnly: true recursiveReadOnly: Disabled - mountPath: /prometheus name: prometheus-default-db - mountPath: /etc/prometheus/secrets/default-prometheus-proxy-tls name: secret-default-prometheus-proxy-tls readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/prometheus/secrets/default-session-secret name: secret-default-session-secret readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/prometheus/configmaps/serving-certs-ca-bundle name: configmap-serving-certs-ca-bundle readOnly: true recursiveReadOnly: Disabled - mountPath: /etc/prometheus/rules/prometheus-default-rulefiles-0 name: prometheus-default-rulefiles-0 - mountPath: /etc/prometheus/web_config/web-config.yaml name: web-config readOnly: true recursiveReadOnly: Disabled - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-plr5j readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 initContainerStatuses: - allocatedResources: cpu: 10m memory: 50Mi containerID: cri-o://e6c7573c2cef367549fd3f598e39a14ffec6661e883c00be8596d9edc848c423 image: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 imageID: registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62 lastState: {} name: init-config-reloader ready: true resources: limits: cpu: 10m memory: 50Mi requests: cpu: 10m memory: 50Mi restartCount: 0 started: false state: terminated: containerID: cri-o://e6c7573c2cef367549fd3f598e39a14ffec6661e883c00be8596d9edc848c423 exitCode: 0 finishedAt: "2025-12-08T17:57:51Z" reason: Completed startedAt: "2025-12-08T17:57:44Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/prometheus/config name: config - mountPath: /etc/prometheus/config_out name: config-out - mountPath: /etc/prometheus/rules/prometheus-default-rulefiles-0 name: prometheus-default-rulefiles-0 - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: 
kube-api-access-plr5j readOnly: true recursiveReadOnly: Disabled phase: Running podIP: 10.217.0.74 podIPs: - ip: 10.217.0.74 qosClass: Burstable startTime: "2025-12-08T17:57:38Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.83/23"],"mac_address":"0a:58:0a:d9:00:53","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.83/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.83" ], "mac": "0a:58:0a:d9:00:53", "default": true, "dns": {} }] openshift.io/scc: anyuid security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:59:05Z" generation: 1 labels: qdr: qdr-test name: qdr-test namespace: service-telemetry resourceVersion: "45918" uid: 73a290f7-fdfb-4484-9e5f-e3f80b72dec3 spec: containers: - command: - /usr/sbin/qdrouterd - -c - /etc/qpid-dispatch/qdrouterd.conf image: quay.io/tripleowallabycentos9/openstack-qdrouterd:current-tripleo imagePullPolicy: IfNotPresent name: qdr ports: - containerPort: 5672 name: amqp protocol: TCP resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL - MKNOD terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /etc/pki/tls/certs/ name: default-interconnect-selfsigned-cert - mountPath: /etc/qpid-dispatch/ name: qdr-test-config - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-6vr9f readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: default-dockercfg-t7fjv nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: seLinuxOptions: level: s0:c26,c10 serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: default-interconnect-selfsigned-cert secret: defaultMode: 420 secretName: default-interconnect-selfsigned - configMap: defaultMode: 420 name: qdr-test-config name: qdr-test-config - name: kube-api-access-6vr9f projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:13Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:05Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:13Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:13Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:05Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://4ebfce0c843489025b02d48ca209aeac66c8e0702d0a2aa2fa4cac00352330d3 image: 
quay.io/tripleowallabycentos9/openstack-qdrouterd:current-tripleo imageID: quay.io/tripleowallabycentos9/openstack-qdrouterd@sha256:79a6221bcd066540937e3d5b2c0fdc3c917ba293f44b3ba85dba269245d33902 lastState: {} name: qdr ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:59:13Z" user: linux: gid: 42465 supplementalGroups: - 42465 - 42400 uid: 42465 volumeMounts: - mountPath: /etc/pki/tls/certs/ name: default-interconnect-selfsigned-cert - mountPath: /etc/qpid-dispatch/ name: qdr-test-config - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-6vr9f readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.83 podIPs: - ip: 10.217.0.83 qosClass: BestEffort startTime: "2025-12-08T17:59:05Z" - apiVersion: v1 kind: Pod metadata: annotations: alm-examples: |- [ { "apiVersion": "infra.watch/v1beta1", "kind": "ServiceTelemetry", "metadata": { "name": "default" }, "spec": { "alerting": { "alertmanager": { "receivers": { "snmpTraps": { "alertOidLabel": "oid", "community": "public", "enabled": false, "port": 162, "retries": 5, "target": "192.168.24.254", "timeout": 1, "trapDefaultOid": "1.3.6.1.4.1.50495.15.1.2.1", "trapDefaultSeverity": "", "trapOidPrefix": "1.3.6.1.4.1.50495.15" } }, "storage": { "persistent": { "pvcStorageRequest": "20G" }, "strategy": "persistent" } }, "enabled": true }, "backends": { "events": { "elasticsearch": { "certificates": { "caCertDuration": "70080h", "endpointCertDuration": "70080h" }, "enabled": false, "forwarding": { "hostUrl": "https://elasticsearch-es-http:9200", "tlsSecretName": "elasticsearch-es-cert", "tlsServerName": "", "useBasicAuth": true, "useTls": true, "userSecretName": "elasticsearch-es-elastic-user" }, "storage": { "persistent": { "pvcStorageRequest": "20Gi" }, "strategy": "persistent" }, "version": "7.16.1" } }, "metrics": { "prometheus": { "enabled": true, "scrapeInterval": "30s", "storage": { "persistent": { "pvcStorageRequest": "20G" }, "retention": "24h", "strategy": "persistent" } } } }, "clouds": [ { "metrics": { "collectors": [ { "bridge": { "ringBufferCount": 15000, "ringBufferSize": 16384, "verbose": false }, "collectorType": "collectd", "debugEnabled": false, "subscriptionAddress": "collectd/cloud1-telemetry" }, { "bridge": { "ringBufferCount": 15000, "ringBufferSize": 16384, "verbose": false }, "collectorType": "ceilometer", "debugEnabled": false, "subscriptionAddress": "anycast/ceilometer/cloud1-metering.sample" }, { "bridge": { "ringBufferCount": 15000, "ringBufferSize": 65535, "verbose": false }, "collectorType": "sensubility", "debugEnabled": false, "subscriptionAddress": "sensubility/cloud1-telemetry" } ] }, "name": "cloud1" } ], "graphing": { "enabled": false, "grafana": { "baseImage": "registry.redhat.io/rhel8/grafana:9", "dashboards": { "enabled": true }, "disableSignoutMenu": false, "ingressEnabled": true } }, "highAvailability": { "enabled": false }, "observabilityStrategy": "use_redhat", "transports": { "qdr": { "auth": "basic", "certificates": { "caCertDuration": "70080h", "endpointCertDuration": "70080h" }, "enabled": true, "web": { "enabled": false } } } } } ] capabilities: Basic Install categories: Monitoring certified: "false" containerImage: quay.io/infrawatch/service-telemetry-operator:latest createdAt: "2025-12-07T22:43:56Z" description: Service Telemetry Framework. 
Umbrella Operator for instantiating the required dependencies and configuration of various components to build a Service Telemetry platform for telco grade monitoring. features.operators.openshift.io/cnf: "false" features.operators.openshift.io/cni: "false" features.operators.openshift.io/csi: "false" features.operators.openshift.io/disconnected: "true" features.operators.openshift.io/fips-compliant: "false" features.operators.openshift.io/proxy-aware: "false" features.operators.openshift.io/tls-profiles: "false" features.operators.openshift.io/token-auth-aws: "false" features.operators.openshift.io/token-auth-azure: "false" features.operators.openshift.io/token-auth-gcp: "false" k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.71/23"],"mac_address":"0a:58:0a:d9:00:47","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.71/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.71" ], "mac": "0a:58:0a:d9:00:47", "default": true, "dns": {} }] olm.operatorGroup: service-telemetry-operator-group olm.operatorNamespace: service-telemetry olm.targetNamespaces: service-telemetry openshift.io/scc: restricted-v2 operatorframework.io/properties: '{"properties":[{"type":"olm.maxOpenShiftVersion","value":"4.18"},{"type":"olm.constraint","value":{"all":{"constraints":[{"failureMessage":"Package smart-gateway-operator is needed for Service Telemetry Framework","package":{"packageName":"smart-gateway-operator","versionRange":"\u003e=5.0.0"}}]},"failureMessage":"Require Smart Gateway for Service Telemetry Framework"}},{"type":"olm.constraint","value":{"all":{"constraints":[{"failureMessage":"Package amq7-interconnect-operator is needed for data transport with STF","package":{"packageName":"amq7-interconnect-operator","versionRange":"\u003e=1.10.0"}}]},"failureMessage":"Require data transport for Service Telemetry Framework"}},{"type":"olm.package","value":{"packageName":"service-telemetry-operator","version":"1.5.1765147436"}},{"type":"olm.gvk","value":{"group":"infra.watch","kind":"ServiceTelemetry","version":"v1beta1"}}]}' operatorframework.io/suggested-namespace: service-telemetry operators.openshift.io/valid-subscription: '["OpenStack Platform", "Cloud Infrastructure", "Cloud Suite"]' operators.operatorframework.io/builder: operator-sdk-v1.39.2 operators.operatorframework.io/project_layout: unknown repository: https://github.com/infrawatch/service-telemetry-operator seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user support: Red Hat creationTimestamp: "2025-12-08T17:56:41Z" generateName: service-telemetry-operator-79647f8775- generation: 1 labels: name: service-telemetry-operator pod-template-hash: 79647f8775 name: service-telemetry-operator-79647f8775-zs8hl namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: service-telemetry-operator-79647f8775 uid: 588a0d15-7bcb-4166-b153-4f117b4ebc3d resourceVersion: "44608" uid: b4cd1da4-b555-42d4-b09a-38f141ee7dc4 spec: containers: - env: - name: WATCH_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.annotations['olm.targetNamespaces'] - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 
fieldPath: metadata.name - name: OPERATOR_NAME value: service-telemetry-operator - name: ANSIBLE_GATHERING value: explicit - name: RELATED_IMAGE_PROMETHEUS_WEBHOOK_SNMP_IMAGE value: quay.io/infrawatch/prometheus-webhook-snmp:latest - name: RELATED_IMAGE_OAUTH_PROXY_IMAGE value: quay.io/openshift/origin-oauth-proxy:latest - name: RELATED_IMAGE_PROMETHEUS_IMAGE value: quay.io/prometheus/prometheus:latest - name: RELATED_IMAGE_ALERTMANAGER_IMAGE value: quay.io/prometheus/alertmanager:latest - name: OPERATOR_CONDITION_NAME value: service-telemetry-operator.v1.5.1765147436 image: quay.io/infrawatch/service-telemetry-operator:latest imagePullPolicy: Always name: operator resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp/ansible-operator/runner name: runner - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-xj895 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: service-telemetry-operator-dockercfg-tqm5c nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: service-telemetry-operator serviceAccountName: service-telemetry-operator terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: runner - name: kube-api-access-xj895 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:06Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:41Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:06Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:06Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:41Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://3a21cd1b2f501351e58b3781f2bed15afd0ea184615e7ddb1e5be0235b1a7775 image: quay.io/infrawatch/service-telemetry-operator:latest imageID: quay.io/infrawatch/service-telemetry-operator@sha256:a75f7173968326a393ea8b8cde0a30ff783a83cfad8c59b3b1c72d28acf0a45d lastState: {} name: operator ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:57:06Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp/ansible-operator/runner name: runner - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-xj895 readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.71 podIPs: - ip: 10.217.0.71 qosClass: BestEffort startTime: "2025-12-08T17:56:41Z" - apiVersion: v1 
kind: Pod metadata: annotations: alm-examples: |- [ { "apiVersion": "smartgateway.infra.watch/v2", "kind": "SmartGateway", "metadata": { "name": "demo" }, "spec": { "applications": [ { "config": "host: localhost\nport: 8081\nwithtimestamp: false\n", "name": "prometheus" } ], "blockEventBus": false, "bridge": { "amqpBlock": false, "amqpUrl": "amqp://amq-interconnect:5672/collectd", "enabled": true, "ringBufferCount": 15000, "ringBufferSize": 2048, "socketBlock": true, "statsPeriod": 60, "stopCount": 0, "unixSocketPath": "/tmp", "verbose": true }, "handleErrors": false, "logLevel": "info", "pluginDir": "/usr/lib64/sg-core", "services": [ { "name": "prometheus", "ports": [ { "name": "prom-http", "port": 8081, "protocol": "TCP", "targetPort": 8081 } ] } ], "size": 1, "transports": [ { "config": "path: /tmp/smartgateway\n", "handlers": [ { "config": "", "name": "collectd-metrics" } ], "name": "socket" } ] } } ] capabilities: Basic Install categories: Monitoring certified: "false" containerImage: quay.io/infrawatch/smart-gateway-operator:latest createdAt: "2025-12-07T22:43:54Z" description: Operator for managing the Smart Gateway Custom Resources, resulting in deployments of the Smart Gateway. features.operators.openshift.io/cnf: "false" features.operators.openshift.io/cni: "false" features.operators.openshift.io/csi: "false" features.operators.openshift.io/disconnected: "true" features.operators.openshift.io/fips-compliant: "false" features.operators.openshift.io/proxy-aware: "false" features.operators.openshift.io/tls-profiles: "false" features.operators.openshift.io/token-auth-aws: "false" features.operators.openshift.io/token-auth-azure: "false" features.operators.openshift.io/token-auth-gcp: "false" k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.72/23"],"mac_address":"0a:58:0a:d9:00:48","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.72/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.72" ], "mac": "0a:58:0a:d9:00:48", "default": true, "dns": {} }] olm.operatorGroup: service-telemetry-operator-group olm.operatorNamespace: service-telemetry olm.targetNamespaces: service-telemetry openshift.io/scc: restricted-v2 operatorframework.io/properties: '{"properties":[{"type":"olm.maxOpenShiftVersion","value":"4.18"},{"type":"olm.package","value":{"packageName":"smart-gateway-operator","version":"5.0.1765147433"}},{"type":"olm.gvk","value":{"group":"smartgateway.infra.watch","kind":"SmartGateway","version":"v2"}}]}' operators.openshift.io/valid-subscription: '["OpenStack Platform", "Cloud Infrastructure", "Cloud Suite"]' operators.operatorframework.io/builder: operator-sdk-v1.39.2 operators.operatorframework.io/project_layout: unknown repository: https://github.com/infrawatch/smart-gateway-operator seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user support: Red Hat creationTimestamp: "2025-12-08T17:56:42Z" generateName: smart-gateway-operator-5cd794ff55- generation: 1 labels: app: smart-gateway-operator pod-template-hash: 5cd794ff55 name: smart-gateway-operator-5cd794ff55-w8r45 namespace: service-telemetry ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: 
smart-gateway-operator-5cd794ff55 uid: d49a7f1a-145e-4471-8189-ce3068fa25e7 resourceVersion: "44660" uid: 88186169-23e9-44fb-a70c-0f6fe06b2800 spec: containers: - env: - name: WATCH_NAMESPACE valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.annotations['olm.targetNamespaces'] - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name - name: OPERATOR_NAME value: smart-gateway-operator - name: ANSIBLE_GATHERING value: explicit - name: ANSIBLE_VERBOSITY_SMARTGATEWAY_SMARTGATEWAY_INFRA_WATCH value: "4" - name: ANSIBLE_DEBUG_LOGS value: "true" - name: RELATED_IMAGE_CORE_SMARTGATEWAY_IMAGE value: quay.io/infrawatch/sg-core:latest - name: RELATED_IMAGE_BRIDGE_SMARTGATEWAY_IMAGE value: quay.io/infrawatch/sg-bridge:latest - name: RELATED_IMAGE_OAUTH_PROXY_IMAGE value: quay.io/openshift/origin-oauth-proxy:latest - name: OPERATOR_CONDITION_NAME value: smart-gateway-operator.v5.0.1765147433 image: quay.io/infrawatch/smart-gateway-operator:latest imagePullPolicy: Always name: operator resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /tmp/ansible-operator/runner name: runner - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-ttxjn readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: smart-gateway-operator-dockercfg-7jw7l nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: smart-gateway-operator serviceAccountName: smart-gateway-operator terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - emptyDir: {} name: runner - name: kube-api-access-ttxjn projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:07Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:42Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:07Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:57:07Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:56:42Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://018ca33c45bb426da7c629b89f77b64ff95ca602ae0cf4b3e1436dbf94487999 image: quay.io/infrawatch/smart-gateway-operator:latest imageID: quay.io/infrawatch/smart-gateway-operator@sha256:23680628fbfd934de997267991c4529edf489c2385f220baa3e0cd51655468b1 lastState: {} name: operator ready: true resources: {} restartCount: 0 started: true state: running: startedAt: "2025-12-08T17:57:06Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /tmp/ansible-operator/runner name: 
runner - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-ttxjn readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 hostIPs: - ip: 192.168.126.11 phase: Running podIP: 10.217.0.72 podIPs: - ip: 10.217.0.72 qosClass: BestEffort startTime: "2025-12-08T17:56:42Z" - apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.84/23"],"mac_address":"0a:58:0a:d9:00:54","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.84/23","gateway_ip":"10.217.0.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.84" ], "mac": "0a:58:0a:d9:00:54", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user creationTimestamp: "2025-12-08T17:59:14Z" generateName: stf-smoketest-smoke1- generation: 1 labels: app: stf-smoketest batch.kubernetes.io/controller-uid: 6d15069a-7ade-4308-8933-707b8f1ca4e3 batch.kubernetes.io/job-name: stf-smoketest-smoke1 controller-uid: 6d15069a-7ade-4308-8933-707b8f1ca4e3 job-name: stf-smoketest-smoke1 name: stf-smoketest-smoke1-pbhxq namespace: service-telemetry ownerReferences: - apiVersion: batch/v1 blockOwnerDeletion: true controller: true kind: Job name: stf-smoketest-smoke1 uid: 6d15069a-7ade-4308-8933-707b8f1ca4e3 resourceVersion: "46146" uid: 612790c4-c2da-4318-89f8-c7745da26ece spec: containers: - command: - /smoketest_collectd_entrypoint.sh env: - name: CLOUDNAME value: smoke1 - name: ELASTICSEARCH_AUTH_PASS value: Vi9VYYssS8Rt2DltbKq6JXZC - name: PROMETHEUS_AUTH_TOKEN value: eyJhbGciOiJSUzI1NiIsImtpZCI6ImFWMzc3cFlVaXZjX05walVUUlY4bWtJNUZSTTlyVFplaEIwRnBldjhZamsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTc2NTIyMDM0MCwiaWF0IjoxNzY1MjE2NzQwLCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMiLCJqdGkiOiI2ZWY1YWIwNi03YjhiLTQ4OGItODkwYi02ZDM0Y2Y5YzVhMDYiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6InNlcnZpY2UtdGVsZW1ldHJ5Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6InN0Zi1wcm9tZXRoZXVzLXJlYWRlciIsInVpZCI6ImI0MDQ2N2U4LWE0OWQtNGQxOC04Y2Y4LTIxNjIwMjRkYTQ1YSJ9fSwibmJmIjoxNzY1MjE2NzQwLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6c2VydmljZS10ZWxlbWV0cnk6c3RmLXByb21ldGhldXMtcmVhZGVyIn0.O5UEY8wJ0JBFa4c4plOcpEkjisII8ssj1wcszQiMZfrph5ajeEV182dvVyEH14huoCZlDjjnwWlmZv2b8SRnMU6QAw5U7MQqXu4Zo7Ay2Al7Dec7pXWMPgkjEz-vDBP7ay-CN9ENZ4U2FHqKi4FER95_n5NdCPDuoMMT8gImOAZUTyrC7FGNN_BvMhlhW0BLawA8jYs2RCkyakVnVgn57G0QBOywE-pMAoajnq8ycH2GUG2KckzBq8b38aOETug8uVr4KO55i5rDaCn0UK1Qi322UZBaIm3dDcKqh0yUX3uzBAX_tByhmXxSNyRhb2kgWTxgKjySQdvpGc7kErLSwtAG5pq8Sy5wgm8sEix1_OuGQcrQK9hG6nluPCbn-ifLUlnkpK5SilWirEJp7tEWCj8aRCJmmyw_A0P6UcU2nSqhv9fdkON-veTeGSygpxSqwEkfAATxNi3A2RGufm6ZZm0r1c34zC66unHx4W1FUk9KlO89cGGybyQh6bQfgrMpkiGYtC_0807JnrIOwd4a7Zq3EVScFfkd8k1UIfq-tCjrkLI0fg3Dhew9U1n9RTA4L53lSZVIvVWAHbYZ4fhfH8Z0fMMBhrcX6XOGuGyYBD8jwrUeB8PknLRt5z03q_rZ0Vd6GGNQV2kr8YW5GSVQOXVK040gkL9GF5sUZADEU2A - name: OBSERVABILITY_STRATEGY value: <> image: quay.io/tripleomastercentos9/openstack-collectd:current-tripleo imagePullPolicy: IfNotPresent name: smoketest-collectd resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log 
terminationMessagePolicy: File volumeMounts: - mountPath: /etc/minimal-collectd.conf.template name: collectd-config subPath: minimal-collectd.conf.template - mountPath: /etc/collectd-sensubility.conf name: sensubility-config subPath: collectd-sensubility.conf - mountPath: /healthcheck.log name: healthcheck-log subPath: healthcheck.log - mountPath: /smoketest_collectd_entrypoint.sh name: collectd-entrypoint-script subPath: smoketest_collectd_entrypoint.sh - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-9xng2 readOnly: true - command: - /smoketest_ceilometer_entrypoint.sh env: - name: CLOUDNAME value: smoke1 - name: ELASTICSEARCH_AUTH_PASS value: Vi9VYYssS8Rt2DltbKq6JXZC - name: PROMETHEUS_AUTH_TOKEN value: eyJhbGciOiJSUzI1NiIsImtpZCI6ImFWMzc3cFlVaXZjX05walVUUlY4bWtJNUZSTTlyVFplaEIwRnBldjhZamsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTc2NTIyMDM0MCwiaWF0IjoxNzY1MjE2NzQwLCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMiLCJqdGkiOiI2ZWY1YWIwNi03YjhiLTQ4OGItODkwYi02ZDM0Y2Y5YzVhMDYiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6InNlcnZpY2UtdGVsZW1ldHJ5Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6InN0Zi1wcm9tZXRoZXVzLXJlYWRlciIsInVpZCI6ImI0MDQ2N2U4LWE0OWQtNGQxOC04Y2Y4LTIxNjIwMjRkYTQ1YSJ9fSwibmJmIjoxNzY1MjE2NzQwLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6c2VydmljZS10ZWxlbWV0cnk6c3RmLXByb21ldGhldXMtcmVhZGVyIn0.O5UEY8wJ0JBFa4c4plOcpEkjisII8ssj1wcszQiMZfrph5ajeEV182dvVyEH14huoCZlDjjnwWlmZv2b8SRnMU6QAw5U7MQqXu4Zo7Ay2Al7Dec7pXWMPgkjEz-vDBP7ay-CN9ENZ4U2FHqKi4FER95_n5NdCPDuoMMT8gImOAZUTyrC7FGNN_BvMhlhW0BLawA8jYs2RCkyakVnVgn57G0QBOywE-pMAoajnq8ycH2GUG2KckzBq8b38aOETug8uVr4KO55i5rDaCn0UK1Qi322UZBaIm3dDcKqh0yUX3uzBAX_tByhmXxSNyRhb2kgWTxgKjySQdvpGc7kErLSwtAG5pq8Sy5wgm8sEix1_OuGQcrQK9hG6nluPCbn-ifLUlnkpK5SilWirEJp7tEWCj8aRCJmmyw_A0P6UcU2nSqhv9fdkON-veTeGSygpxSqwEkfAATxNi3A2RGufm6ZZm0r1c34zC66unHx4W1FUk9KlO89cGGybyQh6bQfgrMpkiGYtC_0807JnrIOwd4a7Zq3EVScFfkd8k1UIfq-tCjrkLI0fg3Dhew9U1n9RTA4L53lSZVIvVWAHbYZ4fhfH8Z0fMMBhrcX6XOGuGyYBD8jwrUeB8PknLRt5z03q_rZ0Vd6GGNQV2kr8YW5GSVQOXVK040gkL9GF5sUZADEU2A - name: OBSERVABILITY_STRATEGY value: <> image: quay.io/tripleomastercentos9/openstack-ceilometer-notification:current-tripleo imagePullPolicy: IfNotPresent name: smoketest-ceilometer resources: {} securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 1000670000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /ceilometer_publish.py name: ceilometer-publisher subPath: ceilometer_publish.py - mountPath: /smoketest_ceilometer_entrypoint.sh name: ceilometer-entrypoint-script subPath: smoketest_ceilometer_entrypoint.sh - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-9xng2 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true imagePullSecrets: - name: default-dockercfg-t7fjv nodeName: crc preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Never schedulerName: default-scheduler securityContext: fsGroup: 1000670000 seLinuxOptions: level: s0:c26,c10 seccompProfile: type: RuntimeDefault serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - configMap: defaultMode: 420 name: stf-smoketest-collectd-config name: collectd-config - configMap: defaultMode: 420 name: 
stf-smoketest-sensubility-config name: sensubility-config - configMap: defaultMode: 420 name: stf-smoketest-healthcheck-log name: healthcheck-log - configMap: defaultMode: 365 name: stf-smoketest-collectd-entrypoint-script name: collectd-entrypoint-script - configMap: defaultMode: 365 name: stf-smoketest-ceilometer-entrypoint-script name: ceilometer-entrypoint-script - configMap: defaultMode: 365 name: stf-smoketest-ceilometer-publisher name: ceilometer-publisher - name: kube-api-access-9xng2 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2025-12-08T18:00:03Z" status: "False" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:14Z" reason: PodCompleted status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:59Z" reason: PodCompleted status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:59Z" reason: PodCompleted status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-12-08T17:59:14Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://ae6ee93a5a6d6a767e3bbb450044418d804b86e821540a775c6b84a6df04014f image: quay.io/tripleomastercentos9/openstack-ceilometer-notification:current-tripleo imageID: quay.io/tripleomastercentos9/openstack-ceilometer-notification@sha256:73886b5616a348d6cee1766765bef1d7ce4c0de2f02c51fe7b68c2ed45c5cb4d lastState: {} name: smoketest-ceilometer ready: false resources: {} restartCount: 0 started: false state: terminated: containerID: cri-o://ae6ee93a5a6d6a767e3bbb450044418d804b86e821540a775c6b84a6df04014f exitCode: 0 finishedAt: "2025-12-08T18:00:01Z" reason: Completed startedAt: "2025-12-08T17:59:30Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /ceilometer_publish.py name: ceilometer-publisher - mountPath: /smoketest_ceilometer_entrypoint.sh name: ceilometer-entrypoint-script - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-9xng2 readOnly: true recursiveReadOnly: Disabled - containerID: cri-o://49a2d94e35dff7bd89d4adda86a9bb2a1c75c043a28e9dd9185f028d19dcc6a8 image: quay.io/tripleomastercentos9/openstack-collectd:current-tripleo imageID: quay.io/tripleomastercentos9/openstack-collectd@sha256:a6550a778495d874a71cb26f104e1bea9ac348091e9edc4ae12229c86ff8b72e lastState: {} name: smoketest-collectd ready: false resources: {} restartCount: 0 started: false state: terminated: containerID: cri-o://49a2d94e35dff7bd89d4adda86a9bb2a1c75c043a28e9dd9185f028d19dcc6a8 exitCode: 0 finishedAt: "2025-12-08T17:59:58Z" reason: Completed startedAt: "2025-12-08T17:59:24Z" user: linux: gid: 0 supplementalGroups: - 0 - 1000670000 uid: 1000670000 volumeMounts: - mountPath: /etc/minimal-collectd.conf.template name: collectd-config - mountPath: /etc/collectd-sensubility.conf name: sensubility-config - mountPath: /healthcheck.log name: healthcheck-log - mountPath: /smoketest_collectd_entrypoint.sh name: collectd-entrypoint-script - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-9xng2 readOnly: true recursiveReadOnly: Disabled hostIP: 192.168.126.11 
hostIPs: - ip: 192.168.126.11 phase: Succeeded podIP: 10.217.0.84 podIPs: - ip: 10.217.0.84 qosClass: BestEffort startTime: "2025-12-08T17:59:14Z" kind: List metadata: resourceVersion: "" home/zuul/zuul-output/logs/controller/post_oc_describe_subscriptions_STO.log0000644000175000017500000000613015115611241026773 0ustar zuulzuulName: service-telemetry-operator Namespace: service-telemetry Labels: operators.coreos.com/service-telemetry-operator.service-telemetry= Annotations: API Version: operators.coreos.com/v1alpha1 Kind: Subscription Metadata: Creation Timestamp: 2025-12-08T17:56:08Z Generation: 1 Resource Version: 44702 UID: abe3736a-da7e-4bf3-9140-6b97576f80aa Spec: Channel: unstable Install Plan Approval: Automatic Name: service-telemetry-operator Source: infrawatch-operators Source Namespace: service-telemetry Status: Catalog Health: Catalog Source Ref: API Version: operators.coreos.com/v1alpha1 Kind: CatalogSource Name: certified-operators Namespace: openshift-marketplace Resource Version: 40638 UID: 04c2c69e-a9e9-447b-aff2-c2db7de0ee83 Healthy: true Last Updated: 2025-12-08T17:57:09Z Catalog Source Ref: API Version: operators.coreos.com/v1alpha1 Kind: CatalogSource Name: community-operators Namespace: openshift-marketplace Resource Version: 40568 UID: 88a656bd-c52a-4813-892e-7e3363ba9ac0 Healthy: true Last Updated: 2025-12-08T17:57:09Z Catalog Source Ref: API Version: operators.coreos.com/v1alpha1 Kind: CatalogSource Name: redhat-operators Namespace: openshift-marketplace Resource Version: 43686 UID: ca744265-3ae3-4482-8c3d-b10e28fe1042 Healthy: true Last Updated: 2025-12-08T17:57:09Z Catalog Source Ref: API Version: operators.coreos.com/v1alpha1 Kind: CatalogSource Name: infrawatch-operators Namespace: service-telemetry Resource Version: 43692 UID: c3af8943-d5c5-4768-9a5d-ec7e7c876a75 Healthy: true Last Updated: 2025-12-08T17:57:09Z Conditions: Last Transition Time: 2025-12-08T17:57:12Z Message: all available catalogsources are healthy Reason: AllCatalogSourcesHealthy Status: False Type: CatalogSourcesUnhealthy Current CSV: service-telemetry-operator.v1.5.1765147436 Install Plan Generation: 2 Install Plan Ref: API Version: operators.coreos.com/v1alpha1 Kind: InstallPlan Name: install-s8bl7 Namespace: service-telemetry Resource Version: 43727 UID: 46f43d7a-498c-4074-b276-c969ca1ef029 Installed CSV: service-telemetry-operator.v1.5.1765147436 Installplan: API Version: operators.coreos.com/v1alpha1 Kind: InstallPlan Name: install-s8bl7 Uuid: 46f43d7a-498c-4074-b276-c969ca1ef029 Last Updated: 2025-12-08T17:57:12Z State: AtLatestKnown Events: home/zuul/zuul-output/logs/controller/describe_sto.log0000644000175000017500000003227715115611242022432 0ustar zuulzuulName: service-telemetry-operator-79647f8775-zs8hl Namespace: service-telemetry Priority: 0 Service Account: service-telemetry-operator Node: crc/192.168.126.11 Start Time: Mon, 08 Dec 2025 17:56:41 +0000 Labels: name=service-telemetry-operator pod-template-hash=79647f8775 Annotations: alm-examples: [ { "apiVersion": "infra.watch/v1beta1", "kind": "ServiceTelemetry", "metadata": { "name": "default" }, "spec": { "alerting": { "alertmanager": { "receivers": { "snmpTraps": { "alertOidLabel": "oid", "community": "public", "enabled": false, "port": 162, "retries": 5, "target": "192.168.24.254", "timeout": 1, "trapDefaultOid": "1.3.6.1.4.1.50495.15.1.2.1", "trapDefaultSeverity": "", "trapOidPrefix": "1.3.6.1.4.1.50495.15" } }, "storage": { "persistent": { "pvcStorageRequest": "20G" }, "strategy": "persistent" } }, "enabled": true }, 
"backends": { "events": { "elasticsearch": { "certificates": { "caCertDuration": "70080h", "endpointCertDuration": "70080h" }, "enabled": false, "forwarding": { "hostUrl": "https://elasticsearch-es-http:9200", "tlsSecretName": "elasticsearch-es-cert", "tlsServerName": "", "useBasicAuth": true, "useTls": true, "userSecretName": "elasticsearch-es-elastic-user" }, "storage": { "persistent": { "pvcStorageRequest": "20Gi" }, "strategy": "persistent" }, "version": "7.16.1" } }, "metrics": { "prometheus": { "enabled": true, "scrapeInterval": "30s", "storage": { "persistent": { "pvcStorageRequest": "20G" }, "retention": "24h", "strategy": "persistent" } } } }, "clouds": [ { "metrics": { "collectors": [ { "bridge": { "ringBufferCount": 15000, "ringBufferSize": 16384, "verbose": false }, "collectorType": "collectd", "debugEnabled": false, "subscriptionAddress": "collectd/cloud1-telemetry" }, { "bridge": { "ringBufferCount": 15000, "ringBufferSize": 16384, "verbose": false }, "collectorType": "ceilometer", "debugEnabled": false, "subscriptionAddress": "anycast/ceilometer/cloud1-metering.sample" }, { "bridge": { "ringBufferCount": 15000, "ringBufferSize": 65535, "verbose": false }, "collectorType": "sensubility", "debugEnabled": false, "subscriptionAddress": "sensubility/cloud1-telemetry" } ] }, "name": "cloud1" } ], "graphing": { "enabled": false, "grafana": { "baseImage": "registry.redhat.io/rhel8/grafana:9", "dashboards": { "enabled": true }, "disableSignoutMenu": false, "ingressEnabled": true } }, "highAvailability": { "enabled": false }, "observabilityStrategy": "use_redhat", "transports": { "qdr": { "auth": "basic", "certificates": { "caCertDuration": "70080h", "endpointCertDuration": "70080h" }, "enabled": true, "web": { "enabled": false } } } } } ] capabilities: Basic Install categories: Monitoring certified: false containerImage: quay.io/infrawatch/service-telemetry-operator:latest createdAt: 2025-12-07T22:43:56Z description: Service Telemetry Framework. Umbrella Operator for instantiating the required dependencies and configuration of various components to buil... features.operators.openshift.io/cnf: false features.operators.openshift.io/cni: false features.operators.openshift.io/csi: false features.operators.openshift.io/disconnected: true features.operators.openshift.io/fips-compliant: false features.operators.openshift.io/proxy-aware: false features.operators.openshift.io/tls-profiles: false features.operators.openshift.io/token-auth-aws: false features.operators.openshift.io/token-auth-azure: false features.operators.openshift.io/token-auth-gcp: false k8s.ovn.org/pod-networks: {"default":{"ip_addresses":["10.217.0.71/23"],"mac_address":"0a:58:0a:d9:00:47","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0... k8s.v1.cni.cncf.io/network-status: [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.71" ], "mac": "0a:58:0a:d9:00:47", "default": true, "dns": {} }] olm.operatorGroup: service-telemetry-operator-group olm.operatorNamespace: service-telemetry olm.targetNamespaces: service-telemetry openshift.io/scc: restricted-v2 operatorframework.io/properties: {"properties":[{"type":"olm.maxOpenShiftVersion","value":"4.18"},{"type":"olm.constraint","value":{"all":{"constraints":[{"failureMessage"... 
operatorframework.io/suggested-namespace: service-telemetry operators.openshift.io/valid-subscription: ["OpenStack Platform", "Cloud Infrastructure", "Cloud Suite"] operators.operatorframework.io/builder: operator-sdk-v1.39.2 operators.operatorframework.io/project_layout: unknown repository: https://github.com/infrawatch/service-telemetry-operator seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user support: Red Hat Status: Running SeccompProfile: RuntimeDefault IP: 10.217.0.71 IPs: IP: 10.217.0.71 Controlled By: ReplicaSet/service-telemetry-operator-79647f8775 Containers: operator: Container ID: cri-o://3a21cd1b2f501351e58b3781f2bed15afd0ea184615e7ddb1e5be0235b1a7775 Image: quay.io/infrawatch/service-telemetry-operator:latest Image ID: quay.io/infrawatch/service-telemetry-operator@sha256:a75f7173968326a393ea8b8cde0a30ff783a83cfad8c59b3b1c72d28acf0a45d Port: Host Port: State: Running Started: Mon, 08 Dec 2025 17:57:06 +0000 Ready: True Restart Count: 0 Environment: WATCH_NAMESPACE: (v1:metadata.annotations['olm.targetNamespaces']) POD_NAME: service-telemetry-operator-79647f8775-zs8hl (v1:metadata.name) OPERATOR_NAME: service-telemetry-operator ANSIBLE_GATHERING: explicit RELATED_IMAGE_PROMETHEUS_WEBHOOK_SNMP_IMAGE: quay.io/infrawatch/prometheus-webhook-snmp:latest RELATED_IMAGE_OAUTH_PROXY_IMAGE: quay.io/openshift/origin-oauth-proxy:latest RELATED_IMAGE_PROMETHEUS_IMAGE: quay.io/prometheus/prometheus:latest RELATED_IMAGE_ALERTMANAGER_IMAGE: quay.io/prometheus/alertmanager:latest OPERATOR_CONDITION_NAME: service-telemetry-operator.v1.5.1765147436 Mounts: /tmp/ansible-operator/runner from runner (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-xj895 (ro) Conditions: Type Status PodReadyToStartContainers True Initialized True Ready True ContainersReady True PodScheduled True Volumes: runner: Type: EmptyDir (a temporary directory that shares a pod's lifetime) Medium: SizeLimit: kube-api-access-xj895: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt Optional: false DownwardAPI: true ConfigMapName: openshift-service-ca.crt Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 5m29s default-scheduler Successfully assigned service-telemetry/service-telemetry-operator-79647f8775-zs8hl to crc Normal AddedInterface 5m29s multus Add eth0 [10.217.0.71/23] from ovn-kubernetes Normal Pulling 5m29s kubelet Pulling image "quay.io/infrawatch/service-telemetry-operator:latest" Normal Pulled 5m4s kubelet Successfully pulled image "quay.io/infrawatch/service-telemetry-operator:latest" in 24.494s (24.494s including waiting). Image size: 820567468 bytes. Normal Created 5m4s kubelet Created container: operator Normal Started 5m4s kubelet Started container operator home/zuul/zuul-output/logs/controller/post_question_deployment.log0000644000175000017500000005661615115611243025145 0ustar zuulzuulWhat images were created in the internal registry? What state is the STO csv in? 
service-telemetry-operator.v1.5.1765147436 Service Telemetry Operator 1.5.1765147436 Succeeded apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: annotations: alm-examples: |- [ { "apiVersion": "infra.watch/v1beta1", "kind": "ServiceTelemetry", "metadata": { "name": "default" }, "spec": { "alerting": { "alertmanager": { "receivers": { "snmpTraps": { "alertOidLabel": "oid", "community": "public", "enabled": false, "port": 162, "retries": 5, "target": "192.168.24.254", "timeout": 1, "trapDefaultOid": "1.3.6.1.4.1.50495.15.1.2.1", "trapDefaultSeverity": "", "trapOidPrefix": "1.3.6.1.4.1.50495.15" } }, "storage": { "persistent": { "pvcStorageRequest": "20G" }, "strategy": "persistent" } }, "enabled": true }, "backends": { "events": { "elasticsearch": { "certificates": { "caCertDuration": "70080h", "endpointCertDuration": "70080h" }, "enabled": false, "forwarding": { "hostUrl": "https://elasticsearch-es-http:9200", "tlsSecretName": "elasticsearch-es-cert", "tlsServerName": "", "useBasicAuth": true, "useTls": true, "userSecretName": "elasticsearch-es-elastic-user" }, "storage": { "persistent": { "pvcStorageRequest": "20Gi" }, "strategy": "persistent" }, "version": "7.16.1" } }, "metrics": { "prometheus": { "enabled": true, "scrapeInterval": "30s", "storage": { "persistent": { "pvcStorageRequest": "20G" }, "retention": "24h", "strategy": "persistent" } } } }, "clouds": [ { "metrics": { "collectors": [ { "bridge": { "ringBufferCount": 15000, "ringBufferSize": 16384, "verbose": false }, "collectorType": "collectd", "debugEnabled": false, "subscriptionAddress": "collectd/cloud1-telemetry" }, { "bridge": { "ringBufferCount": 15000, "ringBufferSize": 16384, "verbose": false }, "collectorType": "ceilometer", "debugEnabled": false, "subscriptionAddress": "anycast/ceilometer/cloud1-metering.sample" }, { "bridge": { "ringBufferCount": 15000, "ringBufferSize": 65535, "verbose": false }, "collectorType": "sensubility", "debugEnabled": false, "subscriptionAddress": "sensubility/cloud1-telemetry" } ] }, "name": "cloud1" } ], "graphing": { "enabled": false, "grafana": { "baseImage": "registry.redhat.io/rhel8/grafana:9", "dashboards": { "enabled": true }, "disableSignoutMenu": false, "ingressEnabled": true } }, "highAvailability": { "enabled": false }, "observabilityStrategy": "use_redhat", "transports": { "qdr": { "auth": "basic", "certificates": { "caCertDuration": "70080h", "endpointCertDuration": "70080h" }, "enabled": true, "web": { "enabled": false } } } } } ] capabilities: Basic Install categories: Monitoring certified: "false" containerImage: quay.io/infrawatch/service-telemetry-operator:latest createdAt: "2025-12-07T22:43:56Z" description: Service Telemetry Framework. Umbrella Operator for instantiating the required dependencies and configuration of various components to build a Service Telemetry platform for telco grade monitoring. 
features.operators.openshift.io/cnf: "false" features.operators.openshift.io/cni: "false" features.operators.openshift.io/csi: "false" features.operators.openshift.io/disconnected: "true" features.operators.openshift.io/fips-compliant: "false" features.operators.openshift.io/proxy-aware: "false" features.operators.openshift.io/tls-profiles: "false" features.operators.openshift.io/token-auth-aws: "false" features.operators.openshift.io/token-auth-azure: "false" features.operators.openshift.io/token-auth-gcp: "false" olm.operatorGroup: service-telemetry-operator-group olm.operatorNamespace: service-telemetry olm.targetNamespaces: service-telemetry operatorframework.io/properties: '{"properties":[{"type":"olm.maxOpenShiftVersion","value":"4.18"},{"type":"olm.constraint","value":{"all":{"constraints":[{"failureMessage":"Package smart-gateway-operator is needed for Service Telemetry Framework","package":{"packageName":"smart-gateway-operator","versionRange":"\u003e=5.0.0"}}]},"failureMessage":"Require Smart Gateway for Service Telemetry Framework"}},{"type":"olm.constraint","value":{"all":{"constraints":[{"failureMessage":"Package amq7-interconnect-operator is needed for data transport with STF","package":{"packageName":"amq7-interconnect-operator","versionRange":"\u003e=1.10.0"}}]},"failureMessage":"Require data transport for Service Telemetry Framework"}},{"type":"olm.package","value":{"packageName":"service-telemetry-operator","version":"1.5.1765147436"}},{"type":"olm.gvk","value":{"group":"infra.watch","kind":"ServiceTelemetry","version":"v1beta1"}}]}' operatorframework.io/suggested-namespace: service-telemetry operators.openshift.io/valid-subscription: '["OpenStack Platform", "Cloud Infrastructure", "Cloud Suite"]' operators.operatorframework.io/builder: operator-sdk-v1.39.2 operators.operatorframework.io/project_layout: unknown repository: https://github.com/infrawatch/service-telemetry-operator support: Red Hat creationTimestamp: "2025-12-08T17:56:39Z" finalizers: - operators.coreos.com/csv-cleanup generation: 1 labels: olm.managed: "true" operators.coreos.com/service-telemetry-operator.service-telemetry: "" name: service-telemetry-operator.v1.5.1765147436 namespace: service-telemetry resourceVersion: "44620" uid: 768e4f34-61bc-41d7-a3c5-f46daeb2b4b8 spec: apiservicedefinitions: {} cleanup: enabled: false customresourcedefinitions: owned: - description: Represents an instance of the Service Telemetry Framework displayName: STF Cluster kind: ServiceTelemetry name: servicetelemetrys.infra.watch resources: - kind: Pods name: "" version: v1 - kind: ConfigMaps name: "" version: v1 - kind: ServiceTelemetrys name: servicetelemetrys.infra.watch version: v1beta1 - kind: ReplicaSets name: "" version: v1 - kind: Deployments name: "" version: v1 - kind: Services name: "" version: v1 - kind: ServiceMonitors name: servicemonitors.monitoring.coreos.com version: v1 - kind: ScrapeConfigs name: scrapeconfigs.monitoring.coreos.com version: v1alpha1 - kind: ServiceMonitors name: servicemonitors.monitoring.rhobs version: v1 - kind: ScrapeConfigs name: scrapeconfigs.monitoring.rhobs version: v1alpha1 version: v1beta1 description: Service Telemetry Operator for monitoring clouds displayName: Service Telemetry Operator icon: - base64data: 
PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiIHN0YW5kYWxvbmU9Im5vIj8+CjxzdmcKICAgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvIgogICB4bWxuczpjYz0iaHR0cDovL2NyZWF0aXZlY29tbW9ucy5vcmcvbnMjIgogICB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiCiAgIHhtbG5zOnN2Zz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciCiAgIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIKICAgeG1sbnM6c29kaXBvZGk9Imh0dHA6Ly9zb2RpcG9kaS5zb3VyY2Vmb3JnZS5uZXQvRFREL3NvZGlwb2RpLTAuZHRkIgogICB4bWxuczppbmtzY2FwZT0iaHR0cDovL3d3dy5pbmtzY2FwZS5vcmcvbmFtZXNwYWNlcy9pbmtzY2FwZSIKICAgaWQ9Ikljb25zIgogICB2aWV3Qm94PSIwIDAgMTI4IDEyOCIKICAgdmVyc2lvbj0iMS4xIgogICBzb2RpcG9kaTpkb2NuYW1lPSJJY29uLVJlZF9IYXQtRGlhZ3JhbXMtR3JhcGhfQXJyb3dfVXAtQi1CbGFjay1SR0Iuc3ZnIgogICB3aWR0aD0iMTI4IgogICBoZWlnaHQ9IjEyOCIKICAgaW5rc2NhcGU6dmVyc2lvbj0iMS4wYmV0YTIgKHVua25vd24pIj4KICA8bWV0YWRhdGEKICAgICBpZD0ibWV0YWRhdGE2NiI+CiAgICA8cmRmOlJERj4KICAgICAgPGNjOldvcmsKICAgICAgICAgcmRmOmFib3V0PSIiPgogICAgICAgIDxkYzpmb3JtYXQ+aW1hZ2Uvc3ZnK3htbDwvZGM6Zm9ybWF0PgogICAgICAgIDxkYzp0eXBlCiAgICAgICAgICAgcmRmOnJlc291cmNlPSJodHRwOi8vcHVybC5vcmcvZGMvZGNtaXR5cGUvU3RpbGxJbWFnZSIgLz4KICAgICAgICA8ZGM6dGl0bGU+SWNvbi1SZWRfSGF0LURpYWdyYW1zLUdyYXBoX0Fycm93X1VwXzEtQi1CbGFjay1SR0I8L2RjOnRpdGxlPgogICAgICA8L2NjOldvcms+CiAgICA8L3JkZjpSREY+CiAgPC9tZXRhZGF0YT4KICA8ZGVmcwogICAgIGlkPSJkZWZzNjQiIC8+CiAgPHNvZGlwb2RpOm5hbWVkdmlldwogICAgIHBhZ2Vjb2xvcj0iI2ZmZmZmZiIKICAgICBib3JkZXJjb2xvcj0iIzY2NjY2NiIKICAgICBpbmtzY2FwZTpkb2N1bWVudC1yb3RhdGlvbj0iMCIKICAgICBib3JkZXJvcGFjaXR5PSIxIgogICAgIG9iamVjdHRvbGVyYW5jZT0iMTAiCiAgICAgZ3JpZHRvbGVyYW5jZT0iMTAiCiAgICAgZ3VpZGV0b2xlcmFuY2U9IjEwIgogICAgIGlua3NjYXBlOnBhZ2VvcGFjaXR5PSIwIgogICAgIGlua3NjYXBlOnBhZ2VzaGFkb3c9IjIiCiAgICAgaW5rc2NhcGU6d2luZG93LXdpZHRoPSIxOTIwIgogICAgIGlua3NjYXBlOndpbmRvdy1oZWlnaHQ9IjEwNjIiCiAgICAgaWQ9Im5hbWVkdmlldzYyIgogICAgIHNob3dncmlkPSJmYWxzZSIKICAgICBpbmtzY2FwZTp6b29tPSI0LjM4MDEzMzciCiAgICAgaW5rc2NhcGU6Y3g9IjkwLjc4Njg1IgogICAgIGlua3NjYXBlOmN5PSI1OS42NDgxNDMiCiAgICAgaW5rc2NhcGU6d2luZG93LXg9IjM4NDAiCiAgICAgaW5rc2NhcGU6d2luZG93LXk9IjE4IgogICAgIGlua3NjYXBlOndpbmRvdy1tYXhpbWl6ZWQ9IjAiCiAgICAgaW5rc2NhcGU6Y3VycmVudC1sYXllcj0iSWNvbnMiIC8+CiAgPHRpdGxlCiAgICAgaWQ9InRpdGxlNTciPkljb24tUmVkX0hhdC1EaWFncmFtcy1HcmFwaF9BcnJvd19VcF8xLUItQmxhY2stUkdCPC90aXRsZT4KICA8cGF0aAogICAgIHN0eWxlPSJzdHJva2Utd2lkdGg6NC44MjU2NiIKICAgICBkPSJtIDEyNi44MTUzNiwyOS42MTY4NzUgYSAzLjYxOTI1ODgsMy42MTkyNTg4IDAgMCAwIC01LjExNTIyLDAgbCAtMy40NzQ0OCwzLjQ3NDQ4OSBWIDEwLjg0NDk4OCBBIDMuNjY3NTE1NiwzLjY2NzUxNTYgMCAwIDAgMTE0LjYwNjM5LDcuMzcwNDk5MyBIIDguNDQxNDc2OSBBIDMuNjE5MjU4OCwzLjYxOTI1ODggMCAwIDAgNC44MjIyMTgyLDEwLjg0NDk4OCBWIDkxLjYyNjg0IGwgLTMuNzY0MDI4NywzLjY2NzUxNiBhIDMuNjE5MjU4OCwzLjYxOTI1ODggMCAwIDAgMi41NTc2MDk1LDYuMTc2ODc0IDMuNTcxMDAxOSwzLjU3MTAwMTkgMCAwIDAgMS4yMDY0MTkyLC0wLjI0MTI4IHYgMTUuNzc5OTYgYSAzLjU3MTAwMTksMy41NzEwMDE5IDAgMCAwIDMuNjE5MjU4NywzLjYxOTI2IEggMTE0LjYwNjM5IGEgMy42MTkyNTg4LDMuNjE5MjU4OCAwIDAgMCAzLjYxOTI3LC0zLjYxOTI2IFYgNDMuMjI1Mjg4IGwgOC41ODk3LC04LjI1MTkwOSBhIDMuNjE5MjU4OCwzLjYxOTI1ODggMCAwIDAgMCwtNS4zNTY1MDQgeiBNIDU3LjkwNDY3NSw2My45Mjc0NDcgViAyNS4zMjIwMjIgYSAzLjYxOTI1OTksMy42MTkyNTk5IDAgMCAxIDcuMjM4NTE5LDAgdiAzOC42MDU0MjUgYSAzLjYxOTI1OTksMy42MTkyNTk5IDAgMCAxIC03LjIzODUxOSwwIHogTSA4Ni44NTg3NDYsNDQuNjI0NzM0IFYgMjUuMzIyMDIyIGEgMy42MTkyNTg4LDMuNjE5MjU4OCAwIDAgMSA3LjIzODUxNSwwIHYgMTkuMzAyNzEyIGEgMy42MTkyNTg4LDMuNjE5MjU4OCAwIDEgMSAtNy4yMzg1MTUsMCB6IE0gMzYuMTg5MTI4LDI1LjMyMjAyMiB2IDE5LjMwMjcxMiBhIDMuNjE5MjU5LDMuNjE5MjU5IDAgMSAxIC03LjIzODUxOCwwIFYgMjUuMzIyMDIyIGEgMy42MTkyNTksMy42MTkyNTkgMCAwIDEgNy4yMzg1MTgsMCB6IE0gMTEwLjk4NzE0LDExMy4zOTA2NSBIIDEyLjA2MDczNiBWIDk0LjY
xODc2MSBsIDI0Ljk5NzAxMywtMjQuNTE0NDQzIDI0LjEyODM4OCwyNC4xMjgzOSBhIDMuMDg4NDM0MSwzLjA4ODQzNDEgMCAwIDAgMS4yMDY0MTcsMC43MjM4NTEgaCAwLjQzNDMxNCBhIDQuODI1Njc4Myw0LjgyNTY3ODMgMCAwIDAgMC45NjUxMzYsMC4yNDEyODEgMi4yNjgwNjg3LDIuMjY4MDY4NyAwIDAgMCAwLjc3MjEwOCwwIHYgMCBhIDMuMzc3OTc0OCwzLjM3Nzk3NDggMCAwIDAgMS41OTI0NzUsLTAuODIwMzY0IHYgMCBMIDExMC42NDkzMyw1MC4zNjcyOTUgWiIKICAgICBpZD0icGF0aDU5IgogICAgIGlua3NjYXBlOmNvbm5lY3Rvci1jdXJ2YXR1cmU9IjAiIC8+Cjwvc3ZnPgo= mediatype: image/svg+xml install: spec: clusterPermissions: - rules: - apiGroups: - authentication.k8s.io resources: - tokenreviews verbs: - create - apiGroups: - rbac.authorization.k8s.io resources: - clusterroles - clusterrolebindings verbs: - create - get - list - watch - update - patch - delete - apiGroups: - authorization.k8s.io resources: - subjectaccessreviews verbs: - create - apiGroups: - security.openshift.io resourceNames: - nonroot - nonroot-v2 resources: - securitycontextconstraints verbs: - use - apiGroups: - "" resources: - nodes/metrics verbs: - get - apiGroups: - "" resources: - namespaces verbs: - get - nonResourceURLs: - /metrics verbs: - get serviceAccountName: service-telemetry-operator deployments: - name: service-telemetry-operator spec: replicas: 1 selector: matchLabels: name: service-telemetry-operator strategy: {} template: metadata: creationTimestamp: null labels: name: service-telemetry-operator spec: containers: - env: - name: WATCH_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.annotations['olm.targetNamespaces'] - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: OPERATOR_NAME value: service-telemetry-operator - name: ANSIBLE_GATHERING value: explicit - name: RELATED_IMAGE_PROMETHEUS_WEBHOOK_SNMP_IMAGE value: quay.io/infrawatch/prometheus-webhook-snmp:latest - name: RELATED_IMAGE_OAUTH_PROXY_IMAGE value: quay.io/openshift/origin-oauth-proxy:latest - name: RELATED_IMAGE_PROMETHEUS_IMAGE value: quay.io/prometheus/prometheus:latest - name: RELATED_IMAGE_ALERTMANAGER_IMAGE value: quay.io/prometheus/alertmanager:latest image: quay.io/infrawatch/service-telemetry-operator:latest imagePullPolicy: Always name: operator resources: {} volumeMounts: - mountPath: /tmp/ansible-operator/runner name: runner serviceAccountName: service-telemetry-operator volumes: - emptyDir: {} name: runner permissions: - rules: - apiGroups: - "" resources: - pods - services - services/finalizers - endpoints - persistentvolumeclaims - events - configmaps - secrets verbs: - '*' - apiGroups: - route.openshift.io resources: - routes verbs: - create - get - list - watch - update - patch - apiGroups: - "" resources: - serviceaccounts verbs: - create - get - list - watch - update - patch - apiGroups: - apps resources: - deployments - daemonsets - replicasets - statefulsets verbs: - '*' - apiGroups: - cert-manager.io resources: - issuers - certificates verbs: - '*' - apiGroups: - interconnectedcloud.github.io - smartgateway.infra.watch - monitoring.coreos.com - monitoring.rhobs - elasticsearch.k8s.elastic.co - grafana.integreatly.org - integreatly.org resources: - '*' verbs: - '*' - apiGroups: - monitoring.coreos.com resources: - scrapeconfigs - servicemonitors verbs: - get - create - delete - apiGroups: - monitoring.rhobs resources: - scrapeconfigs - servicemonitors verbs: - get - create - delete - apiGroups: - apps resourceNames: - service-telemetry-operator resources: - deployments/finalizers verbs: - update - apiGroups: - "" resources: - pods verbs: - get - apiGroups: - apps resources: - replicasets verbs: - get - apiGroups: - 
infra.watch resources: - '*' verbs: - '*' - apiGroups: - rbac.authorization.k8s.io resources: - roles - rolebindings verbs: - create - get - list - watch - update - patch - apiGroups: - extensions - networking.k8s.io resources: - ingresses verbs: - get - list - watch serviceAccountName: service-telemetry-operator strategy: deployment installModes: - supported: true type: OwnNamespace - supported: true type: SingleNamespace - supported: false type: MultiNamespace - supported: false type: AllNamespaces keywords: - servicetelemetry - monitoring - telemetry - notifications - telecommunications links: - name: Source Code url: https://github.com/infrawatch/service-telemetry-operator - name: Documentation url: https://infrawatch.github.io/documentation maintainers: - email: support@redhat.com name: Red Hat maturity: beta minKubeVersion: 1.23.0 provider: name: Red Hat relatedImages: - image: quay.io/infrawatch/prometheus-webhook-snmp:latest name: prometheus-webhook-snmp-image - image: quay.io/openshift/origin-oauth-proxy:latest name: oauth-proxy-image - image: quay.io/prometheus/prometheus:latest name: prometheus-image - image: quay.io/prometheus/alertmanager:latest name: alertmanager-image version: 1.5.1765147436 status: cleanup: {} conditions: - lastTransitionTime: "2025-12-08T17:56:39Z" lastUpdateTime: "2025-12-08T17:56:39Z" message: requirements not yet checked phase: Pending reason: RequirementsUnknown - lastTransitionTime: "2025-12-08T17:56:39Z" lastUpdateTime: "2025-12-08T17:56:39Z" message: one or more requirements couldn't be found phase: Pending reason: RequirementsNotMet - lastTransitionTime: "2025-12-08T17:56:40Z" lastUpdateTime: "2025-12-08T17:56:40Z" message: all requirements found, attempting install phase: InstallReady reason: AllRequirementsMet - lastTransitionTime: "2025-12-08T17:56:40Z" lastUpdateTime: "2025-12-08T17:56:40Z" message: waiting for install components to report healthy phase: Installing reason: InstallSucceeded - lastTransitionTime: "2025-12-08T17:56:40Z" lastUpdateTime: "2025-12-08T17:56:41Z" message: 'installing: waiting for deployment service-telemetry-operator to become ready: deployment "service-telemetry-operator" not available: Deployment does not have minimum availability.' 
phase: Installing reason: InstallWaiting - lastTransitionTime: "2025-12-08T17:57:06Z" lastUpdateTime: "2025-12-08T17:57:06Z" message: install strategy completed with no errors phase: Succeeded reason: InstallSucceeded lastTransitionTime: "2025-12-08T17:57:06Z" lastUpdateTime: "2025-12-08T17:57:06Z" message: install strategy completed with no errors phase: Succeeded reason: InstallSucceeded requirementStatus: - group: operators.coreos.com kind: ClusterServiceVersion message: CSV minKubeVersion (1.23.0) less than server version (v1.33.5) name: service-telemetry-operator.v1.5.1765147436 status: Present version: v1alpha1 - group: apiextensions.k8s.io kind: CustomResourceDefinition message: CRD is present and Established condition is true name: servicetelemetrys.infra.watch status: Present uuid: 5f3f70c7-59e5-4f4d-b56d-3bf33d270fd8 version: v1 - dependents: - group: rbac.authorization.k8s.io kind: PolicyRule message: cluster rule:{"verbs":["create"],"apiGroups":["authentication.k8s.io"],"resources":["tokenreviews"]} status: Satisfied version: v1 - group: rbac.authorization.k8s.io kind: PolicyRule message: cluster rule:{"verbs":["create","get","list","watch","update","patch","delete"],"apiGroups":["rbac.authorization.k8s.io"],"resources":["clusterroles","clusterrolebindings"]} status: Satisfied version: v1 - group: rbac.authorization.k8s.io kind: PolicyRule message: cluster rule:{"verbs":["create"],"apiGroups":["authorization.k8s.io"],"resources":["subjectaccessreviews"]} status: Satisfied version: v1 - group: rbac.authorization.k8s.io kind: PolicyRule message: cluster rule:{"verbs":["use"],"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"resourceNames":["nonroot","nonroot-v2"]} status: Satisfied version: v1 - group: rbac.authorization.k8s.io kind: PolicyRule message: cluster rule:{"verbs":["get"],"apiGroups":[""],"resources":["nodes/metrics"]} status: Satisfied version: v1 - group: rbac.authorization.k8s.io kind: PolicyRule message: cluster rule:{"verbs":["get"],"apiGroups":[""],"resources":["namespaces"]} status: Satisfied version: v1 - group: rbac.authorization.k8s.io kind: PolicyRule message: cluster rule:{"verbs":["get"],"nonResourceURLs":["/metrics"]} status: Satisfied version: v1 group: "" kind: ServiceAccount message: "" name: service-telemetry-operator status: Present version: v1 ././@LongLink0000644000000000000000000000017700000000000011610 Lustar rootroothome/zuul/zuul-output/logs/controller/post_oc_describe_pod_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq.loghome/zuul/zuul-output/logs/controller/post_oc_describe_pod_36ffb4ab4bfe83a910ab52ec1870308fea799225a0000644000175000017500000002004315115611244031303 0ustar zuulzuulName: 36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq Namespace: service-telemetry Priority: 0 Service Account: default Node: crc/192.168.126.11 Start Time: Mon, 08 Dec 2025 17:56:28 +0000 Labels: batch.kubernetes.io/controller-uid=0bf0ca7a-b827-4094-8a72-9ae29534ef04 batch.kubernetes.io/job-name=36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 controller-uid=0bf0ca7a-b827-4094-8a72-9ae29534ef04 job-name=36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 olm.managed=true operatorframework.io/bundle-unpack-ref=36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 Annotations: k8s.ovn.org/pod-networks: {"default":{"ip_addresses":["10.217.0.61/23"],"mac_address":"0a:58:0a:d9:00:3d","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0... 
k8s.v1.cni.cncf.io/network-status: [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.61" ], "mac": "0a:58:0a:d9:00:3d", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user Status: Succeeded SeccompProfile: RuntimeDefault IP: 10.217.0.61 IPs: IP: 10.217.0.61 Controlled By: Job/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 Init Containers: util: Container ID: cri-o://4c35d8974d5888bf524dbe5eefde9e191c58b3803db6ffa3939b4ce04352185f Image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f Image ID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f Port: Host Port: Command: /bin/cp -Rv /bin/cpb /util/cpb State: Terminated Reason: Completed Exit Code: 0 Started: Mon, 08 Dec 2025 17:56:30 +0000 Finished: Mon, 08 Dec 2025 17:56:30 +0000 Ready: True Restart Count: 0 Requests: cpu: 10m memory: 50Mi Environment: Mounts: /util from util (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4zj26 (ro) pull: Container ID: cri-o://0ca132d5c8c9872037343085b432294d20a42d1355b62c8997516c468986533d Image: quay.io/infrawatch-operators/smart-gateway-operator-bundle@sha256:95f4acd07c67b9549025ac335762aa8685da9f3917e9160ab6bcb66a946cdf5d Image ID: quay.io/infrawatch-operators/smart-gateway-operator-bundle@sha256:95f4acd07c67b9549025ac335762aa8685da9f3917e9160ab6bcb66a946cdf5d Port: Host Port: Command: /util/cpb /bundle State: Terminated Reason: Completed Exit Code: 0 Started: Mon, 08 Dec 2025 17:56:32 +0000 Finished: Mon, 08 Dec 2025 17:56:32 +0000 Ready: True Restart Count: 0 Requests: cpu: 10m memory: 50Mi Environment: Mounts: /bundle from bundle (rw) /util from util (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4zj26 (ro) Containers: extract: Container ID: cri-o://d3e8561202cc89d83ca353a282a58fe1a93cb345811d80c4f8d79bbece0f3150 Image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47 Image ID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47 Port: Host Port: Command: opm alpha bundle extract -m /bundle/ -n service-telemetry -c 36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 -z State: Terminated Reason: Completed Exit Code: 0 Started: Mon, 08 Dec 2025 17:56:33 +0000 Finished: Mon, 08 Dec 2025 17:56:33 +0000 Ready: False Restart Count: 0 Requests: cpu: 10m memory: 50Mi Environment: CONTAINER_IMAGE: quay.io/infrawatch-operators/smart-gateway-operator-bundle@sha256:95f4acd07c67b9549025ac335762aa8685da9f3917e9160ab6bcb66a946cdf5d Mounts: /bundle from bundle (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4zj26 (ro) Conditions: Type Status PodReadyToStartContainers False Initialized True Ready False ContainersReady False PodScheduled True Volumes: bundle: Type: EmptyDir (a temporary directory that shares a pod's lifetime) Medium: SizeLimit: util: Type: EmptyDir (a temporary directory that shares a pod's lifetime) Medium: SizeLimit: kube-api-access-4zj26: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt Optional: false DownwardAPI: true ConfigMapName: openshift-service-ca.crt Optional: false QoS Class: Burstable 
Node-Selectors: kubernetes.io/os=linux Tolerations: kubernetes.io/arch=amd64 kubernetes.io/arch=arm64 kubernetes.io/arch=ppc64le kubernetes.io/arch=s390x node-role.kubernetes.io/master:NoSchedule op=Exists node.kubernetes.io/memory-pressure:NoSchedule op=Exists node.kubernetes.io/not-ready:NoExecute op=Exists for 120s node.kubernetes.io/unreachable:NoExecute op=Exists for 120s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 5m45s default-scheduler Successfully assigned service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq to crc Normal AddedInterface 5m43s multus Add eth0 [10.217.0.61/23] from ovn-kubernetes Normal Pulled 5m43s kubelet Container image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f" already present on machine Normal Created 5m42s kubelet Created container: util Normal Started 5m42s kubelet Started container util Normal Pulling 5m41s kubelet Pulling image "quay.io/infrawatch-operators/smart-gateway-operator-bundle@sha256:95f4acd07c67b9549025ac335762aa8685da9f3917e9160ab6bcb66a946cdf5d" Normal Pulled 5m40s kubelet Successfully pulled image "quay.io/infrawatch-operators/smart-gateway-operator-bundle@sha256:95f4acd07c67b9549025ac335762aa8685da9f3917e9160ab6bcb66a946cdf5d" in 1.214s (1.214s including waiting). Image size: 36716 bytes. Normal Created 5m40s kubelet Created container: pull Normal Started 5m40s kubelet Started container pull Normal Pulled 5m39s kubelet Container image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47" already present on machine Normal Created 5m39s kubelet Created container: extract Normal Started 5m39s kubelet Started container extract home/zuul/zuul-output/logs/controller/post_oc_describe_pod_curl.log0000644000175000017500000001150115115611244025147 0ustar zuulzuulName: curl Namespace: service-telemetry Priority: 0 Service Account: default Node: crc/192.168.126.11 Start Time: Mon, 08 Dec 2025 17:59:14 +0000 Labels: run=curl Annotations: k8s.ovn.org/pod-networks: {"default":{"ip_addresses":["10.217.0.85/23"],"mac_address":"0a:58:0a:d9:00:55","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0... 
k8s.v1.cni.cncf.io/network-status: [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.85" ], "mac": "0a:58:0a:d9:00:55", "default": true, "dns": {} }] openshift.io/scc: anyuid security.openshift.io/validated-scc-subject-type: user Status: Succeeded IP: 10.217.0.85 IPs: IP: 10.217.0.85 Containers: curl: Container ID: cri-o://a7928a57f2a8dbff9fceeb51195188b3a2cd0237b3fbfeaf0fd5213020d1106a Image: quay.io/infrawatch/busyboxplus:curl Image ID: quay.io/infrawatch/busyboxplus@sha256:66f2d29e9735508135deb16917cdee84957a0e891d035f14b6557df277d10afc Port: Host Port: Args: sh -c curl -v -k -H "Content-Type: application/json" -H "Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6ImFWMzc3cFlVaXZjX05walVUUlY4bWtJNUZSTTlyVFplaEIwRnBldjhZamsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTc2NTIyMDM1NCwiaWF0IjoxNzY1MjE2NzU0LCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMiLCJqdGkiOiJmMTVjZjE3NC1hMDQxLTRlNzMtODUzNS0yZDU4NDRiYjU4YWUiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6InNlcnZpY2UtdGVsZW1ldHJ5Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6InByb21ldGhldXMtc3RmIiwidWlkIjoiNzc0ZGE4MzQtODY3YS00N2UzLWE1MmMtYmZmMzRhMzlmM2Q4In19LCJuYmYiOjE3NjUyMTY3NTQsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpzZXJ2aWNlLXRlbGVtZXRyeTpwcm9tZXRoZXVzLXN0ZiJ9.QPxiNrDZ6fnSoQLCE8bQx8WL4s7PmGgB7XIKsKPknC05ykNETWp6bPJsBPq61zCjGdy6eJ46AJc6HCGNnk-gPMjPUgIyWR9icaKf9L_cs_4KKs85QH7dsHDN9BaEjXbvXvEAXqGY0KKreFa6Bk3zR1j4BwU62kNQycIvo2VxaMwp8JM2mmFzbeSZc_mjJ9o19jVi5kKf9JvXgNtcU0bxldSStVu3CfBbjMZZRWFiwYt91-gUV4cK2E8IgKa77YFywucGhWMYXq2aDeasrjBF9yMoDizucMWlJZamREkzvVfVmccTozpAYDpct-z153dobmP3fnZyqC_pWhmpS6QkggnRqvmo5MgxeFOi0Cm6q06ZmEERA6PvMW9QrSYasxXXvkDyznNGrGP7DhtVCgB8q3veU9K2zQheEAdS9SeyqnECbofewDowxMkjEqhA4L2BenBuQPg_lnPHGcVCcHQxoe7znTWw8SzXp1fBnG3Fmt0et8PRS4IbL8ds5GiSdZf4o4dQA2OXzkCgIvLTyNfxNCUlt-4Kgf6Sb7k7FmeT4U76TVwneAiAIFhlxbYtfLpHwwqcd36w4kFSOXc6haPWzhEryjX3jRIPk5ID6TbdV0n6Kp23woEQzh2B0Dn9MXC9wZC49U_RfOxL6uQLRnzfKQRkafI6tzgrBb1A3X_H8_A" -d '[{"status":"firing","labels":{"alertname":"smoketest","severity":"warning"},"startsAt":"2025-12-08T17:59:14+00:00"}]' https://default-alertmanager-proxy:9095/api/v2/alerts State: Terminated Reason: Completed Exit Code: 0 Started: Mon, 08 Dec 2025 17:59:16 +0000 Finished: Mon, 08 Dec 2025 17:59:16 +0000 Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-5rv8s (ro) Conditions: Type Status PodReadyToStartContainers False Initialized True Ready False ContainersReady False PodScheduled True Volumes: kube-api-access-5rv8s: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt Optional: false DownwardAPI: true ConfigMapName: openshift-service-ca.crt Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 2m58s default-scheduler Successfully assigned service-telemetry/curl to crc Normal AddedInterface 2m57s multus Add eth0 [10.217.0.85/23] from ovn-kubernetes Normal Pulling 2m57s kubelet Pulling image "quay.io/infrawatch/busyboxplus:curl" Normal Pulled 2m56s kubelet Successfully pulled image "quay.io/infrawatch/busyboxplus:curl" in 1.44s (1.44s including waiting). Image size: 4743849 bytes. 
Normal Created 2m56s kubelet Created container: curl Normal Started 2m56s kubelet Started container curl ././@LongLink0000644000000000000000000000017700000000000011610 Lustar rootroothome/zuul/zuul-output/logs/controller/post_oc_describe_pod_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx.loghome/zuul/zuul-output/logs/controller/post_oc_describe_pod_f308c3282bd783e18badba37dad473f984d0c04be0000644000175000017500000002006715115611244031371 0ustar zuulzuulName: f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx Namespace: service-telemetry Priority: 0 Service Account: default Node: crc/192.168.126.11 Start Time: Mon, 08 Dec 2025 17:56:29 +0000 Labels: batch.kubernetes.io/controller-uid=f1a99e81-3d6a-46e7-93df-17d6d3d195dd batch.kubernetes.io/job-name=f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 controller-uid=f1a99e81-3d6a-46e7-93df-17d6d3d195dd job-name=f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 olm.managed=true operatorframework.io/bundle-unpack-ref=f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 Annotations: k8s.ovn.org/pod-networks: {"default":{"ip_addresses":["10.217.0.66/23"],"mac_address":"0a:58:0a:d9:00:42","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0... k8s.v1.cni.cncf.io/network-status: [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.66" ], "mac": "0a:58:0a:d9:00:42", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user Status: Succeeded SeccompProfile: RuntimeDefault IP: 10.217.0.66 IPs: IP: 10.217.0.66 Controlled By: Job/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 Init Containers: util: Container ID: cri-o://b4f8f2eceaa49eb50c4831feb67ddacd68cb76a38dd21c782141f9b5dde7d0fc Image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f Image ID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f Port: Host Port: Command: /bin/cp -Rv /bin/cpb /util/cpb State: Terminated Reason: Completed Exit Code: 0 Started: Mon, 08 Dec 2025 17:56:30 +0000 Finished: Mon, 08 Dec 2025 17:56:30 +0000 Ready: True Restart Count: 0 Requests: cpu: 10m memory: 50Mi Environment: Mounts: /util from util (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-46hts (ro) pull: Container ID: cri-o://05a0f73f3535c0d5c2a0ebe8006219a712dfb9e9efa1cdc79f315fd1e3633fee Image: quay.io/infrawatch-operators/service-telemetry-operator-bundle@sha256:7aa358e814a4bc2836a723364515ae600d4d5f45afb491456b63283a34b1178d Image ID: quay.io/infrawatch-operators/service-telemetry-operator-bundle@sha256:7aa358e814a4bc2836a723364515ae600d4d5f45afb491456b63283a34b1178d Port: Host Port: Command: /util/cpb /bundle State: Terminated Reason: Completed Exit Code: 0 Started: Mon, 08 Dec 2025 17:56:32 +0000 Finished: Mon, 08 Dec 2025 17:56:32 +0000 Ready: True Restart Count: 0 Requests: cpu: 10m memory: 50Mi Environment: Mounts: /bundle from bundle (rw) /util from util (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-46hts (ro) Containers: extract: Container ID: cri-o://87fea37bc9cf6f903e07e23f2df1da34cc7a8ef0682d180e4755d99a1b948e15 Image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47 Image ID: 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47 Port: Host Port: Command: opm alpha bundle extract -m /bundle/ -n service-telemetry -c f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 -z State: Terminated Reason: Completed Exit Code: 0 Started: Mon, 08 Dec 2025 17:56:33 +0000 Finished: Mon, 08 Dec 2025 17:56:33 +0000 Ready: False Restart Count: 0 Requests: cpu: 10m memory: 50Mi Environment: CONTAINER_IMAGE: quay.io/infrawatch-operators/service-telemetry-operator-bundle@sha256:7aa358e814a4bc2836a723364515ae600d4d5f45afb491456b63283a34b1178d Mounts: /bundle from bundle (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-46hts (ro) Conditions: Type Status PodReadyToStartContainers False Initialized True Ready False ContainersReady False PodScheduled True Volumes: bundle: Type: EmptyDir (a temporary directory that shares a pod's lifetime) Medium: SizeLimit: util: Type: EmptyDir (a temporary directory that shares a pod's lifetime) Medium: SizeLimit: kube-api-access-46hts: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt Optional: false DownwardAPI: true ConfigMapName: openshift-service-ca.crt Optional: false QoS Class: Burstable Node-Selectors: kubernetes.io/os=linux Tolerations: kubernetes.io/arch=amd64 kubernetes.io/arch=arm64 kubernetes.io/arch=ppc64le kubernetes.io/arch=s390x node-role.kubernetes.io/master:NoSchedule op=Exists node.kubernetes.io/memory-pressure:NoSchedule op=Exists node.kubernetes.io/not-ready:NoExecute op=Exists for 120s node.kubernetes.io/unreachable:NoExecute op=Exists for 120s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 5m44s default-scheduler Successfully assigned service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx to crc Normal AddedInterface 5m43s multus Add eth0 [10.217.0.66/23] from ovn-kubernetes Normal Pulled 5m43s kubelet Container image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f" already present on machine Normal Created 5m42s kubelet Created container: util Normal Started 5m42s kubelet Started container util Normal Pulling 5m41s kubelet Pulling image "quay.io/infrawatch-operators/service-telemetry-operator-bundle@sha256:7aa358e814a4bc2836a723364515ae600d4d5f45afb491456b63283a34b1178d" Normal Pulled 5m40s kubelet Successfully pulled image "quay.io/infrawatch-operators/service-telemetry-operator-bundle@sha256:7aa358e814a4bc2836a723364515ae600d4d5f45afb491456b63283a34b1178d" in 1.293s (1.293s including waiting). Image size: 58789 bytes. 
Normal Created 5m40s kubelet Created container: pull Normal Started 5m40s kubelet Started container pull Normal Pulled 5m39s kubelet Container image "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47" already present on machine Normal Created 5m39s kubelet Created container: extract Normal Started 5m39s kubelet Started container extract home/zuul/zuul-output/logs/controller/post_oc_describe_pod_stf-smoketest-smoke1-pbhxq.log0000644000175000017500000002333315115611244031335 0ustar zuulzuulName: stf-smoketest-smoke1-pbhxq Namespace: service-telemetry Priority: 0 Service Account: default Node: crc/192.168.126.11 Start Time: Mon, 08 Dec 2025 17:59:14 +0000 Labels: app=stf-smoketest batch.kubernetes.io/controller-uid=6d15069a-7ade-4308-8933-707b8f1ca4e3 batch.kubernetes.io/job-name=stf-smoketest-smoke1 controller-uid=6d15069a-7ade-4308-8933-707b8f1ca4e3 job-name=stf-smoketest-smoke1 Annotations: k8s.ovn.org/pod-networks: {"default":{"ip_addresses":["10.217.0.84/23"],"mac_address":"0a:58:0a:d9:00:54","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0... k8s.v1.cni.cncf.io/network-status: [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.217.0.84" ], "mac": "0a:58:0a:d9:00:54", "default": true, "dns": {} }] openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default security.openshift.io/validated-scc-subject-type: user Status: Succeeded SeccompProfile: RuntimeDefault IP: 10.217.0.84 IPs: IP: 10.217.0.84 Controlled By: Job/stf-smoketest-smoke1 Containers: smoketest-collectd: Container ID: cri-o://49a2d94e35dff7bd89d4adda86a9bb2a1c75c043a28e9dd9185f028d19dcc6a8 Image: quay.io/tripleomastercentos9/openstack-collectd:current-tripleo Image ID: quay.io/tripleomastercentos9/openstack-collectd@sha256:a6550a778495d874a71cb26f104e1bea9ac348091e9edc4ae12229c86ff8b72e Port: Host Port: Command: /smoketest_collectd_entrypoint.sh State: Terminated Reason: Completed Exit Code: 0 Started: Mon, 08 Dec 2025 17:59:24 +0000 Finished: Mon, 08 Dec 2025 17:59:58 +0000 Ready: False Restart Count: 0 Environment: CLOUDNAME: smoke1 ELASTICSEARCH_AUTH_PASS: Vi9VYYssS8Rt2DltbKq6JXZC PROMETHEUS_AUTH_TOKEN: eyJhbGciOiJSUzI1NiIsImtpZCI6ImFWMzc3cFlVaXZjX05walVUUlY4bWtJNUZSTTlyVFplaEIwRnBldjhZamsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTc2NTIyMDM0MCwiaWF0IjoxNzY1MjE2NzQwLCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMiLCJqdGkiOiI2ZWY1YWIwNi03YjhiLTQ4OGItODkwYi02ZDM0Y2Y5YzVhMDYiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6InNlcnZpY2UtdGVsZW1ldHJ5Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6InN0Zi1wcm9tZXRoZXVzLXJlYWRlciIsInVpZCI6ImI0MDQ2N2U4LWE0OWQtNGQxOC04Y2Y4LTIxNjIwMjRkYTQ1YSJ9fSwibmJmIjoxNzY1MjE2NzQwLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6c2VydmljZS10ZWxlbWV0cnk6c3RmLXByb21ldGhldXMtcmVhZGVyIn0.O5UEY8wJ0JBFa4c4plOcpEkjisII8ssj1wcszQiMZfrph5ajeEV182dvVyEH14huoCZlDjjnwWlmZv2b8SRnMU6QAw5U7MQqXu4Zo7Ay2Al7Dec7pXWMPgkjEz-vDBP7ay-CN9ENZ4U2FHqKi4FER95_n5NdCPDuoMMT8gImOAZUTyrC7FGNN_BvMhlhW0BLawA8jYs2RCkyakVnVgn57G0QBOywE-pMAoajnq8ycH2GUG2KckzBq8b38aOETug8uVr4KO55i5rDaCn0UK1Qi322UZBaIm3dDcKqh0yUX3uzBAX_tByhmXxSNyRhb2kgWTxgKjySQdvpGc7kErLSwtAG5pq8Sy5wgm8sEix1_OuGQcrQK9hG6nluPCbn-ifLUlnkpK5SilWirEJp7tEWCj8aRCJmmyw_A0P6UcU2nSqhv9fdkON-veTeGSygpxSqwEkfAATxNi3A2RGufm6ZZm0r1c34zC66unHx4W1FUk9KlO89cGGybyQh6bQfgrMpkiGYtC_0807JnrIOwd4a7Zq3EVScFfkd8k1UIfq-tCjrkLI0fg3Dhew9U1n9RTA4L53lSZVIvVWAHbYZ4fhfH8Z0fMMBhrcX6XOGuGyYBD8jwrUeB8PknLRt5z03q_rZ0Vd6GGNQV2kr8YW5GSVQOXVK040gkL9GF5sUZADEU2A OBSERVABILITY_STRATEGY: <> 
Mounts: /etc/collectd-sensubility.conf from sensubility-config (rw,path="collectd-sensubility.conf") /etc/minimal-collectd.conf.template from collectd-config (rw,path="minimal-collectd.conf.template") /healthcheck.log from healthcheck-log (rw,path="healthcheck.log") /smoketest_collectd_entrypoint.sh from collectd-entrypoint-script (rw,path="smoketest_collectd_entrypoint.sh") /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9xng2 (ro) smoketest-ceilometer: Container ID: cri-o://ae6ee93a5a6d6a767e3bbb450044418d804b86e821540a775c6b84a6df04014f Image: quay.io/tripleomastercentos9/openstack-ceilometer-notification:current-tripleo Image ID: quay.io/tripleomastercentos9/openstack-ceilometer-notification@sha256:73886b5616a348d6cee1766765bef1d7ce4c0de2f02c51fe7b68c2ed45c5cb4d Port: Host Port: Command: /smoketest_ceilometer_entrypoint.sh State: Terminated Reason: Completed Exit Code: 0 Started: Mon, 08 Dec 2025 17:59:30 +0000 Finished: Mon, 08 Dec 2025 18:00:01 +0000 Ready: False Restart Count: 0 Environment: CLOUDNAME: smoke1 ELASTICSEARCH_AUTH_PASS: Vi9VYYssS8Rt2DltbKq6JXZC PROMETHEUS_AUTH_TOKEN: eyJhbGciOiJSUzI1NiIsImtpZCI6ImFWMzc3cFlVaXZjX05walVUUlY4bWtJNUZSTTlyVFplaEIwRnBldjhZamsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTc2NTIyMDM0MCwiaWF0IjoxNzY1MjE2NzQwLCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMiLCJqdGkiOiI2ZWY1YWIwNi03YjhiLTQ4OGItODkwYi02ZDM0Y2Y5YzVhMDYiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6InNlcnZpY2UtdGVsZW1ldHJ5Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6InN0Zi1wcm9tZXRoZXVzLXJlYWRlciIsInVpZCI6ImI0MDQ2N2U4LWE0OWQtNGQxOC04Y2Y4LTIxNjIwMjRkYTQ1YSJ9fSwibmJmIjoxNzY1MjE2NzQwLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6c2VydmljZS10ZWxlbWV0cnk6c3RmLXByb21ldGhldXMtcmVhZGVyIn0.O5UEY8wJ0JBFa4c4plOcpEkjisII8ssj1wcszQiMZfrph5ajeEV182dvVyEH14huoCZlDjjnwWlmZv2b8SRnMU6QAw5U7MQqXu4Zo7Ay2Al7Dec7pXWMPgkjEz-vDBP7ay-CN9ENZ4U2FHqKi4FER95_n5NdCPDuoMMT8gImOAZUTyrC7FGNN_BvMhlhW0BLawA8jYs2RCkyakVnVgn57G0QBOywE-pMAoajnq8ycH2GUG2KckzBq8b38aOETug8uVr4KO55i5rDaCn0UK1Qi322UZBaIm3dDcKqh0yUX3uzBAX_tByhmXxSNyRhb2kgWTxgKjySQdvpGc7kErLSwtAG5pq8Sy5wgm8sEix1_OuGQcrQK9hG6nluPCbn-ifLUlnkpK5SilWirEJp7tEWCj8aRCJmmyw_A0P6UcU2nSqhv9fdkON-veTeGSygpxSqwEkfAATxNi3A2RGufm6ZZm0r1c34zC66unHx4W1FUk9KlO89cGGybyQh6bQfgrMpkiGYtC_0807JnrIOwd4a7Zq3EVScFfkd8k1UIfq-tCjrkLI0fg3Dhew9U1n9RTA4L53lSZVIvVWAHbYZ4fhfH8Z0fMMBhrcX6XOGuGyYBD8jwrUeB8PknLRt5z03q_rZ0Vd6GGNQV2kr8YW5GSVQOXVK040gkL9GF5sUZADEU2A OBSERVABILITY_STRATEGY: <> Mounts: /ceilometer_publish.py from ceilometer-publisher (rw,path="ceilometer_publish.py") /smoketest_ceilometer_entrypoint.sh from ceilometer-entrypoint-script (rw,path="smoketest_ceilometer_entrypoint.sh") /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9xng2 (ro) Conditions: Type Status PodReadyToStartContainers False Initialized True Ready False ContainersReady False PodScheduled True Volumes: collectd-config: Type: ConfigMap (a volume populated by a ConfigMap) Name: stf-smoketest-collectd-config Optional: false sensubility-config: Type: ConfigMap (a volume populated by a ConfigMap) Name: stf-smoketest-sensubility-config Optional: false healthcheck-log: Type: ConfigMap (a volume populated by a ConfigMap) Name: stf-smoketest-healthcheck-log Optional: false collectd-entrypoint-script: Type: ConfigMap (a volume populated by a ConfigMap) Name: stf-smoketest-collectd-entrypoint-script Optional: false ceilometer-entrypoint-script: Type: ConfigMap (a volume populated by a ConfigMap) Name: stf-smoketest-ceilometer-entrypoint-script Optional: false ceilometer-publisher: 
Type: ConfigMap (a volume populated by a ConfigMap) Name: stf-smoketest-ceilometer-publisher Optional: false kube-api-access-9xng2: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt Optional: false DownwardAPI: true ConfigMapName: openshift-service-ca.crt Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 2m58s default-scheduler Successfully assigned service-telemetry/stf-smoketest-smoke1-pbhxq to crc Normal AddedInterface 2m58s multus Add eth0 [10.217.0.84/23] from ovn-kubernetes Normal Pulling 2m58s kubelet Pulling image "quay.io/tripleomastercentos9/openstack-collectd:current-tripleo" Normal Pulled 2m48s kubelet Successfully pulled image "quay.io/tripleomastercentos9/openstack-collectd:current-tripleo" in 9.562s (9.562s including waiting). Image size: 576108685 bytes. Normal Created 2m48s kubelet Created container: smoketest-collectd Normal Started 2m48s kubelet Started container smoketest-collectd Normal Pulling 2m48s kubelet Pulling image "quay.io/tripleomastercentos9/openstack-ceilometer-notification:current-tripleo" Normal Pulled 2m43s kubelet Successfully pulled image "quay.io/tripleomastercentos9/openstack-ceilometer-notification:current-tripleo" in 5.282s (5.282s including waiting). Image size: 700901219 bytes. Normal Created 2m42s kubelet Created container: smoketest-ceilometer Normal Started 2m42s kubelet Started container smoketest-ceilometer home/zuul/zuul-output/logs/controller/post_pv.log0000644000175000017500000000163415115611245021453 0ustar zuulzuulNAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 79Gi RWO Retain Bound service-telemetry/alertmanager-default-db-alertmanager-default-0 crc-csi-hostpath-provisioner 4m22s pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 30Gi RWX Retain Bound openshift-image-registry/crc-image-registry-storage crc-csi-hostpath-provisioner 35d pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 79Gi RWO Retain Bound service-telemetry/prometheus-default-db-prometheus-default-0 crc-csi-hostpath-provisioner 4m36s home/zuul/zuul-output/logs/controller/post_pvc.log0000644000175000017500000000106215115611245021611 0ustar zuulzuulNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE alertmanager-default-db-alertmanager-default-0 Bound pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 79Gi RWO crc-csi-hostpath-provisioner 4m27s prometheus-default-db-prometheus-default-0 Bound pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 79Gi RWO crc-csi-hostpath-provisioner 4m36s home/zuul/zuul-output/logs/controller/logs_sto.log0000644000175000017500000000025615115611246021612 0ustar zuulzuulError from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/service-telemetry-operator-79647f8775-zs8hl/operator": remote error: tls: internal error home/zuul/zuul-output/logs/controller/logs_sgo.log0000644000175000017500000000025215115611246021571 0ustar zuulzuulError from server: Get "https://192.168.126.11:10250/containerLogs/service-telemetry/smart-gateway-operator-5cd794ff55-w8r45/operator": remote error: tls: internal error home/zuul/zuul-output/logs/controller/logs_qdr.log0000644000175000017500000000020615115611246021566 0ustar zuulzuulError from server: Get 
"https://192.168.126.11:10250/containerLogs/service-telemetry/qdr-test/qdr": remote error: tls: internal error home/zuul/zuul-output/logs/controller/ansible.log0000644000175000017500000046107615115611247021412 0ustar zuulzuul2025-12-08 17:51:42,655 p=31279 u=zuul n=ansible | Starting galaxy collection install process 2025-12-08 17:51:42,656 p=31279 u=zuul n=ansible | Process install dependency map 2025-12-08 17:51:58,145 p=31279 u=zuul n=ansible | Starting collection install process 2025-12-08 17:51:58,145 p=31279 u=zuul n=ansible | Installing 'cifmw.general:1.0.0+33d5122f' to '/home/zuul/.ansible/collections/ansible_collections/cifmw/general' 2025-12-08 17:51:58,665 p=31279 u=zuul n=ansible | Created collection for cifmw.general:1.0.0+33d5122f at /home/zuul/.ansible/collections/ansible_collections/cifmw/general 2025-12-08 17:51:58,666 p=31279 u=zuul n=ansible | cifmw.general:1.0.0+33d5122f was installed successfully 2025-12-08 17:51:58,666 p=31279 u=zuul n=ansible | Installing 'containers.podman:1.16.2' to '/home/zuul/.ansible/collections/ansible_collections/containers/podman' 2025-12-08 17:51:58,720 p=31279 u=zuul n=ansible | Created collection for containers.podman:1.16.2 at /home/zuul/.ansible/collections/ansible_collections/containers/podman 2025-12-08 17:51:58,720 p=31279 u=zuul n=ansible | containers.podman:1.16.2 was installed successfully 2025-12-08 17:51:58,720 p=31279 u=zuul n=ansible | Installing 'community.general:10.0.1' to '/home/zuul/.ansible/collections/ansible_collections/community/general' 2025-12-08 17:51:59,462 p=31279 u=zuul n=ansible | Created collection for community.general:10.0.1 at /home/zuul/.ansible/collections/ansible_collections/community/general 2025-12-08 17:51:59,462 p=31279 u=zuul n=ansible | community.general:10.0.1 was installed successfully 2025-12-08 17:51:59,462 p=31279 u=zuul n=ansible | Installing 'ansible.posix:1.6.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/posix' 2025-12-08 17:51:59,512 p=31279 u=zuul n=ansible | Created collection for ansible.posix:1.6.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/posix 2025-12-08 17:51:59,513 p=31279 u=zuul n=ansible | ansible.posix:1.6.2 was installed successfully 2025-12-08 17:51:59,513 p=31279 u=zuul n=ansible | Installing 'ansible.utils:5.1.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/utils' 2025-12-08 17:51:59,608 p=31279 u=zuul n=ansible | Created collection for ansible.utils:5.1.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/utils 2025-12-08 17:51:59,608 p=31279 u=zuul n=ansible | ansible.utils:5.1.2 was installed successfully 2025-12-08 17:51:59,608 p=31279 u=zuul n=ansible | Installing 'community.libvirt:1.3.0' to '/home/zuul/.ansible/collections/ansible_collections/community/libvirt' 2025-12-08 17:51:59,631 p=31279 u=zuul n=ansible | Created collection for community.libvirt:1.3.0 at /home/zuul/.ansible/collections/ansible_collections/community/libvirt 2025-12-08 17:51:59,631 p=31279 u=zuul n=ansible | community.libvirt:1.3.0 was installed successfully 2025-12-08 17:51:59,631 p=31279 u=zuul n=ansible | Installing 'community.crypto:2.22.3' to '/home/zuul/.ansible/collections/ansible_collections/community/crypto' 2025-12-08 17:51:59,768 p=31279 u=zuul n=ansible | Created collection for community.crypto:2.22.3 at /home/zuul/.ansible/collections/ansible_collections/community/crypto 2025-12-08 17:51:59,768 p=31279 u=zuul n=ansible | community.crypto:2.22.3 was installed successfully 2025-12-08 17:51:59,768 p=31279 u=zuul 
n=ansible | Installing 'kubernetes.core:5.0.0' to '/home/zuul/.ansible/collections/ansible_collections/kubernetes/core' 2025-12-08 17:51:59,885 p=31279 u=zuul n=ansible | Created collection for kubernetes.core:5.0.0 at /home/zuul/.ansible/collections/ansible_collections/kubernetes/core 2025-12-08 17:51:59,885 p=31279 u=zuul n=ansible | kubernetes.core:5.0.0 was installed successfully 2025-12-08 17:51:59,885 p=31279 u=zuul n=ansible | Installing 'ansible.netcommon:7.1.0' to '/home/zuul/.ansible/collections/ansible_collections/ansible/netcommon' 2025-12-08 17:51:59,954 p=31279 u=zuul n=ansible | Created collection for ansible.netcommon:7.1.0 at /home/zuul/.ansible/collections/ansible_collections/ansible/netcommon 2025-12-08 17:51:59,954 p=31279 u=zuul n=ansible | ansible.netcommon:7.1.0 was installed successfully 2025-12-08 17:51:59,954 p=31279 u=zuul n=ansible | Installing 'openstack.config_template:2.1.1' to '/home/zuul/.ansible/collections/ansible_collections/openstack/config_template' 2025-12-08 17:51:59,971 p=31279 u=zuul n=ansible | Created collection for openstack.config_template:2.1.1 at /home/zuul/.ansible/collections/ansible_collections/openstack/config_template 2025-12-08 17:51:59,971 p=31279 u=zuul n=ansible | openstack.config_template:2.1.1 was installed successfully 2025-12-08 17:51:59,971 p=31279 u=zuul n=ansible | Installing 'junipernetworks.junos:9.1.0' to '/home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos' 2025-12-08 17:52:00,216 p=31279 u=zuul n=ansible | Created collection for junipernetworks.junos:9.1.0 at /home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos 2025-12-08 17:52:00,216 p=31279 u=zuul n=ansible | junipernetworks.junos:9.1.0 was installed successfully 2025-12-08 17:52:00,216 p=31279 u=zuul n=ansible | Installing 'cisco.ios:9.0.3' to '/home/zuul/.ansible/collections/ansible_collections/cisco/ios' 2025-12-08 17:52:00,466 p=31279 u=zuul n=ansible | Created collection for cisco.ios:9.0.3 at /home/zuul/.ansible/collections/ansible_collections/cisco/ios 2025-12-08 17:52:00,466 p=31279 u=zuul n=ansible | cisco.ios:9.0.3 was installed successfully 2025-12-08 17:52:00,466 p=31279 u=zuul n=ansible | Installing 'mellanox.onyx:1.0.0' to '/home/zuul/.ansible/collections/ansible_collections/mellanox/onyx' 2025-12-08 17:52:00,499 p=31279 u=zuul n=ansible | Created collection for mellanox.onyx:1.0.0 at /home/zuul/.ansible/collections/ansible_collections/mellanox/onyx 2025-12-08 17:52:00,499 p=31279 u=zuul n=ansible | mellanox.onyx:1.0.0 was installed successfully 2025-12-08 17:52:00,499 p=31279 u=zuul n=ansible | Installing 'community.okd:4.0.0' to '/home/zuul/.ansible/collections/ansible_collections/community/okd' 2025-12-08 17:52:00,527 p=31279 u=zuul n=ansible | Created collection for community.okd:4.0.0 at /home/zuul/.ansible/collections/ansible_collections/community/okd 2025-12-08 17:52:00,527 p=31279 u=zuul n=ansible | community.okd:4.0.0 was installed successfully 2025-12-08 17:52:00,527 p=31279 u=zuul n=ansible | Installing '@NAMESPACE@.@NAME@:3.1.4' to '/home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@' 2025-12-08 17:52:00,619 p=31279 u=zuul n=ansible | Created collection for @NAMESPACE@.@NAME@:3.1.4 at /home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@ 2025-12-08 17:52:00,619 p=31279 u=zuul n=ansible | @NAMESPACE@.@NAME@:3.1.4 was installed successfully 2025-12-08 17:52:08,760 p=31902 u=zuul n=ansible | PLAY [Bootstrap playbook] 
****************************************************** 2025-12-08 17:52:08,776 p=31902 u=zuul n=ansible | TASK [Gathering Facts ] ******************************************************** 2025-12-08 17:52:08,776 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:08 +0000 (0:00:00.032) 0:00:00.032 ******* 2025-12-08 17:52:08,776 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:08 +0000 (0:00:00.031) 0:00:00.031 ******* 2025-12-08 17:52:10,015 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,052 p=31902 u=zuul n=ansible | TASK [Set custom cifmw PATH reusable fact cifmw_path={{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}, cacheable=True] *** 2025-12-08 17:52:10,053 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:01.276) 0:00:01.308 ******* 2025-12-08 17:52:10,053 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:01.276) 0:00:01.307 ******* 2025-12-08 17:52:10,080 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,086 p=31902 u=zuul n=ansible | TASK [Get customized parameters ci_framework_params={{ hostvars[inventory_hostname] | dict2items | selectattr("key", "match", "^(cifmw|pre|post)_(?!install_yamls|openshift_token|openshift_login|openshift_kubeconfig).*") | list | items2dict }}] *** 2025-12-08 17:52:10,086 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.033) 0:00:01.342 ******* 2025-12-08 17:52:10,086 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.033) 0:00:01.341 ******* 2025-12-08 17:52:10,129 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,135 p=31902 u=zuul n=ansible | TASK [install_ca : Ensure target directory exists path={{ cifmw_install_ca_trust_dir }}, state=directory, mode=0755] *** 2025-12-08 17:52:10,135 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.048) 0:00:01.391 ******* 2025-12-08 17:52:10,135 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.048) 0:00:01.390 ******* 2025-12-08 17:52:10,496 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,506 p=31902 u=zuul n=ansible | TASK [install_ca : Install internal CA from url url={{ cifmw_install_ca_url }}, dest={{ cifmw_install_ca_trust_dir }}, validate_certs={{ cifmw_install_ca_url_validate_certs | default(omit) }}, mode=0644] *** 2025-12-08 17:52:10,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.370) 0:00:01.762 ******* 2025-12-08 17:52:10,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.370) 0:00:01.760 ******* 2025-12-08 17:52:10,533 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:10,543 p=31902 u=zuul n=ansible | TASK [install_ca : Install custom CA bundle from inline dest={{ cifmw_install_ca_trust_dir }}/cifmw_inline_ca_bundle.crt, content={{ cifmw_install_ca_bundle_inline }}, mode=0644] *** 2025-12-08 17:52:10,544 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.037) 0:00:01.799 ******* 2025-12-08 17:52:10,544 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.037) 0:00:01.798 ******* 2025-12-08 17:52:10,574 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:10,584 p=31902 u=zuul n=ansible | TASK [install_ca : Install custom CA bundle from file dest={{ cifmw_install_ca_trust_dir }}/{{ cifmw_install_ca_bundle_src | basename }}, src={{ cifmw_install_ca_bundle_src }}, mode=0644] *** 2025-12-08 
17:52:10,584 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.040) 0:00:01.839 ******* 2025-12-08 17:52:10,584 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.040) 0:00:01.838 ******* 2025-12-08 17:52:10,619 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:10,633 p=31902 u=zuul n=ansible | TASK [install_ca : Update ca bundle _raw_params=update-ca-trust] *************** 2025-12-08 17:52:10,634 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.049) 0:00:01.889 ******* 2025-12-08 17:52:10,634 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.049) 0:00:01.888 ******* 2025-12-08 17:52:12,273 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:12,285 p=31902 u=zuul n=ansible | TASK [repo_setup : Ensure directories are present path={{ cifmw_repo_setup_basedir }}/{{ item }}, state=directory, mode=0755] *** 2025-12-08 17:52:12,285 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:01.651) 0:00:03.540 ******* 2025-12-08 17:52:12,285 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:01.651) 0:00:03.539 ******* 2025-12-08 17:52:12,519 p=31902 u=zuul n=ansible | changed: [localhost] => (item=tmp) 2025-12-08 17:52:12,700 p=31902 u=zuul n=ansible | changed: [localhost] => (item=artifacts/repositories) 2025-12-08 17:52:12,886 p=31902 u=zuul n=ansible | changed: [localhost] => (item=venv/repo_setup) 2025-12-08 17:52:12,901 p=31902 u=zuul n=ansible | TASK [repo_setup : Make sure git-core package is installed name=git-core, state=present] *** 2025-12-08 17:52:12,901 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:00.616) 0:00:04.157 ******* 2025-12-08 17:52:12,901 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:00.616) 0:00:04.156 ******* 2025-12-08 17:52:13,882 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:13,891 p=31902 u=zuul n=ansible | TASK [repo_setup : Get repo-setup repository accept_hostkey=True, dest={{ cifmw_repo_setup_basedir }}/tmp/repo-setup, repo={{ cifmw_repo_setup_src }}] *** 2025-12-08 17:52:13,891 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:13 +0000 (0:00:00.990) 0:00:05.147 ******* 2025-12-08 17:52:13,892 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:13 +0000 (0:00:00.990) 0:00:05.146 ******* 2025-12-08 17:52:16,138 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:16,145 p=31902 u=zuul n=ansible | TASK [repo_setup : Initialize python venv and install requirements virtualenv={{ cifmw_repo_setup_venv }}, requirements={{ cifmw_repo_setup_basedir }}/tmp/repo-setup/requirements.txt, virtualenv_command=python3 -m venv --system-site-packages --upgrade-deps] *** 2025-12-08 17:52:16,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:16 +0000 (0:00:02.253) 0:00:07.400 ******* 2025-12-08 17:52:16,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:16 +0000 (0:00:02.253) 0:00:07.399 ******* 2025-12-08 17:52:24,622 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:24,630 p=31902 u=zuul n=ansible | TASK [repo_setup : Install repo-setup package chdir={{ cifmw_repo_setup_basedir }}/tmp/repo-setup, creates={{ cifmw_repo_setup_venv }}/bin/repo-setup, _raw_params={{ cifmw_repo_setup_venv }}/bin/python setup.py install] *** 2025-12-08 17:52:24,630 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:24 +0000 (0:00:08.485) 0:00:15.886 ******* 2025-12-08 17:52:24,630 p=31902 u=zuul n=ansible | Monday 
08 December 2025 17:52:24 +0000 (0:00:08.485) 0:00:15.884 ******* 2025-12-08 17:52:25,496 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:25,506 p=31902 u=zuul n=ansible | TASK [repo_setup : Set cifmw_repo_setup_dlrn_hash_tag from content provider cifmw_repo_setup_dlrn_hash_tag={{ content_provider_dlrn_md5_hash }}] *** 2025-12-08 17:52:25,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.875) 0:00:16.761 ******* 2025-12-08 17:52:25,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.875) 0:00:16.760 ******* 2025-12-08 17:52:25,538 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:25,547 p=31902 u=zuul n=ansible | TASK [repo_setup : Run repo-setup _raw_params={{ cifmw_repo_setup_venv }}/bin/repo-setup {{ cifmw_repo_setup_promotion }} {{ cifmw_repo_setup_additional_repos }} -d {{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }} -b {{ cifmw_repo_setup_branch }} --rdo-mirror {{ cifmw_repo_setup_rdo_mirror }} {% if cifmw_repo_setup_dlrn_hash_tag | length > 0 %} --dlrn-hash-tag {{ cifmw_repo_setup_dlrn_hash_tag }} {% endif %} -o {{ cifmw_repo_setup_output }}] *** 2025-12-08 17:52:25,547 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.041) 0:00:16.803 ******* 2025-12-08 17:52:25,547 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.041) 0:00:16.801 ******* 2025-12-08 17:52:26,188 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:26,195 p=31902 u=zuul n=ansible | TASK [repo_setup : Get component repo url={{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/component/{{ cifmw_repo_setup_component_name }}/{{ cifmw_repo_setup_component_promotion_tag }}/delorean.repo, dest={{ cifmw_repo_setup_output }}/{{ cifmw_repo_setup_component_name }}_{{ cifmw_repo_setup_component_promotion_tag }}_delorean.repo, mode=0644] *** 2025-12-08 17:52:26,195 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.647) 0:00:17.451 ******* 2025-12-08 17:52:26,195 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.647) 0:00:17.449 ******* 2025-12-08 17:52:26,242 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:26,255 p=31902 u=zuul n=ansible | TASK [repo_setup : Rename component repo path={{ cifmw_repo_setup_output }}/{{ cifmw_repo_setup_component_name }}_{{ cifmw_repo_setup_component_promotion_tag }}_delorean.repo, regexp=delorean-component-{{ cifmw_repo_setup_component_name }}, replace={{ cifmw_repo_setup_component_name }}-{{ cifmw_repo_setup_component_promotion_tag }}] *** 2025-12-08 17:52:26,256 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.511 ******* 2025-12-08 17:52:26,256 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.510 ******* 2025-12-08 17:52:26,306 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:26,323 p=31902 u=zuul n=ansible | TASK [repo_setup : Disable component repo in current-podified dlrn repo path={{ cifmw_repo_setup_output }}/delorean.repo, section=delorean-component-{{ cifmw_repo_setup_component_name }}, option=enabled, value=0, mode=0644] *** 2025-12-08 17:52:26,323 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.067) 0:00:17.579 ******* 2025-12-08 17:52:26,323 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.067) 0:00:17.578 ******* 
2025-12-08 17:52:26,371 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:26,384 p=31902 u=zuul n=ansible | TASK [repo_setup : Run repo-setup-get-hash _raw_params={{ cifmw_repo_setup_venv }}/bin/repo-setup-get-hash --dlrn-url {{ cifmw_repo_setup_dlrn_uri[:-1] }} --os-version {{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }} --release {{ cifmw_repo_setup_branch }} {% if cifmw_repo_setup_component_name | length > 0 -%} --component {{ cifmw_repo_setup_component_name }} --tag {{ cifmw_repo_setup_component_promotion_tag }} {% else -%} --tag {{cifmw_repo_setup_promotion }} {% endif -%} {% if (cifmw_repo_setup_dlrn_hash_tag | length > 0) and (cifmw_repo_setup_component_name | length <= 0) -%} --dlrn-hash-tag {{ cifmw_repo_setup_dlrn_hash_tag }} {% endif -%} --json] *** 2025-12-08 17:52:26,384 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.640 ******* 2025-12-08 17:52:26,384 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.638 ******* 2025-12-08 17:52:26,908 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:26,917 p=31902 u=zuul n=ansible | TASK [repo_setup : Dump full hash in delorean.repo.md5 file content={{ _repo_setup_json['full_hash'] }} , dest={{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5, mode=0644] *** 2025-12-08 17:52:26,917 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.533) 0:00:18.173 ******* 2025-12-08 17:52:26,917 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.533) 0:00:18.172 ******* 2025-12-08 17:52:27,629 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:27,635 p=31902 u=zuul n=ansible | TASK [repo_setup : Dump current-podified hash url={{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/current-podified/delorean.repo.md5, dest={{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5, mode=0644] *** 2025-12-08 17:52:27,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.717) 0:00:18.891 ******* 2025-12-08 17:52:27,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.717) 0:00:18.889 ******* 2025-12-08 17:52:27,662 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,668 p=31902 u=zuul n=ansible | TASK [repo_setup : Slurp current podified hash src={{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5] *** 2025-12-08 17:52:27,668 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.033) 0:00:18.924 ******* 2025-12-08 17:52:27,668 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.033) 0:00:18.923 ******* 2025-12-08 17:52:27,694 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,701 p=31902 u=zuul n=ansible | TASK [repo_setup : Update the value of full_hash _repo_setup_json={{ _repo_setup_json | combine({'full_hash': _hash}, recursive=true) }}] *** 2025-12-08 17:52:27,701 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.032) 0:00:18.957 ******* 2025-12-08 17:52:27,701 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.032) 0:00:18.955 ******* 2025-12-08 17:52:27,728 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,737 p=31902 u=zuul n=ansible | TASK [repo_setup : Export hashes facts for further use cifmw_repo_setup_full_hash={{ 
_repo_setup_json['full_hash'] }}, cifmw_repo_setup_commit_hash={{ _repo_setup_json['commit_hash'] }}, cifmw_repo_setup_distro_hash={{ _repo_setup_json['distro_hash'] }}, cifmw_repo_setup_extended_hash={{ _repo_setup_json['extended_hash'] }}, cifmw_repo_setup_dlrn_api_url={{ _repo_setup_json['dlrn_api_url'] }}, cifmw_repo_setup_dlrn_url={{ _repo_setup_json['dlrn_url'] }}, cifmw_repo_setup_release={{ _repo_setup_json['release'] }}, cacheable=True] *** 2025-12-08 17:52:27,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.035) 0:00:18.993 ******* 2025-12-08 17:52:27,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.035) 0:00:18.991 ******* 2025-12-08 17:52:27,784 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:27,792 p=31902 u=zuul n=ansible | TASK [repo_setup : Create download directory path={{ cifmw_repo_setup_rhos_release_path }}, state=directory, mode=0755] *** 2025-12-08 17:52:27,792 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.055) 0:00:19.048 ******* 2025-12-08 17:52:27,793 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.055) 0:00:19.047 ******* 2025-12-08 17:52:27,814 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,821 p=31902 u=zuul n=ansible | TASK [repo_setup : Print the URL to request msg={{ cifmw_repo_setup_rhos_release_rpm }}] *** 2025-12-08 17:52:27,822 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.029) 0:00:19.077 ******* 2025-12-08 17:52:27,822 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.029) 0:00:19.076 ******* 2025-12-08 17:52:27,847 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,856 p=31902 u=zuul n=ansible | TASK [Download the RPM name=krb_request] *************************************** 2025-12-08 17:52:27,856 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.034) 0:00:19.112 ******* 2025-12-08 17:52:27,856 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.034) 0:00:19.111 ******* 2025-12-08 17:52:27,873 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,881 p=31902 u=zuul n=ansible | TASK [repo_setup : Install RHOS Release tool name={{ cifmw_repo_setup_rhos_release_rpm if cifmw_repo_setup_rhos_release_rpm is not url else cifmw_krb_request_out.path }}, state=present, disable_gpg_check={{ cifmw_repo_setup_rhos_release_gpg_check | bool }}] *** 2025-12-08 17:52:27,881 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.024) 0:00:19.137 ******* 2025-12-08 17:52:27,881 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.024) 0:00:19.135 ******* 2025-12-08 17:52:27,898 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,907 p=31902 u=zuul n=ansible | TASK [repo_setup : Get rhos-release tool version _raw_params=rhos-release --version] *** 2025-12-08 17:52:27,907 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.026) 0:00:19.163 ******* 2025-12-08 17:52:27,907 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.026) 0:00:19.161 ******* 2025-12-08 17:52:27,924 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,932 p=31902 u=zuul n=ansible | TASK [repo_setup : Print rhos-release tool version msg={{ rr_version.stdout }}] *** 2025-12-08 17:52:27,933 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.025) 0:00:19.188 ******* 2025-12-08 
17:52:27,933 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.025) 0:00:19.187 ******* 2025-12-08 17:52:27,954 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,963 p=31902 u=zuul n=ansible | TASK [repo_setup : Generate repos using rhos-release {{ cifmw_repo_setup_rhos_release_args }} _raw_params=rhos-release {{ cifmw_repo_setup_rhos_release_args }} \ -t {{ cifmw_repo_setup_output }}] *** 2025-12-08 17:52:27,963 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.030) 0:00:19.219 ******* 2025-12-08 17:52:27,964 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.030) 0:00:19.218 ******* 2025-12-08 17:52:27,976 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,985 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for /etc/ci/mirror_info.sh path=/etc/ci/mirror_info.sh] *** 2025-12-08 17:52:27,985 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.021) 0:00:19.240 ******* 2025-12-08 17:52:27,985 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.021) 0:00:19.239 ******* 2025-12-08 17:52:28,210 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:28,218 p=31902 u=zuul n=ansible | TASK [repo_setup : Use RDO proxy mirrors chdir={{ cifmw_repo_setup_output }}, _raw_params=set -o pipefail source /etc/ci/mirror_info.sh sed -i -e "s|https://trunk.rdoproject.org|$NODEPOOL_RDO_PROXY|g" *.repo ] *** 2025-12-08 17:52:28,218 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.233) 0:00:19.474 ******* 2025-12-08 17:52:28,218 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.233) 0:00:19.472 ******* 2025-12-08 17:52:28,449 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:28,466 p=31902 u=zuul n=ansible | TASK [repo_setup : Use RDO CentOS mirrors (remove CentOS 10 conditional when Nodepool mirrors exist) chdir={{ cifmw_repo_setup_output }}, _raw_params=set -o pipefail source /etc/ci/mirror_info.sh sed -i -e "s|http://mirror.stream.centos.org|$NODEPOOL_CENTOS_MIRROR|g" *.repo ] *** 2025-12-08 17:52:28,466 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.248) 0:00:19.722 ******* 2025-12-08 17:52:28,467 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.248) 0:00:19.721 ******* 2025-12-08 17:52:28,706 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:28,721 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for gating.repo file on content provider url=http://{{ content_provider_registry_ip }}:8766/gating.repo] *** 2025-12-08 17:52:28,722 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.255) 0:00:19.977 ******* 2025-12-08 17:52:28,722 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.255) 0:00:19.976 ******* 2025-12-08 17:52:28,749 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,755 p=31902 u=zuul n=ansible | TASK [repo_setup : Populate gating repo from content provider ip content=[gating-repo] baseurl=http://{{ content_provider_registry_ip }}:8766/ enabled=1 gpgcheck=0 priority=1 , dest={{ cifmw_repo_setup_output }}/gating.repo, mode=0644] *** 2025-12-08 17:52:28,755 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.011 ******* 2025-12-08 17:52:28,755 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.010 ******* 2025-12-08 17:52:28,784 p=31902 u=zuul n=ansible | skipping: 
[localhost] 2025-12-08 17:52:28,790 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for DLRN repo at the destination path={{ cifmw_repo_setup_output }}/delorean.repo] *** 2025-12-08 17:52:28,790 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.034) 0:00:20.046 ******* 2025-12-08 17:52:28,790 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.034) 0:00:20.045 ******* 2025-12-08 17:52:28,817 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,824 p=31902 u=zuul n=ansible | TASK [repo_setup : Lower the priority of DLRN repos to allow installation from gating repo path={{ cifmw_repo_setup_output }}/delorean.repo, regexp=priority=1, replace=priority=20] *** 2025-12-08 17:52:28,824 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.079 ******* 2025-12-08 17:52:28,824 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.078 ******* 2025-12-08 17:52:28,861 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,867 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for DLRN component repo path={{ cifmw_repo_setup_output }}/{{ _comp_repo }}] *** 2025-12-08 17:52:28,867 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.043) 0:00:20.123 ******* 2025-12-08 17:52:28,867 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.043) 0:00:20.122 ******* 2025-12-08 17:52:28,894 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,900 p=31902 u=zuul n=ansible | TASK [repo_setup : Lower the priority of componennt repos to allow installation from gating repo path={{ cifmw_repo_setup_output }}//{{ _comp_repo }}, regexp=priority=1, replace=priority=2] *** 2025-12-08 17:52:28,900 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.032) 0:00:20.155 ******* 2025-12-08 17:52:28,900 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.032) 0:00:20.154 ******* 2025-12-08 17:52:28,939 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,945 p=31902 u=zuul n=ansible | TASK [repo_setup : Find existing repos from /etc/yum.repos.d directory paths=/etc/yum.repos.d/, patterns=*.repo, recurse=False] *** 2025-12-08 17:52:28,945 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.045) 0:00:20.201 ******* 2025-12-08 17:52:28,945 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.045) 0:00:20.199 ******* 2025-12-08 17:52:29,252 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:29,267 p=31902 u=zuul n=ansible | TASK [repo_setup : Remove existing repos from /etc/yum.repos.d directory path={{ item }}, state=absent] *** 2025-12-08 17:52:29,267 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.321) 0:00:20.523 ******* 2025-12-08 17:52:29,267 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.322) 0:00:20.521 ******* 2025-12-08 17:52:29,525 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/etc/yum.repos.d/centos-addons.repo) 2025-12-08 17:52:29,723 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/etc/yum.repos.d/centos.repo) 2025-12-08 17:52:29,733 p=31902 u=zuul n=ansible | TASK [repo_setup : Cleanup existing metadata _raw_params=dnf clean metadata] *** 2025-12-08 17:52:29,733 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.465) 0:00:20.988 ******* 2025-12-08 17:52:29,733 p=31902 u=zuul n=ansible | Monday 08 
December 2025 17:52:29 +0000 (0:00:00.465) 0:00:20.987 ******* 2025-12-08 17:52:30,222 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:30,236 p=31902 u=zuul n=ansible | TASK [repo_setup : Copy generated repos to /etc/yum.repos.d directory mode=0755, remote_src=True, src={{ cifmw_repo_setup_output }}/, dest=/etc/yum.repos.d] *** 2025-12-08 17:52:30,237 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.503) 0:00:21.492 ******* 2025-12-08 17:52:30,237 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.503) 0:00:21.491 ******* 2025-12-08 17:52:30,666 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:30,695 p=31902 u=zuul n=ansible | TASK [ci_setup : Gather variables for each operating system _raw_params={{ item }}] *** 2025-12-08 17:52:30,695 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.458) 0:00:21.950 ******* 2025-12-08 17:52:30,695 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.458) 0:00:21.949 ******* 2025-12-08 17:52:30,749 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/ci_setup/vars/redhat.yml) 2025-12-08 17:52:30,760 p=31902 u=zuul n=ansible | TASK [ci_setup : List packages to install var=cifmw_ci_setup_packages] ********* 2025-12-08 17:52:30,760 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.064) 0:00:22.015 ******* 2025-12-08 17:52:30,760 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.064) 0:00:22.014 ******* 2025-12-08 17:52:30,787 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_ci_setup_packages: - bash-completion - ca-certificates - git-core - make - tar - tmux - python3-pip 2025-12-08 17:52:30,796 p=31902 u=zuul n=ansible | TASK [ci_setup : Install needed packages name={{ cifmw_ci_setup_packages }}, state=latest] *** 2025-12-08 17:52:30,796 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.036) 0:00:22.052 ******* 2025-12-08 17:52:30,796 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.036) 0:00:22.051 ******* 2025-12-08 17:52:58,113 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:58,120 p=31902 u=zuul n=ansible | TASK [ci_setup : Gather version of openshift client _raw_params=oc version --client -o yaml] *** 2025-12-08 17:52:58,120 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:27.323) 0:00:49.376 ******* 2025-12-08 17:52:58,120 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:27.323) 0:00:49.374 ******* 2025-12-08 17:52:58,331 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:58,340 p=31902 u=zuul n=ansible | TASK [ci_setup : Ensure openshift client install path is present path={{ cifmw_ci_setup_oc_install_path }}, state=directory, mode=0755] *** 2025-12-08 17:52:58,340 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.220) 0:00:49.596 ******* 2025-12-08 17:52:58,341 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.220) 0:00:49.595 ******* 2025-12-08 17:52:58,535 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:58,542 p=31902 u=zuul n=ansible | TASK [ci_setup : Install openshift client src={{ cifmw_ci_setup_openshift_client_download_uri }}/{{ cifmw_ci_setup_openshift_client_version }}/openshift-client-linux.tar.gz, dest={{ cifmw_ci_setup_oc_install_path }}, remote_src=True, mode=0755, creates={{ 
cifmw_ci_setup_oc_install_path }}/oc] *** 2025-12-08 17:52:58,542 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.201) 0:00:49.798 ******* 2025-12-08 17:52:58,542 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.201) 0:00:49.797 ******* 2025-12-08 17:53:03,845 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:03,857 p=31902 u=zuul n=ansible | TASK [ci_setup : Add the OC path to cifmw_path if needed cifmw_path={{ cifmw_ci_setup_oc_install_path }}:{{ ansible_env.PATH }}, cacheable=True] *** 2025-12-08 17:53:03,857 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:05.315) 0:00:55.113 ******* 2025-12-08 17:53:03,857 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:05.315) 0:00:55.112 ******* 2025-12-08 17:53:03,880 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:03,890 p=31902 u=zuul n=ansible | TASK [ci_setup : Create completion file] *************************************** 2025-12-08 17:53:03,890 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:00.033) 0:00:55.146 ******* 2025-12-08 17:53:03,890 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:00.033) 0:00:55.145 ******* 2025-12-08 17:53:04,265 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:04,274 p=31902 u=zuul n=ansible | TASK [ci_setup : Source completion from within .bashrc create=True, mode=0644, path={{ ansible_user_dir }}/.bashrc, block=if [ -f ~/.oc_completion ]; then source ~/.oc_completion fi] *** 2025-12-08 17:53:04,274 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.383) 0:00:55.529 ******* 2025-12-08 17:53:04,274 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.383) 0:00:55.528 ******* 2025-12-08 17:53:04,625 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:04,635 p=31902 u=zuul n=ansible | TASK [ci_setup : Check rhsm status _raw_params=subscription-manager status] **** 2025-12-08 17:53:04,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.361) 0:00:55.891 ******* 2025-12-08 17:53:04,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.361) 0:00:55.889 ******* 2025-12-08 17:53:04,655 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,665 p=31902 u=zuul n=ansible | TASK [ci_setup : Gather the repos to be enabled _repos={{ cifmw_ci_setup_rhel_rhsm_default_repos + (cifmw_ci_setup_rhel_rhsm_extra_repos | default([])) }}] *** 2025-12-08 17:53:04,665 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.030) 0:00:55.921 ******* 2025-12-08 17:53:04,665 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.030) 0:00:55.919 ******* 2025-12-08 17:53:04,689 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,699 p=31902 u=zuul n=ansible | TASK [ci_setup : Enabling the required repositories. 
name={{ item }}, state={{ rhsm_repo_state | default('enabled') }}] *** 2025-12-08 17:53:04,699 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.033) 0:00:55.954 ******* 2025-12-08 17:53:04,699 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.033) 0:00:55.953 ******* 2025-12-08 17:53:04,727 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,737 p=31902 u=zuul n=ansible | TASK [ci_setup : Get current /etc/redhat-release _raw_params=cat /etc/redhat-release] *** 2025-12-08 17:53:04,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.038) 0:00:55.993 ******* 2025-12-08 17:53:04,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.038) 0:00:55.992 ******* 2025-12-08 17:53:04,762 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,769 p=31902 u=zuul n=ansible | TASK [ci_setup : Print current /etc/redhat-release msg={{ _current_rh_release.stdout }}] *** 2025-12-08 17:53:04,769 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.031) 0:00:56.024 ******* 2025-12-08 17:53:04,769 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.031) 0:00:56.023 ******* 2025-12-08 17:53:04,787 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,794 p=31902 u=zuul n=ansible | TASK [ci_setup : Ensure the repos are enabled in the system using yum name={{ item.name }}, baseurl={{ item.baseurl }}, description={{ item.description | default(item.name) }}, gpgcheck={{ item.gpgcheck | default(false) }}, enabled=True, state={{ yum_repo_state | default('present') }}] *** 2025-12-08 17:53:04,794 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.025) 0:00:56.050 ******* 2025-12-08 17:53:04,794 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.025) 0:00:56.049 ******* 2025-12-08 17:53:04,830 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,839 p=31902 u=zuul n=ansible | TASK [ci_setup : Manage directories path={{ item }}, state={{ directory_state }}, mode=0755, owner={{ ansible_user_id }}, group={{ ansible_user_id }}] *** 2025-12-08 17:53:04,839 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.045) 0:00:56.095 ******* 2025-12-08 17:53:04,840 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.045) 0:00:56.094 ******* 2025-12-08 17:53:05,148 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/manifests/openstack/cr) 2025-12-08 17:53:05,359 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/logs) 2025-12-08 17:53:05,572 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/tmp) 2025-12-08 17:53:05,819 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/volumes) 2025-12-08 17:53:06,024 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/parameters) 2025-12-08 17:53:06,051 p=31902 u=zuul n=ansible | TASK [Prepare install_yamls make targets name=install_yamls, apply={'tags': ['bootstrap']}] *** 2025-12-08 17:53:06,052 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:01.212) 0:00:57.307 ******* 2025-12-08 17:53:06,052 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:01.212) 0:00:57.306 ******* 2025-12-08 17:53:06,175 p=31902 u=zuul n=ansible | TASK [install_yamls : Ensure directories exist path={{ 
item }}, state=directory, mode=0755] *** 2025-12-08 17:53:06,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.124) 0:00:57.431 ******* 2025-12-08 17:53:06,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.124) 0:00:57.430 ******* 2025-12-08 17:53:06,432 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/artifacts) 2025-12-08 17:53:06,603 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/roles/install_yamls_makes/tasks) 2025-12-08 17:53:06,835 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/parameters) 2025-12-08 17:53:06,846 p=31902 u=zuul n=ansible | TASK [Create variables with local repos based on Zuul items name=install_yamls, tasks_from=zuul_set_operators_repo.yml] *** 2025-12-08 17:53:06,846 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.670) 0:00:58.102 ******* 2025-12-08 17:53:06,846 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.670) 0:00:58.100 ******* 2025-12-08 17:53:06,896 p=31902 u=zuul n=ansible | TASK [install_yamls : Set fact with local repos based on Zuul items cifmw_install_yamls_operators_repo={{ cifmw_install_yamls_operators_repo | default({}) | combine(_repo_operator_info | items2dict) }}] *** 2025-12-08 17:53:06,896 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.049) 0:00:58.152 ******* 2025-12-08 17:53:06,896 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.049) 0:00:58.150 ******* 2025-12-08 17:53:06,931 p=31902 u=zuul n=ansible | skipping: [localhost] => (item={'branch': 'master', 'change': '694', 'change_url': 'https://github.com/infrawatch/service-telemetry-operator/pull/694', 'commit_id': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'patchset': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'project': {'canonical_hostname': 'github.com', 'canonical_name': 'github.com/infrawatch/service-telemetry-operator', 'name': 'infrawatch/service-telemetry-operator', 'short_name': 'service-telemetry-operator', 'src_dir': 'src/github.com/infrawatch/service-telemetry-operator'}, 'topic': None}) 2025-12-08 17:53:06,933 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:06,943 p=31902 u=zuul n=ansible | TASK [install_yamls : Print helpful data for debugging msg=_repo_operator_name: {{ _repo_operator_name }} _repo_operator_info: {{ _repo_operator_info }} cifmw_install_yamls_operators_repo: {{ cifmw_install_yamls_operators_repo }} ] *** 2025-12-08 17:53:06,944 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.047) 0:00:58.199 ******* 2025-12-08 17:53:06,944 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.047) 0:00:58.198 ******* 2025-12-08 17:53:06,979 p=31902 u=zuul n=ansible | skipping: [localhost] => (item={'branch': 'master', 'change': '694', 'change_url': 'https://github.com/infrawatch/service-telemetry-operator/pull/694', 'commit_id': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'patchset': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'project': {'canonical_hostname': 'github.com', 'canonical_name': 'github.com/infrawatch/service-telemetry-operator', 'name': 'infrawatch/service-telemetry-operator', 'short_name': 'service-telemetry-operator', 'src_dir': 'src/github.com/infrawatch/service-telemetry-operator'}, 'topic': None}) 2025-12-08 17:53:06,981 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:06,999 p=31902 
u=zuul n=ansible | TASK [Customize install_yamls devsetup vars if needed name=install_yamls, tasks_from=customize_devsetup_vars.yml] *** 2025-12-08 17:53:06,999 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.055) 0:00:58.254 ******* 2025-12-08 17:53:06,999 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.055) 0:00:58.253 ******* 2025-12-08 17:53:07,050 p=31902 u=zuul n=ansible | TASK [install_yamls : Update opm_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^opm_version:, line=opm_version: {{ cifmw_install_yamls_opm_version }}, state=present] *** 2025-12-08 17:53:07,050 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.051) 0:00:58.306 ******* 2025-12-08 17:53:07,050 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.051) 0:00:58.304 ******* 2025-12-08 17:53:07,070 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,076 p=31902 u=zuul n=ansible | TASK [install_yamls : Update sdk_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^sdk_version:, line=sdk_version: {{ cifmw_install_yamls_sdk_version }}, state=present] *** 2025-12-08 17:53:07,077 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.026) 0:00:58.332 ******* 2025-12-08 17:53:07,077 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.026) 0:00:58.331 ******* 2025-12-08 17:53:07,111 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,117 p=31902 u=zuul n=ansible | TASK [install_yamls : Update go_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^go_version:, line=go_version: {{ cifmw_install_yamls_go_version }}, state=present] *** 2025-12-08 17:53:07,117 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.373 ******* 2025-12-08 17:53:07,117 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.372 ******* 2025-12-08 17:53:07,139 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,145 p=31902 u=zuul n=ansible | TASK [install_yamls : Update kustomize_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^kustomize_version:, line=kustomize_version: {{ cifmw_install_yamls_kustomize_version }}, state=present] *** 2025-12-08 17:53:07,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.027) 0:00:58.401 ******* 2025-12-08 17:53:07,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.027) 0:00:58.400 ******* 2025-12-08 17:53:07,165 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,176 p=31902 u=zuul n=ansible | TASK [install_yamls : Compute the cifmw_install_yamls_vars final value _install_yamls_override_vars={{ _install_yamls_override_vars | default({}) | combine(item, recursive=True) }}] *** 2025-12-08 17:53:07,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.030) 0:00:58.432 ******* 2025-12-08 17:53:07,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.030) 0:00:58.431 ******* 2025-12-08 17:53:07,259 p=31902 u=zuul n=ansible | ok: [localhost] => (item={}) 2025-12-08 17:53:07,267 p=31902 u=zuul n=ansible | TASK [install_yamls : Set environment override 
cifmw_install_yamls_environment fact cifmw_install_yamls_environment={{ _install_yamls_override_vars.keys() | map('upper') | zip(_install_yamls_override_vars.values()) | items2dict(key_name=0, value_name=1) | combine({ 'OUT': cifmw_install_yamls_manifests_dir, 'OUTPUT_DIR': cifmw_install_yamls_edpm_dir, 'CHECKOUT_FROM_OPENSTACK_REF': cifmw_install_yamls_checkout_openstack_ref, 'OPENSTACK_K8S_BRANCH': (zuul is defined and not zuul.branch |regex_search('master|antelope|rhos')) | ternary(zuul.branch, 'main') }) | combine(install_yamls_operators_repos) }}, cacheable=True] *** 2025-12-08 17:53:07,267 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.091) 0:00:58.523 ******* 2025-12-08 17:53:07,268 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.091) 0:00:58.522 ******* 2025-12-08 17:53:07,299 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:07,308 p=31902 u=zuul n=ansible | TASK [install_yamls : Get environment structure base_path={{ cifmw_install_yamls_repo }}] *** 2025-12-08 17:53:07,308 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.564 ******* 2025-12-08 17:53:07,308 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.563 ******* 2025-12-08 17:53:07,903 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:07,919 p=31902 u=zuul n=ansible | TASK [install_yamls : Ensure Output directory exists path={{ cifmw_install_yamls_out_dir }}, state=directory, mode=0755] *** 2025-12-08 17:53:07,919 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.611) 0:00:59.175 ******* 2025-12-08 17:53:07,919 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.611) 0:00:59.174 ******* 2025-12-08 17:53:07,956 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,970 p=31902 u=zuul n=ansible | TASK [install_yamls : Ensure user cifmw_install_yamls_vars contains existing Makefile variables that=_cifmw_install_yamls_unmatched_vars | length == 0, msg=cifmw_install_yamls_vars contains a variable that is not defined in install_yamls Makefile nor cifmw_install_yamls_whitelisted_vars: {{ _cifmw_install_yamls_unmatched_vars | join(', ')}}, quiet=True] *** 2025-12-08 17:53:07,970 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.050) 0:00:59.226 ******* 2025-12-08 17:53:07,970 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.050) 0:00:59.224 ******* 2025-12-08 17:53:08,004 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:08,027 p=31902 u=zuul n=ansible | TASK [install_yamls : Generate /home/zuul/ci-framework-data/artifacts/install_yamls.sh dest={{ cifmw_install_yamls_out_dir }}/{{ cifmw_install_yamls_envfile }}, content={% for k,v in cifmw_install_yamls_environment.items() %} export {{ k }}={{ v }} {% endfor %}, mode=0644] *** 2025-12-08 17:53:08,027 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.057) 0:00:59.283 ******* 2025-12-08 17:53:08,027 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.057) 0:00:59.281 ******* 2025-12-08 17:53:08,060 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:08,074 p=31902 u=zuul n=ansible | TASK [install_yamls : Set install_yamls default values cifmw_install_yamls_defaults={{ get_makefiles_env_output.makefiles_values | combine(cifmw_install_yamls_environment) }}, cacheable=True] *** 2025-12-08 17:53:08,074 p=31902 u=zuul n=ansible | Monday 08 December 2025 
17:53:08 +0000 (0:00:00.047) 0:00:59.330 ******* 2025-12-08 17:53:08,074 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.047) 0:00:59.329 ******* 2025-12-08 17:53:08,117 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:08,134 p=31902 u=zuul n=ansible | TASK [install_yamls : Show the env structure var=cifmw_install_yamls_environment] *** 2025-12-08 17:53:08,134 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.059) 0:00:59.390 ******* 2025-12-08 17:53:08,134 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.059) 0:00:59.388 ******* 2025-12-08 17:53:08,167 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_install_yamls_environment: CHECKOUT_FROM_OPENSTACK_REF: 'true' OPENSTACK_K8S_BRANCH: main OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm 2025-12-08 17:53:08,177 p=31902 u=zuul n=ansible | TASK [install_yamls : Show the env structure defaults var=cifmw_install_yamls_defaults] *** 2025-12-08 17:53:08,177 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.042) 0:00:59.432 ******* 2025-12-08 17:53:08,177 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.042) 0:00:59.431 ******* 2025-12-08 17:53:08,219 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_install_yamls_defaults: ADOPTED_EXTERNAL_NETWORK: 172.21.1.0/24 ADOPTED_INTERNALAPI_NETWORK: 172.17.1.0/24 ADOPTED_STORAGEMGMT_NETWORK: 172.20.1.0/24 ADOPTED_STORAGE_NETWORK: 172.18.1.0/24 ADOPTED_TENANT_NETWORK: 172.9.1.0/24 ANSIBLEEE: config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_BRANCH: main ANSIBLEEE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-operator-index:latest ANSIBLEEE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/kuttl-test.yaml ANSIBLEEE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/test/kuttl/tests ANSIBLEEE_KUTTL_NAMESPACE: ansibleee-kuttl-tests ANSIBLEEE_REPO: https://github.com/openstack-k8s-operators/openstack-ansibleee-operator ANSIBLEE_COMMIT_HASH: '' BARBICAN: config/samples/barbican_v1beta1_barbican.yaml BARBICAN_BRANCH: main BARBICAN_COMMIT_HASH: '' BARBICAN_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/config/samples/barbican_v1beta1_barbican.yaml BARBICAN_DEPL_IMG: unused BARBICAN_IMG: quay.io/openstack-k8s-operators/barbican-operator-index:latest BARBICAN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/kuttl-test.yaml BARBICAN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/test/kuttl/tests BARBICAN_KUTTL_NAMESPACE: barbican-kuttl-tests BARBICAN_REPO: https://github.com/openstack-k8s-operators/barbican-operator.git BARBICAN_SERVICE_ENABLED: 'true' BARBICAN_SIMPLE_CRYPTO_ENCRYPTION_KEY: sEFmdFjDUqRM2VemYslV5yGNWjokioJXsg8Nrlc3drU= BAREMETAL_BRANCH: main BAREMETAL_COMMIT_HASH: '' BAREMETAL_IMG: quay.io/openstack-k8s-operators/openstack-baremetal-operator-index:latest BAREMETAL_OS_CONTAINER_IMG: '' BAREMETAL_OS_IMG: '' BAREMETAL_REPO: https://github.com/openstack-k8s-operators/openstack-baremetal-operator.git BAREMETAL_TIMEOUT: 20m BASH_IMG: quay.io/openstack-k8s-operators/bash:latest BGP_ASN: '64999' BGP_LEAF_1: 100.65.4.1 BGP_LEAF_2: 100.64.4.1 
BGP_OVN_ROUTING: 'false' BGP_PEER_ASN: '64999' BGP_SOURCE_IP: 172.30.4.2 BGP_SOURCE_IP6: f00d:f00d:f00d:f00d:f00d:f00d:f00d:42 BMAAS_BRIDGE_IPV4_PREFIX: 172.20.1.2/24 BMAAS_BRIDGE_IPV6_PREFIX: fd00:bbbb::2/64 BMAAS_INSTANCE_DISK_SIZE: '20' BMAAS_INSTANCE_MEMORY: '4096' BMAAS_INSTANCE_NAME_PREFIX: crc-bmaas BMAAS_INSTANCE_NET_MODEL: virtio BMAAS_INSTANCE_OS_VARIANT: centos-stream9 BMAAS_INSTANCE_VCPUS: '2' BMAAS_INSTANCE_VIRT_TYPE: kvm BMAAS_IPV4: 'true' BMAAS_IPV6: 'false' BMAAS_LIBVIRT_USER: sushyemu BMAAS_METALLB_ADDRESS_POOL: 172.20.1.64/26 BMAAS_METALLB_POOL_NAME: baremetal BMAAS_NETWORK_IPV4_PREFIX: 172.20.1.1/24 BMAAS_NETWORK_IPV6_PREFIX: fd00:bbbb::1/64 BMAAS_NETWORK_NAME: crc-bmaas BMAAS_NODE_COUNT: '1' BMAAS_OCP_INSTANCE_NAME: crc BMAAS_REDFISH_PASSWORD: password BMAAS_REDFISH_USERNAME: admin BMAAS_ROUTE_LIBVIRT_NETWORKS: crc-bmaas,crc,default BMAAS_SUSHY_EMULATOR_DRIVER: libvirt BMAAS_SUSHY_EMULATOR_IMAGE: quay.io/metal3-io/sushy-tools:latest BMAAS_SUSHY_EMULATOR_NAMESPACE: sushy-emulator BMAAS_SUSHY_EMULATOR_OS_CLIENT_CONFIG_FILE: /etc/openstack/clouds.yaml BMAAS_SUSHY_EMULATOR_OS_CLOUD: openstack BMH_NAMESPACE: openstack BMO_BRANCH: release-0.9 BMO_CLEANUP: 'true' BMO_COMMIT_HASH: '' BMO_IPA_BRANCH: stable/2024.1 BMO_IRONIC_HOST: 192.168.122.10 BMO_PROVISIONING_INTERFACE: '' BMO_REPO: https://github.com/metal3-io/baremetal-operator BMO_SETUP: '' BMO_SETUP_ROUTE_REPLACE: 'true' BM_CTLPLANE_INTERFACE: enp1s0 BM_INSTANCE_MEMORY: '8192' BM_INSTANCE_NAME_PREFIX: edpm-compute-baremetal BM_INSTANCE_NAME_SUFFIX: '0' BM_NETWORK_NAME: default BM_NODE_COUNT: '1' BM_ROOT_PASSWORD: '' BM_ROOT_PASSWORD_SECRET: '' CEILOMETER_CENTRAL_DEPL_IMG: unused CEILOMETER_NOTIFICATION_DEPL_IMG: unused CEPH_BRANCH: release-1.15 CEPH_CLIENT: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/toolbox.yaml CEPH_COMMON: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/common.yaml CEPH_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/cluster-test.yaml CEPH_CRDS: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/crds.yaml CEPH_IMG: quay.io/ceph/demo:latest-squid CEPH_OP: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/operator-openshift.yaml CEPH_REPO: https://github.com/rook/rook.git CERTMANAGER_TIMEOUT: 300s CHECKOUT_FROM_OPENSTACK_REF: 'true' CINDER: config/samples/cinder_v1beta1_cinder.yaml CINDERAPI_DEPL_IMG: unused CINDERBKP_DEPL_IMG: unused CINDERSCH_DEPL_IMG: unused CINDERVOL_DEPL_IMG: unused CINDER_BRANCH: main CINDER_COMMIT_HASH: '' CINDER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/config/samples/cinder_v1beta1_cinder.yaml CINDER_IMG: quay.io/openstack-k8s-operators/cinder-operator-index:latest CINDER_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/kuttl-test.yaml CINDER_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/test/kuttl/tests CINDER_KUTTL_NAMESPACE: cinder-kuttl-tests CINDER_REPO: https://github.com/openstack-k8s-operators/cinder-operator.git CLEANUP_DIR_CMD: rm -Rf CRC_BGP_NIC_1_MAC: '52:54:00:11:11:11' CRC_BGP_NIC_2_MAC: '52:54:00:11:11:12' CRC_HTTPS_PROXY: '' CRC_HTTP_PROXY: '' CRC_STORAGE_NAMESPACE: crc-storage CRC_STORAGE_RETRIES: '3' CRC_URL: '''https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/crc/latest/crc-linux-amd64.tar.xz''' CRC_VERSION: latest DATAPLANE_ANSIBLE_SECRET: 
dataplane-ansible-ssh-private-key-secret DATAPLANE_ANSIBLE_USER: '' DATAPLANE_COMPUTE_IP: 192.168.122.100 DATAPLANE_CONTAINER_PREFIX: openstack DATAPLANE_CONTAINER_TAG: current-podified DATAPLANE_CUSTOM_SERVICE_RUNNER_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest DATAPLANE_DEFAULT_GW: 192.168.122.1 DATAPLANE_EXTRA_NOVA_CONFIG_FILE: /dev/null DATAPLANE_GROWVOLS_ARGS: /=8GB /tmp=1GB /home=1GB /var=100% DATAPLANE_KUSTOMIZE_SCENARIO: preprovisioned DATAPLANE_NETWORKER_IP: 192.168.122.200 DATAPLANE_NETWORK_INTERFACE_NAME: eth0 DATAPLANE_NOVA_NFS_PATH: '' DATAPLANE_NTP_SERVER: pool.ntp.org DATAPLANE_PLAYBOOK: osp.edpm.download_cache DATAPLANE_REGISTRY_URL: quay.io/podified-antelope-centos9 DATAPLANE_RUNNER_IMG: '' DATAPLANE_SERVER_ROLE: compute DATAPLANE_SSHD_ALLOWED_RANGES: '[''192.168.122.0/24'']' DATAPLANE_TIMEOUT: 30m DATAPLANE_TLS_ENABLED: 'true' DATAPLANE_TOTAL_NETWORKER_NODES: '1' DATAPLANE_TOTAL_NODES: '1' DBSERVICE: galera DESIGNATE: config/samples/designate_v1beta1_designate.yaml DESIGNATE_BRANCH: main DESIGNATE_COMMIT_HASH: '' DESIGNATE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/config/samples/designate_v1beta1_designate.yaml DESIGNATE_IMG: quay.io/openstack-k8s-operators/designate-operator-index:latest DESIGNATE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/kuttl-test.yaml DESIGNATE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/test/kuttl/tests DESIGNATE_KUTTL_NAMESPACE: designate-kuttl-tests DESIGNATE_REPO: https://github.com/openstack-k8s-operators/designate-operator.git DNSDATA: config/samples/network_v1beta1_dnsdata.yaml DNSDATA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsdata.yaml DNSMASQ: config/samples/network_v1beta1_dnsmasq.yaml DNSMASQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsmasq.yaml DNS_DEPL_IMG: unused DNS_DOMAIN: localdomain DOWNLOAD_TOOLS_SELECTION: all EDPM_ATTACH_EXTNET: 'true' EDPM_COMPUTE_ADDITIONAL_HOST_ROUTES: '''[]''' EDPM_COMPUTE_ADDITIONAL_NETWORKS: '''[]''' EDPM_COMPUTE_CELLS: '1' EDPM_COMPUTE_CEPH_ENABLED: 'true' EDPM_COMPUTE_CEPH_NOVA: 'true' EDPM_COMPUTE_DHCP_AGENT_ENABLED: 'true' EDPM_COMPUTE_SRIOV_ENABLED: 'true' EDPM_COMPUTE_SUFFIX: '0' EDPM_CONFIGURE_DEFAULT_ROUTE: 'true' EDPM_CONFIGURE_HUGEPAGES: 'false' EDPM_CONFIGURE_NETWORKING: 'true' EDPM_FIRSTBOOT_EXTRA: /tmp/edpm-firstboot-extra EDPM_NETWORKER_SUFFIX: '0' EDPM_TOTAL_NETWORKERS: '1' EDPM_TOTAL_NODES: '1' GALERA_REPLICAS: '' GENERATE_SSH_KEYS: 'true' GIT_CLONE_OPTS: '' GLANCE: config/samples/glance_v1beta1_glance.yaml GLANCEAPI_DEPL_IMG: unused GLANCE_BRANCH: main GLANCE_COMMIT_HASH: '' GLANCE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/config/samples/glance_v1beta1_glance.yaml GLANCE_IMG: quay.io/openstack-k8s-operators/glance-operator-index:latest GLANCE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/kuttl-test.yaml GLANCE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/test/kuttl/tests GLANCE_KUTTL_NAMESPACE: glance-kuttl-tests GLANCE_REPO: https://github.com/openstack-k8s-operators/glance-operator.git HEAT: config/samples/heat_v1beta1_heat.yaml HEATAPI_DEPL_IMG: unused HEATCFNAPI_DEPL_IMG: unused HEATENGINE_DEPL_IMG: unused HEAT_AUTH_ENCRYPTION_KEY: 767c3ed056cbaa3b9dfedb8c6f825bf0 HEAT_BRANCH: main 
HEAT_COMMIT_HASH: '' HEAT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/config/samples/heat_v1beta1_heat.yaml HEAT_IMG: quay.io/openstack-k8s-operators/heat-operator-index:latest HEAT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/kuttl-test.yaml HEAT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/test/kuttl/tests HEAT_KUTTL_NAMESPACE: heat-kuttl-tests HEAT_REPO: https://github.com/openstack-k8s-operators/heat-operator.git HEAT_SERVICE_ENABLED: 'true' HORIZON: config/samples/horizon_v1beta1_horizon.yaml HORIZON_BRANCH: main HORIZON_COMMIT_HASH: '' HORIZON_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/config/samples/horizon_v1beta1_horizon.yaml HORIZON_DEPL_IMG: unused HORIZON_IMG: quay.io/openstack-k8s-operators/horizon-operator-index:latest HORIZON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/kuttl-test.yaml HORIZON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/test/kuttl/tests HORIZON_KUTTL_NAMESPACE: horizon-kuttl-tests HORIZON_REPO: https://github.com/openstack-k8s-operators/horizon-operator.git INFRA_BRANCH: main INFRA_COMMIT_HASH: '' INFRA_IMG: quay.io/openstack-k8s-operators/infra-operator-index:latest INFRA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/kuttl-test.yaml INFRA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/test/kuttl/tests INFRA_KUTTL_NAMESPACE: infra-kuttl-tests INFRA_REPO: https://github.com/openstack-k8s-operators/infra-operator.git INSTALL_CERT_MANAGER: 'true' INSTALL_NMSTATE: true || false INSTALL_NNCP: true || false INTERNALAPI_HOST_ROUTES: '' IPV6_LAB_IPV4_NETWORK_IPADDRESS: 172.30.0.1/24 IPV6_LAB_IPV6_NETWORK_IPADDRESS: fd00:abcd:abcd:fc00::1/64 IPV6_LAB_LIBVIRT_STORAGE_POOL: default IPV6_LAB_MANAGE_FIREWALLD: 'true' IPV6_LAB_NAT64_HOST_IPV4: 172.30.0.2/24 IPV6_LAB_NAT64_HOST_IPV6: fd00:abcd:abcd:fc00::2/64 IPV6_LAB_NAT64_INSTANCE_NAME: nat64-router IPV6_LAB_NAT64_IPV6_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_NAT64_TAYGA_DYNAMIC_POOL: 192.168.255.0/24 IPV6_LAB_NAT64_TAYGA_IPV4: 192.168.255.1 IPV6_LAB_NAT64_TAYGA_IPV6: fd00:abcd:abcd:fc00::3 IPV6_LAB_NAT64_TAYGA_IPV6_PREFIX: fd00:abcd:abcd:fcff::/96 IPV6_LAB_NAT64_UPDATE_PACKAGES: 'false' IPV6_LAB_NETWORK_NAME: nat64 IPV6_LAB_SNO_CLUSTER_NETWORK: fd00:abcd:0::/48 IPV6_LAB_SNO_HOST_IP: fd00:abcd:abcd:fc00::11 IPV6_LAB_SNO_HOST_PREFIX: '64' IPV6_LAB_SNO_INSTANCE_NAME: sno IPV6_LAB_SNO_MACHINE_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_SNO_OCP_MIRROR_URL: https://mirror.openshift.com/pub/openshift-v4/clients/ocp IPV6_LAB_SNO_OCP_VERSION: latest-4.14 IPV6_LAB_SNO_SERVICE_NETWORK: fd00:abcd:abcd:fc03::/112 IPV6_LAB_SSH_PUB_KEY: /home/zuul/.ssh/id_rsa.pub IPV6_LAB_WORK_DIR: /home/zuul/.ipv6lab IRONIC: config/samples/ironic_v1beta1_ironic.yaml IRONICAPI_DEPL_IMG: unused IRONICCON_DEPL_IMG: unused IRONICINS_DEPL_IMG: unused IRONICNAG_DEPL_IMG: unused IRONICPXE_DEPL_IMG: unused IRONIC_BRANCH: main IRONIC_COMMIT_HASH: '' IRONIC_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/config/samples/ironic_v1beta1_ironic.yaml IRONIC_IMAGE: quay.io/metal3-io/ironic IRONIC_IMAGE_TAG: release-24.1 IRONIC_IMG: quay.io/openstack-k8s-operators/ironic-operator-index:latest IRONIC_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/kuttl-test.yaml IRONIC_KUTTL_DIR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/test/kuttl/tests IRONIC_KUTTL_NAMESPACE: ironic-kuttl-tests IRONIC_REPO: https://github.com/openstack-k8s-operators/ironic-operator.git KEYSTONEAPI: config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_DEPL_IMG: unused KEYSTONE_BRANCH: main KEYSTONE_COMMIT_HASH: '' KEYSTONE_FEDERATION_CLIENT_SECRET: COX8bmlKAWn56XCGMrKQJj7dgHNAOl6f KEYSTONE_FEDERATION_CRYPTO_PASSPHRASE: openstack KEYSTONE_IMG: quay.io/openstack-k8s-operators/keystone-operator-index:latest KEYSTONE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/kuttl-test.yaml KEYSTONE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/test/kuttl/tests KEYSTONE_KUTTL_NAMESPACE: keystone-kuttl-tests KEYSTONE_REPO: https://github.com/openstack-k8s-operators/keystone-operator.git KUBEADMIN_PWD: '12345678' LIBVIRT_SECRET: libvirt-secret LOKI_DEPLOY_MODE: openshift-network LOKI_DEPLOY_NAMESPACE: netobserv LOKI_DEPLOY_SIZE: 1x.demo LOKI_NAMESPACE: openshift-operators-redhat LOKI_OPERATOR_GROUP: openshift-operators-redhat-loki LOKI_SUBSCRIPTION: loki-operator LVMS_CR: '1' MANILA: config/samples/manila_v1beta1_manila.yaml MANILAAPI_DEPL_IMG: unused MANILASCH_DEPL_IMG: unused MANILASHARE_DEPL_IMG: unused MANILA_BRANCH: main MANILA_COMMIT_HASH: '' MANILA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/config/samples/manila_v1beta1_manila.yaml MANILA_IMG: quay.io/openstack-k8s-operators/manila-operator-index:latest MANILA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/kuttl-test.yaml MANILA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/test/kuttl/tests MANILA_KUTTL_NAMESPACE: manila-kuttl-tests MANILA_REPO: https://github.com/openstack-k8s-operators/manila-operator.git MANILA_SERVICE_ENABLED: 'true' MARIADB: config/samples/mariadb_v1beta1_galera.yaml MARIADB_BRANCH: main MARIADB_CHAINSAW_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/config.yaml MARIADB_CHAINSAW_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/tests MARIADB_CHAINSAW_NAMESPACE: mariadb-chainsaw-tests MARIADB_COMMIT_HASH: '' MARIADB_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/config/samples/mariadb_v1beta1_galera.yaml MARIADB_DEPL_IMG: unused MARIADB_IMG: quay.io/openstack-k8s-operators/mariadb-operator-index:latest MARIADB_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/kuttl-test.yaml MARIADB_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/kuttl/tests MARIADB_KUTTL_NAMESPACE: mariadb-kuttl-tests MARIADB_REPO: https://github.com/openstack-k8s-operators/mariadb-operator.git MEMCACHED: config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_DEPL_IMG: unused METADATA_SHARED_SECRET: '1234567842' METALLB_IPV6_POOL: fd00:aaaa::80-fd00:aaaa::90 METALLB_POOL: 192.168.122.80-192.168.122.90 MICROSHIFT: '0' NAMESPACE: openstack NETCONFIG: config/samples/network_v1beta1_netconfig.yaml NETCONFIG_CR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_netconfig.yaml NETCONFIG_DEPL_IMG: unused NETOBSERV_DEPLOY_NAMESPACE: netobserv NETOBSERV_NAMESPACE: openshift-netobserv-operator NETOBSERV_OPERATOR_GROUP: openshift-netobserv-operator-net NETOBSERV_SUBSCRIPTION: netobserv-operator NETWORK_BGP: 'false' NETWORK_DESIGNATE_ADDRESS_PREFIX: 172.28.0 NETWORK_DESIGNATE_EXT_ADDRESS_PREFIX: 172.50.0 NETWORK_INTERNALAPI_ADDRESS_PREFIX: 172.17.0 NETWORK_ISOLATION: 'true' NETWORK_ISOLATION_INSTANCE_NAME: crc NETWORK_ISOLATION_IPV4: 'true' NETWORK_ISOLATION_IPV4_ADDRESS: 172.16.1.1/24 NETWORK_ISOLATION_IPV4_NAT: 'true' NETWORK_ISOLATION_IPV6: 'false' NETWORK_ISOLATION_IPV6_ADDRESS: fd00:aaaa::1/64 NETWORK_ISOLATION_IP_ADDRESS: 192.168.122.10 NETWORK_ISOLATION_MAC: '52:54:00:11:11:10' NETWORK_ISOLATION_NETWORK_NAME: net-iso NETWORK_ISOLATION_NET_NAME: default NETWORK_ISOLATION_USE_DEFAULT_NETWORK: 'true' NETWORK_MTU: '1500' NETWORK_STORAGEMGMT_ADDRESS_PREFIX: 172.20.0 NETWORK_STORAGE_ADDRESS_PREFIX: 172.18.0 NETWORK_STORAGE_MACVLAN: '' NETWORK_TENANT_ADDRESS_PREFIX: 172.19.0 NETWORK_VLAN_START: '20' NETWORK_VLAN_STEP: '1' NEUTRONAPI: config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_DEPL_IMG: unused NEUTRON_BRANCH: main NEUTRON_COMMIT_HASH: '' NEUTRON_IMG: quay.io/openstack-k8s-operators/neutron-operator-index:latest NEUTRON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/kuttl-test.yaml NEUTRON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/test/kuttl/tests NEUTRON_KUTTL_NAMESPACE: neutron-kuttl-tests NEUTRON_REPO: https://github.com/openstack-k8s-operators/neutron-operator.git NFS_HOME: /home/nfs NMSTATE_NAMESPACE: openshift-nmstate NMSTATE_OPERATOR_GROUP: openshift-nmstate-tn6k8 NMSTATE_SUBSCRIPTION: kubernetes-nmstate-operator NNCP_ADDITIONAL_HOST_ROUTES: '' NNCP_BGP_1_INTERFACE: enp7s0 NNCP_BGP_1_IP_ADDRESS: 100.65.4.2 NNCP_BGP_2_INTERFACE: enp8s0 NNCP_BGP_2_IP_ADDRESS: 100.64.4.2 NNCP_BRIDGE: ospbr NNCP_CLEANUP_TIMEOUT: 120s NNCP_CTLPLANE_IPV6_ADDRESS_PREFIX: 'fd00:aaaa::' NNCP_CTLPLANE_IPV6_ADDRESS_SUFFIX: '10' NNCP_CTLPLANE_IP_ADDRESS_PREFIX: 192.168.122 NNCP_CTLPLANE_IP_ADDRESS_SUFFIX: '10' NNCP_DNS_SERVER: 192.168.122.1 NNCP_DNS_SERVER_IPV6: fd00:aaaa::1 NNCP_GATEWAY: 192.168.122.1 NNCP_GATEWAY_IPV6: fd00:aaaa::1 NNCP_INTERFACE: enp6s0 NNCP_NODES: '' NNCP_TIMEOUT: 240s NOVA: config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_BRANCH: main NOVA_COMMIT_HASH: '' NOVA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/nova-operator/config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_IMG: quay.io/openstack-k8s-operators/nova-operator-index:latest NOVA_REPO: https://github.com/openstack-k8s-operators/nova-operator.git NUMBER_OF_INSTANCES: '1' OCP_NETWORK_NAME: crc OCTAVIA: config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_BRANCH: main OCTAVIA_COMMIT_HASH: '' OCTAVIA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_IMG: quay.io/openstack-k8s-operators/octavia-operator-index:latest OCTAVIA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/kuttl-test.yaml OCTAVIA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/test/kuttl/tests OCTAVIA_KUTTL_NAMESPACE: 
octavia-kuttl-tests OCTAVIA_REPO: https://github.com/openstack-k8s-operators/octavia-operator.git OKD: 'false' OPENSTACK_BRANCH: main OPENSTACK_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-bundle:latest OPENSTACK_COMMIT_HASH: '' OPENSTACK_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_CRDS_DIR: openstack_crds OPENSTACK_CTLPLANE: config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_IMG: quay.io/openstack-k8s-operators/openstack-operator-index:latest OPENSTACK_K8S_BRANCH: main OPENSTACK_K8S_TAG: latest OPENSTACK_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/kuttl-test.yaml OPENSTACK_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/test/kuttl/tests OPENSTACK_KUTTL_NAMESPACE: openstack-kuttl-tests OPENSTACK_NEUTRON_CUSTOM_CONF: '' OPENSTACK_REPO: https://github.com/openstack-k8s-operators/openstack-operator.git OPENSTACK_STORAGE_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-storage-bundle:latest OPERATOR_BASE_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator OPERATOR_CHANNEL: '' OPERATOR_NAMESPACE: openstack-operators OPERATOR_SOURCE: '' OPERATOR_SOURCE_NAMESPACE: '' OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm OVNCONTROLLER: config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_NMAP: 'true' OVNDBS: config/samples/ovn_v1beta1_ovndbcluster.yaml OVNDBS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovndbcluster.yaml OVNNORTHD: config/samples/ovn_v1beta1_ovnnorthd.yaml OVNNORTHD_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovnnorthd.yaml OVN_BRANCH: main OVN_COMMIT_HASH: '' OVN_IMG: quay.io/openstack-k8s-operators/ovn-operator-index:latest OVN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/kuttl-test.yaml OVN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/test/kuttl/tests OVN_KUTTL_NAMESPACE: ovn-kuttl-tests OVN_REPO: https://github.com/openstack-k8s-operators/ovn-operator.git PASSWORD: '12345678' PLACEMENTAPI: config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_DEPL_IMG: unused PLACEMENT_BRANCH: main PLACEMENT_COMMIT_HASH: '' PLACEMENT_IMG: quay.io/openstack-k8s-operators/placement-operator-index:latest PLACEMENT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/kuttl-test.yaml PLACEMENT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/test/kuttl/tests PLACEMENT_KUTTL_NAMESPACE: placement-kuttl-tests PLACEMENT_REPO: https://github.com/openstack-k8s-operators/placement-operator.git PULL_SECRET: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/pull-secret.txt RABBITMQ: docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_BRANCH: patches RABBITMQ_COMMIT_HASH: '' RABBITMQ_CR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/rabbitmq-operator/docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_DEPL_IMG: unused RABBITMQ_IMG: quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest RABBITMQ_REPO: https://github.com/openstack-k8s-operators/rabbitmq-cluster-operator.git REDHAT_OPERATORS: 'false' REDIS: config/samples/redis_v1beta1_redis.yaml REDIS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator-redis/config/samples/redis_v1beta1_redis.yaml REDIS_DEPL_IMG: unused RH_REGISTRY_PWD: '' RH_REGISTRY_USER: '' SECRET: osp-secret SG_CORE_DEPL_IMG: unused STANDALONE_COMPUTE_DRIVER: libvirt STANDALONE_EXTERNAL_NET_PREFFIX: 172.21.0 STANDALONE_INTERNALAPI_NET_PREFIX: 172.17.0 STANDALONE_STORAGEMGMT_NET_PREFIX: 172.20.0 STANDALONE_STORAGE_NET_PREFIX: 172.18.0 STANDALONE_TENANT_NET_PREFIX: 172.19.0 STORAGEMGMT_HOST_ROUTES: '' STORAGE_CLASS: local-storage STORAGE_HOST_ROUTES: '' SWIFT: config/samples/swift_v1beta1_swift.yaml SWIFT_BRANCH: main SWIFT_COMMIT_HASH: '' SWIFT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/config/samples/swift_v1beta1_swift.yaml SWIFT_IMG: quay.io/openstack-k8s-operators/swift-operator-index:latest SWIFT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/kuttl-test.yaml SWIFT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests SWIFT_KUTTL_NAMESPACE: swift-kuttl-tests SWIFT_REPO: https://github.com/openstack-k8s-operators/swift-operator.git TELEMETRY: config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_BRANCH: main TELEMETRY_COMMIT_HASH: '' TELEMETRY_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_IMG: quay.io/openstack-k8s-operators/telemetry-operator-index:latest TELEMETRY_KUTTL_BASEDIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator TELEMETRY_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/kuttl-test.yaml TELEMETRY_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/test/kuttl/suites TELEMETRY_KUTTL_NAMESPACE: telemetry-kuttl-tests TELEMETRY_KUTTL_RELPATH: test/kuttl/suites TELEMETRY_REPO: https://github.com/openstack-k8s-operators/telemetry-operator.git TENANT_HOST_ROUTES: '' TIMEOUT: 300s TLS_ENABLED: 'false' tripleo_deploy: 'export REGISTRY_PWD:' 2025-12-08 17:53:08,230 p=31902 u=zuul n=ansible | TASK [install_yamls : Generate make targets install_yamls_path={{ cifmw_install_yamls_repo }}, output_directory={{ cifmw_install_yamls_tasks_out }}] *** 2025-12-08 17:53:08,230 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.053) 0:00:59.485 ******* 2025-12-08 17:53:08,230 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.053) 0:00:59.484 ******* 2025-12-08 17:53:08,544 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:08,552 p=31902 u=zuul n=ansible | TASK [install_yamls : Debug generate_make module var=cifmw_generate_makes] ***** 2025-12-08 17:53:08,552 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.322) 0:00:59.808 ******* 2025-12-08 17:53:08,552 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.322) 0:00:59.806 ******* 2025-12-08 17:53:08,582 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_generate_makes: changed: false debug: 
/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/Makefile: - all - help - cleanup - deploy_cleanup - wait - crc_storage - crc_storage_cleanup - crc_storage_release - crc_storage_with_retries - crc_storage_cleanup_with_retries - operator_namespace - namespace - namespace_cleanup - input - input_cleanup - crc_bmo_setup - crc_bmo_cleanup - openstack_prep - openstack - openstack_wait - openstack_init - openstack_cleanup - openstack_repo - openstack_deploy_prep - openstack_deploy - openstack_wait_deploy - openstack_deploy_cleanup - openstack_update_run - update_services - update_system - openstack_patch_version - edpm_deploy_generate_keys - edpm_patch_ansible_runner_image - edpm_deploy_prep - edpm_deploy_cleanup - edpm_deploy - edpm_deploy_baremetal_prep - edpm_deploy_baremetal - edpm_wait_deploy_baremetal - edpm_wait_deploy - edpm_register_dns - edpm_nova_discover_hosts - openstack_crds - openstack_crds_cleanup - edpm_deploy_networker_prep - edpm_deploy_networker_cleanup - edpm_deploy_networker - infra_prep - infra - infra_cleanup - dns_deploy_prep - dns_deploy - dns_deploy_cleanup - netconfig_deploy_prep - netconfig_deploy - netconfig_deploy_cleanup - memcached_deploy_prep - memcached_deploy - memcached_deploy_cleanup - keystone_prep - keystone - keystone_cleanup - keystone_deploy_prep - keystone_deploy - keystone_deploy_cleanup - barbican_prep - barbican - barbican_cleanup - barbican_deploy_prep - barbican_deploy - barbican_deploy_validate - barbican_deploy_cleanup - mariadb - mariadb_cleanup - mariadb_deploy_prep - mariadb_deploy - mariadb_deploy_cleanup - placement_prep - placement - placement_cleanup - placement_deploy_prep - placement_deploy - placement_deploy_cleanup - glance_prep - glance - glance_cleanup - glance_deploy_prep - glance_deploy - glance_deploy_cleanup - ovn_prep - ovn - ovn_cleanup - ovn_deploy_prep - ovn_deploy - ovn_deploy_cleanup - neutron_prep - neutron - neutron_cleanup - neutron_deploy_prep - neutron_deploy - neutron_deploy_cleanup - cinder_prep - cinder - cinder_cleanup - cinder_deploy_prep - cinder_deploy - cinder_deploy_cleanup - rabbitmq_prep - rabbitmq - rabbitmq_cleanup - rabbitmq_deploy_prep - rabbitmq_deploy - rabbitmq_deploy_cleanup - ironic_prep - ironic - ironic_cleanup - ironic_deploy_prep - ironic_deploy - ironic_deploy_cleanup - octavia_prep - octavia - octavia_cleanup - octavia_deploy_prep - octavia_deploy - octavia_deploy_cleanup - designate_prep - designate - designate_cleanup - designate_deploy_prep - designate_deploy - designate_deploy_cleanup - nova_prep - nova - nova_cleanup - nova_deploy_prep - nova_deploy - nova_deploy_cleanup - mariadb_kuttl_run - mariadb_kuttl - kuttl_db_prep - kuttl_db_cleanup - kuttl_common_prep - kuttl_common_cleanup - keystone_kuttl_run - keystone_kuttl - barbican_kuttl_run - barbican_kuttl - placement_kuttl_run - placement_kuttl - cinder_kuttl_run - cinder_kuttl - neutron_kuttl_run - neutron_kuttl - octavia_kuttl_run - octavia_kuttl - designate_kuttl - designate_kuttl_run - ovn_kuttl_run - ovn_kuttl - infra_kuttl_run - infra_kuttl - ironic_kuttl_run - ironic_kuttl - ironic_kuttl_crc - heat_kuttl_run - heat_kuttl - heat_kuttl_crc - ansibleee_kuttl_run - ansibleee_kuttl_cleanup - ansibleee_kuttl_prep - ansibleee_kuttl - glance_kuttl_run - glance_kuttl - manila_kuttl_run - manila_kuttl - swift_kuttl_run - swift_kuttl - horizon_kuttl_run - horizon_kuttl - openstack_kuttl_run - openstack_kuttl - mariadb_chainsaw_run - mariadb_chainsaw - horizon_prep - horizon - horizon_cleanup - horizon_deploy_prep - 
horizon_deploy - horizon_deploy_cleanup - heat_prep - heat - heat_cleanup - heat_deploy_prep - heat_deploy - heat_deploy_cleanup - ansibleee_prep - ansibleee - ansibleee_cleanup - baremetal_prep - baremetal - baremetal_cleanup - ceph_help - ceph - ceph_cleanup - rook_prep - rook - rook_deploy_prep - rook_deploy - rook_crc_disk - rook_cleanup - lvms - nmstate - nncp - nncp_cleanup - netattach - netattach_cleanup - metallb - metallb_config - metallb_config_cleanup - metallb_cleanup - loki - loki_cleanup - loki_deploy - loki_deploy_cleanup - netobserv - netobserv_cleanup - netobserv_deploy - netobserv_deploy_cleanup - manila_prep - manila - manila_cleanup - manila_deploy_prep - manila_deploy - manila_deploy_cleanup - telemetry_prep - telemetry - telemetry_cleanup - telemetry_deploy_prep - telemetry_deploy - telemetry_deploy_cleanup - telemetry_kuttl_run - telemetry_kuttl - swift_prep - swift - swift_cleanup - swift_deploy_prep - swift_deploy - swift_deploy_cleanup - certmanager - certmanager_cleanup - validate_marketplace - redis_deploy_prep - redis_deploy - redis_deploy_cleanup - set_slower_etcd_profile /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup/Makefile: - help - download_tools - nfs - nfs_cleanup - crc - crc_cleanup - crc_scrub - crc_attach_default_interface - crc_attach_default_interface_cleanup - ipv6_lab_network - ipv6_lab_network_cleanup - ipv6_lab_nat64_router - ipv6_lab_nat64_router_cleanup - ipv6_lab_sno - ipv6_lab_sno_cleanup - ipv6_lab - ipv6_lab_cleanup - attach_default_interface - attach_default_interface_cleanup - network_isolation_bridge - network_isolation_bridge_cleanup - edpm_baremetal_compute - edpm_compute - edpm_compute_bootc - edpm_ansible_runner - edpm_computes_bgp - edpm_compute_repos - edpm_compute_cleanup - edpm_networker - edpm_networker_cleanup - edpm_deploy_instance - tripleo_deploy - standalone_deploy - standalone_sync - standalone - standalone_cleanup - standalone_snapshot - standalone_revert - cifmw_prepare - cifmw_cleanup - bmaas_network - bmaas_network_cleanup - bmaas_route_crc_and_crc_bmaas_networks - bmaas_route_crc_and_crc_bmaas_networks_cleanup - bmaas_crc_attach_network - bmaas_crc_attach_network_cleanup - bmaas_crc_baremetal_bridge - bmaas_crc_baremetal_bridge_cleanup - bmaas_baremetal_net_nad - bmaas_baremetal_net_nad_cleanup - bmaas_metallb - bmaas_metallb_cleanup - bmaas_virtual_bms - bmaas_virtual_bms_cleanup - bmaas_sushy_emulator - bmaas_sushy_emulator_cleanup - bmaas_sushy_emulator_wait - bmaas_generate_nodes_yaml - bmaas - bmaas_cleanup failed: false success: true 2025-12-08 17:53:08,589 p=31902 u=zuul n=ansible | TASK [install_yamls : Create the install_yamls parameters file dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml, content={{ { 'cifmw_install_yamls_environment': cifmw_install_yamls_environment, 'cifmw_install_yamls_defaults': cifmw_install_yamls_defaults } | to_nice_yaml }}, mode=0644] *** 2025-12-08 17:53:08,589 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.037) 0:00:59.845 ******* 2025-12-08 17:53:08,589 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.037) 0:00:59.843 ******* 2025-12-08 17:53:09,071 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:09,085 p=31902 u=zuul n=ansible | TASK [install_yamls : Create empty cifmw_install_yamls_environment if needed cifmw_install_yamls_environment={}] *** 2025-12-08 17:53:09,085 p=31902 u=zuul n=ansible | Monday 08 
December 2025 17:53:09 +0000 (0:00:00.496) 0:01:00.341 ******* 2025-12-08 17:53:09,086 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.496) 0:01:00.340 ******* 2025-12-08 17:53:09,119 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:09,150 p=31902 u=zuul n=ansible | TASK [discover_latest_image : Get latest image url={{ cifmw_discover_latest_image_base_url }}, image_prefix={{ cifmw_discover_latest_image_qcow_prefix }}, images_file={{ cifmw_discover_latest_image_images_file }}] *** 2025-12-08 17:53:09,150 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.064) 0:01:00.406 ******* 2025-12-08 17:53:09,150 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.064) 0:01:00.405 ******* 2025-12-08 17:53:09,699 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:09,710 p=31902 u=zuul n=ansible | TASK [discover_latest_image : Export facts accordingly cifmw_discovered_image_name={{ discovered_image['data']['image_name'] }}, cifmw_discovered_image_url={{ discovered_image['data']['image_url'] }}, cifmw_discovered_hash={{ discovered_image['data']['hash'] }}, cifmw_discovered_hash_algorithm={{ discovered_image['data']['hash_algorithm'] }}, cacheable=True] *** 2025-12-08 17:53:09,710 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.559) 0:01:00.966 ******* 2025-12-08 17:53:09,710 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.559) 0:01:00.964 ******* 2025-12-08 17:53:09,737 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:09,750 p=31902 u=zuul n=ansible | TASK [Create artifacts with custom params mode=0644, dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/custom-params.yml, content={{ ci_framework_params | to_nice_yaml }}] *** 2025-12-08 17:53:09,750 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.040) 0:01:01.006 ******* 2025-12-08 17:53:09,750 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.040) 0:01:01.005 ******* 2025-12-08 17:53:10,211 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | PLAY RECAP ********************************************************************* 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | localhost : ok=43 changed=23 unreachable=0 failed=0 skipped=40 rescued=0 ignored=0 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:10 +0000 (0:00:00.483) 0:01:01.489 ******* 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | ci_setup : Install needed packages ------------------------------------- 27.32s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Initialize python venv and install requirements ------------ 8.49s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | ci_setup : Install openshift client ------------------------------------- 5.32s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Get repo-setup repository ---------------------------------- 2.25s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_ca : Update ca bundle ------------------------------------------- 1.65s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | Gathering Facts --------------------------------------------------------- 1.28s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | ci_setup : Manage 
directories ------------------------------------------- 1.21s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Make sure git-core package is installed -------------------- 0.99s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Install repo-setup package --------------------------------- 0.88s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Dump full hash in delorean.repo.md5 file ------------------- 0.72s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_yamls : Ensure directories exist -------------------------------- 0.67s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Run repo-setup --------------------------------------------- 0.65s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Ensure directories are present ----------------------------- 0.62s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_yamls : Get environment structure ------------------------------- 0.61s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | discover_latest_image : Get latest image -------------------------------- 0.56s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Run repo-setup-get-hash ------------------------------------ 0.53s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Cleanup existing metadata ---------------------------------- 0.50s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_yamls : Create the install_yamls parameters file ---------------- 0.50s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | Create artifacts with custom params ------------------------------------- 0.48s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | repo_setup : Remove existing repos from /etc/yum.repos.d directory ------ 0.47s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:10 +0000 (0:00:00.484) 0:01:01.489 ******* 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ci_setup --------------------------------------------------------------- 35.36s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | repo_setup ------------------------------------------------------------- 18.41s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | install_yamls ----------------------------------------------------------- 2.97s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | install_ca -------------------------------------------------------------- 2.15s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | gather_facts ------------------------------------------------------------ 1.28s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | discover_latest_image --------------------------------------------------- 0.60s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ansible.builtin.copy ---------------------------------------------------- 0.48s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ansible.builtin.include_role -------------------------------------------- 0.12s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ansible.builtin.set_fact ------------------------------------------------ 0.08s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | total ------------------------------------------------------------------ 61.46s 2025-12-08 17:53:11,534 p=32717 u=zuul n=ansible | PLAY [Run pre_infra hooks] 
***************************************************** 2025-12-08 17:53:11,565 p=32717 u=zuul n=ansible | TASK [run_hook : Assert parameters are valid quiet=True, that=['_list_hooks is not string', '_list_hooks is not mapping', '_list_hooks is iterable', '(hooks | default([])) is not string', '(hooks | default([])) is not mapping', '(hooks | default([])) is iterable']] *** 2025-12-08 17:53:11,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.046) 0:00:00.046 ******* 2025-12-08 17:53:11,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.046) 0:00:00.046 ******* 2025-12-08 17:53:11,640 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:11,651 p=32717 u=zuul n=ansible | TASK [run_hook : Assert single hooks are all mappings quiet=True, that=['_not_mapping_hooks | length == 0'], msg=All single hooks must be a list of mappings or a mapping.] *** 2025-12-08 17:53:11,651 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.085) 0:00:00.131 ******* 2025-12-08 17:53:11,651 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.085) 0:00:00.131 ******* 2025-12-08 17:53:11,739 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:11,749 p=32717 u=zuul n=ansible | TASK [run_hook : Loop on hooks for pre_infra _raw_params={{ hook.type }}.yml] *** 2025-12-08 17:53:11,749 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.098) 0:00:00.230 ******* 2025-12-08 17:53:11,749 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.098) 0:00:00.229 ******* 2025-12-08 17:53:11,825 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:11,867 p=32717 u=zuul n=ansible | PLAY [Prepare host virtualization] ********************************************* 2025-12-08 17:53:11,894 p=32717 u=zuul n=ansible | TASK [Load parameters files dir={{ cifmw_basedir }}/artifacts/parameters] ****** 2025-12-08 17:53:11,894 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.145) 0:00:00.375 ******* 2025-12-08 17:53:11,894 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.145) 0:00:00.374 ******* 2025-12-08 17:53:12,006 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:12,028 p=32717 u=zuul n=ansible | TASK [Ensure libvirt is present/configured name=libvirt_manager] *************** 2025-12-08 17:53:12,028 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.134) 0:00:00.509 ******* 2025-12-08 17:53:12,029 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.134) 0:00:00.509 ******* 2025-12-08 17:53:12,055 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,065 p=32717 u=zuul n=ansible | TASK [Prepare OpenShift provisioner node name=openshift_provisioner_node] ****** 2025-12-08 17:53:12,065 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.036) 0:00:00.546 ******* 2025-12-08 17:53:12,065 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.036) 0:00:00.545 ******* 2025-12-08 17:53:12,095 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,136 p=32717 u=zuul n=ansible | PLAY [Prepare the platform] **************************************************** 2025-12-08 17:53:12,170 p=32717 u=zuul n=ansible | TASK [Load parameters files dir={{ cifmw_basedir }}/artifacts/parameters] ****** 2025-12-08 17:53:12,170 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.104) 0:00:00.651
******* 2025-12-08 17:53:12,170 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.104) 0:00:00.650 ******* 2025-12-08 17:53:12,219 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:12,230 p=32717 u=zuul n=ansible | TASK [networking_mapper : Check for Networking Environment Definition file existence path={{ cifmw_networking_mapper_networking_env_def_path }}] *** 2025-12-08 17:53:12,230 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.059) 0:00:00.711 ******* 2025-12-08 17:53:12,230 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.059) 0:00:00.710 ******* 2025-12-08 17:53:12,505 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:12,515 p=32717 u=zuul n=ansible | TASK [networking_mapper : Check for Networking Definition file existence that=['_net_env_def_stat.stat.exists'], msg=Ensure that the Networking Environment Definition file exists in {{ cifmw_networking_mapper_networking_env_def_path }}, quiet=True] *** 2025-12-08 17:53:12,515 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.285) 0:00:00.996 ******* 2025-12-08 17:53:12,515 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.285) 0:00:00.995 ******* 2025-12-08 17:53:12,535 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,545 p=32717 u=zuul n=ansible | TASK [networking_mapper : Load the Networking Definition from file path={{ cifmw_networking_mapper_networking_env_def_path }}] *** 2025-12-08 17:53:12,545 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.030) 0:00:01.026 ******* 2025-12-08 17:53:12,545 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.030) 0:00:01.025 ******* 2025-12-08 17:53:12,565 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,579 p=32717 u=zuul n=ansible | TASK [networking_mapper : Set cifmw_networking_env_definition is present cifmw_networking_env_definition={{ _net_env_def_slurp['content'] | b64decode | from_yaml }}, cacheable=True] *** 2025-12-08 17:53:12,579 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.033) 0:00:01.060 ******* 2025-12-08 17:53:12,579 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.033) 0:00:01.059 ******* 2025-12-08 17:53:12,599 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,625 p=32717 u=zuul n=ansible | TASK [Deploy OCP using Hive name=hive] ***************************************** 2025-12-08 17:53:12,625 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.046) 0:00:01.106 ******* 2025-12-08 17:53:12,626 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.046) 0:00:01.106 ******* 2025-12-08 17:53:12,644 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,654 p=32717 u=zuul n=ansible | TASK [Prepare CRC name=rhol_crc] *********************************************** 2025-12-08 17:53:12,654 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.028) 0:00:01.135 ******* 2025-12-08 17:53:12,654 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.028) 0:00:01.134 ******* 2025-12-08 17:53:12,676 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,691 p=32717 u=zuul n=ansible | TASK [Deploy OpenShift cluster using dev-scripts name=devscripts] ************** 2025-12-08 17:53:12,691 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.037) 0:00:01.172
******* 2025-12-08 17:53:12,691 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.037) 0:00:01.171 ******* 2025-12-08 17:53:12,715 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,725 p=32717 u=zuul n=ansible | TASK [openshift_login : Ensure output directory exists path={{ cifmw_openshift_login_basedir }}/artifacts, state=directory, mode=0755] *** 2025-12-08 17:53:12,725 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.034) 0:00:01.206 ******* 2025-12-08 17:53:12,725 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.034) 0:00:01.205 ******* 2025-12-08 17:53:13,060 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,074 p=32717 u=zuul n=ansible | TASK [openshift_login : OpenShift login _raw_params=login.yml] ***************** 2025-12-08 17:53:13,074 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.349) 0:00:01.555 ******* 2025-12-08 17:53:13,075 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.349) 0:00:01.555 ******* 2025-12-08 17:53:13,104 p=32717 u=zuul n=ansible | included: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/openshift_login/tasks/login.yml for localhost 2025-12-08 17:53:13,115 p=32717 u=zuul n=ansible | TASK [openshift_login : Check if the password file is present path={{ cifmw_openshift_login_password_file | default(cifmw_openshift_password_file) }}] *** 2025-12-08 17:53:13,115 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.040) 0:00:01.596 ******* 2025-12-08 17:53:13,115 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.040) 0:00:01.595 ******* 2025-12-08 17:53:13,136 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,151 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch user password content src={{ cifmw_openshift_login_password_file | default(cifmw_openshift_password_file) }}] *** 2025-12-08 17:53:13,151 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.035) 0:00:01.632 ******* 2025-12-08 17:53:13,151 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.035) 0:00:01.631 ******* 2025-12-08 17:53:13,176 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,187 p=32717 u=zuul n=ansible | TASK [openshift_login : Set user password as a fact cifmw_openshift_login_password={{ cifmw_openshift_login_password_file_slurp.content | b64decode }}, cacheable=True] *** 2025-12-08 17:53:13,187 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:01.668 ******* 2025-12-08 17:53:13,187 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:01.667 ******* 2025-12-08 17:53:13,208 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,218 p=32717 u=zuul n=ansible | TASK [openshift_login : Set role variables cifmw_openshift_login_kubeconfig={{ cifmw_openshift_login_kubeconfig | default(cifmw_openshift_kubeconfig) | default( ansible_env.KUBECONFIG if 'KUBECONFIG' in ansible_env else cifmw_openshift_login_kubeconfig_default_path ) | trim }}, cifmw_openshift_login_user={{ cifmw_openshift_login_user | default(cifmw_openshift_user) | default(omit) }}, cifmw_openshift_login_password={{ cifmw_openshift_login_password | default(cifmw_openshift_password) | default(omit) }}, cifmw_openshift_login_api={{ cifmw_openshift_login_api | default(cifmw_openshift_api) | default(omit) }}, cifmw_openshift_login_cert_login={{ 
cifmw_openshift_login_cert_login | default(false)}}, cifmw_openshift_login_provided_token={{ cifmw_openshift_provided_token | default(omit) }}, cacheable=True] *** 2025-12-08 17:53:13,218 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.030) 0:00:01.698 ******* 2025-12-08 17:53:13,218 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.030) 0:00:01.698 ******* 2025-12-08 17:53:13,254 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,268 p=32717 u=zuul n=ansible | TASK [openshift_login : Check if kubeconfig exists path={{ cifmw_openshift_login_kubeconfig }}] *** 2025-12-08 17:53:13,268 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:01.749 ******* 2025-12-08 17:53:13,268 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:01.748 ******* 2025-12-08 17:53:13,479 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,500 p=32717 u=zuul n=ansible | TASK [openshift_login : Assert that enough data is provided to log in to OpenShift that=cifmw_openshift_login_kubeconfig_stat.stat.exists or (cifmw_openshift_login_provided_token is defined and cifmw_openshift_login_provided_token != '') or ( (cifmw_openshift_login_user is defined) and (cifmw_openshift_login_password is defined) and (cifmw_openshift_login_api is defined) ), msg=If an existing kubeconfig is not provided user/pwd or provided/initial token and API URL must be given] *** 2025-12-08 17:53:13,500 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.232) 0:00:01.981 ******* 2025-12-08 17:53:13,500 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.232) 0:00:01.980 ******* 2025-12-08 17:53:13,547 p=32717 u=zuul n=ansible | ok: [localhost] => changed: false msg: All assertions passed 2025-12-08 17:53:13,573 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch kubeconfig content src={{ cifmw_openshift_login_kubeconfig }}] *** 2025-12-08 17:53:13,573 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.072) 0:00:02.054 ******* 2025-12-08 17:53:13,573 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.073) 0:00:02.053 ******* 2025-12-08 17:53:13,613 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,634 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch x509 key based users cifmw_openshift_login_key_based_users={{ ( cifmw_openshift_login_kubeconfig_content_b64.content | b64decode | from_yaml ). 
users | default([]) | selectattr('user.client-certificate-data', 'defined') | map(attribute="name") | map("split", "/") | map("first") }}, cacheable=True] *** 2025-12-08 17:53:13,635 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.061) 0:00:02.115 ******* 2025-12-08 17:53:13,635 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.061) 0:00:02.115 ******* 2025-12-08 17:53:13,668 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,684 p=32717 u=zuul n=ansible | TASK [openshift_login : Assign key based user if not provided and available cifmw_openshift_login_user={{ (cifmw_openshift_login_assume_cert_system_user | ternary('system:', '')) + (cifmw_openshift_login_key_based_users | map('replace', 'system:', '') | unique | first) }}, cifmw_openshift_login_cert_login=True, cacheable=True] *** 2025-12-08 17:53:13,685 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.165 ******* 2025-12-08 17:53:13,685 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.165 ******* 2025-12-08 17:53:13,718 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,729 p=32717 u=zuul n=ansible | TASK [openshift_login : Set the retry count cifmw_openshift_login_retries_cnt={{ 0 if cifmw_openshift_login_retries_cnt is undefined else cifmw_openshift_login_retries_cnt|int + 1 }}] *** 2025-12-08 17:53:13,729 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.044) 0:00:02.210 ******* 2025-12-08 17:53:13,730 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.044) 0:00:02.210 ******* 2025-12-08 17:53:13,770 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,780 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch token _raw_params=try_login.yml] ***************** 2025-12-08 17:53:13,780 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.261 ******* 2025-12-08 17:53:13,780 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.260 ******* 2025-12-08 17:53:13,804 p=32717 u=zuul n=ansible | included: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/openshift_login/tasks/try_login.yml for localhost 2025-12-08 17:53:13,816 p=32717 u=zuul n=ansible | TASK [openshift_login : Try get OpenShift access token _raw_params=oc whoami -t] *** 2025-12-08 17:53:13,816 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:02.297 ******* 2025-12-08 17:53:13,816 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:02.296 ******* 2025-12-08 17:53:13,841 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,853 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift token output_dir={{ cifmw_openshift_login_basedir }}/artifacts, script=oc login {%- if cifmw_openshift_login_provided_token is not defined %} {%- if cifmw_openshift_login_user is defined %} -u {{ cifmw_openshift_login_user }} {%- endif %} {%- if cifmw_openshift_login_password is defined %} -p {{ cifmw_openshift_login_password }} {%- endif %} {% else %} --token={{ cifmw_openshift_login_provided_token }} {%- endif %} {%- if cifmw_openshift_login_skip_tls_verify|bool %} --insecure-skip-tls-verify=true {%- endif %} {%- if cifmw_openshift_login_api is defined %} {{ cifmw_openshift_login_api }} {%- endif %}] *** 2025-12-08 17:53:13,854 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 
(0:00:00.037) 0:00:02.334 ******* 2025-12-08 17:53:13,854 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.037) 0:00:02.334 ******* 2025-12-08 17:53:13,924 p=32717 u=zuul n=ansible | Follow script's output here: /home/zuul/ci-framework-data/logs/ci_script_000_fetch_openshift.log 2025-12-08 17:53:14,323 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:14,332 p=32717 u=zuul n=ansible | TASK [openshift_login : Ensure kubeconfig is provided that=cifmw_openshift_login_kubeconfig != ""] *** 2025-12-08 17:53:14,333 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.479) 0:00:02.813 ******* 2025-12-08 17:53:14,333 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.479) 0:00:02.813 ******* 2025-12-08 17:53:14,355 p=32717 u=zuul n=ansible | ok: [localhost] => changed: false msg: All assertions passed 2025-12-08 17:53:14,375 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch new OpenShift access token _raw_params=oc whoami -t] *** 2025-12-08 17:53:14,375 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.042) 0:00:02.856 ******* 2025-12-08 17:53:14,376 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.042) 0:00:02.856 ******* 2025-12-08 17:53:14,888 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:14,913 p=32717 u=zuul n=ansible | TASK [openshift_login : Set new OpenShift token cifmw_openshift_login_token={{ (not cifmw_openshift_login_new_token_out.skipped | default(false)) | ternary(cifmw_openshift_login_new_token_out.stdout, cifmw_openshift_login_whoami_out.stdout) }}, cacheable=True] *** 2025-12-08 17:53:14,914 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.538) 0:00:03.394 ******* 2025-12-08 17:53:14,914 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.538) 0:00:03.394 ******* 2025-12-08 17:53:14,963 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:14,982 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift API URL _raw_params=oc whoami --show-server=true] *** 2025-12-08 17:53:14,982 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.068) 0:00:03.463 ******* 2025-12-08 17:53:14,983 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.068) 0:00:03.463 ******* 2025-12-08 17:53:15,364 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:15,373 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift kubeconfig context _raw_params=oc whoami -c] *** 2025-12-08 17:53:15,373 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.390) 0:00:03.853 ******* 2025-12-08 17:53:15,373 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.390) 0:00:03.853 ******* 2025-12-08 17:53:15,651 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:15,662 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift current user _raw_params=oc whoami] **** 2025-12-08 17:53:15,662 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.289) 0:00:04.142 ******* 2025-12-08 17:53:15,662 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.289) 0:00:04.142 ******* 2025-12-08 17:53:16,045 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:16,058 p=32717 u=zuul n=ansible | TASK [openshift_login : Set OpenShift user, context and API facts cifmw_openshift_login_api={{ cifmw_openshift_login_api_out.stdout }}, 
cifmw_openshift_login_context={{ cifmw_openshift_login_context_out.stdout }}, cifmw_openshift_login_user={{ _oauth_user }}, cifmw_openshift_kubeconfig={{ cifmw_openshift_login_kubeconfig }}, cifmw_openshift_api={{ cifmw_openshift_login_api_out.stdout }}, cifmw_openshift_context={{ cifmw_openshift_login_context_out.stdout }}, cifmw_openshift_user={{ _oauth_user }}, cifmw_openshift_token={{ cifmw_openshift_login_token | default(omit) }}, cifmw_install_yamls_environment={{ ( cifmw_install_yamls_environment | combine({'KUBECONFIG': cifmw_openshift_login_kubeconfig}) ) if cifmw_install_yamls_environment is defined else omit }}, cacheable=True] *** 2025-12-08 17:53:16,058 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.396) 0:00:04.539 ******* 2025-12-08 17:53:16,058 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.396) 0:00:04.539 ******* 2025-12-08 17:53:16,091 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:16,104 p=32717 u=zuul n=ansible | TASK [openshift_login : Create the openshift_login parameters file dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/openshift-login-params.yml, content={{ cifmw_openshift_login_params_content | from_yaml | to_nice_yaml }}, mode=0600] *** 2025-12-08 17:53:16,104 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.045) 0:00:04.585 ******* 2025-12-08 17:53:16,104 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.045) 0:00:04.584 ******* 2025-12-08 17:53:16,684 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:16,708 p=32717 u=zuul n=ansible | TASK [openshift_login : Read the install yamls parameters file path={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml] *** 2025-12-08 17:53:16,708 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.603) 0:00:05.189 ******* 2025-12-08 17:53:16,708 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.603) 0:00:05.188 ******* 2025-12-08 17:53:17,038 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:17,053 p=32717 u=zuul n=ansible | TASK [openshift_login : Append the KUBECONFIG to the install yamls parameters content={{ cifmw_openshift_login_install_yamls_artifacts_slurp['content'] | b64decode | from_yaml | combine( { 'cifmw_install_yamls_environment': { 'KUBECONFIG': cifmw_openshift_login_kubeconfig } }, recursive=true) | to_nice_yaml }}, dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml, mode=0600] *** 2025-12-08 17:53:17,053 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.345) 0:00:05.534 ******* 2025-12-08 17:53:17,053 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.345) 0:00:05.533 ******* 2025-12-08 17:53:17,537 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:17,561 p=32717 u=zuul n=ansible | TASK [openshift_setup : Ensure output directory exists path={{ cifmw_openshift_setup_basedir }}/artifacts, state=directory, mode=0755] *** 2025-12-08 17:53:17,561 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.507) 0:00:06.042 ******* 2025-12-08 17:53:17,561 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.507) 0:00:06.041 ******* 2025-12-08 17:53:17,757 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:17,778 p=32717 u=zuul n=ansible | 
TASK [openshift_setup : Fetch namespaces to create cifmw_openshift_setup_namespaces={{ (( ([cifmw_install_yamls_defaults['NAMESPACE']] + ([cifmw_install_yamls_defaults['OPERATOR_NAMESPACE']] if 'OPERATOR_NAMESPACE' is in cifmw_install_yamls_defaults else []) ) if cifmw_install_yamls_defaults is defined else [] ) + cifmw_openshift_setup_create_namespaces) | unique }}] *** 2025-12-08 17:53:17,778 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.217) 0:00:06.259 ******* 2025-12-08 17:53:17,778 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.217) 0:00:06.258 ******* 2025-12-08 17:53:17,803 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:17,828 p=32717 u=zuul n=ansible | TASK [openshift_setup : Create required namespaces kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit) }}, name={{ item }}, kind=Namespace, state=present] *** 2025-12-08 17:53:17,828 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.050) 0:00:06.309 ******* 2025-12-08 17:53:17,828 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.050) 0:00:06.308 ******* 2025-12-08 17:53:18,765 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack) 2025-12-08 17:53:19,446 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack-operators) 2025-12-08 17:53:19,474 p=32717 u=zuul n=ansible | TASK [openshift_setup : Get internal OpenShift registry route kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, kind=Route, name=default-route, namespace=openshift-image-registry] *** 2025-12-08 17:53:19,475 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:19 +0000 (0:00:01.646) 0:00:07.955 ******* 2025-12-08 17:53:19,475 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:19 +0000 (0:00:01.646) 0:00:07.955 ******* 2025-12-08 17:53:20,623 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:20,644 p=32717 u=zuul n=ansible | TASK [openshift_setup : Allow anonymous image-pulls in CRC registry for targeted namespaces state=present, kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, definition={'kind': 'RoleBinding', 'apiVersion': 'rbac.authorization.k8s.io/v1', 'metadata': {'name': 'system:image-puller', 'namespace': '{{ item }}'}, 'subjects': [{'kind': 'User', 'name': 'system:anonymous'}, {'kind': 'User', 'name': 'system:unauthenticated'}], 'roleRef': {'kind': 'ClusterRole', 'name': 'system:image-puller'}}] *** 2025-12-08 17:53:20,644 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:20 +0000 (0:00:01.169) 0:00:09.125 ******* 2025-12-08 17:53:20,645 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:20 +0000 (0:00:01.169) 0:00:09.125 ******* 2025-12-08 17:53:21,400 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack) 2025-12-08 17:53:22,068 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack-operators) 2025-12-08 17:53:22,096 p=32717 u=zuul n=ansible | TASK [openshift_setup : Wait for the image registry to be ready kind=Deployment, name=image-registry, namespace=openshift-image-registry, kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, wait=True, wait_sleep=10, 
wait_timeout=600, wait_condition={'type': 'Available', 'status': 'True'}] *** 2025-12-08 17:53:22,096 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:22 +0000 (0:00:01.451) 0:00:10.577 ******* 2025-12-08 17:53:22,096 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:22 +0000 (0:00:01.451) 0:00:10.576 ******* 2025-12-08 17:53:23,037 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:23,055 p=32717 u=zuul n=ansible | TASK [openshift_setup : Login into OpenShift internal registry output_dir={{ cifmw_openshift_setup_basedir }}/artifacts, script=podman login -u {{ cifmw_openshift_user }} -p {{ cifmw_openshift_token }} {%- if cifmw_openshift_setup_skip_internal_registry_tls_verify|bool %} --tls-verify=false {%- endif %} {{ cifmw_openshift_setup_registry_default_route.resources[0].spec.host }}] *** 2025-12-08 17:53:23,055 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.959) 0:00:11.536 ******* 2025-12-08 17:53:23,055 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.959) 0:00:11.535 ******* 2025-12-08 17:53:23,112 p=32717 u=zuul n=ansible | Follow script's output here: /home/zuul/ci-framework-data/logs/ci_script_001_login_into_openshift_internal.log 2025-12-08 17:53:23,338 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:23,348 p=32717 u=zuul n=ansible | TASK [Ensure we have custom CA installed on host role=install_ca] ************** 2025-12-08 17:53:23,348 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.293) 0:00:11.829 ******* 2025-12-08 17:53:23,348 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.293) 0:00:11.828 ******* 2025-12-08 17:53:23,368 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,382 p=32717 u=zuul n=ansible | TASK [openshift_setup : Update ca bundle _raw_params=update-ca-trust extract] *** 2025-12-08 17:53:23,382 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.034) 0:00:11.863 ******* 2025-12-08 17:53:23,382 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.034) 0:00:11.862 ******* 2025-12-08 17:53:23,401 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,415 p=32717 u=zuul n=ansible | TASK [openshift_setup : Slurp CAs file src={{ cifmw_openshift_setup_ca_bundle_path }}] *** 2025-12-08 17:53:23,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:11.896 ******* 2025-12-08 17:53:23,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:11.896 ******* 2025-12-08 17:53:23,452 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,491 p=32717 u=zuul n=ansible | TASK [openshift_setup : Create config map with registry CAs kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, definition={'apiVersion': 'v1', 'kind': 'ConfigMap', 'metadata': {'namespace': 'openshift-config', 'name': 'registry-cas'}, 'data': '{{ _config_map_data | items2dict }}'}] *** 2025-12-08 17:53:23,491 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.075) 0:00:11.972 ******* 2025-12-08 17:53:23,491 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.075) 0:00:11.971 ******* 2025-12-08 17:53:23,515 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,524 p=32717 u=zuul n=ansible | TASK [openshift_setup : Install Red Hat CA for pulling images 
from internal registry kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, merge_type=merge, definition={'apiVersion': 'config.openshift.io/v1', 'kind': 'Image', 'metadata': {'name': 'cluster'}, 'spec': {'additionalTrustedCA': {'name': 'registry-cas'}}}] *** 2025-12-08 17:53:23,525 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:12.005 ******* 2025-12-08 17:53:23,525 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:12.005 ******* 2025-12-08 17:53:23,556 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,566 p=32717 u=zuul n=ansible | TASK [openshift_setup : Add insecure registry kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, merge_type=merge, definition={'apiVersion': 'config.openshift.io/v1', 'kind': 'Image', 'metadata': {'name': 'cluster'}, 'spec': {'registrySources': {'insecureRegistries': ['{{ cifmw_update_containers_registry }}'], 'allowedRegistries': '{{ all_registries }}'}}}] *** 2025-12-08 17:53:23,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.041) 0:00:12.047 ******* 2025-12-08 17:53:23,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.041) 0:00:12.046 ******* 2025-12-08 17:53:23,587 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,596 p=32717 u=zuul n=ansible | TASK [openshift_setup : Create a ICSP with repository digest mirrors kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, definition={'apiVersion': 'operator.openshift.io/v1alpha1', 'kind': 'ImageContentSourcePolicy', 'metadata': {'name': 'registry-digest-mirrors'}, 'spec': {'repositoryDigestMirrors': '{{ cifmw_openshift_setup_digest_mirrors }}'}}] *** 2025-12-08 17:53:23,596 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.029) 0:00:12.077 ******* 2025-12-08 17:53:23,596 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.029) 0:00:12.076 ******* 2025-12-08 17:53:23,633 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,647 p=32717 u=zuul n=ansible | TASK [openshift_setup : Gather network.operator info kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, api_version=operator.openshift.io/v1, kind=Network, name=cluster] *** 2025-12-08 17:53:23,647 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.051) 0:00:12.128 ******* 2025-12-08 17:53:23,647 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.051) 0:00:12.127 ******* 2025-12-08 17:53:24,386 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:24,416 p=32717 u=zuul n=ansible | TASK [openshift_setup : Patch network operator api_version=operator.openshift.io/v1, kubeconfig={{ cifmw_openshift_kubeconfig }}, kind=Network, name=cluster, persist_config=True, patch=[{'path': '/spec/defaultNetwork/ovnKubernetesConfig/gatewayConfig/routingViaHost', 'value': True, 'op': 'replace'}, {'path': '/spec/defaultNetwork/ovnKubernetesConfig/gatewayConfig/ipForwarding', 'value': 'Global', 'op': 'replace'}]] *** 2025-12-08 17:53:24,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:24 +0000 
(0:00:00.768) 0:00:12.897 ******* 2025-12-08 17:53:24,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:24 +0000 (0:00:00.768) 0:00:12.896 ******* 2025-12-08 17:53:25,337 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:25,360 p=32717 u=zuul n=ansible | TASK [openshift_setup : Patch samples registry configuration kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, api_version=samples.operator.openshift.io/v1, kind=Config, name=cluster, patch=[{'op': 'replace', 'path': '/spec/samplesRegistry', 'value': 'registry.redhat.io'}]] *** 2025-12-08 17:53:25,360 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:25 +0000 (0:00:00.944) 0:00:13.841 ******* 2025-12-08 17:53:25,360 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:25 +0000 (0:00:00.944) 0:00:13.840 ******* 2025-12-08 17:53:26,101 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:26,116 p=32717 u=zuul n=ansible | TASK [openshift_setup : Delete the pods from openshift-marketplace namespace kind=Pod, state=absent, delete_all=True, kubeconfig={{ cifmw_openshift_kubeconfig }}, namespace=openshift-marketplace] *** 2025-12-08 17:53:26,116 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.755) 0:00:14.596 ******* 2025-12-08 17:53:26,116 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.755) 0:00:14.596 ******* 2025-12-08 17:53:26,134 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,147 p=32717 u=zuul n=ansible | TASK [openshift_setup : Wait for openshift-marketplace pods to be running _raw_params=oc wait pod --all --for=condition=Ready -n openshift-marketplace --timeout=1m] *** 2025-12-08 17:53:26,148 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.628 ******* 2025-12-08 17:53:26,148 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.628 ******* 2025-12-08 17:53:26,165 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,185 p=32717 u=zuul n=ansible | TASK [Deploy Observability operator. 
name=openshift_obs] *********************** 2025-12-08 17:53:26,185 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.037) 0:00:14.666 ******* 2025-12-08 17:53:26,185 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.036) 0:00:14.665 ******* 2025-12-08 17:53:26,210 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,271 p=32717 u=zuul n=ansible | TASK [Deploy Metal3 BMHs name=deploy_bmh] ************************************** 2025-12-08 17:53:26,271 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.086) 0:00:14.752 ******* 2025-12-08 17:53:26,271 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.086) 0:00:14.751 ******* 2025-12-08 17:53:26,293 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,304 p=32717 u=zuul n=ansible | TASK [Install certmanager operator role name=cert_manager] ********************* 2025-12-08 17:53:26,304 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.785 ******* 2025-12-08 17:53:26,304 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.784 ******* 2025-12-08 17:53:26,323 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,332 p=32717 u=zuul n=ansible | TASK [Configure hosts networking using nmstate name=ci_nmstate] **************** 2025-12-08 17:53:26,332 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.813 ******* 2025-12-08 17:53:26,333 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.813 ******* 2025-12-08 17:53:26,350 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,362 p=32717 u=zuul n=ansible | TASK [Configure multus networks name=ci_multus] ******************************** 2025-12-08 17:53:26,362 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.029) 0:00:14.843 ******* 2025-12-08 17:53:26,362 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.029) 0:00:14.842 ******* 2025-12-08 17:53:26,379 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,389 p=32717 u=zuul n=ansible | TASK [Deploy Sushy Emulator service pod name=sushy_emulator] ******************* 2025-12-08 17:53:26,389 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.026) 0:00:14.870 ******* 2025-12-08 17:53:26,389 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.026) 0:00:14.869 ******* 2025-12-08 17:53:26,408 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,418 p=32717 u=zuul n=ansible | TASK [Setup Libvirt on controller name=libvirt_manager] ************************ 2025-12-08 17:53:26,418 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.898 ******* 2025-12-08 17:53:26,418 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.898 ******* 2025-12-08 17:53:26,434 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,446 p=32717 u=zuul n=ansible | TASK [Prepare container package builder name=pkg_build] ************************ 2025-12-08 17:53:26,446 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.927 ******* 2025-12-08 17:53:26,446 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.926 ******* 2025-12-08 17:53:26,472 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 
17:53:26,481 p=32717 u=zuul n=ansible | TASK [run_hook : Assert parameters are valid quiet=True, that=['_list_hooks is not string', '_list_hooks is not mapping', '_list_hooks is iterable', '(hooks | default([])) is not string', '(hooks | default([])) is not mapping', '(hooks | default([])) is iterable']] *** 2025-12-08 17:53:26,482 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.035) 0:00:14.962 ******* 2025-12-08 17:53:26,482 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.035) 0:00:14.962 ******* 2025-12-08 17:53:26,566 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:26,576 p=32717 u=zuul n=ansible | TASK [run_hook : Assert single hooks are all mappings quiet=True, that=['_not_mapping_hooks | length == 0'], msg=All single hooks must be a list of mappings or a mapping.] *** 2025-12-08 17:53:26,576 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.094) 0:00:15.057 ******* 2025-12-08 17:53:26,576 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.094) 0:00:15.056 ******* 2025-12-08 17:53:26,646 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:26,664 p=32717 u=zuul n=ansible | TASK [run_hook : Loop on hooks for post_infra _raw_params={{ hook.type }}.yml] *** 2025-12-08 17:53:26,664 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.088) 0:00:15.145 ******* 2025-12-08 17:53:26,664 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.088) 0:00:15.144 ******* 2025-12-08 17:53:26,747 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,805 p=32717 u=zuul n=ansible | PLAY RECAP ********************************************************************* 2025-12-08 17:53:26,805 p=32717 u=zuul n=ansible | localhost : ok=35 changed=12 unreachable=0 failed=0 skipped=34 rescued=0 ignored=0 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.141) 0:00:15.286 ******* 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Create required namespaces ---------------------------- 1.65s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Allow anonymous image-pulls in CRC registry for targeted namespaces --- 1.45s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Get internal OpenShift registry route ----------------- 1.17s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Wait for the image registry to be ready --------------- 0.96s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Patch network operator -------------------------------- 0.94s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Gather network.operator info -------------------------- 0.77s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Patch samples registry configuration ------------------ 0.76s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Create the openshift_login parameters file ------------ 0.60s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch new OpenShift access token ---------------------- 0.54s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Append the KUBECONFIG to the install yamls parameters --- 0.51s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch 
OpenShift token --------------------------------- 0.48s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch OpenShift current user -------------------------- 0.40s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch OpenShift API URL ------------------------------- 0.39s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Ensure output directory exists ------------------------ 0.35s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Read the install yamls parameters file ---------------- 0.35s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Login into OpenShift internal registry ---------------- 0.29s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch OpenShift kubeconfig context -------------------- 0.29s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | networking_mapper : Check for Networking Environment Definition file existence --- 0.29s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Check if kubeconfig exists ---------------------------- 0.23s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Ensure output directory exists ------------------------ 0.22s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.142) 0:00:15.286 ******* 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | openshift_setup --------------------------------------------------------- 8.62s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | openshift_login --------------------------------------------------------- 4.84s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | run_hook ---------------------------------------------------------------- 0.65s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | ansible.builtin.include_role -------------------------------------------- 0.54s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | networking_mapper ------------------------------------------------------- 0.40s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | ansible.builtin.include_vars -------------------------------------------- 0.19s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | total ------------------------------------------------------------------ 15.24s 2025-12-08 17:53:44,400 p=33314 u=zuul n=ansible | Starting galaxy collection install process 2025-12-08 17:53:44,421 p=33314 u=zuul n=ansible | Process install dependency map 2025-12-08 17:53:59,851 p=33314 u=zuul n=ansible | Starting collection install process 2025-12-08 17:53:59,851 p=33314 u=zuul n=ansible | Installing 'cifmw.general:1.0.0+33d5122f' to '/home/zuul/.ansible/collections/ansible_collections/cifmw/general' 2025-12-08 17:54:00,501 p=33314 u=zuul n=ansible | Created collection for cifmw.general:1.0.0+33d5122f at /home/zuul/.ansible/collections/ansible_collections/cifmw/general 2025-12-08 17:54:00,501 p=33314 u=zuul n=ansible | cifmw.general:1.0.0+33d5122f was installed successfully 2025-12-08 17:54:00,501 p=33314 u=zuul n=ansible | Installing 'containers.podman:1.16.2' to '/home/zuul/.ansible/collections/ansible_collections/containers/podman' 2025-12-08 17:54:00,568 p=33314 u=zuul n=ansible | Created collection for containers.podman:1.16.2 at 
/home/zuul/.ansible/collections/ansible_collections/containers/podman 2025-12-08 17:54:00,568 p=33314 u=zuul n=ansible | containers.podman:1.16.2 was installed successfully 2025-12-08 17:54:00,568 p=33314 u=zuul n=ansible | Installing 'community.general:10.0.1' to '/home/zuul/.ansible/collections/ansible_collections/community/general' 2025-12-08 17:54:01,553 p=33314 u=zuul n=ansible | Created collection for community.general:10.0.1 at /home/zuul/.ansible/collections/ansible_collections/community/general 2025-12-08 17:54:01,553 p=33314 u=zuul n=ansible | community.general:10.0.1 was installed successfully 2025-12-08 17:54:01,553 p=33314 u=zuul n=ansible | Installing 'ansible.posix:1.6.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/posix' 2025-12-08 17:54:01,614 p=33314 u=zuul n=ansible | Created collection for ansible.posix:1.6.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/posix 2025-12-08 17:54:01,614 p=33314 u=zuul n=ansible | ansible.posix:1.6.2 was installed successfully 2025-12-08 17:54:01,614 p=33314 u=zuul n=ansible | Installing 'ansible.utils:5.1.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/utils' 2025-12-08 17:54:01,736 p=33314 u=zuul n=ansible | Created collection for ansible.utils:5.1.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/utils 2025-12-08 17:54:01,736 p=33314 u=zuul n=ansible | ansible.utils:5.1.2 was installed successfully 2025-12-08 17:54:01,736 p=33314 u=zuul n=ansible | Installing 'community.libvirt:1.3.0' to '/home/zuul/.ansible/collections/ansible_collections/community/libvirt' 2025-12-08 17:54:01,797 p=33314 u=zuul n=ansible | Created collection for community.libvirt:1.3.0 at /home/zuul/.ansible/collections/ansible_collections/community/libvirt 2025-12-08 17:54:01,797 p=33314 u=zuul n=ansible | community.libvirt:1.3.0 was installed successfully 2025-12-08 17:54:01,798 p=33314 u=zuul n=ansible | Installing 'community.crypto:2.22.3' to '/home/zuul/.ansible/collections/ansible_collections/community/crypto' 2025-12-08 17:54:01,972 p=33314 u=zuul n=ansible | Created collection for community.crypto:2.22.3 at /home/zuul/.ansible/collections/ansible_collections/community/crypto 2025-12-08 17:54:01,972 p=33314 u=zuul n=ansible | community.crypto:2.22.3 was installed successfully 2025-12-08 17:54:01,972 p=33314 u=zuul n=ansible | Installing 'kubernetes.core:5.0.0' to '/home/zuul/.ansible/collections/ansible_collections/kubernetes/core' 2025-12-08 17:54:02,156 p=33314 u=zuul n=ansible | Created collection for kubernetes.core:5.0.0 at /home/zuul/.ansible/collections/ansible_collections/kubernetes/core 2025-12-08 17:54:02,156 p=33314 u=zuul n=ansible | kubernetes.core:5.0.0 was installed successfully 2025-12-08 17:54:02,156 p=33314 u=zuul n=ansible | Installing 'ansible.netcommon:7.1.0' to '/home/zuul/.ansible/collections/ansible_collections/ansible/netcommon' 2025-12-08 17:54:02,244 p=33314 u=zuul n=ansible | Created collection for ansible.netcommon:7.1.0 at /home/zuul/.ansible/collections/ansible_collections/ansible/netcommon 2025-12-08 17:54:02,245 p=33314 u=zuul n=ansible | ansible.netcommon:7.1.0 was installed successfully 2025-12-08 17:54:02,245 p=33314 u=zuul n=ansible | Installing 'openstack.config_template:2.1.1' to '/home/zuul/.ansible/collections/ansible_collections/openstack/config_template' 2025-12-08 17:54:02,278 p=33314 u=zuul n=ansible | Created collection for openstack.config_template:2.1.1 at /home/zuul/.ansible/collections/ansible_collections/openstack/config_template 2025-12-08 
17:54:02,278 p=33314 u=zuul n=ansible | openstack.config_template:2.1.1 was installed successfully 2025-12-08 17:54:02,278 p=33314 u=zuul n=ansible | Installing 'junipernetworks.junos:9.1.0' to '/home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos' 2025-12-08 17:54:02,619 p=33314 u=zuul n=ansible | Created collection for junipernetworks.junos:9.1.0 at /home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos 2025-12-08 17:54:02,619 p=33314 u=zuul n=ansible | junipernetworks.junos:9.1.0 was installed successfully 2025-12-08 17:54:02,619 p=33314 u=zuul n=ansible | Installing 'cisco.ios:9.0.3' to '/home/zuul/.ansible/collections/ansible_collections/cisco/ios' 2025-12-08 17:54:02,980 p=33314 u=zuul n=ansible | Created collection for cisco.ios:9.0.3 at /home/zuul/.ansible/collections/ansible_collections/cisco/ios 2025-12-08 17:54:02,980 p=33314 u=zuul n=ansible | cisco.ios:9.0.3 was installed successfully 2025-12-08 17:54:02,980 p=33314 u=zuul n=ansible | Installing 'mellanox.onyx:1.0.0' to '/home/zuul/.ansible/collections/ansible_collections/mellanox/onyx' 2025-12-08 17:54:03,021 p=33314 u=zuul n=ansible | Created collection for mellanox.onyx:1.0.0 at /home/zuul/.ansible/collections/ansible_collections/mellanox/onyx 2025-12-08 17:54:03,021 p=33314 u=zuul n=ansible | mellanox.onyx:1.0.0 was installed successfully 2025-12-08 17:54:03,021 p=33314 u=zuul n=ansible | Installing 'community.okd:4.0.0' to '/home/zuul/.ansible/collections/ansible_collections/community/okd' 2025-12-08 17:54:03,054 p=33314 u=zuul n=ansible | Created collection for community.okd:4.0.0 at /home/zuul/.ansible/collections/ansible_collections/community/okd 2025-12-08 17:54:03,054 p=33314 u=zuul n=ansible | community.okd:4.0.0 was installed successfully 2025-12-08 17:54:03,054 p=33314 u=zuul n=ansible | Installing '@NAMESPACE@.@NAME@:3.1.4' to '/home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@' 2025-12-08 17:54:03,162 p=33314 u=zuul n=ansible | Created collection for @NAMESPACE@.@NAME@:3.1.4 at /home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@ 2025-12-08 17:54:03,162 p=33314 u=zuul n=ansible | @NAMESPACE@.@NAME@:3.1.4 was installed successfully home/zuul/zuul-output/logs/ci-framework-data/0000755000175000017500000000000015115611537020370 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/logs/0000755000175000017500000000000015115611527021333 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/logs/openstack-must-gather/0000755000175000017500000000000015115611520025551 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/logs/openstack-must-gather/must-gather.logs0000644000175000017500000000750015115611503030702 0ustar zuulzuul[must-gather ] OUT 2025-12-08T18:02:40.489338024Z Using must-gather plug-in image: quay.io/openstack-k8s-operators/openstack-must-gather:latest When opening a support case, bugzilla, or issue please include the following summary data along with any other requested information: ClusterID: ClientVersion: 4.20.5 ClusterVersion: Stable at "4.20.1" ClusterOperators: clusteroperator/machine-config is degraded because Failed to resync 4.20.1 because: error during syncRequiredMachineConfigPools: [context deadline exceeded, error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"")] clusteroperator/operator-lifecycle-manager is not upgradeable because ClusterServiceVersions blocking minor version upgrades to 4.19.0 or higher: - maximum supported OCP version for service-telemetry/smart-gateway-operator.v5.0.1765147433 is 4.18 - maximum supported OCP version for service-telemetry/service-telemetry-operator.v1.5.1765147436 is 4.18 clusteroperator/cloud-credential is missing clusteroperator/cluster-autoscaler is missing clusteroperator/insights is missing clusteroperator/monitoring is missing clusteroperator/storage is missing [must-gather ] OUT 2025-12-08T18:02:40.513415738Z namespace/openshift-must-gather-gctth created [must-gather ] OUT 2025-12-08T18:02:40.519071842Z clusterrolebinding.rbac.authorization.k8s.io/must-gather-vk99s created [must-gather ] OUT 2025-12-08T18:02:40.537001149Z pod for plug-in image quay.io/openstack-k8s-operators/openstack-must-gather:latest created [must-gather-5cz8j] OUT 2025-12-08T18:02:50.54753759Z gather logs unavailable: Get "https://192.168.126.11:10250/containerLogs/openshift-must-gather-gctth/must-gather-5cz8j/gather?follow=true×tamps=true": remote error: tls: internal error [must-gather-5cz8j] OUT 2025-12-08T18:02:50.547900939Z waiting for gather to complete [must-gather-5cz8j] OUT 2025-12-08T18:04:50.552451475Z downloading gather output [must-gather-5cz8j] OUT 2025-12-08T18:04:51.073320847Z gather output not downloaded: [Get "https://192.168.126.11:10250/containerLogs/openshift-must-gather-gctth/must-gather-5cz8j/gather?timestamps=true": remote error: tls: internal error, No available strategies to copy.] [must-gather-5cz8j] OUT 2025-12-08T18:04:51.073351618Z [must-gather ] OUT 2025-12-08T18:04:51.080283736Z namespace/openshift-must-gather-gctth deleted Reprinting Cluster State: When opening a support case, bugzilla, or issue please include the following summary data along with any other requested information: ClusterID: ClientVersion: 4.20.5 ClusterVersion: Stable at "4.20.1" ClusterOperators: clusteroperator/machine-config is degraded because Failed to resync 4.20.1 because: error during syncRequiredMachineConfigPools: [context deadline exceeded, error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"")] clusteroperator/operator-lifecycle-manager is not upgradeable because ClusterServiceVersions blocking minor version upgrades to 4.19.0 or higher: - maximum supported OCP version for service-telemetry/smart-gateway-operator.v5.0.1765147433 is 4.18 - maximum supported OCP version for service-telemetry/service-telemetry-operator.v1.5.1765147436 is 4.18 clusteroperator/cloud-credential is missing clusteroperator/cluster-autoscaler is missing clusteroperator/insights is missing clusteroperator/monitoring is missing clusteroperator/storage is missing home/zuul/zuul-output/logs/ci-framework-data/logs/openstack-must-gather/timestamp0000644000175000017500000000016015115611503027475 0ustar zuulzuul2025-12-08 18:02:40.527173422 +0000 UTC m=+0.117694249 2025-12-08 18:04:51.074369486 +0000 UTC m=+130.664890283 home/zuul/zuul-output/logs/ci-framework-data/logs/openstack-must-gather/event-filter.html0000644000175000017500000000641015115611503031045 0ustar zuulzuul Events
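The must-gather.logs content above matches the output format of an `oc adm must-gather` run using the OpenStack plug-in image named in the log. The sketch below shows an equivalent manual invocation; the exact flags used by the ci-framework are not visible in this log, so the flag set and destination directory are assumptions based on the image name and the directory layout seen in this archive.

  # Gather OpenStack operator diagnostics with the plug-in image referenced in the log above,
  # writing the output under the openstack-must-gather directory present in this archive.
  oc adm must-gather \
      --image=quay.io/openstack-k8s-operators/openstack-must-gather:latest \
      --dest-dir=/home/zuul/zuul-output/logs/ci-framework-data/logs/openstack-must-gather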
././@LongLink0000644000000000000000000000017600000000000011606 Kustar rootrootquay-io-openstack-k8s-operators-openstack-must-gather-sha256-854a802357b4f565a366fce3bf29b20c1b768ec4ab7e822ef52dfc2fef000d2chome/zuul/zuul-output/logs/ci-framework-data/logs/openstack-must-gather/latest0000777000175000017500000000000015115611503047531 2quay-io-openstack-k8s-operators-openstack-must-gather-sha256-854a802357b4f565a366fce3bf29b20c1b768ecustar zuulzuul././@LongLink0000644000000000000000000000030700000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/openstack-must-gather/quay-io-openstack-k8s-operators-openstack-must-gather-sha256-854a802357b4f565a366fce3bf29b20c1b768ec4ab7e822ef52dfc2fef000d2c/home/zuul/zuul-output/logs/ci-framework-data/logs/openstack-must-gather/quay-io-openstack-k8s-operat0000755000175000017500000000000015115611520033036 5ustar zuulzuul././@LongLink0000644000000000000000000000032200000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/openstack-must-gather/quay-io-openstack-k8s-operators-openstack-must-gather-sha256-854a802357b4f565a366fce3bf29b20c1b768ec4ab7e822ef52dfc2fef000d2c/gather.logshome/zuul/zuul-output/logs/ci-framework-data/logs/openstack-must-gather/quay-io-openstack-k8s-operat0000644000175000017500000000000015115611502033026 0ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/logs/2025-12-08_18-02/0000775000175000017500000000000015115611530023113 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/logs/2025-12-08_18-02/ansible.log0000666000175000017500000046107615115610273025256 0ustar zuulzuul2025-12-08 17:51:42,655 p=31279 u=zuul n=ansible | Starting galaxy collection install process 2025-12-08 17:51:42,656 p=31279 u=zuul n=ansible | Process install dependency map 2025-12-08 17:51:58,145 p=31279 u=zuul n=ansible | Starting collection install process 2025-12-08 17:51:58,145 p=31279 u=zuul n=ansible | Installing 'cifmw.general:1.0.0+33d5122f' to '/home/zuul/.ansible/collections/ansible_collections/cifmw/general' 2025-12-08 17:51:58,665 p=31279 u=zuul n=ansible | Created collection for cifmw.general:1.0.0+33d5122f at /home/zuul/.ansible/collections/ansible_collections/cifmw/general 2025-12-08 17:51:58,666 p=31279 u=zuul n=ansible | cifmw.general:1.0.0+33d5122f was installed successfully 2025-12-08 17:51:58,666 p=31279 u=zuul n=ansible | Installing 'containers.podman:1.16.2' to '/home/zuul/.ansible/collections/ansible_collections/containers/podman' 2025-12-08 17:51:58,720 p=31279 u=zuul n=ansible | Created collection for containers.podman:1.16.2 at /home/zuul/.ansible/collections/ansible_collections/containers/podman 2025-12-08 17:51:58,720 p=31279 u=zuul n=ansible | containers.podman:1.16.2 was installed successfully 2025-12-08 17:51:58,720 p=31279 u=zuul n=ansible | Installing 'community.general:10.0.1' to '/home/zuul/.ansible/collections/ansible_collections/community/general' 2025-12-08 17:51:59,462 p=31279 u=zuul n=ansible | Created collection for community.general:10.0.1 at /home/zuul/.ansible/collections/ansible_collections/community/general 2025-12-08 17:51:59,462 p=31279 u=zuul n=ansible | community.general:10.0.1 was installed successfully 2025-12-08 17:51:59,462 p=31279 u=zuul n=ansible | Installing 'ansible.posix:1.6.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/posix' 2025-12-08 17:51:59,512 p=31279 u=zuul n=ansible | Created collection for ansible.posix:1.6.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/posix 2025-12-08 17:51:59,513 p=31279 u=zuul 
n=ansible | ansible.posix:1.6.2 was installed successfully 2025-12-08 17:51:59,513 p=31279 u=zuul n=ansible | Installing 'ansible.utils:5.1.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/utils' 2025-12-08 17:51:59,608 p=31279 u=zuul n=ansible | Created collection for ansible.utils:5.1.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/utils 2025-12-08 17:51:59,608 p=31279 u=zuul n=ansible | ansible.utils:5.1.2 was installed successfully 2025-12-08 17:51:59,608 p=31279 u=zuul n=ansible | Installing 'community.libvirt:1.3.0' to '/home/zuul/.ansible/collections/ansible_collections/community/libvirt' 2025-12-08 17:51:59,631 p=31279 u=zuul n=ansible | Created collection for community.libvirt:1.3.0 at /home/zuul/.ansible/collections/ansible_collections/community/libvirt 2025-12-08 17:51:59,631 p=31279 u=zuul n=ansible | community.libvirt:1.3.0 was installed successfully 2025-12-08 17:51:59,631 p=31279 u=zuul n=ansible | Installing 'community.crypto:2.22.3' to '/home/zuul/.ansible/collections/ansible_collections/community/crypto' 2025-12-08 17:51:59,768 p=31279 u=zuul n=ansible | Created collection for community.crypto:2.22.3 at /home/zuul/.ansible/collections/ansible_collections/community/crypto 2025-12-08 17:51:59,768 p=31279 u=zuul n=ansible | community.crypto:2.22.3 was installed successfully 2025-12-08 17:51:59,768 p=31279 u=zuul n=ansible | Installing 'kubernetes.core:5.0.0' to '/home/zuul/.ansible/collections/ansible_collections/kubernetes/core' 2025-12-08 17:51:59,885 p=31279 u=zuul n=ansible | Created collection for kubernetes.core:5.0.0 at /home/zuul/.ansible/collections/ansible_collections/kubernetes/core 2025-12-08 17:51:59,885 p=31279 u=zuul n=ansible | kubernetes.core:5.0.0 was installed successfully 2025-12-08 17:51:59,885 p=31279 u=zuul n=ansible | Installing 'ansible.netcommon:7.1.0' to '/home/zuul/.ansible/collections/ansible_collections/ansible/netcommon' 2025-12-08 17:51:59,954 p=31279 u=zuul n=ansible | Created collection for ansible.netcommon:7.1.0 at /home/zuul/.ansible/collections/ansible_collections/ansible/netcommon 2025-12-08 17:51:59,954 p=31279 u=zuul n=ansible | ansible.netcommon:7.1.0 was installed successfully 2025-12-08 17:51:59,954 p=31279 u=zuul n=ansible | Installing 'openstack.config_template:2.1.1' to '/home/zuul/.ansible/collections/ansible_collections/openstack/config_template' 2025-12-08 17:51:59,971 p=31279 u=zuul n=ansible | Created collection for openstack.config_template:2.1.1 at /home/zuul/.ansible/collections/ansible_collections/openstack/config_template 2025-12-08 17:51:59,971 p=31279 u=zuul n=ansible | openstack.config_template:2.1.1 was installed successfully 2025-12-08 17:51:59,971 p=31279 u=zuul n=ansible | Installing 'junipernetworks.junos:9.1.0' to '/home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos' 2025-12-08 17:52:00,216 p=31279 u=zuul n=ansible | Created collection for junipernetworks.junos:9.1.0 at /home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos 2025-12-08 17:52:00,216 p=31279 u=zuul n=ansible | junipernetworks.junos:9.1.0 was installed successfully 2025-12-08 17:52:00,216 p=31279 u=zuul n=ansible | Installing 'cisco.ios:9.0.3' to '/home/zuul/.ansible/collections/ansible_collections/cisco/ios' 2025-12-08 17:52:00,466 p=31279 u=zuul n=ansible | Created collection for cisco.ios:9.0.3 at /home/zuul/.ansible/collections/ansible_collections/cisco/ios 2025-12-08 17:52:00,466 p=31279 u=zuul n=ansible | cisco.ios:9.0.3 was installed successfully 2025-12-08 
17:52:00,466 p=31279 u=zuul n=ansible | Installing 'mellanox.onyx:1.0.0' to '/home/zuul/.ansible/collections/ansible_collections/mellanox/onyx' 2025-12-08 17:52:00,499 p=31279 u=zuul n=ansible | Created collection for mellanox.onyx:1.0.0 at /home/zuul/.ansible/collections/ansible_collections/mellanox/onyx 2025-12-08 17:52:00,499 p=31279 u=zuul n=ansible | mellanox.onyx:1.0.0 was installed successfully 2025-12-08 17:52:00,499 p=31279 u=zuul n=ansible | Installing 'community.okd:4.0.0' to '/home/zuul/.ansible/collections/ansible_collections/community/okd' 2025-12-08 17:52:00,527 p=31279 u=zuul n=ansible | Created collection for community.okd:4.0.0 at /home/zuul/.ansible/collections/ansible_collections/community/okd 2025-12-08 17:52:00,527 p=31279 u=zuul n=ansible | community.okd:4.0.0 was installed successfully 2025-12-08 17:52:00,527 p=31279 u=zuul n=ansible | Installing '@NAMESPACE@.@NAME@:3.1.4' to '/home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@' 2025-12-08 17:52:00,619 p=31279 u=zuul n=ansible | Created collection for @NAMESPACE@.@NAME@:3.1.4 at /home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@ 2025-12-08 17:52:00,619 p=31279 u=zuul n=ansible | @NAMESPACE@.@NAME@:3.1.4 was installed successfully 2025-12-08 17:52:08,760 p=31902 u=zuul n=ansible | PLAY [Bootstrap playbook] ****************************************************** 2025-12-08 17:52:08,776 p=31902 u=zuul n=ansible | TASK [Gathering Facts ] ******************************************************** 2025-12-08 17:52:08,776 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:08 +0000 (0:00:00.032) 0:00:00.032 ******* 2025-12-08 17:52:08,776 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:08 +0000 (0:00:00.031) 0:00:00.031 ******* 2025-12-08 17:52:10,015 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,052 p=31902 u=zuul n=ansible | TASK [Set custom cifmw PATH reusable fact cifmw_path={{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}, cacheable=True] *** 2025-12-08 17:52:10,053 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:01.276) 0:00:01.308 ******* 2025-12-08 17:52:10,053 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:01.276) 0:00:01.307 ******* 2025-12-08 17:52:10,080 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,086 p=31902 u=zuul n=ansible | TASK [Get customized parameters ci_framework_params={{ hostvars[inventory_hostname] | dict2items | selectattr("key", "match", "^(cifmw|pre|post)_(?!install_yamls|openshift_token|openshift_login|openshift_kubeconfig).*") | list | items2dict }}] *** 2025-12-08 17:52:10,086 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.033) 0:00:01.342 ******* 2025-12-08 17:52:10,086 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.033) 0:00:01.341 ******* 2025-12-08 17:52:10,129 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,135 p=31902 u=zuul n=ansible | TASK [install_ca : Ensure target directory exists path={{ cifmw_install_ca_trust_dir }}, state=directory, mode=0755] *** 2025-12-08 17:52:10,135 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.048) 0:00:01.391 ******* 2025-12-08 17:52:10,135 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.048) 0:00:01.390 ******* 2025-12-08 17:52:10,496 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,506 p=31902 u=zuul n=ansible | TASK 
[install_ca : Install internal CA from url url={{ cifmw_install_ca_url }}, dest={{ cifmw_install_ca_trust_dir }}, validate_certs={{ cifmw_install_ca_url_validate_certs | default(omit) }}, mode=0644] *** 2025-12-08 17:52:10,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.370) 0:00:01.762 ******* 2025-12-08 17:52:10,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.370) 0:00:01.760 ******* 2025-12-08 17:52:10,533 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:10,543 p=31902 u=zuul n=ansible | TASK [install_ca : Install custom CA bundle from inline dest={{ cifmw_install_ca_trust_dir }}/cifmw_inline_ca_bundle.crt, content={{ cifmw_install_ca_bundle_inline }}, mode=0644] *** 2025-12-08 17:52:10,544 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.037) 0:00:01.799 ******* 2025-12-08 17:52:10,544 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.037) 0:00:01.798 ******* 2025-12-08 17:52:10,574 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:10,584 p=31902 u=zuul n=ansible | TASK [install_ca : Install custom CA bundle from file dest={{ cifmw_install_ca_trust_dir }}/{{ cifmw_install_ca_bundle_src | basename }}, src={{ cifmw_install_ca_bundle_src }}, mode=0644] *** 2025-12-08 17:52:10,584 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.040) 0:00:01.839 ******* 2025-12-08 17:52:10,584 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.040) 0:00:01.838 ******* 2025-12-08 17:52:10,619 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:10,633 p=31902 u=zuul n=ansible | TASK [install_ca : Update ca bundle _raw_params=update-ca-trust] *************** 2025-12-08 17:52:10,634 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.049) 0:00:01.889 ******* 2025-12-08 17:52:10,634 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.049) 0:00:01.888 ******* 2025-12-08 17:52:12,273 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:12,285 p=31902 u=zuul n=ansible | TASK [repo_setup : Ensure directories are present path={{ cifmw_repo_setup_basedir }}/{{ item }}, state=directory, mode=0755] *** 2025-12-08 17:52:12,285 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:01.651) 0:00:03.540 ******* 2025-12-08 17:52:12,285 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:01.651) 0:00:03.539 ******* 2025-12-08 17:52:12,519 p=31902 u=zuul n=ansible | changed: [localhost] => (item=tmp) 2025-12-08 17:52:12,700 p=31902 u=zuul n=ansible | changed: [localhost] => (item=artifacts/repositories) 2025-12-08 17:52:12,886 p=31902 u=zuul n=ansible | changed: [localhost] => (item=venv/repo_setup) 2025-12-08 17:52:12,901 p=31902 u=zuul n=ansible | TASK [repo_setup : Make sure git-core package is installed name=git-core, state=present] *** 2025-12-08 17:52:12,901 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:00.616) 0:00:04.157 ******* 2025-12-08 17:52:12,901 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:00.616) 0:00:04.156 ******* 2025-12-08 17:52:13,882 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:13,891 p=31902 u=zuul n=ansible | TASK [repo_setup : Get repo-setup repository accept_hostkey=True, dest={{ cifmw_repo_setup_basedir }}/tmp/repo-setup, repo={{ cifmw_repo_setup_src }}] *** 2025-12-08 17:52:13,891 p=31902 u=zuul n=ansible | Monday 08 December 2025 
17:52:13 +0000 (0:00:00.990) 0:00:05.147 ******* 2025-12-08 17:52:13,892 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:13 +0000 (0:00:00.990) 0:00:05.146 ******* 2025-12-08 17:52:16,138 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:16,145 p=31902 u=zuul n=ansible | TASK [repo_setup : Initialize python venv and install requirements virtualenv={{ cifmw_repo_setup_venv }}, requirements={{ cifmw_repo_setup_basedir }}/tmp/repo-setup/requirements.txt, virtualenv_command=python3 -m venv --system-site-packages --upgrade-deps] *** 2025-12-08 17:52:16,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:16 +0000 (0:00:02.253) 0:00:07.400 ******* 2025-12-08 17:52:16,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:16 +0000 (0:00:02.253) 0:00:07.399 ******* 2025-12-08 17:52:24,622 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:24,630 p=31902 u=zuul n=ansible | TASK [repo_setup : Install repo-setup package chdir={{ cifmw_repo_setup_basedir }}/tmp/repo-setup, creates={{ cifmw_repo_setup_venv }}/bin/repo-setup, _raw_params={{ cifmw_repo_setup_venv }}/bin/python setup.py install] *** 2025-12-08 17:52:24,630 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:24 +0000 (0:00:08.485) 0:00:15.886 ******* 2025-12-08 17:52:24,630 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:24 +0000 (0:00:08.485) 0:00:15.884 ******* 2025-12-08 17:52:25,496 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:25,506 p=31902 u=zuul n=ansible | TASK [repo_setup : Set cifmw_repo_setup_dlrn_hash_tag from content provider cifmw_repo_setup_dlrn_hash_tag={{ content_provider_dlrn_md5_hash }}] *** 2025-12-08 17:52:25,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.875) 0:00:16.761 ******* 2025-12-08 17:52:25,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.875) 0:00:16.760 ******* 2025-12-08 17:52:25,538 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:25,547 p=31902 u=zuul n=ansible | TASK [repo_setup : Run repo-setup _raw_params={{ cifmw_repo_setup_venv }}/bin/repo-setup {{ cifmw_repo_setup_promotion }} {{ cifmw_repo_setup_additional_repos }} -d {{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }} -b {{ cifmw_repo_setup_branch }} --rdo-mirror {{ cifmw_repo_setup_rdo_mirror }} {% if cifmw_repo_setup_dlrn_hash_tag | length > 0 %} --dlrn-hash-tag {{ cifmw_repo_setup_dlrn_hash_tag }} {% endif %} -o {{ cifmw_repo_setup_output }}] *** 2025-12-08 17:52:25,547 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.041) 0:00:16.803 ******* 2025-12-08 17:52:25,547 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.041) 0:00:16.801 ******* 2025-12-08 17:52:26,188 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:26,195 p=31902 u=zuul n=ansible | TASK [repo_setup : Get component repo url={{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/component/{{ cifmw_repo_setup_component_name }}/{{ cifmw_repo_setup_component_promotion_tag }}/delorean.repo, dest={{ cifmw_repo_setup_output }}/{{ cifmw_repo_setup_component_name }}_{{ cifmw_repo_setup_component_promotion_tag }}_delorean.repo, mode=0644] *** 2025-12-08 17:52:26,195 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.647) 0:00:17.451 ******* 2025-12-08 17:52:26,195 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 
(0:00:00.647) 0:00:17.449 ******* 2025-12-08 17:52:26,242 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:26,255 p=31902 u=zuul n=ansible | TASK [repo_setup : Rename component repo path={{ cifmw_repo_setup_output }}/{{ cifmw_repo_setup_component_name }}_{{ cifmw_repo_setup_component_promotion_tag }}_delorean.repo, regexp=delorean-component-{{ cifmw_repo_setup_component_name }}, replace={{ cifmw_repo_setup_component_name }}-{{ cifmw_repo_setup_component_promotion_tag }}] *** 2025-12-08 17:52:26,256 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.511 ******* 2025-12-08 17:52:26,256 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.510 ******* 2025-12-08 17:52:26,306 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:26,323 p=31902 u=zuul n=ansible | TASK [repo_setup : Disable component repo in current-podified dlrn repo path={{ cifmw_repo_setup_output }}/delorean.repo, section=delorean-component-{{ cifmw_repo_setup_component_name }}, option=enabled, value=0, mode=0644] *** 2025-12-08 17:52:26,323 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.067) 0:00:17.579 ******* 2025-12-08 17:52:26,323 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.067) 0:00:17.578 ******* 2025-12-08 17:52:26,371 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:26,384 p=31902 u=zuul n=ansible | TASK [repo_setup : Run repo-setup-get-hash _raw_params={{ cifmw_repo_setup_venv }}/bin/repo-setup-get-hash --dlrn-url {{ cifmw_repo_setup_dlrn_uri[:-1] }} --os-version {{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }} --release {{ cifmw_repo_setup_branch }} {% if cifmw_repo_setup_component_name | length > 0 -%} --component {{ cifmw_repo_setup_component_name }} --tag {{ cifmw_repo_setup_component_promotion_tag }} {% else -%} --tag {{cifmw_repo_setup_promotion }} {% endif -%} {% if (cifmw_repo_setup_dlrn_hash_tag | length > 0) and (cifmw_repo_setup_component_name | length <= 0) -%} --dlrn-hash-tag {{ cifmw_repo_setup_dlrn_hash_tag }} {% endif -%} --json] *** 2025-12-08 17:52:26,384 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.640 ******* 2025-12-08 17:52:26,384 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.638 ******* 2025-12-08 17:52:26,908 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:26,917 p=31902 u=zuul n=ansible | TASK [repo_setup : Dump full hash in delorean.repo.md5 file content={{ _repo_setup_json['full_hash'] }} , dest={{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5, mode=0644] *** 2025-12-08 17:52:26,917 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.533) 0:00:18.173 ******* 2025-12-08 17:52:26,917 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.533) 0:00:18.172 ******* 2025-12-08 17:52:27,629 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:27,635 p=31902 u=zuul n=ansible | TASK [repo_setup : Dump current-podified hash url={{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/current-podified/delorean.repo.md5, dest={{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5, mode=0644] *** 2025-12-08 17:52:27,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.717) 0:00:18.891 ******* 2025-12-08 
17:52:27,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.717) 0:00:18.889 ******* 2025-12-08 17:52:27,662 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,668 p=31902 u=zuul n=ansible | TASK [repo_setup : Slurp current podified hash src={{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5] *** 2025-12-08 17:52:27,668 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.033) 0:00:18.924 ******* 2025-12-08 17:52:27,668 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.033) 0:00:18.923 ******* 2025-12-08 17:52:27,694 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,701 p=31902 u=zuul n=ansible | TASK [repo_setup : Update the value of full_hash _repo_setup_json={{ _repo_setup_json | combine({'full_hash': _hash}, recursive=true) }}] *** 2025-12-08 17:52:27,701 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.032) 0:00:18.957 ******* 2025-12-08 17:52:27,701 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.032) 0:00:18.955 ******* 2025-12-08 17:52:27,728 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,737 p=31902 u=zuul n=ansible | TASK [repo_setup : Export hashes facts for further use cifmw_repo_setup_full_hash={{ _repo_setup_json['full_hash'] }}, cifmw_repo_setup_commit_hash={{ _repo_setup_json['commit_hash'] }}, cifmw_repo_setup_distro_hash={{ _repo_setup_json['distro_hash'] }}, cifmw_repo_setup_extended_hash={{ _repo_setup_json['extended_hash'] }}, cifmw_repo_setup_dlrn_api_url={{ _repo_setup_json['dlrn_api_url'] }}, cifmw_repo_setup_dlrn_url={{ _repo_setup_json['dlrn_url'] }}, cifmw_repo_setup_release={{ _repo_setup_json['release'] }}, cacheable=True] *** 2025-12-08 17:52:27,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.035) 0:00:18.993 ******* 2025-12-08 17:52:27,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.035) 0:00:18.991 ******* 2025-12-08 17:52:27,784 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:27,792 p=31902 u=zuul n=ansible | TASK [repo_setup : Create download directory path={{ cifmw_repo_setup_rhos_release_path }}, state=directory, mode=0755] *** 2025-12-08 17:52:27,792 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.055) 0:00:19.048 ******* 2025-12-08 17:52:27,793 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.055) 0:00:19.047 ******* 2025-12-08 17:52:27,814 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,821 p=31902 u=zuul n=ansible | TASK [repo_setup : Print the URL to request msg={{ cifmw_repo_setup_rhos_release_rpm }}] *** 2025-12-08 17:52:27,822 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.029) 0:00:19.077 ******* 2025-12-08 17:52:27,822 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.029) 0:00:19.076 ******* 2025-12-08 17:52:27,847 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,856 p=31902 u=zuul n=ansible | TASK [Download the RPM name=krb_request] *************************************** 2025-12-08 17:52:27,856 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.034) 0:00:19.112 ******* 2025-12-08 17:52:27,856 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.034) 0:00:19.111 ******* 2025-12-08 17:52:27,873 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,881 p=31902 
u=zuul n=ansible | TASK [repo_setup : Install RHOS Release tool name={{ cifmw_repo_setup_rhos_release_rpm if cifmw_repo_setup_rhos_release_rpm is not url else cifmw_krb_request_out.path }}, state=present, disable_gpg_check={{ cifmw_repo_setup_rhos_release_gpg_check | bool }}] *** 2025-12-08 17:52:27,881 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.024) 0:00:19.137 ******* 2025-12-08 17:52:27,881 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.024) 0:00:19.135 ******* 2025-12-08 17:52:27,898 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,907 p=31902 u=zuul n=ansible | TASK [repo_setup : Get rhos-release tool version _raw_params=rhos-release --version] *** 2025-12-08 17:52:27,907 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.026) 0:00:19.163 ******* 2025-12-08 17:52:27,907 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.026) 0:00:19.161 ******* 2025-12-08 17:52:27,924 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,932 p=31902 u=zuul n=ansible | TASK [repo_setup : Print rhos-release tool version msg={{ rr_version.stdout }}] *** 2025-12-08 17:52:27,933 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.025) 0:00:19.188 ******* 2025-12-08 17:52:27,933 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.025) 0:00:19.187 ******* 2025-12-08 17:52:27,954 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,963 p=31902 u=zuul n=ansible | TASK [repo_setup : Generate repos using rhos-release {{ cifmw_repo_setup_rhos_release_args }} _raw_params=rhos-release {{ cifmw_repo_setup_rhos_release_args }} \ -t {{ cifmw_repo_setup_output }}] *** 2025-12-08 17:52:27,963 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.030) 0:00:19.219 ******* 2025-12-08 17:52:27,964 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.030) 0:00:19.218 ******* 2025-12-08 17:52:27,976 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,985 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for /etc/ci/mirror_info.sh path=/etc/ci/mirror_info.sh] *** 2025-12-08 17:52:27,985 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.021) 0:00:19.240 ******* 2025-12-08 17:52:27,985 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.021) 0:00:19.239 ******* 2025-12-08 17:52:28,210 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:28,218 p=31902 u=zuul n=ansible | TASK [repo_setup : Use RDO proxy mirrors chdir={{ cifmw_repo_setup_output }}, _raw_params=set -o pipefail source /etc/ci/mirror_info.sh sed -i -e "s|https://trunk.rdoproject.org|$NODEPOOL_RDO_PROXY|g" *.repo ] *** 2025-12-08 17:52:28,218 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.233) 0:00:19.474 ******* 2025-12-08 17:52:28,218 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.233) 0:00:19.472 ******* 2025-12-08 17:52:28,449 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:28,466 p=31902 u=zuul n=ansible | TASK [repo_setup : Use RDO CentOS mirrors (remove CentOS 10 conditional when Nodepool mirrors exist) chdir={{ cifmw_repo_setup_output }}, _raw_params=set -o pipefail source /etc/ci/mirror_info.sh sed -i -e "s|http://mirror.stream.centos.org|$NODEPOOL_CENTOS_MIRROR|g" *.repo ] *** 2025-12-08 17:52:28,466 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 
(0:00:00.248) 0:00:19.722 ******* 2025-12-08 17:52:28,467 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.248) 0:00:19.721 ******* 2025-12-08 17:52:28,706 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:28,721 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for gating.repo file on content provider url=http://{{ content_provider_registry_ip }}:8766/gating.repo] *** 2025-12-08 17:52:28,722 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.255) 0:00:19.977 ******* 2025-12-08 17:52:28,722 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.255) 0:00:19.976 ******* 2025-12-08 17:52:28,749 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,755 p=31902 u=zuul n=ansible | TASK [repo_setup : Populate gating repo from content provider ip content=[gating-repo] baseurl=http://{{ content_provider_registry_ip }}:8766/ enabled=1 gpgcheck=0 priority=1 , dest={{ cifmw_repo_setup_output }}/gating.repo, mode=0644] *** 2025-12-08 17:52:28,755 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.011 ******* 2025-12-08 17:52:28,755 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.010 ******* 2025-12-08 17:52:28,784 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,790 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for DLRN repo at the destination path={{ cifmw_repo_setup_output }}/delorean.repo] *** 2025-12-08 17:52:28,790 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.034) 0:00:20.046 ******* 2025-12-08 17:52:28,790 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.034) 0:00:20.045 ******* 2025-12-08 17:52:28,817 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,824 p=31902 u=zuul n=ansible | TASK [repo_setup : Lower the priority of DLRN repos to allow installation from gating repo path={{ cifmw_repo_setup_output }}/delorean.repo, regexp=priority=1, replace=priority=20] *** 2025-12-08 17:52:28,824 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.079 ******* 2025-12-08 17:52:28,824 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.078 ******* 2025-12-08 17:52:28,861 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,867 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for DLRN component repo path={{ cifmw_repo_setup_output }}/{{ _comp_repo }}] *** 2025-12-08 17:52:28,867 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.043) 0:00:20.123 ******* 2025-12-08 17:52:28,867 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.043) 0:00:20.122 ******* 2025-12-08 17:52:28,894 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,900 p=31902 u=zuul n=ansible | TASK [repo_setup : Lower the priority of componennt repos to allow installation from gating repo path={{ cifmw_repo_setup_output }}//{{ _comp_repo }}, regexp=priority=1, replace=priority=2] *** 2025-12-08 17:52:28,900 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.032) 0:00:20.155 ******* 2025-12-08 17:52:28,900 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.032) 0:00:20.154 ******* 2025-12-08 17:52:28,939 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,945 p=31902 u=zuul n=ansible | TASK [repo_setup : Find existing repos from /etc/yum.repos.d directory 
paths=/etc/yum.repos.d/, patterns=*.repo, recurse=False] *** 2025-12-08 17:52:28,945 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.045) 0:00:20.201 ******* 2025-12-08 17:52:28,945 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.045) 0:00:20.199 ******* 2025-12-08 17:52:29,252 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:29,267 p=31902 u=zuul n=ansible | TASK [repo_setup : Remove existing repos from /etc/yum.repos.d directory path={{ item }}, state=absent] *** 2025-12-08 17:52:29,267 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.321) 0:00:20.523 ******* 2025-12-08 17:52:29,267 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.322) 0:00:20.521 ******* 2025-12-08 17:52:29,525 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/etc/yum.repos.d/centos-addons.repo) 2025-12-08 17:52:29,723 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/etc/yum.repos.d/centos.repo) 2025-12-08 17:52:29,733 p=31902 u=zuul n=ansible | TASK [repo_setup : Cleanup existing metadata _raw_params=dnf clean metadata] *** 2025-12-08 17:52:29,733 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.465) 0:00:20.988 ******* 2025-12-08 17:52:29,733 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.465) 0:00:20.987 ******* 2025-12-08 17:52:30,222 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:30,236 p=31902 u=zuul n=ansible | TASK [repo_setup : Copy generated repos to /etc/yum.repos.d directory mode=0755, remote_src=True, src={{ cifmw_repo_setup_output }}/, dest=/etc/yum.repos.d] *** 2025-12-08 17:52:30,237 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.503) 0:00:21.492 ******* 2025-12-08 17:52:30,237 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.503) 0:00:21.491 ******* 2025-12-08 17:52:30,666 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:30,695 p=31902 u=zuul n=ansible | TASK [ci_setup : Gather variables for each operating system _raw_params={{ item }}] *** 2025-12-08 17:52:30,695 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.458) 0:00:21.950 ******* 2025-12-08 17:52:30,695 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.458) 0:00:21.949 ******* 2025-12-08 17:52:30,749 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/ci_setup/vars/redhat.yml) 2025-12-08 17:52:30,760 p=31902 u=zuul n=ansible | TASK [ci_setup : List packages to install var=cifmw_ci_setup_packages] ********* 2025-12-08 17:52:30,760 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.064) 0:00:22.015 ******* 2025-12-08 17:52:30,760 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.064) 0:00:22.014 ******* 2025-12-08 17:52:30,787 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_ci_setup_packages: - bash-completion - ca-certificates - git-core - make - tar - tmux - python3-pip 2025-12-08 17:52:30,796 p=31902 u=zuul n=ansible | TASK [ci_setup : Install needed packages name={{ cifmw_ci_setup_packages }}, state=latest] *** 2025-12-08 17:52:30,796 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.036) 0:00:22.052 ******* 2025-12-08 17:52:30,796 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.036) 0:00:22.051 ******* 2025-12-08 17:52:58,113 p=31902 u=zuul n=ansible | 
changed: [localhost] 2025-12-08 17:52:58,120 p=31902 u=zuul n=ansible | TASK [ci_setup : Gather version of openshift client _raw_params=oc version --client -o yaml] *** 2025-12-08 17:52:58,120 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:27.323) 0:00:49.376 ******* 2025-12-08 17:52:58,120 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:27.323) 0:00:49.374 ******* 2025-12-08 17:52:58,331 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:58,340 p=31902 u=zuul n=ansible | TASK [ci_setup : Ensure openshift client install path is present path={{ cifmw_ci_setup_oc_install_path }}, state=directory, mode=0755] *** 2025-12-08 17:52:58,340 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.220) 0:00:49.596 ******* 2025-12-08 17:52:58,341 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.220) 0:00:49.595 ******* 2025-12-08 17:52:58,535 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:58,542 p=31902 u=zuul n=ansible | TASK [ci_setup : Install openshift client src={{ cifmw_ci_setup_openshift_client_download_uri }}/{{ cifmw_ci_setup_openshift_client_version }}/openshift-client-linux.tar.gz, dest={{ cifmw_ci_setup_oc_install_path }}, remote_src=True, mode=0755, creates={{ cifmw_ci_setup_oc_install_path }}/oc] *** 2025-12-08 17:52:58,542 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.201) 0:00:49.798 ******* 2025-12-08 17:52:58,542 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.201) 0:00:49.797 ******* 2025-12-08 17:53:03,845 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:03,857 p=31902 u=zuul n=ansible | TASK [ci_setup : Add the OC path to cifmw_path if needed cifmw_path={{ cifmw_ci_setup_oc_install_path }}:{{ ansible_env.PATH }}, cacheable=True] *** 2025-12-08 17:53:03,857 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:05.315) 0:00:55.113 ******* 2025-12-08 17:53:03,857 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:05.315) 0:00:55.112 ******* 2025-12-08 17:53:03,880 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:03,890 p=31902 u=zuul n=ansible | TASK [ci_setup : Create completion file] *************************************** 2025-12-08 17:53:03,890 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:00.033) 0:00:55.146 ******* 2025-12-08 17:53:03,890 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:00.033) 0:00:55.145 ******* 2025-12-08 17:53:04,265 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:04,274 p=31902 u=zuul n=ansible | TASK [ci_setup : Source completion from within .bashrc create=True, mode=0644, path={{ ansible_user_dir }}/.bashrc, block=if [ -f ~/.oc_completion ]; then source ~/.oc_completion fi] *** 2025-12-08 17:53:04,274 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.383) 0:00:55.529 ******* 2025-12-08 17:53:04,274 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.383) 0:00:55.528 ******* 2025-12-08 17:53:04,625 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:04,635 p=31902 u=zuul n=ansible | TASK [ci_setup : Check rhsm status _raw_params=subscription-manager status] **** 2025-12-08 17:53:04,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.361) 0:00:55.891 ******* 2025-12-08 17:53:04,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.361) 
0:00:55.889 ******* 2025-12-08 17:53:04,655 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,665 p=31902 u=zuul n=ansible | TASK [ci_setup : Gather the repos to be enabled _repos={{ cifmw_ci_setup_rhel_rhsm_default_repos + (cifmw_ci_setup_rhel_rhsm_extra_repos | default([])) }}] *** 2025-12-08 17:53:04,665 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.030) 0:00:55.921 ******* 2025-12-08 17:53:04,665 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.030) 0:00:55.919 ******* 2025-12-08 17:53:04,689 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,699 p=31902 u=zuul n=ansible | TASK [ci_setup : Enabling the required repositories. name={{ item }}, state={{ rhsm_repo_state | default('enabled') }}] *** 2025-12-08 17:53:04,699 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.033) 0:00:55.954 ******* 2025-12-08 17:53:04,699 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.033) 0:00:55.953 ******* 2025-12-08 17:53:04,727 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,737 p=31902 u=zuul n=ansible | TASK [ci_setup : Get current /etc/redhat-release _raw_params=cat /etc/redhat-release] *** 2025-12-08 17:53:04,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.038) 0:00:55.993 ******* 2025-12-08 17:53:04,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.038) 0:00:55.992 ******* 2025-12-08 17:53:04,762 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,769 p=31902 u=zuul n=ansible | TASK [ci_setup : Print current /etc/redhat-release msg={{ _current_rh_release.stdout }}] *** 2025-12-08 17:53:04,769 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.031) 0:00:56.024 ******* 2025-12-08 17:53:04,769 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.031) 0:00:56.023 ******* 2025-12-08 17:53:04,787 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,794 p=31902 u=zuul n=ansible | TASK [ci_setup : Ensure the repos are enabled in the system using yum name={{ item.name }}, baseurl={{ item.baseurl }}, description={{ item.description | default(item.name) }}, gpgcheck={{ item.gpgcheck | default(false) }}, enabled=True, state={{ yum_repo_state | default('present') }}] *** 2025-12-08 17:53:04,794 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.025) 0:00:56.050 ******* 2025-12-08 17:53:04,794 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.025) 0:00:56.049 ******* 2025-12-08 17:53:04,830 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,839 p=31902 u=zuul n=ansible | TASK [ci_setup : Manage directories path={{ item }}, state={{ directory_state }}, mode=0755, owner={{ ansible_user_id }}, group={{ ansible_user_id }}] *** 2025-12-08 17:53:04,839 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.045) 0:00:56.095 ******* 2025-12-08 17:53:04,840 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.045) 0:00:56.094 ******* 2025-12-08 17:53:05,148 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/manifests/openstack/cr) 2025-12-08 17:53:05,359 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/logs) 2025-12-08 17:53:05,572 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/tmp) 2025-12-08 
17:53:05,819 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/volumes) 2025-12-08 17:53:06,024 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/parameters) 2025-12-08 17:53:06,051 p=31902 u=zuul n=ansible | TASK [Prepare install_yamls make targets name=install_yamls, apply={'tags': ['bootstrap']}] *** 2025-12-08 17:53:06,052 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:01.212) 0:00:57.307 ******* 2025-12-08 17:53:06,052 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:01.212) 0:00:57.306 ******* 2025-12-08 17:53:06,175 p=31902 u=zuul n=ansible | TASK [install_yamls : Ensure directories exist path={{ item }}, state=directory, mode=0755] *** 2025-12-08 17:53:06,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.124) 0:00:57.431 ******* 2025-12-08 17:53:06,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.124) 0:00:57.430 ******* 2025-12-08 17:53:06,432 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/artifacts) 2025-12-08 17:53:06,603 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/roles/install_yamls_makes/tasks) 2025-12-08 17:53:06,835 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/parameters) 2025-12-08 17:53:06,846 p=31902 u=zuul n=ansible | TASK [Create variables with local repos based on Zuul items name=install_yamls, tasks_from=zuul_set_operators_repo.yml] *** 2025-12-08 17:53:06,846 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.670) 0:00:58.102 ******* 2025-12-08 17:53:06,846 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.670) 0:00:58.100 ******* 2025-12-08 17:53:06,896 p=31902 u=zuul n=ansible | TASK [install_yamls : Set fact with local repos based on Zuul items cifmw_install_yamls_operators_repo={{ cifmw_install_yamls_operators_repo | default({}) | combine(_repo_operator_info | items2dict) }}] *** 2025-12-08 17:53:06,896 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.049) 0:00:58.152 ******* 2025-12-08 17:53:06,896 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.049) 0:00:58.150 ******* 2025-12-08 17:53:06,931 p=31902 u=zuul n=ansible | skipping: [localhost] => (item={'branch': 'master', 'change': '694', 'change_url': 'https://github.com/infrawatch/service-telemetry-operator/pull/694', 'commit_id': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'patchset': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'project': {'canonical_hostname': 'github.com', 'canonical_name': 'github.com/infrawatch/service-telemetry-operator', 'name': 'infrawatch/service-telemetry-operator', 'short_name': 'service-telemetry-operator', 'src_dir': 'src/github.com/infrawatch/service-telemetry-operator'}, 'topic': None}) 2025-12-08 17:53:06,933 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:06,943 p=31902 u=zuul n=ansible | TASK [install_yamls : Print helpful data for debugging msg=_repo_operator_name: {{ _repo_operator_name }} _repo_operator_info: {{ _repo_operator_info }} cifmw_install_yamls_operators_repo: {{ cifmw_install_yamls_operators_repo }} ] *** 2025-12-08 17:53:06,944 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.047) 0:00:58.199 ******* 2025-12-08 17:53:06,944 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.047) 
0:00:58.198 ******* 2025-12-08 17:53:06,979 p=31902 u=zuul n=ansible | skipping: [localhost] => (item={'branch': 'master', 'change': '694', 'change_url': 'https://github.com/infrawatch/service-telemetry-operator/pull/694', 'commit_id': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'patchset': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'project': {'canonical_hostname': 'github.com', 'canonical_name': 'github.com/infrawatch/service-telemetry-operator', 'name': 'infrawatch/service-telemetry-operator', 'short_name': 'service-telemetry-operator', 'src_dir': 'src/github.com/infrawatch/service-telemetry-operator'}, 'topic': None}) 2025-12-08 17:53:06,981 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:06,999 p=31902 u=zuul n=ansible | TASK [Customize install_yamls devsetup vars if needed name=install_yamls, tasks_from=customize_devsetup_vars.yml] *** 2025-12-08 17:53:06,999 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.055) 0:00:58.254 ******* 2025-12-08 17:53:06,999 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.055) 0:00:58.253 ******* 2025-12-08 17:53:07,050 p=31902 u=zuul n=ansible | TASK [install_yamls : Update opm_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^opm_version:, line=opm_version: {{ cifmw_install_yamls_opm_version }}, state=present] *** 2025-12-08 17:53:07,050 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.051) 0:00:58.306 ******* 2025-12-08 17:53:07,050 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.051) 0:00:58.304 ******* 2025-12-08 17:53:07,070 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,076 p=31902 u=zuul n=ansible | TASK [install_yamls : Update sdk_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^sdk_version:, line=sdk_version: {{ cifmw_install_yamls_sdk_version }}, state=present] *** 2025-12-08 17:53:07,077 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.026) 0:00:58.332 ******* 2025-12-08 17:53:07,077 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.026) 0:00:58.331 ******* 2025-12-08 17:53:07,111 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,117 p=31902 u=zuul n=ansible | TASK [install_yamls : Update go_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^go_version:, line=go_version: {{ cifmw_install_yamls_go_version }}, state=present] *** 2025-12-08 17:53:07,117 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.373 ******* 2025-12-08 17:53:07,117 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.372 ******* 2025-12-08 17:53:07,139 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,145 p=31902 u=zuul n=ansible | TASK [install_yamls : Update kustomize_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^kustomize_version:, line=kustomize_version: {{ cifmw_install_yamls_kustomize_version }}, state=present] *** 2025-12-08 17:53:07,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.027) 0:00:58.401 ******* 2025-12-08 17:53:07,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.027) 0:00:58.400 ******* 2025-12-08 
17:53:07,165 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,176 p=31902 u=zuul n=ansible | TASK [install_yamls : Compute the cifmw_install_yamls_vars final value _install_yamls_override_vars={{ _install_yamls_override_vars | default({}) | combine(item, recursive=True) }}] *** 2025-12-08 17:53:07,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.030) 0:00:58.432 ******* 2025-12-08 17:53:07,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.030) 0:00:58.431 ******* 2025-12-08 17:53:07,259 p=31902 u=zuul n=ansible | ok: [localhost] => (item={}) 2025-12-08 17:53:07,267 p=31902 u=zuul n=ansible | TASK [install_yamls : Set environment override cifmw_install_yamls_environment fact cifmw_install_yamls_environment={{ _install_yamls_override_vars.keys() | map('upper') | zip(_install_yamls_override_vars.values()) | items2dict(key_name=0, value_name=1) | combine({ 'OUT': cifmw_install_yamls_manifests_dir, 'OUTPUT_DIR': cifmw_install_yamls_edpm_dir, 'CHECKOUT_FROM_OPENSTACK_REF': cifmw_install_yamls_checkout_openstack_ref, 'OPENSTACK_K8S_BRANCH': (zuul is defined and not zuul.branch |regex_search('master|antelope|rhos')) | ternary(zuul.branch, 'main') }) | combine(install_yamls_operators_repos) }}, cacheable=True] *** 2025-12-08 17:53:07,267 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.091) 0:00:58.523 ******* 2025-12-08 17:53:07,268 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.091) 0:00:58.522 ******* 2025-12-08 17:53:07,299 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:07,308 p=31902 u=zuul n=ansible | TASK [install_yamls : Get environment structure base_path={{ cifmw_install_yamls_repo }}] *** 2025-12-08 17:53:07,308 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.564 ******* 2025-12-08 17:53:07,308 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.563 ******* 2025-12-08 17:53:07,903 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:07,919 p=31902 u=zuul n=ansible | TASK [install_yamls : Ensure Output directory exists path={{ cifmw_install_yamls_out_dir }}, state=directory, mode=0755] *** 2025-12-08 17:53:07,919 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.611) 0:00:59.175 ******* 2025-12-08 17:53:07,919 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.611) 0:00:59.174 ******* 2025-12-08 17:53:07,956 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,970 p=31902 u=zuul n=ansible | TASK [install_yamls : Ensure user cifmw_install_yamls_vars contains existing Makefile variables that=_cifmw_install_yamls_unmatched_vars | length == 0, msg=cifmw_install_yamls_vars contains a variable that is not defined in install_yamls Makefile nor cifmw_install_yamls_whitelisted_vars: {{ _cifmw_install_yamls_unmatched_vars | join(', ')}}, quiet=True] *** 2025-12-08 17:53:07,970 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.050) 0:00:59.226 ******* 2025-12-08 17:53:07,970 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.050) 0:00:59.224 ******* 2025-12-08 17:53:08,004 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:08,027 p=31902 u=zuul n=ansible | TASK [install_yamls : Generate /home/zuul/ci-framework-data/artifacts/install_yamls.sh dest={{ cifmw_install_yamls_out_dir }}/{{ cifmw_install_yamls_envfile }}, content={% for k,v in 
cifmw_install_yamls_environment.items() %} export {{ k }}={{ v }} {% endfor %}, mode=0644] *** 2025-12-08 17:53:08,027 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.057) 0:00:59.283 ******* 2025-12-08 17:53:08,027 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.057) 0:00:59.281 ******* 2025-12-08 17:53:08,060 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:08,074 p=31902 u=zuul n=ansible | TASK [install_yamls : Set install_yamls default values cifmw_install_yamls_defaults={{ get_makefiles_env_output.makefiles_values | combine(cifmw_install_yamls_environment) }}, cacheable=True] *** 2025-12-08 17:53:08,074 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.047) 0:00:59.330 ******* 2025-12-08 17:53:08,074 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.047) 0:00:59.329 ******* 2025-12-08 17:53:08,117 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:08,134 p=31902 u=zuul n=ansible | TASK [install_yamls : Show the env structure var=cifmw_install_yamls_environment] *** 2025-12-08 17:53:08,134 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.059) 0:00:59.390 ******* 2025-12-08 17:53:08,134 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.059) 0:00:59.388 ******* 2025-12-08 17:53:08,167 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_install_yamls_environment: CHECKOUT_FROM_OPENSTACK_REF: 'true' OPENSTACK_K8S_BRANCH: main OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm 2025-12-08 17:53:08,177 p=31902 u=zuul n=ansible | TASK [install_yamls : Show the env structure defaults var=cifmw_install_yamls_defaults] *** 2025-12-08 17:53:08,177 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.042) 0:00:59.432 ******* 2025-12-08 17:53:08,177 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.042) 0:00:59.431 ******* 2025-12-08 17:53:08,219 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_install_yamls_defaults: ADOPTED_EXTERNAL_NETWORK: 172.21.1.0/24 ADOPTED_INTERNALAPI_NETWORK: 172.17.1.0/24 ADOPTED_STORAGEMGMT_NETWORK: 172.20.1.0/24 ADOPTED_STORAGE_NETWORK: 172.18.1.0/24 ADOPTED_TENANT_NETWORK: 172.9.1.0/24 ANSIBLEEE: config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_BRANCH: main ANSIBLEEE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-operator-index:latest ANSIBLEEE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/kuttl-test.yaml ANSIBLEEE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/test/kuttl/tests ANSIBLEEE_KUTTL_NAMESPACE: ansibleee-kuttl-tests ANSIBLEEE_REPO: https://github.com/openstack-k8s-operators/openstack-ansibleee-operator ANSIBLEE_COMMIT_HASH: '' BARBICAN: config/samples/barbican_v1beta1_barbican.yaml BARBICAN_BRANCH: main BARBICAN_COMMIT_HASH: '' BARBICAN_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/config/samples/barbican_v1beta1_barbican.yaml BARBICAN_DEPL_IMG: unused BARBICAN_IMG: quay.io/openstack-k8s-operators/barbican-operator-index:latest BARBICAN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/kuttl-test.yaml BARBICAN_KUTTL_DIR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/test/kuttl/tests BARBICAN_KUTTL_NAMESPACE: barbican-kuttl-tests BARBICAN_REPO: https://github.com/openstack-k8s-operators/barbican-operator.git BARBICAN_SERVICE_ENABLED: 'true' BARBICAN_SIMPLE_CRYPTO_ENCRYPTION_KEY: sEFmdFjDUqRM2VemYslV5yGNWjokioJXsg8Nrlc3drU= BAREMETAL_BRANCH: main BAREMETAL_COMMIT_HASH: '' BAREMETAL_IMG: quay.io/openstack-k8s-operators/openstack-baremetal-operator-index:latest BAREMETAL_OS_CONTAINER_IMG: '' BAREMETAL_OS_IMG: '' BAREMETAL_REPO: https://github.com/openstack-k8s-operators/openstack-baremetal-operator.git BAREMETAL_TIMEOUT: 20m BASH_IMG: quay.io/openstack-k8s-operators/bash:latest BGP_ASN: '64999' BGP_LEAF_1: 100.65.4.1 BGP_LEAF_2: 100.64.4.1 BGP_OVN_ROUTING: 'false' BGP_PEER_ASN: '64999' BGP_SOURCE_IP: 172.30.4.2 BGP_SOURCE_IP6: f00d:f00d:f00d:f00d:f00d:f00d:f00d:42 BMAAS_BRIDGE_IPV4_PREFIX: 172.20.1.2/24 BMAAS_BRIDGE_IPV6_PREFIX: fd00:bbbb::2/64 BMAAS_INSTANCE_DISK_SIZE: '20' BMAAS_INSTANCE_MEMORY: '4096' BMAAS_INSTANCE_NAME_PREFIX: crc-bmaas BMAAS_INSTANCE_NET_MODEL: virtio BMAAS_INSTANCE_OS_VARIANT: centos-stream9 BMAAS_INSTANCE_VCPUS: '2' BMAAS_INSTANCE_VIRT_TYPE: kvm BMAAS_IPV4: 'true' BMAAS_IPV6: 'false' BMAAS_LIBVIRT_USER: sushyemu BMAAS_METALLB_ADDRESS_POOL: 172.20.1.64/26 BMAAS_METALLB_POOL_NAME: baremetal BMAAS_NETWORK_IPV4_PREFIX: 172.20.1.1/24 BMAAS_NETWORK_IPV6_PREFIX: fd00:bbbb::1/64 BMAAS_NETWORK_NAME: crc-bmaas BMAAS_NODE_COUNT: '1' BMAAS_OCP_INSTANCE_NAME: crc BMAAS_REDFISH_PASSWORD: password BMAAS_REDFISH_USERNAME: admin BMAAS_ROUTE_LIBVIRT_NETWORKS: crc-bmaas,crc,default BMAAS_SUSHY_EMULATOR_DRIVER: libvirt BMAAS_SUSHY_EMULATOR_IMAGE: quay.io/metal3-io/sushy-tools:latest BMAAS_SUSHY_EMULATOR_NAMESPACE: sushy-emulator BMAAS_SUSHY_EMULATOR_OS_CLIENT_CONFIG_FILE: /etc/openstack/clouds.yaml BMAAS_SUSHY_EMULATOR_OS_CLOUD: openstack BMH_NAMESPACE: openstack BMO_BRANCH: release-0.9 BMO_CLEANUP: 'true' BMO_COMMIT_HASH: '' BMO_IPA_BRANCH: stable/2024.1 BMO_IRONIC_HOST: 192.168.122.10 BMO_PROVISIONING_INTERFACE: '' BMO_REPO: https://github.com/metal3-io/baremetal-operator BMO_SETUP: '' BMO_SETUP_ROUTE_REPLACE: 'true' BM_CTLPLANE_INTERFACE: enp1s0 BM_INSTANCE_MEMORY: '8192' BM_INSTANCE_NAME_PREFIX: edpm-compute-baremetal BM_INSTANCE_NAME_SUFFIX: '0' BM_NETWORK_NAME: default BM_NODE_COUNT: '1' BM_ROOT_PASSWORD: '' BM_ROOT_PASSWORD_SECRET: '' CEILOMETER_CENTRAL_DEPL_IMG: unused CEILOMETER_NOTIFICATION_DEPL_IMG: unused CEPH_BRANCH: release-1.15 CEPH_CLIENT: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/toolbox.yaml CEPH_COMMON: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/common.yaml CEPH_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/cluster-test.yaml CEPH_CRDS: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/crds.yaml CEPH_IMG: quay.io/ceph/demo:latest-squid CEPH_OP: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/operator-openshift.yaml CEPH_REPO: https://github.com/rook/rook.git CERTMANAGER_TIMEOUT: 300s CHECKOUT_FROM_OPENSTACK_REF: 'true' CINDER: config/samples/cinder_v1beta1_cinder.yaml CINDERAPI_DEPL_IMG: unused CINDERBKP_DEPL_IMG: unused CINDERSCH_DEPL_IMG: unused CINDERVOL_DEPL_IMG: unused CINDER_BRANCH: main CINDER_COMMIT_HASH: '' CINDER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/config/samples/cinder_v1beta1_cinder.yaml CINDER_IMG: 
quay.io/openstack-k8s-operators/cinder-operator-index:latest CINDER_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/kuttl-test.yaml CINDER_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/test/kuttl/tests CINDER_KUTTL_NAMESPACE: cinder-kuttl-tests CINDER_REPO: https://github.com/openstack-k8s-operators/cinder-operator.git CLEANUP_DIR_CMD: rm -Rf CRC_BGP_NIC_1_MAC: '52:54:00:11:11:11' CRC_BGP_NIC_2_MAC: '52:54:00:11:11:12' CRC_HTTPS_PROXY: '' CRC_HTTP_PROXY: '' CRC_STORAGE_NAMESPACE: crc-storage CRC_STORAGE_RETRIES: '3' CRC_URL: '''https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/crc/latest/crc-linux-amd64.tar.xz''' CRC_VERSION: latest DATAPLANE_ANSIBLE_SECRET: dataplane-ansible-ssh-private-key-secret DATAPLANE_ANSIBLE_USER: '' DATAPLANE_COMPUTE_IP: 192.168.122.100 DATAPLANE_CONTAINER_PREFIX: openstack DATAPLANE_CONTAINER_TAG: current-podified DATAPLANE_CUSTOM_SERVICE_RUNNER_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest DATAPLANE_DEFAULT_GW: 192.168.122.1 DATAPLANE_EXTRA_NOVA_CONFIG_FILE: /dev/null DATAPLANE_GROWVOLS_ARGS: /=8GB /tmp=1GB /home=1GB /var=100% DATAPLANE_KUSTOMIZE_SCENARIO: preprovisioned DATAPLANE_NETWORKER_IP: 192.168.122.200 DATAPLANE_NETWORK_INTERFACE_NAME: eth0 DATAPLANE_NOVA_NFS_PATH: '' DATAPLANE_NTP_SERVER: pool.ntp.org DATAPLANE_PLAYBOOK: osp.edpm.download_cache DATAPLANE_REGISTRY_URL: quay.io/podified-antelope-centos9 DATAPLANE_RUNNER_IMG: '' DATAPLANE_SERVER_ROLE: compute DATAPLANE_SSHD_ALLOWED_RANGES: '[''192.168.122.0/24'']' DATAPLANE_TIMEOUT: 30m DATAPLANE_TLS_ENABLED: 'true' DATAPLANE_TOTAL_NETWORKER_NODES: '1' DATAPLANE_TOTAL_NODES: '1' DBSERVICE: galera DESIGNATE: config/samples/designate_v1beta1_designate.yaml DESIGNATE_BRANCH: main DESIGNATE_COMMIT_HASH: '' DESIGNATE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/config/samples/designate_v1beta1_designate.yaml DESIGNATE_IMG: quay.io/openstack-k8s-operators/designate-operator-index:latest DESIGNATE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/kuttl-test.yaml DESIGNATE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/test/kuttl/tests DESIGNATE_KUTTL_NAMESPACE: designate-kuttl-tests DESIGNATE_REPO: https://github.com/openstack-k8s-operators/designate-operator.git DNSDATA: config/samples/network_v1beta1_dnsdata.yaml DNSDATA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsdata.yaml DNSMASQ: config/samples/network_v1beta1_dnsmasq.yaml DNSMASQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsmasq.yaml DNS_DEPL_IMG: unused DNS_DOMAIN: localdomain DOWNLOAD_TOOLS_SELECTION: all EDPM_ATTACH_EXTNET: 'true' EDPM_COMPUTE_ADDITIONAL_HOST_ROUTES: '''[]''' EDPM_COMPUTE_ADDITIONAL_NETWORKS: '''[]''' EDPM_COMPUTE_CELLS: '1' EDPM_COMPUTE_CEPH_ENABLED: 'true' EDPM_COMPUTE_CEPH_NOVA: 'true' EDPM_COMPUTE_DHCP_AGENT_ENABLED: 'true' EDPM_COMPUTE_SRIOV_ENABLED: 'true' EDPM_COMPUTE_SUFFIX: '0' EDPM_CONFIGURE_DEFAULT_ROUTE: 'true' EDPM_CONFIGURE_HUGEPAGES: 'false' EDPM_CONFIGURE_NETWORKING: 'true' EDPM_FIRSTBOOT_EXTRA: /tmp/edpm-firstboot-extra EDPM_NETWORKER_SUFFIX: '0' EDPM_TOTAL_NETWORKERS: '1' EDPM_TOTAL_NODES: '1' GALERA_REPLICAS: '' GENERATE_SSH_KEYS: 'true' GIT_CLONE_OPTS: '' GLANCE: config/samples/glance_v1beta1_glance.yaml GLANCEAPI_DEPL_IMG: unused 
GLANCE_BRANCH: main GLANCE_COMMIT_HASH: '' GLANCE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/config/samples/glance_v1beta1_glance.yaml GLANCE_IMG: quay.io/openstack-k8s-operators/glance-operator-index:latest GLANCE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/kuttl-test.yaml GLANCE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/test/kuttl/tests GLANCE_KUTTL_NAMESPACE: glance-kuttl-tests GLANCE_REPO: https://github.com/openstack-k8s-operators/glance-operator.git HEAT: config/samples/heat_v1beta1_heat.yaml HEATAPI_DEPL_IMG: unused HEATCFNAPI_DEPL_IMG: unused HEATENGINE_DEPL_IMG: unused HEAT_AUTH_ENCRYPTION_KEY: 767c3ed056cbaa3b9dfedb8c6f825bf0 HEAT_BRANCH: main HEAT_COMMIT_HASH: '' HEAT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/config/samples/heat_v1beta1_heat.yaml HEAT_IMG: quay.io/openstack-k8s-operators/heat-operator-index:latest HEAT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/kuttl-test.yaml HEAT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/test/kuttl/tests HEAT_KUTTL_NAMESPACE: heat-kuttl-tests HEAT_REPO: https://github.com/openstack-k8s-operators/heat-operator.git HEAT_SERVICE_ENABLED: 'true' HORIZON: config/samples/horizon_v1beta1_horizon.yaml HORIZON_BRANCH: main HORIZON_COMMIT_HASH: '' HORIZON_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/config/samples/horizon_v1beta1_horizon.yaml HORIZON_DEPL_IMG: unused HORIZON_IMG: quay.io/openstack-k8s-operators/horizon-operator-index:latest HORIZON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/kuttl-test.yaml HORIZON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/test/kuttl/tests HORIZON_KUTTL_NAMESPACE: horizon-kuttl-tests HORIZON_REPO: https://github.com/openstack-k8s-operators/horizon-operator.git INFRA_BRANCH: main INFRA_COMMIT_HASH: '' INFRA_IMG: quay.io/openstack-k8s-operators/infra-operator-index:latest INFRA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/kuttl-test.yaml INFRA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/test/kuttl/tests INFRA_KUTTL_NAMESPACE: infra-kuttl-tests INFRA_REPO: https://github.com/openstack-k8s-operators/infra-operator.git INSTALL_CERT_MANAGER: 'true' INSTALL_NMSTATE: true || false INSTALL_NNCP: true || false INTERNALAPI_HOST_ROUTES: '' IPV6_LAB_IPV4_NETWORK_IPADDRESS: 172.30.0.1/24 IPV6_LAB_IPV6_NETWORK_IPADDRESS: fd00:abcd:abcd:fc00::1/64 IPV6_LAB_LIBVIRT_STORAGE_POOL: default IPV6_LAB_MANAGE_FIREWALLD: 'true' IPV6_LAB_NAT64_HOST_IPV4: 172.30.0.2/24 IPV6_LAB_NAT64_HOST_IPV6: fd00:abcd:abcd:fc00::2/64 IPV6_LAB_NAT64_INSTANCE_NAME: nat64-router IPV6_LAB_NAT64_IPV6_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_NAT64_TAYGA_DYNAMIC_POOL: 192.168.255.0/24 IPV6_LAB_NAT64_TAYGA_IPV4: 192.168.255.1 IPV6_LAB_NAT64_TAYGA_IPV6: fd00:abcd:abcd:fc00::3 IPV6_LAB_NAT64_TAYGA_IPV6_PREFIX: fd00:abcd:abcd:fcff::/96 IPV6_LAB_NAT64_UPDATE_PACKAGES: 'false' IPV6_LAB_NETWORK_NAME: nat64 IPV6_LAB_SNO_CLUSTER_NETWORK: fd00:abcd:0::/48 IPV6_LAB_SNO_HOST_IP: fd00:abcd:abcd:fc00::11 IPV6_LAB_SNO_HOST_PREFIX: '64' IPV6_LAB_SNO_INSTANCE_NAME: sno IPV6_LAB_SNO_MACHINE_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_SNO_OCP_MIRROR_URL: https://mirror.openshift.com/pub/openshift-v4/clients/ocp IPV6_LAB_SNO_OCP_VERSION: latest-4.14 
IPV6_LAB_SNO_SERVICE_NETWORK: fd00:abcd:abcd:fc03::/112 IPV6_LAB_SSH_PUB_KEY: /home/zuul/.ssh/id_rsa.pub IPV6_LAB_WORK_DIR: /home/zuul/.ipv6lab IRONIC: config/samples/ironic_v1beta1_ironic.yaml IRONICAPI_DEPL_IMG: unused IRONICCON_DEPL_IMG: unused IRONICINS_DEPL_IMG: unused IRONICNAG_DEPL_IMG: unused IRONICPXE_DEPL_IMG: unused IRONIC_BRANCH: main IRONIC_COMMIT_HASH: '' IRONIC_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/config/samples/ironic_v1beta1_ironic.yaml IRONIC_IMAGE: quay.io/metal3-io/ironic IRONIC_IMAGE_TAG: release-24.1 IRONIC_IMG: quay.io/openstack-k8s-operators/ironic-operator-index:latest IRONIC_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/kuttl-test.yaml IRONIC_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/test/kuttl/tests IRONIC_KUTTL_NAMESPACE: ironic-kuttl-tests IRONIC_REPO: https://github.com/openstack-k8s-operators/ironic-operator.git KEYSTONEAPI: config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_DEPL_IMG: unused KEYSTONE_BRANCH: main KEYSTONE_COMMIT_HASH: '' KEYSTONE_FEDERATION_CLIENT_SECRET: COX8bmlKAWn56XCGMrKQJj7dgHNAOl6f KEYSTONE_FEDERATION_CRYPTO_PASSPHRASE: openstack KEYSTONE_IMG: quay.io/openstack-k8s-operators/keystone-operator-index:latest KEYSTONE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/kuttl-test.yaml KEYSTONE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/test/kuttl/tests KEYSTONE_KUTTL_NAMESPACE: keystone-kuttl-tests KEYSTONE_REPO: https://github.com/openstack-k8s-operators/keystone-operator.git KUBEADMIN_PWD: '12345678' LIBVIRT_SECRET: libvirt-secret LOKI_DEPLOY_MODE: openshift-network LOKI_DEPLOY_NAMESPACE: netobserv LOKI_DEPLOY_SIZE: 1x.demo LOKI_NAMESPACE: openshift-operators-redhat LOKI_OPERATOR_GROUP: openshift-operators-redhat-loki LOKI_SUBSCRIPTION: loki-operator LVMS_CR: '1' MANILA: config/samples/manila_v1beta1_manila.yaml MANILAAPI_DEPL_IMG: unused MANILASCH_DEPL_IMG: unused MANILASHARE_DEPL_IMG: unused MANILA_BRANCH: main MANILA_COMMIT_HASH: '' MANILA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/config/samples/manila_v1beta1_manila.yaml MANILA_IMG: quay.io/openstack-k8s-operators/manila-operator-index:latest MANILA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/kuttl-test.yaml MANILA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/test/kuttl/tests MANILA_KUTTL_NAMESPACE: manila-kuttl-tests MANILA_REPO: https://github.com/openstack-k8s-operators/manila-operator.git MANILA_SERVICE_ENABLED: 'true' MARIADB: config/samples/mariadb_v1beta1_galera.yaml MARIADB_BRANCH: main MARIADB_CHAINSAW_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/config.yaml MARIADB_CHAINSAW_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/tests MARIADB_CHAINSAW_NAMESPACE: mariadb-chainsaw-tests MARIADB_COMMIT_HASH: '' MARIADB_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/config/samples/mariadb_v1beta1_galera.yaml MARIADB_DEPL_IMG: unused MARIADB_IMG: quay.io/openstack-k8s-operators/mariadb-operator-index:latest MARIADB_KUTTL_CONF: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/kuttl-test.yaml MARIADB_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/kuttl/tests MARIADB_KUTTL_NAMESPACE: mariadb-kuttl-tests MARIADB_REPO: https://github.com/openstack-k8s-operators/mariadb-operator.git MEMCACHED: config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_DEPL_IMG: unused METADATA_SHARED_SECRET: '1234567842' METALLB_IPV6_POOL: fd00:aaaa::80-fd00:aaaa::90 METALLB_POOL: 192.168.122.80-192.168.122.90 MICROSHIFT: '0' NAMESPACE: openstack NETCONFIG: config/samples/network_v1beta1_netconfig.yaml NETCONFIG_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_netconfig.yaml NETCONFIG_DEPL_IMG: unused NETOBSERV_DEPLOY_NAMESPACE: netobserv NETOBSERV_NAMESPACE: openshift-netobserv-operator NETOBSERV_OPERATOR_GROUP: openshift-netobserv-operator-net NETOBSERV_SUBSCRIPTION: netobserv-operator NETWORK_BGP: 'false' NETWORK_DESIGNATE_ADDRESS_PREFIX: 172.28.0 NETWORK_DESIGNATE_EXT_ADDRESS_PREFIX: 172.50.0 NETWORK_INTERNALAPI_ADDRESS_PREFIX: 172.17.0 NETWORK_ISOLATION: 'true' NETWORK_ISOLATION_INSTANCE_NAME: crc NETWORK_ISOLATION_IPV4: 'true' NETWORK_ISOLATION_IPV4_ADDRESS: 172.16.1.1/24 NETWORK_ISOLATION_IPV4_NAT: 'true' NETWORK_ISOLATION_IPV6: 'false' NETWORK_ISOLATION_IPV6_ADDRESS: fd00:aaaa::1/64 NETWORK_ISOLATION_IP_ADDRESS: 192.168.122.10 NETWORK_ISOLATION_MAC: '52:54:00:11:11:10' NETWORK_ISOLATION_NETWORK_NAME: net-iso NETWORK_ISOLATION_NET_NAME: default NETWORK_ISOLATION_USE_DEFAULT_NETWORK: 'true' NETWORK_MTU: '1500' NETWORK_STORAGEMGMT_ADDRESS_PREFIX: 172.20.0 NETWORK_STORAGE_ADDRESS_PREFIX: 172.18.0 NETWORK_STORAGE_MACVLAN: '' NETWORK_TENANT_ADDRESS_PREFIX: 172.19.0 NETWORK_VLAN_START: '20' NETWORK_VLAN_STEP: '1' NEUTRONAPI: config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_DEPL_IMG: unused NEUTRON_BRANCH: main NEUTRON_COMMIT_HASH: '' NEUTRON_IMG: quay.io/openstack-k8s-operators/neutron-operator-index:latest NEUTRON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/kuttl-test.yaml NEUTRON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/test/kuttl/tests NEUTRON_KUTTL_NAMESPACE: neutron-kuttl-tests NEUTRON_REPO: https://github.com/openstack-k8s-operators/neutron-operator.git NFS_HOME: /home/nfs NMSTATE_NAMESPACE: openshift-nmstate NMSTATE_OPERATOR_GROUP: openshift-nmstate-tn6k8 NMSTATE_SUBSCRIPTION: kubernetes-nmstate-operator NNCP_ADDITIONAL_HOST_ROUTES: '' NNCP_BGP_1_INTERFACE: enp7s0 NNCP_BGP_1_IP_ADDRESS: 100.65.4.2 NNCP_BGP_2_INTERFACE: enp8s0 NNCP_BGP_2_IP_ADDRESS: 100.64.4.2 NNCP_BRIDGE: ospbr NNCP_CLEANUP_TIMEOUT: 120s NNCP_CTLPLANE_IPV6_ADDRESS_PREFIX: 'fd00:aaaa::' NNCP_CTLPLANE_IPV6_ADDRESS_SUFFIX: '10' NNCP_CTLPLANE_IP_ADDRESS_PREFIX: 192.168.122 NNCP_CTLPLANE_IP_ADDRESS_SUFFIX: '10' NNCP_DNS_SERVER: 192.168.122.1 NNCP_DNS_SERVER_IPV6: fd00:aaaa::1 NNCP_GATEWAY: 192.168.122.1 NNCP_GATEWAY_IPV6: fd00:aaaa::1 NNCP_INTERFACE: enp6s0 NNCP_NODES: '' NNCP_TIMEOUT: 240s NOVA: config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_BRANCH: main NOVA_COMMIT_HASH: '' NOVA_CR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/nova-operator/config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_IMG: quay.io/openstack-k8s-operators/nova-operator-index:latest NOVA_REPO: https://github.com/openstack-k8s-operators/nova-operator.git NUMBER_OF_INSTANCES: '1' OCP_NETWORK_NAME: crc OCTAVIA: config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_BRANCH: main OCTAVIA_COMMIT_HASH: '' OCTAVIA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_IMG: quay.io/openstack-k8s-operators/octavia-operator-index:latest OCTAVIA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/kuttl-test.yaml OCTAVIA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/test/kuttl/tests OCTAVIA_KUTTL_NAMESPACE: octavia-kuttl-tests OCTAVIA_REPO: https://github.com/openstack-k8s-operators/octavia-operator.git OKD: 'false' OPENSTACK_BRANCH: main OPENSTACK_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-bundle:latest OPENSTACK_COMMIT_HASH: '' OPENSTACK_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_CRDS_DIR: openstack_crds OPENSTACK_CTLPLANE: config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_IMG: quay.io/openstack-k8s-operators/openstack-operator-index:latest OPENSTACK_K8S_BRANCH: main OPENSTACK_K8S_TAG: latest OPENSTACK_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/kuttl-test.yaml OPENSTACK_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/test/kuttl/tests OPENSTACK_KUTTL_NAMESPACE: openstack-kuttl-tests OPENSTACK_NEUTRON_CUSTOM_CONF: '' OPENSTACK_REPO: https://github.com/openstack-k8s-operators/openstack-operator.git OPENSTACK_STORAGE_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-storage-bundle:latest OPERATOR_BASE_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator OPERATOR_CHANNEL: '' OPERATOR_NAMESPACE: openstack-operators OPERATOR_SOURCE: '' OPERATOR_SOURCE_NAMESPACE: '' OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm OVNCONTROLLER: config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_NMAP: 'true' OVNDBS: config/samples/ovn_v1beta1_ovndbcluster.yaml OVNDBS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovndbcluster.yaml OVNNORTHD: config/samples/ovn_v1beta1_ovnnorthd.yaml OVNNORTHD_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovnnorthd.yaml OVN_BRANCH: main OVN_COMMIT_HASH: '' OVN_IMG: quay.io/openstack-k8s-operators/ovn-operator-index:latest OVN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/kuttl-test.yaml OVN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/test/kuttl/tests OVN_KUTTL_NAMESPACE: ovn-kuttl-tests OVN_REPO: https://github.com/openstack-k8s-operators/ovn-operator.git PASSWORD: '12345678' PLACEMENTAPI: config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_CR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_DEPL_IMG: unused PLACEMENT_BRANCH: main PLACEMENT_COMMIT_HASH: '' PLACEMENT_IMG: quay.io/openstack-k8s-operators/placement-operator-index:latest PLACEMENT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/kuttl-test.yaml PLACEMENT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/test/kuttl/tests PLACEMENT_KUTTL_NAMESPACE: placement-kuttl-tests PLACEMENT_REPO: https://github.com/openstack-k8s-operators/placement-operator.git PULL_SECRET: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/pull-secret.txt RABBITMQ: docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_BRANCH: patches RABBITMQ_COMMIT_HASH: '' RABBITMQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rabbitmq-operator/docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_DEPL_IMG: unused RABBITMQ_IMG: quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest RABBITMQ_REPO: https://github.com/openstack-k8s-operators/rabbitmq-cluster-operator.git REDHAT_OPERATORS: 'false' REDIS: config/samples/redis_v1beta1_redis.yaml REDIS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator-redis/config/samples/redis_v1beta1_redis.yaml REDIS_DEPL_IMG: unused RH_REGISTRY_PWD: '' RH_REGISTRY_USER: '' SECRET: osp-secret SG_CORE_DEPL_IMG: unused STANDALONE_COMPUTE_DRIVER: libvirt STANDALONE_EXTERNAL_NET_PREFFIX: 172.21.0 STANDALONE_INTERNALAPI_NET_PREFIX: 172.17.0 STANDALONE_STORAGEMGMT_NET_PREFIX: 172.20.0 STANDALONE_STORAGE_NET_PREFIX: 172.18.0 STANDALONE_TENANT_NET_PREFIX: 172.19.0 STORAGEMGMT_HOST_ROUTES: '' STORAGE_CLASS: local-storage STORAGE_HOST_ROUTES: '' SWIFT: config/samples/swift_v1beta1_swift.yaml SWIFT_BRANCH: main SWIFT_COMMIT_HASH: '' SWIFT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/config/samples/swift_v1beta1_swift.yaml SWIFT_IMG: quay.io/openstack-k8s-operators/swift-operator-index:latest SWIFT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/kuttl-test.yaml SWIFT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests SWIFT_KUTTL_NAMESPACE: swift-kuttl-tests SWIFT_REPO: https://github.com/openstack-k8s-operators/swift-operator.git TELEMETRY: config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_BRANCH: main TELEMETRY_COMMIT_HASH: '' TELEMETRY_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_IMG: quay.io/openstack-k8s-operators/telemetry-operator-index:latest TELEMETRY_KUTTL_BASEDIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator TELEMETRY_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/kuttl-test.yaml TELEMETRY_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/test/kuttl/suites TELEMETRY_KUTTL_NAMESPACE: telemetry-kuttl-tests TELEMETRY_KUTTL_RELPATH: test/kuttl/suites TELEMETRY_REPO: https://github.com/openstack-k8s-operators/telemetry-operator.git TENANT_HOST_ROUTES: '' TIMEOUT: 300s TLS_ENABLED: 'false' tripleo_deploy: 'export REGISTRY_PWD:' 2025-12-08 17:53:08,230 p=31902 u=zuul n=ansible | TASK [install_yamls : Generate make targets install_yamls_path={{ cifmw_install_yamls_repo }}, output_directory={{ 
cifmw_install_yamls_tasks_out }}] *** 2025-12-08 17:53:08,230 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.053) 0:00:59.485 ******* 2025-12-08 17:53:08,230 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.053) 0:00:59.484 ******* 2025-12-08 17:53:08,544 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:08,552 p=31902 u=zuul n=ansible | TASK [install_yamls : Debug generate_make module var=cifmw_generate_makes] ***** 2025-12-08 17:53:08,552 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.322) 0:00:59.808 ******* 2025-12-08 17:53:08,552 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.322) 0:00:59.806 ******* 2025-12-08 17:53:08,582 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_generate_makes: changed: false debug: /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/Makefile: - all - help - cleanup - deploy_cleanup - wait - crc_storage - crc_storage_cleanup - crc_storage_release - crc_storage_with_retries - crc_storage_cleanup_with_retries - operator_namespace - namespace - namespace_cleanup - input - input_cleanup - crc_bmo_setup - crc_bmo_cleanup - openstack_prep - openstack - openstack_wait - openstack_init - openstack_cleanup - openstack_repo - openstack_deploy_prep - openstack_deploy - openstack_wait_deploy - openstack_deploy_cleanup - openstack_update_run - update_services - update_system - openstack_patch_version - edpm_deploy_generate_keys - edpm_patch_ansible_runner_image - edpm_deploy_prep - edpm_deploy_cleanup - edpm_deploy - edpm_deploy_baremetal_prep - edpm_deploy_baremetal - edpm_wait_deploy_baremetal - edpm_wait_deploy - edpm_register_dns - edpm_nova_discover_hosts - openstack_crds - openstack_crds_cleanup - edpm_deploy_networker_prep - edpm_deploy_networker_cleanup - edpm_deploy_networker - infra_prep - infra - infra_cleanup - dns_deploy_prep - dns_deploy - dns_deploy_cleanup - netconfig_deploy_prep - netconfig_deploy - netconfig_deploy_cleanup - memcached_deploy_prep - memcached_deploy - memcached_deploy_cleanup - keystone_prep - keystone - keystone_cleanup - keystone_deploy_prep - keystone_deploy - keystone_deploy_cleanup - barbican_prep - barbican - barbican_cleanup - barbican_deploy_prep - barbican_deploy - barbican_deploy_validate - barbican_deploy_cleanup - mariadb - mariadb_cleanup - mariadb_deploy_prep - mariadb_deploy - mariadb_deploy_cleanup - placement_prep - placement - placement_cleanup - placement_deploy_prep - placement_deploy - placement_deploy_cleanup - glance_prep - glance - glance_cleanup - glance_deploy_prep - glance_deploy - glance_deploy_cleanup - ovn_prep - ovn - ovn_cleanup - ovn_deploy_prep - ovn_deploy - ovn_deploy_cleanup - neutron_prep - neutron - neutron_cleanup - neutron_deploy_prep - neutron_deploy - neutron_deploy_cleanup - cinder_prep - cinder - cinder_cleanup - cinder_deploy_prep - cinder_deploy - cinder_deploy_cleanup - rabbitmq_prep - rabbitmq - rabbitmq_cleanup - rabbitmq_deploy_prep - rabbitmq_deploy - rabbitmq_deploy_cleanup - ironic_prep - ironic - ironic_cleanup - ironic_deploy_prep - ironic_deploy - ironic_deploy_cleanup - octavia_prep - octavia - octavia_cleanup - octavia_deploy_prep - octavia_deploy - octavia_deploy_cleanup - designate_prep - designate - designate_cleanup - designate_deploy_prep - designate_deploy - designate_deploy_cleanup - nova_prep - nova - nova_cleanup - nova_deploy_prep - nova_deploy - nova_deploy_cleanup - mariadb_kuttl_run - mariadb_kuttl - kuttl_db_prep - kuttl_db_cleanup - 
kuttl_common_prep - kuttl_common_cleanup - keystone_kuttl_run - keystone_kuttl - barbican_kuttl_run - barbican_kuttl - placement_kuttl_run - placement_kuttl - cinder_kuttl_run - cinder_kuttl - neutron_kuttl_run - neutron_kuttl - octavia_kuttl_run - octavia_kuttl - designate_kuttl - designate_kuttl_run - ovn_kuttl_run - ovn_kuttl - infra_kuttl_run - infra_kuttl - ironic_kuttl_run - ironic_kuttl - ironic_kuttl_crc - heat_kuttl_run - heat_kuttl - heat_kuttl_crc - ansibleee_kuttl_run - ansibleee_kuttl_cleanup - ansibleee_kuttl_prep - ansibleee_kuttl - glance_kuttl_run - glance_kuttl - manila_kuttl_run - manila_kuttl - swift_kuttl_run - swift_kuttl - horizon_kuttl_run - horizon_kuttl - openstack_kuttl_run - openstack_kuttl - mariadb_chainsaw_run - mariadb_chainsaw - horizon_prep - horizon - horizon_cleanup - horizon_deploy_prep - horizon_deploy - horizon_deploy_cleanup - heat_prep - heat - heat_cleanup - heat_deploy_prep - heat_deploy - heat_deploy_cleanup - ansibleee_prep - ansibleee - ansibleee_cleanup - baremetal_prep - baremetal - baremetal_cleanup - ceph_help - ceph - ceph_cleanup - rook_prep - rook - rook_deploy_prep - rook_deploy - rook_crc_disk - rook_cleanup - lvms - nmstate - nncp - nncp_cleanup - netattach - netattach_cleanup - metallb - metallb_config - metallb_config_cleanup - metallb_cleanup - loki - loki_cleanup - loki_deploy - loki_deploy_cleanup - netobserv - netobserv_cleanup - netobserv_deploy - netobserv_deploy_cleanup - manila_prep - manila - manila_cleanup - manila_deploy_prep - manila_deploy - manila_deploy_cleanup - telemetry_prep - telemetry - telemetry_cleanup - telemetry_deploy_prep - telemetry_deploy - telemetry_deploy_cleanup - telemetry_kuttl_run - telemetry_kuttl - swift_prep - swift - swift_cleanup - swift_deploy_prep - swift_deploy - swift_deploy_cleanup - certmanager - certmanager_cleanup - validate_marketplace - redis_deploy_prep - redis_deploy - redis_deploy_cleanup - set_slower_etcd_profile /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup/Makefile: - help - download_tools - nfs - nfs_cleanup - crc - crc_cleanup - crc_scrub - crc_attach_default_interface - crc_attach_default_interface_cleanup - ipv6_lab_network - ipv6_lab_network_cleanup - ipv6_lab_nat64_router - ipv6_lab_nat64_router_cleanup - ipv6_lab_sno - ipv6_lab_sno_cleanup - ipv6_lab - ipv6_lab_cleanup - attach_default_interface - attach_default_interface_cleanup - network_isolation_bridge - network_isolation_bridge_cleanup - edpm_baremetal_compute - edpm_compute - edpm_compute_bootc - edpm_ansible_runner - edpm_computes_bgp - edpm_compute_repos - edpm_compute_cleanup - edpm_networker - edpm_networker_cleanup - edpm_deploy_instance - tripleo_deploy - standalone_deploy - standalone_sync - standalone - standalone_cleanup - standalone_snapshot - standalone_revert - cifmw_prepare - cifmw_cleanup - bmaas_network - bmaas_network_cleanup - bmaas_route_crc_and_crc_bmaas_networks - bmaas_route_crc_and_crc_bmaas_networks_cleanup - bmaas_crc_attach_network - bmaas_crc_attach_network_cleanup - bmaas_crc_baremetal_bridge - bmaas_crc_baremetal_bridge_cleanup - bmaas_baremetal_net_nad - bmaas_baremetal_net_nad_cleanup - bmaas_metallb - bmaas_metallb_cleanup - bmaas_virtual_bms - bmaas_virtual_bms_cleanup - bmaas_sushy_emulator - bmaas_sushy_emulator_cleanup - bmaas_sushy_emulator_wait - bmaas_generate_nodes_yaml - bmaas - bmaas_cleanup failed: false success: true 2025-12-08 17:53:08,589 p=31902 u=zuul n=ansible | TASK [install_yamls : Create the install_yamls parameters file dest={{ 
cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml, content={{ { 'cifmw_install_yamls_environment': cifmw_install_yamls_environment, 'cifmw_install_yamls_defaults': cifmw_install_yamls_defaults } | to_nice_yaml }}, mode=0644] *** 2025-12-08 17:53:08,589 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.037) 0:00:59.845 ******* 2025-12-08 17:53:08,589 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.037) 0:00:59.843 ******* 2025-12-08 17:53:09,071 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:09,085 p=31902 u=zuul n=ansible | TASK [install_yamls : Create empty cifmw_install_yamls_environment if needed cifmw_install_yamls_environment={}] *** 2025-12-08 17:53:09,085 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.496) 0:01:00.341 ******* 2025-12-08 17:53:09,086 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.496) 0:01:00.340 ******* 2025-12-08 17:53:09,119 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:09,150 p=31902 u=zuul n=ansible | TASK [discover_latest_image : Get latest image url={{ cifmw_discover_latest_image_base_url }}, image_prefix={{ cifmw_discover_latest_image_qcow_prefix }}, images_file={{ cifmw_discover_latest_image_images_file }}] *** 2025-12-08 17:53:09,150 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.064) 0:01:00.406 ******* 2025-12-08 17:53:09,150 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.064) 0:01:00.405 ******* 2025-12-08 17:53:09,699 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:09,710 p=31902 u=zuul n=ansible | TASK [discover_latest_image : Export facts accordingly cifmw_discovered_image_name={{ discovered_image['data']['image_name'] }}, cifmw_discovered_image_url={{ discovered_image['data']['image_url'] }}, cifmw_discovered_hash={{ discovered_image['data']['hash'] }}, cifmw_discovered_hash_algorithm={{ discovered_image['data']['hash_algorithm'] }}, cacheable=True] *** 2025-12-08 17:53:09,710 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.559) 0:01:00.966 ******* 2025-12-08 17:53:09,710 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.559) 0:01:00.964 ******* 2025-12-08 17:53:09,737 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:09,750 p=31902 u=zuul n=ansible | TASK [Create artifacts with custom params mode=0644, dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/custom-params.yml, content={{ ci_framework_params | to_nice_yaml }}] *** 2025-12-08 17:53:09,750 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.040) 0:01:01.006 ******* 2025-12-08 17:53:09,750 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.040) 0:01:01.005 ******* 2025-12-08 17:53:10,211 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | PLAY RECAP ********************************************************************* 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | localhost : ok=43 changed=23 unreachable=0 failed=0 skipped=40 rescued=0 ignored=0 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:10 +0000 (0:00:00.483) 0:01:01.489 ******* 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | =============================================================================== 2025-12-08 
17:53:10,234 p=31902 u=zuul n=ansible | ci_setup : Install needed packages ------------------------------------- 27.32s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Initialize python venv and install requirements ------------ 8.49s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | ci_setup : Install openshift client ------------------------------------- 5.32s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Get repo-setup repository ---------------------------------- 2.25s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_ca : Update ca bundle ------------------------------------------- 1.65s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | Gathering Facts --------------------------------------------------------- 1.28s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | ci_setup : Manage directories ------------------------------------------- 1.21s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Make sure git-core package is installed -------------------- 0.99s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Install repo-setup package --------------------------------- 0.88s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Dump full hash in delorean.repo.md5 file ------------------- 0.72s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_yamls : Ensure directories exist -------------------------------- 0.67s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Run repo-setup --------------------------------------------- 0.65s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Ensure directories are present ----------------------------- 0.62s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_yamls : Get environment structure ------------------------------- 0.61s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | discover_latest_image : Get latest image -------------------------------- 0.56s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Run repo-setup-get-hash ------------------------------------ 0.53s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Cleanup existing metadata ---------------------------------- 0.50s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_yamls : Create the install_yamls parameters file ---------------- 0.50s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | Create artifacts with custom params ------------------------------------- 0.48s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | repo_setup : Remove existing repos from /etc/yum.repos.d directory ------ 0.47s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:10 +0000 (0:00:00.484) 0:01:01.489 ******* 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ci_setup --------------------------------------------------------------- 35.36s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | repo_setup ------------------------------------------------------------- 18.41s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | install_yamls ----------------------------------------------------------- 2.97s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | install_ca -------------------------------------------------------------- 2.15s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | gather_facts ------------------------------------------------------------ 1.28s 2025-12-08 17:53:10,235 p=31902 
u=zuul n=ansible | discover_latest_image --------------------------------------------------- 0.60s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ansible.builtin.copy ---------------------------------------------------- 0.48s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ansible.builtin.include_role -------------------------------------------- 0.12s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ansible.builtin.set_fact ------------------------------------------------ 0.08s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | total ------------------------------------------------------------------ 61.46s 2025-12-08 17:53:11,534 p=32717 u=zuul n=ansible | PLAY [Run pre_infra hooks] ***************************************************** 2025-12-08 17:53:11,565 p=32717 u=zuul n=ansible | TASK [run_hook : Assert parameters are valid quiet=True, that=['_list_hooks is not string', '_list_hooks is not mapping', '_list_hooks is iterable', '(hooks | default([])) is not string', '(hooks | default([])) is not mapping', '(hooks | default([])) is iterable']] *** 2025-12-08 17:53:11,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.046) 0:00:00.046 ******* 2025-12-08 17:53:11,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.046) 0:00:00.046 ******* 2025-12-08 17:53:11,640 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:11,651 p=32717 u=zuul n=ansible | TASK [run_hook : Assert single hooks are all mappings quiet=True, that=['_not_mapping_hooks | length == 0'], msg=All single hooks must be a list of mappings or a mapping.] *** 2025-12-08 17:53:11,651 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.085) 0:00:00.131 ******* 2025-12-08 17:53:11,651 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.085) 0:00:00.131 ******* 2025-12-08 17:53:11,739 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:11,749 p=32717 u=zuul n=ansible | TASK [run_hook : Loop on hooks for pre_infra _raw_params={{ hook.type }}.yml] *** 2025-12-08 17:53:11,749 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.098) 0:00:00.230 ******* 2025-12-08 17:53:11,749 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.098) 0:00:00.229 ******* 2025-12-08 17:53:11,825 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:11,867 p=32717 u=zuul n=ansible | PLAY [Prepare host virtualization] ********************************************* 2025-12-08 17:53:11,894 p=32717 u=zuul n=ansible | TASK [Load parameters files dir={{ cifmw_basedir }}/artifacts/parameters] ****** 2025-12-08 17:53:11,894 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.145) 0:00:00.375 ******* 2025-12-08 17:53:11,894 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.145) 0:00:00.374 ******* 2025-12-08 17:53:12,006 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:12,028 p=32717 u=zuul n=ansible | TASK [Ensure libvirt is present/configured name=libvirt_manager] *************** 2025-12-08 17:53:12,028 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.134) 0:00:00.509 ******* 2025-12-08 17:53:12,029 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.134) 0:00:00.509 ******* 2025-12-08 17:53:12,055 p=32717 u=zuul n=ansible | skipping: [localhost] 
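The run_hook assertions above only constrain the shape of the hook data: the variable must be an iterable of mappings (not a string or a bare mapping), and each entry needs a type key, because the role includes "{{ hook.type }}.yml" for every hook. A minimal sketch of a pre_infra hook list that would satisfy those checks is shown below; the variable name and the name/source fields are assumptions for illustration only and are not taken from this log.

    # hypothetical hook list; only the list-of-mappings shape and the `type` key are implied by the assertions above
    pre_infra:                         # assumed variable name
      - name: example-pre-infra-hook   # assumed descriptive field
        type: playbook                 # run_hook would include "playbook.yml" based on this value
        source: example_hook.yml       # assumed field pointing at the playbook to execute

In this run the "Loop on hooks for pre_infra" task reported skipping, so no hooks of this form were defined.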
2025-12-08 17:53:12,065 p=32717 u=zuul n=ansible | TASK [Prepare OpenShift provisioner node name=openshift_provisioner_node] ****** 2025-12-08 17:53:12,065 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.036) 0:00:00.546 ******* 2025-12-08 17:53:12,065 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.036) 0:00:00.545 ******* 2025-12-08 17:53:12,095 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,136 p=32717 u=zuul n=ansible | PLAY [Prepare the platform] **************************************************** 2025-12-08 17:53:12,170 p=32717 u=zuul n=ansible | TASK [Load parameters files dir={{ cifmw_basedir }}/artifacts/parameters] ****** 2025-12-08 17:53:12,170 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.104) 0:00:00.651 ******* 2025-12-08 17:53:12,170 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.104) 0:00:00.650 ******* 2025-12-08 17:53:12,219 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:12,230 p=32717 u=zuul n=ansible | TASK [networking_mapper : Check for Networking Environment Definition file existence path={{ cifmw_networking_mapper_networking_env_def_path }}] *** 2025-12-08 17:53:12,230 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.059) 0:00:00.711 ******* 2025-12-08 17:53:12,230 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.059) 0:00:00.710 ******* 2025-12-08 17:53:12,505 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:12,515 p=32717 u=zuul n=ansible | TASK [networking_mapper : Check for Networking Definition file existence that=['_net_env_def_stat.stat.exists'], msg=Ensure that the Networking Environment Definition file exists in {{ cifmw_networking_mapper_networking_env_def_path }}, quiet=True] *** 2025-12-08 17:53:12,515 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.285) 0:00:00.996 ******* 2025-12-08 17:53:12,515 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.285) 0:00:00.995 ******* 2025-12-08 17:53:12,535 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,545 p=32717 u=zuul n=ansible | TASK [networking_mapper : Load the Networking Definition from file path={{ cifmw_networking_mapper_networking_env_def_path }}] *** 2025-12-08 17:53:12,545 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.030) 0:00:01.026 ******* 2025-12-08 17:53:12,545 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.030) 0:00:01.025 ******* 2025-12-08 17:53:12,565 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,579 p=32717 u=zuul n=ansible | TASK [networking_mapper : Set cifmw_networking_env_definition is present cifmw_networking_env_definition={{ _net_env_def_slurp['content'] | b64decode | from_yaml }}, cacheable=True] *** 2025-12-08 17:53:12,579 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.033) 0:00:01.060 ******* 2025-12-08 17:53:12,579 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.033) 0:00:01.059 ******* 2025-12-08 17:53:12,599 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,625 p=32717 u=zuul n=ansible | TASK [Deploy OCP using Hive name=hive] ***************************************** 2025-12-08 17:53:12,625 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.046) 0:00:01.106 ******* 2025-12-08 17:53:12,626 p=32717 u=zuul n=ansible | Monday 08
December 2025 17:53:12 +0000 (0:00:00.046) 0:00:01.106 ******* 2025-12-08 17:53:12,644 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,654 p=32717 u=zuul n=ansible | TASK [Prepare CRC name=rhol_crc] *********************************************** 2025-12-08 17:53:12,654 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.028) 0:00:01.135 ******* 2025-12-08 17:53:12,654 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.028) 0:00:01.134 ******* 2025-12-08 17:53:12,676 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,691 p=32717 u=zuul n=ansible | TASK [Deploy OpenShift cluster using dev-scripts name=devscripts] ************** 2025-12-08 17:53:12,691 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.037) 0:00:01.172 ******* 2025-12-08 17:53:12,691 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.037) 0:00:01.171 ******* 2025-12-08 17:53:12,715 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,725 p=32717 u=zuul n=ansible | TASK [openshift_login : Ensure output directory exists path={{ cifmw_openshift_login_basedir }}/artifacts, state=directory, mode=0755] *** 2025-12-08 17:53:12,725 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.034) 0:00:01.206 ******* 2025-12-08 17:53:12,725 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.034) 0:00:01.205 ******* 2025-12-08 17:53:13,060 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,074 p=32717 u=zuul n=ansible | TASK [openshift_login : OpenShift login _raw_params=login.yml] ***************** 2025-12-08 17:53:13,074 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.349) 0:00:01.555 ******* 2025-12-08 17:53:13,075 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.349) 0:00:01.555 ******* 2025-12-08 17:53:13,104 p=32717 u=zuul n=ansible | included: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/openshift_login/tasks/login.yml for localhost 2025-12-08 17:53:13,115 p=32717 u=zuul n=ansible | TASK [openshift_login : Check if the password file is present path={{ cifmw_openshift_login_password_file | default(cifmw_openshift_password_file) }}] *** 2025-12-08 17:53:13,115 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.040) 0:00:01.596 ******* 2025-12-08 17:53:13,115 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.040) 0:00:01.595 ******* 2025-12-08 17:53:13,136 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,151 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch user password content src={{ cifmw_openshift_login_password_file | default(cifmw_openshift_password_file) }}] *** 2025-12-08 17:53:13,151 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.035) 0:00:01.632 ******* 2025-12-08 17:53:13,151 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.035) 0:00:01.631 ******* 2025-12-08 17:53:13,176 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,187 p=32717 u=zuul n=ansible | TASK [openshift_login : Set user password as a fact cifmw_openshift_login_password={{ cifmw_openshift_login_password_file_slurp.content | b64decode }}, cacheable=True] *** 2025-12-08 17:53:13,187 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:01.668 ******* 2025-12-08 17:53:13,187 p=32717 u=zuul n=ansible | Monday 08 
December 2025 17:53:13 +0000 (0:00:00.036) 0:00:01.667 ******* 2025-12-08 17:53:13,208 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,218 p=32717 u=zuul n=ansible | TASK [openshift_login : Set role variables cifmw_openshift_login_kubeconfig={{ cifmw_openshift_login_kubeconfig | default(cifmw_openshift_kubeconfig) | default( ansible_env.KUBECONFIG if 'KUBECONFIG' in ansible_env else cifmw_openshift_login_kubeconfig_default_path ) | trim }}, cifmw_openshift_login_user={{ cifmw_openshift_login_user | default(cifmw_openshift_user) | default(omit) }}, cifmw_openshift_login_password={{ cifmw_openshift_login_password | default(cifmw_openshift_password) | default(omit) }}, cifmw_openshift_login_api={{ cifmw_openshift_login_api | default(cifmw_openshift_api) | default(omit) }}, cifmw_openshift_login_cert_login={{ cifmw_openshift_login_cert_login | default(false)}}, cifmw_openshift_login_provided_token={{ cifmw_openshift_provided_token | default(omit) }}, cacheable=True] *** 2025-12-08 17:53:13,218 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.030) 0:00:01.698 ******* 2025-12-08 17:53:13,218 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.030) 0:00:01.698 ******* 2025-12-08 17:53:13,254 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,268 p=32717 u=zuul n=ansible | TASK [openshift_login : Check if kubeconfig exists path={{ cifmw_openshift_login_kubeconfig }}] *** 2025-12-08 17:53:13,268 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:01.749 ******* 2025-12-08 17:53:13,268 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:01.748 ******* 2025-12-08 17:53:13,479 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,500 p=32717 u=zuul n=ansible | TASK [openshift_login : Assert that enough data is provided to log in to OpenShift that=cifmw_openshift_login_kubeconfig_stat.stat.exists or (cifmw_openshift_login_provided_token is defined and cifmw_openshift_login_provided_token != '') or ( (cifmw_openshift_login_user is defined) and (cifmw_openshift_login_password is defined) and (cifmw_openshift_login_api is defined) ), msg=If an existing kubeconfig is not provided user/pwd or provided/initial token and API URL must be given] *** 2025-12-08 17:53:13,500 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.232) 0:00:01.981 ******* 2025-12-08 17:53:13,500 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.232) 0:00:01.980 ******* 2025-12-08 17:53:13,547 p=32717 u=zuul n=ansible | ok: [localhost] => changed: false msg: All assertions passed 2025-12-08 17:53:13,573 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch kubeconfig content src={{ cifmw_openshift_login_kubeconfig }}] *** 2025-12-08 17:53:13,573 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.072) 0:00:02.054 ******* 2025-12-08 17:53:13,573 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.073) 0:00:02.053 ******* 2025-12-08 17:53:13,613 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,634 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch x509 key based users cifmw_openshift_login_key_based_users={{ ( cifmw_openshift_login_kubeconfig_content_b64.content | b64decode | from_yaml ). 
users | default([]) | selectattr('user.client-certificate-data', 'defined') | map(attribute="name") | map("split", "/") | map("first") }}, cacheable=True] *** 2025-12-08 17:53:13,635 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.061) 0:00:02.115 ******* 2025-12-08 17:53:13,635 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.061) 0:00:02.115 ******* 2025-12-08 17:53:13,668 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,684 p=32717 u=zuul n=ansible | TASK [openshift_login : Assign key based user if not provided and available cifmw_openshift_login_user={{ (cifmw_openshift_login_assume_cert_system_user | ternary('system:', '')) + (cifmw_openshift_login_key_based_users | map('replace', 'system:', '') | unique | first) }}, cifmw_openshift_login_cert_login=True, cacheable=True] *** 2025-12-08 17:53:13,685 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.165 ******* 2025-12-08 17:53:13,685 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.165 ******* 2025-12-08 17:53:13,718 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,729 p=32717 u=zuul n=ansible | TASK [openshift_login : Set the retry count cifmw_openshift_login_retries_cnt={{ 0 if cifmw_openshift_login_retries_cnt is undefined else cifmw_openshift_login_retries_cnt|int + 1 }}] *** 2025-12-08 17:53:13,729 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.044) 0:00:02.210 ******* 2025-12-08 17:53:13,730 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.044) 0:00:02.210 ******* 2025-12-08 17:53:13,770 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,780 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch token _raw_params=try_login.yml] ***************** 2025-12-08 17:53:13,780 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.261 ******* 2025-12-08 17:53:13,780 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.260 ******* 2025-12-08 17:53:13,804 p=32717 u=zuul n=ansible | included: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/openshift_login/tasks/try_login.yml for localhost 2025-12-08 17:53:13,816 p=32717 u=zuul n=ansible | TASK [openshift_login : Try get OpenShift access token _raw_params=oc whoami -t] *** 2025-12-08 17:53:13,816 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:02.297 ******* 2025-12-08 17:53:13,816 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:02.296 ******* 2025-12-08 17:53:13,841 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,853 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift token output_dir={{ cifmw_openshift_login_basedir }}/artifacts, script=oc login {%- if cifmw_openshift_login_provided_token is not defined %} {%- if cifmw_openshift_login_user is defined %} -u {{ cifmw_openshift_login_user }} {%- endif %} {%- if cifmw_openshift_login_password is defined %} -p {{ cifmw_openshift_login_password }} {%- endif %} {% else %} --token={{ cifmw_openshift_login_provided_token }} {%- endif %} {%- if cifmw_openshift_login_skip_tls_verify|bool %} --insecure-skip-tls-verify=true {%- endif %} {%- if cifmw_openshift_login_api is defined %} {{ cifmw_openshift_login_api }} {%- endif %}] *** 2025-12-08 17:53:13,854 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 
(0:00:00.037) 0:00:02.334 ******* 2025-12-08 17:53:13,854 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.037) 0:00:02.334 ******* 2025-12-08 17:53:13,924 p=32717 u=zuul n=ansible | Follow script's output here: /home/zuul/ci-framework-data/logs/ci_script_000_fetch_openshift.log 2025-12-08 17:53:14,323 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:14,332 p=32717 u=zuul n=ansible | TASK [openshift_login : Ensure kubeconfig is provided that=cifmw_openshift_login_kubeconfig != ""] *** 2025-12-08 17:53:14,333 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.479) 0:00:02.813 ******* 2025-12-08 17:53:14,333 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.479) 0:00:02.813 ******* 2025-12-08 17:53:14,355 p=32717 u=zuul n=ansible | ok: [localhost] => changed: false msg: All assertions passed 2025-12-08 17:53:14,375 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch new OpenShift access token _raw_params=oc whoami -t] *** 2025-12-08 17:53:14,375 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.042) 0:00:02.856 ******* 2025-12-08 17:53:14,376 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.042) 0:00:02.856 ******* 2025-12-08 17:53:14,888 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:14,913 p=32717 u=zuul n=ansible | TASK [openshift_login : Set new OpenShift token cifmw_openshift_login_token={{ (not cifmw_openshift_login_new_token_out.skipped | default(false)) | ternary(cifmw_openshift_login_new_token_out.stdout, cifmw_openshift_login_whoami_out.stdout) }}, cacheable=True] *** 2025-12-08 17:53:14,914 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.538) 0:00:03.394 ******* 2025-12-08 17:53:14,914 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.538) 0:00:03.394 ******* 2025-12-08 17:53:14,963 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:14,982 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift API URL _raw_params=oc whoami --show-server=true] *** 2025-12-08 17:53:14,982 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.068) 0:00:03.463 ******* 2025-12-08 17:53:14,983 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.068) 0:00:03.463 ******* 2025-12-08 17:53:15,364 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:15,373 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift kubeconfig context _raw_params=oc whoami -c] *** 2025-12-08 17:53:15,373 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.390) 0:00:03.853 ******* 2025-12-08 17:53:15,373 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.390) 0:00:03.853 ******* 2025-12-08 17:53:15,651 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:15,662 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift current user _raw_params=oc whoami] **** 2025-12-08 17:53:15,662 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.289) 0:00:04.142 ******* 2025-12-08 17:53:15,662 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.289) 0:00:04.142 ******* 2025-12-08 17:53:16,045 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:16,058 p=32717 u=zuul n=ansible | TASK [openshift_login : Set OpenShift user, context and API facts cifmw_openshift_login_api={{ cifmw_openshift_login_api_out.stdout }}, 
cifmw_openshift_login_context={{ cifmw_openshift_login_context_out.stdout }}, cifmw_openshift_login_user={{ _oauth_user }}, cifmw_openshift_kubeconfig={{ cifmw_openshift_login_kubeconfig }}, cifmw_openshift_api={{ cifmw_openshift_login_api_out.stdout }}, cifmw_openshift_context={{ cifmw_openshift_login_context_out.stdout }}, cifmw_openshift_user={{ _oauth_user }}, cifmw_openshift_token={{ cifmw_openshift_login_token | default(omit) }}, cifmw_install_yamls_environment={{ ( cifmw_install_yamls_environment | combine({'KUBECONFIG': cifmw_openshift_login_kubeconfig}) ) if cifmw_install_yamls_environment is defined else omit }}, cacheable=True] *** 2025-12-08 17:53:16,058 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.396) 0:00:04.539 ******* 2025-12-08 17:53:16,058 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.396) 0:00:04.539 ******* 2025-12-08 17:53:16,091 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:16,104 p=32717 u=zuul n=ansible | TASK [openshift_login : Create the openshift_login parameters file dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/openshift-login-params.yml, content={{ cifmw_openshift_login_params_content | from_yaml | to_nice_yaml }}, mode=0600] *** 2025-12-08 17:53:16,104 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.045) 0:00:04.585 ******* 2025-12-08 17:53:16,104 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.045) 0:00:04.584 ******* 2025-12-08 17:53:16,684 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:16,708 p=32717 u=zuul n=ansible | TASK [openshift_login : Read the install yamls parameters file path={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml] *** 2025-12-08 17:53:16,708 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.603) 0:00:05.189 ******* 2025-12-08 17:53:16,708 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.603) 0:00:05.188 ******* 2025-12-08 17:53:17,038 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:17,053 p=32717 u=zuul n=ansible | TASK [openshift_login : Append the KUBECONFIG to the install yamls parameters content={{ cifmw_openshift_login_install_yamls_artifacts_slurp['content'] | b64decode | from_yaml | combine( { 'cifmw_install_yamls_environment': { 'KUBECONFIG': cifmw_openshift_login_kubeconfig } }, recursive=true) | to_nice_yaml }}, dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml, mode=0600] *** 2025-12-08 17:53:17,053 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.345) 0:00:05.534 ******* 2025-12-08 17:53:17,053 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.345) 0:00:05.533 ******* 2025-12-08 17:53:17,537 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:17,561 p=32717 u=zuul n=ansible | TASK [openshift_setup : Ensure output directory exists path={{ cifmw_openshift_setup_basedir }}/artifacts, state=directory, mode=0755] *** 2025-12-08 17:53:17,561 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.507) 0:00:06.042 ******* 2025-12-08 17:53:17,561 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.507) 0:00:06.041 ******* 2025-12-08 17:53:17,757 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:17,778 p=32717 u=zuul n=ansible | 
TASK [openshift_setup : Fetch namespaces to create cifmw_openshift_setup_namespaces={{ (( ([cifmw_install_yamls_defaults['NAMESPACE']] + ([cifmw_install_yamls_defaults['OPERATOR_NAMESPACE']] if 'OPERATOR_NAMESPACE' is in cifmw_install_yamls_defaults else []) ) if cifmw_install_yamls_defaults is defined else [] ) + cifmw_openshift_setup_create_namespaces) | unique }}] *** 2025-12-08 17:53:17,778 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.217) 0:00:06.259 ******* 2025-12-08 17:53:17,778 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.217) 0:00:06.258 ******* 2025-12-08 17:53:17,803 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:17,828 p=32717 u=zuul n=ansible | TASK [openshift_setup : Create required namespaces kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit) }}, name={{ item }}, kind=Namespace, state=present] *** 2025-12-08 17:53:17,828 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.050) 0:00:06.309 ******* 2025-12-08 17:53:17,828 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.050) 0:00:06.308 ******* 2025-12-08 17:53:18,765 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack) 2025-12-08 17:53:19,446 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack-operators) 2025-12-08 17:53:19,474 p=32717 u=zuul n=ansible | TASK [openshift_setup : Get internal OpenShift registry route kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, kind=Route, name=default-route, namespace=openshift-image-registry] *** 2025-12-08 17:53:19,475 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:19 +0000 (0:00:01.646) 0:00:07.955 ******* 2025-12-08 17:53:19,475 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:19 +0000 (0:00:01.646) 0:00:07.955 ******* 2025-12-08 17:53:20,623 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:20,644 p=32717 u=zuul n=ansible | TASK [openshift_setup : Allow anonymous image-pulls in CRC registry for targeted namespaces state=present, kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, definition={'kind': 'RoleBinding', 'apiVersion': 'rbac.authorization.k8s.io/v1', 'metadata': {'name': 'system:image-puller', 'namespace': '{{ item }}'}, 'subjects': [{'kind': 'User', 'name': 'system:anonymous'}, {'kind': 'User', 'name': 'system:unauthenticated'}], 'roleRef': {'kind': 'ClusterRole', 'name': 'system:image-puller'}}] *** 2025-12-08 17:53:20,644 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:20 +0000 (0:00:01.169) 0:00:09.125 ******* 2025-12-08 17:53:20,645 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:20 +0000 (0:00:01.169) 0:00:09.125 ******* 2025-12-08 17:53:21,400 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack) 2025-12-08 17:53:22,068 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack-operators) 2025-12-08 17:53:22,096 p=32717 u=zuul n=ansible | TASK [openshift_setup : Wait for the image registry to be ready kind=Deployment, name=image-registry, namespace=openshift-image-registry, kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, wait=True, wait_sleep=10, 
wait_timeout=600, wait_condition={'type': 'Available', 'status': 'True'}] *** 2025-12-08 17:53:22,096 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:22 +0000 (0:00:01.451) 0:00:10.577 ******* 2025-12-08 17:53:22,096 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:22 +0000 (0:00:01.451) 0:00:10.576 ******* 2025-12-08 17:53:23,037 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:23,055 p=32717 u=zuul n=ansible | TASK [openshift_setup : Login into OpenShift internal registry output_dir={{ cifmw_openshift_setup_basedir }}/artifacts, script=podman login -u {{ cifmw_openshift_user }} -p {{ cifmw_openshift_token }} {%- if cifmw_openshift_setup_skip_internal_registry_tls_verify|bool %} --tls-verify=false {%- endif %} {{ cifmw_openshift_setup_registry_default_route.resources[0].spec.host }}] *** 2025-12-08 17:53:23,055 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.959) 0:00:11.536 ******* 2025-12-08 17:53:23,055 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.959) 0:00:11.535 ******* 2025-12-08 17:53:23,112 p=32717 u=zuul n=ansible | Follow script's output here: /home/zuul/ci-framework-data/logs/ci_script_001_login_into_openshift_internal.log 2025-12-08 17:53:23,338 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:23,348 p=32717 u=zuul n=ansible | TASK [Ensure we have custom CA installed on host role=install_ca] ************** 2025-12-08 17:53:23,348 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.293) 0:00:11.829 ******* 2025-12-08 17:53:23,348 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.293) 0:00:11.828 ******* 2025-12-08 17:53:23,368 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,382 p=32717 u=zuul n=ansible | TASK [openshift_setup : Update ca bundle _raw_params=update-ca-trust extract] *** 2025-12-08 17:53:23,382 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.034) 0:00:11.863 ******* 2025-12-08 17:53:23,382 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.034) 0:00:11.862 ******* 2025-12-08 17:53:23,401 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,415 p=32717 u=zuul n=ansible | TASK [openshift_setup : Slurp CAs file src={{ cifmw_openshift_setup_ca_bundle_path }}] *** 2025-12-08 17:53:23,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:11.896 ******* 2025-12-08 17:53:23,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:11.896 ******* 2025-12-08 17:53:23,452 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,491 p=32717 u=zuul n=ansible | TASK [openshift_setup : Create config map with registry CAs kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, definition={'apiVersion': 'v1', 'kind': 'ConfigMap', 'metadata': {'namespace': 'openshift-config', 'name': 'registry-cas'}, 'data': '{{ _config_map_data | items2dict }}'}] *** 2025-12-08 17:53:23,491 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.075) 0:00:11.972 ******* 2025-12-08 17:53:23,491 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.075) 0:00:11.971 ******* 2025-12-08 17:53:23,515 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,524 p=32717 u=zuul n=ansible | TASK [openshift_setup : Install Red Hat CA for pulling images 
from internal registry kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, merge_type=merge, definition={'apiVersion': 'config.openshift.io/v1', 'kind': 'Image', 'metadata': {'name': 'cluster'}, 'spec': {'additionalTrustedCA': {'name': 'registry-cas'}}}] *** 2025-12-08 17:53:23,525 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:12.005 ******* 2025-12-08 17:53:23,525 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:12.005 ******* 2025-12-08 17:53:23,556 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,566 p=32717 u=zuul n=ansible | TASK [openshift_setup : Add insecure registry kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, merge_type=merge, definition={'apiVersion': 'config.openshift.io/v1', 'kind': 'Image', 'metadata': {'name': 'cluster'}, 'spec': {'registrySources': {'insecureRegistries': ['{{ cifmw_update_containers_registry }}'], 'allowedRegistries': '{{ all_registries }}'}}}] *** 2025-12-08 17:53:23,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.041) 0:00:12.047 ******* 2025-12-08 17:53:23,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.041) 0:00:12.046 ******* 2025-12-08 17:53:23,587 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,596 p=32717 u=zuul n=ansible | TASK [openshift_setup : Create a ICSP with repository digest mirrors kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, definition={'apiVersion': 'operator.openshift.io/v1alpha1', 'kind': 'ImageContentSourcePolicy', 'metadata': {'name': 'registry-digest-mirrors'}, 'spec': {'repositoryDigestMirrors': '{{ cifmw_openshift_setup_digest_mirrors }}'}}] *** 2025-12-08 17:53:23,596 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.029) 0:00:12.077 ******* 2025-12-08 17:53:23,596 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.029) 0:00:12.076 ******* 2025-12-08 17:53:23,633 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,647 p=32717 u=zuul n=ansible | TASK [openshift_setup : Gather network.operator info kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, api_version=operator.openshift.io/v1, kind=Network, name=cluster] *** 2025-12-08 17:53:23,647 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.051) 0:00:12.128 ******* 2025-12-08 17:53:23,647 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.051) 0:00:12.127 ******* 2025-12-08 17:53:24,386 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:24,416 p=32717 u=zuul n=ansible | TASK [openshift_setup : Patch network operator api_version=operator.openshift.io/v1, kubeconfig={{ cifmw_openshift_kubeconfig }}, kind=Network, name=cluster, persist_config=True, patch=[{'path': '/spec/defaultNetwork/ovnKubernetesConfig/gatewayConfig/routingViaHost', 'value': True, 'op': 'replace'}, {'path': '/spec/defaultNetwork/ovnKubernetesConfig/gatewayConfig/ipForwarding', 'value': 'Global', 'op': 'replace'}]] *** 2025-12-08 17:53:24,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:24 +0000 
(0:00:00.768) 0:00:12.897 ******* 2025-12-08 17:53:24,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:24 +0000 (0:00:00.768) 0:00:12.896 ******* 2025-12-08 17:53:25,337 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:25,360 p=32717 u=zuul n=ansible | TASK [openshift_setup : Patch samples registry configuration kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, api_version=samples.operator.openshift.io/v1, kind=Config, name=cluster, patch=[{'op': 'replace', 'path': '/spec/samplesRegistry', 'value': 'registry.redhat.io'}]] *** 2025-12-08 17:53:25,360 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:25 +0000 (0:00:00.944) 0:00:13.841 ******* 2025-12-08 17:53:25,360 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:25 +0000 (0:00:00.944) 0:00:13.840 ******* 2025-12-08 17:53:26,101 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:26,116 p=32717 u=zuul n=ansible | TASK [openshift_setup : Delete the pods from openshift-marketplace namespace kind=Pod, state=absent, delete_all=True, kubeconfig={{ cifmw_openshift_kubeconfig }}, namespace=openshift-marketplace] *** 2025-12-08 17:53:26,116 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.755) 0:00:14.596 ******* 2025-12-08 17:53:26,116 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.755) 0:00:14.596 ******* 2025-12-08 17:53:26,134 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,147 p=32717 u=zuul n=ansible | TASK [openshift_setup : Wait for openshift-marketplace pods to be running _raw_params=oc wait pod --all --for=condition=Ready -n openshift-marketplace --timeout=1m] *** 2025-12-08 17:53:26,148 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.628 ******* 2025-12-08 17:53:26,148 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.628 ******* 2025-12-08 17:53:26,165 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,185 p=32717 u=zuul n=ansible | TASK [Deploy Observability operator. 
name=openshift_obs] *********************** 2025-12-08 17:53:26,185 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.037) 0:00:14.666 ******* 2025-12-08 17:53:26,185 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.036) 0:00:14.665 ******* 2025-12-08 17:53:26,210 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,271 p=32717 u=zuul n=ansible | TASK [Deploy Metal3 BMHs name=deploy_bmh] ************************************** 2025-12-08 17:53:26,271 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.086) 0:00:14.752 ******* 2025-12-08 17:53:26,271 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.086) 0:00:14.751 ******* 2025-12-08 17:53:26,293 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,304 p=32717 u=zuul n=ansible | TASK [Install certmanager operator role name=cert_manager] ********************* 2025-12-08 17:53:26,304 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.785 ******* 2025-12-08 17:53:26,304 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.784 ******* 2025-12-08 17:53:26,323 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,332 p=32717 u=zuul n=ansible | TASK [Configure hosts networking using nmstate name=ci_nmstate] **************** 2025-12-08 17:53:26,332 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.813 ******* 2025-12-08 17:53:26,333 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.813 ******* 2025-12-08 17:53:26,350 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,362 p=32717 u=zuul n=ansible | TASK [Configure multus networks name=ci_multus] ******************************** 2025-12-08 17:53:26,362 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.029) 0:00:14.843 ******* 2025-12-08 17:53:26,362 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.029) 0:00:14.842 ******* 2025-12-08 17:53:26,379 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,389 p=32717 u=zuul n=ansible | TASK [Deploy Sushy Emulator service pod name=sushy_emulator] ******************* 2025-12-08 17:53:26,389 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.026) 0:00:14.870 ******* 2025-12-08 17:53:26,389 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.026) 0:00:14.869 ******* 2025-12-08 17:53:26,408 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,418 p=32717 u=zuul n=ansible | TASK [Setup Libvirt on controller name=libvirt_manager] ************************ 2025-12-08 17:53:26,418 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.898 ******* 2025-12-08 17:53:26,418 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.898 ******* 2025-12-08 17:53:26,434 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,446 p=32717 u=zuul n=ansible | TASK [Prepare container package builder name=pkg_build] ************************ 2025-12-08 17:53:26,446 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.927 ******* 2025-12-08 17:53:26,446 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.926 ******* 2025-12-08 17:53:26,472 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 
17:53:26,481 p=32717 u=zuul n=ansible | TASK [run_hook : Assert parameters are valid quiet=True, that=['_list_hooks is not string', '_list_hooks is not mapping', '_list_hooks is iterable', '(hooks | default([])) is not string', '(hooks | default([])) is not mapping', '(hooks | default([])) is iterable']] *** 2025-12-08 17:53:26,482 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.035) 0:00:14.962 ******* 2025-12-08 17:53:26,482 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.035) 0:00:14.962 ******* 2025-12-08 17:53:26,566 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:26,576 p=32717 u=zuul n=ansible | TASK [run_hook : Assert single hooks are all mappings quiet=True, that=['_not_mapping_hooks | length == 0'], msg=All single hooks must be a list of mappings or a mapping.] *** 2025-12-08 17:53:26,576 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.094) 0:00:15.057 ******* 2025-12-08 17:53:26,576 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.094) 0:00:15.056 ******* 2025-12-08 17:53:26,646 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:26,664 p=32717 u=zuul n=ansible | TASK [run_hook : Loop on hooks for post_infra _raw_params={{ hook.type }}.yml] *** 2025-12-08 17:53:26,664 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.088) 0:00:15.145 ******* 2025-12-08 17:53:26,664 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.088) 0:00:15.144 ******* 2025-12-08 17:53:26,747 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,805 p=32717 u=zuul n=ansible | PLAY RECAP ********************************************************************* 2025-12-08 17:53:26,805 p=32717 u=zuul n=ansible | localhost : ok=35 changed=12 unreachable=0 failed=0 skipped=34 rescued=0 ignored=0 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.141) 0:00:15.286 ******* 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Create required namespaces ---------------------------- 1.65s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Allow anonymous image-pulls in CRC registry for targeted namespaces --- 1.45s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Get internal OpenShift registry route ----------------- 1.17s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Wait for the image registry to be ready --------------- 0.96s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Patch network operator -------------------------------- 0.94s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Gather network.operator info -------------------------- 0.77s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Patch samples registry configuration ------------------ 0.76s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Create the openshift_login parameters file ------------ 0.60s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch new OpenShift access token ---------------------- 0.54s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Append the KUBECONFIG to the install yamls parameters --- 0.51s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch 
OpenShift token --------------------------------- 0.48s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch OpenShift current user -------------------------- 0.40s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch OpenShift API URL ------------------------------- 0.39s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Ensure output directory exists ------------------------ 0.35s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Read the install yamls parameters file ---------------- 0.35s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Login into OpenShift internal registry ---------------- 0.29s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch OpenShift kubeconfig context -------------------- 0.29s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | networking_mapper : Check for Networking Environment Definition file existence --- 0.29s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Check if kubeconfig exists ---------------------------- 0.23s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Ensure output directory exists ------------------------ 0.22s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.142) 0:00:15.286 ******* 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | openshift_setup --------------------------------------------------------- 8.62s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | openshift_login --------------------------------------------------------- 4.84s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | run_hook ---------------------------------------------------------------- 0.65s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | ansible.builtin.include_role -------------------------------------------- 0.54s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | networking_mapper ------------------------------------------------------- 0.40s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | ansible.builtin.include_vars -------------------------------------------- 0.19s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | total ------------------------------------------------------------------ 15.24s 2025-12-08 17:53:44,400 p=33314 u=zuul n=ansible | Starting galaxy collection install process 2025-12-08 17:53:44,421 p=33314 u=zuul n=ansible | Process install dependency map 2025-12-08 17:53:59,851 p=33314 u=zuul n=ansible | Starting collection install process 2025-12-08 17:53:59,851 p=33314 u=zuul n=ansible | Installing 'cifmw.general:1.0.0+33d5122f' to '/home/zuul/.ansible/collections/ansible_collections/cifmw/general' 2025-12-08 17:54:00,501 p=33314 u=zuul n=ansible | Created collection for cifmw.general:1.0.0+33d5122f at /home/zuul/.ansible/collections/ansible_collections/cifmw/general 2025-12-08 17:54:00,501 p=33314 u=zuul n=ansible | cifmw.general:1.0.0+33d5122f was installed successfully 2025-12-08 17:54:00,501 p=33314 u=zuul n=ansible | Installing 'containers.podman:1.16.2' to '/home/zuul/.ansible/collections/ansible_collections/containers/podman' 2025-12-08 17:54:00,568 p=33314 u=zuul n=ansible | Created collection for containers.podman:1.16.2 at 
/home/zuul/.ansible/collections/ansible_collections/containers/podman 2025-12-08 17:54:00,568 p=33314 u=zuul n=ansible | containers.podman:1.16.2 was installed successfully 2025-12-08 17:54:00,568 p=33314 u=zuul n=ansible | Installing 'community.general:10.0.1' to '/home/zuul/.ansible/collections/ansible_collections/community/general' 2025-12-08 17:54:01,553 p=33314 u=zuul n=ansible | Created collection for community.general:10.0.1 at /home/zuul/.ansible/collections/ansible_collections/community/general 2025-12-08 17:54:01,553 p=33314 u=zuul n=ansible | community.general:10.0.1 was installed successfully 2025-12-08 17:54:01,553 p=33314 u=zuul n=ansible | Installing 'ansible.posix:1.6.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/posix' 2025-12-08 17:54:01,614 p=33314 u=zuul n=ansible | Created collection for ansible.posix:1.6.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/posix 2025-12-08 17:54:01,614 p=33314 u=zuul n=ansible | ansible.posix:1.6.2 was installed successfully 2025-12-08 17:54:01,614 p=33314 u=zuul n=ansible | Installing 'ansible.utils:5.1.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/utils' 2025-12-08 17:54:01,736 p=33314 u=zuul n=ansible | Created collection for ansible.utils:5.1.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/utils 2025-12-08 17:54:01,736 p=33314 u=zuul n=ansible | ansible.utils:5.1.2 was installed successfully 2025-12-08 17:54:01,736 p=33314 u=zuul n=ansible | Installing 'community.libvirt:1.3.0' to '/home/zuul/.ansible/collections/ansible_collections/community/libvirt' 2025-12-08 17:54:01,797 p=33314 u=zuul n=ansible | Created collection for community.libvirt:1.3.0 at /home/zuul/.ansible/collections/ansible_collections/community/libvirt 2025-12-08 17:54:01,797 p=33314 u=zuul n=ansible | community.libvirt:1.3.0 was installed successfully 2025-12-08 17:54:01,798 p=33314 u=zuul n=ansible | Installing 'community.crypto:2.22.3' to '/home/zuul/.ansible/collections/ansible_collections/community/crypto' 2025-12-08 17:54:01,972 p=33314 u=zuul n=ansible | Created collection for community.crypto:2.22.3 at /home/zuul/.ansible/collections/ansible_collections/community/crypto 2025-12-08 17:54:01,972 p=33314 u=zuul n=ansible | community.crypto:2.22.3 was installed successfully 2025-12-08 17:54:01,972 p=33314 u=zuul n=ansible | Installing 'kubernetes.core:5.0.0' to '/home/zuul/.ansible/collections/ansible_collections/kubernetes/core' 2025-12-08 17:54:02,156 p=33314 u=zuul n=ansible | Created collection for kubernetes.core:5.0.0 at /home/zuul/.ansible/collections/ansible_collections/kubernetes/core 2025-12-08 17:54:02,156 p=33314 u=zuul n=ansible | kubernetes.core:5.0.0 was installed successfully 2025-12-08 17:54:02,156 p=33314 u=zuul n=ansible | Installing 'ansible.netcommon:7.1.0' to '/home/zuul/.ansible/collections/ansible_collections/ansible/netcommon' 2025-12-08 17:54:02,244 p=33314 u=zuul n=ansible | Created collection for ansible.netcommon:7.1.0 at /home/zuul/.ansible/collections/ansible_collections/ansible/netcommon 2025-12-08 17:54:02,245 p=33314 u=zuul n=ansible | ansible.netcommon:7.1.0 was installed successfully 2025-12-08 17:54:02,245 p=33314 u=zuul n=ansible | Installing 'openstack.config_template:2.1.1' to '/home/zuul/.ansible/collections/ansible_collections/openstack/config_template' 2025-12-08 17:54:02,278 p=33314 u=zuul n=ansible | Created collection for openstack.config_template:2.1.1 at /home/zuul/.ansible/collections/ansible_collections/openstack/config_template 2025-12-08 
17:54:02,278 p=33314 u=zuul n=ansible | openstack.config_template:2.1.1 was installed successfully 2025-12-08 17:54:02,278 p=33314 u=zuul n=ansible | Installing 'junipernetworks.junos:9.1.0' to '/home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos' 2025-12-08 17:54:02,619 p=33314 u=zuul n=ansible | Created collection for junipernetworks.junos:9.1.0 at /home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos 2025-12-08 17:54:02,619 p=33314 u=zuul n=ansible | junipernetworks.junos:9.1.0 was installed successfully 2025-12-08 17:54:02,619 p=33314 u=zuul n=ansible | Installing 'cisco.ios:9.0.3' to '/home/zuul/.ansible/collections/ansible_collections/cisco/ios' 2025-12-08 17:54:02,980 p=33314 u=zuul n=ansible | Created collection for cisco.ios:9.0.3 at /home/zuul/.ansible/collections/ansible_collections/cisco/ios 2025-12-08 17:54:02,980 p=33314 u=zuul n=ansible | cisco.ios:9.0.3 was installed successfully 2025-12-08 17:54:02,980 p=33314 u=zuul n=ansible | Installing 'mellanox.onyx:1.0.0' to '/home/zuul/.ansible/collections/ansible_collections/mellanox/onyx' 2025-12-08 17:54:03,021 p=33314 u=zuul n=ansible | Created collection for mellanox.onyx:1.0.0 at /home/zuul/.ansible/collections/ansible_collections/mellanox/onyx 2025-12-08 17:54:03,021 p=33314 u=zuul n=ansible | mellanox.onyx:1.0.0 was installed successfully 2025-12-08 17:54:03,021 p=33314 u=zuul n=ansible | Installing 'community.okd:4.0.0' to '/home/zuul/.ansible/collections/ansible_collections/community/okd' 2025-12-08 17:54:03,054 p=33314 u=zuul n=ansible | Created collection for community.okd:4.0.0 at /home/zuul/.ansible/collections/ansible_collections/community/okd 2025-12-08 17:54:03,054 p=33314 u=zuul n=ansible | community.okd:4.0.0 was installed successfully 2025-12-08 17:54:03,054 p=33314 u=zuul n=ansible | Installing '@NAMESPACE@.@NAME@:3.1.4' to '/home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@' 2025-12-08 17:54:03,162 p=33314 u=zuul n=ansible | Created collection for @NAMESPACE@.@NAME@:3.1.4 at /home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@ 2025-12-08 17:54:03,162 p=33314 u=zuul n=ansible | @NAMESPACE@.@NAME@:3.1.4 was installed successfully
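The ansible-galaxy step above installs the collection set that the ci-framework playbooks use for the rest of the job. As a hedged illustration only (the real requirements file lives in the ci-framework repository and is not part of this log), a requirements.yml equivalent to the versions reported above would look roughly as follows; cifmw.general and the @NAMESPACE@.@NAME@ entry are omitted because the log suggests they are built inside the job rather than pulled from Galaxy:

# Illustrative requirements.yml reconstructed from the install log above;
# the authoritative file ships with the ci-framework and may differ.
collections:
  - name: containers.podman
    version: 1.16.2
  - name: community.general
    version: 10.0.1
  - name: ansible.posix
    version: 1.6.2
  - name: ansible.utils
    version: 5.1.2
  - name: community.libvirt
    version: 1.3.0
  - name: community.crypto
    version: 2.22.3
  - name: kubernetes.core
    version: 5.0.0
  - name: ansible.netcommon
    version: 7.1.0
  - name: openstack.config_template
    version: 2.1.1
  - name: junipernetworks.junos
    version: 9.1.0
  - name: cisco.ios
    version: 9.0.3
  - name: mellanox.onyx
    version: 1.0.0
  - name: community.okd
    version: 4.0.0

Installing such a file with "ansible-galaxy collection install -r requirements.yml" produces output of the form shown above.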
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager-operator_kube-controller-manager-operator-69d5f845f8-6lgwk_163e109f-c588-4057-a961-86bcca55948f/kube-controller-manager-operator/0.log
2025-12-08T17:44:21.785916629+00:00 stderr F I1208 17:44:21.784738 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:44:21.785916629+00:00 stderr F I1208 17:44:21.785221 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:21.786890436+00:00 stderr F I1208 17:44:21.786404 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:21.836755475+00:00 stderr F I1208 17:44:21.825245 1 builder.go:304] kube-controller-manager-operator version v0.0.0-unknown-afdae35-afdae35 2025-12-08T17:44:23.250794966+00:00 stderr F I1208 17:44:23.250575 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:23.250794966+00:00 stderr F W1208 17:44:23.250768 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:23.250794966+00:00 stderr F W1208 17:44:23.250775 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:23.250794966+00:00 stderr F W1208 17:44:23.250779 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:23.250845517+00:00 stderr F W1208 17:44:23.250783 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:23.250845517+00:00 stderr F W1208 17:44:23.250796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:23.250845517+00:00 stderr F W1208 17:44:23.250799 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.
2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.258626 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.258777 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.258869 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.258935 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.258993 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.259037 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.259045 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.259059 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.259066 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.259194 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:23.259375850+00:00 stderr F I1208 17:44:23.259276 1 leaderelection.go:257] attempting to acquire leader lease openshift-kube-controller-manager-operator/kube-controller-manager-operator-lock... 
2025-12-08T17:44:23.276336323+00:00 stderr F I1208 17:44:23.276266 1 leaderelection.go:271] successfully acquired lease openshift-kube-controller-manager-operator/kube-controller-manager-operator-lock 2025-12-08T17:44:23.276691842+00:00 stderr F I1208 17:44:23.276652 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator-lock", UID:"3613d00a-81b7-4e32-b686-2f28f0b60007", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37354", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' kube-controller-manager-operator-69d5f845f8-6lgwk_ea0ef12e-7583-4c9b-8895-9ac613f1507a became leader 2025-12-08T17:44:23.278123512+00:00 stderr F I1208 17:44:23.278099 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:23.284898606+00:00 stderr F I1208 17:44:23.284357 1 starter.go:97] FeatureGates initialized: knownFeatureGates=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:23.292189335+00:00 stderr F I1208 17:44:23.292113 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator", UID:"09857aec-2c93-4f0d-9e38-a820bd5b8362", APIVersion:"apps/v1", 
ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:23.361780854+00:00 stderr F I1208 17:44:23.353023 1 base_controller.go:76] Waiting for caches to sync for GarbageCollectorWatcherController 2025-12-08T17:44:23.369827303+00:00 stderr F I1208 17:44:23.369775 1 base_controller.go:76] Waiting for caches to sync for TargetConfigController 2025-12-08T17:44:23.369916096+00:00 stderr F I1208 17:44:23.369896 1 base_controller.go:76] Waiting for caches to sync for ConfigObserver 2025-12-08T17:44:23.369944616+00:00 stderr F I1208 17:44:23.369927 1 base_controller.go:76] Waiting for caches to sync for MissingStaticPodController 2025-12-08T17:44:23.369944616+00:00 stderr F I1208 17:44:23.369941 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 
2025-12-08T17:44:23.369981307+00:00 stderr F I1208 17:44:23.369964 1 base_controller.go:76] Waiting for caches to sync for kube-controller-manager 2025-12-08T17:44:23.370005658+00:00 stderr F I1208 17:44:23.369990 1 base_controller.go:76] Waiting for caches to sync for SATokenSignerController 2025-12-08T17:44:23.370040869+00:00 stderr F I1208 17:44:23.370023 1 base_controller.go:76] Waiting for caches to sync for RevisionController 2025-12-08T17:44:23.370063950+00:00 stderr F I1208 17:44:23.370047 1 base_controller.go:76] Waiting for caches to sync for Installer 2025-12-08T17:44:23.370090580+00:00 stderr F I1208 17:44:23.370074 1 base_controller.go:76] Waiting for caches to sync for kube-controller-manager-InstallerState 2025-12-08T17:44:23.370099471+00:00 stderr F I1208 17:44:23.370094 1 base_controller.go:76] Waiting for caches to sync for kube-controller-manager-StaticPodState 2025-12-08T17:44:23.370135982+00:00 stderr F I1208 17:44:23.370120 1 base_controller.go:76] Waiting for caches to sync for PruneController 2025-12-08T17:44:23.370175543+00:00 stderr F I1208 17:44:23.370158 1 base_controller.go:76] Waiting for caches to sync for kube-controller-manager-Node 2025-12-08T17:44:23.370434230+00:00 stderr F I1208 17:44:23.370363 1 base_controller.go:76] Waiting for caches to sync for BackingResourceController-StaticResources 2025-12-08T17:44:23.375693723+00:00 stderr F I1208 17:44:23.375644 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:23.375748485+00:00 stderr F I1208 17:44:23.375730 1 base_controller.go:76] Waiting for caches to sync for WorkerLatencyProfile 2025-12-08T17:44:23.375775295+00:00 stderr F I1208 17:44:23.375759 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:23.375887768+00:00 stderr F I1208 17:44:23.375859 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:23.383738303+00:00 stderr F I1208 17:44:23.378064 1 base_controller.go:76] Waiting for caches to sync for KubeControllerManagerStaticResources-StaticResources 2025-12-08T17:44:23.383738303+00:00 stderr F I1208 17:44:23.378163 1 base_controller.go:76] Waiting for caches to sync for kube-controller-manager-operator-UnsupportedConfigOverrides 2025-12-08T17:44:23.383738303+00:00 stderr F I1208 17:44:23.378191 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:23.383738303+00:00 stderr F I1208 17:44:23.378199 1 base_controller.go:76] Waiting for caches to sync for GuardController 2025-12-08T17:44:23.390997550+00:00 stderr F I1208 17:44:23.388493 1 base_controller.go:82] Caches are synced for kube-controller-manager-operator-UnsupportedConfigOverrides 2025-12-08T17:44:23.390997550+00:00 stderr F I1208 17:44:23.388516 1 base_controller.go:119] Starting #1 worker of kube-controller-manager-operator-UnsupportedConfigOverrides controller ... 2025-12-08T17:44:23.390997550+00:00 stderr F I1208 17:44:23.388524 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:23.390997550+00:00 stderr F I1208 17:44:23.388528 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 
2025-12-08T17:44:23.404074627+00:00 stderr F I1208 17:44:23.403984 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_kube-controller-manager 2025-12-08T17:44:23.409409413+00:00 stderr F I1208 17:44:23.406939 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.415765336+00:00 stderr F I1208 17:44:23.407627 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.417466393+00:00 stderr F I1208 17:44:23.410142 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.417466393+00:00 stderr F I1208 17:44:23.410429 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.417466393+00:00 stderr F I1208 17:44:23.412730 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.452568010+00:00 stderr F I1208 17:44:23.451614 1 reflector.go:430] "Caches populated" type="*v1.APIServer" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.452568010+00:00 stderr F I1208 17:44:23.451956 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.461551404+00:00 stderr F I1208 17:44:23.458103 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.476912234+00:00 stderr F I1208 17:44:23.476852 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.477683485+00:00 stderr F I1208 17:44:23.477657 1 reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.477839349+00:00 stderr F I1208 17:44:23.477792 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.478251150+00:00 stderr F I1208 17:44:23.478230 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.478438485+00:00 stderr F I1208 17:44:23.478403 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.479429283+00:00 stderr F I1208 17:44:23.479298 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.479612348+00:00 stderr F I1208 17:44:23.479580 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.481248052+00:00 stderr F I1208 17:44:23.481221 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.485973491+00:00 stderr F I1208 17:44:23.485407 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.487894924+00:00 stderr F I1208 17:44:23.487079 1 reflector.go:430] "Caches populated" 
type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.487894924+00:00 stderr F I1208 17:44:23.487695 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.501988208+00:00 stderr F I1208 17:44:23.491288 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.514377115+00:00 stderr F I1208 17:44:23.512254 1 base_controller.go:82] Caches are synced for StatusSyncer_kube-controller-manager 2025-12-08T17:44:23.514377115+00:00 stderr F I1208 17:44:23.512277 1 base_controller.go:119] Starting #1 worker of StatusSyncer_kube-controller-manager controller ... 2025-12-08T17:44:23.514377115+00:00 stderr F I1208 17:44:23.513267 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)","reason":"NodeController_MasterNodesReady","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:23.526951149+00:00 stderr F I1208 17:44:23.523611 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)","reason":"NodeController_MasterNodesReady","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:23.526951149+00:00 stderr F I1208 17:44:23.524101 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator", UID:"09857aec-2c93-4f0d-9e38-a820bd5b8362", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-controller-manager changed: Degraded changed from False to True ("NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because 
NodeStatusUnknown (Kubelet stopped posting node status.)") 2025-12-08T17:44:23.536930351+00:00 stderr F E1208 17:44:23.536525 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:44:23.561683726+00:00 stderr F I1208 17:44:23.559551 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.573484858+00:00 stderr F I1208 17:44:23.572373 1 base_controller.go:82] Caches are synced for kube-controller-manager-Node 2025-12-08T17:44:23.573484858+00:00 stderr F I1208 17:44:23.572396 1 base_controller.go:119] Starting #1 worker of kube-controller-manager-Node controller ... 2025-12-08T17:44:23.574003123+00:00 stderr F I1208 17:44:23.573913 1 base_controller.go:82] Caches are synced for RevisionController 2025-12-08T17:44:23.574003123+00:00 stderr F I1208 17:44:23.573950 1 base_controller.go:119] Starting #1 worker of RevisionController controller ... 2025-12-08T17:44:23.582129953+00:00 stderr F I1208 17:44:23.576620 1 base_controller.go:82] Caches are synced for PruneController 2025-12-08T17:44:23.582129953+00:00 stderr F I1208 17:44:23.576633 1 base_controller.go:119] Starting #1 worker of PruneController controller ... 2025-12-08T17:44:23.582129953+00:00 stderr F I1208 17:44:23.577021 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:44:23.582129953+00:00 stderr F I1208 17:44:23.577060 1 base_controller.go:82] Caches are synced for WorkerLatencyProfile 2025-12-08T17:44:23.582129953+00:00 stderr F I1208 17:44:23.577065 1 base_controller.go:119] Starting #1 worker of WorkerLatencyProfile controller ... 
2025-12-08T17:44:23.627130032+00:00 stderr F I1208 17:44:23.617027 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:23.627130032+00:00 stderr F I1208 17:44:23.617254 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:44:23.627130032+00:00 stderr F I1208 17:44:23.620400 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator", UID:"09857aec-2c93-4f0d-9e38-a820bd5b8362", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'MasterNodesReadyChanged' All master nodes are ready 2025-12-08T17:44:23.691052125+00:00 stderr F I1208 17:44:23.690743 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator", UID:"09857aec-2c93-4f0d-9e38-a820bd5b8362", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-controller-manager changed: Degraded changed from True to False ("NodeControllerDegraded: All master nodes are ready") 2025-12-08T17:44:23.831270919+00:00 stderr F I1208 17:44:23.825535 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.860507217+00:00 stderr F I1208 17:44:23.856271 1 base_controller.go:82] Caches are synced for GarbageCollectorWatcherController 2025-12-08T17:44:23.860507217+00:00 stderr F I1208 17:44:23.856296 1 base_controller.go:119] Starting #1 worker of GarbageCollectorWatcherController controller ... 2025-12-08T17:44:23.959450036+00:00 stderr F I1208 17:44:23.959391 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.972955844+00:00 stderr F I1208 17:44:23.972789 1 base_controller.go:82] Caches are synced for ConfigObserver 2025-12-08T17:44:23.972955844+00:00 stderr F I1208 17:44:23.972811 1 base_controller.go:119] Starting #1 worker of ConfigObserver controller ... 
2025-12-08T17:44:24.165359113+00:00 stderr F I1208 17:44:24.165303 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.361278367+00:00 stderr F I1208 17:44:24.360785 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.370900449+00:00 stderr F I1208 17:44:24.370233 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:24.370900449+00:00 stderr F I1208 17:44:24.370257 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:24.370900449+00:00 stderr F I1208 17:44:24.370344 1 base_controller.go:82] Caches are synced for SATokenSignerController 2025-12-08T17:44:24.370900449+00:00 stderr F I1208 17:44:24.370392 1 base_controller.go:119] Starting #1 worker of SATokenSignerController controller ... 2025-12-08T17:44:24.555923276+00:00 stderr F I1208 17:44:24.555269 1 request.go:752] "Waited before sending request" delay="1.17093488s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts?limit=500&resourceVersion=0" 2025-12-08T17:44:24.558447385+00:00 stderr F I1208 17:44:24.557117 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.581097262+00:00 stderr F I1208 17:44:24.577843 1 base_controller.go:82] Caches are synced for BackingResourceController-StaticResources 2025-12-08T17:44:24.581097262+00:00 stderr F I1208 17:44:24.577869 1 base_controller.go:119] Starting #1 worker of BackingResourceController-StaticResources controller ... 2025-12-08T17:44:24.760925107+00:00 stderr F I1208 17:44:24.760418 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.971157042+00:00 stderr F I1208 17:44:24.970823 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:25.070080431+00:00 stderr F I1208 17:44:25.070027 1 base_controller.go:82] Caches are synced for kube-controller-manager 2025-12-08T17:44:25.070145173+00:00 stderr F I1208 17:44:25.070132 1 base_controller.go:119] Starting #1 worker of kube-controller-manager controller ... 2025-12-08T17:44:25.166905021+00:00 stderr F I1208 17:44:25.163009 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:25.176263567+00:00 stderr F I1208 17:44:25.173019 1 base_controller.go:82] Caches are synced for kube-controller-manager-StaticPodState 2025-12-08T17:44:25.176263567+00:00 stderr F I1208 17:44:25.173048 1 base_controller.go:119] Starting #1 worker of kube-controller-manager-StaticPodState controller ... 2025-12-08T17:44:25.176263567+00:00 stderr F I1208 17:44:25.173080 1 base_controller.go:82] Caches are synced for MissingStaticPodController 2025-12-08T17:44:25.176263567+00:00 stderr F I1208 17:44:25.173084 1 base_controller.go:119] Starting #1 worker of MissingStaticPodController controller ... 
2025-12-08T17:44:25.176263567+00:00 stderr F I1208 17:44:25.173094 1 base_controller.go:82] Caches are synced for Installer 2025-12-08T17:44:25.176263567+00:00 stderr F I1208 17:44:25.173101 1 base_controller.go:119] Starting #1 worker of Installer controller ... 2025-12-08T17:44:25.176263567+00:00 stderr F I1208 17:44:25.173434 1 base_controller.go:82] Caches are synced for kube-controller-manager-InstallerState 2025-12-08T17:44:25.176263567+00:00 stderr F I1208 17:44:25.173440 1 base_controller.go:119] Starting #1 worker of kube-controller-manager-InstallerState controller ... 2025-12-08T17:44:25.183907715+00:00 stderr F I1208 17:44:25.180841 1 base_controller.go:82] Caches are synced for GuardController 2025-12-08T17:44:25.183907715+00:00 stderr F I1208 17:44:25.180865 1 base_controller.go:119] Starting #1 worker of GuardController controller ... 2025-12-08T17:44:25.367941975+00:00 stderr F I1208 17:44:25.367057 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:25.370267169+00:00 stderr F I1208 17:44:25.370223 1 base_controller.go:82] Caches are synced for TargetConfigController 2025-12-08T17:44:25.370326991+00:00 stderr F I1208 17:44:25.370314 1 base_controller.go:119] Starting #1 worker of TargetConfigController controller ... 2025-12-08T17:44:25.406952400+00:00 stderr F I1208 17:44:25.406436 1 base_controller.go:82] Caches are synced for KubeControllerManagerStaticResources-StaticResources 2025-12-08T17:44:25.406952400+00:00 stderr F I1208 17:44:25.406477 1 base_controller.go:119] Starting #1 worker of KubeControllerManagerStaticResources-StaticResources controller ... 2025-12-08T17:44:25.557167116+00:00 stderr F I1208 17:44:25.555209 1 request.go:752] "Waited before sending request" delay="1.184667374s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver" 2025-12-08T17:44:26.556396793+00:00 stderr F I1208 17:44:26.555692 1 request.go:752] "Waited before sending request" delay="1.161912103s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/configmaps/config" 2025-12-08T17:44:26.956988590+00:00 stderr F I1208 17:44:26.956090 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:26.960492315+00:00 stderr F I1208 17:44:26.957704 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator", UID:"09857aec-2c93-4f0d-9e38-a820bd5b8362", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SATokenSignerControllerOK' found expected kube-apiserver endpoints 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.607623 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.607537175 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.607831 1 tlsconfig.go:181] "Loaded client CA" index=1 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.607810162 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.607857 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.607840153 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.607896 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.607863354 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.607923 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.607906705 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.607952 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.607930685 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.607984 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.607969927 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.608007 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.607990907 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.608028 1 tlsconfig.go:181] "Loaded client CA" index=8 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.608014578 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.608067 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.608038628 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.608418 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-kube-controller-manager-operator.svc\" [serving] validServingFor=[metrics.openshift-kube-controller-manager-operator.svc,metrics.openshift-kube-controller-manager-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:12 +0000 UTC to 2027-11-02 07:52:13 +0000 UTC (now=2025-12-08 17:44:30.608397299 +0000 UTC))" 2025-12-08T17:44:30.613034005+00:00 stderr F I1208 17:44:30.608660 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:21 +0000 UTC to 2028-12-08 16:44:21 +0000 UTC (now=2025-12-08 17:44:30.608638146 +0000 UTC))" 2025-12-08T17:44:43.365677489+00:00 stderr F I1208 17:44:43.365138 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:44:43.437636041+00:00 stderr P I1208 17:44:43.437522 1 core.go:352] ConfigMap "openshift-kube-controller-manager/client-ca" changes: {"data":{"ca-bundle.crt":"-----BEGIN CERTIFICATE-----\nMIIDMDCCAhigAwIBAgIIIzF/30wVgUkwDQYJKoZIhvcNAQELBQAwNjESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSAwHgYDVQQDExdhZG1pbi1rdWJlY29uZmlnLXNpZ25lcjAe\nFw0yNTExMDIwNzM0MDdaFw0zNTEwMzEwNzM0MDdaMDYxEjAQBgNVBAsTCW9wZW5z\naGlmdDEgMB4GA1UEAxMXYWRtaW4ta3ViZWNvbmZpZy1zaWduZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDEaFvaxE/Ah0Q+4T67KuL6N5MoncMcfqtm\njKd8txx/b3t8o2WCAMF0IKDNMDDobraupmimcAQwOWen0WJzp3DqjVAIKabrG/DZ\nXqsx3xVHxhSvFOKEFQbiFu6HL0FvXs1bsMkm5YAcM/voHkGHefR+5YEgpgTuhZ6a\n9muG9cxUjlZ/BmMP3UwsgmRfxQ7TG3Ixf/mp++cLxi114b8ld8S4XtVuG//82BzB\nvk3J6+7tnRjli/AHSm0fx7ZvgRPY1b1IGSvGUMc6Qrc+nim/Ufd017TeFlkwKIRP\nPnUGuz0S/5Rz9XMoWJ/OHi/vB0eQs3pyqHBDPgTYCt1NZUO9nN7tAgMBAAGjQjBA\nMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBT9Y6Mp\nr1Yg0NUI8hFWKTlX1CJd6zANBgkqhkiG9w0BAQsFAAOCAQEAWD6f4r0mm7D7+uVn\nCs3w3zq0JIVBUtBTXYuSbgpnpmSVALRCSnRyguNx37SHU2qXVF7+hsaWYuO/B6iR\nZ5uZ6fkLEEFI5YN7q1HBEPBRaBFtJ7fSOBl9iSsHI11DX53+wRhJR319P3fZ18eq\nGwTdUHTy+L9ec1NjaJvOz2eJEVB3O2A9ySh+Rhdv75mFqTbNvxyf5fjw7OHDd5ti\nWPCT1UzyXUXpE8ET6HA59gQO3Ix/VPzZTpNWX1FAXDYpYFkK1t9Ifzjdqf3/P+uP\nvwMtUNixJg8RYhfRNZ4RbfULWU9Y0DpadRVX5WppGBTRNAAgmNGBYPPR7HuxVGx1\nReJ2Bg==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDXTCCAkWgAwIBAgIIHuh6I9HTH+QwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM1WhcNMjYx\nMDI4MDgxNzM2WjAmMSQwIgYDVQQDDBtrdWJlLWNzci1zaWduZXJfQDE3NjIwNzE0\nNTUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb91UVWMxGg+Q7+o+5\ntt2z/Oq+7Mc1SNGJ4oF0Y99WM1Y7tIo5NOKWiIXE16rGuzY4+D9O53fh14ngzJm3\nWh3GjCGvr8/2W7J1BblXwuKwDuRl+OfJvyPtETUeME42Y9V6XP/B60iaaEYhm5t7\nTDf7TmmjEpqeWix43KqHnpcW9Zr8tM+tHLrGwcHnb+z3LvGkQA7mbXsaHiuryCVx\nudxQYNKWgtAkw3OOtuVyJ2gGD7iYVni1jg7nc9ZhQOYBoYRbAw3zh36CY50dZA89\nhDKYsVVourd9xfAdwSmrcgtsVo1X0ucCpEEUYKEz3/udgk+Dgf2hy3flIMcg9kcI\nS8xzAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0G\nA1UdDgQWBBSGMLijow+n1t5asMgXimhNhoMUvDAfBgNVHSMEGDAWgBQwDwrhoAwS\n6q2GA5CnoIQZVVim5jANBgkqhkiG9w0BAQsFAAOCAQEAhDT7ncsDiLG3QyVtffPp\nTjWscZowuHbVwUjwpZt5OxQFonSJxNCnqrj4MlIqqxraH5nnsrW5FqWyWWeMmXpz\nbFkiVhPFCVGjmRN9V1LXjHDc4lufe3ixq+MvgtU1YL/WJBmUxxw5dPifTT15IApJ\n6stLJ0QtHBNeEIIroUFpDB+O7OJYZ85ed6o6gT4n/v9nxBaLsZNpO2TzaWfI0Bst\nFEoPsfPKgBvwg9+2GijlP/VyKmP2gFdFm25PWeROU1VZzPrEhaOliO+/YXHt32YU\nJkxTF/smrLzxRRbb507cuvWEilzud93YbHmAhAj1h0CpDdzjxYDr5zRYJSP7BV+6\nUA==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDiTCCAnGgAwIBAgIIOsA0z3vOTSgwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM0WhcNMjcx\nMDIzMDgxNzM1WjBSMVAwTgYDVQQDDEdvcGVuc2hpZnQta3ViZS1jb250cm9sbGVy\nLW1hbmFnZXItb3BlcmF0b3JfY3NyLXNpZ25lci1zaWduZXJAMTc2MjA3MTQ1NTCC\nASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPY8aATxrjWpRKfnFU7BgmD1\nkNEst9RVY3fzGsARc5tF3x+zhl9BbsoJ9NAncCj+KnkAIYk4pJMU6BHIWIakpvqV\nLsnlPM45VU8ocwOWb5Z7g88YdqHGRWeZqZt/rPXmH/846iVGDstB0YQWgKKKK97X\nvjKMsq9ALSVj8gRWai7B7MVP/bZ4FgeqsYq6zIH9XKdPO8ev20qffrob4nmLGHdJ\nikAliwIy6nkVYrATKOS8t56votMD7xuFQ8rM6uQ0YVejA5rE/Tmq4/NsFpfFfkCP\nMU0vO2XwqCBV81XKD00SmW9MXIxbTq5lU9YjjE1sDFREtN4uZL4nEDa2+wnvIxEC\nAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O\nBBYEFDAPCuGgDBLqrYYDkKeghBlVWKbmMB8GA1UdIwQYMBaAFDAPCuGgDBLqrYYD\nkKeghBlVWKbmMA0GCSqGSIb3DQEBCwUAA4IBAQDS31/o3/4/9DBLFVcLvIwOQCK1\n0ZwJUa/8iK5GaMlMH6cNfvVZBrb4SwTVx0WXI5wrIXrlvYc+PtXL0MJeyIJmMpoU\nRyQAJZsh8cckeQjghV2Pf7wMfEbHudKTp8uoQDUBntkfNhJa4pPxmNWuhOrlvdB5\nEF/6IGviKAdSy0jcNpscvD3W0oSpCYRW0Ki/25LaFvIqP2Xy/cNJlWhzJWqZbK6k\nR9I4knhvIv/JYmppOVXw1rEvP+8Pn8UF2oSfFXcN5W+j4YIIhrAUnjAbaflpyX8k\nAUEKdtgVNNe7RlW9nQrhO8GqFJItitBbNtVuSkw9XIlQ0gc40E7mgB7Mjnbu\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDQjCCAiqgAwIBAgIICN23rtJ7PrYwDQYJKoZIhvcNAQELBQAwPzESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSkwJwYDVQQDEyBrdWJlLWFwaXNlcnZlci10by1rdWJlbGV0\nLXNpZ25lcjAeFw0yNTExMDIwNzM0MTFaFw0yNjExMDIwNzM0MTFaMD8xEjAQBgNV\nBAsTCW9wZW5zaGlmdDEpMCcGA1UEAxMga3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxl\ndC1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDzRlKEKNEy\nCyd+PPBkdzQZd3BeUb0b2qi1h8fbUiSNENrOpafKxxAHcr3a4KWB6sKhi28r14mF\nxcJ2Yb92/jLpkS15p629AUrGXxKuL8QtkBsY3dH0CqKMBedO6oxodva9Avc+3DMI\nvvYBJFy+4on/0JbM54fduvDmcEmhBtgRItK3Z87VbhemVPj7uDi9EV381uRMlmq4\ncgtD5mfS1yeRu0ut5IIr7/PN1G+93slLGQkHveqWlsFrDYd8Qm5PqirRBYy+18RC\nmEuNirFX3yPrEGwMvRlJyFia0RKuK69bFL2vduI5Wu7h/6VKP0/vEpEYqI6bJYoV\nbUjA2vqrV/1VAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD\nAQH/MB0GA1UdDgQWBBRNj9dsSIhfwKZ491Q/XZYt2lRWLzANBgkqhkiG9w0BAQsF\nAAOCAQEANmt7a5N+zch83D5+YeC4VQoFvpkHPKEYYT0EM/ykjch3453RmQWQEwkj\nVvstV1U16YpnEI61l1s347RHi64SwtlwV6tiNCpopDF2u3Bb+eiJqrlIC69EFnZE\n1426AVmZZq3sWu3eKB0HgT5u6B1rErSTl3c4hK4SiDsWWlVktBSN0BS4cD+urSAF\nc673/wLKCjq2I+9i3Wv2K7Ton3w5oaETE7lgQyImbKOVhJhFrPGu9fKXaeWlyXGY\nj7tz68vNTvecRynKrmzUJ9BBMfAXTrCowitzjBjanFitgXK4DnQMkb+8lv2Txb/n\nkB7RzcFDyIVd3g5XWBujR3fkQFWsNQ==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDlTCCAn2gAwIBAgIIV5i/4m8WRp0wDQYJKoZIhvcNAQELBQAwWDFWMFQGA1UE\nAwxNb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtYXBpc2Vy\ndmVyLXRvLWt1YmVsZXQtc2lnbmVyQDE3NjUyMTU4NjMwHhcNMjUxMjA4MTc0NDIz\nWhcNMjYxMjA4MTc0NDI0WjBYMVYwVAYDVQQDDE1vcGVuc2hpZnQta3ViZS1hcGlz\nZXJ2ZXItb3BlcmF0b3Jfa3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxldC1zaWduZXJA\nMTc2NTIxNTg2MzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMQk29Lc\nviCBF+ZN194ChQgHyYo1iN6wW+jZDEEIQpkmIfgrPnEOPL8+9d3SN92BqqYGdwnp\n5TdyDJBFBjrM8iKKvrq6x+EcyQJU6/Q+41bpPSLsziclImlDUUE29OYj6poxfNi1\nQBeFL1q4j9/ks+AfMnpjEbiGjxjJ8cV8++3NERSB1jJLft1rYcnQvgBuE64jqipO\nbNczVjMjcq0g+H+qpZknHlFueBqi5F/Nj/hC7QZbS96VThCxM123zqORBAfU5Fj0\ndMk3XqYTM1mpfyQHihtlyG3vsPXI/CBZgno6CI+KuXZJ46IjNNmiImyVJNKe7tXS\niWxbKKtEZHAvMcsCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQF\nMAMBAf8wHQYDVR0OBBYEFKzukDY3Odtmp3C+ncqem+63g2NuMB8GA1UdIwQYMBaA\nFKzukDY3Odtmp3C+ncqem+63g2NuMA0GCSqGSIb3DQEBCwUAA4IBAQBC2NLbh36L\n05mNq0+V4avx/2/xXvih+RtebPhiF8w8WG7WWRiIlK/yn8+iToFX/07+HWbBSK3g\nu5Yqac0eh8iKLkG+eIFiXpZR4B4Ha3ZRoU4N6dBMohIChZNugHGtjhfFjDpjFY8N\n9jMoZmTtjtK7RW2tu1qRyJcNSk8ou6nYNo/fB9PHWP5E12cWdg2ZQyESq+zE2dFo\n/dNjvb2y+GneObWzG9nclr6L7f6jI4LSOujO9ZA28xW4lf2EmosQ2HOeun48vA3O\n0C9lO1/SqcPkA6TtMHsoXZDSRv+mH62ugEZkDn8lgOizTm3l+jU9UA4RSvRD7ghR\nuXScj56hVynp\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDNDCCAhygAwIBAgIILvKlXd2YBKIwDQYJKoZIhvcNAQELBQAwODESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVy\nMB4XDTI1MTEwMjA3MzQxMFoXDTI2MTEwMjA3MzQxMFowODESMBAGA1UECxMJb3Bl\nbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOnFEdxnqqE7l+5NxN82TBBycC2F\n5cUboCcUVr211lubqmjCusnJzzz+6rKpO8uFqqGeu4C7rGpudOaK52IZhG0WP8b7\n0raQhnM4DV8t2SHDV4GhFUiuNE+b4FJrZ6jiljQo2g9ZeeCZgdmmBrIFHBXDFzEc\nA1RPScqOtvBbbH064Zd267gOmVPJnWmxDXo6X/RGYCm1YUS6FQ2WWpl707ComvgZ\nAvWGSA4H1sZirMQ+ug3bctkLb+SiXUzf+tLnGIHPeqDNfMrNUhxBl6dDhlMbUIzY\nrVxgDD8y3i7eg5i5HG8yntl8epgs2gn47wfavfjLqATBlciJPZY6Qv3BFQIDAQAB\no0IwQDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\nWi6wmZ50AVdwxaiO3WjTvp82+sEwDQYJKoZIhvcNAQELBQADggEBAA4siMWwOGL5\nCiCxbiJsuKMbCLlFR6JaD43FWnUCZDoe7np7W76Oh4py4Zht7XlZKotcXrrfRYVO\ndVht66PCbSujy375p/B6c3isG4h/1cNSGDm1uhAkHXGZ88S2wSjKT5YJ/HUAkvyj\nadQgZeO7Q60YBSDE/67Ldq1zqvBrMF2k8pF49p1AdAtf4OSDzIaGGPUQJTFExA0E\n03xMlOPNhYZ8MgFT2XE6nRT74lCAfK9krAsZLtFuAtp/14t013PD0FqTTQRUmuSj\nO6pJKDTH8tZ3ieXxSzRR+j4p5hkHaehgQVyUbwiw8WVXkd6NcWR6yQcSeqIsTSCD\neMDdTmmKyuo=\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDhzCCAm+gAwIBAgIIcuJfJWKJ/NEwDQYJKoZIhvcNAQELBQAwUTFPME0GA1UE\nAwxGb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtY29udHJv\nbC1wbGFuZS1zaWduZXJAMTc2NTIxNTg2MzAeFw0yNTEyMDgxNzQ0MjNaFw0yNjAy\nMDYxNzQ0MjRaMFExTzBNBgNVBAMMRm9wZW5zaGlmdC1rdWJlLWFwaXNlcnZlci1v\ncGVyYXRvcl9rdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyQDE3NjUyMTU4NjMwggEi\nMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC86U3wU8BYhyUyqYM3Vuy/Kvfu\nQlUYxAqiVxA50CLa488sawRtfVN03+NKfPtzoj6xg5nYxR0a/+IP95n2YltFsU5k\nyumfMqcMWP1gZeUuqq0tHgy/GYvD4uF2IWLRMYMdYrsbJlOPWRCnRfWtXN7LJHAY\nBQwKW01c7MOm8AMOT5sGCw7z1GwRO 2025-12-08T17:44:43.437694812+00:00 stderr F dLkjebZSAWeWP+uho5ubO7R9yFVrMJGzBum\nXUceaUrjiVyDCVdMBMttbZtjYYwW1NqDl4P4CgtW+CRONRTW8FNDdldzjm2fo/HL\n/frz934yfHA6c6xDWRI4+BEKJpecqxBUoC6xeGNdPd3KFmqPFRp7N/oERpH5AgMB\nAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW\nBBRrr6Rca+ABepsTcpAiBlwqNlkzNzAfBgNVHSMEGDAWgBRrr6Rca+ABepsTcpAi\nBlwqNlkzNzANBgkqhkiG9w0BAQsFAAOCAQEArD1l55HNxEi+lDb8LV+9Zzmb+gxB\nDq27GP6pZD+v8cHdoet3SgTFXeYKrd/Aw34+ZJceKPQrhoLtGkl+UW9T50ymZmVx\nENwuX+8e/OxAYAcKZdAwlCmPBV2A+puager7UZ6cE35W22ZqqijJ3J+nB7BmCtQ7\nqooWmH+OcHkw9Eoa8BbWCAH8nItf7bglCui0yQb4MCbrGMCHOVKwInTpI2biAdb6\nvQwXe1ofL4bVZt0eiPk2tuhljglLjV23q/aaFqTXC7T6UIKtb0olqNjGO10Aasew\ntAxUmbhL/uOz2X2JztYbjYPfVWbeUefTtX8tXV8oqflB6auskk/m2wMUbw==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDSDCCAjCgAwIBAgIIV//3Qv2OxM4wDQYJKoZIhvcNAQELBQAwQjESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSwwKgYDVQQDEyNrdWJlbGV0LWJvb3RzdHJhcC1rdWJlY29u\nZmlnLXNpZ25lcjAeFw0yNTExMDIwNzM0MDhaFw0zNTEwMzEwNzM0MDhaMEIxEjAQ\nBgNVBAsTCW9wZW5zaGlmdDEsMCoGA1UEAxMja3ViZWxldC1ib290c3RyYXAta3Vi\nZWNvbmZpZy1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDc\nZBn9WZ5wLPck6+g3f4p0NBGyE6LaXnWavXx9m0sVdTVQooknndmKufeYkXGZ5Lb+\nfbMAp/6swgSJ1DjdBj06rCqJEZfdZl3uZoD/Th4ha2Phl12bXaNYLiuOu5BOZ3UW\n08y1Wab9Y9zc0o4Z71pHH4o9TH3QPNT6BqAz4kkgD6t1r/R7E7lrZbx+7e+0JBAW\nRgufaFOX1AYU5B4+pSM21eJY7oP1P9I4DMeeJW39opCCHAuUQgHpOV1YPtRqEPJ4\n9matas8qm5qIMIPbGEGFckSJqgny9YCfHaLezJtZMIHJgz5LW4H91gQCGvfSbLtH\nYxYO/PcTXQCnDYNwqf29AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBSCg7Y8QkzypoNAqYVPNK5YWMVIXzANBgkqhkiG\n9w0BAQsFAAOCAQEAsP1NR5ZgC7F5FvoU2CXla4FufdeYTk4bjjNoFJMqnvI5gcY6\nJDxP1Rl2YBHCpRxngtBFxw3Xoe8UnBKzZWsS5wLUYRloprhGVBSLM0vqJJOvP7M0\njt3SLuB7h0dG2GO9yQ4y10+xVWxP5Os9wcbQcRgTQKL3gHmCq4aQN1cqJSxyJ/ut\nlbfYlM/xBcfLMY5Leeas6y2FPCFIEONh1U9FJZlF3YkhPp+XD7aePtC4tJqsokMc\nP80IwPn54aDT9akRPsOteB6C+xSAz2TlfWaJ/l/x9yXK+HJrRhMartqyN511SeEd\nDpNcMW9qPTjJzBj+N3f0ZfvbTmhVSvV65ZEtAw==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDhTCCAm2gAwIBAgIIfksp5LDEMMgwDQYJKoZIhvcNAQELBQAwUDFOMEwGA1UE\nAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX25vZGUtc3lzdGVt\nLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MB4XDTI1MTEwMjA3NTEyN1oXDTI4MTEw\nMTA3NTEyOFowUDFOMEwGA1UEAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9w\nZXJhdG9yX25vZGUtc3lzdGVtLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA402A+B9GaYmzBVLJEyrGvytCY0Vb\nvvIKHW2kxl9IFN3+LNZHW/mKeJfz/hBTm2bs+6uRCDlMSyONDlVUWVsuE+q0+F42\n0n3VyxWRSrDZ2ur5oNxmoBSsHRM+PxccQ6X3JTZyO397LHNOzxAs/Es+St8A8sbY\nGLc1lNqeOLvwAOT5d2PrFlYCAfXYs/UVIaio846jidKKN1f8Z6W5pgdAHuTXbyBQ\nLDQh6s43TBPhww1KszmcwURjEBDCT6KlhsM/quMd9XlMU0ZEAMf6XxsqvW8ia8C8\nF+RNAaGkwmiS4qZ+hJ4KIUnWM84j+bsyNBqlHFKi1e7LsKRyjnQ288FqIQIDAQAB\no2MwYTAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\n9O+dX0K7dzD13bshuR70sHbUAeAwHwYDVR0jBBgwFoAU9O+dX0K7dzD13bshuR70\nsHbUAeAwDQYJKoZIhvcNAQELBQADggEBADPRGSn1U/YwBUkpU7vTzsxqLaVJW21o\n6hV/W2IjjGNGqp6c2kSH/3ZGSEjNwIJqKRFpC2gmTPgAqnC4nDosOHx5F5HXTmrU\n1l2Ivcm1Ep+t/zBgNHjBi3yommx8n2iTTdakpQaq7/u1s0I4UiRqXydjoGXp7H1C\naAsmRlK8ovgEAWzItjeMBzy65wqiStPBK+XAIddqznHCxrRyH5xk3HcnyMG4GDWl\nrogdK8yTGCuZVCvGfe9Hwm8tyYrxDRNvRLTc0ssTonAwnR/7IzaVVc9Pp0svCynJ\n6VX3FGhgWwDVWeajj8yrXeR42az/Rr1TAAOZtJMW+4hIkaU0/+msvgw=\n-----END CERTIFICATE-----\n"},"metadata":{"creationTimestamp":"2025-11-02T07:51:50Z","managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:ca-bundle.crt":{}},"f:metadata":{"f:annotations":{".":{},"f:openshift.io/owning-component":{}}}},"manager":"cluster-kube-apiserver-operator","operation":"Update","time":"2025-12-08T17:44:32Z"}],"resourceVersion":null,"uid":"60afc254-0d91-486c-a410-610b8f84e03e"}} 2025-12-08T17:44:43.437715423+00:00 stderr F I1208 17:44:43.437676 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator", UID:"09857aec-2c93-4f0d-9e38-a820bd5b8362", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapUpdated' Updated ConfigMap/client-ca -n openshift-kube-controller-manager: 2025-12-08T17:44:43.437715423+00:00 stderr F cause by changes in data.ca-bundle.crt 2025-12-08T17:45:16.051676165+00:00 stderr F I1208 17:45:16.051139 1 tlsconfig.go:181] "Loaded client CA" 
index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.051086349 +0000 UTC))" 2025-12-08T17:45:16.051708656+00:00 stderr F I1208 17:45:16.051670 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.051650415 +0000 UTC))" 2025-12-08T17:45:16.051719877+00:00 stderr F I1208 17:45:16.051712 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.051680246 +0000 UTC))" 2025-12-08T17:45:16.051763958+00:00 stderr F I1208 17:45:16.051738 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.051721237 +0000 UTC))" 2025-12-08T17:45:16.051791299+00:00 stderr F I1208 17:45:16.051769 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.051752318 +0000 UTC))" 2025-12-08T17:45:16.051833870+00:00 stderr F I1208 17:45:16.051799 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.051782858 +0000 UTC))" 2025-12-08T17:45:16.053894527+00:00 stderr F I1208 17:45:16.053175 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.053143077 +0000 UTC))" 2025-12-08T17:45:16.053894527+00:00 stderr F I1208 17:45:16.053211 1 tlsconfig.go:181] "Loaded client CA" index=7 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.053192958 +0000 UTC))" 2025-12-08T17:45:16.053894527+00:00 stderr F I1208 17:45:16.053239 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.053220349 +0000 UTC))" 2025-12-08T17:45:16.053894527+00:00 stderr F I1208 17:45:16.053267 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.05325194 +0000 UTC))" 2025-12-08T17:45:16.053894527+00:00 stderr F I1208 17:45:16.053294 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.05327628 +0000 UTC))" 2025-12-08T17:45:16.053894527+00:00 stderr F I1208 17:45:16.053691 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-kube-controller-manager-operator.svc\" [serving] validServingFor=[metrics.openshift-kube-controller-manager-operator.svc,metrics.openshift-kube-controller-manager-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:12 +0000 UTC to 2027-11-02 07:52:13 +0000 UTC (now=2025-12-08 17:45:16.053666801 +0000 UTC))" 2025-12-08T17:45:16.054396711+00:00 stderr F I1208 17:45:16.054035 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:21 +0000 UTC to 2028-12-08 16:44:21 +0000 UTC (now=2025-12-08 17:45:16.054014511 +0000 UTC))" 2025-12-08T17:45:19.464672203+00:00 stderr P I1208 17:45:19.464574 1 core.go:352] ConfigMap "openshift-kube-controller-manager/client-ca" changes: {"data":{"ca-bundle.crt":"-----BEGIN 
CERTIFICATE-----\nMIIFWzCCA0OgAwIBAgIUGCGM8Q3O0omYhECixt5AvIY+d4owDQYJKoZIhvcNAQEL\nBQAwPTESMBAGA1UECwwJb3BlbnNoaWZ0MScwJQYDVQQDDB5hZG1pbi1rdWJlY29u\nZmlnLXNpZ25lci1jdXN0b20wHhcNMjUxMjA4MTc0NTA5WhcNMzUxMjA2MTc0NTA5\nWjA9MRIwEAYDVQQLDAlvcGVuc2hpZnQxJzAlBgNVBAMMHmFkbWluLWt1YmVjb25m\naWctc2lnbmVyLWN1c3RvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB\nANpMZjkbItUcAdd+J4eSy87mk6EfZSZD8WMH+LJpdHgeZFUqJZfH8qVDAn3QyEkE\nPn1W+6zV/EUmgAx76hvaAfYq9U7ic5RYK8jJt2j6Tb0SMG+/kvaEohwCnX5GDSek\nzzSRKc6aHZwjkR3d4QpY8BOzMx8lBIIl/px2xsw3QGtihaeBbnYa7CcbWznR/V0b\nfJ/o/oMd5okhZtJZkc0w6o4codNaSIFu1MbPPBCK6OwVfoD43uq+y/Wcinv3M1sw\nKKFaW9gaMFAkStevvcQcFFSSRej8CuZK+o2H+2OxTVi19P4WmIDn9A22MPrlIGno\nOcQPfFayfIczLMiUNe6bjueCMkVEIfTszMKUALNlzHPQ1W15CC3Bqg4xqnRL9JpL\nE1DBQwhuq4lvAxFItsJhQCagWlHgyinbVZHOB/QS+RZ4Vo2DcIkTcXRxZ7KUz/mj\nitF8kCdDz6aUiPeDNGm2M4fKBdWqrgHLUqfATGq3Qh545HpZ6QqYffvLLNLuKxM0\nim+qD5wCgoJPROitdK5plsPfe/C4zjoYc7oFKlXM389DNj0KxwRvMUE6kZoptjUo\nd676JxYQF3XrZnIpZ+PlIqXt2R+ahpuz0BvBMAlwrqEhDP9CsCHx8sRXNrw3OSSp\n9LZ5CRFampF4RoHWikdd8uybWY05f7Eis/o2gEPJrCUnAgMBAAGjUzBRMB0GA1Ud\nDgQWBBScadKZBJR7Ydm+yfS6UsTlrSxg1DAfBgNVHSMEGDAWgBScadKZBJR7Ydm+\nyfS6UsTlrSxg1DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBp\nOh3vzzZhIkTOOVScU0gxIIl6mpSRScsJkK3TfSdvQWCrNlYJmPcO1aD/YAJv6fmT\nfih+NkBD00qrrkzcdOsaDJXsT2oaB4h8VJQrTDcal3V4F3jWS8j0bn8QukgwWtIq\nevP2sTS8oIzx59k/e2EhjdCgDjnTEBDajXfn9UXRjh+3ZHqmFtZYdz/uZWmBxeLK\n6Kqi0GtFwP6dfylQzg1IXB0C4D2xqVoHEimKIrBQyak8RmDKzleRxvIOUSj3o5DM\nVEyajcJQ6XaD+IwMGh1/DVLxN4uTMbMZDwv+gWl3TvK++f+TSSTMhy+92A2WecDO\nPNLD7xiX5wc6ge5Dh9AzzoOW3tP1iiB9Y0iCmxuj4SUhR0hfgQlRY6sxF40E8xWO\nNNQYbDo+rEwE7frnykHMfqclzJ/a8ax3+lzfM4CvYOmj97909M+2pc0d8Dnbkg75\nncxbob8nQ2UTmQ4nu3qFCZ+5ssDtQaDBXCzSbrSUiFpYtZ1vDZMXcBcoPtri29Ih\ndhUSPKLUHmHzvcEK1n8PPRcfKHjES8s0ankZfnKkcU11Yjhx8eeKUT+s2Iq+Tl6e\ndHKDccvC42BF9X0NarLfvMJcrQu1mPjBYs6WX9a2v9uvg0DG4OAZnu6oQ+gw25LT\n31bGdVPDcEEpNdcMmlY/LsOs00DNYQmW8rdLjfhjrQ==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDXTCCAkWgAwIBAgIIHuh6I9HTH+QwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM1WhcNMjYx\nMDI4MDgxNzM2WjAmMSQwIgYDVQQDDBtrdWJlLWNzci1zaWduZXJfQDE3NjIwNzE0\nNTUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb91UVWMxGg+Q7+o+5\ntt2z/Oq+7Mc1SNGJ4oF0Y99WM1Y7tIo5NOKWiIXE16rGuzY4+D9O53fh14ngzJm3\nWh3GjCGvr8/2W7J1BblXwuKwDuRl+OfJvyPtETUeME42Y9V6XP/B60iaaEYhm5t7\nTDf7TmmjEpqeWix43KqHnpcW9Zr8tM+tHLrGwcHnb+z3LvGkQA7mbXsaHiuryCVx\nudxQYNKWgtAkw3OOtuVyJ2gGD7iYVni1jg7nc9ZhQOYBoYRbAw3zh36CY50dZA89\nhDKYsVVourd9xfAdwSmrcgtsVo1X0ucCpEEUYKEz3/udgk+Dgf2hy3flIMcg9kcI\nS8xzAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0G\nA1UdDgQWBBSGMLijow+n1t5asMgXimhNhoMUvDAfBgNVHSMEGDAWgBQwDwrhoAwS\n6q2GA5CnoIQZVVim5jANBgkqhkiG9w0BAQsFAAOCAQEAhDT7ncsDiLG3QyVtffPp\nTjWscZowuHbVwUjwpZt5OxQFonSJxNCnqrj4MlIqqxraH5nnsrW5FqWyWWeMmXpz\nbFkiVhPFCVGjmRN9V1LXjHDc4lufe3ixq+MvgtU1YL/WJBmUxxw5dPifTT15IApJ\n6stLJ0QtHBNeEIIroUFpDB+O7OJYZ85ed6o6gT4n/v9nxBaLsZNpO2TzaWfI0Bst\nFEoPsfPKgBvwg9+2GijlP/VyKmP2gFdFm25PWeROU1VZzPrEhaOliO+/YXHt32YU\nJkxTF/smrLzxRRbb507cuvWEilzud93YbHmAhAj1h0CpDdzjxYDr5zRYJSP7BV+6\nUA==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDiTCCAnGgAwIBAgIIOsA0z3vOTSgwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM0WhcNMjcx\nMDIzMDgxNzM1WjBSMVAwTgYDVQQDDEdvcGVuc2hpZnQta3ViZS1jb250cm9sbGVy\nLW1hbmFnZXItb3BlcmF0b3JfY3NyLXNpZ25lci1zaWduZXJAMTc2MjA3MTQ1NTCC\nASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPY8aATxrjWpRKfnFU7BgmD1\nkNEst9RVY3fzGsARc5tF3x+zhl9BbsoJ9NAncCj+KnkAIYk4pJMU6BHIWIakpvqV\nLsnlPM45VU8ocwOWb5Z7g88YdqHGRWeZqZt/rPXmH/846iVGDstB0YQWgKKKK97X\nvjKMsq9ALSVj8gRWai7B7MVP/bZ4FgeqsYq6zIH9XKdPO8ev20qffrob4nmLGHdJ\nikAliwIy6nkVYrATKOS8t56votMD7xuFQ8rM6uQ0YVejA5rE/Tmq4/NsFpfFfkCP\nMU0vO2XwqCBV81XKD00SmW9MXIxbTq5lU9YjjE1sDFREtN4uZL4nEDa2+wnvIxEC\nAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O\nBBYEFDAPCuGgDBLqrYYDkKeghBlVWKbmMB8GA1UdIwQYMBaAFDAPCuGgDBLqrYYD\nkKeghBlVWKbmMA0GCSqGSIb3DQEBCwUAA4IBAQDS31/o3/4/9DBLFVcLvIwOQCK1\n0ZwJUa/8iK5GaMlMH6cNfvVZBrb4SwTVx0WXI5wrIXrlvYc+PtXL0MJeyIJmMpoU\nRyQAJZsh8cckeQjghV2Pf7wMfEbHudKTp8uoQDUBntkfNhJa4pPxmNWuhOrlvdB5\nEF/6IGviKAdSy0jcNpscvD3W0oSpCYRW0Ki/25LaFvIqP2Xy/cNJlWhzJWqZbK6k\nR9I4knhvIv/JYmppOVXw1rEvP+8Pn8UF2oSfFXcN5W+j4YIIhrAUnjAbaflpyX8k\nAUEKdtgVNNe7RlW9nQrhO8GqFJItitBbNtVuSkw9XIlQ0gc40E7mgB7Mjnbu\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDQjCCAiqgAwIBAgIICN23rtJ7PrYwDQYJKoZIhvcNAQELBQAwPzESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSkwJwYDVQQDEyBrdWJlLWFwaXNlcnZlci10by1rdWJlbGV0\nLXNpZ25lcjAeFw0yNTExMDIwNzM0MTFaFw0yNjExMDIwNzM0MTFaMD8xEjAQBgNV\nBAsTCW9wZW5zaGlmdDEpMCcGA1UEAxMga3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxl\ndC1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDzRlKEKNEy\nCyd+PPBkdzQZd3BeUb0b2qi1h8fbUiSNENrOpafKxxAHcr3a4KWB6sKhi28r14mF\nxcJ2Yb92/jLpkS15p629AUrGXxKuL8QtkBsY3dH0CqKMBedO6oxodva9Avc+3DMI\nvvYBJFy+4on/0JbM54fduvDmcEmhBtgRItK3Z87VbhemVPj7uDi9EV381uRMlmq4\ncgtD5mfS1yeRu0ut5IIr7/PN1G+93slLGQkHveqWlsFrDYd8Qm5PqirRBYy+18RC\nmEuNirFX3yPrEGwMvRlJyFia0RKuK69bFL2vduI5Wu7h/6VKP0/vEpEYqI6bJYoV\nbUjA2vqrV/1VAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD\nAQH/MB0GA1UdDgQWBBRNj9dsSIhfwKZ491Q/XZYt2lRWLzANBgkqhkiG9w0BAQsF\nAAOCAQEANmt7a5N+zch83D5+YeC4VQoFvpkHPKEYYT0EM/ykjch3453RmQWQEwkj\nVvstV1U16YpnEI61l1s347RHi64SwtlwV6tiNCpopDF2u3Bb+eiJqrlIC69EFnZE\n1426AVmZZq3sWu3eKB0HgT5u6B1rErSTl3c4hK4SiDsWWlVktBSN0BS4cD+urSAF\nc673/wLKCjq2I+9i3Wv2K7Ton3w5oaETE7lgQyImbKOVhJhFrPGu9fKXaeWlyXGY\nj7tz68vNTvecRynKrmzUJ9BBMfAXTrCowitzjBjanFitgXK4DnQMkb+8lv2Txb/n\nkB7RzcFDyIVd3g5XWBujR3fkQFWsNQ==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDlTCCAn2gAwIBAgIIV5i/4m8WRp0wDQYJKoZIhvcNAQELBQAwWDFWMFQGA1UE\nAwxNb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtYXBpc2Vy\ndmVyLXRvLWt1YmVsZXQtc2lnbmVyQDE3NjUyMTU4NjMwHhcNMjUxMjA4MTc0NDIz\nWhcNMjYxMjA4MTc0NDI0WjBYMVYwVAYDVQQDDE1vcGVuc2hpZnQta3ViZS1hcGlz\nZXJ2ZXItb3BlcmF0b3Jfa3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxldC1zaWduZXJA\nMTc2NTIxNTg2MzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMQk29Lc\nviCBF+ZN194ChQgHyYo1iN6wW+jZDEEIQpkmIfgrPnEOPL8+9d3SN92BqqYGdwnp\n5TdyDJBFBjrM8iKKvrq6x+EcyQJU6/Q+41bpPSLsziclImlDUUE29OYj6poxfNi1\nQBeFL1q4j9/ks+AfMnpjEbiGjxjJ8cV8++3NERSB1jJLft1rYcnQvgBuE64jqipO\nbNczVjMjcq0g+H+qpZknHlFueBqi5F/Nj/hC7QZbS96VThCxM123zqORBAfU5Fj0\ndMk3XqYTM1mpfyQHihtlyG3vsPXI/CBZgno6CI+KuXZJ46IjNNmiImyVJNKe7tXS\niWxbKKtEZHAvMcsCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQF\nMAMBAf8wHQYDVR0OBBYEFKzukDY3Odtmp3C+ncqem+63g2NuMB8GA1UdIwQYMBaA\nFKzukDY3Odtmp3C+ncqem+63g2NuMA0GCSqGSIb3DQEBCwUAA4IBAQBC2NLbh36L\n05mNq0+V4avx/2/xXvih+RtebPhiF8w8WG7WWRiIlK/yn8+iToFX/07+HWbBSK3g\nu5Yqac0eh8iKLkG+eIFiXpZR4B4Ha3ZRoU4N6dBMohIChZNugHGtjhfFjDpjFY8N\n9jMoZmTtjtK7RW2tu1qRyJcNSk8ou6nYNo/fB9PHWP5E12cWdg2ZQyESq+zE2dFo\n/dNjvb2y+GneObWzG9nclr6L7f6jI4LSOujO9ZA28xW4lf2EmosQ2HOeun48vA3O\n0C9lO1/SqcPkA6TtMHsoXZDSRv+mH62ugEZkDn8lgOizTm3l+jU9UA4RSvRD7ghR\nuXScj56hVynp\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDNDCCAhygAwIBAgIILvKlXd2YBKIwDQYJKoZIhvcNAQELBQAwODESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVy\nMB4XDTI1MTEwMjA3MzQxMFoXDTI2MTEwMjA3MzQxMFowODESMBAGA1UECxMJb3Bl\nbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOnFEdxnqqE7l+5NxN82TBBycC2F\n5cUboCcUVr211lubqmjCusnJzzz+6rKpO8uFqqGeu4C7rGpudOaK52IZhG0WP8b7\n0raQhnM4DV8t2SHDV4GhFUiuNE+b4FJrZ6jiljQo2g9ZeeCZgdmmBrIFHBXDFzEc\nA1RPScqOtvBbbH064Zd267gOmVPJnWmxDXo6X/RGYCm1YUS6FQ2WWpl707ComvgZ\nAvWGSA4H1sZirMQ+ug3bctkLb+SiXUzf+tLnGIHPeqDNfMrNUhxBl6dDhlMbUIzY\nrVxgDD8y3i7eg5i5HG8yntl8epgs2gn47wfavfjLqATBlciJPZY6Qv3BFQIDAQAB\no0IwQDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\nWi6wmZ50AVdwxaiO3WjTvp82+sEwDQYJKoZIhvcNAQELBQADggEBAA4siMWwOGL5\nCiCxbiJsuKMbCLlFR6JaD43FWnUCZDoe7np7W76Oh4py4Zht7XlZKotcXrrfRYVO\ndVht66PCbSujy375p/B6c3isG4h/1cNSGDm1uhAkHXGZ88S2wSjKT5YJ/HUAkvyj\nadQgZeO7Q60YBSDE/67Ldq1zqvBrMF2k8pF49p1AdAtf4OSDzIaGGPUQJTFExA0 2025-12-08T17:45:19.464718224+00:00 stderr F E\n03xMlOPNhYZ8MgFT2XE6nRT74lCAfK9krAsZLtFuAtp/14t013PD0FqTTQRUmuSj\nO6pJKDTH8tZ3ieXxSzRR+j4p5hkHaehgQVyUbwiw8WVXkd6NcWR6yQcSeqIsTSCD\neMDdTmmKyuo=\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDhzCCAm+gAwIBAgIIcuJfJWKJ/NEwDQYJKoZIhvcNAQELBQAwUTFPME0GA1UE\nAwxGb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtY29udHJv\nbC1wbGFuZS1zaWduZXJAMTc2NTIxNTg2MzAeFw0yNTEyMDgxNzQ0MjNaFw0yNjAy\nMDYxNzQ0MjRaMFExTzBNBgNVBAMMRm9wZW5zaGlmdC1rdWJlLWFwaXNlcnZlci1v\ncGVyYXRvcl9rdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyQDE3NjUyMTU4NjMwggEi\nMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC86U3wU8BYhyUyqYM3Vuy/Kvfu\nQlUYxAqiVxA50CLa488sawRtfVN03+NKfPtzoj6xg5nYxR0a/+IP95n2YltFsU5k\nyumfMqcMWP1gZeUuqq0tHgy/GYvD4uF2IWLRMYMdYrsbJlOPWRCnRfWtXN7LJHAY\nBQwKW01c7MOm8AMOT5sGCw7z1GwROdLkjebZSAWeWP+uho5ubO7R9yFVrMJGzBum\nXUceaUrjiVyDCVdMBMttbZtjYYwW1NqDl4P4CgtW+CRONRTW8FNDdldzjm2fo/HL\n/frz934yfHA6c6xDWRI4+BEKJpecqxBUoC6xeGNdPd3KFmqPFRp7N/oERpH5AgMB\nAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW\nBBRrr6Rca+ABepsTcpAiBlwqNlkzNzAfBgNVHSMEGDAWgBRrr6Rca+ABepsTcpAi\nBlwqNlkzNzANBgkqhkiG9w0BAQsFAAOCAQEArD1l55HNxEi+lDb8LV+9Zzmb+gxB\nDq27GP6pZD+v8cHdoet3SgTFXeYKrd/Aw34+ZJceKPQrhoLtGkl+UW9T50ymZmVx\nENwuX+8e/OxAYAcKZdAwlCmPBV2A+puager7UZ6cE35W22ZqqijJ3J+nB7BmCtQ7\nqooWmH+OcHkw9Eoa8BbWCAH8nItf7bglCui0yQb4MCbrGMCHOVKwInTpI2biAdb6\nvQwXe1ofL4bVZt0eiPk2tuhljglLjV23q/aaFqTXC7T6UIKtb0olqNjGO10Aasew\ntAxUmbhL/uOz2X2JztYbjYPfVWbeUefTtX8tXV8oqflB6auskk/m2wMUbw==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDSDCCAjCgAwIBAgIIV//3Qv2OxM4wDQYJKoZIhvcNAQELBQAwQjESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSwwKgYDVQQDEyNrdWJlbGV0LWJvb3RzdHJhcC1rdWJlY29u\nZmlnLXNpZ25lcjAeFw0yNTExMDIwNzM0MDhaFw0zNTEwMzEwNzM0MDhaMEIxEjAQ\nBgNVBAsTCW9wZW5zaGlmdDEsMCoGA1UEAxMja3ViZWxldC1ib290c3RyYXAta3Vi\nZWNvbmZpZy1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDc\nZBn9WZ5wLPck6+g3f4p0NBGyE6LaXnWavXx9m0sVdTVQooknndmKufeYkXGZ5Lb+\nfbMAp/6swgSJ1DjdBj06rCqJEZfdZl3uZoD/Th4ha2Phl12bXaNYLiuOu5BOZ3UW\n08y1Wab9Y9zc0o4Z71pHH4o9TH3QPNT6BqAz4kkgD6t1r/R7E7lrZbx+7e+0JBAW\nRgufaFOX1AYU5B4+pSM21eJY7oP1P9I4DMeeJW39opCCHAuUQgHpOV1YPtRqEPJ4\n9matas8qm5qIMIPbGEGFckSJqgny9YCfHaLezJtZMIHJgz5LW4H91gQCGvfSbLtH\nYxYO/PcTXQCnDYNwqf29AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBSCg7Y8QkzypoNAqYVPNK5YWMVIXzANBgkqhkiG\n9w0BAQsFAAOCAQEAsP1NR5ZgC7F5FvoU2CXla4FufdeYTk4bjjNoFJMqnvI5gcY6\nJDxP1Rl2YBHCpRxngtBFxw3Xoe8UnBKzZWsS5wLUYRloprhGVBSLM0vqJJOvP7M0\njt3SLuB7h0dG2GO9yQ4y10+xVWxP5Os9wcbQcRgTQKL3gHmCq4aQN1cqJSxyJ/ut\nlbfYlM/xBcfLMY5Leeas6y2FPCFIEONh1U9FJZlF3YkhPp+XD7aePtC4tJqsokMc\nP80IwPn54aDT9akRPsOteB6C+xSAz2TlfWaJ/l/x9yXK+HJrRhMartqyN511SeEd\nDpNcMW9qPTjJzBj+N3f0ZfvbTmhVSvV65ZEtAw==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDhTCCAm2gAwIBAgIIfksp5LDEMMgwDQYJKoZIhvcNAQELBQAwUDFOMEwGA1UE\nAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX25vZGUtc3lzdGVt\nLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MB4XDTI1MTEwMjA3NTEyN1oXDTI4MTEw\nMTA3NTEyOFowUDFOMEwGA1UEAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9w\nZXJhdG9yX25vZGUtc3lzdGVtLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA402A+B9GaYmzBVLJEyrGvytCY0Vb\nvvIKHW2kxl9IFN3+LNZHW/mKeJfz/hBTm2bs+6uRCDlMSyONDlVUWVsuE+q0+F42\n0n3VyxWRSrDZ2ur5oNxmoBSsHRM+PxccQ6X3JTZyO397LHNOzxAs/Es+St8A8sbY\nGLc1lNqeOLvwAOT5d2PrFlYCAfXYs/UVIaio846jidKKN1f8Z6W5pgdAHuTXbyBQ\nLDQh6s43TBPhww1KszmcwURjEBDCT6KlhsM/quMd9XlMU0ZEAMf6XxsqvW8ia8C8\nF+RNAaGkwmiS4qZ+hJ4KIUnWM84j+bsyNBqlHFKi1e7LsKRyjnQ288FqIQIDAQAB\no2MwYTAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\n9O+dX0K7dzD13bshuR70sHbUAeAwHwYDVR0jBBgwFoAU9O+dX0K7dzD13bshuR70\nsHbUAeAwDQYJKoZIhvcNAQELBQADggEBADPRGSn1U/YwBUkpU7vTzsxqLaVJW21o\n6hV/W2IjjGNGqp6c2kSH/3ZGSEjNwIJqKRFpC2gmTPgAqnC4nDosOHx5F5HXTmrU\n1l2Ivcm1Ep+t/zBgNHjBi3yommx8n2iTTdakpQaq7/u1s0I4UiRqXydjoGXp7H1C\naAsmRlK8ovgEAWzItjeMBzy65wqiStPBK+XAIddqznHCxrRyH5xk3HcnyMG4GDWl\nrogdK8yTGCuZVCvGfe9Hwm8tyYrxDRNvRLTc0ssTonAwnR/7IzaVVc9Pp0svCynJ\n6VX3FGhgWwDVWeajj8yrXeR42az/Rr1TAAOZtJMW+4hIkaU0/+msvgw=\n-----END CERTIFICATE-----\n"},"metadata":{"creationTimestamp":"2025-11-02T07:51:50Z","managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:ca-bundle.crt":{}},"f:metadata":{"f:annotations":{".":{},"f:openshift.io/owning-component":{}}}},"manager":"cluster-kube-apiserver-operator","operation":"Update","time":"2025-12-08T17:45:16Z"}],"resourceVersion":null,"uid":"60afc254-0d91-486c-a410-610b8f84e03e"}} 2025-12-08T17:45:19.464736774+00:00 stderr F I1208 17:45:19.464709 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator", UID:"09857aec-2c93-4f0d-9e38-a820bd5b8362", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapUpdated' Updated ConfigMap/client-ca -n openshift-kube-controller-manager: 2025-12-08T17:45:19.464736774+00:00 stderr F cause by changes in data.ca-bundle.crt 2025-12-08T17:45:19.466296057+00:00 stderr F I1208 17:45:19.466246 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:46:23.293101463+00:00 stderr F E1208 17:46:23.292538 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager-operator/leases/kube-controller-manager-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:23.294072013+00:00 stderr F E1208 17:46:23.293990 1 leaderelection.go:436] error retrieving resource lock openshift-kube-controller-manager-operator/kube-controller-manager-operator-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager-operator/leases/kube-controller-manager-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:24.587554146+00:00 stderr F E1208 17:46:24.586765 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, 
\"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-controller-manager-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:24.599807545+00:00 stderr F E1208 17:46:24.599709 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-controller-manager-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:24.617713652+00:00 stderr F E1208 17:46:24.617501 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-controller-manager-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:24.650387273+00:00 stderr F E1208 17:46:24.650318 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-controller-manager-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 
2025-12-08T17:46:24.698372673+00:00 stderr F E1208 17:46:24.698306 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-controller-manager-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:24.786740645+00:00 stderr F E1208 17:46:24.786647 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-controller-manager-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:24.954153200+00:00 stderr F E1208 17:46:24.954066 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-controller-manager-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:25.176295348+00:00 stderr F E1208 17:46:25.176233 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.178680380+00:00 stderr F E1208 17:46:25.178629 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager 
\"kube-controller-manager-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=kube-controller-manager-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.184396491+00:00 stderr F E1208 17:46:25.184350 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.189800313+00:00 stderr F E1208 17:46:25.189732 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-controller-manager-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=kube-controller-manager-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.197222576+00:00 stderr F E1208 17:46:25.197137 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.205759463+00:00 stderr F E1208 17:46:25.205704 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-controller-manager-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=kube-controller-manager-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.372366293+00:00 stderr F E1208 17:46:25.372264 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.379865988+00:00 stderr F E1208 17:46:25.379766 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.382107066+00:00 stderr F E1208 17:46:25.382027 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.393504388+00:00 stderr F E1208 17:46:25.393420 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.416918840+00:00 stderr F E1208 17:46:25.416813 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 
2025-12-08T17:46:25.586746848+00:00 stderr F E1208 17:46:25.586676 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.786753781+00:00 stderr F E1208 17:46:25.786711 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-controller-manager-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=kube-controller-manager-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.986606861+00:00 stderr F E1208 17:46:25.986539 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.182908823+00:00 stderr F E1208 17:46:26.181243 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.188018166+00:00 stderr F E1208 17:46:26.187980 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-controller-manager-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:26.386842564+00:00 stderr F E1208 17:46:26.386629 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.588030712+00:00 stderr F E1208 17:46:26.587962 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-controller-manager-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=kube-controller-manager-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.781456738+00:00 stderr F E1208 17:46:26.781399 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods?labelSelector=app%3Dinstaller\": dial 
tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.786470839+00:00 stderr F E1208 17:46:26.786434 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.183502526+00:00 stderr F E1208 17:46:27.183439 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-controller-manager-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=kube-controller-manager-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.384607233+00:00 stderr F E1208 17:46:27.384099 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-controller-manager-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:27.431442858+00:00 stderr F E1208 17:46:27.430920 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.581569504+00:00 stderr F E1208 17:46:27.581511 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.985601091+00:00 stderr F E1208 17:46:27.985513 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-controller-manager-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=kube-controller-manager-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.383284868+00:00 stderr F E1208 17:46:28.383203 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.714104608+00:00 stderr F E1208 17:46:28.713608 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Get 
\"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.784563673+00:00 stderr F E1208 17:46:28.784492 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-controller-manager-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=kube-controller-manager-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.987735171+00:00 stderr F E1208 17:46:28.987670 1 base_controller.go:279] "Unhandled Error" err="KubeControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 
10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/namespace-openshift-infra.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-infra\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/services/kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/kube-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/recycler-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-infra/serviceaccounts/pv-recycler-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/localhost-recovery-client-crb.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-controller-manager-recovery\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/localhost-recovery-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/localhost-recovery-client\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/csr_approver_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:cluster-csr-approver-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/csr_approver_clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:cluster-csr-approver-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/vsphere/legacy-cloud-provider-sa.yaml\" (string): Delete \"https://10.217.4.1:443/api/v1/namespaces/kube-system/serviceaccounts/vsphere-legacy-cloud-provider\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/vsphere/legacy-cloud-provider-role.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:kube-controller-manager:vsphere-legacy-cloud-provider\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/vsphere/legacy-cloud-provider-binding.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:kube-controller-manager:vsphere-legacy-cloud-provider\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeControllerManagerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=KubeControllerManagerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 
2025-12-08T17:46:29.189389424+00:00 stderr F E1208 17:46:29.189325 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-controller-manager-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubecontrollermanagers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:29.581716230+00:00 stderr F E1208 17:46:29.581657 1 base_controller.go:279] "Unhandled Error" err="kube-controller-manager-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:31.009393003+00:00 stderr F E1208 17:46:31.009307 1 base_controller.go:279] "Unhandled Error" err="KubeControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:58.852234585+00:00 stderr F I1208 17:46:58.851360 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:02.391342113+00:00 stderr F I1208 17:47:02.390538 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:02.605301108+00:00 stderr F I1208 17:47:02.605201 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.262737 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.265052 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.265922 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.266143 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.266309 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.266463 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.266617 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.266766 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.266943 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.267093 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.267313 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.267470 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.267618 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:03.270917791+00:00 stderr F I1208 17:47:03.267782 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:05.608821435+00:00 stderr F I1208 17:47:05.608730 1 reflector.go:430] "Caches populated" type="*v1.Secret" 
reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:06.213188231+00:00 stderr F I1208 17:47:06.213076 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:07.027849676+00:00 stderr F I1208 17:47:07.027780 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:09.824309356+00:00 stderr F I1208 17:47:09.823372 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=kubecontrollermanagers" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:09.831179473+00:00 stderr F I1208 17:47:09.831120 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:09.835728136+00:00 stderr F I1208 17:47:09.835041 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: 
\"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:09.848706754+00:00 stderr F I1208 17:47:09.848569 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator", UID:"09857aec-2c93-4f0d-9e38-a820bd5b8362", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-controller-manager changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready" to "NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: " 2025-12-08T17:47:09.963280871+00:00 stderr F I1208 17:47:09.963220 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:10.012468779+00:00 stderr F I1208 17:47:10.012389 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:10.412488691+00:00 stderr F I1208 17:47:10.412317 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes 
are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:10.420949078+00:00 stderr F E1208 17:47:10.418031 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:10.422949881+00:00 stderr F I1208 17:47:10.421673 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: 
\"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:10.430099606+00:00 stderr F E1208 17:47:10.429279 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:10.432828872+00:00 stderr F I1208 17:47:10.432320 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:10.440323238+00:00 stderr F E1208 17:47:10.440129 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 
2025-12-08T17:47:10.443226229+00:00 stderr F I1208 17:47:10.442998 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: 
\"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:10.449914740+00:00 stderr F E1208 17:47:10.449851 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:10.492084678+00:00 stderr F I1208 17:47:10.491903 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:10.498956574+00:00 stderr F E1208 17:47:10.498898 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:10.581359827+00:00 stderr F I1208 17:47:10.581280 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes 
are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:10.588518783+00:00 stderr F E1208 17:47:10.588459 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:10.741119706+00:00 stderr F I1208 17:47:10.741053 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:10.752010559+00:00 stderr F I1208 17:47:10.751941 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": 
dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:10.756984316+00:00 stderr F E1208 17:47:10.756937 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:11.007056268+00:00 stderr F I1208 17:47:11.006926 1 request.go:752] "Waited before sending request" delay="1.175473032s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc" 2025-12-08T17:47:11.080602044+00:00 stderr F I1208 17:47:11.080273 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: 
\"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is 
well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:11.092938302+00:00 stderr F E1208 17:47:11.092004 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:11.734936241+00:00 stderr F I1208 17:47:11.734820 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:11.743214822+00:00 stderr F E1208 17:47:11.743158 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:11.914429621+00:00 stderr F I1208 17:47:11.914375 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:13.027381177+00:00 stderr F I1208 17:47:13.027238 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: 
\"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:13.034102768+00:00 stderr F E1208 17:47:13.034029 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the 
object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:13.634336082+00:00 stderr F I1208 17:47:13.634275 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:13.808155284+00:00 stderr F I1208 17:47:13.808069 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:14.807642987+00:00 stderr F I1208 17:47:14.807563 1 request.go:752] "Waited before sending request" delay="1.164554559s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc" 2025-12-08T17:47:16.142547529+00:00 stderr F I1208 17:47:16.142035 1 reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:16.208642739+00:00 stderr F I1208 17:47:16.208568 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:17.409197752+00:00 stderr F I1208 17:47:17.409139 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:18.010862142+00:00 stderr F I1208 17:47:18.010794 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:19.008520778+00:00 stderr F I1208 17:47:19.008443 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:21.210086801+00:00 stderr F I1208 17:47:21.209491 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:21.808684564+00:00 stderr F I1208 17:47:21.808610 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:23.521687588+00:00 stderr F I1208 17:47:23.521413 1 reflector.go:430] "Caches populated" type="*v1.APIServer" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:24.008402489+00:00 stderr F I1208 17:47:24.008340 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:28.611914764+00:00 stderr F I1208 17:47:28.611431 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:30.218189848+00:00 stderr F I1208 17:47:30.215964 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:34.409640120+00:00 stderr F I1208 17:47:34.409025 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:35.266839254+00:00 stderr F I1208 17:47:35.266280 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:35.267793255+00:00 stderr F I1208 17:47:35.267764 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are 
ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:35.278536322+00:00 stderr F I1208 17:47:35.275232 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator", UID:"09857aec-2c93-4f0d-9e38-a820bd5b8362", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-controller-manager changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/roles/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/leader-election-cluster-policy-controller-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-controller-manager/rolebindings/system:openshift:leader-election-lock-cluster-policy-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/namespace-security-allocation-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:namespace-security-allocation-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial 
tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:podsecurity-admission-label-syncer-controller\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-controller-manager/podsecurity-admission-label-privileged-namespaces-syncer-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:privileged-namespaces-psa-label-syncer\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: " to "NodeControllerDegraded: All master nodes are ready" 2025-12-08T17:47:35.278536322+00:00 stderr F I1208 17:47:35.275610 1 status_controller.go:230] clusteroperator/kube-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:17:05Z","message":"NodeInstallerProgressing: 1 node is at revision 7","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:06:09Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 7","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:55Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:55Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:35.281741144+00:00 stderr F E1208 17:47:35.281653 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-controller-manager reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:35.808340591+00:00 stderr F I1208 17:47:35.808270 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:39.032855195+00:00 stderr F I1208 17:47:39.032249 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:41.548644799+00:00 stderr F I1208 17:47:41.548551 1 reflector.go:430] "Caches populated" type="*v1.PodDisruptionBudget" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:50.318615149+00:00 stderr F I1208 17:47:50.318006 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:51.712813857+00:00 stderr F I1208 17:47:51.712395 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" 
reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:55.200345473+00:00 stderr F I1208 17:47:55.200155 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:00.064282584+00:00 stderr F I1208 17:48:00.063778 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:02.007533568+00:00 stderr F I1208 17:48:02.007449 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:04.647084559+00:00 stderr F I1208 17:48:04.646338 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:04.670491947+00:00 stderr F I1208 17:48:04.670440 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:05.427945105+00:00 stderr F I1208 17:48:05.426807 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:09.657702729+00:00 stderr F I1208 17:48:09.655777 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:57:03.265918773+00:00 stderr F I1208 17:57:03.265000 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:03.272321970+00:00 stderr F I1208 17:57:03.266450 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:03.272321970+00:00 stderr F I1208 17:57:03.266763 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:03.272321970+00:00 stderr F I1208 17:57:03.267017 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:03.272321970+00:00 stderr F I1208 17:57:03.267244 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:03.272321970+00:00 stderr F I1208 17:57:03.267570 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:03.272321970+00:00 stderr F I1208 17:57:03.267783 1 prune_controller.go:277] Nothing to prune ././@LongLink0000644000000000000000000000023700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-mana0000755000175000017500000000000015115611514032747 5ustar zuulzuul././@LongLink0000644000000000000000000000026700000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62/cert-manager-controller/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-mana0000755000175000017500000000000015115611523032747 5ustar zuulzuul././@LongLink0000644000000000000000000000027400000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62/cert-manager-controller/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-mana0000644000175000017500000010352515115611514032757 0ustar zuulzuul2025-12-08T17:56:01.048054399+00:00 stderr F I1208 17:56:01.047892 1 
controller.go:284] "configured acme dns01 nameservers" logger="cert-manager.controller.build-context" nameservers=["10.217.4.10:53"] 2025-12-08T17:56:01.048704827+00:00 stderr F I1208 17:56:01.048683 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:56:01.048735278+00:00 stderr F I1208 17:56:01.048725 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:56:01.048757848+00:00 stderr F I1208 17:56:01.048748 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:56:01.048779669+00:00 stderr F I1208 17:56:01.048770 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:56:01.051375590+00:00 stderr F I1208 17:56:01.051321 1 controller.go:88] "enabled controllers: [certificaterequests-approver certificaterequests-issuer-acme certificaterequests-issuer-ca certificaterequests-issuer-selfsigned certificaterequests-issuer-vault certificaterequests-issuer-venafi certificates-issuing certificates-key-manager certificates-metrics certificates-readiness certificates-request-manager certificates-revision-manager certificates-trigger challenges clusterissuers ingress-shim issuers orders]" logger="cert-manager.controller" 2025-12-08T17:56:01.051422842+00:00 stderr F I1208 17:56:01.051410 1 controller.go:437] "serving insecurely as tls certificate data not provided" logger="cert-manager.controller" 2025-12-08T17:56:01.051447892+00:00 stderr F I1208 17:56:01.051439 1 controller.go:101] "listening for insecure connections" logger="cert-manager.controller" address="0.0.0.0:9402" 2025-12-08T17:56:01.052270806+00:00 stderr F I1208 17:56:01.052219 1 controller.go:125] "starting metrics server" logger="cert-manager.controller" address="[::]:9402" 2025-12-08T17:56:01.052316427+00:00 stderr F I1208 17:56:01.052251 1 controller.go:176] "starting leader election" logger="cert-manager.controller" 2025-12-08T17:56:01.052393719+00:00 stderr F I1208 17:56:01.052354 1 controller.go:169] "starting healthz server" logger="cert-manager.controller" address="[::]:9403" 2025-12-08T17:56:01.056362787+00:00 stderr F I1208 17:56:01.056304 1 leaderelection.go:257] attempting to acquire leader lease kube-system/cert-manager-controller... 
2025-12-08T17:56:01.063734450+00:00 stderr F E1208 17:56:01.063685 1 leaderelection.go:436] error retrieving resource lock kube-system/cert-manager-controller: leases.coordination.k8s.io "cert-manager-controller" is forbidden: User "system:serviceaccount:cert-manager:cert-manager" cannot get resource "leases" in API group "coordination.k8s.io" in the namespace "kube-system" 2025-12-08T17:56:32.848557209+00:00 stderr F I1208 17:56:32.847716 1 leaderelection.go:271] successfully acquired lease kube-system/cert-manager-controller 2025-12-08T17:56:32.859266698+00:00 stderr F I1208 17:56:32.859190 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="orders" 2025-12-08T17:56:32.859369661+00:00 stderr F I1208 17:56:32.859332 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificaterequests-issuer-selfsigned" 2025-12-08T17:56:32.859580106+00:00 stderr F I1208 17:56:32.859536 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificaterequests-issuer-vault" 2025-12-08T17:56:32.862254326+00:00 stderr F I1208 17:56:32.862198 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="clusterissuers" 2025-12-08T17:56:32.864023102+00:00 stderr F I1208 17:56:32.863976 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificates-metrics" 2025-12-08T17:56:32.865697166+00:00 stderr F I1208 17:56:32.865642 1 controller.go:223] "skipping disabled controller" logger="cert-manager.controller" controller="certificatesigningrequests-issuer-selfsigned" 2025-12-08T17:56:32.865697166+00:00 stderr F I1208 17:56:32.865662 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificaterequests-issuer-ca" 2025-12-08T17:56:32.867450492+00:00 stderr F I1208 17:56:32.867403 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificates-key-manager" 2025-12-08T17:56:32.869411322+00:00 stderr F I1208 17:56:32.869366 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificaterequests-issuer-acme" 2025-12-08T17:56:32.871018305+00:00 stderr F I1208 17:56:32.870961 1 controller.go:223] "skipping disabled controller" logger="cert-manager.controller" controller="certificatesigningrequests-issuer-acme" 2025-12-08T17:56:32.871018305+00:00 stderr F I1208 17:56:32.870983 1 controller.go:223] "skipping disabled controller" logger="cert-manager.controller" controller="certificatesigningrequests-issuer-ca" 2025-12-08T17:56:32.871018305+00:00 stderr F I1208 17:56:32.870986 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificaterequests-issuer-venafi" 2025-12-08T17:56:32.871158088+00:00 stderr F I1208 17:56:32.871115 1 controller.go:223] "skipping disabled controller" logger="cert-manager.controller" controller="certificatesigningrequests-issuer-vault" 2025-12-08T17:56:32.871158088+00:00 stderr F I1208 17:56:32.871136 1 controller.go:223] "skipping disabled controller" logger="cert-manager.controller" controller="certificatesigningrequests-issuer-venafi" 2025-12-08T17:56:32.871158088+00:00 stderr F I1208 17:56:32.871144 1 controller.go:223] "skipping disabled controller" logger="cert-manager.controller" controller="gateway-shim" 2025-12-08T17:56:32.874566387+00:00 stderr F I1208 17:56:32.874512 1 controller.go:240] "starting controller" logger="cert-manager.controller" 
controller="certificaterequests-approver" 2025-12-08T17:56:32.889317851+00:00 stderr F I1208 17:56:32.889262 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificates-readiness" 2025-12-08T17:56:32.892645859+00:00 stderr F I1208 17:56:32.892605 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificates-request-manager" 2025-12-08T17:56:32.895387060+00:00 stderr F I1208 17:56:32.895369 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificates-revision-manager" 2025-12-08T17:56:32.898994244+00:00 stderr F I1208 17:56:32.898968 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificates-trigger" 2025-12-08T17:56:32.899170649+00:00 stderr F I1208 17:56:32.899125 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="issuers" 2025-12-08T17:56:32.901018037+00:00 stderr F I1208 17:56:32.900922 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="challenges" 2025-12-08T17:56:32.903694926+00:00 stderr F I1208 17:56:32.903669 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="ingress-shim" 2025-12-08T17:56:32.916944852+00:00 stderr F I1208 17:56:32.916847 1 controller.go:240] "starting controller" logger="cert-manager.controller" controller="certificates-issuing" 2025-12-08T17:56:32.934764677+00:00 stderr F I1208 17:56:32.933119 1 reflector.go:376] Caches populated for *v1.Issuer from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:56:32.936670906+00:00 stderr F I1208 17:56:32.936618 1 reflector.go:376] Caches populated for *v1.Challenge from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:56:32.936782329+00:00 stderr F I1208 17:56:32.936734 1 reflector.go:376] Caches populated for *v1.PartialObjectMetadata from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:56:32.936782329+00:00 stderr F I1208 17:56:32.936740 1 reflector.go:376] Caches populated for *v1.CertificateRequest from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:56:32.936817450+00:00 stderr F I1208 17:56:32.936769 1 reflector.go:376] Caches populated for *v1.ClusterIssuer from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:56:32.937720424+00:00 stderr F I1208 17:56:32.936868 1 reflector.go:376] Caches populated for *v1.PartialObjectMetadata from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:56:32.937720424+00:00 stderr F I1208 17:56:32.937264 1 reflector.go:376] Caches populated for *v1.Order from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:56:32.937768446+00:00 stderr F I1208 17:56:32.937736 1 reflector.go:376] Caches populated for *v1.Ingress from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:56:32.938248078+00:00 stderr F I1208 17:56:32.938216 1 reflector.go:376] Caches populated for *v1.Certificate from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:56:32.941791900+00:00 stderr F I1208 17:56:32.939490 1 reflector.go:376] Caches populated for *v1.Secret from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:56:32.961820282+00:00 stderr F I1208 17:56:32.961718 1 reflector.go:376] Caches populated for *v1.PartialObjectMetadata from 
pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:57:19.035321070+00:00 stderr F I1208 17:57:19.034533 1 conditions.go:102] "Setting lastTransitionTime for Issuer condition" logger="cert-manager" issuer="service-telemetry/default-interconnect-selfsigned" condition="Ready" lastTransitionTime="2025-12-08 17:57:19.034497059 +0000 UTC m=+78.024089083" 2025-12-08T17:57:19.968450928+00:00 stderr F I1208 17:57:19.968049 1 trigger_controller.go:225] "Certificate must be re-issued" logger="cert-manager.controller" key="service-telemetry/default-interconnect-selfsigned" reason="DoesNotExist" message="Issuing certificate as Secret does not exist" 2025-12-08T17:57:19.968450928+00:00 stderr F I1208 17:57:19.968421 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-selfsigned" condition="Issuing" lastTransitionTime="2025-12-08 17:57:19.968410067 +0000 UTC m=+78.958002101" 2025-12-08T17:57:19.968504039+00:00 stderr F I1208 17:57:19.968310 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-selfsigned" condition="Ready" lastTransitionTime="2025-12-08 17:57:19.968296694 +0000 UTC m=+78.957888708" 2025-12-08T17:57:19.984991000+00:00 stderr F I1208 17:57:19.984937 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-selfsigned\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:19.985042061+00:00 stderr F I1208 17:57:19.985017 1 trigger_controller.go:225] "Certificate must be re-issued" logger="cert-manager.controller" key="service-telemetry/default-interconnect-selfsigned" reason="DoesNotExist" message="Issuing certificate as Secret does not exist" 2025-12-08T17:57:19.985080962+00:00 stderr F I1208 17:57:19.985052 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-selfsigned" condition="Issuing" lastTransitionTime="2025-12-08 17:57:19.985040141 +0000 UTC m=+78.974632175" 2025-12-08T17:57:20.329156116+00:00 stderr F I1208 17:57:20.329009 1 conditions.go:285] "Setting lastTransitionTime for CertificateRequest condition" logger="cert-manager" certificateRequest="service-telemetry/default-interconnect-selfsigned-1" condition="Approved" lastTransitionTime="2025-12-08 17:57:20.328995072 +0000 UTC m=+79.318587086" 2025-12-08T17:57:20.361151820+00:00 stderr F I1208 17:57:20.361031 1 conditions.go:285] "Setting lastTransitionTime for CertificateRequest condition" logger="cert-manager" certificateRequest="service-telemetry/default-interconnect-selfsigned-1" condition="Ready" lastTransitionTime="2025-12-08 17:57:20.361014187 +0000 UTC m=+79.350606211" 2025-12-08T17:57:20.381132061+00:00 stderr F I1208 17:57:20.380999 1 conditions.go:201] "Found status change for Certificate condition; setting lastTransitionTime" logger="cert-manager" certificate="service-telemetry/default-interconnect-selfsigned" condition="Ready" oldStatus="False" status="True" lastTransitionTime="2025-12-08 17:57:20.380978847 +0000 UTC m=+79.370570861" 2025-12-08T17:57:20.389015637+00:00 stderr F I1208 17:57:20.388899 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" 
error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-selfsigned\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:20.389951802+00:00 stderr F I1208 17:57:20.389827 1 conditions.go:201] "Found status change for Certificate condition; setting lastTransitionTime" logger="cert-manager" certificate="service-telemetry/default-interconnect-selfsigned" condition="Ready" oldStatus="False" status="True" lastTransitionTime="2025-12-08 17:57:20.389815298 +0000 UTC m=+79.379407312" 2025-12-08T17:57:20.401210706+00:00 stderr F I1208 17:57:20.401073 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-selfsigned\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:20.801263939+00:00 stderr F I1208 17:57:20.801154 1 conditions.go:102] "Setting lastTransitionTime for Issuer condition" logger="cert-manager" issuer="service-telemetry/default-interconnect-ca" condition="Ready" lastTransitionTime="2025-12-08 17:57:20.801137216 +0000 UTC m=+79.790729230" 2025-12-08T17:57:21.632858729+00:00 stderr F I1208 17:57:21.632768 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-openstack-ca" condition="Ready" lastTransitionTime="2025-12-08 17:57:21.632743586 +0000 UTC m=+80.622335630" 2025-12-08T17:57:21.632929071+00:00 stderr F I1208 17:57:21.632807 1 trigger_controller.go:225] "Certificate must be re-issued" logger="cert-manager.controller" key="service-telemetry/default-interconnect-openstack-ca" reason="DoesNotExist" message="Issuing certificate as Secret does not exist" 2025-12-08T17:57:21.633058564+00:00 stderr F I1208 17:57:21.632928 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-openstack-ca" condition="Issuing" lastTransitionTime="2025-12-08 17:57:21.63291436 +0000 UTC m=+80.622506394" 2025-12-08T17:57:21.641794092+00:00 stderr F I1208 17:57:21.640748 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-openstack-ca\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:21.641794092+00:00 stderr F I1208 17:57:21.640811 1 trigger_controller.go:225] "Certificate must be re-issued" logger="cert-manager.controller" key="service-telemetry/default-interconnect-openstack-ca" reason="DoesNotExist" message="Issuing certificate as Secret does not exist" 2025-12-08T17:57:21.641794092+00:00 stderr F I1208 17:57:21.640841 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-openstack-ca" condition="Issuing" lastTransitionTime="2025-12-08 17:57:21.640835277 +0000 UTC m=+80.630427301" 2025-12-08T17:57:21.947754813+00:00 stderr F I1208 17:57:21.947650 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-openstack-ca\": the object has been modified; please apply your changes to 
the latest version and try again" 2025-12-08T17:57:21.956023628+00:00 stderr F I1208 17:57:21.955524 1 conditions.go:285] "Setting lastTransitionTime for CertificateRequest condition" logger="cert-manager" certificateRequest="service-telemetry/default-interconnect-openstack-ca-1" condition="Approved" lastTransitionTime="2025-12-08 17:57:21.955508534 +0000 UTC m=+80.945100548" 2025-12-08T17:57:21.968635457+00:00 stderr F I1208 17:57:21.968580 1 conditions.go:285] "Setting lastTransitionTime for CertificateRequest condition" logger="cert-manager" certificateRequest="service-telemetry/default-interconnect-openstack-ca-1" condition="Ready" lastTransitionTime="2025-12-08 17:57:21.968563705 +0000 UTC m=+80.958155719" 2025-12-08T17:57:21.987735575+00:00 stderr F I1208 17:57:21.987670 1 conditions.go:201] "Found status change for Certificate condition; setting lastTransitionTime" logger="cert-manager" certificate="service-telemetry/default-interconnect-openstack-ca" condition="Ready" oldStatus="False" status="True" lastTransitionTime="2025-12-08 17:57:21.987652723 +0000 UTC m=+80.977244737" 2025-12-08T17:57:21.997650433+00:00 stderr F I1208 17:57:21.997586 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-openstack-ca\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:22.430155705+00:00 stderr F I1208 17:57:22.430081 1 trigger_controller.go:225] "Certificate must be re-issued" logger="cert-manager.controller" key="service-telemetry/default-interconnect-openstack-credentials" reason="DoesNotExist" message="Issuing certificate as Secret does not exist" 2025-12-08T17:57:22.430201686+00:00 stderr F I1208 17:57:22.430153 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-openstack-credentials" condition="Issuing" lastTransitionTime="2025-12-08 17:57:22.430136414 +0000 UTC m=+81.419728468" 2025-12-08T17:57:22.431136820+00:00 stderr F I1208 17:57:22.431081 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-openstack-credentials" condition="Ready" lastTransitionTime="2025-12-08 17:57:22.431063308 +0000 UTC m=+81.420655362" 2025-12-08T17:57:22.440369850+00:00 stderr F I1208 17:57:22.440313 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-openstack-credentials\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:22.440468713+00:00 stderr F I1208 17:57:22.440425 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-openstack-credentials" condition="Ready" lastTransitionTime="2025-12-08 17:57:22.440412111 +0000 UTC m=+81.430004135" 2025-12-08T17:57:23.050029942+00:00 stderr F I1208 17:57:23.049943 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-openstack-credentials\": the object has been modified; please apply your changes to the latest version and 
try again" 2025-12-08T17:57:23.075539897+00:00 stderr F I1208 17:57:23.075476 1 conditions.go:285] "Setting lastTransitionTime for CertificateRequest condition" logger="cert-manager" certificateRequest="service-telemetry/default-interconnect-openstack-credentials-1" condition="Approved" lastTransitionTime="2025-12-08 17:57:23.075454745 +0000 UTC m=+82.065046759" 2025-12-08T17:57:23.086604456+00:00 stderr F I1208 17:57:23.086526 1 conditions.go:285] "Setting lastTransitionTime for CertificateRequest condition" logger="cert-manager" certificateRequest="service-telemetry/default-interconnect-openstack-credentials-1" condition="Ready" lastTransitionTime="2025-12-08 17:57:23.086509033 +0000 UTC m=+82.076101047" 2025-12-08T17:57:23.111630948+00:00 stderr F I1208 17:57:23.111503 1 conditions.go:201] "Found status change for Certificate condition; setting lastTransitionTime" logger="cert-manager" certificate="service-telemetry/default-interconnect-openstack-credentials" condition="Ready" oldStatus="False" status="True" lastTransitionTime="2025-12-08 17:57:23.111445184 +0000 UTC m=+82.101037208" 2025-12-08T17:57:23.121020683+00:00 stderr F I1208 17:57:23.120945 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-openstack-credentials\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:23.122927843+00:00 stderr F I1208 17:57:23.122633 1 conditions.go:201] "Found status change for Certificate condition; setting lastTransitionTime" logger="cert-manager" certificate="service-telemetry/default-interconnect-openstack-credentials" condition="Ready" oldStatus="False" status="True" lastTransitionTime="2025-12-08 17:57:23.122616615 +0000 UTC m=+82.112208669" 2025-12-08T17:57:23.127373409+00:00 stderr F I1208 17:57:23.127253 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-openstack-credentials\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:23.130297546+00:00 stderr F I1208 17:57:23.130233 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-openstack-credentials\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:23.281641593+00:00 stderr F E1208 17:57:23.281524 1 setup.go:48] "error getting signing CA TLS certificate" err="secrets \"default-interconnect-inter-router-ca\" not found" logger="cert-manager.controller.setup" resource_name="default-interconnect-inter-router-ca" resource_namespace="service-telemetry" resource_kind="Issuer" resource_version="v1" 2025-12-08T17:57:23.281671464+00:00 stderr F I1208 17:57:23.281632 1 conditions.go:102] "Setting lastTransitionTime for Issuer condition" logger="cert-manager" issuer="service-telemetry/default-interconnect-inter-router-ca" condition="Ready" lastTransitionTime="2025-12-08 17:57:23.281618192 +0000 UTC m=+82.271210226" 2025-12-08T17:57:23.281680474+00:00 stderr F I1208 17:57:23.281664 1 sync.go:62] "Error initializing issuer: secrets \"default-interconnect-inter-router-ca\" not found" logger="cert-manager.controller" 
resource_name="default-interconnect-inter-router-ca" resource_namespace="service-telemetry" resource_kind="Issuer" resource_version="v1" 2025-12-08T17:57:23.293390379+00:00 stderr F E1208 17:57:23.293301 1 controller.go:157] "re-queuing item due to error processing" err="secrets \"default-interconnect-inter-router-ca\" not found" logger="cert-manager.controller" 2025-12-08T17:57:23.298403830+00:00 stderr F E1208 17:57:23.298330 1 setup.go:48] "error getting signing CA TLS certificate" err="secrets \"default-interconnect-inter-router-ca\" not found" logger="cert-manager.controller.setup" resource_name="default-interconnect-inter-router-ca" resource_namespace="service-telemetry" resource_kind="Issuer" resource_version="v1" 2025-12-08T17:57:23.298522533+00:00 stderr F I1208 17:57:23.298459 1 sync.go:62] "Error initializing issuer: secrets \"default-interconnect-inter-router-ca\" not found" logger="cert-manager.controller" resource_name="default-interconnect-inter-router-ca" resource_namespace="service-telemetry" resource_kind="Issuer" resource_version="v1" 2025-12-08T17:57:23.298616956+00:00 stderr F E1208 17:57:23.298588 1 controller.go:157] "re-queuing item due to error processing" err="secrets \"default-interconnect-inter-router-ca\" not found" logger="cert-manager.controller" 2025-12-08T17:57:24.155415853+00:00 stderr F I1208 17:57:24.155052 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-inter-router-ca" condition="Ready" lastTransitionTime="2025-12-08 17:57:24.155033993 +0000 UTC m=+83.144626007" 2025-12-08T17:57:24.155415853+00:00 stderr F I1208 17:57:24.155355 1 trigger_controller.go:225] "Certificate must be re-issued" logger="cert-manager.controller" key="service-telemetry/default-interconnect-inter-router-ca" reason="DoesNotExist" message="Issuing certificate as Secret does not exist" 2025-12-08T17:57:24.155462214+00:00 stderr F I1208 17:57:24.155396 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-inter-router-ca" condition="Issuing" lastTransitionTime="2025-12-08 17:57:24.155386842 +0000 UTC m=+83.144978856" 2025-12-08T17:57:24.161977314+00:00 stderr F I1208 17:57:24.161912 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-inter-router-ca\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:24.162151958+00:00 stderr F I1208 17:57:24.162105 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-inter-router-ca" condition="Ready" lastTransitionTime="2025-12-08 17:57:24.162093417 +0000 UTC m=+83.151685441" 2025-12-08T17:57:24.636249253+00:00 stderr F I1208 17:57:24.636175 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-inter-router-ca\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:24.654928851+00:00 stderr F I1208 17:57:24.654833 1 conditions.go:285] "Setting lastTransitionTime for CertificateRequest condition" logger="cert-manager" 
certificateRequest="service-telemetry/default-interconnect-inter-router-ca-1" condition="Approved" lastTransitionTime="2025-12-08 17:57:24.654810318 +0000 UTC m=+83.644402342" 2025-12-08T17:57:24.670110746+00:00 stderr F I1208 17:57:24.670034 1 conditions.go:285] "Setting lastTransitionTime for CertificateRequest condition" logger="cert-manager" certificateRequest="service-telemetry/default-interconnect-inter-router-ca-1" condition="Ready" lastTransitionTime="2025-12-08 17:57:24.670017014 +0000 UTC m=+83.659609028" 2025-12-08T17:57:24.689998295+00:00 stderr F I1208 17:57:24.689905 1 conditions.go:201] "Found status change for Certificate condition; setting lastTransitionTime" logger="cert-manager" certificate="service-telemetry/default-interconnect-inter-router-ca" condition="Ready" oldStatus="False" status="True" lastTransitionTime="2025-12-08 17:57:24.689869262 +0000 UTC m=+83.679461276" 2025-12-08T17:57:24.690425577+00:00 stderr F I1208 17:57:24.690381 1 conditions.go:86] "Found status change for Issuer condition; setting lastTransitionTime" logger="cert-manager" issuer="service-telemetry/default-interconnect-inter-router-ca" condition="Ready" oldStatus="False" status="True" lastTransitionTime="2025-12-08 17:57:24.690369296 +0000 UTC m=+83.679961340" 2025-12-08T17:57:24.699854973+00:00 stderr F I1208 17:57:24.699395 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-inter-router-ca\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:24.700955891+00:00 stderr F I1208 17:57:24.700778 1 conditions.go:201] "Found status change for Certificate condition; setting lastTransitionTime" logger="cert-manager" certificate="service-telemetry/default-interconnect-inter-router-ca" condition="Ready" oldStatus="False" status="True" lastTransitionTime="2025-12-08 17:57:24.700100979 +0000 UTC m=+83.689692993" 2025-12-08T17:57:24.711330102+00:00 stderr F I1208 17:57:24.711272 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-inter-router-ca\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:25.022641991+00:00 stderr F I1208 17:57:25.022567 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-inter-router-credentials" condition="Ready" lastTransitionTime="2025-12-08 17:57:25.022542659 +0000 UTC m=+84.012134683" 2025-12-08T17:57:25.023043752+00:00 stderr F I1208 17:57:25.022975 1 trigger_controller.go:225] "Certificate must be re-issued" logger="cert-manager.controller" key="service-telemetry/default-interconnect-inter-router-credentials" reason="DoesNotExist" message="Issuing certificate as Secret does not exist" 2025-12-08T17:57:25.023207806+00:00 stderr F I1208 17:57:25.023164 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-inter-router-credentials" condition="Issuing" lastTransitionTime="2025-12-08 17:57:25.023015951 +0000 UTC m=+84.012607965" 2025-12-08T17:57:25.030845005+00:00 stderr F I1208 17:57:25.030034 1 controller.go:152] "re-queuing item due to optimistic locking on resource" 
logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-inter-router-credentials\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:25.030845005+00:00 stderr F I1208 17:57:25.030098 1 conditions.go:217] "Setting lastTransitionTime for Certificate condition" logger="cert-manager" certificate="service-telemetry/default-interconnect-inter-router-credentials" condition="Ready" lastTransitionTime="2025-12-08 17:57:25.030090146 +0000 UTC m=+84.019682160" 2025-12-08T17:57:25.250114224+00:00 stderr F I1208 17:57:25.249739 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-inter-router-credentials\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:25.269353206+00:00 stderr F I1208 17:57:25.269264 1 conditions.go:285] "Setting lastTransitionTime for CertificateRequest condition" logger="cert-manager" certificateRequest="service-telemetry/default-interconnect-inter-router-credentials-1" condition="Approved" lastTransitionTime="2025-12-08 17:57:25.269219743 +0000 UTC m=+84.258811757" 2025-12-08T17:57:25.286083622+00:00 stderr F I1208 17:57:25.286011 1 conditions.go:285] "Setting lastTransitionTime for CertificateRequest condition" logger="cert-manager" certificateRequest="service-telemetry/default-interconnect-inter-router-credentials-1" condition="Ready" lastTransitionTime="2025-12-08 17:57:25.28599383 +0000 UTC m=+84.275585844" 2025-12-08T17:57:25.302348217+00:00 stderr F I1208 17:57:25.302265 1 conditions.go:201] "Found status change for Certificate condition; setting lastTransitionTime" logger="cert-manager" certificate="service-telemetry/default-interconnect-inter-router-credentials" condition="Ready" oldStatus="False" status="True" lastTransitionTime="2025-12-08 17:57:25.302245674 +0000 UTC m=+84.291837678" 2025-12-08T17:57:25.310586502+00:00 stderr F I1208 17:57:25.310504 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-inter-router-credentials\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:25.311984809+00:00 stderr F I1208 17:57:25.311688 1 conditions.go:201] "Found status change for Certificate condition; setting lastTransitionTime" logger="cert-manager" certificate="service-telemetry/default-interconnect-inter-router-credentials" condition="Ready" oldStatus="False" status="True" lastTransitionTime="2025-12-08 17:57:25.3116774 +0000 UTC m=+84.301269414" 2025-12-08T17:57:25.323942940+00:00 stderr F I1208 17:57:25.323798 1 controller.go:152] "re-queuing item due to optimistic locking on resource" logger="cert-manager.controller" error="Operation cannot be fulfilled on certificates.cert-manager.io \"default-interconnect-inter-router-credentials\": the object has been modified; please apply your changes to the latest version and try again" ././@LongLink0000644000000000000000000000022200000000000011577 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611513033226 5ustar zuulzuul././@LongLink0000644000000000000000000000023600000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/kube-multus/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611520033224 5ustar zuulzuul././@LongLink0000644000000000000000000000024300000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/kube-multus/1.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000644000175000017500000051010315115611513033230 0ustar zuulzuul2025-12-08T17:53:42.120278832+00:00 stdout F 2025-12-08T17:53:42+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_48a60d36-b016-465a-8310-79f666bdf19b 2025-12-08T17:53:42.176012987+00:00 stdout F 2025-12-08T17:53:42+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_48a60d36-b016-465a-8310-79f666bdf19b to /host/opt/cni/bin/ 2025-12-08T17:53:42.192064603+00:00 stderr F 2025-12-08T17:53:42Z [verbose] multus-daemon started 2025-12-08T17:53:42.192064603+00:00 stderr F 2025-12-08T17:53:42Z [verbose] Readiness Indicator file check 2025-12-08T17:53:52.192532537+00:00 stderr F 2025-12-08T17:53:52Z [verbose] Readiness Indicator file check done! 2025-12-08T17:53:52.198267333+00:00 stderr F I1208 17:53:52.198126 23682 certificate_store.go:130] Loading cert/key pair from "/etc/cni/multus/certs/multus-client-current.pem". 2025-12-08T17:53:52.198641823+00:00 stderr F 2025-12-08T17:53:52Z [verbose] Waiting for certificate 2025-12-08T17:53:53.199171087+00:00 stderr F I1208 17:53:53.199092 23682 certificate_store.go:130] Loading cert/key pair from "/etc/cni/multus/certs/multus-client-current.pem". 2025-12-08T17:53:53.199400884+00:00 stderr F 2025-12-08T17:53:53Z [verbose] Certificate found! 2025-12-08T17:53:53.200040782+00:00 stderr F 2025-12-08T17:53:53Z [verbose] server configured with chroot: /hostroot 2025-12-08T17:53:53.200040782+00:00 stderr F 2025-12-08T17:53:53Z [verbose] Filtering pod watch for node "crc" 2025-12-08T17:53:53.300933835+00:00 stderr F 2025-12-08T17:53:53Z [verbose] API readiness check 2025-12-08T17:53:53.301724486+00:00 stderr F 2025-12-08T17:53:53Z [verbose] API readiness check done! 
2025-12-08T17:53:53.301913231+00:00 stderr F 2025-12-08T17:53:53Z [verbose] Generated MultusCNI config: {"binDir":"/var/lib/cni/bin","cniVersion":"0.3.1","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","namespaceIsolation":true,"globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","type":"multus-shim","auxiliaryCNIChainName":"vendor-cni-chain","daemonSocketDir":"/run/multus/socket"} 2025-12-08T17:53:53.687001672+00:00 stderr F 2025-12-08T17:53:53Z [verbose] started to watch file /host/run/multus/cni/net.d/10-ovn-kubernetes.conf 2025-12-08T17:54:28.818737918+00:00 stderr F 2025-12-08T17:54:28Z [verbose] ADD starting CNI request ContainerID:"4b4187aa05d60ca91fd5cf349b353949f50cc44c5221895dfe286963d741fc5f" Netns:"/var/run/netns/a24304af-9a69-4c1e-af8d-718e52efb085" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-tkpnz;K8S_POD_INFRA_CONTAINER_ID=4b4187aa05d60ca91fd5cf349b353949f50cc44c5221895dfe286963d741fc5f;K8S_POD_UID=2d7eb264-8a42-4885-a9db-a693bf3911c8" Path:"" 2025-12-08T17:54:28.955518009+00:00 stderr F I1208 17:54:28.949508 24807 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:54:28.955518009+00:00 stderr F I1208 17:54:28.950124 24807 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:54:28.955518009+00:00 stderr F I1208 17:54:28.950825 24807 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:54:28.955518009+00:00 stderr F I1208 17:54:28.950849 24807 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:54:28.955518009+00:00 stderr F I1208 17:54:28.950868 24807 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:54:28.956073734+00:00 stderr F 2025-12-08T17:54:28Z [verbose] Add: openshift-marketplace:certified-operators-tkpnz:2d7eb264-8a42-4885-a9db-a693bf3911c8:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"4b4187aa05d60ca","mac":"fa:7d:82:00:be:49"},{"name":"eth0","mac":"0a:58:0a:d9:00:07","sandbox":"/var/run/netns/a24304af-9a69-4c1e-af8d-718e52efb085"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.7/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:54:28.956325640+00:00 stderr F I1208 17:54:28.956245 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"certified-operators-tkpnz", UID:"2d7eb264-8a42-4885-a9db-a693bf3911c8", APIVersion:"v1", ResourceVersion:"40641", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.7/23] from ovn-kubernetes 2025-12-08T17:54:28.967537112+00:00 stderr F 2025-12-08T17:54:28Z [verbose] ADD finished CNI request ContainerID:"4b4187aa05d60ca91fd5cf349b353949f50cc44c5221895dfe286963d741fc5f" Netns:"/var/run/netns/a24304af-9a69-4c1e-af8d-718e52efb085" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-tkpnz;K8S_POD_INFRA_CONTAINER_ID=4b4187aa05d60ca91fd5cf349b353949f50cc44c5221895dfe286963d741fc5f;K8S_POD_UID=2d7eb264-8a42-4885-a9db-a693bf3911c8" Path:"", result: 
"{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"fa:7d:82:00:be:49\",\"name\":\"4b4187aa05d60ca\"},{\"mac\":\"0a:58:0a:d9:00:07\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/a24304af-9a69-4c1e-af8d-718e52efb085\"}],\"ips\":[{\"address\":\"10.217.0.7/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:54:41.304225423+00:00 stderr F 2025-12-08T17:54:41Z [verbose] DEL starting CNI request ContainerID:"4b4187aa05d60ca91fd5cf349b353949f50cc44c5221895dfe286963d741fc5f" Netns:"/var/run/netns/a24304af-9a69-4c1e-af8d-718e52efb085" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-tkpnz;K8S_POD_INFRA_CONTAINER_ID=4b4187aa05d60ca91fd5cf349b353949f50cc44c5221895dfe286963d741fc5f;K8S_POD_UID=2d7eb264-8a42-4885-a9db-a693bf3911c8" Path:"" 2025-12-08T17:54:41.305716434+00:00 stderr F 2025-12-08T17:54:41Z [verbose] Del: openshift-marketplace:certified-operators-tkpnz:2d7eb264-8a42-4885-a9db-a693bf3911c8:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:54:41.476683825+00:00 stderr F 2025-12-08T17:54:41Z [verbose] DEL finished CNI request ContainerID:"4b4187aa05d60ca91fd5cf349b353949f50cc44c5221895dfe286963d741fc5f" Netns:"/var/run/netns/a24304af-9a69-4c1e-af8d-718e52efb085" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-tkpnz;K8S_POD_INFRA_CONTAINER_ID=4b4187aa05d60ca91fd5cf349b353949f50cc44c5221895dfe286963d741fc5f;K8S_POD_UID=2d7eb264-8a42-4885-a9db-a693bf3911c8" Path:"", result: "", err: 2025-12-08T17:54:42.036705376+00:00 stderr F 2025-12-08T17:54:42Z [verbose] ADD starting CNI request ContainerID:"24210ec6bd60a71aa153a7bd8e0b815977fd1af47e969217fdbff4efc408db1d" Netns:"/var/run/netns/fd11c167-57c6-4252-b901-626d9142de41" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-hl4hq;K8S_POD_INFRA_CONTAINER_ID=24210ec6bd60a71aa153a7bd8e0b815977fd1af47e969217fdbff4efc408db1d;K8S_POD_UID=3cb8fcb4-9838-4dd2-93a0-5bb860fd915b" Path:"" 2025-12-08T17:54:42.387454354+00:00 stderr F I1208 17:54:42.377696 25240 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:54:42.387454354+00:00 stderr F I1208 17:54:42.378458 25240 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:54:42.387454354+00:00 stderr F I1208 17:54:42.378505 25240 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:54:42.387454354+00:00 stderr F I1208 17:54:42.378512 25240 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:54:42.387454354+00:00 stderr F I1208 17:54:42.378517 25240 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:54:42.387839235+00:00 stderr F 2025-12-08T17:54:42Z [verbose] Add: openshift-marketplace:redhat-operators-hl4hq:3cb8fcb4-9838-4dd2-93a0-5bb860fd915b:ovn-kubernetes(ovn-kubernetes):eth0 
{"cniVersion":"0.4.0","interfaces":[{"name":"24210ec6bd60a71","mac":"5a:0d:3f:4d:24:58"},{"name":"eth0","mac":"0a:58:0a:d9:00:0a","sandbox":"/var/run/netns/fd11c167-57c6-4252-b901-626d9142de41"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.10/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:54:42.388263396+00:00 stderr F I1208 17:54:42.388222 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"redhat-operators-hl4hq", UID:"3cb8fcb4-9838-4dd2-93a0-5bb860fd915b", APIVersion:"v1", ResourceVersion:"40692", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.10/23] from ovn-kubernetes 2025-12-08T17:54:42.405744446+00:00 stderr F 2025-12-08T17:54:42Z [verbose] ADD finished CNI request ContainerID:"24210ec6bd60a71aa153a7bd8e0b815977fd1af47e969217fdbff4efc408db1d" Netns:"/var/run/netns/fd11c167-57c6-4252-b901-626d9142de41" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-hl4hq;K8S_POD_INFRA_CONTAINER_ID=24210ec6bd60a71aa153a7bd8e0b815977fd1af47e969217fdbff4efc408db1d;K8S_POD_UID=3cb8fcb4-9838-4dd2-93a0-5bb860fd915b" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"5a:0d:3f:4d:24:58\",\"name\":\"24210ec6bd60a71\"},{\"mac\":\"0a:58:0a:d9:00:0a\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/fd11c167-57c6-4252-b901-626d9142de41\"}],\"ips\":[{\"address\":\"10.217.0.10/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:54:50.177555005+00:00 stderr F 2025-12-08T17:54:50Z [verbose] DEL starting CNI request ContainerID:"c11f84302bfe3264cf3e55e89a65907964bdd273130b6ff7fe1c6969648837c5" Netns:"/var/run/netns/5e56a3a7-fb3b-4a87-a3ef-744f5fbb3910" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-xp5vr;K8S_POD_INFRA_CONTAINER_ID=c11f84302bfe3264cf3e55e89a65907964bdd273130b6ff7fe1c6969648837c5;K8S_POD_UID=c9416e49-5134-45de-9eeb-a15be7fdbf63" Path:"" 2025-12-08T17:54:50.178113860+00:00 stderr F 2025-12-08T17:54:50Z [verbose] Del: openshift-marketplace:redhat-marketplace-xp5vr:c9416e49-5134-45de-9eeb-a15be7fdbf63:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:54:50.314120459+00:00 stderr F 2025-12-08T17:54:50Z [verbose] DEL finished CNI request ContainerID:"c11f84302bfe3264cf3e55e89a65907964bdd273130b6ff7fe1c6969648837c5" Netns:"/var/run/netns/5e56a3a7-fb3b-4a87-a3ef-744f5fbb3910" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-xp5vr;K8S_POD_INFRA_CONTAINER_ID=c11f84302bfe3264cf3e55e89a65907964bdd273130b6ff7fe1c6969648837c5;K8S_POD_UID=c9416e49-5134-45de-9eeb-a15be7fdbf63" Path:"", result: "", err: 2025-12-08T17:54:51.542157588+00:00 stderr F 2025-12-08T17:54:51Z [verbose] ADD starting CNI request ContainerID:"42b4b9839c709fcee00bdae06ff7f011cff52246bc72cbd262f9589b280f7e98" Netns:"/var/run/netns/aaf65196-0f14-445a-8e50-d05dddb8500b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-image-registry;K8S_POD_NAME=image-registry-5d9d95bf5b-cmjbz;K8S_POD_INFRA_CONTAINER_ID=42b4b9839c709fcee00bdae06ff7f011cff52246bc72cbd262f9589b280f7e98;K8S_POD_UID=82c8be84-d9b0-44df-99be-57f994255a0b" Path:"" 2025-12-08T17:54:51.683384918+00:00 stderr F I1208 
17:54:51.675612 25514 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:54:51.683384918+00:00 stderr F I1208 17:54:51.676264 25514 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:54:51.683384918+00:00 stderr F I1208 17:54:51.676310 25514 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:54:51.683384918+00:00 stderr F I1208 17:54:51.676353 25514 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:54:51.683384918+00:00 stderr F I1208 17:54:51.676383 25514 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:54:51.684424377+00:00 stderr F 2025-12-08T17:54:51Z [verbose] Add: openshift-image-registry:image-registry-5d9d95bf5b-cmjbz:82c8be84-d9b0-44df-99be-57f994255a0b:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"42b4b9839c709fc","mac":"72:71:c1:dd:92:df"},{"name":"eth0","mac":"0a:58:0a:d9:00:0b","sandbox":"/var/run/netns/aaf65196-0f14-445a-8e50-d05dddb8500b"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.11/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:54:51.685853525+00:00 stderr F I1208 17:54:51.685805 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-image-registry", Name:"image-registry-5d9d95bf5b-cmjbz", UID:"82c8be84-d9b0-44df-99be-57f994255a0b", APIVersion:"v1", ResourceVersion:"40765", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.11/23] from ovn-kubernetes 2025-12-08T17:54:51.699169173+00:00 stderr F 2025-12-08T17:54:51Z [verbose] ADD finished CNI request ContainerID:"42b4b9839c709fcee00bdae06ff7f011cff52246bc72cbd262f9589b280f7e98" Netns:"/var/run/netns/aaf65196-0f14-445a-8e50-d05dddb8500b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-image-registry;K8S_POD_NAME=image-registry-5d9d95bf5b-cmjbz;K8S_POD_INFRA_CONTAINER_ID=42b4b9839c709fcee00bdae06ff7f011cff52246bc72cbd262f9589b280f7e98;K8S_POD_UID=82c8be84-d9b0-44df-99be-57f994255a0b" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"72:71:c1:dd:92:df\",\"name\":\"42b4b9839c709fc\"},{\"mac\":\"0a:58:0a:d9:00:0b\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/aaf65196-0f14-445a-8e50-d05dddb8500b\"}],\"ips\":[{\"address\":\"10.217.0.11/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:54:57.905840361+00:00 stderr F 2025-12-08T17:54:57Z [verbose] ADD starting CNI request ContainerID:"2a7c27be34f0ea80d0d8deca84039524de988c06829e6374708a625c6a090285" Netns:"/var/run/netns/18c75a7c-9024-44ff-b2b1-d929d162be1a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5;K8S_POD_INFRA_CONTAINER_ID=2a7c27be34f0ea80d0d8deca84039524de988c06829e6374708a625c6a090285;K8S_POD_UID=8d941e2a-672c-4bb7-b8fc-314ecbcf7781" Path:"" 2025-12-08T17:54:58.047404812+00:00 stderr F I1208 17:54:58.041351 25697 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:54:58.047404812+00:00 stderr F I1208 17:54:58.041926 25697 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:54:58.047404812+00:00 stderr F I1208 17:54:58.041966 25697 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:54:58.047404812+00:00 
stderr F I1208 17:54:58.041983 25697 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:54:58.047404812+00:00 stderr F I1208 17:54:58.042000 25697 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:54:58.047781792+00:00 stderr F 2025-12-08T17:54:58Z [verbose] Add: openshift-marketplace:6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5:8d941e2a-672c-4bb7-b8fc-314ecbcf7781:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"2a7c27be34f0ea8","mac":"1a:c2:43:d0:c3:89"},{"name":"eth0","mac":"0a:58:0a:d9:00:17","sandbox":"/var/run/netns/18c75a7c-9024-44ff-b2b1-d929d162be1a"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.23/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:54:58.048028158+00:00 stderr F I1208 17:54:58.047962 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5", UID:"8d941e2a-672c-4bb7-b8fc-314ecbcf7781", APIVersion:"v1", ResourceVersion:"40853", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.23/23] from ovn-kubernetes 2025-12-08T17:54:58.061459620+00:00 stderr F 2025-12-08T17:54:58Z [verbose] ADD finished CNI request ContainerID:"2a7c27be34f0ea80d0d8deca84039524de988c06829e6374708a625c6a090285" Netns:"/var/run/netns/18c75a7c-9024-44ff-b2b1-d929d162be1a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5;K8S_POD_INFRA_CONTAINER_ID=2a7c27be34f0ea80d0d8deca84039524de988c06829e6374708a625c6a090285;K8S_POD_UID=8d941e2a-672c-4bb7-b8fc-314ecbcf7781" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"1a:c2:43:d0:c3:89\",\"name\":\"2a7c27be34f0ea8\"},{\"mac\":\"0a:58:0a:d9:00:17\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/18c75a7c-9024-44ff-b2b1-d929d162be1a\"}],\"ips\":[{\"address\":\"10.217.0.23/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:54:59.047894176+00:00 stderr F 2025-12-08T17:54:59Z [verbose] DEL starting CNI request ContainerID:"24210ec6bd60a71aa153a7bd8e0b815977fd1af47e969217fdbff4efc408db1d" Netns:"/var/run/netns/fd11c167-57c6-4252-b901-626d9142de41" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-hl4hq;K8S_POD_INFRA_CONTAINER_ID=24210ec6bd60a71aa153a7bd8e0b815977fd1af47e969217fdbff4efc408db1d;K8S_POD_UID=3cb8fcb4-9838-4dd2-93a0-5bb860fd915b" Path:"" 2025-12-08T17:54:59.048510432+00:00 stderr F 2025-12-08T17:54:59Z [verbose] Del: openshift-marketplace:redhat-operators-hl4hq:3cb8fcb4-9838-4dd2-93a0-5bb860fd915b:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:54:59.218355813+00:00 stderr F 2025-12-08T17:54:59Z [verbose] DEL finished CNI request ContainerID:"24210ec6bd60a71aa153a7bd8e0b815977fd1af47e969217fdbff4efc408db1d" Netns:"/var/run/netns/fd11c167-57c6-4252-b901-626d9142de41" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-hl4hq;K8S_POD_INFRA_CONTAINER_ID=24210ec6bd60a71aa153a7bd8e0b815977fd1af47e969217fdbff4efc408db1d;K8S_POD_UID=3cb8fcb4-9838-4dd2-93a0-5bb860fd915b" Path:"", 
result: "", err: 2025-12-08T17:55:04.372168379+00:00 stderr F 2025-12-08T17:55:04Z [verbose] DEL starting CNI request ContainerID:"2a7c27be34f0ea80d0d8deca84039524de988c06829e6374708a625c6a090285" Netns:"/var/run/netns/18c75a7c-9024-44ff-b2b1-d929d162be1a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5;K8S_POD_INFRA_CONTAINER_ID=2a7c27be34f0ea80d0d8deca84039524de988c06829e6374708a625c6a090285;K8S_POD_UID=8d941e2a-672c-4bb7-b8fc-314ecbcf7781" Path:"" 2025-12-08T17:55:04.372770655+00:00 stderr F 2025-12-08T17:55:04Z [verbose] Del: openshift-marketplace:6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5:8d941e2a-672c-4bb7-b8fc-314ecbcf7781:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:55:04.573227670+00:00 stderr F 2025-12-08T17:55:04Z [verbose] ADD starting CNI request ContainerID:"778e9a31359bff55f065276d20f080996ec0132459d6dad1578f82a69aa467d9" Netns:"/var/run/netns/e12e255e-17f2-47e0-9af9-47f5876a32db" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f;K8S_POD_INFRA_CONTAINER_ID=778e9a31359bff55f065276d20f080996ec0132459d6dad1578f82a69aa467d9;K8S_POD_UID=4d041d5b-762b-4616-bc8a-d21727bd0547" Path:"" 2025-12-08T17:55:04.613920794+00:00 stderr F 2025-12-08T17:55:04Z [verbose] DEL finished CNI request ContainerID:"2a7c27be34f0ea80d0d8deca84039524de988c06829e6374708a625c6a090285" Netns:"/var/run/netns/18c75a7c-9024-44ff-b2b1-d929d162be1a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5;K8S_POD_INFRA_CONTAINER_ID=2a7c27be34f0ea80d0d8deca84039524de988c06829e6374708a625c6a090285;K8S_POD_UID=8d941e2a-672c-4bb7-b8fc-314ecbcf7781" Path:"", result: "", err: 2025-12-08T17:55:04.947785549+00:00 stderr F I1208 17:55:04.937971 25974 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:04.947785549+00:00 stderr F I1208 17:55:04.938488 25974 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:04.947785549+00:00 stderr F I1208 17:55:04.938521 25974 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:04.947785549+00:00 stderr F I1208 17:55:04.938550 25974 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:04.947785549+00:00 stderr F I1208 17:55:04.938569 25974 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:04.948625832+00:00 stderr F 2025-12-08T17:55:04Z [verbose] Add: openshift-marketplace:8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f:4d041d5b-762b-4616-bc8a-d21727bd0547:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"778e9a31359bff5","mac":"3e:59:97:f9:20:87"},{"name":"eth0","mac":"0a:58:0a:d9:00:2a","sandbox":"/var/run/netns/e12e255e-17f2-47e0-9af9-47f5876a32db"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.42/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:04.948866099+00:00 stderr F I1208 17:55:04.948815 23682 
event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f", UID:"4d041d5b-762b-4616-bc8a-d21727bd0547", APIVersion:"v1", ResourceVersion:"40898", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.42/23] from ovn-kubernetes 2025-12-08T17:55:04.971126348+00:00 stderr F 2025-12-08T17:55:04Z [verbose] ADD finished CNI request ContainerID:"778e9a31359bff55f065276d20f080996ec0132459d6dad1578f82a69aa467d9" Netns:"/var/run/netns/e12e255e-17f2-47e0-9af9-47f5876a32db" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f;K8S_POD_INFRA_CONTAINER_ID=778e9a31359bff55f065276d20f080996ec0132459d6dad1578f82a69aa467d9;K8S_POD_UID=4d041d5b-762b-4616-bc8a-d21727bd0547" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"3e:59:97:f9:20:87\",\"name\":\"778e9a31359bff5\"},{\"mac\":\"0a:58:0a:d9:00:2a\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/e12e255e-17f2-47e0-9af9-47f5876a32db\"}],\"ips\":[{\"address\":\"10.217.0.42/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:05.011007551+00:00 stderr F 2025-12-08T17:55:05Z [verbose] ADD starting CNI request ContainerID:"2935ada013e6ac1600bb19077c21a67804e238011fc0ce83d6bf8ec21c5000ce" Netns:"/var/run/netns/1d1d3f78-d7f2-4063-bd7e-9c5f196513df" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj;K8S_POD_INFRA_CONTAINER_ID=2935ada013e6ac1600bb19077c21a67804e238011fc0ce83d6bf8ec21c5000ce;K8S_POD_UID=0b5d1008-e7ed-481b-85c2-5f359d8eda2d" Path:"" 2025-12-08T17:55:05.167945174+00:00 stderr F I1208 17:55:05.159955 26048 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:05.167945174+00:00 stderr F I1208 17:55:05.160575 26048 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:05.167945174+00:00 stderr F I1208 17:55:05.160619 26048 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:05.167945174+00:00 stderr F I1208 17:55:05.160643 26048 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:05.167945174+00:00 stderr F I1208 17:55:05.160681 26048 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:05.168328914+00:00 stderr F 2025-12-08T17:55:05Z [verbose] Add: openshift-marketplace:1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj:0b5d1008-e7ed-481b-85c2-5f359d8eda2d:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"2935ada013e6ac1","mac":"fa:2d:0f:55:52:4f"},{"name":"eth0","mac":"0a:58:0a:d9:00:2c","sandbox":"/var/run/netns/1d1d3f78-d7f2-4063-bd7e-9c5f196513df"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.44/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:05.170534034+00:00 stderr F I1208 17:55:05.168717 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj", UID:"0b5d1008-e7ed-481b-85c2-5f359d8eda2d", APIVersion:"v1", ResourceVersion:"40912", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.44/23] from ovn-kubernetes 
2025-12-08T17:55:05.188473766+00:00 stderr F 2025-12-08T17:55:05Z [verbose] ADD finished CNI request ContainerID:"2935ada013e6ac1600bb19077c21a67804e238011fc0ce83d6bf8ec21c5000ce" Netns:"/var/run/netns/1d1d3f78-d7f2-4063-bd7e-9c5f196513df" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj;K8S_POD_INFRA_CONTAINER_ID=2935ada013e6ac1600bb19077c21a67804e238011fc0ce83d6bf8ec21c5000ce;K8S_POD_UID=0b5d1008-e7ed-481b-85c2-5f359d8eda2d" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"fa:2d:0f:55:52:4f\",\"name\":\"2935ada013e6ac1\"},{\"mac\":\"0a:58:0a:d9:00:2c\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/1d1d3f78-d7f2-4063-bd7e-9c5f196513df\"}],\"ips\":[{\"address\":\"10.217.0.44/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:08.458351812+00:00 stderr F 2025-12-08T17:55:08Z [verbose] DEL starting CNI request ContainerID:"778e9a31359bff55f065276d20f080996ec0132459d6dad1578f82a69aa467d9" Netns:"/var/run/netns/e12e255e-17f2-47e0-9af9-47f5876a32db" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f;K8S_POD_INFRA_CONTAINER_ID=778e9a31359bff55f065276d20f080996ec0132459d6dad1578f82a69aa467d9;K8S_POD_UID=4d041d5b-762b-4616-bc8a-d21727bd0547" Path:"" 2025-12-08T17:55:08.458351812+00:00 stderr F 2025-12-08T17:55:08Z [verbose] Del: openshift-marketplace:8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f:4d041d5b-762b-4616-bc8a-d21727bd0547:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:55:08.637833632+00:00 stderr F 2025-12-08T17:55:08Z [verbose] DEL finished CNI request ContainerID:"778e9a31359bff55f065276d20f080996ec0132459d6dad1578f82a69aa467d9" Netns:"/var/run/netns/e12e255e-17f2-47e0-9af9-47f5876a32db" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f;K8S_POD_INFRA_CONTAINER_ID=778e9a31359bff55f065276d20f080996ec0132459d6dad1578f82a69aa467d9;K8S_POD_UID=4d041d5b-762b-4616-bc8a-d21727bd0547" Path:"", result: "", err: 2025-12-08T17:55:14.506773589+00:00 stderr F 2025-12-08T17:55:14Z [verbose] DEL starting CNI request ContainerID:"2935ada013e6ac1600bb19077c21a67804e238011fc0ce83d6bf8ec21c5000ce" Netns:"/var/run/netns/1d1d3f78-d7f2-4063-bd7e-9c5f196513df" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj;K8S_POD_INFRA_CONTAINER_ID=2935ada013e6ac1600bb19077c21a67804e238011fc0ce83d6bf8ec21c5000ce;K8S_POD_UID=0b5d1008-e7ed-481b-85c2-5f359d8eda2d" Path:"" 2025-12-08T17:55:14.507269173+00:00 stderr F 2025-12-08T17:55:14Z [verbose] Del: openshift-marketplace:1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj:0b5d1008-e7ed-481b-85c2-5f359d8eda2d:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:55:14.698343325+00:00 stderr F 2025-12-08T17:55:14Z 
[verbose] DEL finished CNI request ContainerID:"2935ada013e6ac1600bb19077c21a67804e238011fc0ce83d6bf8ec21c5000ce" Netns:"/var/run/netns/1d1d3f78-d7f2-4063-bd7e-9c5f196513df" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj;K8S_POD_INFRA_CONTAINER_ID=2935ada013e6ac1600bb19077c21a67804e238011fc0ce83d6bf8ec21c5000ce;K8S_POD_UID=0b5d1008-e7ed-481b-85c2-5f359d8eda2d" Path:"", result: "", err: 2025-12-08T17:55:16.012946722+00:00 stderr F 2025-12-08T17:55:16Z [verbose] ADD starting CNI request ContainerID:"889a8c7004b017ad49fc63d83dc803d26fd9d9ca3514a1d964fbb16a472effb3" Netns:"/var/run/netns/6a6ae7fb-1036-4b8e-9042-898b6b00862e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operators;K8S_POD_NAME=obo-prometheus-operator-86648f486b-4j9kn;K8S_POD_INFRA_CONTAINER_ID=889a8c7004b017ad49fc63d83dc803d26fd9d9ca3514a1d964fbb16a472effb3;K8S_POD_UID=abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd" Path:"" 2025-12-08T17:55:16.203812199+00:00 stderr F 2025-12-08T17:55:16Z [verbose] ADD starting CNI request ContainerID:"e0c592366b5ef63052d61e7ed67660df6fb54cc953bb9c3514427c2726ae04a9" Netns:"/var/run/netns/6d174f5b-3ed2-4f7a-b4c7-f5afca7f4d28" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operators;K8S_POD_NAME=obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t;K8S_POD_INFRA_CONTAINER_ID=e0c592366b5ef63052d61e7ed67660df6fb54cc953bb9c3514427c2726ae04a9;K8S_POD_UID=174b7c35-bd90-4386-a01d-b20d986df7e5" Path:"" 2025-12-08T17:55:16.212691287+00:00 stderr F 2025-12-08T17:55:16Z [verbose] ADD starting CNI request ContainerID:"f73dfe933a483271a8d1b8d72606f9def0bca48b5bba88daf7f3e1286897c5e5" Netns:"/var/run/netns/ec8c2acd-5f68-433b-a891-607a4ba56877" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operators;K8S_POD_NAME=obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm;K8S_POD_INFRA_CONTAINER_ID=f73dfe933a483271a8d1b8d72606f9def0bca48b5bba88daf7f3e1286897c5e5;K8S_POD_UID=b0b7331f-5f3a-41e7-84d0-64a9aa478c60" Path:"" 2025-12-08T17:55:16.292707501+00:00 stderr F I1208 17:55:16.279664 26455 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:16.292707501+00:00 stderr F I1208 17:55:16.280027 26455 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:16.292707501+00:00 stderr F I1208 17:55:16.280039 26455 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:16.292707501+00:00 stderr F I1208 17:55:16.280045 26455 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:16.292707501+00:00 stderr F I1208 17:55:16.280051 26455 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:16.293143673+00:00 stderr F 2025-12-08T17:55:16Z [verbose] Add: openshift-operators:obo-prometheus-operator-86648f486b-4j9kn:abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"889a8c7004b017a","mac":"d2:ce:38:2f:44:bd"},{"name":"eth0","mac":"0a:58:0a:d9:00:2d","sandbox":"/var/run/netns/6a6ae7fb-1036-4b8e-9042-898b6b00862e"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.45/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:16.293278616+00:00 stderr F I1208 17:55:16.293231 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operators", 
Name:"obo-prometheus-operator-86648f486b-4j9kn", UID:"abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd", APIVersion:"v1", ResourceVersion:"41520", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.45/23] from ovn-kubernetes 2025-12-08T17:55:16.310722276+00:00 stderr F 2025-12-08T17:55:16Z [verbose] ADD finished CNI request ContainerID:"889a8c7004b017ad49fc63d83dc803d26fd9d9ca3514a1d964fbb16a472effb3" Netns:"/var/run/netns/6a6ae7fb-1036-4b8e-9042-898b6b00862e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operators;K8S_POD_NAME=obo-prometheus-operator-86648f486b-4j9kn;K8S_POD_INFRA_CONTAINER_ID=889a8c7004b017ad49fc63d83dc803d26fd9d9ca3514a1d964fbb16a472effb3;K8S_POD_UID=abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"d2:ce:38:2f:44:bd\",\"name\":\"889a8c7004b017a\"},{\"mac\":\"0a:58:0a:d9:00:2d\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/6a6ae7fb-1036-4b8e-9042-898b6b00862e\"}],\"ips\":[{\"address\":\"10.217.0.45/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:16.423383138+00:00 stderr F 2025-12-08T17:55:16Z [verbose] ADD starting CNI request ContainerID:"b4449071ef2dfa477169a324e12c5b54db8a8d53b4056e856142b43c86c47931" Netns:"/var/run/netns/d824ae1b-4171-491d-bb64-be32ba54c8bd" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operators;K8S_POD_NAME=observability-operator-78c97476f4-mg4b2;K8S_POD_INFRA_CONTAINER_ID=b4449071ef2dfa477169a324e12c5b54db8a8d53b4056e856142b43c86c47931;K8S_POD_UID=a7981d87-d276-41a7-ad7c-d6f0cde8fa7d" Path:"" 2025-12-08T17:55:16.592083568+00:00 stderr P 2025-12-08T17:55:16Z [verbose] 2025-12-08T17:55:16.592151640+00:00 stderr P ADD starting CNI request ContainerID:"a3008cb88c503a6d26be96167f587bdda69dc07ebc987e9a8d0f4d694e66272e" Netns:"/var/run/netns/a1f3a3a3-65d6-425a-8a0a-23daf163a3e8" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operators;K8S_POD_NAME=perses-operator-68bdb49cbf-m2cdr;K8S_POD_INFRA_CONTAINER_ID=a3008cb88c503a6d26be96167f587bdda69dc07ebc987e9a8d0f4d694e66272e;K8S_POD_UID=eae302b5-bcca-41b8-9f24-34be44dd7f83" Path:"" 2025-12-08T17:55:16.592174500+00:00 stderr F 2025-12-08T17:55:16.655412261+00:00 stderr F I1208 17:55:16.646167 26506 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:16.655412261+00:00 stderr F I1208 17:55:16.646422 26506 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:16.655412261+00:00 stderr F I1208 17:55:16.646432 26506 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:16.655412261+00:00 stderr F I1208 17:55:16.646439 26506 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:16.655412261+00:00 stderr F I1208 17:55:16.646446 26506 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:16.655412261+00:00 stderr F 2025-12-08T17:55:16Z [verbose] Add: openshift-operators:obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm:b0b7331f-5f3a-41e7-84d0-64a9aa478c60:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"f73dfe933a48327","mac":"ee:e9:e0:3b:56:ce"},{"name":"eth0","mac":"0a:58:0a:d9:00:2f","sandbox":"/var/run/netns/ec8c2acd-5f68-433b-a891-607a4ba56877"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.47/23","gateway":"10.217.0.1"}],"dns":{}} 
2025-12-08T17:55:16.655412261+00:00 stderr F I1208 17:55:16.652492 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operators", Name:"obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm", UID:"b0b7331f-5f3a-41e7-84d0-64a9aa478c60", APIVersion:"v1", ResourceVersion:"41549", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.47/23] from ovn-kubernetes 2025-12-08T17:55:16.680057845+00:00 stderr F 2025-12-08T17:55:16Z [verbose] ADD finished CNI request ContainerID:"f73dfe933a483271a8d1b8d72606f9def0bca48b5bba88daf7f3e1286897c5e5" Netns:"/var/run/netns/ec8c2acd-5f68-433b-a891-607a4ba56877" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operators;K8S_POD_NAME=obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm;K8S_POD_INFRA_CONTAINER_ID=f73dfe933a483271a8d1b8d72606f9def0bca48b5bba88daf7f3e1286897c5e5;K8S_POD_UID=b0b7331f-5f3a-41e7-84d0-64a9aa478c60" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ee:e9:e0:3b:56:ce\",\"name\":\"f73dfe933a48327\"},{\"mac\":\"0a:58:0a:d9:00:2f\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/ec8c2acd-5f68-433b-a891-607a4ba56877\"}],\"ips\":[{\"address\":\"10.217.0.47/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:16.770213491+00:00 stderr F I1208 17:55:16.749041 26504 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:16.770213491+00:00 stderr F I1208 17:55:16.749265 26504 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:16.770213491+00:00 stderr F I1208 17:55:16.749274 26504 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:16.770213491+00:00 stderr F I1208 17:55:16.749281 26504 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:16.770213491+00:00 stderr F I1208 17:55:16.749288 26504 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:16.770213491+00:00 stderr F 2025-12-08T17:55:16Z [verbose] Add: openshift-operators:obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t:174b7c35-bd90-4386-a01d-b20d986df7e5:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"e0c592366b5ef63","mac":"9a:79:66:46:33:79"},{"name":"eth0","mac":"0a:58:0a:d9:00:2e","sandbox":"/var/run/netns/6d174f5b-3ed2-4f7a-b4c7-f5afca7f4d28"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.46/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:16.770213491+00:00 stderr F I1208 17:55:16.769678 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operators", Name:"obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t", UID:"174b7c35-bd90-4386-a01d-b20d986df7e5", APIVersion:"v1", ResourceVersion:"41546", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.46/23] from ovn-kubernetes 2025-12-08T17:55:16.787044344+00:00 stderr F 2025-12-08T17:55:16Z [verbose] ADD finished CNI request ContainerID:"e0c592366b5ef63052d61e7ed67660df6fb54cc953bb9c3514427c2726ae04a9" Netns:"/var/run/netns/6d174f5b-3ed2-4f7a-b4c7-f5afca7f4d28" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operators;K8S_POD_NAME=obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t;K8S_POD_INFRA_CONTAINER_ID=e0c592366b5ef63052d61e7ed67660df6fb54cc953bb9c3514427c2726ae04a9;K8S_POD_UID=174b7c35-bd90-4386-a01d-b20d986df7e5" 
Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"9a:79:66:46:33:79\",\"name\":\"e0c592366b5ef63\"},{\"mac\":\"0a:58:0a:d9:00:2e\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/6d174f5b-3ed2-4f7a-b4c7-f5afca7f4d28\"}],\"ips\":[{\"address\":\"10.217.0.46/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:16.817197676+00:00 stderr F I1208 17:55:16.800712 26561 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:16.817197676+00:00 stderr F I1208 17:55:16.800864 26561 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:16.817197676+00:00 stderr F I1208 17:55:16.800891 26561 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:16.817197676+00:00 stderr F I1208 17:55:16.800903 26561 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:16.817197676+00:00 stderr F I1208 17:55:16.800911 26561 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:16.817553415+00:00 stderr F 2025-12-08T17:55:16Z [verbose] Add: openshift-operators:observability-operator-78c97476f4-mg4b2:a7981d87-d276-41a7-ad7c-d6f0cde8fa7d:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"b4449071ef2dfa4","mac":"ba:b4:0e:49:d2:e6"},{"name":"eth0","mac":"0a:58:0a:d9:00:30","sandbox":"/var/run/netns/d824ae1b-4171-491d-bb64-be32ba54c8bd"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.48/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:16.818563763+00:00 stderr F I1208 17:55:16.817649 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operators", Name:"observability-operator-78c97476f4-mg4b2", UID:"a7981d87-d276-41a7-ad7c-d6f0cde8fa7d", APIVersion:"v1", ResourceVersion:"41572", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.48/23] from ovn-kubernetes 2025-12-08T17:55:16.843250066+00:00 stderr F I1208 17:55:16.833548 26599 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:16.843250066+00:00 stderr F I1208 17:55:16.833681 26599 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:16.843250066+00:00 stderr F I1208 17:55:16.833692 26599 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:16.843250066+00:00 stderr F I1208 17:55:16.833700 26599 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:16.843250066+00:00 stderr F I1208 17:55:16.833708 26599 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:16.843250066+00:00 stderr F 2025-12-08T17:55:16Z [verbose] Add: openshift-operators:perses-operator-68bdb49cbf-m2cdr:eae302b5-bcca-41b8-9f24-34be44dd7f83:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"a3008cb88c503a6","mac":"d2:83:e2:98:72:7b"},{"name":"eth0","mac":"0a:58:0a:d9:00:31","sandbox":"/var/run/netns/a1f3a3a3-65d6-425a-8a0a-23daf163a3e8"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.49/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:16.843559376+00:00 stderr F I1208 17:55:16.843411 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operators", Name:"perses-operator-68bdb49cbf-m2cdr", 
UID:"eae302b5-bcca-41b8-9f24-34be44dd7f83", APIVersion:"v1", ResourceVersion:"41595", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.49/23] from ovn-kubernetes 2025-12-08T17:55:16.855310062+00:00 stderr F 2025-12-08T17:55:16Z [verbose] ADD finished CNI request ContainerID:"b4449071ef2dfa477169a324e12c5b54db8a8d53b4056e856142b43c86c47931" Netns:"/var/run/netns/d824ae1b-4171-491d-bb64-be32ba54c8bd" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operators;K8S_POD_NAME=observability-operator-78c97476f4-mg4b2;K8S_POD_INFRA_CONTAINER_ID=b4449071ef2dfa477169a324e12c5b54db8a8d53b4056e856142b43c86c47931;K8S_POD_UID=a7981d87-d276-41a7-ad7c-d6f0cde8fa7d" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ba:b4:0e:49:d2:e6\",\"name\":\"b4449071ef2dfa4\"},{\"mac\":\"0a:58:0a:d9:00:30\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/d824ae1b-4171-491d-bb64-be32ba54c8bd\"}],\"ips\":[{\"address\":\"10.217.0.48/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:16.878196707+00:00 stderr F 2025-12-08T17:55:16Z [verbose] ADD finished CNI request ContainerID:"a3008cb88c503a6d26be96167f587bdda69dc07ebc987e9a8d0f4d694e66272e" Netns:"/var/run/netns/a1f3a3a3-65d6-425a-8a0a-23daf163a3e8" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operators;K8S_POD_NAME=perses-operator-68bdb49cbf-m2cdr;K8S_POD_INFRA_CONTAINER_ID=a3008cb88c503a6d26be96167f587bdda69dc07ebc987e9a8d0f4d694e66272e;K8S_POD_UID=eae302b5-bcca-41b8-9f24-34be44dd7f83" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"d2:83:e2:98:72:7b\",\"name\":\"a3008cb88c503a6\"},{\"mac\":\"0a:58:0a:d9:00:31\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/a1f3a3a3-65d6-425a-8a0a-23daf163a3e8\"}],\"ips\":[{\"address\":\"10.217.0.49/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:17.155101979+00:00 stderr F 2025-12-08T17:55:17Z [verbose] ADD starting CNI request ContainerID:"1ff35b59333394c98d2f3df1bede17bded5937bf35e3e28941b364dccb236ed3" Netns:"/var/run/netns/ea579379-f986-45cc-9bab-2886993f0bae" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=elastic-operator-c9c86658-4qchz;K8S_POD_INFRA_CONTAINER_ID=1ff35b59333394c98d2f3df1bede17bded5937bf35e3e28941b364dccb236ed3;K8S_POD_UID=1899106f-2682-474e-ad41-4dd00dbc7d4b" Path:"" 2025-12-08T17:55:17.320366787+00:00 stderr F I1208 17:55:17.307718 26634 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:17.320366787+00:00 stderr F I1208 17:55:17.307989 26634 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:17.320366787+00:00 stderr F I1208 17:55:17.308001 26634 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:17.320366787+00:00 stderr F I1208 17:55:17.308016 26634 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:17.320366787+00:00 stderr F I1208 17:55:17.308022 26634 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:17.320679825+00:00 stderr F 2025-12-08T17:55:17Z [verbose] Add: service-telemetry:elastic-operator-c9c86658-4qchz:1899106f-2682-474e-ad41-4dd00dbc7d4b:ovn-kubernetes(ovn-kubernetes):eth0 
{"cniVersion":"0.4.0","interfaces":[{"name":"1ff35b59333394c","mac":"d2:67:60:34:33:97"},{"name":"eth0","mac":"0a:58:0a:d9:00:32","sandbox":"/var/run/netns/ea579379-f986-45cc-9bab-2886993f0bae"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.50/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:17.320893241+00:00 stderr F I1208 17:55:17.320817 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"elastic-operator-c9c86658-4qchz", UID:"1899106f-2682-474e-ad41-4dd00dbc7d4b", APIVersion:"v1", ResourceVersion:"41681", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.50/23] from ovn-kubernetes 2025-12-08T17:55:17.338619268+00:00 stderr F 2025-12-08T17:55:17Z [verbose] ADD finished CNI request ContainerID:"1ff35b59333394c98d2f3df1bede17bded5937bf35e3e28941b364dccb236ed3" Netns:"/var/run/netns/ea579379-f986-45cc-9bab-2886993f0bae" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=elastic-operator-c9c86658-4qchz;K8S_POD_INFRA_CONTAINER_ID=1ff35b59333394c98d2f3df1bede17bded5937bf35e3e28941b364dccb236ed3;K8S_POD_UID=1899106f-2682-474e-ad41-4dd00dbc7d4b" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"d2:67:60:34:33:97\",\"name\":\"1ff35b59333394c\"},{\"mac\":\"0a:58:0a:d9:00:32\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/ea579379-f986-45cc-9bab-2886993f0bae\"}],\"ips\":[{\"address\":\"10.217.0.50/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:34.128508783+00:00 stderr F 2025-12-08T17:55:34Z [verbose] ADD starting CNI request ContainerID:"0416812ef58d875c73d2d34ff65e34fefafd98ea0eedd5d0318f579aa326b738" Netns:"/var/run/netns/433a5091-d885-44e1-bea9-83af423cda61" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=cert-manager-operator;K8S_POD_NAME=cert-manager-operator-controller-manager-64c74584c4-qtkx9;K8S_POD_INFRA_CONTAINER_ID=0416812ef58d875c73d2d34ff65e34fefafd98ea0eedd5d0318f579aa326b738;K8S_POD_UID=4356ed35-799c-4e39-a660-872291edf6cc" Path:"" 2025-12-08T17:55:34.305589082+00:00 stderr F I1208 17:55:34.297417 26983 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:34.305589082+00:00 stderr F I1208 17:55:34.297909 26983 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:34.305589082+00:00 stderr F I1208 17:55:34.297928 26983 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:34.305589082+00:00 stderr F I1208 17:55:34.297935 26983 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:34.305589082+00:00 stderr F I1208 17:55:34.297942 26983 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:34.305926331+00:00 stderr F 2025-12-08T17:55:34Z [verbose] Add: cert-manager-operator:cert-manager-operator-controller-manager-64c74584c4-qtkx9:4356ed35-799c-4e39-a660-872291edf6cc:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"0416812ef58d875","mac":"ba:e6:2e:83:f2:94"},{"name":"eth0","mac":"0a:58:0a:d9:00:33","sandbox":"/var/run/netns/433a5091-d885-44e1-bea9-83af423cda61"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.51/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:34.306273341+00:00 stderr F I1208 17:55:34.306247 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"cert-manager-operator", 
Name:"cert-manager-operator-controller-manager-64c74584c4-qtkx9", UID:"4356ed35-799c-4e39-a660-872291edf6cc", APIVersion:"v1", ResourceVersion:"42156", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.51/23] from ovn-kubernetes 2025-12-08T17:55:34.321820548+00:00 stderr F 2025-12-08T17:55:34Z [verbose] ADD finished CNI request ContainerID:"0416812ef58d875c73d2d34ff65e34fefafd98ea0eedd5d0318f579aa326b738" Netns:"/var/run/netns/433a5091-d885-44e1-bea9-83af423cda61" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=cert-manager-operator;K8S_POD_NAME=cert-manager-operator-controller-manager-64c74584c4-qtkx9;K8S_POD_INFRA_CONTAINER_ID=0416812ef58d875c73d2d34ff65e34fefafd98ea0eedd5d0318f579aa326b738;K8S_POD_UID=4356ed35-799c-4e39-a660-872291edf6cc" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ba:e6:2e:83:f2:94\",\"name\":\"0416812ef58d875\"},{\"mac\":\"0a:58:0a:d9:00:33\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/433a5091-d885-44e1-bea9-83af423cda61\"}],\"ips\":[{\"address\":\"10.217.0.51/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:39.016464918+00:00 stderr F 2025-12-08T17:55:39Z [verbose] DEL starting CNI request ContainerID:"ef2774eb27b084c192ab2fbfe7c52e1babc8bccadb79956c3c83e557c0e28270" Netns:"/var/run/netns/7478f019-784a-4444-b828-453e6b9ff1ba" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-image-registry;K8S_POD_NAME=image-registry-66587d64c8-s6hn4;K8S_POD_INFRA_CONTAINER_ID=ef2774eb27b084c192ab2fbfe7c52e1babc8bccadb79956c3c83e557c0e28270;K8S_POD_UID=1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc" Path:"" 2025-12-08T17:55:39.017217158+00:00 stderr F 2025-12-08T17:55:39Z [verbose] Del: openshift-image-registry:image-registry-66587d64c8-s6hn4:1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:55:39.205321340+00:00 stderr F 2025-12-08T17:55:39Z [verbose] DEL finished CNI request ContainerID:"ef2774eb27b084c192ab2fbfe7c52e1babc8bccadb79956c3c83e557c0e28270" Netns:"/var/run/netns/7478f019-784a-4444-b828-453e6b9ff1ba" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-image-registry;K8S_POD_NAME=image-registry-66587d64c8-s6hn4;K8S_POD_INFRA_CONTAINER_ID=ef2774eb27b084c192ab2fbfe7c52e1babc8bccadb79956c3c83e557c0e28270;K8S_POD_UID=1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc" Path:"", result: "", err: 2025-12-08T17:55:41.034946774+00:00 stderr F 2025-12-08T17:55:41Z [verbose] ADD starting CNI request ContainerID:"5dbcf59da1fc73d327f27b4e3f855691b78c63c29755116cda8eef8573c359ee" Netns:"/var/run/netns/e8fb14ae-119d-4e82-bcff-adcff64d476c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=cert-manager;K8S_POD_NAME=cert-manager-webhook-7894b5b9b4-wdn4b;K8S_POD_INFRA_CONTAINER_ID=5dbcf59da1fc73d327f27b4e3f855691b78c63c29755116cda8eef8573c359ee;K8S_POD_UID=72f27276-bf08-481d-ad0b-11f8e684d170" Path:"" 2025-12-08T17:55:41.194792030+00:00 stderr F I1208 17:55:41.186813 27336 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:41.194792030+00:00 stderr F I1208 17:55:41.187209 27336 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:41.194792030+00:00 stderr F I1208 17:55:41.187232 27336 envvar.go:172] "Feature 
gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:41.194792030+00:00 stderr F I1208 17:55:41.187239 27336 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:41.194792030+00:00 stderr F I1208 17:55:41.187249 27336 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:41.195001986+00:00 stderr F 2025-12-08T17:55:41Z [verbose] Add: cert-manager:cert-manager-webhook-7894b5b9b4-wdn4b:72f27276-bf08-481d-ad0b-11f8e684d170:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"5dbcf59da1fc73d","mac":"e6:e3:c3:6d:6e:07"},{"name":"eth0","mac":"0a:58:0a:d9:00:34","sandbox":"/var/run/netns/e8fb14ae-119d-4e82-bcff-adcff64d476c"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.52/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:41.196375094+00:00 stderr F I1208 17:55:41.195397 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"cert-manager", Name:"cert-manager-webhook-7894b5b9b4-wdn4b", UID:"72f27276-bf08-481d-ad0b-11f8e684d170", APIVersion:"v1", ResourceVersion:"42621", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.52/23] from ovn-kubernetes 2025-12-08T17:55:41.226264943+00:00 stderr F 2025-12-08T17:55:41Z [verbose] ADD finished CNI request ContainerID:"5dbcf59da1fc73d327f27b4e3f855691b78c63c29755116cda8eef8573c359ee" Netns:"/var/run/netns/e8fb14ae-119d-4e82-bcff-adcff64d476c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=cert-manager;K8S_POD_NAME=cert-manager-webhook-7894b5b9b4-wdn4b;K8S_POD_INFRA_CONTAINER_ID=5dbcf59da1fc73d327f27b4e3f855691b78c63c29755116cda8eef8573c359ee;K8S_POD_UID=72f27276-bf08-481d-ad0b-11f8e684d170" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"e6:e3:c3:6d:6e:07\",\"name\":\"5dbcf59da1fc73d\"},{\"mac\":\"0a:58:0a:d9:00:34\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/e8fb14ae-119d-4e82-bcff-adcff64d476c\"}],\"ips\":[{\"address\":\"10.217.0.52/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:42.428161733+00:00 stderr F 2025-12-08T17:55:42Z [verbose] ADD starting CNI request ContainerID:"63f14142a29c4e7ca44e46d3a69071a22cbd80cb5b480e16054385c0040ccd60" Netns:"/var/run/netns/b6c3fc6b-1311-4a04-abcb-a28b4dd1638d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=elasticsearch-es-default-0;K8S_POD_INFRA_CONTAINER_ID=63f14142a29c4e7ca44e46d3a69071a22cbd80cb5b480e16054385c0040ccd60;K8S_POD_UID=72b61c1d-040f-465f-bea8-e024f5879f98" Path:"" 2025-12-08T17:55:42.850815431+00:00 stderr F I1208 17:55:42.842747 27435 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:42.850815431+00:00 stderr F I1208 17:55:42.843073 27435 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:42.850815431+00:00 stderr F I1208 17:55:42.843085 27435 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:42.850815431+00:00 stderr F I1208 17:55:42.843091 27435 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:42.850815431+00:00 stderr F I1208 17:55:42.843097 27435 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:42.851601453+00:00 stderr F 2025-12-08T17:55:42Z [verbose] Add: 
service-telemetry:elasticsearch-es-default-0:72b61c1d-040f-465f-bea8-e024f5879f98:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"63f14142a29c4e7","mac":"9e:1d:37:b8:b5:87"},{"name":"eth0","mac":"0a:58:0a:d9:00:35","sandbox":"/var/run/netns/b6c3fc6b-1311-4a04-abcb-a28b4dd1638d"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.53/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:42.851601453+00:00 stderr F I1208 17:55:42.851512 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"elasticsearch-es-default-0", UID:"72b61c1d-040f-465f-bea8-e024f5879f98", APIVersion:"v1", ResourceVersion:"42716", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.53/23] from ovn-kubernetes 2025-12-08T17:55:42.903133377+00:00 stderr F 2025-12-08T17:55:42Z [verbose] ADD finished CNI request ContainerID:"63f14142a29c4e7ca44e46d3a69071a22cbd80cb5b480e16054385c0040ccd60" Netns:"/var/run/netns/b6c3fc6b-1311-4a04-abcb-a28b4dd1638d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=elasticsearch-es-default-0;K8S_POD_INFRA_CONTAINER_ID=63f14142a29c4e7ca44e46d3a69071a22cbd80cb5b480e16054385c0040ccd60;K8S_POD_UID=72b61c1d-040f-465f-bea8-e024f5879f98" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"9e:1d:37:b8:b5:87\",\"name\":\"63f14142a29c4e7\"},{\"mac\":\"0a:58:0a:d9:00:35\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/b6c3fc6b-1311-4a04-abcb-a28b4dd1638d\"}],\"ips\":[{\"address\":\"10.217.0.53/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:55:43.546835280+00:00 stderr F 2025-12-08T17:55:43Z [verbose] ADD starting CNI request ContainerID:"ca3f80877bb835ca98440ccb2512c749261bb18d46b126546945e35111759e30" Netns:"/var/run/netns/f62a88aa-8848-4fca-8e64-0e28d4241f82" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=cert-manager;K8S_POD_NAME=cert-manager-cainjector-7dbf76d5c8-fdk5q;K8S_POD_INFRA_CONTAINER_ID=ca3f80877bb835ca98440ccb2512c749261bb18d46b126546945e35111759e30;K8S_POD_UID=57678783-1dc9-4366-a2e6-7f8c6321e40f" Path:"" 2025-12-08T17:55:43.930794226+00:00 stderr F I1208 17:55:43.925316 27487 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:43.930794226+00:00 stderr F I1208 17:55:43.925828 27487 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:43.930794226+00:00 stderr F I1208 17:55:43.925974 27487 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:43.930794226+00:00 stderr F I1208 17:55:43.926023 27487 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:55:43.930794226+00:00 stderr F I1208 17:55:43.926059 27487 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:43.931437483+00:00 stderr P 2025-12-08T17:55:43Z [verbose] Add: cert-manager:cert-manager-cainjector-7dbf76d5c8-fdk5q:57678783-1dc9-4366-a2e6-7f8c6321e40f:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"ca3f80877bb835c","mac":"6a:84:85:2e:8b:c7"},{"name":"eth0","mac":"0a:58:0a:d9:00:36","sandbox":"/var/run/netns/f62a88aa-8848-4fca-8e64-0e28d4241f82"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.54/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:55:43.931468224+00:00 stderr F 2025-12-08T17:55:43.931811123+00:00 stderr F I1208 17:55:43.931663 
23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"cert-manager", Name:"cert-manager-cainjector-7dbf76d5c8-fdk5q", UID:"57678783-1dc9-4366-a2e6-7f8c6321e40f", APIVersion:"v1", ResourceVersion:"42887", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.54/23] from ovn-kubernetes 2025-12-08T17:55:43.942406834+00:00 stderr F 2025-12-08T17:55:43Z [verbose] ADD finished CNI request ContainerID:"ca3f80877bb835ca98440ccb2512c749261bb18d46b126546945e35111759e30" Netns:"/var/run/netns/f62a88aa-8848-4fca-8e64-0e28d4241f82" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=cert-manager;K8S_POD_NAME=cert-manager-cainjector-7dbf76d5c8-fdk5q;K8S_POD_INFRA_CONTAINER_ID=ca3f80877bb835ca98440ccb2512c749261bb18d46b126546945e35111759e30;K8S_POD_UID=57678783-1dc9-4366-a2e6-7f8c6321e40f" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"6a:84:85:2e:8b:c7\",\"name\":\"ca3f80877bb835c\"},{\"mac\":\"0a:58:0a:d9:00:36\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/f62a88aa-8848-4fca-8e64-0e28d4241f82\"}],\"ips\":[{\"address\":\"10.217.0.54/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:56:00.427562354+00:00 stderr F 2025-12-08T17:56:00Z [verbose] ADD starting CNI request ContainerID:"58c1590086f577ebd3c8d0d925d56fc467809bc70c4d333f467540b7567c889b" Netns:"/var/run/netns/316bc1a4-6aec-4461-84ca-34b6b9d0ae07" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=cert-manager;K8S_POD_NAME=cert-manager-858d87f86b-7q2ss;K8S_POD_INFRA_CONTAINER_ID=58c1590086f577ebd3c8d0d925d56fc467809bc70c4d333f467540b7567c889b;K8S_POD_UID=dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62" Path:"" 2025-12-08T17:56:00.820867536+00:00 stderr F I1208 17:56:00.811233 28046 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:56:00.820867536+00:00 stderr F I1208 17:56:00.811864 28046 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:56:00.820867536+00:00 stderr F I1208 17:56:00.811917 28046 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:56:00.820867536+00:00 stderr F I1208 17:56:00.811928 28046 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:56:00.820867536+00:00 stderr F I1208 17:56:00.811944 28046 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:56:00.821694768+00:00 stderr F 2025-12-08T17:56:00Z [verbose] Add: cert-manager:cert-manager-858d87f86b-7q2ss:dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"58c1590086f577e","mac":"6a:77:ef:1b:fa:17"},{"name":"eth0","mac":"0a:58:0a:d9:00:37","sandbox":"/var/run/netns/316bc1a4-6aec-4461-84ca-34b6b9d0ae07"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.55/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:56:00.822158251+00:00 stderr F I1208 17:56:00.822095 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"cert-manager", Name:"cert-manager-858d87f86b-7q2ss", UID:"dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62", APIVersion:"v1", ResourceVersion:"43376", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.55/23] from ovn-kubernetes 2025-12-08T17:56:00.839097616+00:00 stderr F 2025-12-08T17:56:00Z [verbose] ADD finished CNI request ContainerID:"58c1590086f577ebd3c8d0d925d56fc467809bc70c4d333f467540b7567c889b" 
Netns:"/var/run/netns/316bc1a4-6aec-4461-84ca-34b6b9d0ae07" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=cert-manager;K8S_POD_NAME=cert-manager-858d87f86b-7q2ss;K8S_POD_INFRA_CONTAINER_ID=58c1590086f577ebd3c8d0d925d56fc467809bc70c4d333f467540b7567c889b;K8S_POD_UID=dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"6a:77:ef:1b:fa:17\",\"name\":\"58c1590086f577e\"},{\"mac\":\"0a:58:0a:d9:00:37\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/316bc1a4-6aec-4461-84ca-34b6b9d0ae07\"}],\"ips\":[{\"address\":\"10.217.0.55/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:56:09.756849607+00:00 stderr F 2025-12-08T17:56:09Z [verbose] ADD starting CNI request ContainerID:"a97f426e75bd96aa15e7c6c95fd4f97e212f85aa5b5354f651fc378e78e1100c" Netns:"/var/run/netns/966aee6f-d6bd-4cb9-b3ea-41733f424e9c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=infrawatch-operators-xmhcm;K8S_POD_INFRA_CONTAINER_ID=a97f426e75bd96aa15e7c6c95fd4f97e212f85aa5b5354f651fc378e78e1100c;K8S_POD_UID=86a460fd-a75a-45d8-8022-1a3aab4c30fd" Path:"" 2025-12-08T17:56:09.921838904+00:00 stderr F I1208 17:56:09.913762 28442 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:56:09.921838904+00:00 stderr F I1208 17:56:09.914231 28442 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:56:09.921838904+00:00 stderr F I1208 17:56:09.914240 28442 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:56:09.921838904+00:00 stderr F I1208 17:56:09.914250 28442 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:56:09.921838904+00:00 stderr F I1208 17:56:09.914256 28442 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:56:09.922127472+00:00 stderr F 2025-12-08T17:56:09Z [verbose] Add: service-telemetry:infrawatch-operators-xmhcm:86a460fd-a75a-45d8-8022-1a3aab4c30fd:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"a97f426e75bd96a","mac":"5e:90:36:c5:42:0e"},{"name":"eth0","mac":"0a:58:0a:d9:00:38","sandbox":"/var/run/netns/966aee6f-d6bd-4cb9-b3ea-41733f424e9c"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.56/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:56:09.922342108+00:00 stderr F I1208 17:56:09.922307 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"infrawatch-operators-xmhcm", UID:"86a460fd-a75a-45d8-8022-1a3aab4c30fd", APIVersion:"v1", ResourceVersion:"43451", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.56/23] from ovn-kubernetes 2025-12-08T17:56:09.935530640+00:00 stderr F 2025-12-08T17:56:09Z [verbose] ADD finished CNI request ContainerID:"a97f426e75bd96aa15e7c6c95fd4f97e212f85aa5b5354f651fc378e78e1100c" Netns:"/var/run/netns/966aee6f-d6bd-4cb9-b3ea-41733f424e9c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=infrawatch-operators-xmhcm;K8S_POD_INFRA_CONTAINER_ID=a97f426e75bd96aa15e7c6c95fd4f97e212f85aa5b5354f651fc378e78e1100c;K8S_POD_UID=86a460fd-a75a-45d8-8022-1a3aab4c30fd" Path:"", result: 
"{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"5e:90:36:c5:42:0e\",\"name\":\"a97f426e75bd96a\"},{\"mac\":\"0a:58:0a:d9:00:38\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/966aee6f-d6bd-4cb9-b3ea-41733f424e9c\"}],\"ips\":[{\"address\":\"10.217.0.56/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:56:13.179023290+00:00 stderr F 2025-12-08T17:56:13Z [verbose] DEL starting CNI request ContainerID:"a97f426e75bd96aa15e7c6c95fd4f97e212f85aa5b5354f651fc378e78e1100c" Netns:"/var/run/netns/966aee6f-d6bd-4cb9-b3ea-41733f424e9c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=infrawatch-operators-xmhcm;K8S_POD_INFRA_CONTAINER_ID=a97f426e75bd96aa15e7c6c95fd4f97e212f85aa5b5354f651fc378e78e1100c;K8S_POD_UID=86a460fd-a75a-45d8-8022-1a3aab4c30fd" Path:"" 2025-12-08T17:56:13.179875334+00:00 stderr F 2025-12-08T17:56:13Z [verbose] Del: service-telemetry:infrawatch-operators-xmhcm:86a460fd-a75a-45d8-8022-1a3aab4c30fd:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:56:13.354960338+00:00 stderr F 2025-12-08T17:56:13Z [verbose] ADD starting CNI request ContainerID:"ac36bb4df55ec56255ddd967453b3fbdc2364a3df344521f0809cd33d0681223" Netns:"/var/run/netns/54ca9956-bc31-43ad-bdf7-8588da70b781" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=infrawatch-operators-tv99j;K8S_POD_INFRA_CONTAINER_ID=ac36bb4df55ec56255ddd967453b3fbdc2364a3df344521f0809cd33d0681223;K8S_POD_UID=020b4835-c362-478d-b714-bb42757ae9e2" Path:"" 2025-12-08T17:56:13.486548328+00:00 stderr F 2025-12-08T17:56:13Z [verbose] DEL finished CNI request ContainerID:"a97f426e75bd96aa15e7c6c95fd4f97e212f85aa5b5354f651fc378e78e1100c" Netns:"/var/run/netns/966aee6f-d6bd-4cb9-b3ea-41733f424e9c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=infrawatch-operators-xmhcm;K8S_POD_INFRA_CONTAINER_ID=a97f426e75bd96aa15e7c6c95fd4f97e212f85aa5b5354f651fc378e78e1100c;K8S_POD_UID=86a460fd-a75a-45d8-8022-1a3aab4c30fd" Path:"", result: "", err: 2025-12-08T17:56:13.794366565+00:00 stderr F I1208 17:56:13.778847 28729 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:56:13.794366565+00:00 stderr F I1208 17:56:13.779295 28729 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:56:13.794366565+00:00 stderr F I1208 17:56:13.779310 28729 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:56:13.794366565+00:00 stderr F I1208 17:56:13.779318 28729 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:56:13.794366565+00:00 stderr F I1208 17:56:13.779338 28729 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:56:13.796751130+00:00 stderr F 2025-12-08T17:56:13Z [verbose] Add: service-telemetry:infrawatch-operators-tv99j:020b4835-c362-478d-b714-bb42757ae9e2:ovn-kubernetes(ovn-kubernetes):eth0 
{"cniVersion":"0.4.0","interfaces":[{"name":"ac36bb4df55ec56","mac":"72:3f:0c:56:6e:15"},{"name":"eth0","mac":"0a:58:0a:d9:00:3c","sandbox":"/var/run/netns/54ca9956-bc31-43ad-bdf7-8588da70b781"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.60/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:56:13.796751130+00:00 stderr F I1208 17:56:13.795839 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"infrawatch-operators-tv99j", UID:"020b4835-c362-478d-b714-bb42757ae9e2", APIVersion:"v1", ResourceVersion:"43479", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.60/23] from ovn-kubernetes 2025-12-08T17:56:13.808829112+00:00 stderr P 2025-12-08T17:56:13Z [verbose] 2025-12-08T17:56:13.808904794+00:00 stderr P ADD finished CNI request ContainerID:"ac36bb4df55ec56255ddd967453b3fbdc2364a3df344521f0809cd33d0681223" Netns:"/var/run/netns/54ca9956-bc31-43ad-bdf7-8588da70b781" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=infrawatch-operators-tv99j;K8S_POD_INFRA_CONTAINER_ID=ac36bb4df55ec56255ddd967453b3fbdc2364a3df344521f0809cd33d0681223;K8S_POD_UID=020b4835-c362-478d-b714-bb42757ae9e2" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"72:3f:0c:56:6e:15\",\"name\":\"ac36bb4df55ec56\"},{\"mac\":\"0a:58:0a:d9:00:3c\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/54ca9956-bc31-43ad-bdf7-8588da70b781\"}],\"ips\":[{\"address\":\"10.217.0.60/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:56:13.808928404+00:00 stderr F 2025-12-08T17:56:28.844054533+00:00 stderr F 2025-12-08T17:56:28Z [verbose] ADD starting CNI request ContainerID:"3c2d2aabec3ddf467be563ae844cae07e4cb0c8bd4763b2ce84272a0a197550d" Netns:"/var/run/netns/093e70b0-771c-4dc3-9d88-1ed5b540491f" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq;K8S_POD_INFRA_CONTAINER_ID=3c2d2aabec3ddf467be563ae844cae07e4cb0c8bd4763b2ce84272a0a197550d;K8S_POD_UID=8dfcd1bd-ac9d-4eba-b160-b7f4335fb440" Path:"" 2025-12-08T17:56:29.199071773+00:00 stderr F I1208 17:56:29.187224 29062 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:56:29.199071773+00:00 stderr F I1208 17:56:29.187790 29062 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:56:29.199071773+00:00 stderr F I1208 17:56:29.187817 29062 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:56:29.199071773+00:00 stderr F I1208 17:56:29.187830 29062 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:56:29.199071773+00:00 stderr F I1208 17:56:29.187845 29062 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:56:29.199516314+00:00 stderr F 2025-12-08T17:56:29Z [verbose] Add: service-telemetry:36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq:8dfcd1bd-ac9d-4eba-b160-b7f4335fb440:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"3c2d2aabec3ddf4","mac":"c6:4a:f0:2a:b6:c4"},{"name":"eth0","mac":"0a:58:0a:d9:00:3d","sandbox":"/var/run/netns/093e70b0-771c-4dc3-9d88-1ed5b540491f"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.61/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:56:29.199627317+00:00 stderr F I1208 17:56:29.199582 23682 event.go:364] 
Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq", UID:"8dfcd1bd-ac9d-4eba-b160-b7f4335fb440", APIVersion:"v1", ResourceVersion:"43600", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.61/23] from ovn-kubernetes 2025-12-08T17:56:29.213111219+00:00 stderr P 2025-12-08T17:56:29Z [verbose] 2025-12-08T17:56:29.213175761+00:00 stderr F ADD finished CNI request ContainerID:"3c2d2aabec3ddf467be563ae844cae07e4cb0c8bd4763b2ce84272a0a197550d" Netns:"/var/run/netns/093e70b0-771c-4dc3-9d88-1ed5b540491f" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq;K8S_POD_INFRA_CONTAINER_ID=3c2d2aabec3ddf467be563ae844cae07e4cb0c8bd4763b2ce84272a0a197550d;K8S_POD_UID=8dfcd1bd-ac9d-4eba-b160-b7f4335fb440" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"c6:4a:f0:2a:b6:c4\",\"name\":\"3c2d2aabec3ddf4\"},{\"mac\":\"0a:58:0a:d9:00:3d\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/093e70b0-771c-4dc3-9d88-1ed5b540491f\"}],\"ips\":[{\"address\":\"10.217.0.61/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:56:29.640125347+00:00 stderr F 2025-12-08T17:56:29Z [verbose] ADD starting CNI request ContainerID:"3182b268e15e5a9dae6bd9d62c0e1c15361887d6f5afe589908eb4becf27d9e8" Netns:"/var/run/netns/ca0ee0ab-f68a-4bac-8e15-e3a1d2da9ada" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx;K8S_POD_INFRA_CONTAINER_ID=3182b268e15e5a9dae6bd9d62c0e1c15361887d6f5afe589908eb4becf27d9e8;K8S_POD_UID=f97402a7-57a3-4f4a-af9f-478d646d2cbc" Path:"" 2025-12-08T17:56:29.766997676+00:00 stderr F I1208 17:56:29.762125 29124 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:56:29.766997676+00:00 stderr F I1208 17:56:29.762489 29124 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:56:29.766997676+00:00 stderr F I1208 17:56:29.762512 29124 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:56:29.766997676+00:00 stderr F I1208 17:56:29.762521 29124 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:56:29.766997676+00:00 stderr F I1208 17:56:29.762534 29124 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:56:29.767606841+00:00 stderr F 2025-12-08T17:56:29Z [verbose] Add: service-telemetry:f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx:f97402a7-57a3-4f4a-af9f-478d646d2cbc:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"3182b268e15e5a9","mac":"5a:f3:22:49:8e:3e"},{"name":"eth0","mac":"0a:58:0a:d9:00:42","sandbox":"/var/run/netns/ca0ee0ab-f68a-4bac-8e15-e3a1d2da9ada"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.66/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:56:29.767822037+00:00 stderr F I1208 17:56:29.767775 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx", UID:"f97402a7-57a3-4f4a-af9f-478d646d2cbc", APIVersion:"v1", ResourceVersion:"43620", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.66/23] from ovn-kubernetes 
2025-12-08T17:56:29.783713922+00:00 stderr F 2025-12-08T17:56:29Z [verbose] ADD finished CNI request ContainerID:"3182b268e15e5a9dae6bd9d62c0e1c15361887d6f5afe589908eb4becf27d9e8" Netns:"/var/run/netns/ca0ee0ab-f68a-4bac-8e15-e3a1d2da9ada" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx;K8S_POD_INFRA_CONTAINER_ID=3182b268e15e5a9dae6bd9d62c0e1c15361887d6f5afe589908eb4becf27d9e8;K8S_POD_UID=f97402a7-57a3-4f4a-af9f-478d646d2cbc" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"5a:f3:22:49:8e:3e\",\"name\":\"3182b268e15e5a9\"},{\"mac\":\"0a:58:0a:d9:00:42\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/ca0ee0ab-f68a-4bac-8e15-e3a1d2da9ada\"}],\"ips\":[{\"address\":\"10.217.0.66/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:56:30.532116321+00:00 stderr F 2025-12-08T17:56:30Z [verbose] ADD starting CNI request ContainerID:"cb663a0d10c4cf8f0556c3d9ba76fa1ca496666a39393c22677549dfa2d54c1f" Netns:"/var/run/netns/43739f90-5aca-4183-9305-fb5c6895bf2c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj;K8S_POD_INFRA_CONTAINER_ID=cb663a0d10c4cf8f0556c3d9ba76fa1ca496666a39393c22677549dfa2d54c1f;K8S_POD_UID=c70d8b4a-afd5-4ece-bd7f-9caf1f100d65" Path:"" 2025-12-08T17:56:30.690187464+00:00 stderr F I1208 17:56:30.683460 29179 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:56:30.690187464+00:00 stderr F I1208 17:56:30.684039 29179 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:56:30.690187464+00:00 stderr F I1208 17:56:30.684083 29179 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:56:30.690187464+00:00 stderr F I1208 17:56:30.684102 29179 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:56:30.690187464+00:00 stderr F I1208 17:56:30.684128 29179 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:56:30.690801041+00:00 stderr F 2025-12-08T17:56:30Z [verbose] Add: openshift-marketplace:6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj:c70d8b4a-afd5-4ece-bd7f-9caf1f100d65:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"cb663a0d10c4cf8","mac":"ea:72:f0:eb:99:72"},{"name":"eth0","mac":"0a:58:0a:d9:00:45","sandbox":"/var/run/netns/43739f90-5aca-4183-9305-fb5c6895bf2c"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.69/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:56:30.691547720+00:00 stderr F I1208 17:56:30.690992 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj", UID:"c70d8b4a-afd5-4ece-bd7f-9caf1f100d65", APIVersion:"v1", ResourceVersion:"43638", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.69/23] from ovn-kubernetes 2025-12-08T17:56:30.708717428+00:00 stderr F 2025-12-08T17:56:30Z [verbose] ADD finished CNI request ContainerID:"cb663a0d10c4cf8f0556c3d9ba76fa1ca496666a39393c22677549dfa2d54c1f" Netns:"/var/run/netns/43739f90-5aca-4183-9305-fb5c6895bf2c" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj;K8S_POD_INFRA_CONTAINER_ID=cb663a0d10c4cf8f0556c3d9ba76fa1ca496666a39393c22677549dfa2d54c1f;K8S_POD_UID=c70d8b4a-afd5-4ece-bd7f-9caf1f100d65" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ea:72:f0:eb:99:72\",\"name\":\"cb663a0d10c4cf8\"},{\"mac\":\"0a:58:0a:d9:00:45\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/43739f90-5aca-4183-9305-fb5c6895bf2c\"}],\"ips\":[{\"address\":\"10.217.0.69/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:56:35.310157642+00:00 stderr F 2025-12-08T17:56:35Z [verbose] DEL starting CNI request ContainerID:"cb663a0d10c4cf8f0556c3d9ba76fa1ca496666a39393c22677549dfa2d54c1f" Netns:"/var/run/netns/43739f90-5aca-4183-9305-fb5c6895bf2c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj;K8S_POD_INFRA_CONTAINER_ID=cb663a0d10c4cf8f0556c3d9ba76fa1ca496666a39393c22677549dfa2d54c1f;K8S_POD_UID=c70d8b4a-afd5-4ece-bd7f-9caf1f100d65" Path:"" 2025-12-08T17:56:35.311048425+00:00 stderr F 2025-12-08T17:56:35Z [verbose] Del: openshift-marketplace:6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj:c70d8b4a-afd5-4ece-bd7f-9caf1f100d65:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:56:35.311431375+00:00 stderr F 2025-12-08T17:56:35Z [verbose] DEL starting CNI request ContainerID:"3c2d2aabec3ddf467be563ae844cae07e4cb0c8bd4763b2ce84272a0a197550d" Netns:"/var/run/netns/093e70b0-771c-4dc3-9d88-1ed5b540491f" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq;K8S_POD_INFRA_CONTAINER_ID=3c2d2aabec3ddf467be563ae844cae07e4cb0c8bd4763b2ce84272a0a197550d;K8S_POD_UID=8dfcd1bd-ac9d-4eba-b160-b7f4335fb440" Path:"" 2025-12-08T17:56:35.311735413+00:00 stderr F 2025-12-08T17:56:35Z [verbose] Del: service-telemetry:36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq:8dfcd1bd-ac9d-4eba-b160-b7f4335fb440:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:56:35.318580352+00:00 stderr F 2025-12-08T17:56:35Z [verbose] DEL starting CNI request ContainerID:"3182b268e15e5a9dae6bd9d62c0e1c15361887d6f5afe589908eb4becf27d9e8" Netns:"/var/run/netns/ca0ee0ab-f68a-4bac-8e15-e3a1d2da9ada" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx;K8S_POD_INFRA_CONTAINER_ID=3182b268e15e5a9dae6bd9d62c0e1c15361887d6f5afe589908eb4becf27d9e8;K8S_POD_UID=f97402a7-57a3-4f4a-af9f-478d646d2cbc" Path:"" 2025-12-08T17:56:35.318843439+00:00 stderr F 2025-12-08T17:56:35Z [verbose] Del: service-telemetry:f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx:f97402a7-57a3-4f4a-af9f-478d646d2cbc:ovn-kubernetes:eth0 
{"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:56:35.461411607+00:00 stderr F 2025-12-08T17:56:35Z [verbose] DEL finished CNI request ContainerID:"cb663a0d10c4cf8f0556c3d9ba76fa1ca496666a39393c22677549dfa2d54c1f" Netns:"/var/run/netns/43739f90-5aca-4183-9305-fb5c6895bf2c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj;K8S_POD_INFRA_CONTAINER_ID=cb663a0d10c4cf8f0556c3d9ba76fa1ca496666a39393c22677549dfa2d54c1f;K8S_POD_UID=c70d8b4a-afd5-4ece-bd7f-9caf1f100d65" Path:"", result: "", err: 2025-12-08T17:56:35.558645172+00:00 stderr F 2025-12-08T17:56:35Z [verbose] DEL finished CNI request ContainerID:"3c2d2aabec3ddf467be563ae844cae07e4cb0c8bd4763b2ce84272a0a197550d" Netns:"/var/run/netns/093e70b0-771c-4dc3-9d88-1ed5b540491f" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq;K8S_POD_INFRA_CONTAINER_ID=3c2d2aabec3ddf467be563ae844cae07e4cb0c8bd4763b2ce84272a0a197550d;K8S_POD_UID=8dfcd1bd-ac9d-4eba-b160-b7f4335fb440" Path:"", result: "", err: 2025-12-08T17:56:35.558826677+00:00 stderr F 2025-12-08T17:56:35Z [verbose] DEL finished CNI request ContainerID:"3182b268e15e5a9dae6bd9d62c0e1c15361887d6f5afe589908eb4becf27d9e8" Netns:"/var/run/netns/ca0ee0ab-f68a-4bac-8e15-e3a1d2da9ada" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx;K8S_POD_INFRA_CONTAINER_ID=3182b268e15e5a9dae6bd9d62c0e1c15361887d6f5afe589908eb4becf27d9e8;K8S_POD_UID=f97402a7-57a3-4f4a-af9f-478d646d2cbc" Path:"", result: "", err: 2025-12-08T17:56:39.971975501+00:00 stderr F 2025-12-08T17:56:39Z [verbose] ADD starting CNI request ContainerID:"1d8201820a175b8c1c05383eb05125d6444707e713bbea7f072e21acc90251cf" Netns:"/var/run/netns/53eb3c43-9508-4b0a-84b9-e3d109ce8a12" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=interconnect-operator-78b9bd8798-456sz;K8S_POD_INFRA_CONTAINER_ID=1d8201820a175b8c1c05383eb05125d6444707e713bbea7f072e21acc90251cf;K8S_POD_UID=871b0dde-aad5-4e54-bd14-1c4bc8779b60" Path:"" 2025-12-08T17:56:40.154297106+00:00 stderr F I1208 17:56:40.148167 29704 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:56:40.154297106+00:00 stderr F I1208 17:56:40.148543 29704 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:56:40.154297106+00:00 stderr F I1208 17:56:40.148563 29704 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:56:40.154297106+00:00 stderr F I1208 17:56:40.148573 29704 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:56:40.154297106+00:00 stderr F I1208 17:56:40.148584 29704 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:56:40.154589864+00:00 stderr F 2025-12-08T17:56:40Z [verbose] Add: service-telemetry:interconnect-operator-78b9bd8798-456sz:871b0dde-aad5-4e54-bd14-1c4bc8779b60:ovn-kubernetes(ovn-kubernetes):eth0 
{"cniVersion":"0.4.0","interfaces":[{"name":"1d8201820a175b8","mac":"d6:ad:f8:3b:34:db"},{"name":"eth0","mac":"0a:58:0a:d9:00:46","sandbox":"/var/run/netns/53eb3c43-9508-4b0a-84b9-e3d109ce8a12"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.70/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:56:40.154839080+00:00 stderr F I1208 17:56:40.154765 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"interconnect-operator-78b9bd8798-456sz", UID:"871b0dde-aad5-4e54-bd14-1c4bc8779b60", APIVersion:"v1", ResourceVersion:"43802", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.70/23] from ovn-kubernetes 2025-12-08T17:56:40.164242455+00:00 stderr F 2025-12-08T17:56:40Z [verbose] ADD finished CNI request ContainerID:"1d8201820a175b8c1c05383eb05125d6444707e713bbea7f072e21acc90251cf" Netns:"/var/run/netns/53eb3c43-9508-4b0a-84b9-e3d109ce8a12" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=interconnect-operator-78b9bd8798-456sz;K8S_POD_INFRA_CONTAINER_ID=1d8201820a175b8c1c05383eb05125d6444707e713bbea7f072e21acc90251cf;K8S_POD_UID=871b0dde-aad5-4e54-bd14-1c4bc8779b60" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"d6:ad:f8:3b:34:db\",\"name\":\"1d8201820a175b8\"},{\"mac\":\"0a:58:0a:d9:00:46\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/53eb3c43-9508-4b0a-84b9-e3d109ce8a12\"}],\"ips\":[{\"address\":\"10.217.0.70/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:56:41.417684458+00:00 stderr F 2025-12-08T17:56:41Z [verbose] ADD starting CNI request ContainerID:"cc64fdcf023ae6ec287ba1b3bf0cef5081db5760d0ee172cbb13cf476e1bf387" Netns:"/var/run/netns/0c6668f2-2901-4538-ab6c-e47a7cfc9214" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=service-telemetry-operator-79647f8775-zs8hl;K8S_POD_INFRA_CONTAINER_ID=cc64fdcf023ae6ec287ba1b3bf0cef5081db5760d0ee172cbb13cf476e1bf387;K8S_POD_UID=b4cd1da4-b555-42d4-b09a-38f141ee7dc4" Path:"" 2025-12-08T17:56:41.773755865+00:00 stderr F I1208 17:56:41.764920 29775 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:56:41.773755865+00:00 stderr F I1208 17:56:41.765396 29775 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:56:41.773755865+00:00 stderr F I1208 17:56:41.765414 29775 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:56:41.773755865+00:00 stderr F I1208 17:56:41.765423 29775 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:56:41.773755865+00:00 stderr F I1208 17:56:41.765434 29775 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:56:41.774358620+00:00 stderr F 2025-12-08T17:56:41Z [verbose] Add: service-telemetry:service-telemetry-operator-79647f8775-zs8hl:b4cd1da4-b555-42d4-b09a-38f141ee7dc4:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"cc64fdcf023ae6e","mac":"ce:d1:65:3f:24:40"},{"name":"eth0","mac":"0a:58:0a:d9:00:47","sandbox":"/var/run/netns/0c6668f2-2901-4538-ab6c-e47a7cfc9214"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.71/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:56:41.774655988+00:00 stderr F I1208 17:56:41.774625 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", 
Name:"service-telemetry-operator-79647f8775-zs8hl", UID:"b4cd1da4-b555-42d4-b09a-38f141ee7dc4", APIVersion:"v1", ResourceVersion:"43876", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.71/23] from ovn-kubernetes 2025-12-08T17:56:41.790676176+00:00 stderr F 2025-12-08T17:56:41Z [verbose] ADD finished CNI request ContainerID:"cc64fdcf023ae6ec287ba1b3bf0cef5081db5760d0ee172cbb13cf476e1bf387" Netns:"/var/run/netns/0c6668f2-2901-4538-ab6c-e47a7cfc9214" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=service-telemetry-operator-79647f8775-zs8hl;K8S_POD_INFRA_CONTAINER_ID=cc64fdcf023ae6ec287ba1b3bf0cef5081db5760d0ee172cbb13cf476e1bf387;K8S_POD_UID=b4cd1da4-b555-42d4-b09a-38f141ee7dc4" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ce:d1:65:3f:24:40\",\"name\":\"cc64fdcf023ae6e\"},{\"mac\":\"0a:58:0a:d9:00:47\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/0c6668f2-2901-4538-ab6c-e47a7cfc9214\"}],\"ips\":[{\"address\":\"10.217.0.71/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:56:42.834285145+00:00 stderr P 2025-12-08T17:56:42Z [verbose] 2025-12-08T17:56:42.834354117+00:00 stderr P ADD starting CNI request ContainerID:"935e5adaa947f7579267581015d5cf9e7812bcc51aa5127e2dbb772b426d393f" Netns:"/var/run/netns/f5eb171b-9c89-483c-8753-81237c5e87ce" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=smart-gateway-operator-5cd794ff55-w8r45;K8S_POD_INFRA_CONTAINER_ID=935e5adaa947f7579267581015d5cf9e7812bcc51aa5127e2dbb772b426d393f;K8S_POD_UID=88186169-23e9-44fb-a70c-0f6fe06b2800" Path:"" 2025-12-08T17:56:42.834388098+00:00 stderr F 2025-12-08T17:56:43.254542426+00:00 stderr F I1208 17:56:43.247761 29882 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:56:43.254542426+00:00 stderr F I1208 17:56:43.248156 29882 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:56:43.254542426+00:00 stderr F I1208 17:56:43.248164 29882 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:56:43.254542426+00:00 stderr F I1208 17:56:43.248170 29882 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:56:43.254542426+00:00 stderr F I1208 17:56:43.248175 29882 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:56:43.255301327+00:00 stderr P 2025-12-08T17:56:43Z [verbose] 2025-12-08T17:56:43.255334778+00:00 stderr P Add: service-telemetry:smart-gateway-operator-5cd794ff55-w8r45:88186169-23e9-44fb-a70c-0f6fe06b2800:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"935e5adaa947f75","mac":"2e:46:1c:3b:d5:b9"},{"name":"eth0","mac":"0a:58:0a:d9:00:48","sandbox":"/var/run/netns/f5eb171b-9c89-483c-8753-81237c5e87ce"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.72/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:56:43.255354348+00:00 stderr F 2025-12-08T17:56:43.255610565+00:00 stderr F I1208 17:56:43.255523 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"smart-gateway-operator-5cd794ff55-w8r45", UID:"88186169-23e9-44fb-a70c-0f6fe06b2800", APIVersion:"v1", ResourceVersion:"43945", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.72/23] from ovn-kubernetes 2025-12-08T17:56:43.269519067+00:00 stderr F 2025-12-08T17:56:43Z 
[verbose] ADD finished CNI request ContainerID:"935e5adaa947f7579267581015d5cf9e7812bcc51aa5127e2dbb772b426d393f" Netns:"/var/run/netns/f5eb171b-9c89-483c-8753-81237c5e87ce" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=smart-gateway-operator-5cd794ff55-w8r45;K8S_POD_INFRA_CONTAINER_ID=935e5adaa947f7579267581015d5cf9e7812bcc51aa5127e2dbb772b426d393f;K8S_POD_UID=88186169-23e9-44fb-a70c-0f6fe06b2800" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"2e:46:1c:3b:d5:b9\",\"name\":\"935e5adaa947f75\"},{\"mac\":\"0a:58:0a:d9:00:48\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/f5eb171b-9c89-483c-8753-81237c5e87ce\"}],\"ips\":[{\"address\":\"10.217.0.72/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:57:28.945814236+00:00 stderr P 2025-12-08T17:57:28Z [verbose] 2025-12-08T17:57:28.945906088+00:00 stderr P ADD starting CNI request ContainerID:"b9eccbab184d45ec8b09d562fad481be8c01d5ebbe8dc86e74b7741c2826ecb8" Netns:"/var/run/netns/b0c0fa35-4a72-47c0-9d0f-bafa8960fe43" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-interconnect-55bf8d5cb-76n5w;K8S_POD_INFRA_CONTAINER_ID=b9eccbab184d45ec8b09d562fad481be8c01d5ebbe8dc86e74b7741c2826ecb8;K8S_POD_UID=df9f5211-ab02-49a8-82e6-0c2f4b07bc52" Path:"" 2025-12-08T17:57:28.945937589+00:00 stderr F 2025-12-08T17:57:29.329022000+00:00 stderr F I1208 17:57:29.321396 31488 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:57:29.329022000+00:00 stderr F I1208 17:57:29.321981 31488 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:57:29.329022000+00:00 stderr F I1208 17:57:29.322001 31488 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:57:29.329022000+00:00 stderr F I1208 17:57:29.322010 31488 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:57:29.329022000+00:00 stderr F I1208 17:57:29.322030 31488 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:57:29.329856183+00:00 stderr P 2025-12-08T17:57:29Z [verbose] 2025-12-08T17:57:29.329938675+00:00 stderr P Add: service-telemetry:default-interconnect-55bf8d5cb-76n5w:df9f5211-ab02-49a8-82e6-0c2f4b07bc52:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"b9eccbab184d45e","mac":"b6:33:98:4d:eb:a8"},{"name":"eth0","mac":"0a:58:0a:d9:00:49","sandbox":"/var/run/netns/b0c0fa35-4a72-47c0-9d0f-bafa8960fe43"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.73/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:57:29.329985016+00:00 stderr F 2025-12-08T17:57:29.330628463+00:00 stderr F I1208 17:57:29.330583 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"default-interconnect-55bf8d5cb-76n5w", UID:"df9f5211-ab02-49a8-82e6-0c2f4b07bc52", APIVersion:"v1", ResourceVersion:"44942", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.73/23] from ovn-kubernetes 2025-12-08T17:57:29.349297430+00:00 stderr F 2025-12-08T17:57:29Z [verbose] ADD finished CNI request ContainerID:"b9eccbab184d45ec8b09d562fad481be8c01d5ebbe8dc86e74b7741c2826ecb8" Netns:"/var/run/netns/b0c0fa35-4a72-47c0-9d0f-bafa8960fe43" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-interconnect-55bf8d5cb-76n5w;K8S_POD_INFRA_CONTAINER_ID=b9eccbab184d45ec8b09d562fad481be8c01d5ebbe8dc86e74b7741c2826ecb8;K8S_POD_UID=df9f5211-ab02-49a8-82e6-0c2f4b07bc52" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"b6:33:98:4d:eb:a8\",\"name\":\"b9eccbab184d45e\"},{\"mac\":\"0a:58:0a:d9:00:49\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/b0c0fa35-4a72-47c0-9d0f-bafa8960fe43\"}],\"ips\":[{\"address\":\"10.217.0.73/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:57:40.485963883+00:00 stderr F 2025-12-08T17:57:40Z [verbose] ADD starting CNI request ContainerID:"36ca181347274fbe8b29a8090ba78e0141864b9aeeccac5eab1d78b763bfc7c0" Netns:"/var/run/netns/eb883e8a-c47f-4c4a-a631-5206e6359f7c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=prometheus-default-0;K8S_POD_INFRA_CONTAINER_ID=36ca181347274fbe8b29a8090ba78e0141864b9aeeccac5eab1d78b763bfc7c0;K8S_POD_UID=3d62a6f6-b57c-48e0-9279-d8dadd01a921" Path:"" 2025-12-08T17:57:40.635004045+00:00 stderr F I1208 17:57:40.630562 32019 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:57:40.635004045+00:00 stderr F I1208 17:57:40.630961 32019 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:57:40.635004045+00:00 stderr F I1208 17:57:40.630974 32019 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:57:40.635004045+00:00 stderr F I1208 17:57:40.630981 32019 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:57:40.635004045+00:00 stderr F I1208 17:57:40.630987 32019 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:57:40.635566680+00:00 stderr F 2025-12-08T17:57:40Z [verbose] Add: service-telemetry:prometheus-default-0:3d62a6f6-b57c-48e0-9279-d8dadd01a921:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"36ca181347274fb","mac":"8e:6c:ce:9e:d7:c0"},{"name":"eth0","mac":"0a:58:0a:d9:00:4a","sandbox":"/var/run/netns/eb883e8a-c47f-4c4a-a631-5206e6359f7c"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.74/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:57:40.635801006+00:00 stderr F I1208 17:57:40.635749 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"prometheus-default-0", UID:"3d62a6f6-b57c-48e0-9279-d8dadd01a921", APIVersion:"v1", ResourceVersion:"45041", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.74/23] from ovn-kubernetes 2025-12-08T17:57:40.652047405+00:00 stderr F 2025-12-08T17:57:40Z [verbose] ADD finished CNI request ContainerID:"36ca181347274fbe8b29a8090ba78e0141864b9aeeccac5eab1d78b763bfc7c0" Netns:"/var/run/netns/eb883e8a-c47f-4c4a-a631-5206e6359f7c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=prometheus-default-0;K8S_POD_INFRA_CONTAINER_ID=36ca181347274fbe8b29a8090ba78e0141864b9aeeccac5eab1d78b763bfc7c0;K8S_POD_UID=3d62a6f6-b57c-48e0-9279-d8dadd01a921" Path:"", result: 
"{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"8e:6c:ce:9e:d7:c0\",\"name\":\"36ca181347274fb\"},{\"mac\":\"0a:58:0a:d9:00:4a\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/eb883e8a-c47f-4c4a-a631-5206e6359f7c\"}],\"ips\":[{\"address\":\"10.217.0.74/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:57:48.845117361+00:00 stderr P 2025-12-08T17:57:48Z [verbose] 2025-12-08T17:57:48.845192962+00:00 stderr P ADD starting CNI request ContainerID:"f8e9688711f76b654202dc32616384db9c588343391f2380d5559ee3a510781f" Netns:"/var/run/netns/b53d8d8a-785f-45e5-948b-2113d01dc147" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-snmp-webhook-6774d8dfbc-75fxn;K8S_POD_INFRA_CONTAINER_ID=f8e9688711f76b654202dc32616384db9c588343391f2380d5559ee3a510781f;K8S_POD_UID=37bee34a-f42e-4493-85f3-7f5e5cbd7301" Path:"" 2025-12-08T17:57:48.845218343+00:00 stderr F 2025-12-08T17:57:49.115911160+00:00 stderr F I1208 17:57:49.109480 32391 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:57:49.115911160+00:00 stderr F I1208 17:57:49.109967 32391 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:57:49.115911160+00:00 stderr F I1208 17:57:49.109984 32391 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:57:49.115911160+00:00 stderr F I1208 17:57:49.109992 32391 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:57:49.115911160+00:00 stderr F I1208 17:57:49.109998 32391 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:57:49.116272859+00:00 stderr F 2025-12-08T17:57:49Z [verbose] Add: service-telemetry:default-snmp-webhook-6774d8dfbc-75fxn:37bee34a-f42e-4493-85f3-7f5e5cbd7301:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"f8e9688711f76b6","mac":"82:c8:25:04:6c:db"},{"name":"eth0","mac":"0a:58:0a:d9:00:4b","sandbox":"/var/run/netns/b53d8d8a-785f-45e5-948b-2113d01dc147"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.75/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:57:49.116510675+00:00 stderr F I1208 17:57:49.116472 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"default-snmp-webhook-6774d8dfbc-75fxn", UID:"37bee34a-f42e-4493-85f3-7f5e5cbd7301", APIVersion:"v1", ResourceVersion:"45148", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.75/23] from ovn-kubernetes 2025-12-08T17:57:49.133649918+00:00 stderr F 2025-12-08T17:57:49Z [verbose] ADD finished CNI request ContainerID:"f8e9688711f76b654202dc32616384db9c588343391f2380d5559ee3a510781f" Netns:"/var/run/netns/b53d8d8a-785f-45e5-948b-2113d01dc147" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-snmp-webhook-6774d8dfbc-75fxn;K8S_POD_INFRA_CONTAINER_ID=f8e9688711f76b654202dc32616384db9c588343391f2380d5559ee3a510781f;K8S_POD_UID=37bee34a-f42e-4493-85f3-7f5e5cbd7301" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"82:c8:25:04:6c:db\",\"name\":\"f8e9688711f76b6\"},{\"mac\":\"0a:58:0a:d9:00:4b\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/b53d8d8a-785f-45e5-948b-2113d01dc147\"}],\"ips\":[{\"address\":\"10.217.0.75/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:57:57.539438512+00:00 stderr F 2025-12-08T17:57:57Z [verbose] ADD 
starting CNI request ContainerID:"725078b150652ee37c6f8be09b83faf913154086093a95789f27cecb17163254" Netns:"/var/run/netns/ceacbb1d-8d5d-45b5-a933-feea6eb322d3" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=alertmanager-default-0;K8S_POD_INFRA_CONTAINER_ID=725078b150652ee37c6f8be09b83faf913154086093a95789f27cecb17163254;K8S_POD_UID=81e17e77-b0f9-4df6-8c85-e06d1fd7a46a" Path:"" 2025-12-08T17:57:57.970041950+00:00 stderr F I1208 17:57:57.961652 32882 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:57:57.970041950+00:00 stderr F I1208 17:57:57.962338 32882 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:57:57.970041950+00:00 stderr F I1208 17:57:57.962350 32882 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:57:57.970041950+00:00 stderr F I1208 17:57:57.962357 32882 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:57:57.970041950+00:00 stderr F I1208 17:57:57.962363 32882 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:57:57.970287166+00:00 stderr F 2025-12-08T17:57:57Z [verbose] Add: service-telemetry:alertmanager-default-0:81e17e77-b0f9-4df6-8c85-e06d1fd7a46a:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"725078b150652ee","mac":"3a:61:78:6a:31:b1"},{"name":"eth0","mac":"0a:58:0a:d9:00:4c","sandbox":"/var/run/netns/ceacbb1d-8d5d-45b5-a933-feea6eb322d3"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.76/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:57:57.970507753+00:00 stderr F I1208 17:57:57.970469 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"alertmanager-default-0", UID:"81e17e77-b0f9-4df6-8c85-e06d1fd7a46a", APIVersion:"v1", ResourceVersion:"45190", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.76/23] from ovn-kubernetes 2025-12-08T17:57:57.992079310+00:00 stderr F 2025-12-08T17:57:57Z [verbose] ADD finished CNI request ContainerID:"725078b150652ee37c6f8be09b83faf913154086093a95789f27cecb17163254" Netns:"/var/run/netns/ceacbb1d-8d5d-45b5-a933-feea6eb322d3" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=alertmanager-default-0;K8S_POD_INFRA_CONTAINER_ID=725078b150652ee37c6f8be09b83faf913154086093a95789f27cecb17163254;K8S_POD_UID=81e17e77-b0f9-4df6-8c85-e06d1fd7a46a" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"3a:61:78:6a:31:b1\",\"name\":\"725078b150652ee\"},{\"mac\":\"0a:58:0a:d9:00:4c\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/ceacbb1d-8d5d-45b5-a933-feea6eb322d3\"}],\"ips\":[{\"address\":\"10.217.0.76/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:58:10.250224270+00:00 stderr F 2025-12-08T17:58:10Z [verbose] ADD starting CNI request ContainerID:"3522d94343d4e36504f002acd25129011e004332fa5391e14e3ba4bc63217040" Netns:"/var/run/netns/98aa8d94-b993-48cb-8338-df8e5687ad49" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-cloud1-coll-meter-smartgateway-787645d794-4zrzx;K8S_POD_INFRA_CONTAINER_ID=3522d94343d4e36504f002acd25129011e004332fa5391e14e3ba4bc63217040;K8S_POD_UID=0e2a1994-199f-4b38-903b-cba9061dfcad" Path:"" 2025-12-08T17:58:10.619739191+00:00 stderr F I1208 17:58:10.613961 34118 envvar.go:172] "Feature gate default 
state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:58:10.619739191+00:00 stderr F I1208 17:58:10.614702 34118 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:58:10.619739191+00:00 stderr F I1208 17:58:10.614748 34118 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:58:10.619739191+00:00 stderr F I1208 17:58:10.614788 34118 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:58:10.619739191+00:00 stderr F I1208 17:58:10.614835 34118 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:58:10.620159411+00:00 stderr F 2025-12-08T17:58:10Z [verbose] Add: service-telemetry:default-cloud1-coll-meter-smartgateway-787645d794-4zrzx:0e2a1994-199f-4b38-903b-cba9061dfcad:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"3522d94343d4e36","mac":"4a:0d:0a:f7:27:4d"},{"name":"eth0","mac":"0a:58:0a:d9:00:4d","sandbox":"/var/run/netns/98aa8d94-b993-48cb-8338-df8e5687ad49"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.77/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:58:10.620656424+00:00 stderr F I1208 17:58:10.620585 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"default-cloud1-coll-meter-smartgateway-787645d794-4zrzx", UID:"0e2a1994-199f-4b38-903b-cba9061dfcad", APIVersion:"v1", ResourceVersion:"45311", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.77/23] from ovn-kubernetes 2025-12-08T17:58:10.632022688+00:00 stderr F 2025-12-08T17:58:10Z [verbose] ADD finished CNI request ContainerID:"3522d94343d4e36504f002acd25129011e004332fa5391e14e3ba4bc63217040" Netns:"/var/run/netns/98aa8d94-b993-48cb-8338-df8e5687ad49" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-cloud1-coll-meter-smartgateway-787645d794-4zrzx;K8S_POD_INFRA_CONTAINER_ID=3522d94343d4e36504f002acd25129011e004332fa5391e14e3ba4bc63217040;K8S_POD_UID=0e2a1994-199f-4b38-903b-cba9061dfcad" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"4a:0d:0a:f7:27:4d\",\"name\":\"3522d94343d4e36\"},{\"mac\":\"0a:58:0a:d9:00:4d\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/98aa8d94-b993-48cb-8338-df8e5687ad49\"}],\"ips\":[{\"address\":\"10.217.0.77/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:58:11.407980763+00:00 stderr F 2025-12-08T17:58:11Z [verbose] ADD starting CNI request ContainerID:"15dd03737f23b7b6bcb46c7c34055a189a88ce71b67519c8210e73e1a6196a9a" Netns:"/var/run/netns/da1a297c-7e4c-4ee4-bd14-3c24530ac6e8" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v;K8S_POD_INFRA_CONTAINER_ID=15dd03737f23b7b6bcb46c7c34055a189a88ce71b67519c8210e73e1a6196a9a;K8S_POD_UID=ef58ecee-c967-4d4f-946b-8c8123a73084" Path:"" 2025-12-08T17:58:11.578031348+00:00 stderr F I1208 17:58:11.567274 34342 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:58:11.578031348+00:00 stderr F I1208 17:58:11.567689 34342 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:58:11.578031348+00:00 stderr F I1208 17:58:11.567711 34342 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:58:11.578031348+00:00 stderr F I1208 
17:58:11.567719 34342 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:58:11.578031348+00:00 stderr F I1208 17:58:11.567726 34342 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:58:11.578421278+00:00 stderr F 2025-12-08T17:58:11Z [verbose] Add: service-telemetry:default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v:ef58ecee-c967-4d4f-946b-8c8123a73084:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"15dd03737f23b7b","mac":"62:da:dd:c7:61:87"},{"name":"eth0","mac":"0a:58:0a:d9:00:4e","sandbox":"/var/run/netns/da1a297c-7e4c-4ee4-bd14-3c24530ac6e8"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.78/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:58:11.578966452+00:00 stderr F I1208 17:58:11.578921 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v", UID:"ef58ecee-c967-4d4f-946b-8c8123a73084", APIVersion:"v1", ResourceVersion:"45353", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.78/23] from ovn-kubernetes 2025-12-08T17:58:11.591802294+00:00 stderr F 2025-12-08T17:58:11Z [verbose] ADD finished CNI request ContainerID:"15dd03737f23b7b6bcb46c7c34055a189a88ce71b67519c8210e73e1a6196a9a" Netns:"/var/run/netns/da1a297c-7e4c-4ee4-bd14-3c24530ac6e8" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v;K8S_POD_INFRA_CONTAINER_ID=15dd03737f23b7b6bcb46c7c34055a189a88ce71b67519c8210e73e1a6196a9a;K8S_POD_UID=ef58ecee-c967-4d4f-946b-8c8123a73084" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"62:da:dd:c7:61:87\",\"name\":\"15dd03737f23b7b\"},{\"mac\":\"0a:58:0a:d9:00:4e\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/da1a297c-7e4c-4ee4-bd14-3c24530ac6e8\"}],\"ips\":[{\"address\":\"10.217.0.78/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:58:19.047093430+00:00 stderr F 2025-12-08T17:58:19Z [verbose] ADD starting CNI request ContainerID:"677eeab8da7fd167f2eb41701230e28a9f38f2ba8fcffa2c3cbacf19a465cc22" Netns:"/var/run/netns/5c69b929-3c1c-4687-9ec7-40afca0a0323" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp;K8S_POD_INFRA_CONTAINER_ID=677eeab8da7fd167f2eb41701230e28a9f38f2ba8fcffa2c3cbacf19a465cc22;K8S_POD_UID=f486b0de-c62f-46a2-8649-dca61a92506c" Path:"" 2025-12-08T17:58:19.429958236+00:00 stderr F I1208 17:58:19.419771 35275 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:58:19.429958236+00:00 stderr F I1208 17:58:19.421104 35275 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:58:19.429958236+00:00 stderr F I1208 17:58:19.421149 35275 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:58:19.429958236+00:00 stderr F I1208 17:58:19.421159 35275 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:58:19.429958236+00:00 stderr F I1208 17:58:19.421167 35275 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:58:19.429958236+00:00 stderr F 2025-12-08T17:58:19Z [verbose] Add: 
service-telemetry:default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp:f486b0de-c62f-46a2-8649-dca61a92506c:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"677eeab8da7fd16","mac":"d2:64:da:ce:e0:2f"},{"name":"eth0","mac":"0a:58:0a:d9:00:4f","sandbox":"/var/run/netns/5c69b929-3c1c-4687-9ec7-40afca0a0323"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.79/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:58:19.429958236+00:00 stderr F I1208 17:58:19.429749 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp", UID:"f486b0de-c62f-46a2-8649-dca61a92506c", APIVersion:"v1", ResourceVersion:"45417", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.79/23] from ovn-kubernetes 2025-12-08T17:58:19.443409734+00:00 stderr F 2025-12-08T17:58:19Z [verbose] ADD finished CNI request ContainerID:"677eeab8da7fd167f2eb41701230e28a9f38f2ba8fcffa2c3cbacf19a465cc22" Netns:"/var/run/netns/5c69b929-3c1c-4687-9ec7-40afca0a0323" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp;K8S_POD_INFRA_CONTAINER_ID=677eeab8da7fd167f2eb41701230e28a9f38f2ba8fcffa2c3cbacf19a465cc22;K8S_POD_UID=f486b0de-c62f-46a2-8649-dca61a92506c" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"d2:64:da:ce:e0:2f\",\"name\":\"677eeab8da7fd16\"},{\"mac\":\"0a:58:0a:d9:00:4f\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/5c69b929-3c1c-4687-9ec7-40afca0a0323\"}],\"ips\":[{\"address\":\"10.217.0.79/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:58:20.520411489+00:00 stderr F 2025-12-08T17:58:20Z [verbose] ADD starting CNI request ContainerID:"fc460b0b57386679f435426a0847be0a107eb47388ad63b9c335baecad1525cb" Netns:"/var/run/netns/5b235fa4-1e0d-485b-ad3a-6cbba8a583b5" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-cloud1-coll-event-smartgateway-d956b4648-jwkwn;K8S_POD_INFRA_CONTAINER_ID=fc460b0b57386679f435426a0847be0a107eb47388ad63b9c335baecad1525cb;K8S_POD_UID=8ecda967-3335-4158-839b-9b4048b8f049" Path:"" 2025-12-08T17:58:20.887152268+00:00 stderr F I1208 17:58:20.877088 35517 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:58:20.887152268+00:00 stderr F I1208 17:58:20.877501 35517 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:58:20.887152268+00:00 stderr F I1208 17:58:20.877512 35517 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:58:20.887152268+00:00 stderr F I1208 17:58:20.877519 35517 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:58:20.887152268+00:00 stderr F I1208 17:58:20.877534 35517 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:58:20.887730232+00:00 stderr F 2025-12-08T17:58:20Z [verbose] Add: service-telemetry:default-cloud1-coll-event-smartgateway-d956b4648-jwkwn:8ecda967-3335-4158-839b-9b4048b8f049:ovn-kubernetes(ovn-kubernetes):eth0 
{"cniVersion":"0.4.0","interfaces":[{"name":"fc460b0b5738667","mac":"26:34:7a:82:45:8c"},{"name":"eth0","mac":"0a:58:0a:d9:00:50","sandbox":"/var/run/netns/5b235fa4-1e0d-485b-ad3a-6cbba8a583b5"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.80/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:58:20.887932937+00:00 stderr F I1208 17:58:20.887890 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"default-cloud1-coll-event-smartgateway-d956b4648-jwkwn", UID:"8ecda967-3335-4158-839b-9b4048b8f049", APIVersion:"v1", ResourceVersion:"45493", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.80/23] from ovn-kubernetes 2025-12-08T17:58:20.898699866+00:00 stderr F 2025-12-08T17:58:20Z [verbose] ADD finished CNI request ContainerID:"fc460b0b57386679f435426a0847be0a107eb47388ad63b9c335baecad1525cb" Netns:"/var/run/netns/5b235fa4-1e0d-485b-ad3a-6cbba8a583b5" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-cloud1-coll-event-smartgateway-d956b4648-jwkwn;K8S_POD_INFRA_CONTAINER_ID=fc460b0b57386679f435426a0847be0a107eb47388ad63b9c335baecad1525cb;K8S_POD_UID=8ecda967-3335-4158-839b-9b4048b8f049" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"26:34:7a:82:45:8c\",\"name\":\"fc460b0b5738667\"},{\"mac\":\"0a:58:0a:d9:00:50\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/5b235fa4-1e0d-485b-ad3a-6cbba8a583b5\"}],\"ips\":[{\"address\":\"10.217.0.80/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:58:34.113449980+00:00 stderr F 2025-12-08T17:58:34Z [verbose] ADD starting CNI request ContainerID:"8a2e7cdfc8f5a1739f56608ea6c7b4584cdaa12d21b19554af13cd002a673eaa" Netns:"/var/run/netns/8f55f4c9-1041-4aa3-a2d9-1e62eb9485af" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk;K8S_POD_INFRA_CONTAINER_ID=8a2e7cdfc8f5a1739f56608ea6c7b4584cdaa12d21b19554af13cd002a673eaa;K8S_POD_UID=35c3d7e4-3ad4-4184-a22e-86654ad7867b" Path:"" 2025-12-08T17:58:34.265571252+00:00 stderr F I1208 17:58:34.258556 36413 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:58:34.265571252+00:00 stderr F I1208 17:58:34.259176 36413 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:58:34.265571252+00:00 stderr F I1208 17:58:34.259189 36413 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:58:34.265571252+00:00 stderr F I1208 17:58:34.259195 36413 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:58:34.265571252+00:00 stderr F I1208 17:58:34.259201 36413 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:58:34.266340482+00:00 stderr F 2025-12-08T17:58:34Z [verbose] Add: service-telemetry:default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk:35c3d7e4-3ad4-4184-a22e-86654ad7867b:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"8a2e7cdfc8f5a17","mac":"1e:23:c4:33:a1:e7"},{"name":"eth0","mac":"0a:58:0a:d9:00:51","sandbox":"/var/run/netns/8f55f4c9-1041-4aa3-a2d9-1e62eb9485af"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.81/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:58:34.266627579+00:00 stderr F I1208 17:58:34.266560 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", 
Namespace:"service-telemetry", Name:"default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk", UID:"35c3d7e4-3ad4-4184-a22e-86654ad7867b", APIVersion:"v1", ResourceVersion:"45528", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.81/23] from ovn-kubernetes 2025-12-08T17:58:34.282835008+00:00 stderr F 2025-12-08T17:58:34Z [verbose] ADD finished CNI request ContainerID:"8a2e7cdfc8f5a1739f56608ea6c7b4584cdaa12d21b19554af13cd002a673eaa" Netns:"/var/run/netns/8f55f4c9-1041-4aa3-a2d9-1e62eb9485af" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk;K8S_POD_INFRA_CONTAINER_ID=8a2e7cdfc8f5a1739f56608ea6c7b4584cdaa12d21b19554af13cd002a673eaa;K8S_POD_UID=35c3d7e4-3ad4-4184-a22e-86654ad7867b" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"1e:23:c4:33:a1:e7\",\"name\":\"8a2e7cdfc8f5a17\"},{\"mac\":\"0a:58:0a:d9:00:51\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/8f55f4c9-1041-4aa3-a2d9-1e62eb9485af\"}],\"ips\":[{\"address\":\"10.217.0.81/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:58:34.518335015+00:00 stderr F 2025-12-08T17:58:34Z [verbose] DEL starting CNI request ContainerID:"b9eccbab184d45ec8b09d562fad481be8c01d5ebbe8dc86e74b7741c2826ecb8" Netns:"/var/run/netns/b0c0fa35-4a72-47c0-9d0f-bafa8960fe43" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-interconnect-55bf8d5cb-76n5w;K8S_POD_INFRA_CONTAINER_ID=b9eccbab184d45ec8b09d562fad481be8c01d5ebbe8dc86e74b7741c2826ecb8;K8S_POD_UID=df9f5211-ab02-49a8-82e6-0c2f4b07bc52" Path:"" 2025-12-08T17:58:34.519059033+00:00 stderr F 2025-12-08T17:58:34Z [verbose] Del: service-telemetry:default-interconnect-55bf8d5cb-76n5w:df9f5211-ab02-49a8-82e6-0c2f4b07bc52:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:58:34.702373872+00:00 stderr F 2025-12-08T17:58:34Z [verbose] DEL finished CNI request ContainerID:"b9eccbab184d45ec8b09d562fad481be8c01d5ebbe8dc86e74b7741c2826ecb8" Netns:"/var/run/netns/b0c0fa35-4a72-47c0-9d0f-bafa8960fe43" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-interconnect-55bf8d5cb-76n5w;K8S_POD_INFRA_CONTAINER_ID=b9eccbab184d45ec8b09d562fad481be8c01d5ebbe8dc86e74b7741c2826ecb8;K8S_POD_UID=df9f5211-ab02-49a8-82e6-0c2f4b07bc52" Path:"", result: "", err: 2025-12-08T17:58:35.152081254+00:00 stderr F 2025-12-08T17:58:35Z [verbose] ADD starting CNI request ContainerID:"ad1af3fce6542ee2b9720079ff3e519c97ea182e4f44abb305379e55fa862060" Netns:"/var/run/netns/937f25e6-56e1-4a48-9f71-31ff96d3d882" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-interconnect-55bf8d5cb-rwr2k;K8S_POD_INFRA_CONTAINER_ID=ad1af3fce6542ee2b9720079ff3e519c97ea182e4f44abb305379e55fa862060;K8S_POD_UID=d839602b-f183-45c8-af76-72a0d292aa33" Path:"" 2025-12-08T17:58:35.524953541+00:00 stderr F I1208 17:58:35.514611 36637 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:58:35.524953541+00:00 stderr F I1208 17:58:35.515233 36637 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:58:35.524953541+00:00 stderr F I1208 17:58:35.515242 36637 
envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:58:35.524953541+00:00 stderr F I1208 17:58:35.515250 36637 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:58:35.524953541+00:00 stderr F I1208 17:58:35.515258 36637 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:58:35.524953541+00:00 stderr F 2025-12-08T17:58:35Z [verbose] Add: service-telemetry:default-interconnect-55bf8d5cb-rwr2k:d839602b-f183-45c8-af76-72a0d292aa33:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"ad1af3fce6542ee","mac":"8a:2f:74:5c:cb:86"},{"name":"eth0","mac":"0a:58:0a:d9:00:52","sandbox":"/var/run/netns/937f25e6-56e1-4a48-9f71-31ff96d3d882"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.82/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:58:35.524953541+00:00 stderr F I1208 17:58:35.523208 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"default-interconnect-55bf8d5cb-rwr2k", UID:"d839602b-f183-45c8-af76-72a0d292aa33", APIVersion:"v1", ResourceVersion:"45623", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.82/23] from ovn-kubernetes 2025-12-08T17:58:35.541792306+00:00 stderr F 2025-12-08T17:58:35Z [verbose] ADD finished CNI request ContainerID:"ad1af3fce6542ee2b9720079ff3e519c97ea182e4f44abb305379e55fa862060" Netns:"/var/run/netns/937f25e6-56e1-4a48-9f71-31ff96d3d882" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=default-interconnect-55bf8d5cb-rwr2k;K8S_POD_INFRA_CONTAINER_ID=ad1af3fce6542ee2b9720079ff3e519c97ea182e4f44abb305379e55fa862060;K8S_POD_UID=d839602b-f183-45c8-af76-72a0d292aa33" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"8a:2f:74:5c:cb:86\",\"name\":\"ad1af3fce6542ee\"},{\"mac\":\"0a:58:0a:d9:00:52\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/937f25e6-56e1-4a48-9f71-31ff96d3d882\"}],\"ips\":[{\"address\":\"10.217.0.82/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:59:06.376764051+00:00 stderr F 2025-12-08T17:59:06Z [verbose] ADD starting CNI request ContainerID:"00e4072bf714a82fa7a0613060eb288b2efa56b76d893e0bc34338bdce1c1591" Netns:"/var/run/netns/f894970e-e3fe-44c8-a2f7-f77739ae6715" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=qdr-test;K8S_POD_INFRA_CONTAINER_ID=00e4072bf714a82fa7a0613060eb288b2efa56b76d893e0bc34338bdce1c1591;K8S_POD_UID=73a290f7-fdfb-4484-9e5f-e3f80b72dec3" Path:"" 2025-12-08T17:59:06.813429849+00:00 stderr F I1208 17:59:06.804205 39627 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:59:06.813429849+00:00 stderr F I1208 17:59:06.804708 39627 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:59:06.813429849+00:00 stderr F I1208 17:59:06.804724 39627 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:59:06.813429849+00:00 stderr F I1208 17:59:06.804732 39627 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:59:06.813429849+00:00 stderr F I1208 17:59:06.804740 39627 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:59:06.814030975+00:00 stderr F 2025-12-08T17:59:06Z [verbose] Add: 
service-telemetry:qdr-test:73a290f7-fdfb-4484-9e5f-e3f80b72dec3:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"00e4072bf714a82","mac":"22:8e:0e:f6:08:d3"},{"name":"eth0","mac":"0a:58:0a:d9:00:53","sandbox":"/var/run/netns/f894970e-e3fe-44c8-a2f7-f77739ae6715"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.83/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:59:06.814393634+00:00 stderr F I1208 17:59:06.814306 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"qdr-test", UID:"73a290f7-fdfb-4484-9e5f-e3f80b72dec3", APIVersion:"v1", ResourceVersion:"45894", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.83/23] from ovn-kubernetes 2025-12-08T17:59:06.830872809+00:00 stderr F 2025-12-08T17:59:06Z [verbose] ADD finished CNI request ContainerID:"00e4072bf714a82fa7a0613060eb288b2efa56b76d893e0bc34338bdce1c1591" Netns:"/var/run/netns/f894970e-e3fe-44c8-a2f7-f77739ae6715" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=qdr-test;K8S_POD_INFRA_CONTAINER_ID=00e4072bf714a82fa7a0613060eb288b2efa56b76d893e0bc34338bdce1c1591;K8S_POD_UID=73a290f7-fdfb-4484-9e5f-e3f80b72dec3" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"22:8e:0e:f6:08:d3\",\"name\":\"00e4072bf714a82\"},{\"mac\":\"0a:58:0a:d9:00:53\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/f894970e-e3fe-44c8-a2f7-f77739ae6715\"}],\"ips\":[{\"address\":\"10.217.0.83/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:59:14.555302661+00:00 stderr F 2025-12-08T17:59:14Z [verbose] ADD starting CNI request ContainerID:"d780467c91a7b84e66a1c6b108d5d66e85dec21a5e28ea3e2bcd71f25b565354" Netns:"/var/run/netns/ba07f313-bd0b-4684-af8e-757ea1c6bdf0" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=stf-smoketest-smoke1-pbhxq;K8S_POD_INFRA_CONTAINER_ID=d780467c91a7b84e66a1c6b108d5d66e85dec21a5e28ea3e2bcd71f25b565354;K8S_POD_UID=612790c4-c2da-4318-89f8-c7745da26ece" Path:"" 2025-12-08T17:59:14.744212319+00:00 stderr F I1208 17:59:14.736432 40043 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:59:14.744212319+00:00 stderr F I1208 17:59:14.736926 40043 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:59:14.744212319+00:00 stderr F I1208 17:59:14.736951 40043 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:59:14.744212319+00:00 stderr F I1208 17:59:14.736960 40043 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:59:14.744212319+00:00 stderr F I1208 17:59:14.736967 40043 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:59:14.744629450+00:00 stderr F 2025-12-08T17:59:14Z [verbose] Add: service-telemetry:stf-smoketest-smoke1-pbhxq:612790c4-c2da-4318-89f8-c7745da26ece:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"d780467c91a7b84","mac":"da:46:d8:2b:88:f0"},{"name":"eth0","mac":"0a:58:0a:d9:00:54","sandbox":"/var/run/netns/ba07f313-bd0b-4684-af8e-757ea1c6bdf0"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.84/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:59:14.744857286+00:00 stderr F I1208 17:59:14.744797 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", 
Name:"stf-smoketest-smoke1-pbhxq", UID:"612790c4-c2da-4318-89f8-c7745da26ece", APIVersion:"v1", ResourceVersion:"45929", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.84/23] from ovn-kubernetes 2025-12-08T17:59:14.756567766+00:00 stderr F 2025-12-08T17:59:14Z [verbose] ADD finished CNI request ContainerID:"d780467c91a7b84e66a1c6b108d5d66e85dec21a5e28ea3e2bcd71f25b565354" Netns:"/var/run/netns/ba07f313-bd0b-4684-af8e-757ea1c6bdf0" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=stf-smoketest-smoke1-pbhxq;K8S_POD_INFRA_CONTAINER_ID=d780467c91a7b84e66a1c6b108d5d66e85dec21a5e28ea3e2bcd71f25b565354;K8S_POD_UID=612790c4-c2da-4318-89f8-c7745da26ece" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"da:46:d8:2b:88:f0\",\"name\":\"d780467c91a7b84\"},{\"mac\":\"0a:58:0a:d9:00:54\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/ba07f313-bd0b-4684-af8e-757ea1c6bdf0\"}],\"ips\":[{\"address\":\"10.217.0.84/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:59:15.030258388+00:00 stderr F 2025-12-08T17:59:15Z [verbose] ADD starting CNI request ContainerID:"b472bcb7f848f750e3d98d3c87b7fe31a8c888618d7990843018d1b071acd1c2" Netns:"/var/run/netns/659316f0-974f-472b-9ae1-5ebf8effc753" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=curl;K8S_POD_INFRA_CONTAINER_ID=b472bcb7f848f750e3d98d3c87b7fe31a8c888618d7990843018d1b071acd1c2;K8S_POD_UID=f1d063fa-3d6b-49c3-aa66-288dd70351b0" Path:"" 2025-12-08T17:59:15.261685098+00:00 stderr F I1208 17:59:15.253155 40145 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:59:15.261685098+00:00 stderr F I1208 17:59:15.253585 40145 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:59:15.261685098+00:00 stderr F I1208 17:59:15.253603 40145 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:59:15.261685098+00:00 stderr F I1208 17:59:15.253621 40145 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:59:15.261685098+00:00 stderr F I1208 17:59:15.253630 40145 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:59:15.263043773+00:00 stderr F 2025-12-08T17:59:15Z [verbose] Add: service-telemetry:curl:f1d063fa-3d6b-49c3-aa66-288dd70351b0:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"b472bcb7f848f75","mac":"96:0f:1f:b9:8c:ea"},{"name":"eth0","mac":"0a:58:0a:d9:00:55","sandbox":"/var/run/netns/659316f0-974f-472b-9ae1-5ebf8effc753"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.85/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:59:15.263043773+00:00 stderr F I1208 17:59:15.262211 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"curl", UID:"f1d063fa-3d6b-49c3-aa66-288dd70351b0", APIVersion:"v1", ResourceVersion:"45935", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.85/23] from ovn-kubernetes 2025-12-08T17:59:15.278964073+00:00 stderr F 2025-12-08T17:59:15Z [verbose] ADD finished CNI request ContainerID:"b472bcb7f848f750e3d98d3c87b7fe31a8c888618d7990843018d1b071acd1c2" Netns:"/var/run/netns/659316f0-974f-472b-9ae1-5ebf8effc753" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=curl;K8S_POD_INFRA_CONTAINER_ID=b472bcb7f848f750e3d98d3c87b7fe31a8c888618d7990843018d1b071acd1c2;K8S_POD_UID=f1d063fa-3d6b-49c3-aa66-288dd70351b0" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"96:0f:1f:b9:8c:ea\",\"name\":\"b472bcb7f848f75\"},{\"mac\":\"0a:58:0a:d9:00:55\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/659316f0-974f-472b-9ae1-5ebf8effc753\"}],\"ips\":[{\"address\":\"10.217.0.85/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:59:18.878835680+00:00 stderr F 2025-12-08T17:59:18Z [verbose] DEL starting CNI request ContainerID:"b472bcb7f848f750e3d98d3c87b7fe31a8c888618d7990843018d1b071acd1c2" Netns:"/var/run/netns/659316f0-974f-472b-9ae1-5ebf8effc753" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=curl;K8S_POD_INFRA_CONTAINER_ID=b472bcb7f848f750e3d98d3c87b7fe31a8c888618d7990843018d1b071acd1c2;K8S_POD_UID=f1d063fa-3d6b-49c3-aa66-288dd70351b0" Path:"" 2025-12-08T17:59:18.879505117+00:00 stderr F 2025-12-08T17:59:18Z [verbose] Del: service-telemetry:curl:f1d063fa-3d6b-49c3-aa66-288dd70351b0:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:59:19.060350514+00:00 stderr F 2025-12-08T17:59:19Z [verbose] DEL finished CNI request ContainerID:"b472bcb7f848f750e3d98d3c87b7fe31a8c888618d7990843018d1b071acd1c2" Netns:"/var/run/netns/659316f0-974f-472b-9ae1-5ebf8effc753" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=curl;K8S_POD_INFRA_CONTAINER_ID=b472bcb7f848f750e3d98d3c87b7fe31a8c888618d7990843018d1b071acd1c2;K8S_POD_UID=f1d063fa-3d6b-49c3-aa66-288dd70351b0" Path:"", result: "", err: 2025-12-08T17:59:52.062730888+00:00 stderr F 2025-12-08T17:59:52Z [verbose] ADD starting CNI request ContainerID:"f65e8336c15dfa0952bc851e6729d6f5b18e9601bde2f081d6c11bed66bb7f75" Netns:"/var/run/netns/cb27452c-34d2-434a-a11e-f8c49cc308c1" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-jlbqc;K8S_POD_INFRA_CONTAINER_ID=f65e8336c15dfa0952bc851e6729d6f5b18e9601bde2f081d6c11bed66bb7f75;K8S_POD_UID=5dcfd2a5-06dd-4fc6-ad8f-8979503b1a97" Path:"" 2025-12-08T17:59:52.260087137+00:00 stderr F I1208 17:59:52.251072 41854 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:59:52.260087137+00:00 stderr F I1208 17:59:52.251468 41854 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:59:52.260087137+00:00 stderr F I1208 17:59:52.251482 41854 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:59:52.260087137+00:00 stderr F I1208 17:59:52.251488 41854 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:59:52.260087137+00:00 stderr F I1208 17:59:52.251495 41854 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:59:52.260320493+00:00 stderr F 2025-12-08T17:59:52Z [verbose] Add: openshift-marketplace:community-operators-jlbqc:5dcfd2a5-06dd-4fc6-ad8f-8979503b1a97:ovn-kubernetes(ovn-kubernetes):eth0 
{"cniVersion":"0.4.0","interfaces":[{"name":"f65e8336c15dfa0","mac":"be:fb:db:26:88:aa"},{"name":"eth0","mac":"0a:58:0a:d9:00:56","sandbox":"/var/run/netns/cb27452c-34d2-434a-a11e-f8c49cc308c1"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.86/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:59:52.260654712+00:00 stderr F I1208 17:59:52.260588 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"community-operators-jlbqc", UID:"5dcfd2a5-06dd-4fc6-ad8f-8979503b1a97", APIVersion:"v1", ResourceVersion:"46055", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.86/23] from ovn-kubernetes 2025-12-08T17:59:52.275916654+00:00 stderr F 2025-12-08T17:59:52Z [verbose] ADD finished CNI request ContainerID:"f65e8336c15dfa0952bc851e6729d6f5b18e9601bde2f081d6c11bed66bb7f75" Netns:"/var/run/netns/cb27452c-34d2-434a-a11e-f8c49cc308c1" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-jlbqc;K8S_POD_INFRA_CONTAINER_ID=f65e8336c15dfa0952bc851e6729d6f5b18e9601bde2f081d6c11bed66bb7f75;K8S_POD_UID=5dcfd2a5-06dd-4fc6-ad8f-8979503b1a97" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"be:fb:db:26:88:aa\",\"name\":\"f65e8336c15dfa0\"},{\"mac\":\"0a:58:0a:d9:00:56\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/cb27452c-34d2-434a-a11e-f8c49cc308c1\"}],\"ips\":[{\"address\":\"10.217.0.86/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T18:00:00.571202202+00:00 stderr P 2025-12-08T18:00:00Z [verbose] 2025-12-08T18:00:00.571284844+00:00 stderr P ADD starting CNI request ContainerID:"b5bd753bbb1e3b7a1a3a45a2e95e5ba50be111ef86f9fb933a18cfd783496e8d" Netns:"/var/run/netns/8f6870b8-d6f3-46dd-9a0c-c8beca83cc5d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420280-hxvtb;K8S_POD_INFRA_CONTAINER_ID=b5bd753bbb1e3b7a1a3a45a2e95e5ba50be111ef86f9fb933a18cfd783496e8d;K8S_POD_UID=730f299b-bb80-45b1-a8bc-a10ce2e3567b" Path:"" 2025-12-08T18:00:00.571303225+00:00 stderr F 2025-12-08T18:00:00.828487648+00:00 stderr F I1208 18:00:00.822435 42272 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T18:00:00.828487648+00:00 stderr F I1208 18:00:00.823047 42272 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T18:00:00.828487648+00:00 stderr F I1208 18:00:00.823085 42272 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T18:00:00.828487648+00:00 stderr F I1208 18:00:00.823096 42272 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T18:00:00.828487648+00:00 stderr F I1208 18:00:00.823118 42272 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T18:00:00.828944160+00:00 stderr F 2025-12-08T18:00:00Z [verbose] Add: openshift-operator-lifecycle-manager:collect-profiles-29420280-hxvtb:730f299b-bb80-45b1-a8bc-a10ce2e3567b:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"b5bd753bbb1e3b7","mac":"ce:9d:30:97:07:40"},{"name":"eth0","mac":"0a:58:0a:d9:00:57","sandbox":"/var/run/netns/8f6870b8-d6f3-46dd-9a0c-c8beca83cc5d"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.87/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T18:00:00.829124605+00:00 stderr F I1208 18:00:00.829080 23682 event.go:364] 
Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operator-lifecycle-manager", Name:"collect-profiles-29420280-hxvtb", UID:"730f299b-bb80-45b1-a8bc-a10ce2e3567b", APIVersion:"v1", ResourceVersion:"46107", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.87/23] from ovn-kubernetes 2025-12-08T18:00:00.840001271+00:00 stderr F 2025-12-08T18:00:00Z [verbose] ADD finished CNI request ContainerID:"b5bd753bbb1e3b7a1a3a45a2e95e5ba50be111ef86f9fb933a18cfd783496e8d" Netns:"/var/run/netns/8f6870b8-d6f3-46dd-9a0c-c8beca83cc5d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420280-hxvtb;K8S_POD_INFRA_CONTAINER_ID=b5bd753bbb1e3b7a1a3a45a2e95e5ba50be111ef86f9fb933a18cfd783496e8d;K8S_POD_UID=730f299b-bb80-45b1-a8bc-a10ce2e3567b" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ce:9d:30:97:07:40\",\"name\":\"b5bd753bbb1e3b7\"},{\"mac\":\"0a:58:0a:d9:00:57\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/8f6870b8-d6f3-46dd-9a0c-c8beca83cc5d\"}],\"ips\":[{\"address\":\"10.217.0.87/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T18:00:02.248181802+00:00 stderr F 2025-12-08T18:00:02Z [verbose] DEL starting CNI request ContainerID:"b5bd753bbb1e3b7a1a3a45a2e95e5ba50be111ef86f9fb933a18cfd783496e8d" Netns:"/var/run/netns/8f6870b8-d6f3-46dd-9a0c-c8beca83cc5d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420280-hxvtb;K8S_POD_INFRA_CONTAINER_ID=b5bd753bbb1e3b7a1a3a45a2e95e5ba50be111ef86f9fb933a18cfd783496e8d;K8S_POD_UID=730f299b-bb80-45b1-a8bc-a10ce2e3567b" Path:"" 2025-12-08T18:00:02.248655216+00:00 stderr F 2025-12-08T18:00:02Z [verbose] Del: openshift-operator-lifecycle-manager:collect-profiles-29420280-hxvtb:730f299b-bb80-45b1-a8bc-a10ce2e3567b:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T18:00:02.477076672+00:00 stderr F 2025-12-08T18:00:02Z [verbose] DEL finished CNI request ContainerID:"b5bd753bbb1e3b7a1a3a45a2e95e5ba50be111ef86f9fb933a18cfd783496e8d" Netns:"/var/run/netns/8f6870b8-d6f3-46dd-9a0c-c8beca83cc5d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420280-hxvtb;K8S_POD_INFRA_CONTAINER_ID=b5bd753bbb1e3b7a1a3a45a2e95e5ba50be111ef86f9fb933a18cfd783496e8d;K8S_POD_UID=730f299b-bb80-45b1-a8bc-a10ce2e3567b" Path:"", result: "", err: 2025-12-08T18:00:03.259424096+00:00 stderr F 2025-12-08T18:00:03Z [verbose] DEL starting CNI request ContainerID:"d780467c91a7b84e66a1c6b108d5d66e85dec21a5e28ea3e2bcd71f25b565354" Netns:"/var/run/netns/ba07f313-bd0b-4684-af8e-757ea1c6bdf0" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=stf-smoketest-smoke1-pbhxq;K8S_POD_INFRA_CONTAINER_ID=d780467c91a7b84e66a1c6b108d5d66e85dec21a5e28ea3e2bcd71f25b565354;K8S_POD_UID=612790c4-c2da-4318-89f8-c7745da26ece" Path:"" 2025-12-08T18:00:03.259783426+00:00 stderr F 2025-12-08T18:00:03Z [verbose] Del: service-telemetry:stf-smoketest-smoke1-pbhxq:612790c4-c2da-4318-89f8-c7745da26ece:ovn-kubernetes:eth0 
{"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T18:00:03.445510690+00:00 stderr F 2025-12-08T18:00:03Z [verbose] DEL finished CNI request ContainerID:"d780467c91a7b84e66a1c6b108d5d66e85dec21a5e28ea3e2bcd71f25b565354" Netns:"/var/run/netns/ba07f313-bd0b-4684-af8e-757ea1c6bdf0" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=stf-smoketest-smoke1-pbhxq;K8S_POD_INFRA_CONTAINER_ID=d780467c91a7b84e66a1c6b108d5d66e85dec21a5e28ea3e2bcd71f25b565354;K8S_POD_UID=612790c4-c2da-4318-89f8-c7745da26ece" Path:"", result: "", err: 2025-12-08T18:00:04.399346024+00:00 stderr F 2025-12-08T18:00:04Z [verbose] DEL starting CNI request ContainerID:"f65e8336c15dfa0952bc851e6729d6f5b18e9601bde2f081d6c11bed66bb7f75" Netns:"/var/run/netns/cb27452c-34d2-434a-a11e-f8c49cc308c1" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-jlbqc;K8S_POD_INFRA_CONTAINER_ID=f65e8336c15dfa0952bc851e6729d6f5b18e9601bde2f081d6c11bed66bb7f75;K8S_POD_UID=5dcfd2a5-06dd-4fc6-ad8f-8979503b1a97" Path:"" 2025-12-08T18:00:04.399757365+00:00 stderr F 2025-12-08T18:00:04Z [verbose] Del: openshift-marketplace:community-operators-jlbqc:5dcfd2a5-06dd-4fc6-ad8f-8979503b1a97:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T18:00:04.601084369+00:00 stderr F 2025-12-08T18:00:04Z [verbose] DEL finished CNI request ContainerID:"f65e8336c15dfa0952bc851e6729d6f5b18e9601bde2f081d6c11bed66bb7f75" Netns:"/var/run/netns/cb27452c-34d2-434a-a11e-f8c49cc308c1" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-jlbqc;K8S_POD_INFRA_CONTAINER_ID=f65e8336c15dfa0952bc851e6729d6f5b18e9601bde2f081d6c11bed66bb7f75;K8S_POD_UID=5dcfd2a5-06dd-4fc6-ad8f-8979503b1a97" Path:"", result: "", err: 2025-12-08T18:01:18.646753562+00:00 stderr F 2025-12-08T18:01:18Z [verbose] ADD starting CNI request ContainerID:"92d99c5a4fa68f7524856ce7dcfd40196bc172967fc2489cf66b15e6dd7572b5" Netns:"/var/run/netns/0da2566e-27c6-4cbf-ad4c-0cf4f6839e4d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=infrawatch-operators-b88kp;K8S_POD_INFRA_CONTAINER_ID=92d99c5a4fa68f7524856ce7dcfd40196bc172967fc2489cf66b15e6dd7572b5;K8S_POD_UID=b3a22077-e946-43fe-a687-8eb0a8454203" Path:"" 2025-12-08T18:01:18.830481277+00:00 stderr F I1208 18:01:18.823583 44181 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T18:01:18.830481277+00:00 stderr F I1208 18:01:18.824088 44181 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T18:01:18.830481277+00:00 stderr F I1208 18:01:18.824119 44181 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T18:01:18.830481277+00:00 stderr F I1208 18:01:18.824133 44181 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T18:01:18.830481277+00:00 stderr F I1208 18:01:18.824143 44181 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 
2025-12-08T18:01:18.831034892+00:00 stderr F 2025-12-08T18:01:18Z [verbose] Add: service-telemetry:infrawatch-operators-b88kp:b3a22077-e946-43fe-a687-8eb0a8454203:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"92d99c5a4fa68f7","mac":"56:d6:99:d5:19:09"},{"name":"eth0","mac":"0a:58:0a:d9:00:58","sandbox":"/var/run/netns/0da2566e-27c6-4cbf-ad4c-0cf4f6839e4d"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.88/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T18:01:18.831421252+00:00 stderr F I1208 18:01:18.831379 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"service-telemetry", Name:"infrawatch-operators-b88kp", UID:"b3a22077-e946-43fe-a687-8eb0a8454203", APIVersion:"v1", ResourceVersion:"46314", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.88/23] from ovn-kubernetes 2025-12-08T18:01:18.843419481+00:00 stderr F 2025-12-08T18:01:18Z [verbose] ADD finished CNI request ContainerID:"92d99c5a4fa68f7524856ce7dcfd40196bc172967fc2489cf66b15e6dd7572b5" Netns:"/var/run/netns/0da2566e-27c6-4cbf-ad4c-0cf4f6839e4d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=infrawatch-operators-b88kp;K8S_POD_INFRA_CONTAINER_ID=92d99c5a4fa68f7524856ce7dcfd40196bc172967fc2489cf66b15e6dd7572b5;K8S_POD_UID=b3a22077-e946-43fe-a687-8eb0a8454203" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"56:d6:99:d5:19:09\",\"name\":\"92d99c5a4fa68f7\"},{\"mac\":\"0a:58:0a:d9:00:58\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/0da2566e-27c6-4cbf-ad4c-0cf4f6839e4d\"}],\"ips\":[{\"address\":\"10.217.0.88/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T18:01:31.201563362+00:00 stderr F 2025-12-08T18:01:31Z [verbose] DEL starting CNI request ContainerID:"92d99c5a4fa68f7524856ce7dcfd40196bc172967fc2489cf66b15e6dd7572b5" Netns:"/var/run/netns/0da2566e-27c6-4cbf-ad4c-0cf4f6839e4d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=infrawatch-operators-b88kp;K8S_POD_INFRA_CONTAINER_ID=92d99c5a4fa68f7524856ce7dcfd40196bc172967fc2489cf66b15e6dd7572b5;K8S_POD_UID=b3a22077-e946-43fe-a687-8eb0a8454203" Path:"" 2025-12-08T18:01:31.201754217+00:00 stderr F 2025-12-08T18:01:31Z [verbose] Del: service-telemetry:infrawatch-operators-b88kp:b3a22077-e946-43fe-a687-8eb0a8454203:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T18:01:31.439584393+00:00 stderr F 2025-12-08T18:01:31Z [verbose] DEL finished CNI request ContainerID:"92d99c5a4fa68f7524856ce7dcfd40196bc172967fc2489cf66b15e6dd7572b5" Netns:"/var/run/netns/0da2566e-27c6-4cbf-ad4c-0cf4f6839e4d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=service-telemetry;K8S_POD_NAME=infrawatch-operators-b88kp;K8S_POD_INFRA_CONTAINER_ID=92d99c5a4fa68f7524856ce7dcfd40196bc172967fc2489cf66b15e6dd7572b5;K8S_POD_UID=b3a22077-e946-43fe-a687-8eb0a8454203" Path:"", result: "", err: 2025-12-08T18:02:40.948270991+00:00 stderr P 2025-12-08T18:02:40Z [verbose] 2025-12-08T18:02:40.948339933+00:00 stderr P ADD starting CNI request ContainerID:"8c2078259dc7954fb8b48e4ad3a36c55973c83a30a621384b0abb7895ef2c2f5" Netns:"/var/run/netns/d9432ac4-2bdf-469d-baa9-0f454a917e12" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-must-gather-gctth;K8S_POD_NAME=must-gather-5cz8j;K8S_POD_INFRA_CONTAINER_ID=8c2078259dc7954fb8b48e4ad3a36c55973c83a30a621384b0abb7895ef2c2f5;K8S_POD_UID=736c26bc-8908-4abc-89f5-7f1d201b7e1a" Path:"" 2025-12-08T18:02:40.948372574+00:00 stderr F 2025-12-08T18:02:41.290333359+00:00 stderr F I1208 18:02:41.284332 45993 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T18:02:41.290333359+00:00 stderr F I1208 18:02:41.284781 45993 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T18:02:41.290333359+00:00 stderr F I1208 18:02:41.284794 45993 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T18:02:41.290333359+00:00 stderr F I1208 18:02:41.284803 45993 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T18:02:41.290333359+00:00 stderr F I1208 18:02:41.284824 45993 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T18:02:41.290937234+00:00 stderr P 2025-12-08T18:02:41Z [verbose] 2025-12-08T18:02:41.290966695+00:00 stderr P Add: openshift-must-gather-gctth:must-gather-5cz8j:736c26bc-8908-4abc-89f5-7f1d201b7e1a:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"8c2078259dc7954","mac":"b6:a9:8f:77:e7:18"},{"name":"eth0","mac":"0a:58:0a:d9:00:59","sandbox":"/var/run/netns/d9432ac4-2bdf-469d-baa9-0f454a917e12"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.89/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T18:02:41.290984426+00:00 stderr F 2025-12-08T18:02:41.291352566+00:00 stderr F I1208 18:02:41.291329 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-must-gather-gctth", Name:"must-gather-5cz8j", UID:"736c26bc-8908-4abc-89f5-7f1d201b7e1a", APIVersion:"v1", ResourceVersion:"46557", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.89/23] from ovn-kubernetes 2025-12-08T18:02:41.305385630+00:00 stderr F 2025-12-08T18:02:41Z [verbose] ADD finished CNI request ContainerID:"8c2078259dc7954fb8b48e4ad3a36c55973c83a30a621384b0abb7895ef2c2f5" Netns:"/var/run/netns/d9432ac4-2bdf-469d-baa9-0f454a917e12" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-must-gather-gctth;K8S_POD_NAME=must-gather-5cz8j;K8S_POD_INFRA_CONTAINER_ID=8c2078259dc7954fb8b48e4ad3a36c55973c83a30a621384b0abb7895ef2c2f5;K8S_POD_UID=736c26bc-8908-4abc-89f5-7f1d201b7e1a" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"b6:a9:8f:77:e7:18\",\"name\":\"8c2078259dc7954\"},{\"mac\":\"0a:58:0a:d9:00:59\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/d9432ac4-2bdf-469d-baa9-0f454a917e12\"}],\"ips\":[{\"address\":\"10.217.0.89/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T18:04:32.149996123+00:00 stderr F 2025-12-08T18:04:32Z [verbose] ADD starting CNI request ContainerID:"656c976a128194bbccaf372ab604816f935ff61f3b5270bd6d2cdd60be6c9c8a" Netns:"/var/run/netns/25b9c8c1-e70e-4dea-8db3-4d0ab11dbf5e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-p8pz8;K8S_POD_INFRA_CONTAINER_ID=656c976a128194bbccaf372ab604816f935ff61f3b5270bd6d2cdd60be6c9c8a;K8S_POD_UID=a2de420a-ccef-431d-8597-193d09e4fa4f" Path:"" 2025-12-08T18:04:32.511871221+00:00 stderr F I1208 18:04:32.502927 61860 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" 
enabled=false 2025-12-08T18:04:32.511871221+00:00 stderr F I1208 18:04:32.503563 61860 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T18:04:32.511871221+00:00 stderr F I1208 18:04:32.503582 61860 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T18:04:32.511871221+00:00 stderr F I1208 18:04:32.503589 61860 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T18:04:32.511871221+00:00 stderr F I1208 18:04:32.503595 61860 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T18:04:32.512805036+00:00 stderr F 2025-12-08T18:04:32Z [verbose] Add: openshift-marketplace:certified-operators-p8pz8:a2de420a-ccef-431d-8597-193d09e4fa4f:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"656c976a128194b","mac":"7e:2f:51:07:21:d3"},{"name":"eth0","mac":"0a:58:0a:d9:00:5a","sandbox":"/var/run/netns/25b9c8c1-e70e-4dea-8db3-4d0ab11dbf5e"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.90/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T18:04:32.517014747+00:00 stderr F I1208 18:04:32.513118 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"certified-operators-p8pz8", UID:"a2de420a-ccef-431d-8597-193d09e4fa4f", APIVersion:"v1", ResourceVersion:"46826", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.90/23] from ovn-kubernetes 2025-12-08T18:04:32.532502578+00:00 stderr F 2025-12-08T18:04:32Z [verbose] ADD finished CNI request ContainerID:"656c976a128194bbccaf372ab604816f935ff61f3b5270bd6d2cdd60be6c9c8a" Netns:"/var/run/netns/25b9c8c1-e70e-4dea-8db3-4d0ab11dbf5e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-p8pz8;K8S_POD_INFRA_CONTAINER_ID=656c976a128194bbccaf372ab604816f935ff61f3b5270bd6d2cdd60be6c9c8a;K8S_POD_UID=a2de420a-ccef-431d-8597-193d09e4fa4f" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"7e:2f:51:07:21:d3\",\"name\":\"656c976a128194b\"},{\"mac\":\"0a:58:0a:d9:00:5a\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/25b9c8c1-e70e-4dea-8db3-4d0ab11dbf5e\"}],\"ips\":[{\"address\":\"10.217.0.90/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T18:04:43.359045741+00:00 stderr P 2025-12-08T18:04:43Z [verbose] 2025-12-08T18:04:43.359137113+00:00 stderr P ADD starting CNI request ContainerID:"b2ae2dd42072206957faae07a63052ff3899d34cdfbee5aa2b6610adac5d0916" Netns:"/var/run/netns/1623a12c-5dbf-4767-93fe-b849484b0b50" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-5gtms;K8S_POD_INFRA_CONTAINER_ID=b2ae2dd42072206957faae07a63052ff3899d34cdfbee5aa2b6610adac5d0916;K8S_POD_UID=a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5" Path:"" 2025-12-08T18:04:43.359164994+00:00 stderr F 2025-12-08T18:04:43.517978535+00:00 stderr F I1208 18:04:43.509279 62204 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T18:04:43.517978535+00:00 stderr F I1208 18:04:43.509692 62204 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T18:04:43.517978535+00:00 stderr F I1208 18:04:43.509705 62204 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T18:04:43.517978535+00:00 stderr F I1208 18:04:43.509711 62204 envvar.go:172] "Feature gate default 
state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T18:04:43.517978535+00:00 stderr F I1208 18:04:43.509717 62204 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T18:04:43.518632423+00:00 stderr P 2025-12-08T18:04:43Z [verbose] 2025-12-08T18:04:43.518662924+00:00 stderr P Add: openshift-marketplace:redhat-operators-5gtms:a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"b2ae2dd42072206","mac":"1e:db:91:03:b5:18"},{"name":"eth0","mac":"0a:58:0a:d9:00:5b","sandbox":"/var/run/netns/1623a12c-5dbf-4767-93fe-b849484b0b50"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.91/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T18:04:43.518685444+00:00 stderr F 2025-12-08T18:04:43.519025493+00:00 stderr F I1208 18:04:43.518915 23682 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"redhat-operators-5gtms", UID:"a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5", APIVersion:"v1", ResourceVersion:"46871", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.91/23] from ovn-kubernetes 2025-12-08T18:04:43.535451439+00:00 stderr F 2025-12-08T18:04:43Z [verbose] ADD finished CNI request ContainerID:"b2ae2dd42072206957faae07a63052ff3899d34cdfbee5aa2b6610adac5d0916" Netns:"/var/run/netns/1623a12c-5dbf-4767-93fe-b849484b0b50" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-5gtms;K8S_POD_INFRA_CONTAINER_ID=b2ae2dd42072206957faae07a63052ff3899d34cdfbee5aa2b6610adac5d0916;K8S_POD_UID=a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"1e:db:91:03:b5:18\",\"name\":\"b2ae2dd42072206\"},{\"mac\":\"0a:58:0a:d9:00:5b\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/1623a12c-5dbf-4767-93fe-b849484b0b50\"}],\"ips\":[{\"address\":\"10.217.0.91/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T18:04:45.739972764+00:00 stderr F 2025-12-08T18:04:45Z [verbose] DEL starting CNI request ContainerID:"656c976a128194bbccaf372ab604816f935ff61f3b5270bd6d2cdd60be6c9c8a" Netns:"/var/run/netns/25b9c8c1-e70e-4dea-8db3-4d0ab11dbf5e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-p8pz8;K8S_POD_INFRA_CONTAINER_ID=656c976a128194bbccaf372ab604816f935ff61f3b5270bd6d2cdd60be6c9c8a;K8S_POD_UID=a2de420a-ccef-431d-8597-193d09e4fa4f" Path:"" 2025-12-08T18:04:45.740647392+00:00 stderr F 2025-12-08T18:04:45Z [verbose] Del: openshift-marketplace:certified-operators-p8pz8:a2de420a-ccef-431d-8597-193d09e4fa4f:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T18:04:45.968874194+00:00 stderr F 2025-12-08T18:04:45Z [verbose] DEL finished CNI request ContainerID:"656c976a128194bbccaf372ab604816f935ff61f3b5270bd6d2cdd60be6c9c8a" Netns:"/var/run/netns/25b9c8c1-e70e-4dea-8db3-4d0ab11dbf5e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-p8pz8;K8S_POD_INFRA_CONTAINER_ID=656c976a128194bbccaf372ab604816f935ff61f3b5270bd6d2cdd60be6c9c8a;K8S_POD_UID=a2de420a-ccef-431d-8597-193d09e4fa4f" Path:"", result: "", err: 2025-12-08T18:04:57.654309574+00:00 stderr F 2025-12-08T18:04:57Z [verbose] 
DEL starting CNI request ContainerID:"8c2078259dc7954fb8b48e4ad3a36c55973c83a30a621384b0abb7895ef2c2f5" Netns:"/var/run/netns/d9432ac4-2bdf-469d-baa9-0f454a917e12" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-must-gather-gctth;K8S_POD_NAME=must-gather-5cz8j;K8S_POD_INFRA_CONTAINER_ID=8c2078259dc7954fb8b48e4ad3a36c55973c83a30a621384b0abb7895ef2c2f5;K8S_POD_UID=736c26bc-8908-4abc-89f5-7f1d201b7e1a" Path:"" 2025-12-08T18:04:57.654609512+00:00 stderr F 2025-12-08T18:04:57Z [error] Multus: GetPod failed: pod not found during Multus GetPod, but continue to delete 2025-12-08T18:04:57.654698774+00:00 stderr F 2025-12-08T18:04:57Z [verbose] Del: openshift-must-gather-gctth:must-gather-5cz8j:unknownUID:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T18:04:57.800045019+00:00 stderr F 2025-12-08T18:04:57Z [verbose] DEL finished CNI request ContainerID:"8c2078259dc7954fb8b48e4ad3a36c55973c83a30a621384b0abb7895ef2c2f5" Netns:"/var/run/netns/d9432ac4-2bdf-469d-baa9-0f454a917e12" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-must-gather-gctth;K8S_POD_NAME=must-gather-5cz8j;K8S_POD_INFRA_CONTAINER_ID=8c2078259dc7954fb8b48e4ad3a36c55973c83a30a621384b0abb7895ef2c2f5;K8S_POD_UID=736c26bc-8908-4abc-89f5-7f1d201b7e1a" Path:"", result: "", err:
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/kube-multus/0.log
2025-12-08T17:44:02.732790317+00:00 stdout F 2025-12-08T17:44:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4ed7b6bb-26b9-4250-a2d3-36f020bbb52b 2025-12-08T17:44:02.741535565+00:00 stdout F 2025-12-08T17:44:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4ed7b6bb-26b9-4250-a2d3-36f020bbb52b to /host/opt/cni/bin/ 2025-12-08T17:44:02.857566721+00:00 stderr F 2025-12-08T17:44:02Z [verbose] multus-daemon started 2025-12-08T17:44:02.857566721+00:00 stderr F 2025-12-08T17:44:02Z [verbose] Readiness Indicator file check 2025-12-08T17:44:13.858407760+00:00 stderr P 2025-12-08T17:44:13Z [verbose] 2025-12-08T17:44:13.858523443+00:00 stderr P Readiness Indicator file check done! 2025-12-08T17:44:13.858586795+00:00 stderr F 2025-12-08T17:44:14.444561059+00:00 stderr F I1208 17:44:14.444348 6625 certificate_store.go:130] Loading cert/key pair from "/etc/cni/multus/certs/multus-client-current.pem". 2025-12-08T17:44:14.446074090+00:00 stderr F 2025-12-08T17:44:14Z [verbose] Waiting for certificate 2025-12-08T17:44:14.461099641+00:00 stderr F 2025-12-08T17:44:14Z [error] failed to list pods with new certs: pods is forbidden: User "system:node:crc" cannot list resource "pods" in API group "" at the cluster scope: can only list/watch pods with spec.nodeName field selector 2025-12-08T17:44:15.446397566+00:00 stderr F I1208 17:44:15.446274 6625 certificate_store.go:130] Loading cert/key pair from "/etc/cni/multus/certs/multus-client-current.pem". 
2025-12-08T17:44:15.446817867+00:00 stderr F 2025-12-08T17:44:15Z [verbose] Certificate found! 2025-12-08T17:44:15.447970959+00:00 stderr F 2025-12-08T17:44:15Z [verbose] server configured with chroot: /hostroot 2025-12-08T17:44:15.447970959+00:00 stderr F 2025-12-08T17:44:15Z [verbose] Filtering pod watch for node "crc" 2025-12-08T17:44:15.550952648+00:00 stderr F 2025-12-08T17:44:15Z [verbose] API readiness check 2025-12-08T17:44:15.553069125+00:00 stderr F 2025-12-08T17:44:15Z [verbose] API readiness check done! 2025-12-08T17:44:15.553466887+00:00 stderr F 2025-12-08T17:44:15Z [verbose] Generated MultusCNI config: {"binDir":"/var/lib/cni/bin","cniVersion":"0.3.1","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","namespaceIsolation":true,"globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","type":"multus-shim","auxiliaryCNIChainName":"vendor-cni-chain","daemonSocketDir":"/run/multus/socket"} 2025-12-08T17:44:15.553645272+00:00 stderr F 2025-12-08T17:44:15Z [verbose] started to watch file /host/run/multus/cni/net.d/10-ovn-kubernetes.conf 2025-12-08T17:44:17.927321108+00:00 stderr F 2025-12-08T17:44:17Z [verbose] ADD starting CNI request ContainerID:"d2ebdfc8441e7878e8a40568330da7cc9a409e78be428ef0238fe30db4f65e25" Netns:"/var/run/netns/69514686-3096-48bd-8df4-9ad4f238820b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-apiserver-operator;K8S_POD_NAME=openshift-apiserver-operator-846cbfc458-q6lj7;K8S_POD_INFRA_CONTAINER_ID=d2ebdfc8441e7878e8a40568330da7cc9a409e78be428ef0238fe30db4f65e25;K8S_POD_UID=837f85a8-fff5-46a0-b1d5-2d51271f415a" Path:"" 2025-12-08T17:44:17.990103100+00:00 stderr F 2025-12-08T17:44:17Z [verbose] ADD starting CNI request ContainerID:"68c350dfaec5080e8a88faabfaf27154a6c5538a37e7bd8bd70c0353c8cdd2ad" Netns:"/var/run/netns/01cf7c94-aee0-4bae-afb8-ad66147400a0" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-route-controller-manager;K8S_POD_NAME=route-controller-manager-776cdc94d6-qkg2q;K8S_POD_INFRA_CONTAINER_ID=68c350dfaec5080e8a88faabfaf27154a6c5538a37e7bd8bd70c0353c8cdd2ad;K8S_POD_UID=32bb589d-b6b8-4ab2-a9a2-5bae968bd2c6" Path:"" 2025-12-08T17:44:17.990657306+00:00 stderr F 2025-12-08T17:44:17Z [verbose] ADD starting CNI request ContainerID:"816097fb59ca861735960fa8c873f9eb211a033a3d03ed41d1527d9856c3c611" Netns:"/var/run/netns/63a1d337-c8f9-4aeb-a26e-4e3fd2a3d990" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-oauth-apiserver;K8S_POD_NAME=apiserver-8596bd845d-rdv9c;K8S_POD_INFRA_CONTAINER_ID=816097fb59ca861735960fa8c873f9eb211a033a3d03ed41d1527d9856c3c611;K8S_POD_UID=3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c" Path:"" 2025-12-08T17:44:18.008722248+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.008791600+00:00 stderr P ADD starting CNI request ContainerID:"6a4e4c5b074f1d16ab88e055d0f9e1cfa752bd041cd8492fbc0bc4919735264a" Netns:"/var/run/netns/bb397925-a414-45ca-8fc2-6f1a2cade641" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-machine-api;K8S_POD_NAME=machine-api-operator-755bb95488-5httz;K8S_POD_INFRA_CONTAINER_ID=6a4e4c5b074f1d16ab88e055d0f9e1cfa752bd041cd8492fbc0bc4919735264a;K8S_POD_UID=1a749ad3-837c-4804-b23c-2abb017b5b82" Path:"" 2025-12-08T17:44:18.008817111+00:00 stderr F 2025-12-08T17:44:18.034570723+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"c4b331e3708f747d5c861febcad16cb5896d1f3f0a948c754be2e18821ce0619" 
Netns:"/var/run/netns/5dfb0ffd-4675-4448-838f-e257cc65f8f1" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver-operator;K8S_POD_NAME=kube-apiserver-operator-575994946d-bhk9x;K8S_POD_INFRA_CONTAINER_ID=c4b331e3708f747d5c861febcad16cb5896d1f3f0a948c754be2e18821ce0619;K8S_POD_UID=28b33fd8-46b7-46e9-bef9-ec6b3f035300" Path:"" 2025-12-08T17:44:18.095085124+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"c9e7e4c5beb239d44fda84d69d0bf1de0551d5bbed4fbadbcd54641355317b4f" Netns:"/var/run/netns/d85de139-6d17-4389-b44f-28321ec2c3f1" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-multus;K8S_POD_NAME=multus-admission-controller-69db94689b-v9sxk;K8S_POD_INFRA_CONTAINER_ID=c9e7e4c5beb239d44fda84d69d0bf1de0551d5bbed4fbadbcd54641355317b4f;K8S_POD_UID=f5c1e280-e9c9-4a30-bb13-023852fd940b" Path:"" 2025-12-08T17:44:18.095085124+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"134079b4d4f851ef7758ad94f6c8e53b5aac3957f7ae79005f6514385384f7ed" Netns:"/var/run/netns/b15278b0-878a-4b42-8178-b0943312e365" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-machine-config-operator;K8S_POD_NAME=machine-config-operator-67c9d58cbb-4g75z;K8S_POD_INFRA_CONTAINER_ID=134079b4d4f851ef7758ad94f6c8e53b5aac3957f7ae79005f6514385384f7ed;K8S_POD_UID=2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f" Path:"" 2025-12-08T17:44:18.215081428+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"040d0a9b0bab7159bf6dba4662d179754d736d11236a3e461026f5afe21acae1" Netns:"/var/run/netns/2cbd5905-4d5e-49d3-80ad-5e157cfe74b4" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=package-server-manager-77f986bd66-d8qsj;K8S_POD_INFRA_CONTAINER_ID=040d0a9b0bab7159bf6dba4662d179754d736d11236a3e461026f5afe21acae1;K8S_POD_UID=9148080a-77e2-4847-840a-d67f837c8fbe" Path:"" 2025-12-08T17:44:18.238559108+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"a52870906a18720d6272a3d6961d0db095af769bae361b4b65db5b6303cb885d" Netns:"/var/run/netns/3e8f96cf-eab6-4897-874c-72d797702360" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-65b6cccf98-6wjgz;K8S_POD_INFRA_CONTAINER_ID=a52870906a18720d6272a3d6961d0db095af769bae361b4b65db5b6303cb885d;K8S_POD_UID=8dcd2702-e20f-439b-b2c7-27095126b87e" Path:"" 2025-12-08T17:44:18.252637382+00:00 stderr F I1208 17:44:18.238599 7754 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.252637382+00:00 stderr F I1208 17:44:18.239142 7754 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.252637382+00:00 stderr F I1208 17:44:18.239155 7754 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.252637382+00:00 stderr F I1208 17:44:18.239161 7754 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.252637382+00:00 stderr F I1208 17:44:18.239167 7754 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.255128800+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-oauth-apiserver:apiserver-8596bd845d-rdv9c:3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c:ovn-kubernetes(ovn-kubernetes):eth0 
{"cniVersion":"0.4.0","interfaces":[{"name":"816097fb59ca861","mac":"2e:49:ca:df:22:d2"},{"name":"eth0","mac":"0a:58:0a:d9:00:08","sandbox":"/var/run/netns/63a1d337-c8f9-4aeb-a26e-4e3fd2a3d990"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.8/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.255786618+00:00 stderr F I1208 17:44:18.255737 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-oauth-apiserver", Name:"apiserver-8596bd845d-rdv9c", UID:"3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c", APIVersion:"v1", ResourceVersion:"36703", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.8/23] from ovn-kubernetes 2025-12-08T17:44:18.259937631+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"e7e4e5294ae9ba605e56ade0a4247be53479964b4053089fd141b6910e3a9015" Netns:"/var/run/netns/eebcfb75-07f1-4b55-bfff-9f893537c6ab" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420250-qhrfp;K8S_POD_INFRA_CONTAINER_ID=e7e4e5294ae9ba605e56ade0a4247be53479964b4053089fd141b6910e3a9015;K8S_POD_UID=742843af-c521-4d4a-beea-e6feae8140e1" Path:"" 2025-12-08T17:44:18.274658653+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"816097fb59ca861735960fa8c873f9eb211a033a3d03ed41d1527d9856c3c611" Netns:"/var/run/netns/63a1d337-c8f9-4aeb-a26e-4e3fd2a3d990" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-oauth-apiserver;K8S_POD_NAME=apiserver-8596bd845d-rdv9c;K8S_POD_INFRA_CONTAINER_ID=816097fb59ca861735960fa8c873f9eb211a033a3d03ed41d1527d9856c3c611;K8S_POD_UID=3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"2e:49:ca:df:22:d2\",\"name\":\"816097fb59ca861\"},{\"mac\":\"0a:58:0a:d9:00:08\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/63a1d337-c8f9-4aeb-a26e-4e3fd2a3d990\"}],\"ips\":[{\"address\":\"10.217.0.8/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.344497168+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.344533319+00:00 stderr P ADD starting CNI request ContainerID:"3635ccac4190e9ac4d7e71077ab9092bae6db0a6613f789211d0b6f919a4a49e" Netns:"/var/run/netns/dda686e6-d5b9-4505-8d5e-2bbd0eb93a28" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-547dbd544d-85wdh;K8S_POD_INFRA_CONTAINER_ID=3635ccac4190e9ac4d7e71077ab9092bae6db0a6613f789211d0b6f919a4a49e;K8S_POD_UID=9af82654-06bc-4376-bff5-d6adacce9785" Path:"" 2025-12-08T17:44:18.344551649+00:00 stderr F 2025-12-08T17:44:18.360994768+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"f627e015c95ea1ee0513c36a0287fcb4c9d5de035dc2e3452f489e480b701515" Netns:"/var/run/netns/f9a5274f-e035-49ae-8555-2f9ae2f6139b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-storage-version-migrator;K8S_POD_NAME=migrator-866fcbc849-5pp5q;K8S_POD_INFRA_CONTAINER_ID=f627e015c95ea1ee0513c36a0287fcb4c9d5de035dc2e3452f489e480b701515;K8S_POD_UID=82728066-0204-4d71-acff-8779194a3e3c" Path:"" 2025-12-08T17:44:18.380853879+00:00 stderr F I1208 17:44:18.363527 7880 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.380853879+00:00 stderr F I1208 17:44:18.363729 7880 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.380853879+00:00 stderr F I1208 
17:44:18.363743 7880 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.380853879+00:00 stderr F I1208 17:44:18.363749 7880 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.380853879+00:00 stderr F I1208 17:44:18.363756 7880 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.381588439+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-multus:multus-admission-controller-69db94689b-v9sxk:f5c1e280-e9c9-4a30-bb13-023852fd940b:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"c9e7e4c5beb239d","mac":"ce:59:c3:ad:ee:fc"},{"name":"eth0","mac":"0a:58:0a:d9:00:1c","sandbox":"/var/run/netns/d85de139-6d17-4389-b44f-28321ec2c3f1"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.28/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.382011900+00:00 stderr F I1208 17:44:18.381972 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-multus", Name:"multus-admission-controller-69db94689b-v9sxk", UID:"f5c1e280-e9c9-4a30-bb13-023852fd940b", APIVersion:"v1", ResourceVersion:"36757", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.28/23] from ovn-kubernetes 2025-12-08T17:44:18.395751005+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.395801387+00:00 stderr P ADD finished CNI request ContainerID:"c9e7e4c5beb239d44fda84d69d0bf1de0551d5bbed4fbadbcd54641355317b4f" Netns:"/var/run/netns/d85de139-6d17-4389-b44f-28321ec2c3f1" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-multus;K8S_POD_NAME=multus-admission-controller-69db94689b-v9sxk;K8S_POD_INFRA_CONTAINER_ID=c9e7e4c5beb239d44fda84d69d0bf1de0551d5bbed4fbadbcd54641355317b4f;K8S_POD_UID=f5c1e280-e9c9-4a30-bb13-023852fd940b" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ce:59:c3:ad:ee:fc\",\"name\":\"c9e7e4c5beb239d\"},{\"mac\":\"0a:58:0a:d9:00:1c\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/d85de139-6d17-4389-b44f-28321ec2c3f1\"}],\"ips\":[{\"address\":\"10.217.0.28/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.395819747+00:00 stderr F 2025-12-08T17:44:18.398987923+00:00 stderr F I1208 17:44:18.391779 7995 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.398987923+00:00 stderr F I1208 17:44:18.391933 7995 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.398987923+00:00 stderr F I1208 17:44:18.391943 7995 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.398987923+00:00 stderr F I1208 17:44:18.391949 7995 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.398987923+00:00 stderr F I1208 17:44:18.391955 7995 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.399320952+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-controller-manager:controller-manager-65b6cccf98-6wjgz:8dcd2702-e20f-439b-b2c7-27095126b87e:ovn-kubernetes(ovn-kubernetes):eth0 
{"cniVersion":"0.4.0","interfaces":[{"name":"a52870906a18720","mac":"f6:d7:dc:41:ac:66"},{"name":"eth0","mac":"0a:58:0a:d9:00:0b","sandbox":"/var/run/netns/3e8f96cf-eab6-4897-874c-72d797702360"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.11/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.399722064+00:00 stderr F I1208 17:44:18.399690 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-controller-manager", Name:"controller-manager-65b6cccf98-6wjgz", UID:"8dcd2702-e20f-439b-b2c7-27095126b87e", APIVersion:"v1", ResourceVersion:"36770", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.11/23] from ovn-kubernetes 2025-12-08T17:44:18.414305202+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"a52870906a18720d6272a3d6961d0db095af769bae361b4b65db5b6303cb885d" Netns:"/var/run/netns/3e8f96cf-eab6-4897-874c-72d797702360" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-65b6cccf98-6wjgz;K8S_POD_INFRA_CONTAINER_ID=a52870906a18720d6272a3d6961d0db095af769bae361b4b65db5b6303cb885d;K8S_POD_UID=8dcd2702-e20f-439b-b2c7-27095126b87e" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"f6:d7:dc:41:ac:66\",\"name\":\"a52870906a18720\"},{\"mac\":\"0a:58:0a:d9:00:0b\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/3e8f96cf-eab6-4897-874c-72d797702360\"}],\"ips\":[{\"address\":\"10.217.0.11/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.422352251+00:00 stderr F I1208 17:44:18.414985 7972 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.422352251+00:00 stderr F I1208 17:44:18.415089 7972 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.422352251+00:00 stderr F I1208 17:44:18.415101 7972 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.422352251+00:00 stderr F I1208 17:44:18.415108 7972 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.422352251+00:00 stderr F I1208 17:44:18.415114 7972 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.422601228+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-operator-lifecycle-manager:package-server-manager-77f986bd66-d8qsj:9148080a-77e2-4847-840a-d67f837c8fbe:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"040d0a9b0bab715","mac":"5a:bf:75:4a:92:4e"},{"name":"eth0","mac":"0a:58:0a:d9:00:13","sandbox":"/var/run/netns/2cbd5905-4d5e-49d3-80ad-5e157cfe74b4"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.19/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.423083721+00:00 stderr F I1208 17:44:18.423053 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operator-lifecycle-manager", Name:"package-server-manager-77f986bd66-d8qsj", UID:"9148080a-77e2-4847-840a-d67f837c8fbe", APIVersion:"v1", ResourceVersion:"36730", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.19/23] from ovn-kubernetes 2025-12-08T17:44:18.425170938+00:00 stderr F I1208 17:44:18.419345 8019 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.425170938+00:00 stderr F I1208 17:44:18.419519 8019 envvar.go:172] "Feature gate default state" 
feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.425170938+00:00 stderr F I1208 17:44:18.419551 8019 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.425170938+00:00 stderr F I1208 17:44:18.419573 8019 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.425170938+00:00 stderr F I1208 17:44:18.419593 8019 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.425170938+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-operator-lifecycle-manager:collect-profiles-29420250-qhrfp:742843af-c521-4d4a-beea-e6feae8140e1:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"e7e4e5294ae9ba6","mac":"4a:0d:3c:48:9d:d6"},{"name":"eth0","mac":"0a:58:0a:d9:00:0a","sandbox":"/var/run/netns/eebcfb75-07f1-4b55-bfff-9f893537c6ab"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.10/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.425472787+00:00 stderr F I1208 17:44:18.425408 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operator-lifecycle-manager", Name:"collect-profiles-29420250-qhrfp", UID:"742843af-c521-4d4a-beea-e6feae8140e1", APIVersion:"v1", ResourceVersion:"36716", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.10/23] from ovn-kubernetes 2025-12-08T17:44:18.434418960+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"040d0a9b0bab7159bf6dba4662d179754d736d11236a3e461026f5afe21acae1" Netns:"/var/run/netns/2cbd5905-4d5e-49d3-80ad-5e157cfe74b4" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=package-server-manager-77f986bd66-d8qsj;K8S_POD_INFRA_CONTAINER_ID=040d0a9b0bab7159bf6dba4662d179754d736d11236a3e461026f5afe21acae1;K8S_POD_UID=9148080a-77e2-4847-840a-d67f837c8fbe" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"5a:bf:75:4a:92:4e\",\"name\":\"040d0a9b0bab715\"},{\"mac\":\"0a:58:0a:d9:00:13\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/2cbd5905-4d5e-49d3-80ad-5e157cfe74b4\"}],\"ips\":[{\"address\":\"10.217.0.19/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.437564096+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.437603517+00:00 stderr P ADD finished CNI request ContainerID:"e7e4e5294ae9ba605e56ade0a4247be53479964b4053089fd141b6910e3a9015" Netns:"/var/run/netns/eebcfb75-07f1-4b55-bfff-9f893537c6ab" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420250-qhrfp;K8S_POD_INFRA_CONTAINER_ID=e7e4e5294ae9ba605e56ade0a4247be53479964b4053089fd141b6910e3a9015;K8S_POD_UID=742843af-c521-4d4a-beea-e6feae8140e1" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"4a:0d:3c:48:9d:d6\",\"name\":\"e7e4e5294ae9ba6\"},{\"mac\":\"0a:58:0a:d9:00:0a\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/eebcfb75-07f1-4b55-bfff-9f893537c6ab\"}],\"ips\":[{\"address\":\"10.217.0.10/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.437621838+00:00 stderr F 2025-12-08T17:44:18.463706099+00:00 stderr F I1208 17:44:18.454636 7756 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.463706099+00:00 stderr F I1208 17:44:18.454743 7756 envvar.go:172] "Feature gate default state" 
feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.463706099+00:00 stderr F I1208 17:44:18.454756 7756 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.463706099+00:00 stderr F I1208 17:44:18.454763 7756 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.463706099+00:00 stderr F I1208 17:44:18.454769 7756 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.464152442+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-route-controller-manager:route-controller-manager-776cdc94d6-qkg2q:32bb589d-b6b8-4ab2-a9a2-5bae968bd2c6:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"68c350dfaec5080","mac":"fa:92:d5:24:52:79"},{"name":"eth0","mac":"0a:58:0a:d9:00:07","sandbox":"/var/run/netns/01cf7c94-aee0-4bae-afb8-ad66147400a0"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.7/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.464340657+00:00 stderr F I1208 17:44:18.464295 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-route-controller-manager", Name:"route-controller-manager-776cdc94d6-qkg2q", UID:"32bb589d-b6b8-4ab2-a9a2-5bae968bd2c6", APIVersion:"v1", ResourceVersion:"36702", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.7/23] from ovn-kubernetes 2025-12-08T17:44:18.464800869+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"7e94bf2ba52806324d32672c1909b65af02786b0323b6863e98b0796a6cc858a" Netns:"/var/run/netns/e4e9421e-b1d6-4694-91b2-68848c1d31d7" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-machine-api;K8S_POD_NAME=control-plane-machine-set-operator-75ffdb6fcd-dhfht;K8S_POD_INFRA_CONTAINER_ID=7e94bf2ba52806324d32672c1909b65af02786b0323b6863e98b0796a6cc858a;K8S_POD_UID=0b3a0959-d09e-4fd8-b931-d85bb42a3896" Path:"" 2025-12-08T17:44:18.468898890+00:00 stderr F I1208 17:44:18.461428 7716 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.468898890+00:00 stderr F I1208 17:44:18.461526 7716 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.468898890+00:00 stderr F I1208 17:44:18.461533 7716 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.468898890+00:00 stderr F I1208 17:44:18.461538 7716 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.468898890+00:00 stderr F I1208 17:44:18.461544 7716 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.468898890+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-apiserver-operator:openshift-apiserver-operator-846cbfc458-q6lj7:837f85a8-fff5-46a0-b1d5-2d51271f415a:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"d2ebdfc8441e787","mac":"6a:cf:5f:04:e3:f0"},{"name":"eth0","mac":"0a:58:0a:d9:00:06","sandbox":"/var/run/netns/69514686-3096-48bd-8df4-9ad4f238820b"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.6/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.468898890+00:00 stderr F I1208 17:44:18.467408 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-apiserver-operator", Name:"openshift-apiserver-operator-846cbfc458-q6lj7", UID:"837f85a8-fff5-46a0-b1d5-2d51271f415a", 
APIVersion:"v1", ResourceVersion:"36701", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.6/23] from ovn-kubernetes 2025-12-08T17:44:18.469269200+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.469292981+00:00 stderr P ADD starting CNI request ContainerID:"2b5aafb4cf2fe32e9e7cc2fbf243cade2e2c4a4f237dea8c6de46a683a65283d" Netns:"/var/run/netns/cfbb7aa5-a4a5-473b-88af-37644d18764b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=catalog-operator-75ff9f647d-bl822;K8S_POD_INFRA_CONTAINER_ID=2b5aafb4cf2fe32e9e7cc2fbf243cade2e2c4a4f237dea8c6de46a683a65283d;K8S_POD_UID=9a815eca-9800-4b68-adc1-5953173f4427" Path:"" 2025-12-08T17:44:18.469310532+00:00 stderr F 2025-12-08T17:44:18.486465729+00:00 stderr F I1208 17:44:18.468660 7780 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.486465729+00:00 stderr F I1208 17:44:18.468770 7780 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.486465729+00:00 stderr F I1208 17:44:18.468777 7780 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.486465729+00:00 stderr F I1208 17:44:18.468783 7780 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.486465729+00:00 stderr F I1208 17:44:18.468789 7780 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.487095637+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-machine-api:machine-api-operator-755bb95488-5httz:1a749ad3-837c-4804-b23c-2abb017b5b82:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"6a4e4c5b074f1d1","mac":"ce:74:75:49:4c:d1"},{"name":"eth0","mac":"0a:58:0a:d9:00:09","sandbox":"/var/run/netns/bb397925-a414-45ca-8fc2-6f1a2cade641"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.9/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.487415666+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.487442627+00:00 stderr P ADD finished CNI request ContainerID:"68c350dfaec5080e8a88faabfaf27154a6c5538a37e7bd8bd70c0353c8cdd2ad" Netns:"/var/run/netns/01cf7c94-aee0-4bae-afb8-ad66147400a0" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-route-controller-manager;K8S_POD_NAME=route-controller-manager-776cdc94d6-qkg2q;K8S_POD_INFRA_CONTAINER_ID=68c350dfaec5080e8a88faabfaf27154a6c5538a37e7bd8bd70c0353c8cdd2ad;K8S_POD_UID=32bb589d-b6b8-4ab2-a9a2-5bae968bd2c6" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"fa:92:d5:24:52:79\",\"name\":\"68c350dfaec5080\"},{\"mac\":\"0a:58:0a:d9:00:07\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/01cf7c94-aee0-4bae-afb8-ad66147400a0\"}],\"ips\":[{\"address\":\"10.217.0.7/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.487477338+00:00 stderr F 2025-12-08T17:44:18.487736405+00:00 stderr P 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"d2ebdfc8441e7878e8a40568330da7cc9a409e78be428ef0238fe30db4f65e25" Netns:"/var/run/netns/69514686-3096-48bd-8df4-9ad4f238820b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-apiserver-operator;K8S_POD_NAME=openshift-apiserver-operator-846cbfc458-q6lj7;K8S_POD_INFRA_CONTAINER_ID=d2ebdfc8441e7878e8a40568330da7cc9a409e78be428ef0238fe30db4f65e25;K8S_POD_UID=837f85a8-fff5-46a0-b1d5-2d51271f415a" Path:"", 
result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"6a:cf:5f:04:e3:f0\",\"name\":\"d2ebdfc8441e787\"},{\"mac\":\"0a:58:0a:d9:00:06\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/69514686-3096-48bd-8df4-9ad4f238820b\"}],\"ips\":[{\"address\":\"10.217.0.6/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.487748625+00:00 stderr F 2025-12-08T17:44:18.489044050+00:00 stderr F I1208 17:44:18.488755 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-machine-api", Name:"machine-api-operator-755bb95488-5httz", UID:"1a749ad3-837c-4804-b23c-2abb017b5b82", APIVersion:"v1", ResourceVersion:"36704", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.9/23] from ovn-kubernetes 2025-12-08T17:44:18.489326308+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.489353749+00:00 stderr P ADD starting CNI request ContainerID:"6897b984d61654e6d3a250f3c2bbf767dffe9819bb35e5b7cbcf8f25cc9d44bf" Netns:"/var/run/netns/a109e27a-f20c-467e-a22a-f980e67c3363" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=packageserver-7d4fc7d867-4kjg6;K8S_POD_INFRA_CONTAINER_ID=6897b984d61654e6d3a250f3c2bbf767dffe9819bb35e5b7cbcf8f25cc9d44bf;K8S_POD_UID=085a3a20-9b8f-4448-a4cb-89465f57027c" Path:"" 2025-12-08T17:44:18.489371530+00:00 stderr F 2025-12-08T17:44:18.502745644+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"6a4e4c5b074f1d16ab88e055d0f9e1cfa752bd041cd8492fbc0bc4919735264a" Netns:"/var/run/netns/bb397925-a414-45ca-8fc2-6f1a2cade641" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-machine-api;K8S_POD_NAME=machine-api-operator-755bb95488-5httz;K8S_POD_INFRA_CONTAINER_ID=6a4e4c5b074f1d16ab88e055d0f9e1cfa752bd041cd8492fbc0bc4919735264a;K8S_POD_UID=1a749ad3-837c-4804-b23c-2abb017b5b82" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ce:74:75:49:4c:d1\",\"name\":\"6a4e4c5b074f1d1\"},{\"mac\":\"0a:58:0a:d9:00:09\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/bb397925-a414-45ca-8fc2-6f1a2cade641\"}],\"ips\":[{\"address\":\"10.217.0.9/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.525130734+00:00 stderr F I1208 17:44:18.509215 8081 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.525130734+00:00 stderr F I1208 17:44:18.509317 8081 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.525130734+00:00 stderr F I1208 17:44:18.509324 8081 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.525130734+00:00 stderr F I1208 17:44:18.509330 8081 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.525130734+00:00 stderr F I1208 17:44:18.509336 8081 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.525556656+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-marketplace:marketplace-operator-547dbd544d-85wdh:9af82654-06bc-4376-bff5-d6adacce9785:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"3635ccac4190e9a","mac":"d6:cf:87:fe:34:58"},{"name":"eth0","mac":"0a:58:0a:d9:00:17","sandbox":"/var/run/netns/dda686e6-d5b9-4505-8d5e-2bbd0eb93a28"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.23/23","gateway":"10.217.0.1"}],"dns":{}} 
2025-12-08T17:44:18.525680709+00:00 stderr F I1208 17:44:18.525630 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"marketplace-operator-547dbd544d-85wdh", UID:"9af82654-06bc-4376-bff5-d6adacce9785", APIVersion:"v1", ResourceVersion:"36738", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.23/23] from ovn-kubernetes 2025-12-08T17:44:18.540595267+00:00 stderr F I1208 17:44:18.523701 7830 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.540595267+00:00 stderr F I1208 17:44:18.523835 7830 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.540595267+00:00 stderr F I1208 17:44:18.523845 7830 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.540595267+00:00 stderr F I1208 17:44:18.523853 7830 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.540595267+00:00 stderr F I1208 17:44:18.523860 7830 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.540595267+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-kube-apiserver-operator:kube-apiserver-operator-575994946d-bhk9x:28b33fd8-46b7-46e9-bef9-ec6b3f035300:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"c4b331e3708f747","mac":"0e:32:9a:36:b1:b5"},{"name":"eth0","mac":"0a:58:0a:d9:00:16","sandbox":"/var/run/netns/5dfb0ffd-4675-4448-838f-e257cc65f8f1"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.22/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.540711260+00:00 stderr F I1208 17:44:18.540679 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator-575994946d-bhk9x", UID:"28b33fd8-46b7-46e9-bef9-ec6b3f035300", APIVersion:"v1", ResourceVersion:"36732", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.22/23] from ovn-kubernetes 2025-12-08T17:44:18.542075406+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"3635ccac4190e9ac4d7e71077ab9092bae6db0a6613f789211d0b6f919a4a49e" Netns:"/var/run/netns/dda686e6-d5b9-4505-8d5e-2bbd0eb93a28" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-547dbd544d-85wdh;K8S_POD_INFRA_CONTAINER_ID=3635ccac4190e9ac4d7e71077ab9092bae6db0a6613f789211d0b6f919a4a49e;K8S_POD_UID=9af82654-06bc-4376-bff5-d6adacce9785" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"d6:cf:87:fe:34:58\",\"name\":\"3635ccac4190e9a\"},{\"mac\":\"0a:58:0a:d9:00:17\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/dda686e6-d5b9-4505-8d5e-2bbd0eb93a28\"}],\"ips\":[{\"address\":\"10.217.0.23/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.553616411+00:00 stderr F I1208 17:44:18.542601 7881 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.553616411+00:00 stderr F I1208 17:44:18.542726 7881 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.553616411+00:00 stderr F I1208 17:44:18.542740 7881 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.553616411+00:00 stderr F I1208 17:44:18.542747 7881 envvar.go:172] "Feature gate default 
state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.553616411+00:00 stderr F I1208 17:44:18.542753 7881 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.554308411+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-machine-config-operator:machine-config-operator-67c9d58cbb-4g75z:2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"134079b4d4f851e","mac":"b2:7c:e6:bd:07:18"},{"name":"eth0","mac":"0a:58:0a:d9:00:0d","sandbox":"/var/run/netns/b15278b0-878a-4b42-8178-b0943312e365"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.13/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.554363932+00:00 stderr F I1208 17:44:18.554318 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-machine-config-operator", Name:"machine-config-operator-67c9d58cbb-4g75z", UID:"2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f", APIVersion:"v1", ResourceVersion:"36727", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.13/23] from ovn-kubernetes 2025-12-08T17:44:18.561861246+00:00 stderr F I1208 17:44:18.556913 8096 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.561861246+00:00 stderr F I1208 17:44:18.557023 8096 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.561861246+00:00 stderr F I1208 17:44:18.557030 8096 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.561861246+00:00 stderr F I1208 17:44:18.557035 8096 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.561861246+00:00 stderr F I1208 17:44:18.557041 8096 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.562163025+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-kube-storage-version-migrator:migrator-866fcbc849-5pp5q:82728066-0204-4d71-acff-8779194a3e3c:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"f627e015c95ea1e","mac":"da:ef:10:ce:be:0c"},{"name":"eth0","mac":"0a:58:0a:d9:00:15","sandbox":"/var/run/netns/f9a5274f-e035-49ae-8555-2f9ae2f6139b"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.21/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.562401422+00:00 stderr F I1208 17:44:18.562306 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-storage-version-migrator", Name:"migrator-866fcbc849-5pp5q", UID:"82728066-0204-4d71-acff-8779194a3e3c", APIVersion:"v1", ResourceVersion:"36733", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.21/23] from ovn-kubernetes 2025-12-08T17:44:18.566721219+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"c4b331e3708f747d5c861febcad16cb5896d1f3f0a948c754be2e18821ce0619" Netns:"/var/run/netns/5dfb0ffd-4675-4448-838f-e257cc65f8f1" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver-operator;K8S_POD_NAME=kube-apiserver-operator-575994946d-bhk9x;K8S_POD_INFRA_CONTAINER_ID=c4b331e3708f747d5c861febcad16cb5896d1f3f0a948c754be2e18821ce0619;K8S_POD_UID=28b33fd8-46b7-46e9-bef9-ec6b3f035300" Path:"", result: 
"{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"0e:32:9a:36:b1:b5\",\"name\":\"c4b331e3708f747\"},{\"mac\":\"0a:58:0a:d9:00:16\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/5dfb0ffd-4675-4448-838f-e257cc65f8f1\"}],\"ips\":[{\"address\":\"10.217.0.22/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.577337299+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"134079b4d4f851ef7758ad94f6c8e53b5aac3957f7ae79005f6514385384f7ed" Netns:"/var/run/netns/b15278b0-878a-4b42-8178-b0943312e365" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-machine-config-operator;K8S_POD_NAME=machine-config-operator-67c9d58cbb-4g75z;K8S_POD_INFRA_CONTAINER_ID=134079b4d4f851ef7758ad94f6c8e53b5aac3957f7ae79005f6514385384f7ed;K8S_POD_UID=2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"b2:7c:e6:bd:07:18\",\"name\":\"134079b4d4f851e\"},{\"mac\":\"0a:58:0a:d9:00:0d\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/b15278b0-878a-4b42-8178-b0943312e365\"}],\"ips\":[{\"address\":\"10.217.0.13/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.577700679+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"f627e015c95ea1ee0513c36a0287fcb4c9d5de035dc2e3452f489e480b701515" Netns:"/var/run/netns/f9a5274f-e035-49ae-8555-2f9ae2f6139b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-storage-version-migrator;K8S_POD_NAME=migrator-866fcbc849-5pp5q;K8S_POD_INFRA_CONTAINER_ID=f627e015c95ea1ee0513c36a0287fcb4c9d5de035dc2e3452f489e480b701515;K8S_POD_UID=82728066-0204-4d71-acff-8779194a3e3c" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"da:ef:10:ce:be:0c\",\"name\":\"f627e015c95ea1e\"},{\"mac\":\"0a:58:0a:d9:00:15\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/f9a5274f-e035-49ae-8555-2f9ae2f6139b\"}],\"ips\":[{\"address\":\"10.217.0.21/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.645125128+00:00 stderr F I1208 17:44:18.636173 8163 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.645125128+00:00 stderr F I1208 17:44:18.636280 8163 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.645125128+00:00 stderr F I1208 17:44:18.636287 8163 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.645125128+00:00 stderr F I1208 17:44:18.636293 8163 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.645125128+00:00 stderr F I1208 17:44:18.636299 8163 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.645428896+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-machine-api:control-plane-machine-set-operator-75ffdb6fcd-dhfht:0b3a0959-d09e-4fd8-b931-d85bb42a3896:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"7e94bf2ba528063","mac":"1a:ac:5a:e8:66:67"},{"name":"eth0","mac":"0a:58:0a:d9:00:0e","sandbox":"/var/run/netns/e4e9421e-b1d6-4694-91b2-68848c1d31d7"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.14/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.646086033+00:00 stderr F I1208 17:44:18.645803 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-machine-api", 
Name:"control-plane-machine-set-operator-75ffdb6fcd-dhfht", UID:"0b3a0959-d09e-4fd8-b931-d85bb42a3896", APIVersion:"v1", ResourceVersion:"36724", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.14/23] from ovn-kubernetes 2025-12-08T17:44:18.658703798+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"7e94bf2ba52806324d32672c1909b65af02786b0323b6863e98b0796a6cc858a" Netns:"/var/run/netns/e4e9421e-b1d6-4694-91b2-68848c1d31d7" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-machine-api;K8S_POD_NAME=control-plane-machine-set-operator-75ffdb6fcd-dhfht;K8S_POD_INFRA_CONTAINER_ID=7e94bf2ba52806324d32672c1909b65af02786b0323b6863e98b0796a6cc858a;K8S_POD_UID=0b3a0959-d09e-4fd8-b931-d85bb42a3896" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"1a:ac:5a:e8:66:67\",\"name\":\"7e94bf2ba528063\"},{\"mac\":\"0a:58:0a:d9:00:0e\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/e4e9421e-b1d6-4694-91b2-68848c1d31d7\"}],\"ips\":[{\"address\":\"10.217.0.14/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.680089822+00:00 stderr F I1208 17:44:18.672503 8186 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.680089822+00:00 stderr F I1208 17:44:18.672665 8186 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.680089822+00:00 stderr F I1208 17:44:18.672672 8186 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.680089822+00:00 stderr F I1208 17:44:18.672678 8186 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.680089822+00:00 stderr F I1208 17:44:18.672684 8186 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.680421131+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-operator-lifecycle-manager:catalog-operator-75ff9f647d-bl822:9a815eca-9800-4b68-adc1-5953173f4427:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"2b5aafb4cf2fe32","mac":"72:c4:f0:97:5c:9c"},{"name":"eth0","mac":"0a:58:0a:d9:00:0f","sandbox":"/var/run/netns/cfbb7aa5-a4a5-473b-88af-37644d18764b"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.15/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.680696748+00:00 stderr F I1208 17:44:18.680660 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operator-lifecycle-manager", Name:"catalog-operator-75ff9f647d-bl822", UID:"9a815eca-9800-4b68-adc1-5953173f4427", APIVersion:"v1", ResourceVersion:"36731", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.15/23] from ovn-kubernetes 2025-12-08T17:44:18.690391942+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.690448034+00:00 stderr P ADD starting CNI request ContainerID:"38d50ce5086da05afa7a3898ce13b67b33051171c36a2939e44da253e41ced09" Netns:"/var/run/netns/0cdd019b-1258-4b59-aa5e-b802e10d59a8" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-service-ca-operator;K8S_POD_NAME=service-ca-operator-5b9c976747-cdz4v;K8S_POD_INFRA_CONTAINER_ID=38d50ce5086da05afa7a3898ce13b67b33051171c36a2939e44da253e41ced09;K8S_POD_UID=2554c491-6bfb-47fd-9b76-c1da12e702b1" Path:"" 2025-12-08T17:44:18.690466484+00:00 stderr F 2025-12-08T17:44:18.696811118+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request 
ContainerID:"2b5aafb4cf2fe32e9e7cc2fbf243cade2e2c4a4f237dea8c6de46a683a65283d" Netns:"/var/run/netns/cfbb7aa5-a4a5-473b-88af-37644d18764b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=catalog-operator-75ff9f647d-bl822;K8S_POD_INFRA_CONTAINER_ID=2b5aafb4cf2fe32e9e7cc2fbf243cade2e2c4a4f237dea8c6de46a683a65283d;K8S_POD_UID=9a815eca-9800-4b68-adc1-5953173f4427" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"72:c4:f0:97:5c:9c\",\"name\":\"2b5aafb4cf2fe32\"},{\"mac\":\"0a:58:0a:d9:00:0f\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/cfbb7aa5-a4a5-473b-88af-37644d18764b\"}],\"ips\":[{\"address\":\"10.217.0.15/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.701477935+00:00 stderr F I1208 17:44:18.694729 8194 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.701477935+00:00 stderr F I1208 17:44:18.695045 8194 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.701477935+00:00 stderr F I1208 17:44:18.695055 8194 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.701477935+00:00 stderr F I1208 17:44:18.695060 8194 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.701477935+00:00 stderr F I1208 17:44:18.695066 8194 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.701829654+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.701865665+00:00 stderr P Add: openshift-operator-lifecycle-manager:packageserver-7d4fc7d867-4kjg6:085a3a20-9b8f-4448-a4cb-89465f57027c:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"6897b984d61654e","mac":"72:33:f4:1b:d5:3e"},{"name":"eth0","mac":"0a:58:0a:d9:00:10","sandbox":"/var/run/netns/a109e27a-f20c-467e-a22a-f980e67c3363"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.16/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.701929427+00:00 stderr F 2025-12-08T17:44:18.702495773+00:00 stderr F I1208 17:44:18.702471 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operator-lifecycle-manager", Name:"packageserver-7d4fc7d867-4kjg6", UID:"085a3a20-9b8f-4448-a4cb-89465f57027c", APIVersion:"v1", ResourceVersion:"36726", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.16/23] from ovn-kubernetes 2025-12-08T17:44:18.705692120+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"4ed40421c9121dda3498bb864df7f9530153e9b4f72cb4cbfe60b409b4540405" Netns:"/var/run/netns/5fa74ebb-c9e6-4f09-bc66-903823850f10" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-scheduler-operator;K8S_POD_NAME=openshift-kube-scheduler-operator-54f497555d-gvb6q;K8S_POD_INFRA_CONTAINER_ID=4ed40421c9121dda3498bb864df7f9530153e9b4f72cb4cbfe60b409b4540405;K8S_POD_UID=a52d6e07-c08e-4424-8a3f-50052c311604" Path:"" 2025-12-08T17:44:18.713829792+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.713865163+00:00 stderr P ADD finished CNI request ContainerID:"6897b984d61654e6d3a250f3c2bbf767dffe9819bb35e5b7cbcf8f25cc9d44bf" Netns:"/var/run/netns/a109e27a-f20c-467e-a22a-f980e67c3363" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=packageserver-7d4fc7d867-4kjg6;K8S_POD_INFRA_CONTAINER_ID=6897b984d61654e6d3a250f3c2bbf767dffe9819bb35e5b7cbcf8f25cc9d44bf;K8S_POD_UID=085a3a20-9b8f-4448-a4cb-89465f57027c" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"72:33:f4:1b:d5:3e\",\"name\":\"6897b984d61654e\"},{\"mac\":\"0a:58:0a:d9:00:10\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/a109e27a-f20c-467e-a22a-f980e67c3363\"}],\"ips\":[{\"address\":\"10.217.0.16/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.713909094+00:00 stderr F 2025-12-08T17:44:18.779736709+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"851c6622d148b06bc6416c3a5117faa5c7f1cd7ee3259489350f9b5c41877051" Netns:"/var/run/netns/36b3e0ec-d8b5-4a46-be21-c34de785e706" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-controller-manager-operator;K8S_POD_NAME=kube-controller-manager-operator-69d5f845f8-6lgwk;K8S_POD_INFRA_CONTAINER_ID=851c6622d148b06bc6416c3a5117faa5c7f1cd7ee3259489350f9b5c41877051;K8S_POD_UID=163e109f-c588-4057-a961-86bcca55948f" Path:"" 2025-12-08T17:44:18.834950876+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.835015367+00:00 stderr P ADD starting CNI request ContainerID:"a29f0138192d5f7b697a45fae43a401e19a3d8c209024ecaae04de9e284797e6" Netns:"/var/run/netns/8b625298-e6d0-4fb0-8983-61793d1a2751" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-console;K8S_POD_NAME=downloads-747b44746d-x7wvx;K8S_POD_INFRA_CONTAINER_ID=a29f0138192d5f7b697a45fae43a401e19a3d8c209024ecaae04de9e284797e6;K8S_POD_UID=39c08b26-3404-4ffd-a53a-c86f0c654db7" Path:"" 2025-12-08T17:44:18.835126660+00:00 stderr F 2025-12-08T17:44:18.874366581+00:00 stderr F I1208 17:44:18.845151 8310 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.874366581+00:00 stderr F I1208 17:44:18.845365 8310 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.874366581+00:00 stderr F I1208 17:44:18.845391 8310 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.874366581+00:00 stderr F I1208 17:44:18.845404 8310 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.874366581+00:00 stderr F I1208 17:44:18.845418 8310 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.874366581+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-service-ca-operator:service-ca-operator-5b9c976747-cdz4v:2554c491-6bfb-47fd-9b76-c1da12e702b1:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"38d50ce5086da05","mac":"5e:dc:ef:9e:1d:6f"},{"name":"eth0","mac":"0a:58:0a:d9:00:11","sandbox":"/var/run/netns/0cdd019b-1258-4b59-aa5e-b802e10d59a8"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.17/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.874366581+00:00 stderr F I1208 17:44:18.866763 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-service-ca-operator", Name:"service-ca-operator-5b9c976747-cdz4v", UID:"2554c491-6bfb-47fd-9b76-c1da12e702b1", APIVersion:"v1", ResourceVersion:"36725", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.17/23] from ovn-kubernetes 2025-12-08T17:44:18.877977819+00:00 stderr F 
2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"38d50ce5086da05afa7a3898ce13b67b33051171c36a2939e44da253e41ced09" Netns:"/var/run/netns/0cdd019b-1258-4b59-aa5e-b802e10d59a8" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-service-ca-operator;K8S_POD_NAME=service-ca-operator-5b9c976747-cdz4v;K8S_POD_INFRA_CONTAINER_ID=38d50ce5086da05afa7a3898ce13b67b33051171c36a2939e44da253e41ced09;K8S_POD_UID=2554c491-6bfb-47fd-9b76-c1da12e702b1" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"5e:dc:ef:9e:1d:6f\",\"name\":\"38d50ce5086da05\"},{\"mac\":\"0a:58:0a:d9:00:11\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/0cdd019b-1258-4b59-aa5e-b802e10d59a8\"}],\"ips\":[{\"address\":\"10.217.0.17/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.899717122+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD starting CNI request ContainerID:"55ee1749961e18b627e9b4aefcb2a91f1db0f87d70ada2e87c92d1f25343c71e" Netns:"/var/run/netns/0cb26de4-8af2-4300-9336-936a7e776259" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-apiserver;K8S_POD_NAME=apiserver-9ddfb9f55-8h8fl;K8S_POD_INFRA_CONTAINER_ID=55ee1749961e18b627e9b4aefcb2a91f1db0f87d70ada2e87c92d1f25343c71e;K8S_POD_UID=695dd41c-159e-4e22-98e5-e27fdf4296fd" Path:"" 2025-12-08T17:44:18.964076328+00:00 stderr F I1208 17:44:18.951753 8361 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:18.964076328+00:00 stderr F I1208 17:44:18.951873 8361 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:18.964076328+00:00 stderr F I1208 17:44:18.952017 8361 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:18.964076328+00:00 stderr F I1208 17:44:18.952026 8361 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:18.964076328+00:00 stderr F I1208 17:44:18.952034 8361 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:18.964588692+00:00 stderr F 2025-12-08T17:44:18Z [verbose] Add: openshift-kube-controller-manager-operator:kube-controller-manager-operator-69d5f845f8-6lgwk:163e109f-c588-4057-a961-86bcca55948f:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"851c6622d148b06","mac":"6a:cf:47:8f:46:40"},{"name":"eth0","mac":"0a:58:0a:d9:00:22","sandbox":"/var/run/netns/36b3e0ec-d8b5-4a46-be21-c34de785e706"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.34/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:18.965076745+00:00 stderr F I1208 17:44:18.964795 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager-operator", Name:"kube-controller-manager-operator-69d5f845f8-6lgwk", UID:"163e109f-c588-4057-a961-86bcca55948f", APIVersion:"v1", ResourceVersion:"36773", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.34/23] from ovn-kubernetes 2025-12-08T17:44:18.974994656+00:00 stderr F 2025-12-08T17:44:18Z [verbose] ADD finished CNI request ContainerID:"851c6622d148b06bc6416c3a5117faa5c7f1cd7ee3259489350f9b5c41877051" Netns:"/var/run/netns/36b3e0ec-d8b5-4a46-be21-c34de785e706" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-controller-manager-operator;K8S_POD_NAME=kube-controller-manager-operator-69d5f845f8-6lgwk;K8S_POD_INFRA_CONTAINER_ID=851c6622d148b06bc6416c3a5117faa5c7f1cd7ee3259489350f9b5c41877051;K8S_POD_UID=163e109f-c588-4057-a961-86bcca55948f" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"6a:cf:47:8f:46:40\",\"name\":\"851c6622d148b06\"},{\"mac\":\"0a:58:0a:d9:00:22\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/36b3e0ec-d8b5-4a46-be21-c34de785e706\"}],\"ips\":[{\"address\":\"10.217.0.34/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:18.994994911+00:00 stderr P 2025-12-08T17:44:18Z [verbose] 2025-12-08T17:44:18.995034342+00:00 stderr P ADD starting CNI request ContainerID:"cc8ef7b3262f688f379b890c5c136fad520d130a37d57f8baf21bc1628f38f4f" Netns:"/var/run/netns/2d19456c-b736-478a-9d75-df9a00f3b37d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-multus;K8S_POD_NAME=network-metrics-daemon-54w78;K8S_POD_INFRA_CONTAINER_ID=cc8ef7b3262f688f379b890c5c136fad520d130a37d57f8baf21bc1628f38f4f;K8S_POD_UID=e666ddb1-3625-4468-9d05-21215b5041c1" Path:"" 2025-12-08T17:44:18.995052473+00:00 stderr F 2025-12-08T17:44:19.007595845+00:00 stderr F I1208 17:44:19.002894 8388 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.007595845+00:00 stderr F I1208 17:44:19.003057 8388 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.007595845+00:00 stderr F I1208 17:44:19.003070 8388 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.007595845+00:00 stderr F I1208 17:44:19.003076 8388 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.007595845+00:00 stderr F I1208 17:44:19.003083 8388 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:19.007922914+00:00 stderr F 2025-12-08T17:44:19Z [verbose] Add: openshift-console:downloads-747b44746d-x7wvx:39c08b26-3404-4ffd-a53a-c86f0c654db7:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"a29f0138192d5f7","mac":"36:da:d3:5a:06:95"},{"name":"eth0","mac":"0a:58:0a:d9:00:23","sandbox":"/var/run/netns/8b625298-e6d0-4fb0-8983-61793d1a2751"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.35/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:19.008199851+00:00 stderr F I1208 17:44:19.008168 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-console", Name:"downloads-747b44746d-x7wvx", UID:"39c08b26-3404-4ffd-a53a-c86f0c654db7", APIVersion:"v1", ResourceVersion:"36769", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.35/23] from ovn-kubernetes 2025-12-08T17:44:19.020272990+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD starting CNI request ContainerID:"05562bed0a58785cbffd80e5e63ed8943b1bccf2f61dbd7cf94aec4efa9e38cf" Netns:"/var/run/netns/1aa454ed-d6a1-411d-964d-512818ccb7fe" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-cluster-samples-operator;K8S_POD_NAME=cluster-samples-operator-6b564684c8-2cnx5;K8S_POD_INFRA_CONTAINER_ID=05562bed0a58785cbffd80e5e63ed8943b1bccf2f61dbd7cf94aec4efa9e38cf;K8S_POD_UID=f22fa87e-79cb-498c-a2ab-166d47fd70a5" Path:"" 2025-12-08T17:44:19.020323032+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD finished CNI request 
ContainerID:"a29f0138192d5f7b697a45fae43a401e19a3d8c209024ecaae04de9e284797e6" Netns:"/var/run/netns/8b625298-e6d0-4fb0-8983-61793d1a2751" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-console;K8S_POD_NAME=downloads-747b44746d-x7wvx;K8S_POD_INFRA_CONTAINER_ID=a29f0138192d5f7b697a45fae43a401e19a3d8c209024ecaae04de9e284797e6;K8S_POD_UID=39c08b26-3404-4ffd-a53a-c86f0c654db7" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"36:da:d3:5a:06:95\",\"name\":\"a29f0138192d5f7\"},{\"mac\":\"0a:58:0a:d9:00:23\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/8b625298-e6d0-4fb0-8983-61793d1a2751\"}],\"ips\":[{\"address\":\"10.217.0.35/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:19.035517016+00:00 stderr P 2025-12-08T17:44:19Z [verbose] 2025-12-08T17:44:19.036283418+00:00 stderr F ADD starting CNI request ContainerID:"c432fb564191dc1677ec4262eb92512120aaf51382e3ebf58be7ad2bd0d28836" Netns:"/var/run/netns/7078d441-1ee1-4a9c-a744-ae20eed74dd9" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-image-registry;K8S_POD_NAME=cluster-image-registry-operator-86c45576b9-rwgjl;K8S_POD_INFRA_CONTAINER_ID=c432fb564191dc1677ec4262eb92512120aaf51382e3ebf58be7ad2bd0d28836;K8S_POD_UID=1cd09f9c-6a6f-438a-a982-082edc35a55c" Path:"" 2025-12-08T17:44:19.041679985+00:00 stderr F I1208 17:44:19.032174 8422 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:19.041679985+00:00 stderr F I1208 17:44:19.032275 8422 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.041679985+00:00 stderr F I1208 17:44:19.032283 8422 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.041679985+00:00 stderr F I1208 17:44:19.032294 8422 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.041679985+00:00 stderr F I1208 17:44:19.032299 8422 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.042267251+00:00 stderr F 2025-12-08T17:44:19Z [verbose] Add: openshift-apiserver:apiserver-9ddfb9f55-8h8fl:695dd41c-159e-4e22-98e5-e27fdf4296fd:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"55ee1749961e18b","mac":"ce:63:a4:6c:92:f2"},{"name":"eth0","mac":"0a:58:0a:d9:00:05","sandbox":"/var/run/netns/0cb26de4-8af2-4300-9336-936a7e776259"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.5/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:19.042545598+00:00 stderr F I1208 17:44:19.042490 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-apiserver", Name:"apiserver-9ddfb9f55-8h8fl", UID:"695dd41c-159e-4e22-98e5-e27fdf4296fd", APIVersion:"v1", ResourceVersion:"36737", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.5/23] from ovn-kubernetes 2025-12-08T17:44:19.044869242+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD starting CNI request ContainerID:"860f26053ce76290b7bc171c60796fd1bc70f38dff02f1d38ba7ca5ff60bc527" Netns:"/var/run/netns/efd772e0-1622-49ad-ab00-b839991e98ba" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-service-ca;K8S_POD_NAME=service-ca-74545575db-d69qv;K8S_POD_INFRA_CONTAINER_ID=860f26053ce76290b7bc171c60796fd1bc70f38dff02f1d38ba7ca5ff60bc527;K8S_POD_UID=ada44265-dcab-408c-843e-e5c5a45aa138" Path:"" 2025-12-08T17:44:19.067006536+00:00 stderr F 2025-12-08T17:44:19Z 
[verbose] ADD finished CNI request ContainerID:"55ee1749961e18b627e9b4aefcb2a91f1db0f87d70ada2e87c92d1f25343c71e" Netns:"/var/run/netns/0cb26de4-8af2-4300-9336-936a7e776259" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-apiserver;K8S_POD_NAME=apiserver-9ddfb9f55-8h8fl;K8S_POD_INFRA_CONTAINER_ID=55ee1749961e18b627e9b4aefcb2a91f1db0f87d70ada2e87c92d1f25343c71e;K8S_POD_UID=695dd41c-159e-4e22-98e5-e27fdf4296fd" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ce:63:a4:6c:92:f2\",\"name\":\"55ee1749961e18b\"},{\"mac\":\"0a:58:0a:d9:00:05\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/0cb26de4-8af2-4300-9336-936a7e776259\"}],\"ips\":[{\"address\":\"10.217.0.5/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:19.150382329+00:00 stderr F I1208 17:44:19.082827 8317 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.150382329+00:00 stderr F I1208 17:44:19.083021 8317 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.150382329+00:00 stderr F I1208 17:44:19.083030 8317 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.150382329+00:00 stderr F I1208 17:44:19.083039 8317 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:19.150382329+00:00 stderr F I1208 17:44:19.083046 8317 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.150680488+00:00 stderr P 2025-12-08T17:44:19Z [verbose] 2025-12-08T17:44:19.150702969+00:00 stderr P Add: openshift-kube-scheduler-operator:openshift-kube-scheduler-operator-54f497555d-gvb6q:a52d6e07-c08e-4424-8a3f-50052c311604:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"4ed40421c9121dd","mac":"8e:a8:cc:63:9a:70"},{"name":"eth0","mac":"0a:58:0a:d9:00:12","sandbox":"/var/run/netns/5fa74ebb-c9e6-4f09-bc66-903823850f10"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.18/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:19.150720539+00:00 stderr F 2025-12-08T17:44:19.150909264+00:00 stderr F I1208 17:44:19.150829 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator-54f497555d-gvb6q", UID:"a52d6e07-c08e-4424-8a3f-50052c311604", APIVersion:"v1", ResourceVersion:"36728", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.18/23] from ovn-kubernetes 2025-12-08T17:44:19.168472113+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD finished CNI request ContainerID:"4ed40421c9121dda3498bb864df7f9530153e9b4f72cb4cbfe60b409b4540405" Netns:"/var/run/netns/5fa74ebb-c9e6-4f09-bc66-903823850f10" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-scheduler-operator;K8S_POD_NAME=openshift-kube-scheduler-operator-54f497555d-gvb6q;K8S_POD_INFRA_CONTAINER_ID=4ed40421c9121dda3498bb864df7f9530153e9b4f72cb4cbfe60b409b4540405;K8S_POD_UID=a52d6e07-c08e-4424-8a3f-50052c311604" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"8e:a8:cc:63:9a:70\",\"name\":\"4ed40421c9121dd\"},{\"mac\":\"0a:58:0a:d9:00:12\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/5fa74ebb-c9e6-4f09-bc66-903823850f10\"}],\"ips\":[{\"address\":\"10.217.0.18/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:19.239218313+00:00 stderr F 2025-12-08T17:44:19Z 
[verbose] ADD starting CNI request ContainerID:"044dfd7745fd201d9ca0be6708e7ec266db6338da8c530c6a9a94e1a77e85897" Netns:"/var/run/netns/c65ecb7c-7363-4ef7-a2f7-6702f5f3c978" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=olm-operator-5cdf44d969-ggh59;K8S_POD_INFRA_CONTAINER_ID=044dfd7745fd201d9ca0be6708e7ec266db6338da8c530c6a9a94e1a77e85897;K8S_POD_UID=c987ac4d-5129-45aa-afe4-ab42b6907462" Path:"" 2025-12-08T17:44:19.280729965+00:00 stderr F I1208 17:44:19.275008 8482 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.280729965+00:00 stderr F I1208 17:44:19.275248 8482 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.280729965+00:00 stderr F I1208 17:44:19.275262 8482 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:19.280729965+00:00 stderr F I1208 17:44:19.275268 8482 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.280729965+00:00 stderr F I1208 17:44:19.275275 8482 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.281286060+00:00 stderr P 2025-12-08T17:44:19Z [verbose] 2025-12-08T17:44:19.281313011+00:00 stderr P Add: openshift-multus:network-metrics-daemon-54w78:e666ddb1-3625-4468-9d05-21215b5041c1:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"cc8ef7b3262f688","mac":"7a:ba:e4:42:8a:9f"},{"name":"eth0","mac":"0a:58:0a:d9:00:03","sandbox":"/var/run/netns/2d19456c-b736-478a-9d75-df9a00f3b37d"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.3/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:19.281330521+00:00 stderr F 2025-12-08T17:44:19.281645870+00:00 stderr F I1208 17:44:19.281593 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-multus", Name:"network-metrics-daemon-54w78", UID:"e666ddb1-3625-4468-9d05-21215b5041c1", APIVersion:"v1", ResourceVersion:"36618", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.3/23] from ovn-kubernetes 2025-12-08T17:44:19.293424062+00:00 stderr P 2025-12-08T17:44:19Z [verbose] 2025-12-08T17:44:19.294285375+00:00 stderr P ADD finished CNI request ContainerID:"cc8ef7b3262f688f379b890c5c136fad520d130a37d57f8baf21bc1628f38f4f" Netns:"/var/run/netns/2d19456c-b736-478a-9d75-df9a00f3b37d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-multus;K8S_POD_NAME=network-metrics-daemon-54w78;K8S_POD_INFRA_CONTAINER_ID=cc8ef7b3262f688f379b890c5c136fad520d130a37d57f8baf21bc1628f38f4f;K8S_POD_UID=e666ddb1-3625-4468-9d05-21215b5041c1" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"7a:ba:e4:42:8a:9f\",\"name\":\"cc8ef7b3262f688\"},{\"mac\":\"0a:58:0a:d9:00:03\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/2d19456c-b736-478a-9d75-df9a00f3b37d\"}],\"ips\":[{\"address\":\"10.217.0.3/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:19.294316836+00:00 stderr F 2025-12-08T17:44:19.332369273+00:00 stderr F I1208 17:44:19.320748 8508 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.332369273+00:00 stderr F I1208 17:44:19.321127 8508 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.332369273+00:00 stderr F I1208 17:44:19.321139 8508 envvar.go:172] 
"Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:19.332369273+00:00 stderr F I1208 17:44:19.321145 8508 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.332369273+00:00 stderr F I1208 17:44:19.321151 8508 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.333210727+00:00 stderr F 2025-12-08T17:44:19Z [verbose] Add: openshift-cluster-samples-operator:cluster-samples-operator-6b564684c8-2cnx5:f22fa87e-79cb-498c-a2ab-166d47fd70a5:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"05562bed0a58785","mac":"b6:2e:c2:ad:98:c4"},{"name":"eth0","mac":"0a:58:0a:d9:00:18","sandbox":"/var/run/netns/1aa454ed-d6a1-411d-964d-512818ccb7fe"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.24/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:19.333622878+00:00 stderr F I1208 17:44:19.333589 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-cluster-samples-operator", Name:"cluster-samples-operator-6b564684c8-2cnx5", UID:"f22fa87e-79cb-498c-a2ab-166d47fd70a5", APIVersion:"v1", ResourceVersion:"36751", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.24/23] from ovn-kubernetes 2025-12-08T17:44:19.344584587+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD finished CNI request ContainerID:"05562bed0a58785cbffd80e5e63ed8943b1bccf2f61dbd7cf94aec4efa9e38cf" Netns:"/var/run/netns/1aa454ed-d6a1-411d-964d-512818ccb7fe" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-cluster-samples-operator;K8S_POD_NAME=cluster-samples-operator-6b564684c8-2cnx5;K8S_POD_INFRA_CONTAINER_ID=05562bed0a58785cbffd80e5e63ed8943b1bccf2f61dbd7cf94aec4efa9e38cf;K8S_POD_UID=f22fa87e-79cb-498c-a2ab-166d47fd70a5" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"b6:2e:c2:ad:98:c4\",\"name\":\"05562bed0a58785\"},{\"mac\":\"0a:58:0a:d9:00:18\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/1aa454ed-d6a1-411d-964d-512818ccb7fe\"}],\"ips\":[{\"address\":\"10.217.0.24/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:19.347659021+00:00 stderr P 2025-12-08T17:44:19Z [verbose] 2025-12-08T17:44:19.347691262+00:00 stderr P ADD starting CNI request ContainerID:"171be8675a537f5c91f07695f1d02d53877dd94bf1dea71665144e6537718f47" Netns:"/var/run/netns/ba01ec8b-5a43-4905-9d60-918c17df240e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-dns-operator;K8S_POD_NAME=dns-operator-799b87ffcd-9b988;K8S_POD_INFRA_CONTAINER_ID=171be8675a537f5c91f07695f1d02d53877dd94bf1dea71665144e6537718f47;K8S_POD_UID=6be72eaf-a179-4e2b-a12d-4b5dbb213183" Path:"" 2025-12-08T17:44:19.347721622+00:00 stderr F 2025-12-08T17:44:19.410298179+00:00 stderr F I1208 17:44:19.402767 8624 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.410298179+00:00 stderr F I1208 17:44:19.402996 8624 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:19.410298179+00:00 stderr F I1208 17:44:19.403004 8624 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.410298179+00:00 stderr F I1208 17:44:19.403205 8624 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.410298179+00:00 stderr F I1208 17:44:19.403213 8624 envvar.go:172] "Feature gate default state" 
feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.410298179+00:00 stderr F 2025-12-08T17:44:19Z [verbose] Add: openshift-operator-lifecycle-manager:olm-operator-5cdf44d969-ggh59:c987ac4d-5129-45aa-afe4-ab42b6907462:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"044dfd7745fd201","mac":"36:a1:c1:5d:b5:8c"},{"name":"eth0","mac":"0a:58:0a:d9:00:1a","sandbox":"/var/run/netns/c65ecb7c-7363-4ef7-a2f7-6702f5f3c978"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.26/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:19.411164433+00:00 stderr F I1208 17:44:19.410448 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operator-lifecycle-manager", Name:"olm-operator-5cdf44d969-ggh59", UID:"c987ac4d-5129-45aa-afe4-ab42b6907462", APIVersion:"v1", ResourceVersion:"36850", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.26/23] from ovn-kubernetes 2025-12-08T17:44:19.435365833+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD finished CNI request ContainerID:"044dfd7745fd201d9ca0be6708e7ec266db6338da8c530c6a9a94e1a77e85897" Netns:"/var/run/netns/c65ecb7c-7363-4ef7-a2f7-6702f5f3c978" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=olm-operator-5cdf44d969-ggh59;K8S_POD_INFRA_CONTAINER_ID=044dfd7745fd201d9ca0be6708e7ec266db6338da8c530c6a9a94e1a77e85897;K8S_POD_UID=c987ac4d-5129-45aa-afe4-ab42b6907462" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"36:a1:c1:5d:b5:8c\",\"name\":\"044dfd7745fd201\"},{\"mac\":\"0a:58:0a:d9:00:1a\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/c65ecb7c-7363-4ef7-a2f7-6702f5f3c978\"}],\"ips\":[{\"address\":\"10.217.0.26/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:19.487540196+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD starting CNI request ContainerID:"9accb4d0366a7a6ee7e967e14b871a878f5bb1961d14d60f8a5f3d145e7ccfef" Netns:"/var/run/netns/25917d5d-62f7-4c47-beb2-ffb16bd2a214" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication-operator;K8S_POD_NAME=authentication-operator-7f5c659b84-5scww;K8S_POD_INFRA_CONTAINER_ID=9accb4d0366a7a6ee7e967e14b871a878f5bb1961d14d60f8a5f3d145e7ccfef;K8S_POD_UID=4c48eb41-252c-441b-9506-329d9f6b0371" Path:"" 2025-12-08T17:44:19.536306776+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD starting CNI request ContainerID:"6278e7a7e9899b2ad5ce35a00b137621f1dfd9cc576a596b193fa1aabb7e545e" Netns:"/var/run/netns/c8a3bad3-908a-4712-96d2-cec609a7464d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-storage-version-migrator-operator;K8S_POD_NAME=kube-storage-version-migrator-operator-565b79b866-6gkgz;K8S_POD_INFRA_CONTAINER_ID=6278e7a7e9899b2ad5ce35a00b137621f1dfd9cc576a596b193fa1aabb7e545e;K8S_POD_UID=dbad8204-9790-4f15-a74c-0149d19a4785" Path:"" 2025-12-08T17:44:19.543448351+00:00 stderr F I1208 17:44:19.529228 8686 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.543448351+00:00 stderr F I1208 17:44:19.529515 8686 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.543448351+00:00 stderr F I1208 17:44:19.529523 8686 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.543448351+00:00 stderr F I1208 17:44:19.529529 8686 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 
2025-12-08T17:44:19.543448351+00:00 stderr F I1208 17:44:19.529535 8686 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.544580412+00:00 stderr P 2025-12-08T17:44:19Z [verbose] 2025-12-08T17:44:19.544692255+00:00 stderr P Add: openshift-dns-operator:dns-operator-799b87ffcd-9b988:6be72eaf-a179-4e2b-a12d-4b5dbb213183:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"171be8675a537f5","mac":"9a:7f:3f:8e:1d:15"},{"name":"eth0","mac":"0a:58:0a:d9:00:24","sandbox":"/var/run/netns/ba01ec8b-5a43-4905-9d60-918c17df240e"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.36/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:19.544712305+00:00 stderr F 2025-12-08T17:44:19.545859757+00:00 stderr F I1208 17:44:19.544980 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-dns-operator", Name:"dns-operator-799b87ffcd-9b988", UID:"6be72eaf-a179-4e2b-a12d-4b5dbb213183", APIVersion:"v1", ResourceVersion:"36768", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.36/23] from ovn-kubernetes 2025-12-08T17:44:19.557119865+00:00 stderr P 2025-12-08T17:44:19Z [verbose] 2025-12-08T17:44:19.557503075+00:00 stderr P ADD finished CNI request ContainerID:"171be8675a537f5c91f07695f1d02d53877dd94bf1dea71665144e6537718f47" Netns:"/var/run/netns/ba01ec8b-5a43-4905-9d60-918c17df240e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-dns-operator;K8S_POD_NAME=dns-operator-799b87ffcd-9b988;K8S_POD_INFRA_CONTAINER_ID=171be8675a537f5c91f07695f1d02d53877dd94bf1dea71665144e6537718f47;K8S_POD_UID=6be72eaf-a179-4e2b-a12d-4b5dbb213183" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"9a:7f:3f:8e:1d:15\",\"name\":\"171be8675a537f5\"},{\"mac\":\"0a:58:0a:d9:00:24\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/ba01ec8b-5a43-4905-9d60-918c17df240e\"}],\"ips\":[{\"address\":\"10.217.0.36/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:19.557525575+00:00 stderr F 2025-12-08T17:44:19.564928247+00:00 stderr F I1208 17:44:19.558799 8543 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.564928247+00:00 stderr F I1208 17:44:19.558955 8543 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.564928247+00:00 stderr F I1208 17:44:19.558963 8543 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.564928247+00:00 stderr F I1208 17:44:19.558969 8543 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:19.564928247+00:00 stderr F I1208 17:44:19.558976 8543 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.565296688+00:00 stderr F 2025-12-08T17:44:19Z [verbose] Add: openshift-service-ca:service-ca-74545575db-d69qv:ada44265-dcab-408c-843e-e5c5a45aa138:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"860f26053ce7629","mac":"3a:27:c8:8a:d1:1f"},{"name":"eth0","mac":"0a:58:0a:d9:00:25","sandbox":"/var/run/netns/efd772e0-1622-49ad-ab00-b839991e98ba"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.37/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:19.566590392+00:00 stderr F I1208 17:44:19.565452 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-service-ca", Name:"service-ca-74545575db-d69qv", 
UID:"ada44265-dcab-408c-843e-e5c5a45aa138", APIVersion:"v1", ResourceVersion:"36766", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.37/23] from ovn-kubernetes 2025-12-08T17:44:19.583832473+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD finished CNI request ContainerID:"860f26053ce76290b7bc171c60796fd1bc70f38dff02f1d38ba7ca5ff60bc527" Netns:"/var/run/netns/efd772e0-1622-49ad-ab00-b839991e98ba" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-service-ca;K8S_POD_NAME=service-ca-74545575db-d69qv;K8S_POD_INFRA_CONTAINER_ID=860f26053ce76290b7bc171c60796fd1bc70f38dff02f1d38ba7ca5ff60bc527;K8S_POD_UID=ada44265-dcab-408c-843e-e5c5a45aa138" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"3a:27:c8:8a:d1:1f\",\"name\":\"860f26053ce7629\"},{\"mac\":\"0a:58:0a:d9:00:25\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/efd772e0-1622-49ad-ab00-b839991e98ba\"}],\"ips\":[{\"address\":\"10.217.0.37/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:19.587512514+00:00 stderr F I1208 17:44:19.581729 8528 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:19.587512514+00:00 stderr F I1208 17:44:19.582011 8528 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.587512514+00:00 stderr F I1208 17:44:19.582020 8528 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.587512514+00:00 stderr F I1208 17:44:19.582027 8528 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.587512514+00:00 stderr F I1208 17:44:19.582032 8528 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.587973746+00:00 stderr F 2025-12-08T17:44:19Z [verbose] Add: openshift-image-registry:cluster-image-registry-operator-86c45576b9-rwgjl:1cd09f9c-6a6f-438a-a982-082edc35a55c:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"c432fb564191dc1","mac":"9a:59:5e:18:57:22"},{"name":"eth0","mac":"0a:58:0a:d9:00:14","sandbox":"/var/run/netns/7078d441-1ee1-4a9c-a744-ae20eed74dd9"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.20/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:19.591348078+00:00 stderr F I1208 17:44:19.589640 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-image-registry", Name:"cluster-image-registry-operator-86c45576b9-rwgjl", UID:"1cd09f9c-6a6f-438a-a982-082edc35a55c", APIVersion:"v1", ResourceVersion:"36734", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.20/23] from ovn-kubernetes 2025-12-08T17:44:19.609972076+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD finished CNI request ContainerID:"c432fb564191dc1677ec4262eb92512120aaf51382e3ebf58be7ad2bd0d28836" Netns:"/var/run/netns/7078d441-1ee1-4a9c-a744-ae20eed74dd9" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-image-registry;K8S_POD_NAME=cluster-image-registry-operator-86c45576b9-rwgjl;K8S_POD_INFRA_CONTAINER_ID=c432fb564191dc1677ec4262eb92512120aaf51382e3ebf58be7ad2bd0d28836;K8S_POD_UID=1cd09f9c-6a6f-438a-a982-082edc35a55c" Path:"", result: 
"{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"9a:59:5e:18:57:22\",\"name\":\"c432fb564191dc1\"},{\"mac\":\"0a:58:0a:d9:00:14\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/7078d441-1ee1-4a9c-a744-ae20eed74dd9\"}],\"ips\":[{\"address\":\"10.217.0.20/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:19.619697811+00:00 stderr P 2025-12-08T17:44:19Z [verbose] 2025-12-08T17:44:19.619751803+00:00 stderr P ADD starting CNI request ContainerID:"137e813a0937cb381bde370d2667c10d162673b57a4dea10c7dc09f970f70b80" Netns:"/var/run/netns/8b1532b8-29fb-4880-8806-6103578f78f5" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-console;K8S_POD_NAME=console-64d44f6ddf-dhfvx;K8S_POD_INFRA_CONTAINER_ID=137e813a0937cb381bde370d2667c10d162673b57a4dea10c7dc09f970f70b80;K8S_POD_UID=a272b1fd-864b-4107-a4fd-6f6ab82a1d34" Path:"" 2025-12-08T17:44:19.619770373+00:00 stderr F 2025-12-08T17:44:19.635240785+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD starting CNI request ContainerID:"119b829edb31a9542c4e4278438498a6d436d9ffaf05166df40ae801a6ad750b" Netns:"/var/run/netns/c7f8b8fc-1c5b-42f8-be75-26d280d89b8e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=hostpath-provisioner;K8S_POD_NAME=csi-hostpathplugin-qrls7;K8S_POD_INFRA_CONTAINER_ID=119b829edb31a9542c4e4278438498a6d436d9ffaf05166df40ae801a6ad750b;K8S_POD_UID=b81b63fd-c7d6-4446-ab93-c62912586002" Path:"" 2025-12-08T17:44:19.651164889+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD starting CNI request ContainerID:"2449e51c8bd92a5ae9266cc65003e01d779611a71a071521302420a8b6a964d5" Netns:"/var/run/netns/06622877-dbb0-4afd-b918-d88bac8bdbab" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-ingress-canary;K8S_POD_NAME=ingress-canary-psjrr;K8S_POD_INFRA_CONTAINER_ID=2449e51c8bd92a5ae9266cc65003e01d779611a71a071521302420a8b6a964d5;K8S_POD_UID=c32d3580-29a1-4299-8926-e4c9caa4ff86" Path:"" 2025-12-08T17:44:19.675823222+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD starting CNI request ContainerID:"7e30c5d9c26bffaf27132475d044e517d0468cb10a56a5b9876e62c41fd6908b" Netns:"/var/run/netns/1dd7b96f-c198-4d6f-8673-f17f7bc0e424" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-config-operator;K8S_POD_NAME=openshift-config-operator-5777786469-v69x6;K8S_POD_INFRA_CONTAINER_ID=7e30c5d9c26bffaf27132475d044e517d0468cb10a56a5b9876e62c41fd6908b;K8S_POD_UID=ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b" Path:"" 2025-12-08T17:44:19.690273736+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD starting CNI request ContainerID:"07295a2f7b5b3c65edd96320d163fc6e805c5e086dfc16c48b794802b29335a5" Netns:"/var/run/netns/7c3653a9-0048-4c3a-bd93-e058387b37ec" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-etcd-operator;K8S_POD_NAME=etcd-operator-69b85846b6-k26tc;K8S_POD_INFRA_CONTAINER_ID=07295a2f7b5b3c65edd96320d163fc6e805c5e086dfc16c48b794802b29335a5;K8S_POD_UID=1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f" Path:"" 2025-12-08T17:44:19.738745079+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD starting CNI request ContainerID:"ec4f637da38df73ec133f4025cc97b99242e32adbee5eb6a0c499617f008b5d0" Netns:"/var/run/netns/ab623c7b-d643-4a27-9e12-e78dd50fb2d8" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager-operator;K8S_POD_NAME=openshift-controller-manager-operator-686468bdd5-m5ltz;K8S_POD_INFRA_CONTAINER_ID=ec4f637da38df73ec133f4025cc97b99242e32adbee5eb6a0c499617f008b5d0;K8S_POD_UID=0f90a7a2-721d-4929-a4fa-fd1d2019b4cd" Path:"" 2025-12-08T17:44:19.739092698+00:00 
stderr P 2025-12-08T17:44:19Z [verbose] 2025-12-08T17:44:19.739119779+00:00 stderr P ADD starting CNI request ContainerID:"f53011be2342d2e1df81bdc8b416956b013bd24a064c9fab44113e845132ed40" Netns:"/var/run/netns/95bbb289-82d7-41e4-9d82-a2bdaa8e7f58" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-machine-config-operator;K8S_POD_NAME=machine-config-controller-f9cdd68f7-p88k2;K8S_POD_INFRA_CONTAINER_ID=f53011be2342d2e1df81bdc8b416956b013bd24a064c9fab44113e845132ed40;K8S_POD_UID=78316998-7ca1-4495-997b-bad16252fa84" Path:"" 2025-12-08T17:44:19.739143859+00:00 stderr F 2025-12-08T17:44:19.803240758+00:00 stderr F I1208 17:44:19.783293 8788 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.803240758+00:00 stderr F I1208 17:44:19.789538 8788 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.803240758+00:00 stderr F I1208 17:44:19.789546 8788 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:19.803240758+00:00 stderr F I1208 17:44:19.789552 8788 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.803240758+00:00 stderr F I1208 17:44:19.789558 8788 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.803942547+00:00 stderr F 2025-12-08T17:44:19Z [verbose] Add: openshift-authentication-operator:authentication-operator-7f5c659b84-5scww:4c48eb41-252c-441b-9506-329d9f6b0371:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"9accb4d0366a7a6","mac":"5e:10:e9:4c:ed:9d"},{"name":"eth0","mac":"0a:58:0a:d9:00:20","sandbox":"/var/run/netns/25917d5d-62f7-4c47-beb2-ffb16bd2a214"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.32/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:19.803942547+00:00 stderr F I1208 17:44:19.803812 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-authentication-operator", Name:"authentication-operator-7f5c659b84-5scww", UID:"4c48eb41-252c-441b-9506-329d9f6b0371", APIVersion:"v1", ResourceVersion:"36762", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.32/23] from ovn-kubernetes 2025-12-08T17:44:19.834699536+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD finished CNI request ContainerID:"9accb4d0366a7a6ee7e967e14b871a878f5bb1961d14d60f8a5f3d145e7ccfef" Netns:"/var/run/netns/25917d5d-62f7-4c47-beb2-ffb16bd2a214" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication-operator;K8S_POD_NAME=authentication-operator-7f5c659b84-5scww;K8S_POD_INFRA_CONTAINER_ID=9accb4d0366a7a6ee7e967e14b871a878f5bb1961d14d60f8a5f3d145e7ccfef;K8S_POD_UID=4c48eb41-252c-441b-9506-329d9f6b0371" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"5e:10:e9:4c:ed:9d\",\"name\":\"9accb4d0366a7a6\"},{\"mac\":\"0a:58:0a:d9:00:20\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/25917d5d-62f7-4c47-beb2-ffb16bd2a214\"}],\"ips\":[{\"address\":\"10.217.0.32/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:19.965679249+00:00 stderr F 2025-12-08T17:44:19Z [verbose] ADD starting CNI request ContainerID:"803ad93dfa8700dbf09b3e6a4e33d63e186ef2c8cc3dfa4d900a01a2b041fbcf" Netns:"/var/run/netns/9665a549-0227-4119-90f3-dff23b56b248" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-66458b6674-ztdrc;K8S_POD_INFRA_CONTAINER_ID=803ad93dfa8700dbf09b3e6a4e33d63e186ef2c8cc3dfa4d900a01a2b041fbcf;K8S_POD_UID=9bdb30d2-8f69-4d2d-9bf1-3bc70f85369e" Path:"" 2025-12-08T17:44:20.308150730+00:00 stderr F 2025-12-08T17:44:20Z [verbose] ADD starting CNI request ContainerID:"010f527922a84c158a643229ed5cb60f8dc1f0dddf8d575e30ead9ed434fdc86" Netns:"/var/run/netns/635d1a67-610f-4509-9e23-772e6a57729c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-dns;K8S_POD_NAME=dns-default-c5tbq;K8S_POD_INFRA_CONTAINER_ID=010f527922a84c158a643229ed5cb60f8dc1f0dddf8d575e30ead9ed434fdc86;K8S_POD_UID=1125cbf4-59e9-464e-8305-d2fc133ae675" Path:"" 2025-12-08T17:44:20.312516999+00:00 stderr F 2025-12-08T17:44:20Z [verbose] ADD starting CNI request ContainerID:"b9ab01aa001ad2c5784ddac95fc04ea32122d7e15a7294751601084b9dfa2398" Netns:"/var/run/netns/5c1622e5-bb33-4d3b-a279-f8d6f9069e28" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-console-operator;K8S_POD_NAME=console-operator-67c89758df-79mps;K8S_POD_INFRA_CONTAINER_ID=b9ab01aa001ad2c5784ddac95fc04ea32122d7e15a7294751601084b9dfa2398;K8S_POD_UID=2e8b3e0b-d963-4522-9a08-71aee0979479" Path:"" 2025-12-08T17:44:20.322094801+00:00 stderr F I1208 17:44:20.304823 8826 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:20.322094801+00:00 stderr F I1208 17:44:20.308245 8826 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:20.322094801+00:00 stderr F I1208 17:44:20.308257 8826 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:20.322094801+00:00 stderr F I1208 17:44:20.308263 8826 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:20.322094801+00:00 stderr F I1208 17:44:20.308269 8826 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:20.322463020+00:00 stderr F 2025-12-08T17:44:20Z [verbose] Add: openshift-kube-storage-version-migrator-operator:kube-storage-version-migrator-operator-565b79b866-6gkgz:dbad8204-9790-4f15-a74c-0149d19a4785:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"6278e7a7e9899b2","mac":"ca:81:b8:a1:4a:a0"},{"name":"eth0","mac":"0a:58:0a:d9:00:21","sandbox":"/var/run/netns/c8a3bad3-908a-4712-96d2-cec609a7464d"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.33/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:20.324943338+00:00 stderr F I1208 17:44:20.322764 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-storage-version-migrator-operator", Name:"kube-storage-version-migrator-operator-565b79b866-6gkgz", UID:"dbad8204-9790-4f15-a74c-0149d19a4785", APIVersion:"v1", ResourceVersion:"36767", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.33/23] from ovn-kubernetes 2025-12-08T17:44:20.333531462+00:00 stderr F 2025-12-08T17:44:20Z [verbose] ADD starting CNI request ContainerID:"7e54acff5cfb3f98418c71eda57c0e86ac7ddac7f2c3cafad91c274492ac084b" Netns:"/var/run/netns/5df95d8e-25a5-4b71-9eaa-01341f42e397" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-ingress-operator;K8S_POD_NAME=ingress-operator-6b9cb4dbcf-2pwhz;K8S_POD_INFRA_CONTAINER_ID=7e54acff5cfb3f98418c71eda57c0e86ac7ddac7f2c3cafad91c274492ac084b;K8S_POD_UID=0157c9d2-3779-46c8-9da9-1fffa52986a6" Path:"" 2025-12-08T17:44:20.353591379+00:00 stderr F 2025-12-08T17:44:20Z [verbose] ADD finished CNI request ContainerID:"6278e7a7e9899b2ad5ce35a00b137621f1dfd9cc576a596b193fa1aabb7e545e" Netns:"/var/run/netns/c8a3bad3-908a-4712-96d2-cec609a7464d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-storage-version-migrator-operator;K8S_POD_NAME=kube-storage-version-migrator-operator-565b79b866-6gkgz;K8S_POD_INFRA_CONTAINER_ID=6278e7a7e9899b2ad5ce35a00b137621f1dfd9cc576a596b193fa1aabb7e545e;K8S_POD_UID=dbad8204-9790-4f15-a74c-0149d19a4785" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ca:81:b8:a1:4a:a0\",\"name\":\"6278e7a7e9899b2\"},{\"mac\":\"0a:58:0a:d9:00:21\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/c8a3bad3-908a-4712-96d2-cec609a7464d\"}],\"ips\":[{\"address\":\"10.217.0.33/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:20.547524859+00:00 stderr F I1208 17:44:20.528187 8910 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:20.547524859+00:00 stderr F I1208 17:44:20.528299 8910 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:20.547524859+00:00 stderr F I1208 17:44:20.528309 8910 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:20.547524859+00:00 stderr F I1208 17:44:20.528316 8910 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:20.547524859+00:00 stderr F I1208 17:44:20.528323 8910 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:20.547993302+00:00 stderr F 2025-12-08T17:44:20Z [verbose] Add: openshift-console:console-64d44f6ddf-dhfvx:a272b1fd-864b-4107-a4fd-6f6ab82a1d34:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"137e813a0937cb3","mac":"f2:94:af:9a:3f:27"},{"name":"eth0","mac":"0a:58:0a:d9:00:29","sandbox":"/var/run/netns/8b1532b8-29fb-4880-8806-6103578f78f5"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.41/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:20.548457285+00:00 stderr F I1208 17:44:20.548140 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-console", Name:"console-64d44f6ddf-dhfvx", UID:"a272b1fd-864b-4107-a4fd-6f6ab82a1d34", APIVersion:"v1", ResourceVersion:"36777", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.41/23] from ovn-kubernetes 2025-12-08T17:44:20.560011690+00:00 stderr F I1208 17:44:20.538299 8898 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:20.560011690+00:00 stderr F I1208 17:44:20.538521 8898 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:20.560011690+00:00 stderr F I1208 17:44:20.538539 8898 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:20.560011690+00:00 stderr F I1208 17:44:20.538569 8898 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:20.560011690+00:00 stderr F I1208 17:44:20.538585 8898 envvar.go:172] "Feature 
gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:20.560011690+00:00 stderr F 2025-12-08T17:44:20Z [verbose] Add: hostpath-provisioner:csi-hostpathplugin-qrls7:b81b63fd-c7d6-4446-ab93-c62912586002:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"119b829edb31a95","mac":"4a:b1:e2:59:b6:12"},{"name":"eth0","mac":"0a:58:0a:d9:00:1e","sandbox":"/var/run/netns/c7f8b8fc-1c5b-42f8-be75-26d280d89b8e"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.30/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:20.560011690+00:00 stderr F I1208 17:44:20.559957 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"hostpath-provisioner", Name:"csi-hostpathplugin-qrls7", UID:"b81b63fd-c7d6-4446-ab93-c62912586002", APIVersion:"v1", ResourceVersion:"36763", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.30/23] from ovn-kubernetes 2025-12-08T17:44:20.562033925+00:00 stderr F I1208 17:44:20.520447 8945 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:20.562033925+00:00 stderr F I1208 17:44:20.521279 8945 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:20.562033925+00:00 stderr F I1208 17:44:20.521300 8945 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:20.562033925+00:00 stderr F I1208 17:44:20.521306 8945 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:20.562033925+00:00 stderr F I1208 17:44:20.521312 8945 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:20.564827201+00:00 stderr F I1208 17:44:20.528857 8929 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:20.564827201+00:00 stderr F I1208 17:44:20.528974 8929 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:20.564827201+00:00 stderr F I1208 17:44:20.528982 8929 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:20.564827201+00:00 stderr F I1208 17:44:20.528988 8929 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:20.564827201+00:00 stderr F I1208 17:44:20.528994 8929 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:20.564827201+00:00 stderr F 2025-12-08T17:44:20Z [verbose] Add: openshift-ingress-canary:ingress-canary-psjrr:c32d3580-29a1-4299-8926-e4c9caa4ff86:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"2449e51c8bd92a5","mac":"be:d8:dc:64:27:fe"},{"name":"eth0","mac":"0a:58:0a:d9:00:2b","sandbox":"/var/run/netns/06622877-dbb0-4afd-b918-d88bac8bdbab"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.43/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:20.564827201+00:00 stderr F I1208 17:44:20.563605 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-ingress-canary", Name:"ingress-canary-psjrr", UID:"c32d3580-29a1-4299-8926-e4c9caa4ff86", APIVersion:"v1", ResourceVersion:"36795", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.43/23] from ovn-kubernetes 2025-12-08T17:44:20.566226470+00:00 stderr F 2025-12-08T17:44:20Z [verbose] Add: 
openshift-config-operator:openshift-config-operator-5777786469-v69x6:ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"7e30c5d9c26bffa","mac":"8e:0d:e1:c6:7a:1a"},{"name":"eth0","mac":"0a:58:0a:d9:00:28","sandbox":"/var/run/netns/1dd7b96f-c198-4d6f-8673-f17f7bc0e424"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.40/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:20.566482916+00:00 stderr F I1208 17:44:20.566434 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-config-operator", Name:"openshift-config-operator-5777786469-v69x6", UID:"ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b", APIVersion:"v1", ResourceVersion:"36779", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.40/23] from ovn-kubernetes 2025-12-08T17:44:20.581107845+00:00 stderr F 2025-12-08T17:44:20Z [verbose] ADD finished CNI request ContainerID:"137e813a0937cb381bde370d2667c10d162673b57a4dea10c7dc09f970f70b80" Netns:"/var/run/netns/8b1532b8-29fb-4880-8806-6103578f78f5" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-console;K8S_POD_NAME=console-64d44f6ddf-dhfvx;K8S_POD_INFRA_CONTAINER_ID=137e813a0937cb381bde370d2667c10d162673b57a4dea10c7dc09f970f70b80;K8S_POD_UID=a272b1fd-864b-4107-a4fd-6f6ab82a1d34" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"f2:94:af:9a:3f:27\",\"name\":\"137e813a0937cb3\"},{\"mac\":\"0a:58:0a:d9:00:29\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/8b1532b8-29fb-4880-8806-6103578f78f5\"}],\"ips\":[{\"address\":\"10.217.0.41/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:20.581107845+00:00 stderr F 2025-12-08T17:44:20Z [verbose] ADD finished CNI request ContainerID:"119b829edb31a9542c4e4278438498a6d436d9ffaf05166df40ae801a6ad750b" Netns:"/var/run/netns/c7f8b8fc-1c5b-42f8-be75-26d280d89b8e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=hostpath-provisioner;K8S_POD_NAME=csi-hostpathplugin-qrls7;K8S_POD_INFRA_CONTAINER_ID=119b829edb31a9542c4e4278438498a6d436d9ffaf05166df40ae801a6ad750b;K8S_POD_UID=b81b63fd-c7d6-4446-ab93-c62912586002" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"4a:b1:e2:59:b6:12\",\"name\":\"119b829edb31a95\"},{\"mac\":\"0a:58:0a:d9:00:1e\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/c7f8b8fc-1c5b-42f8-be75-26d280d89b8e\"}],\"ips\":[{\"address\":\"10.217.0.30/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:20.589451723+00:00 stderr F 2025-12-08T17:44:20Z [verbose] ADD finished CNI request ContainerID:"2449e51c8bd92a5ae9266cc65003e01d779611a71a071521302420a8b6a964d5" Netns:"/var/run/netns/06622877-dbb0-4afd-b918-d88bac8bdbab" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-ingress-canary;K8S_POD_NAME=ingress-canary-psjrr;K8S_POD_INFRA_CONTAINER_ID=2449e51c8bd92a5ae9266cc65003e01d779611a71a071521302420a8b6a964d5;K8S_POD_UID=c32d3580-29a1-4299-8926-e4c9caa4ff86" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"be:d8:dc:64:27:fe\",\"name\":\"2449e51c8bd92a5\"},{\"mac\":\"0a:58:0a:d9:00:2b\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/06622877-dbb0-4afd-b918-d88bac8bdbab\"}],\"ips\":[{\"address\":\"10.217.0.43/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:20.589451723+00:00 stderr F 2025-12-08T17:44:20Z [verbose] ADD finished CNI request ContainerID:"7e30c5d9c26bffaf27132475d044e517d0468cb10a56a5b9876e62c41fd6908b" 
Netns:"/var/run/netns/1dd7b96f-c198-4d6f-8673-f17f7bc0e424" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-config-operator;K8S_POD_NAME=openshift-config-operator-5777786469-v69x6;K8S_POD_INFRA_CONTAINER_ID=7e30c5d9c26bffaf27132475d044e517d0468cb10a56a5b9876e62c41fd6908b;K8S_POD_UID=ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"8e:0d:e1:c6:7a:1a\",\"name\":\"7e30c5d9c26bffa\"},{\"mac\":\"0a:58:0a:d9:00:28\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/1dd7b96f-c198-4d6f-8673-f17f7bc0e424\"}],\"ips\":[{\"address\":\"10.217.0.40/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:20.873000637+00:00 stderr F I1208 17:44:20.837173 8960 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:20.873000637+00:00 stderr F I1208 17:44:20.837694 8960 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:20.873000637+00:00 stderr F I1208 17:44:20.837704 8960 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:20.873000637+00:00 stderr F I1208 17:44:20.837712 8960 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:20.873000637+00:00 stderr F I1208 17:44:20.837720 8960 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:20.874918920+00:00 stderr F 2025-12-08T17:44:20Z [verbose] Add: openshift-etcd-operator:etcd-operator-69b85846b6-k26tc:1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"07295a2f7b5b3c6","mac":"9e:cd:0a:06:53:c2"},{"name":"eth0","mac":"0a:58:0a:d9:00:26","sandbox":"/var/run/netns/7c3653a9-0048-4c3a-bd93-e058387b37ec"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.38/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:20.874918920+00:00 stderr F I1208 17:44:20.873820 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-etcd-operator", Name:"etcd-operator-69b85846b6-k26tc", UID:"1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f", APIVersion:"v1", ResourceVersion:"36772", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.38/23] from ovn-kubernetes 2025-12-08T17:44:20.906092740+00:00 stderr F 2025-12-08T17:44:20Z [verbose] ADD finished CNI request ContainerID:"07295a2f7b5b3c65edd96320d163fc6e805c5e086dfc16c48b794802b29335a5" Netns:"/var/run/netns/7c3653a9-0048-4c3a-bd93-e058387b37ec" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-etcd-operator;K8S_POD_NAME=etcd-operator-69b85846b6-k26tc;K8S_POD_INFRA_CONTAINER_ID=07295a2f7b5b3c65edd96320d163fc6e805c5e086dfc16c48b794802b29335a5;K8S_POD_UID=1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"9e:cd:0a:06:53:c2\",\"name\":\"07295a2f7b5b3c6\"},{\"mac\":\"0a:58:0a:d9:00:26\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/7c3653a9-0048-4c3a-bd93-e058387b37ec\"}],\"ips\":[{\"address\":\"10.217.0.38/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:20.943067838+00:00 stderr F I1208 17:44:20.930651 9114 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:20.943067838+00:00 stderr F I1208 17:44:20.930842 9114 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 
2025-12-08T17:44:20.943067838+00:00 stderr F I1208 17:44:20.930850 9114 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:20.943067838+00:00 stderr F I1208 17:44:20.930856 9114 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:20.943067838+00:00 stderr F I1208 17:44:20.930862 9114 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:20.943344107+00:00 stderr F 2025-12-08T17:44:20Z [verbose] Add: openshift-authentication:oauth-openshift-66458b6674-ztdrc:9bdb30d2-8f69-4d2d-9bf1-3bc70f85369e:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"803ad93dfa8700d","mac":"d2:85:b0:85:37:86"},{"name":"eth0","mac":"0a:58:0a:d9:00:2a","sandbox":"/var/run/netns/9665a549-0227-4119-90f3-dff23b56b248"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.42/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:20.944780865+00:00 stderr F I1208 17:44:20.944054 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-authentication", Name:"oauth-openshift-66458b6674-ztdrc", UID:"9bdb30d2-8f69-4d2d-9bf1-3bc70f85369e", APIVersion:"v1", ResourceVersion:"36778", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.42/23] from ovn-kubernetes 2025-12-08T17:44:20.982340489+00:00 stderr F 2025-12-08T17:44:20Z [verbose] ADD finished CNI request ContainerID:"803ad93dfa8700dbf09b3e6a4e33d63e186ef2c8cc3dfa4d900a01a2b041fbcf" Netns:"/var/run/netns/9665a549-0227-4119-90f3-dff23b56b248" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-66458b6674-ztdrc;K8S_POD_INFRA_CONTAINER_ID=803ad93dfa8700dbf09b3e6a4e33d63e186ef2c8cc3dfa4d900a01a2b041fbcf;K8S_POD_UID=9bdb30d2-8f69-4d2d-9bf1-3bc70f85369e" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"d2:85:b0:85:37:86\",\"name\":\"803ad93dfa8700d\"},{\"mac\":\"0a:58:0a:d9:00:2a\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/9665a549-0227-4119-90f3-dff23b56b248\"}],\"ips\":[{\"address\":\"10.217.0.42/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:20.996814 9225 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:20.996992 9225 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:20.997016 9225 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:20.997035 9225 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:20.999599 9225 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:20.949647 9016 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:20.949786 9016 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:20.949794 9016 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:20.949800 
9016 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:20.949806 9016 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:21.023921844+00:00 stderr F 2025-12-08T17:44:21Z [verbose] Add: openshift-ingress-operator:ingress-operator-6b9cb4dbcf-2pwhz:0157c9d2-3779-46c8-9da9-1fffa52986a6:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"7e54acff5cfb3f9","mac":"ca:e2:1a:01:45:aa"},{"name":"eth0","mac":"0a:58:0a:d9:00:27","sandbox":"/var/run/netns/5df95d8e-25a5-4b71-9eaa-01341f42e397"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.39/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:21.023921844+00:00 stderr F 2025-12-08T17:44:21Z [verbose] Add: openshift-machine-config-operator:machine-config-controller-f9cdd68f7-p88k2:78316998-7ca1-4495-997b-bad16252fa84:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"f53011be2342d2e","mac":"9a:59:3c:2e:e0:28"},{"name":"eth0","mac":"0a:58:0a:d9:00:0c","sandbox":"/var/run/netns/95bbb289-82d7-41e4-9d82-a2bdaa8e7f58"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.12/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:21.023661 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-ingress-operator", Name:"ingress-operator-6b9cb4dbcf-2pwhz", UID:"0157c9d2-3779-46c8-9da9-1fffa52986a6", APIVersion:"v1", ResourceVersion:"37002", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.39/23] from ovn-kubernetes 2025-12-08T17:44:21.023921844+00:00 stderr F I1208 17:44:21.023677 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-machine-config-operator", Name:"machine-config-controller-f9cdd68f7-p88k2", UID:"78316998-7ca1-4495-997b-bad16252fa84", APIVersion:"v1", ResourceVersion:"36876", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.12/23] from ovn-kubernetes 2025-12-08T17:44:21.055932347+00:00 stderr F 2025-12-08T17:44:21Z [verbose] ADD finished CNI request ContainerID:"7e54acff5cfb3f98418c71eda57c0e86ac7ddac7f2c3cafad91c274492ac084b" Netns:"/var/run/netns/5df95d8e-25a5-4b71-9eaa-01341f42e397" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-ingress-operator;K8S_POD_NAME=ingress-operator-6b9cb4dbcf-2pwhz;K8S_POD_INFRA_CONTAINER_ID=7e54acff5cfb3f98418c71eda57c0e86ac7ddac7f2c3cafad91c274492ac084b;K8S_POD_UID=0157c9d2-3779-46c8-9da9-1fffa52986a6" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ca:e2:1a:01:45:aa\",\"name\":\"7e54acff5cfb3f9\"},{\"mac\":\"0a:58:0a:d9:00:27\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/5df95d8e-25a5-4b71-9eaa-01341f42e397\"}],\"ips\":[{\"address\":\"10.217.0.39/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:21.055932347+00:00 stderr F I1208 17:44:20.988724 9018 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:21.055932347+00:00 stderr F I1208 17:44:20.988828 9018 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:21.055932347+00:00 stderr F I1208 17:44:20.988845 9018 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:21.055932347+00:00 stderr F I1208 17:44:20.988851 9018 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" 
enabled=false 2025-12-08T17:44:21.055932347+00:00 stderr F I1208 17:44:20.988857 9018 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:21.055932347+00:00 stderr F 2025-12-08T17:44:21Z [verbose] Add: openshift-controller-manager-operator:openshift-controller-manager-operator-686468bdd5-m5ltz:0f90a7a2-721d-4929-a4fa-fd1d2019b4cd:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"ec4f637da38df73","mac":"72:da:79:cc:af:42"},{"name":"eth0","mac":"0a:58:0a:d9:00:1f","sandbox":"/var/run/netns/ab623c7b-d643-4a27-9e12-e78dd50fb2d8"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.31/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:21.055932347+00:00 stderr F I1208 17:44:21.042736 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator-686468bdd5-m5ltz", UID:"0f90a7a2-721d-4929-a4fa-fd1d2019b4cd", APIVersion:"v1", ResourceVersion:"36765", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.31/23] from ovn-kubernetes 2025-12-08T17:44:21.055932347+00:00 stderr F 2025-12-08T17:44:21Z [verbose] ADD finished CNI request ContainerID:"f53011be2342d2e1df81bdc8b416956b013bd24a064c9fab44113e845132ed40" Netns:"/var/run/netns/95bbb289-82d7-41e4-9d82-a2bdaa8e7f58" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-machine-config-operator;K8S_POD_NAME=machine-config-controller-f9cdd68f7-p88k2;K8S_POD_INFRA_CONTAINER_ID=f53011be2342d2e1df81bdc8b416956b013bd24a064c9fab44113e845132ed40;K8S_POD_UID=78316998-7ca1-4495-997b-bad16252fa84" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"9a:59:3c:2e:e0:28\",\"name\":\"f53011be2342d2e\"},{\"mac\":\"0a:58:0a:d9:00:0c\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/95bbb289-82d7-41e4-9d82-a2bdaa8e7f58\"}],\"ips\":[{\"address\":\"10.217.0.12/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:21.086737588+00:00 stderr F I1208 17:44:21.064469 9215 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:21.086737588+00:00 stderr F I1208 17:44:21.064733 9215 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:21.086737588+00:00 stderr F I1208 17:44:21.064748 9215 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:21.086737588+00:00 stderr F I1208 17:44:21.064756 9215 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:21.086737588+00:00 stderr F I1208 17:44:21.064779 9215 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:21.086737588+00:00 stderr F 2025-12-08T17:44:21Z [verbose] Add: openshift-console-operator:console-operator-67c89758df-79mps:2e8b3e0b-d963-4522-9a08-71aee0979479:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"b9ab01aa001ad2c","mac":"de:3a:ab:5e:2f:09"},{"name":"eth0","mac":"0a:58:0a:d9:00:1d","sandbox":"/var/run/netns/5c1622e5-bb33-4d3b-a279-f8d6f9069e28"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.29/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:21.086737588+00:00 stderr F I1208 17:44:21.078415 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-console-operator", Name:"console-operator-67c89758df-79mps", 
UID:"2e8b3e0b-d963-4522-9a08-71aee0979479", APIVersion:"v1", ResourceVersion:"36995", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.29/23] from ovn-kubernetes 2025-12-08T17:44:21.086737588+00:00 stderr F 2025-12-08T17:44:21Z [verbose] ADD finished CNI request ContainerID:"ec4f637da38df73ec133f4025cc97b99242e32adbee5eb6a0c499617f008b5d0" Netns:"/var/run/netns/ab623c7b-d643-4a27-9e12-e78dd50fb2d8" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager-operator;K8S_POD_NAME=openshift-controller-manager-operator-686468bdd5-m5ltz;K8S_POD_INFRA_CONTAINER_ID=ec4f637da38df73ec133f4025cc97b99242e32adbee5eb6a0c499617f008b5d0;K8S_POD_UID=0f90a7a2-721d-4929-a4fa-fd1d2019b4cd" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"72:da:79:cc:af:42\",\"name\":\"ec4f637da38df73\"},{\"mac\":\"0a:58:0a:d9:00:1f\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/ab623c7b-d643-4a27-9e12-e78dd50fb2d8\"}],\"ips\":[{\"address\":\"10.217.0.31/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:21.099229378+00:00 stderr F 2025-12-08T17:44:21Z [verbose] ADD finished CNI request ContainerID:"b9ab01aa001ad2c5784ddac95fc04ea32122d7e15a7294751601084b9dfa2398" Netns:"/var/run/netns/5c1622e5-bb33-4d3b-a279-f8d6f9069e28" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-console-operator;K8S_POD_NAME=console-operator-67c89758df-79mps;K8S_POD_INFRA_CONTAINER_ID=b9ab01aa001ad2c5784ddac95fc04ea32122d7e15a7294751601084b9dfa2398;K8S_POD_UID=2e8b3e0b-d963-4522-9a08-71aee0979479" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"de:3a:ab:5e:2f:09\",\"name\":\"b9ab01aa001ad2c\"},{\"mac\":\"0a:58:0a:d9:00:1d\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/5c1622e5-bb33-4d3b-a279-f8d6f9069e28\"}],\"ips\":[{\"address\":\"10.217.0.29/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:21.177102952+00:00 stderr F I1208 17:44:21.164367 9205 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:21.177102952+00:00 stderr F I1208 17:44:21.164625 9205 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:21.177102952+00:00 stderr F I1208 17:44:21.164640 9205 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:21.177102952+00:00 stderr F I1208 17:44:21.164648 9205 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:21.177102952+00:00 stderr F I1208 17:44:21.164662 9205 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:21.177102952+00:00 stderr F 2025-12-08T17:44:21Z [verbose] Add: openshift-dns:dns-default-c5tbq:1125cbf4-59e9-464e-8305-d2fc133ae675:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"010f527922a84c1","mac":"ea:52:26:7c:82:bc"},{"name":"eth0","mac":"0a:58:0a:d9:00:1b","sandbox":"/var/run/netns/635d1a67-610f-4509-9e23-772e6a57729c"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.27/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:21.177102952+00:00 stderr F I1208 17:44:21.175385 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-dns", Name:"dns-default-c5tbq", UID:"1125cbf4-59e9-464e-8305-d2fc133ae675", APIVersion:"v1", ResourceVersion:"36758", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.27/23] from 
ovn-kubernetes 2025-12-08T17:44:21.208458657+00:00 stderr F 2025-12-08T17:44:21Z [verbose] ADD finished CNI request ContainerID:"010f527922a84c158a643229ed5cb60f8dc1f0dddf8d575e30ead9ed434fdc86" Netns:"/var/run/netns/635d1a67-610f-4509-9e23-772e6a57729c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-dns;K8S_POD_NAME=dns-default-c5tbq;K8S_POD_INFRA_CONTAINER_ID=010f527922a84c158a643229ed5cb60f8dc1f0dddf8d575e30ead9ed434fdc86;K8S_POD_UID=1125cbf4-59e9-464e-8305-d2fc133ae675" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ea:52:26:7c:82:bc\",\"name\":\"010f527922a84c1\"},{\"mac\":\"0a:58:0a:d9:00:1b\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/635d1a67-610f-4509-9e23-772e6a57729c\"}],\"ips\":[{\"address\":\"10.217.0.27/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:22.849909611+00:00 stderr F 2025-12-08T17:44:22Z [verbose] ADD starting CNI request ContainerID:"08a30309aab05f724b4d90b3610f7ad6b5bae8633f9e5f0e956fb4a55ca08d5c" Netns:"/var/run/netns/fa0e2d4a-9766-43dd-8bba-302637083e90" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-lxwl6;K8S_POD_INFRA_CONTAINER_ID=08a30309aab05f724b4d90b3610f7ad6b5bae8633f9e5f0e956fb4a55ca08d5c;K8S_POD_UID=fe8486ce-b0ff-43e5-b2a4-3e1d81feeebf" Path:"" 2025-12-08T17:44:22.892433291+00:00 stderr F 2025-12-08T17:44:22Z [verbose] ADD starting CNI request ContainerID:"ef0290741bfadc050351726657ea5d1e90d89bb42d86f01f2b2081dd32e004ea" Netns:"/var/run/netns/2ec1e7e4-9840-4512-9258-2dbe7411cb16" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-sb7gg;K8S_POD_INFRA_CONTAINER_ID=ef0290741bfadc050351726657ea5d1e90d89bb42d86f01f2b2081dd32e004ea;K8S_POD_UID=e4f4fc3c-88d2-455a-a8d2-209388238c9a" Path:"" 2025-12-08T17:44:22.998912746+00:00 stderr F 2025-12-08T17:44:22Z [verbose] ADD starting CNI request ContainerID:"ad18d5cd7629954fa392ecb15a908995ca8664574c76fffd455810a5c533b257" Netns:"/var/run/netns/3cb2bf20-d369-4245-90f8-b4dae15ed896" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-r22jf;K8S_POD_INFRA_CONTAINER_ID=ad18d5cd7629954fa392ecb15a908995ca8664574c76fffd455810a5c533b257;K8S_POD_UID=cb8303fe-2019-44f4-a124-af174b28cc02" Path:"" 2025-12-08T17:44:23.144060095+00:00 stderr F 2025-12-08T17:44:23Z [verbose] ADD starting CNI request ContainerID:"ae661eb10cf40ee037c5bbb75003eb4cc6748efa6c166b48128d05415dc58f58" Netns:"/var/run/netns/3aeca3dc-68b7-424a-8c0c-6f9f76c7cf83" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-n5vp7;K8S_POD_INFRA_CONTAINER_ID=ae661eb10cf40ee037c5bbb75003eb4cc6748efa6c166b48128d05415dc58f58;K8S_POD_UID=8c05f773-74bd-433b-84ce-a7f5430d9b55" Path:"" 2025-12-08T17:44:23.395541314+00:00 stderr F I1208 17:44:23.384086 9888 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:23.395541314+00:00 stderr F I1208 17:44:23.384557 9888 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:23.395541314+00:00 stderr F I1208 17:44:23.384565 9888 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:23.395541314+00:00 stderr F I1208 17:44:23.384571 9888 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:23.395541314+00:00 stderr F I1208 
17:44:23.384577 9888 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:23.400956622+00:00 stderr F 2025-12-08T17:44:23Z [verbose] Add: openshift-marketplace:certified-operators-lxwl6:fe8486ce-b0ff-43e5-b2a4-3e1d81feeebf:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"08a30309aab05f7","mac":"22:a5:2c:97:39:47"},{"name":"eth0","mac":"0a:58:0a:d9:00:2d","sandbox":"/var/run/netns/fa0e2d4a-9766-43dd-8bba-302637083e90"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.45/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:23.400956622+00:00 stderr F I1208 17:44:23.397101 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"certified-operators-lxwl6", UID:"fe8486ce-b0ff-43e5-b2a4-3e1d81feeebf", APIVersion:"v1", ResourceVersion:"37226", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.45/23] from ovn-kubernetes 2025-12-08T17:44:23.434767084+00:00 stderr F 2025-12-08T17:44:23Z [verbose] ADD finished CNI request ContainerID:"08a30309aab05f724b4d90b3610f7ad6b5bae8633f9e5f0e956fb4a55ca08d5c" Netns:"/var/run/netns/fa0e2d4a-9766-43dd-8bba-302637083e90" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-lxwl6;K8S_POD_INFRA_CONTAINER_ID=08a30309aab05f724b4d90b3610f7ad6b5bae8633f9e5f0e956fb4a55ca08d5c;K8S_POD_UID=fe8486ce-b0ff-43e5-b2a4-3e1d81feeebf" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"22:a5:2c:97:39:47\",\"name\":\"08a30309aab05f7\"},{\"mac\":\"0a:58:0a:d9:00:2d\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/fa0e2d4a-9766-43dd-8bba-302637083e90\"}],\"ips\":[{\"address\":\"10.217.0.45/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:23.728983880+00:00 stderr F I1208 17:44:23.712718 9927 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:23.728983880+00:00 stderr F I1208 17:44:23.713278 9927 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:23.728983880+00:00 stderr F I1208 17:44:23.713287 9927 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:23.728983880+00:00 stderr F I1208 17:44:23.713296 9927 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:23.728983880+00:00 stderr F I1208 17:44:23.713304 9927 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:23.732527996+00:00 stderr F 2025-12-08T17:44:23Z [verbose] Add: openshift-marketplace:community-operators-r22jf:cb8303fe-2019-44f4-a124-af174b28cc02:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"ad18d5cd7629954","mac":"06:2e:0a:02:e4:ea"},{"name":"eth0","mac":"0a:58:0a:d9:00:2c","sandbox":"/var/run/netns/3cb2bf20-d369-4245-90f8-b4dae15ed896"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.44/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:23.732527996+00:00 stderr F I1208 17:44:23.731280 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"community-operators-r22jf", UID:"cb8303fe-2019-44f4-a124-af174b28cc02", APIVersion:"v1", ResourceVersion:"37214", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.44/23] from ovn-kubernetes 2025-12-08T17:44:23.741905263+00:00 stderr F I1208 
17:44:23.724351 9903 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:23.741905263+00:00 stderr F I1208 17:44:23.724449 9903 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:23.741905263+00:00 stderr F I1208 17:44:23.724456 9903 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:23.741905263+00:00 stderr F I1208 17:44:23.724462 9903 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:23.741905263+00:00 stderr F I1208 17:44:23.724468 9903 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:23.741905263+00:00 stderr F 2025-12-08T17:44:23Z [verbose] Add: openshift-marketplace:community-operators-sb7gg:e4f4fc3c-88d2-455a-a8d2-209388238c9a:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"ef0290741bfadc0","mac":"4a:a8:a4:eb:5a:f0"},{"name":"eth0","mac":"0a:58:0a:d9:00:2e","sandbox":"/var/run/netns/2ec1e7e4-9840-4512-9258-2dbe7411cb16"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.46/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:23.741905263+00:00 stderr F I1208 17:44:23.741678 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"community-operators-sb7gg", UID:"e4f4fc3c-88d2-455a-a8d2-209388238c9a", APIVersion:"v1", ResourceVersion:"37243", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.46/23] from ovn-kubernetes 2025-12-08T17:44:23.769806443+00:00 stderr F 2025-12-08T17:44:23Z [verbose] ADD finished CNI request ContainerID:"ef0290741bfadc050351726657ea5d1e90d89bb42d86f01f2b2081dd32e004ea" Netns:"/var/run/netns/2ec1e7e4-9840-4512-9258-2dbe7411cb16" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-sb7gg;K8S_POD_INFRA_CONTAINER_ID=ef0290741bfadc050351726657ea5d1e90d89bb42d86f01f2b2081dd32e004ea;K8S_POD_UID=e4f4fc3c-88d2-455a-a8d2-209388238c9a" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"4a:a8:a4:eb:5a:f0\",\"name\":\"ef0290741bfadc0\"},{\"mac\":\"0a:58:0a:d9:00:2e\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/2ec1e7e4-9840-4512-9258-2dbe7411cb16\"}],\"ips\":[{\"address\":\"10.217.0.46/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:23.799780811+00:00 stderr F 2025-12-08T17:44:23Z [verbose] ADD finished CNI request ContainerID:"ad18d5cd7629954fa392ecb15a908995ca8664574c76fffd455810a5c533b257" Netns:"/var/run/netns/3cb2bf20-d369-4245-90f8-b4dae15ed896" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-r22jf;K8S_POD_INFRA_CONTAINER_ID=ad18d5cd7629954fa392ecb15a908995ca8664574c76fffd455810a5c533b257;K8S_POD_UID=cb8303fe-2019-44f4-a124-af174b28cc02" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"06:2e:0a:02:e4:ea\",\"name\":\"ad18d5cd7629954\"},{\"mac\":\"0a:58:0a:d9:00:2c\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/3cb2bf20-d369-4245-90f8-b4dae15ed896\"}],\"ips\":[{\"address\":\"10.217.0.44/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:23.979692708+00:00 stderr F I1208 17:44:23.928057 9979 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:23.979692708+00:00 stderr F I1208 17:44:23.928220 9979 envvar.go:172] 
"Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:23.979692708+00:00 stderr F I1208 17:44:23.928233 9979 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:23.979692708+00:00 stderr F I1208 17:44:23.928239 9979 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:23.979692708+00:00 stderr F I1208 17:44:23.928245 9979 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:23.979692708+00:00 stderr F 2025-12-08T17:44:23Z [verbose] Add: openshift-marketplace:certified-operators-n5vp7:8c05f773-74bd-433b-84ce-a7f5430d9b55:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"ae661eb10cf40ee","mac":"d2:9c:6c:26:59:b6"},{"name":"eth0","mac":"0a:58:0a:d9:00:2f","sandbox":"/var/run/netns/3aeca3dc-68b7-424a-8c0c-6f9f76c7cf83"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.47/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:23.983349518+00:00 stderr F I1208 17:44:23.979802 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"certified-operators-n5vp7", UID:"8c05f773-74bd-433b-84ce-a7f5430d9b55", APIVersion:"v1", ResourceVersion:"37260", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.47/23] from ovn-kubernetes 2025-12-08T17:44:24.023653367+00:00 stderr F 2025-12-08T17:44:24Z [verbose] ADD finished CNI request ContainerID:"ae661eb10cf40ee037c5bbb75003eb4cc6748efa6c166b48128d05415dc58f58" Netns:"/var/run/netns/3aeca3dc-68b7-424a-8c0c-6f9f76c7cf83" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-n5vp7;K8S_POD_INFRA_CONTAINER_ID=ae661eb10cf40ee037c5bbb75003eb4cc6748efa6c166b48128d05415dc58f58;K8S_POD_UID=8c05f773-74bd-433b-84ce-a7f5430d9b55" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"d2:9c:6c:26:59:b6\",\"name\":\"ae661eb10cf40ee\"},{\"mac\":\"0a:58:0a:d9:00:2f\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/3aeca3dc-68b7-424a-8c0c-6f9f76c7cf83\"}],\"ips\":[{\"address\":\"10.217.0.47/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:24.553433378+00:00 stderr F 2025-12-08T17:44:24Z [verbose] ADD starting CNI request ContainerID:"300fcfe62cb2a2236d7576185a01858472eaf4d7b3901f788ba4cb7d1721d434" Netns:"/var/run/netns/8531667c-3d6c-48a5-9fbe-09c00b96fc75" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-rvglb;K8S_POD_INFRA_CONTAINER_ID=300fcfe62cb2a2236d7576185a01858472eaf4d7b3901f788ba4cb7d1721d434;K8S_POD_UID=fe467668-8954-4465-87ca-ef1d5f933d43" Path:"" 2025-12-08T17:44:24.994014115+00:00 stderr F I1208 17:44:24.981958 10215 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:24.994014115+00:00 stderr F I1208 17:44:24.982438 10215 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:24.994014115+00:00 stderr F I1208 17:44:24.982464 10215 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:24.994014115+00:00 stderr F I1208 17:44:24.982485 10215 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:24.994014115+00:00 stderr F I1208 17:44:24.982505 10215 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" 
enabled=false 2025-12-08T17:44:24.994534740+00:00 stderr F 2025-12-08T17:44:24Z [verbose] Add: openshift-marketplace:redhat-marketplace-rvglb:fe467668-8954-4465-87ca-ef1d5f933d43:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"300fcfe62cb2a22","mac":"da:44:c2:78:25:ec"},{"name":"eth0","mac":"0a:58:0a:d9:00:30","sandbox":"/var/run/netns/8531667c-3d6c-48a5-9fbe-09c00b96fc75"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.48/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:24.994826878+00:00 stderr F I1208 17:44:24.994797 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"redhat-marketplace-rvglb", UID:"fe467668-8954-4465-87ca-ef1d5f933d43", APIVersion:"v1", ResourceVersion:"37473", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.48/23] from ovn-kubernetes 2025-12-08T17:44:25.034228913+00:00 stderr F 2025-12-08T17:44:25Z [verbose] ADD finished CNI request ContainerID:"300fcfe62cb2a2236d7576185a01858472eaf4d7b3901f788ba4cb7d1721d434" Netns:"/var/run/netns/8531667c-3d6c-48a5-9fbe-09c00b96fc75" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-rvglb;K8S_POD_INFRA_CONTAINER_ID=300fcfe62cb2a2236d7576185a01858472eaf4d7b3901f788ba4cb7d1721d434;K8S_POD_UID=fe467668-8954-4465-87ca-ef1d5f933d43" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"da:44:c2:78:25:ec\",\"name\":\"300fcfe62cb2a22\"},{\"mac\":\"0a:58:0a:d9:00:30\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/8531667c-3d6c-48a5-9fbe-09c00b96fc75\"}],\"ips\":[{\"address\":\"10.217.0.48/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:25.159285193+00:00 stderr F 2025-12-08T17:44:25Z [verbose] ADD starting CNI request ContainerID:"bebe1f0da9278f62d8caef7874fc35428010d09942af1719c37fac3e6c4e8b5b" Netns:"/var/run/netns/15c4923b-8672-4cf2-83ab-b897e464cf33" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-6m6rs;K8S_POD_INFRA_CONTAINER_ID=bebe1f0da9278f62d8caef7874fc35428010d09942af1719c37fac3e6c4e8b5b;K8S_POD_UID=caab7ab2-a04e-42fc-bd64-76c76ee3755d" Path:"" 2025-12-08T17:44:25.521736501+00:00 stderr F I1208 17:44:25.499913 10311 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:25.521736501+00:00 stderr F I1208 17:44:25.500445 10311 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:25.521736501+00:00 stderr F I1208 17:44:25.500454 10311 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:25.521736501+00:00 stderr F I1208 17:44:25.500461 10311 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:25.521736501+00:00 stderr F I1208 17:44:25.500467 10311 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:25.524897697+00:00 stderr F 2025-12-08T17:44:25Z [verbose] Add: openshift-marketplace:redhat-marketplace-6m6rs:caab7ab2-a04e-42fc-bd64-76c76ee3755d:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"bebe1f0da9278f6","mac":"46:43:b2:21:7a:8a"},{"name":"eth0","mac":"0a:58:0a:d9:00:31","sandbox":"/var/run/netns/15c4923b-8672-4cf2-83ab-b897e464cf33"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.49/23","gateway":"10.217.0.1"}],"dns":{}} 
2025-12-08T17:44:25.524897697+00:00 stderr F I1208 17:44:25.524524 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"redhat-marketplace-6m6rs", UID:"caab7ab2-a04e-42fc-bd64-76c76ee3755d", APIVersion:"v1", ResourceVersion:"37509", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.49/23] from ovn-kubernetes 2025-12-08T17:44:25.554557376+00:00 stderr F 2025-12-08T17:44:25Z [verbose] ADD finished CNI request ContainerID:"bebe1f0da9278f62d8caef7874fc35428010d09942af1719c37fac3e6c4e8b5b" Netns:"/var/run/netns/15c4923b-8672-4cf2-83ab-b897e464cf33" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-6m6rs;K8S_POD_INFRA_CONTAINER_ID=bebe1f0da9278f62d8caef7874fc35428010d09942af1719c37fac3e6c4e8b5b;K8S_POD_UID=caab7ab2-a04e-42fc-bd64-76c76ee3755d" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"46:43:b2:21:7a:8a\",\"name\":\"bebe1f0da9278f6\"},{\"mac\":\"0a:58:0a:d9:00:31\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/15c4923b-8672-4cf2-83ab-b897e464cf33\"}],\"ips\":[{\"address\":\"10.217.0.49/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:25.607097368+00:00 stderr F 2025-12-08T17:44:25Z [verbose] ADD starting CNI request ContainerID:"bad7cc15753758580e7b5d15966ebb1082d0a9a66fb5c9a65077ce2b2db411b6" Netns:"/var/run/netns/c6721d99-c53d-40b4-8b98-83142d159b17" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-zfv6j;K8S_POD_INFRA_CONTAINER_ID=bad7cc15753758580e7b5d15966ebb1082d0a9a66fb5c9a65077ce2b2db411b6;K8S_POD_UID=e2c92d64-3525-4675-bbe9-38bfe6dd4504" Path:"" 2025-12-08T17:44:25.836070694+00:00 stderr F I1208 17:44:25.814594 10358 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:25.836070694+00:00 stderr F I1208 17:44:25.815051 10358 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:25.836070694+00:00 stderr F I1208 17:44:25.815059 10358 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:25.836070694+00:00 stderr F I1208 17:44:25.815065 10358 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:25.836070694+00:00 stderr F I1208 17:44:25.815071 10358 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:25.836559728+00:00 stderr F 2025-12-08T17:44:25Z [verbose] Add: openshift-marketplace:redhat-operators-zfv6j:e2c92d64-3525-4675-bbe9-38bfe6dd4504:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"bad7cc157537585","mac":"f6:d1:ea:c9:04:85"},{"name":"eth0","mac":"0a:58:0a:d9:00:33","sandbox":"/var/run/netns/c6721d99-c53d-40b4-8b98-83142d159b17"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.51/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:25.839309663+00:00 stderr F I1208 17:44:25.837043 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"redhat-operators-zfv6j", UID:"e2c92d64-3525-4675-bbe9-38bfe6dd4504", APIVersion:"v1", ResourceVersion:"37576", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.51/23] from ovn-kubernetes 2025-12-08T17:44:25.853959182+00:00 stderr P 2025-12-08T17:44:25Z [verbose] 2025-12-08T17:44:25.854009083+00:00 stderr P ADD finished CNI request 
ContainerID:"bad7cc15753758580e7b5d15966ebb1082d0a9a66fb5c9a65077ce2b2db411b6" Netns:"/var/run/netns/c6721d99-c53d-40b4-8b98-83142d159b17" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-zfv6j;K8S_POD_INFRA_CONTAINER_ID=bad7cc15753758580e7b5d15966ebb1082d0a9a66fb5c9a65077ce2b2db411b6;K8S_POD_UID=e2c92d64-3525-4675-bbe9-38bfe6dd4504" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"f6:d1:ea:c9:04:85\",\"name\":\"bad7cc157537585\"},{\"mac\":\"0a:58:0a:d9:00:33\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/c6721d99-c53d-40b4-8b98-83142d159b17\"}],\"ips\":[{\"address\":\"10.217.0.51/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:25.854028304+00:00 stderr F 2025-12-08T17:44:25.865801686+00:00 stderr P 2025-12-08T17:44:25Z [verbose] 2025-12-08T17:44:25.865845287+00:00 stderr P ADD starting CNI request ContainerID:"1527bd07152c4241e773659ffece99cf6e3c5940dd78184bc260f9235526b2d0" Netns:"/var/run/netns/0cf08c50-91e3-4cb3-9a83-bcbdd761f25d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-scheduler;K8S_POD_NAME=revision-pruner-6-crc;K8S_POD_INFRA_CONTAINER_ID=1527bd07152c4241e773659ffece99cf6e3c5940dd78184bc260f9235526b2d0;K8S_POD_UID=c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6" Path:"" 2025-12-08T17:44:25.865895828+00:00 stderr F 2025-12-08T17:44:25.960935371+00:00 stderr F 2025-12-08T17:44:25Z [verbose] ADD starting CNI request ContainerID:"79fd674b2f1982666d841b20537687d86fe6bb801a03c4ed53a6f95d3bc986ac" Netns:"/var/run/netns/ac4dd58d-a0ec-4bd2-abf7-d3047aaac169" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-w7jrs;K8S_POD_INFRA_CONTAINER_ID=79fd674b2f1982666d841b20537687d86fe6bb801a03c4ed53a6f95d3bc986ac;K8S_POD_UID=ba520484-b334-4e08-8f1a-5eb554b62dc4" Path:"" 2025-12-08T17:44:26.136986592+00:00 stderr F I1208 17:44:26.125461 10405 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:26.136986592+00:00 stderr F I1208 17:44:26.125860 10405 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:26.136986592+00:00 stderr F I1208 17:44:26.125871 10405 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:26.136986592+00:00 stderr F I1208 17:44:26.128056 10405 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:26.136986592+00:00 stderr F I1208 17:44:26.128074 10405 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:26.137314142+00:00 stderr P 2025-12-08T17:44:26Z [verbose] 2025-12-08T17:44:26.137338302+00:00 stderr P Add: openshift-kube-scheduler:revision-pruner-6-crc:c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"1527bd07152c424","mac":"7a:75:d1:23:2f:d3"},{"name":"eth0","mac":"0a:58:0a:d9:00:32","sandbox":"/var/run/netns/0cf08c50-91e3-4cb3-9a83-bcbdd761f25d"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.50/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:26.137355863+00:00 stderr F 2025-12-08T17:44:26.137710773+00:00 stderr F I1208 17:44:26.137690 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-scheduler", Name:"revision-pruner-6-crc", UID:"c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6", APIVersion:"v1", ResourceVersion:"37564", FieldPath:""}): 
type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.50/23] from ovn-kubernetes 2025-12-08T17:44:26.151328344+00:00 stderr F 2025-12-08T17:44:26Z [verbose] ADD finished CNI request ContainerID:"1527bd07152c4241e773659ffece99cf6e3c5940dd78184bc260f9235526b2d0" Netns:"/var/run/netns/0cf08c50-91e3-4cb3-9a83-bcbdd761f25d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-scheduler;K8S_POD_NAME=revision-pruner-6-crc;K8S_POD_INFRA_CONTAINER_ID=1527bd07152c4241e773659ffece99cf6e3c5940dd78184bc260f9235526b2d0;K8S_POD_UID=c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"7a:75:d1:23:2f:d3\",\"name\":\"1527bd07152c424\"},{\"mac\":\"0a:58:0a:d9:00:32\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/0cf08c50-91e3-4cb3-9a83-bcbdd761f25d\"}],\"ips\":[{\"address\":\"10.217.0.50/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:26.275931592+00:00 stderr F I1208 17:44:26.251959 10429 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:26.275931592+00:00 stderr F I1208 17:44:26.252252 10429 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:26.275931592+00:00 stderr F I1208 17:44:26.252259 10429 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:26.275931592+00:00 stderr F I1208 17:44:26.252265 10429 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:26.275931592+00:00 stderr F I1208 17:44:26.252270 10429 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:26.275931592+00:00 stderr F 2025-12-08T17:44:26Z [verbose] Add: openshift-marketplace:redhat-operators-w7jrs:ba520484-b334-4e08-8f1a-5eb554b62dc4:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"79fd674b2f19826","mac":"f6:1e:00:98:4d:07"},{"name":"eth0","mac":"0a:58:0a:d9:00:34","sandbox":"/var/run/netns/ac4dd58d-a0ec-4bd2-abf7-d3047aaac169"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.52/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:26.275931592+00:00 stderr F I1208 17:44:26.273845 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"redhat-operators-w7jrs", UID:"ba520484-b334-4e08-8f1a-5eb554b62dc4", APIVersion:"v1", ResourceVersion:"37628", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.52/23] from ovn-kubernetes 2025-12-08T17:44:26.292735551+00:00 stderr F 2025-12-08T17:44:26Z [verbose] ADD finished CNI request ContainerID:"79fd674b2f1982666d841b20537687d86fe6bb801a03c4ed53a6f95d3bc986ac" Netns:"/var/run/netns/ac4dd58d-a0ec-4bd2-abf7-d3047aaac169" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-w7jrs;K8S_POD_INFRA_CONTAINER_ID=79fd674b2f1982666d841b20537687d86fe6bb801a03c4ed53a6f95d3bc986ac;K8S_POD_UID=ba520484-b334-4e08-8f1a-5eb554b62dc4" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"f6:1e:00:98:4d:07\",\"name\":\"79fd674b2f19826\"},{\"mac\":\"0a:58:0a:d9:00:34\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/ac4dd58d-a0ec-4bd2-abf7-d3047aaac169\"}],\"ips\":[{\"address\":\"10.217.0.52/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:26.486163087+00:00 stderr F 2025-12-08T17:44:26Z [verbose] DEL starting CNI request 
ContainerID:"e7e4e5294ae9ba605e56ade0a4247be53479964b4053089fd141b6910e3a9015" Netns:"/var/run/netns/eebcfb75-07f1-4b55-bfff-9f893537c6ab" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420250-qhrfp;K8S_POD_INFRA_CONTAINER_ID=e7e4e5294ae9ba605e56ade0a4247be53479964b4053089fd141b6910e3a9015;K8S_POD_UID=742843af-c521-4d4a-beea-e6feae8140e1" Path:"" 2025-12-08T17:44:26.489899218+00:00 stderr F 2025-12-08T17:44:26Z [verbose] Del: openshift-operator-lifecycle-manager:collect-profiles-29420250-qhrfp:742843af-c521-4d4a-beea-e6feae8140e1:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:44:26.652349620+00:00 stderr F 2025-12-08T17:44:26Z [verbose] DEL finished CNI request ContainerID:"e7e4e5294ae9ba605e56ade0a4247be53479964b4053089fd141b6910e3a9015" Netns:"/var/run/netns/eebcfb75-07f1-4b55-bfff-9f893537c6ab" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420250-qhrfp;K8S_POD_INFRA_CONTAINER_ID=e7e4e5294ae9ba605e56ade0a4247be53479964b4053089fd141b6910e3a9015;K8S_POD_UID=742843af-c521-4d4a-beea-e6feae8140e1" Path:"", result: "", err: 2025-12-08T17:44:28.083966489+00:00 stderr F 2025-12-08T17:44:28Z [verbose] ADD starting CNI request ContainerID:"76b530a11ce10f34f5cdd09e5e49ebd752f7193db3ca9a4a4a8a5819dfcecf1d" Netns:"/var/run/netns/e70cb80e-cf98-4dc7-8578-3ab5be361b87" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=revision-pruner-11-crc;K8S_POD_INFRA_CONTAINER_ID=76b530a11ce10f34f5cdd09e5e49ebd752f7193db3ca9a4a4a8a5819dfcecf1d;K8S_POD_UID=46f67036-aba9-49da-a298-d68e56b91e00" Path:"" 2025-12-08T17:44:28.246455452+00:00 stderr F I1208 17:44:28.236448 10595 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:28.246455452+00:00 stderr F I1208 17:44:28.237149 10595 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:28.246455452+00:00 stderr F I1208 17:44:28.237159 10595 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:28.246455452+00:00 stderr F I1208 17:44:28.237165 10595 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:28.246455452+00:00 stderr F I1208 17:44:28.237172 10595 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:28.246455452+00:00 stderr F 2025-12-08T17:44:28Z [verbose] Add: openshift-kube-apiserver:revision-pruner-11-crc:46f67036-aba9-49da-a298-d68e56b91e00:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"76b530a11ce10f3","mac":"56:c1:16:0c:f4:5c"},{"name":"eth0","mac":"0a:58:0a:d9:00:35","sandbox":"/var/run/netns/e70cb80e-cf98-4dc7-8578-3ab5be361b87"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.53/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:28.246455452+00:00 stderr F I1208 17:44:28.245267 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-apiserver", Name:"revision-pruner-11-crc", UID:"46f67036-aba9-49da-a298-d68e56b91e00", APIVersion:"v1", ResourceVersion:"37899", FieldPath:""}): type: 'Normal' reason: 
'AddedInterface' Add eth0 [10.217.0.53/23] from ovn-kubernetes 2025-12-08T17:44:28.258539952+00:00 stderr F 2025-12-08T17:44:28Z [verbose] ADD finished CNI request ContainerID:"76b530a11ce10f34f5cdd09e5e49ebd752f7193db3ca9a4a4a8a5819dfcecf1d" Netns:"/var/run/netns/e70cb80e-cf98-4dc7-8578-3ab5be361b87" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=revision-pruner-11-crc;K8S_POD_INFRA_CONTAINER_ID=76b530a11ce10f34f5cdd09e5e49ebd752f7193db3ca9a4a4a8a5819dfcecf1d;K8S_POD_UID=46f67036-aba9-49da-a298-d68e56b91e00" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"56:c1:16:0c:f4:5c\",\"name\":\"76b530a11ce10f3\"},{\"mac\":\"0a:58:0a:d9:00:35\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/e70cb80e-cf98-4dc7-8578-3ab5be361b87\"}],\"ips\":[{\"address\":\"10.217.0.53/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:29.690656876+00:00 stderr F 2025-12-08T17:44:29Z [verbose] DEL starting CNI request ContainerID:"1527bd07152c4241e773659ffece99cf6e3c5940dd78184bc260f9235526b2d0" Netns:"/var/run/netns/0cf08c50-91e3-4cb3-9a83-bcbdd761f25d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-scheduler;K8S_POD_NAME=revision-pruner-6-crc;K8S_POD_INFRA_CONTAINER_ID=1527bd07152c4241e773659ffece99cf6e3c5940dd78184bc260f9235526b2d0;K8S_POD_UID=c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6" Path:"" 2025-12-08T17:44:29.691637082+00:00 stderr F 2025-12-08T17:44:29Z [verbose] Del: openshift-kube-scheduler:revision-pruner-6-crc:c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:44:29.826119270+00:00 stderr F 2025-12-08T17:44:29Z [verbose] DEL finished CNI request ContainerID:"1527bd07152c4241e773659ffece99cf6e3c5940dd78184bc260f9235526b2d0" Netns:"/var/run/netns/0cf08c50-91e3-4cb3-9a83-bcbdd761f25d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-scheduler;K8S_POD_NAME=revision-pruner-6-crc;K8S_POD_INFRA_CONTAINER_ID=1527bd07152c4241e773659ffece99cf6e3c5940dd78184bc260f9235526b2d0;K8S_POD_UID=c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6" Path:"", result: "", err: 2025-12-08T17:44:31.685921440+00:00 stderr F 2025-12-08T17:44:31Z [verbose] DEL starting CNI request ContainerID:"76b530a11ce10f34f5cdd09e5e49ebd752f7193db3ca9a4a4a8a5819dfcecf1d" Netns:"/var/run/netns/e70cb80e-cf98-4dc7-8578-3ab5be361b87" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=revision-pruner-11-crc;K8S_POD_INFRA_CONTAINER_ID=76b530a11ce10f34f5cdd09e5e49ebd752f7193db3ca9a4a4a8a5819dfcecf1d;K8S_POD_UID=46f67036-aba9-49da-a298-d68e56b91e00" Path:"" 2025-12-08T17:44:31.685921440+00:00 stderr F 2025-12-08T17:44:31Z [verbose] Del: openshift-kube-apiserver:revision-pruner-11-crc:46f67036-aba9-49da-a298-d68e56b91e00:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:44:31.803942529+00:00 stderr F 2025-12-08T17:44:31Z [verbose] DEL finished CNI request ContainerID:"76b530a11ce10f34f5cdd09e5e49ebd752f7193db3ca9a4a4a8a5819dfcecf1d" Netns:"/var/run/netns/e70cb80e-cf98-4dc7-8578-3ab5be361b87" 
IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=revision-pruner-11-crc;K8S_POD_INFRA_CONTAINER_ID=76b530a11ce10f34f5cdd09e5e49ebd752f7193db3ca9a4a4a8a5819dfcecf1d;K8S_POD_UID=46f67036-aba9-49da-a298-d68e56b91e00" Path:"", result: "", err: 2025-12-08T17:44:41.135372660+00:00 stderr F 2025-12-08T17:44:41Z [verbose] ADD starting CNI request ContainerID:"ef2774eb27b084c192ab2fbfe7c52e1babc8bccadb79956c3c83e557c0e28270" Netns:"/var/run/netns/7478f019-784a-4444-b828-453e6b9ff1ba" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-image-registry;K8S_POD_NAME=image-registry-66587d64c8-s6hn4;K8S_POD_INFRA_CONTAINER_ID=ef2774eb27b084c192ab2fbfe7c52e1babc8bccadb79956c3c83e557c0e28270;K8S_POD_UID=1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc" Path:"" 2025-12-08T17:44:41.218161394+00:00 stderr F 2025-12-08T17:44:41Z [verbose] ADD starting CNI request ContainerID:"1ba4ac8b3e5261e35866bcc88a4e9fdb4766766105be6ee4c4056b67467c9190" Netns:"/var/run/netns/8b42aaa3-7a24-4399-b13e-20fdcc47462b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-network-console;K8S_POD_NAME=networking-console-plugin-5ff7774fd9-nljh6;K8S_POD_INFRA_CONTAINER_ID=1ba4ac8b3e5261e35866bcc88a4e9fdb4766766105be6ee4c4056b67467c9190;K8S_POD_UID=6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Path:"" 2025-12-08T17:44:41.309894077+00:00 stderr F 2025-12-08T17:44:41Z [verbose] ADD starting CNI request ContainerID:"84afb7c8a9e13df098ddcb53b85dae340e7050cd4bd41b251d4fc9612abe78c4" Netns:"/var/run/netns/60951d9c-e886-4778-82ff-c4719e5aeb13" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-network-diagnostics;K8S_POD_NAME=network-check-target-fhkjl;K8S_POD_INFRA_CONTAINER_ID=84afb7c8a9e13df098ddcb53b85dae340e7050cd4bd41b251d4fc9612abe78c4;K8S_POD_UID=17b87002-b798-480a-8e17-83053d698239" Path:"" 2025-12-08T17:44:41.320953845+00:00 stderr F 2025-12-08T17:44:41Z [verbose] ADD starting CNI request ContainerID:"b0fdb2b61c63d63f0329ade1afeea4b4caf60c7102ce0dc1b6283051f89919e7" Netns:"/var/run/netns/3baaa0b3-374b-4a5d-bf81-8e9034dc9551" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-network-diagnostics;K8S_POD_NAME=network-check-source-5bb8f5cd97-xdvz5;K8S_POD_INFRA_CONTAINER_ID=b0fdb2b61c63d63f0329ade1afeea4b4caf60c7102ce0dc1b6283051f89919e7;K8S_POD_UID=f863fff9-286a-45fa-b8f0-8a86994b8440" Path:"" 2025-12-08T17:44:41.320953845+00:00 stderr F I1208 17:44:41.314958 11061 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:41.320953845+00:00 stderr F I1208 17:44:41.315445 11061 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:41.320953845+00:00 stderr F I1208 17:44:41.315491 11061 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:41.320953845+00:00 stderr F I1208 17:44:41.315504 11061 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:41.320953845+00:00 stderr F I1208 17:44:41.315511 11061 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:41.320953845+00:00 stderr F 2025-12-08T17:44:41Z [verbose] Add: openshift-image-registry:image-registry-66587d64c8-s6hn4:1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc:ovn-kubernetes(ovn-kubernetes):eth0 
{"cniVersion":"0.4.0","interfaces":[{"name":"ef2774eb27b084c","mac":"06:cf:fb:a6:d4:ed"},{"name":"eth0","mac":"0a:58:0a:d9:00:19","sandbox":"/var/run/netns/7478f019-784a-4444-b828-453e6b9ff1ba"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.25/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:41.320953845+00:00 stderr F I1208 17:44:41.320467 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-image-registry", Name:"image-registry-66587d64c8-s6hn4", UID:"1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc", APIVersion:"v1", ResourceVersion:"37006", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.25/23] from ovn-kubernetes 2025-12-08T17:44:41.337414453+00:00 stderr F 2025-12-08T17:44:41Z [verbose] ADD finished CNI request ContainerID:"ef2774eb27b084c192ab2fbfe7c52e1babc8bccadb79956c3c83e557c0e28270" Netns:"/var/run/netns/7478f019-784a-4444-b828-453e6b9ff1ba" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-image-registry;K8S_POD_NAME=image-registry-66587d64c8-s6hn4;K8S_POD_INFRA_CONTAINER_ID=ef2774eb27b084c192ab2fbfe7c52e1babc8bccadb79956c3c83e557c0e28270;K8S_POD_UID=1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"06:cf:fb:a6:d4:ed\",\"name\":\"ef2774eb27b084c\"},{\"mac\":\"0a:58:0a:d9:00:19\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/7478f019-784a-4444-b828-453e6b9ff1ba\"}],\"ips\":[{\"address\":\"10.217.0.25/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:41.369798743+00:00 stderr F I1208 17:44:41.364486 11089 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:41.369798743+00:00 stderr F I1208 17:44:41.364602 11089 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:41.369798743+00:00 stderr F I1208 17:44:41.364612 11089 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:41.369798743+00:00 stderr F I1208 17:44:41.364628 11089 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:41.369798743+00:00 stderr F I1208 17:44:41.364635 11089 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:41.370560714+00:00 stderr F 2025-12-08T17:44:41Z [verbose] Add: openshift-network-console:networking-console-plugin-5ff7774fd9-nljh6:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"1ba4ac8b3e5261e","mac":"b6:44:03:2e:a2:dc"},{"name":"eth0","mac":"0a:58:0a:d9:00:3b","sandbox":"/var/run/netns/8b42aaa3-7a24-4399-b13e-20fdcc47462b"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.59/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:41.370560714+00:00 stderr F I1208 17:44:41.370422 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-network-console", Name:"networking-console-plugin-5ff7774fd9-nljh6", UID:"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141", APIVersion:"v1", ResourceVersion:"36435", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.59/23] from ovn-kubernetes 2025-12-08T17:44:41.388383691+00:00 stderr F 2025-12-08T17:44:41Z [verbose] ADD finished CNI request ContainerID:"1ba4ac8b3e5261e35866bcc88a4e9fdb4766766105be6ee4c4056b67467c9190" Netns:"/var/run/netns/8b42aaa3-7a24-4399-b13e-20fdcc47462b" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-network-console;K8S_POD_NAME=networking-console-plugin-5ff7774fd9-nljh6;K8S_POD_INFRA_CONTAINER_ID=1ba4ac8b3e5261e35866bcc88a4e9fdb4766766105be6ee4c4056b67467c9190;K8S_POD_UID=6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"b6:44:03:2e:a2:dc\",\"name\":\"1ba4ac8b3e5261e\"},{\"mac\":\"0a:58:0a:d9:00:3b\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/8b42aaa3-7a24-4399-b13e-20fdcc47462b\"}],\"ips\":[{\"address\":\"10.217.0.59/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:41.471719939+00:00 stderr F I1208 17:44:41.464634 11160 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:41.471719939+00:00 stderr F I1208 17:44:41.464787 11160 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:41.471719939+00:00 stderr F I1208 17:44:41.464799 11160 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:41.471719939+00:00 stderr F I1208 17:44:41.464805 11160 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:41.471719939+00:00 stderr F I1208 17:44:41.464811 11160 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:41.472024768+00:00 stderr F 2025-12-08T17:44:41Z [verbose] Add: openshift-network-diagnostics:network-check-source-5bb8f5cd97-xdvz5:f863fff9-286a-45fa-b8f0-8a86994b8440:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"b0fdb2b61c63d63","mac":"9a:3f:be:e5:50:54"},{"name":"eth0","mac":"0a:58:0a:d9:00:3a","sandbox":"/var/run/netns/3baaa0b3-374b-4a5d-bf81-8e9034dc9551"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.58/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:41.472221724+00:00 stderr F I1208 17:44:41.472171 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-network-diagnostics", Name:"network-check-source-5bb8f5cd97-xdvz5", UID:"f863fff9-286a-45fa-b8f0-8a86994b8440", APIVersion:"v1", ResourceVersion:"36449", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.58/23] from ovn-kubernetes 2025-12-08T17:44:41.487113268+00:00 stderr F 2025-12-08T17:44:41Z [verbose] ADD finished CNI request ContainerID:"b0fdb2b61c63d63f0329ade1afeea4b4caf60c7102ce0dc1b6283051f89919e7" Netns:"/var/run/netns/3baaa0b3-374b-4a5d-bf81-8e9034dc9551" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-network-diagnostics;K8S_POD_NAME=network-check-source-5bb8f5cd97-xdvz5;K8S_POD_INFRA_CONTAINER_ID=b0fdb2b61c63d63f0329ade1afeea4b4caf60c7102ce0dc1b6283051f89919e7;K8S_POD_UID=f863fff9-286a-45fa-b8f0-8a86994b8440" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"9a:3f:be:e5:50:54\",\"name\":\"b0fdb2b61c63d63\"},{\"mac\":\"0a:58:0a:d9:00:3a\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/3baaa0b3-374b-4a5d-bf81-8e9034dc9551\"}],\"ips\":[{\"address\":\"10.217.0.58/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:41.505700435+00:00 stderr F I1208 17:44:41.500010 11151 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:41.505700435+00:00 stderr F I1208 17:44:41.500143 11151 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:41.505700435+00:00 stderr F I1208 
17:44:41.500159 11151 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:41.505700435+00:00 stderr F I1208 17:44:41.500170 11151 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:41.505700435+00:00 stderr F I1208 17:44:41.500178 11151 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:41.508835882+00:00 stderr F 2025-12-08T17:44:41Z [verbose] Add: openshift-network-diagnostics:network-check-target-fhkjl:17b87002-b798-480a-8e17-83053d698239:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"84afb7c8a9e13df","mac":"72:9c:4e:46:a5:bf"},{"name":"eth0","mac":"0a:58:0a:d9:00:04","sandbox":"/var/run/netns/60951d9c-e886-4778-82ff-c4719e5aeb13"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.4/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:44:41.508835882+00:00 stderr F I1208 17:44:41.506053 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-network-diagnostics", Name:"network-check-target-fhkjl", UID:"17b87002-b798-480a-8e17-83053d698239", APIVersion:"v1", ResourceVersion:"36452", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.4/23] from ovn-kubernetes 2025-12-08T17:44:41.518892952+00:00 stderr F 2025-12-08T17:44:41Z [verbose] ADD finished CNI request ContainerID:"84afb7c8a9e13df098ddcb53b85dae340e7050cd4bd41b251d4fc9612abe78c4" Netns:"/var/run/netns/60951d9c-e886-4778-82ff-c4719e5aeb13" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-network-diagnostics;K8S_POD_NAME=network-check-target-fhkjl;K8S_POD_INFRA_CONTAINER_ID=84afb7c8a9e13df098ddcb53b85dae340e7050cd4bd41b251d4fc9612abe78c4;K8S_POD_UID=17b87002-b798-480a-8e17-83053d698239" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"72:9c:4e:46:a5:bf\",\"name\":\"84afb7c8a9e13df\"},{\"mac\":\"0a:58:0a:d9:00:04\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/60951d9c-e886-4778-82ff-c4719e5aeb13\"}],\"ips\":[{\"address\":\"10.217.0.4/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:44:58.537022751+00:00 stderr F 2025-12-08T17:44:58Z [verbose] DEL starting CNI request ContainerID:"ef0290741bfadc050351726657ea5d1e90d89bb42d86f01f2b2081dd32e004ea" Netns:"/var/run/netns/2ec1e7e4-9840-4512-9258-2dbe7411cb16" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-sb7gg;K8S_POD_INFRA_CONTAINER_ID=ef0290741bfadc050351726657ea5d1e90d89bb42d86f01f2b2081dd32e004ea;K8S_POD_UID=e4f4fc3c-88d2-455a-a8d2-209388238c9a" Path:"" 2025-12-08T17:44:58.538530793+00:00 stderr F 2025-12-08T17:44:58Z [verbose] Del: openshift-marketplace:community-operators-sb7gg:e4f4fc3c-88d2-455a-a8d2-209388238c9a:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:44:58.569747272+00:00 stderr F 2025-12-08T17:44:58Z [verbose] DEL starting CNI request ContainerID:"bebe1f0da9278f62d8caef7874fc35428010d09942af1719c37fac3e6c4e8b5b" Netns:"/var/run/netns/15c4923b-8672-4cf2-83ab-b897e464cf33" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-6m6rs;K8S_POD_INFRA_CONTAINER_ID=bebe1f0da9278f62d8caef7874fc35428010d09942af1719c37fac3e6c4e8b5b;K8S_POD_UID=caab7ab2-a04e-42fc-bd64-76c76ee3755d" Path:"" 2025-12-08T17:44:58.569747272+00:00 stderr F 2025-12-08T17:44:58Z [verbose] Del: openshift-marketplace:redhat-marketplace-6m6rs:caab7ab2-a04e-42fc-bd64-76c76ee3755d:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:44:58.797478858+00:00 stderr F 2025-12-08T17:44:58Z [verbose] DEL starting CNI request ContainerID:"ae661eb10cf40ee037c5bbb75003eb4cc6748efa6c166b48128d05415dc58f58" Netns:"/var/run/netns/3aeca3dc-68b7-424a-8c0c-6f9f76c7cf83" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-n5vp7;K8S_POD_INFRA_CONTAINER_ID=ae661eb10cf40ee037c5bbb75003eb4cc6748efa6c166b48128d05415dc58f58;K8S_POD_UID=8c05f773-74bd-433b-84ce-a7f5430d9b55" Path:"" 2025-12-08T17:44:58.797992392+00:00 stderr F 2025-12-08T17:44:58Z [verbose] Del: openshift-marketplace:certified-operators-n5vp7:8c05f773-74bd-433b-84ce-a7f5430d9b55:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:44:58.814311606+00:00 stderr F 2025-12-08T17:44:58Z [verbose] DEL finished CNI request ContainerID:"ef0290741bfadc050351726657ea5d1e90d89bb42d86f01f2b2081dd32e004ea" Netns:"/var/run/netns/2ec1e7e4-9840-4512-9258-2dbe7411cb16" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-sb7gg;K8S_POD_INFRA_CONTAINER_ID=ef0290741bfadc050351726657ea5d1e90d89bb42d86f01f2b2081dd32e004ea;K8S_POD_UID=e4f4fc3c-88d2-455a-a8d2-209388238c9a" Path:"", result: "", err: 2025-12-08T17:44:58.902146501+00:00 stderr F 2025-12-08T17:44:58Z [verbose] DEL finished CNI request ContainerID:"bebe1f0da9278f62d8caef7874fc35428010d09942af1719c37fac3e6c4e8b5b" Netns:"/var/run/netns/15c4923b-8672-4cf2-83ab-b897e464cf33" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-6m6rs;K8S_POD_INFRA_CONTAINER_ID=bebe1f0da9278f62d8caef7874fc35428010d09942af1719c37fac3e6c4e8b5b;K8S_POD_UID=caab7ab2-a04e-42fc-bd64-76c76ee3755d" Path:"", result: "", err: 2025-12-08T17:44:59.061538075+00:00 stderr F 2025-12-08T17:44:59Z [verbose] DEL finished CNI request ContainerID:"ae661eb10cf40ee037c5bbb75003eb4cc6748efa6c166b48128d05415dc58f58" Netns:"/var/run/netns/3aeca3dc-68b7-424a-8c0c-6f9f76c7cf83" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-n5vp7;K8S_POD_INFRA_CONTAINER_ID=ae661eb10cf40ee037c5bbb75003eb4cc6748efa6c166b48128d05415dc58f58;K8S_POD_UID=8c05f773-74bd-433b-84ce-a7f5430d9b55" Path:"", result: "", err: 2025-12-08T17:45:00.311272860+00:00 stderr F 2025-12-08T17:45:00Z [verbose] DEL starting CNI request ContainerID:"79fd674b2f1982666d841b20537687d86fe6bb801a03c4ed53a6f95d3bc986ac" Netns:"/var/run/netns/ac4dd58d-a0ec-4bd2-abf7-d3047aaac169" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-w7jrs;K8S_POD_INFRA_CONTAINER_ID=79fd674b2f1982666d841b20537687d86fe6bb801a03c4ed53a6f95d3bc986ac;K8S_POD_UID=ba520484-b334-4e08-8f1a-5eb554b62dc4" Path:"" 2025-12-08T17:45:00.311775694+00:00 stderr F 2025-12-08T17:45:00Z [verbose] Del: openshift-marketplace:redhat-operators-w7jrs:ba520484-b334-4e08-8f1a-5eb554b62dc4:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:45:00.504804845+00:00 stderr F 2025-12-08T17:45:00Z [verbose] ADD starting CNI request ContainerID:"7810789719cfd0fd8d7090c5dccc5d21f47fb6dcf6d7d8296f3c4eea157842d1" Netns:"/var/run/netns/7a14d57d-f2c4-49c8-83e8-5511852c836b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420265-vsxwc;K8S_POD_INFRA_CONTAINER_ID=7810789719cfd0fd8d7090c5dccc5d21f47fb6dcf6d7d8296f3c4eea157842d1;K8S_POD_UID=3ec0e45e-87cc-4b67-b137-ac7179bf7d74" Path:"" 2025-12-08T17:45:00.604263452+00:00 stderr P 2025-12-08T17:45:00Z [verbose] 2025-12-08T17:45:00.604319983+00:00 stderr P DEL finished CNI request ContainerID:"79fd674b2f1982666d841b20537687d86fe6bb801a03c4ed53a6f95d3bc986ac" Netns:"/var/run/netns/ac4dd58d-a0ec-4bd2-abf7-d3047aaac169" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-w7jrs;K8S_POD_INFRA_CONTAINER_ID=79fd674b2f1982666d841b20537687d86fe6bb801a03c4ed53a6f95d3bc986ac;K8S_POD_UID=ba520484-b334-4e08-8f1a-5eb554b62dc4" Path:"", result: "", err: 2025-12-08T17:45:00.604341554+00:00 stderr F 2025-12-08T17:45:00.833856840+00:00 stderr F I1208 17:45:00.829361 12217 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:45:00.833856840+00:00 stderr F I1208 17:45:00.829817 12217 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:45:00.833856840+00:00 stderr F I1208 17:45:00.829835 12217 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:45:00.833856840+00:00 stderr F I1208 17:45:00.829843 12217 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:45:00.833856840+00:00 stderr F I1208 17:45:00.829850 12217 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:45:00.834173789+00:00 stderr F 2025-12-08T17:45:00Z [verbose] Add: openshift-operator-lifecycle-manager:collect-profiles-29420265-vsxwc:3ec0e45e-87cc-4b67-b137-ac7179bf7d74:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"7810789719cfd0f","mac":"de:71:39:6b:eb:26"},{"name":"eth0","mac":"0a:58:0a:d9:00:36","sandbox":"/var/run/netns/7a14d57d-f2c4-49c8-83e8-5511852c836b"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.54/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:45:00.834348074+00:00 stderr F I1208 17:45:00.834319 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-operator-lifecycle-manager", Name:"collect-profiles-29420265-vsxwc", UID:"3ec0e45e-87cc-4b67-b137-ac7179bf7d74", APIVersion:"v1", ResourceVersion:"38561", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.54/23] from ovn-kubernetes 
2025-12-08T17:45:00.844836236+00:00 stderr F 2025-12-08T17:45:00Z [verbose] ADD finished CNI request ContainerID:"7810789719cfd0fd8d7090c5dccc5d21f47fb6dcf6d7d8296f3c4eea157842d1" Netns:"/var/run/netns/7a14d57d-f2c4-49c8-83e8-5511852c836b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420265-vsxwc;K8S_POD_INFRA_CONTAINER_ID=7810789719cfd0fd8d7090c5dccc5d21f47fb6dcf6d7d8296f3c4eea157842d1;K8S_POD_UID=3ec0e45e-87cc-4b67-b137-ac7179bf7d74" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"de:71:39:6b:eb:26\",\"name\":\"7810789719cfd0f\"},{\"mac\":\"0a:58:0a:d9:00:36\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/7a14d57d-f2c4-49c8-83e8-5511852c836b\"}],\"ips\":[{\"address\":\"10.217.0.54/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:45:02.995672873+00:00 stderr F 2025-12-08T17:45:02Z [verbose] DEL starting CNI request ContainerID:"7810789719cfd0fd8d7090c5dccc5d21f47fb6dcf6d7d8296f3c4eea157842d1" Netns:"/var/run/netns/7a14d57d-f2c4-49c8-83e8-5511852c836b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420265-vsxwc;K8S_POD_INFRA_CONTAINER_ID=7810789719cfd0fd8d7090c5dccc5d21f47fb6dcf6d7d8296f3c4eea157842d1;K8S_POD_UID=3ec0e45e-87cc-4b67-b137-ac7179bf7d74" Path:"" 2025-12-08T17:45:02.996175337+00:00 stderr F 2025-12-08T17:45:02Z [verbose] Del: openshift-operator-lifecycle-manager:collect-profiles-29420265-vsxwc:3ec0e45e-87cc-4b67-b137-ac7179bf7d74:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:45:03.253320312+00:00 stderr F 2025-12-08T17:45:03Z [verbose] DEL finished CNI request ContainerID:"7810789719cfd0fd8d7090c5dccc5d21f47fb6dcf6d7d8296f3c4eea157842d1" Netns:"/var/run/netns/7a14d57d-f2c4-49c8-83e8-5511852c836b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-operator-lifecycle-manager;K8S_POD_NAME=collect-profiles-29420265-vsxwc;K8S_POD_INFRA_CONTAINER_ID=7810789719cfd0fd8d7090c5dccc5d21f47fb6dcf6d7d8296f3c4eea157842d1;K8S_POD_UID=3ec0e45e-87cc-4b67-b137-ac7179bf7d74" Path:"", result: "", err: 2025-12-08T17:45:04.167756766+00:00 stderr F 2025-12-08T17:45:04Z [verbose] ADD starting CNI request ContainerID:"1e64deb33e2db93be18e177b42b0973ac5c5d3af629b652c416cc89d95dd00d3" Netns:"/var/run/netns/18926e1d-eae8-43f1-8dca-aa0d2fcee77e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=revision-pruner-12-crc;K8S_POD_INFRA_CONTAINER_ID=1e64deb33e2db93be18e177b42b0973ac5c5d3af629b652c416cc89d95dd00d3;K8S_POD_UID=1087bc4c-df19-4954-92b2-e9bfc266fdab" Path:"" 2025-12-08T17:45:04.519077392+00:00 stderr F I1208 17:45:04.513983 12537 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:45:04.519077392+00:00 stderr F I1208 17:45:04.514447 12537 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:45:04.519077392+00:00 stderr F I1208 17:45:04.514459 12537 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:45:04.519077392+00:00 stderr F I1208 17:45:04.514467 12537 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 
2025-12-08T17:45:04.519077392+00:00 stderr F I1208 17:45:04.514474 12537 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:45:04.519556795+00:00 stderr F 2025-12-08T17:45:04Z [verbose] Add: openshift-kube-apiserver:revision-pruner-12-crc:1087bc4c-df19-4954-92b2-e9bfc266fdab:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"1e64deb33e2db93","mac":"32:0d:15:17:79:3b"},{"name":"eth0","mac":"0a:58:0a:d9:00:37","sandbox":"/var/run/netns/18926e1d-eae8-43f1-8dca-aa0d2fcee77e"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.55/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:45:04.521684874+00:00 stderr F I1208 17:45:04.521586 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-apiserver", Name:"revision-pruner-12-crc", UID:"1087bc4c-df19-4954-92b2-e9bfc266fdab", APIVersion:"v1", ResourceVersion:"38596", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.55/23] from ovn-kubernetes 2025-12-08T17:45:04.540303282+00:00 stderr F 2025-12-08T17:45:04Z [verbose] ADD finished CNI request ContainerID:"1e64deb33e2db93be18e177b42b0973ac5c5d3af629b652c416cc89d95dd00d3" Netns:"/var/run/netns/18926e1d-eae8-43f1-8dca-aa0d2fcee77e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=revision-pruner-12-crc;K8S_POD_INFRA_CONTAINER_ID=1e64deb33e2db93be18e177b42b0973ac5c5d3af629b652c416cc89d95dd00d3;K8S_POD_UID=1087bc4c-df19-4954-92b2-e9bfc266fdab" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"32:0d:15:17:79:3b\",\"name\":\"1e64deb33e2db93\"},{\"mac\":\"0a:58:0a:d9:00:37\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/18926e1d-eae8-43f1-8dca-aa0d2fcee77e\"}],\"ips\":[{\"address\":\"10.217.0.55/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:45:07.028697141+00:00 stderr F 2025-12-08T17:45:07Z [verbose] DEL starting CNI request ContainerID:"1e64deb33e2db93be18e177b42b0973ac5c5d3af629b652c416cc89d95dd00d3" Netns:"/var/run/netns/18926e1d-eae8-43f1-8dca-aa0d2fcee77e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=revision-pruner-12-crc;K8S_POD_INFRA_CONTAINER_ID=1e64deb33e2db93be18e177b42b0973ac5c5d3af629b652c416cc89d95dd00d3;K8S_POD_UID=1087bc4c-df19-4954-92b2-e9bfc266fdab" Path:"" 2025-12-08T17:45:07.029472543+00:00 stderr F 2025-12-08T17:45:07Z [verbose] Del: openshift-kube-apiserver:revision-pruner-12-crc:1087bc4c-df19-4954-92b2-e9bfc266fdab:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:45:07.272502005+00:00 stderr F 2025-12-08T17:45:07Z [verbose] DEL finished CNI request ContainerID:"1e64deb33e2db93be18e177b42b0973ac5c5d3af629b652c416cc89d95dd00d3" Netns:"/var/run/netns/18926e1d-eae8-43f1-8dca-aa0d2fcee77e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=revision-pruner-12-crc;K8S_POD_INFRA_CONTAINER_ID=1e64deb33e2db93be18e177b42b0973ac5c5d3af629b652c416cc89d95dd00d3;K8S_POD_UID=1087bc4c-df19-4954-92b2-e9bfc266fdab" Path:"", result: "", err: 2025-12-08T17:45:11.735046835+00:00 stderr F 2025-12-08T17:45:11Z [verbose] ADD starting CNI request ContainerID:"b8a864a71dfd0d83f1246550bc2ef6c6048487044c12c63e1738f0ca50d342f8" 
Netns:"/var/run/netns/417cfb3d-e89f-4818-9139-8e25d490060a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=installer-12-crc;K8S_POD_INFRA_CONTAINER_ID=b8a864a71dfd0d83f1246550bc2ef6c6048487044c12c63e1738f0ca50d342f8;K8S_POD_UID=158725bd-7556-4281-a3cb-acaa6baf5d8c" Path:"" 2025-12-08T17:45:12.096361779+00:00 stderr F I1208 17:45:12.091152 12992 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:45:12.096361779+00:00 stderr F I1208 17:45:12.091521 12992 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:45:12.096361779+00:00 stderr F I1208 17:45:12.091535 12992 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:45:12.096361779+00:00 stderr F I1208 17:45:12.091542 12992 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:45:12.096361779+00:00 stderr F I1208 17:45:12.091548 12992 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:45:12.096931405+00:00 stderr F 2025-12-08T17:45:12Z [verbose] Add: openshift-kube-apiserver:installer-12-crc:158725bd-7556-4281-a3cb-acaa6baf5d8c:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"b8a864a71dfd0d8","mac":"22:ae:62:17:f3:45"},{"name":"eth0","mac":"0a:58:0a:d9:00:38","sandbox":"/var/run/netns/417cfb3d-e89f-4818-9139-8e25d490060a"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.56/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:45:12.097172082+00:00 stderr F I1208 17:45:12.097131 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-apiserver", Name:"installer-12-crc", UID:"158725bd-7556-4281-a3cb-acaa6baf5d8c", APIVersion:"v1", ResourceVersion:"38653", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.56/23] from ovn-kubernetes 2025-12-08T17:45:12.115290936+00:00 stderr F 2025-12-08T17:45:12Z [verbose] ADD finished CNI request ContainerID:"b8a864a71dfd0d83f1246550bc2ef6c6048487044c12c63e1738f0ca50d342f8" Netns:"/var/run/netns/417cfb3d-e89f-4818-9139-8e25d490060a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=installer-12-crc;K8S_POD_INFRA_CONTAINER_ID=b8a864a71dfd0d83f1246550bc2ef6c6048487044c12c63e1738f0ca50d342f8;K8S_POD_UID=158725bd-7556-4281-a3cb-acaa6baf5d8c" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"22:ae:62:17:f3:45\",\"name\":\"b8a864a71dfd0d8\"},{\"mac\":\"0a:58:0a:d9:00:38\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/417cfb3d-e89f-4818-9139-8e25d490060a\"}],\"ips\":[{\"address\":\"10.217.0.56/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:45:47.310166923+00:00 stderr F 2025-12-08T17:45:47Z [verbose] DEL starting CNI request ContainerID:"803ad93dfa8700dbf09b3e6a4e33d63e186ef2c8cc3dfa4d900a01a2b041fbcf" Netns:"/var/run/netns/9665a549-0227-4119-90f3-dff23b56b248" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-66458b6674-ztdrc;K8S_POD_INFRA_CONTAINER_ID=803ad93dfa8700dbf09b3e6a4e33d63e186ef2c8cc3dfa4d900a01a2b041fbcf;K8S_POD_UID=9bdb30d2-8f69-4d2d-9bf1-3bc70f85369e" Path:"" 2025-12-08T17:45:47.311681028+00:00 stderr F 2025-12-08T17:45:47Z [verbose] Del: openshift-authentication:oauth-openshift-66458b6674-ztdrc:9bdb30d2-8f69-4d2d-9bf1-3bc70f85369e:ovn-kubernetes:eth0 
{"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:45:47.503376982+00:00 stderr F 2025-12-08T17:45:47Z [verbose] DEL finished CNI request ContainerID:"803ad93dfa8700dbf09b3e6a4e33d63e186ef2c8cc3dfa4d900a01a2b041fbcf" Netns:"/var/run/netns/9665a549-0227-4119-90f3-dff23b56b248" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-66458b6674-ztdrc;K8S_POD_INFRA_CONTAINER_ID=803ad93dfa8700dbf09b3e6a4e33d63e186ef2c8cc3dfa4d900a01a2b041fbcf;K8S_POD_UID=9bdb30d2-8f69-4d2d-9bf1-3bc70f85369e" Path:"", result: "", err: 2025-12-08T17:45:47.936196353+00:00 stderr F 2025-12-08T17:45:47Z [verbose] ADD starting CNI request ContainerID:"abc3a2e84c88a41b34688913233ffa318a8b6cce7c084027b6e31005e2a9a619" Netns:"/var/run/netns/6f31117e-871a-4017-a1ef-578a85582f34" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-57ffdf54dd-5dg99;K8S_POD_INFRA_CONTAINER_ID=abc3a2e84c88a41b34688913233ffa318a8b6cce7c084027b6e31005e2a9a619;K8S_POD_UID=0c242c34-d446-4428-b8d7-0b8dbf4137c9" Path:"" 2025-12-08T17:45:48.074490504+00:00 stderr F I1208 17:45:48.069216 13884 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:45:48.074490504+00:00 stderr F I1208 17:45:48.069687 13884 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:45:48.074490504+00:00 stderr F I1208 17:45:48.069723 13884 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:45:48.074490504+00:00 stderr F I1208 17:45:48.069733 13884 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:45:48.074490504+00:00 stderr F I1208 17:45:48.069741 13884 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:45:48.074819723+00:00 stderr F 2025-12-08T17:45:48Z [verbose] Add: openshift-authentication:oauth-openshift-57ffdf54dd-5dg99:0c242c34-d446-4428-b8d7-0b8dbf4137c9:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"abc3a2e84c88a41","mac":"26:aa:40:42:72:94"},{"name":"eth0","mac":"0a:58:0a:d9:00:39","sandbox":"/var/run/netns/6f31117e-871a-4017-a1ef-578a85582f34"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.57/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:45:48.075114493+00:00 stderr F I1208 17:45:48.075061 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-authentication", Name:"oauth-openshift-57ffdf54dd-5dg99", UID:"0c242c34-d446-4428-b8d7-0b8dbf4137c9", APIVersion:"v1", ResourceVersion:"38793", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.57/23] from ovn-kubernetes 2025-12-08T17:45:48.094837105+00:00 stderr F 2025-12-08T17:45:48Z [verbose] ADD finished CNI request ContainerID:"abc3a2e84c88a41b34688913233ffa318a8b6cce7c084027b6e31005e2a9a619" Netns:"/var/run/netns/6f31117e-871a-4017-a1ef-578a85582f34" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-57ffdf54dd-5dg99;K8S_POD_INFRA_CONTAINER_ID=abc3a2e84c88a41b34688913233ffa318a8b6cce7c084027b6e31005e2a9a619;K8S_POD_UID=0c242c34-d446-4428-b8d7-0b8dbf4137c9" Path:"", result: 
"{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"26:aa:40:42:72:94\",\"name\":\"abc3a2e84c88a41\"},{\"mac\":\"0a:58:0a:d9:00:39\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/6f31117e-871a-4017-a1ef-578a85582f34\"}],\"ips\":[{\"address\":\"10.217.0.57/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:45:52.419383889+00:00 stderr F 2025-12-08T17:45:52Z [verbose] DEL starting CNI request ContainerID:"b8a864a71dfd0d83f1246550bc2ef6c6048487044c12c63e1738f0ca50d342f8" Netns:"/var/run/netns/417cfb3d-e89f-4818-9139-8e25d490060a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=installer-12-crc;K8S_POD_INFRA_CONTAINER_ID=b8a864a71dfd0d83f1246550bc2ef6c6048487044c12c63e1738f0ca50d342f8;K8S_POD_UID=158725bd-7556-4281-a3cb-acaa6baf5d8c" Path:"" 2025-12-08T17:45:52.419840803+00:00 stderr F 2025-12-08T17:45:52Z [verbose] Del: openshift-kube-apiserver:installer-12-crc:158725bd-7556-4281-a3cb-acaa6baf5d8c:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:45:52.607282038+00:00 stderr F 2025-12-08T17:45:52Z [verbose] DEL finished CNI request ContainerID:"b8a864a71dfd0d83f1246550bc2ef6c6048487044c12c63e1738f0ca50d342f8" Netns:"/var/run/netns/417cfb3d-e89f-4818-9139-8e25d490060a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-kube-apiserver;K8S_POD_NAME=installer-12-crc;K8S_POD_INFRA_CONTAINER_ID=b8a864a71dfd0d83f1246550bc2ef6c6048487044c12c63e1738f0ca50d342f8;K8S_POD_UID=158725bd-7556-4281-a3cb-acaa6baf5d8c" Path:"", result: "", err: 2025-12-08T17:47:22.235974995+00:00 stderr F 2025-12-08T17:47:22Z [verbose] DEL starting CNI request ContainerID:"68c350dfaec5080e8a88faabfaf27154a6c5538a37e7bd8bd70c0353c8cdd2ad" Netns:"/var/run/netns/01cf7c94-aee0-4bae-afb8-ad66147400a0" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-route-controller-manager;K8S_POD_NAME=route-controller-manager-776cdc94d6-qkg2q;K8S_POD_INFRA_CONTAINER_ID=68c350dfaec5080e8a88faabfaf27154a6c5538a37e7bd8bd70c0353c8cdd2ad;K8S_POD_UID=32bb589d-b6b8-4ab2-a9a2-5bae968bd2c6" Path:"" 2025-12-08T17:47:22.235974995+00:00 stderr F 2025-12-08T17:47:22Z [verbose] Del: openshift-route-controller-manager:route-controller-manager-776cdc94d6-qkg2q:32bb589d-b6b8-4ab2-a9a2-5bae968bd2c6:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:47:22.269112298+00:00 stderr F 2025-12-08T17:47:22Z [verbose] DEL starting CNI request ContainerID:"a52870906a18720d6272a3d6961d0db095af769bae361b4b65db5b6303cb885d" Netns:"/var/run/netns/3e8f96cf-eab6-4897-874c-72d797702360" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-65b6cccf98-6wjgz;K8S_POD_INFRA_CONTAINER_ID=a52870906a18720d6272a3d6961d0db095af769bae361b4b65db5b6303cb885d;K8S_POD_UID=8dcd2702-e20f-439b-b2c7-27095126b87e" Path:"" 2025-12-08T17:47:22.269112298+00:00 stderr F 2025-12-08T17:47:22Z [verbose] Del: openshift-controller-manager:controller-manager-65b6cccf98-6wjgz:8dcd2702-e20f-439b-b2c7-27095126b87e:ovn-kubernetes:eth0 
{"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:47:22.453864185+00:00 stderr F 2025-12-08T17:47:22Z [verbose] DEL finished CNI request ContainerID:"68c350dfaec5080e8a88faabfaf27154a6c5538a37e7bd8bd70c0353c8cdd2ad" Netns:"/var/run/netns/01cf7c94-aee0-4bae-afb8-ad66147400a0" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-route-controller-manager;K8S_POD_NAME=route-controller-manager-776cdc94d6-qkg2q;K8S_POD_INFRA_CONTAINER_ID=68c350dfaec5080e8a88faabfaf27154a6c5538a37e7bd8bd70c0353c8cdd2ad;K8S_POD_UID=32bb589d-b6b8-4ab2-a9a2-5bae968bd2c6" Path:"", result: "", err: 2025-12-08T17:47:22.495915729+00:00 stderr F 2025-12-08T17:47:22Z [verbose] DEL finished CNI request ContainerID:"a52870906a18720d6272a3d6961d0db095af769bae361b4b65db5b6303cb885d" Netns:"/var/run/netns/3e8f96cf-eab6-4897-874c-72d797702360" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-65b6cccf98-6wjgz;K8S_POD_INFRA_CONTAINER_ID=a52870906a18720d6272a3d6961d0db095af769bae361b4b65db5b6303cb885d;K8S_POD_UID=8dcd2702-e20f-439b-b2c7-27095126b87e" Path:"", result: "", err: 2025-12-08T17:47:22.944638553+00:00 stderr F 2025-12-08T17:47:22Z [verbose] ADD starting CNI request ContainerID:"808736aa9807f44016256ad7e368d5805041910512efa8731f76387b642e9e6a" Netns:"/var/run/netns/83e4399a-34d3-482b-94a2-9f08403e7535" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-route-controller-manager;K8S_POD_NAME=route-controller-manager-6975b9f87f-8vkdj;K8S_POD_INFRA_CONTAINER_ID=808736aa9807f44016256ad7e368d5805041910512efa8731f76387b642e9e6a;K8S_POD_UID=0ec00f11-942e-4b18-91f5-5efd88fe3f3a" Path:"" 2025-12-08T17:47:22.981499443+00:00 stderr F 2025-12-08T17:47:22Z [verbose] ADD starting CNI request ContainerID:"a691a901a94f282e4180b4900b8d8b9c89e8509d34eae05f4127639012410520" Netns:"/var/run/netns/8607005b-ad90-44ab-8cad-e4128dab0a3d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-6cd9c44569-vhg58;K8S_POD_INFRA_CONTAINER_ID=a691a901a94f282e4180b4900b8d8b9c89e8509d34eae05f4127639012410520;K8S_POD_UID=6f2d1606-a6a8-49a3-87d3-56c3b28b41e0" Path:"" 2025-12-08T17:47:23.119189078+00:00 stderr F I1208 17:47:23.114307 16143 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:47:23.119189078+00:00 stderr F I1208 17:47:23.114696 16143 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:47:23.119189078+00:00 stderr F I1208 17:47:23.114709 16143 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:47:23.119189078+00:00 stderr F I1208 17:47:23.114715 16143 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:47:23.119189078+00:00 stderr F I1208 17:47:23.114723 16143 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:47:23.119189078+00:00 stderr F 2025-12-08T17:47:23Z [verbose] Add: openshift-route-controller-manager:route-controller-manager-6975b9f87f-8vkdj:0ec00f11-942e-4b18-91f5-5efd88fe3f3a:ovn-kubernetes(ovn-kubernetes):eth0 
{"cniVersion":"0.4.0","interfaces":[{"name":"808736aa9807f44","mac":"46:73:81:65:ee:b9"},{"name":"eth0","mac":"0a:58:0a:d9:00:3c","sandbox":"/var/run/netns/83e4399a-34d3-482b-94a2-9f08403e7535"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.60/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:47:23.119228019+00:00 stderr F I1208 17:47:23.119183 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-route-controller-manager", Name:"route-controller-manager-6975b9f87f-8vkdj", UID:"0ec00f11-942e-4b18-91f5-5efd88fe3f3a", APIVersion:"v1", ResourceVersion:"39185", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.60/23] from ovn-kubernetes 2025-12-08T17:47:23.131842406+00:00 stderr F 2025-12-08T17:47:23Z [verbose] ADD finished CNI request ContainerID:"808736aa9807f44016256ad7e368d5805041910512efa8731f76387b642e9e6a" Netns:"/var/run/netns/83e4399a-34d3-482b-94a2-9f08403e7535" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-route-controller-manager;K8S_POD_NAME=route-controller-manager-6975b9f87f-8vkdj;K8S_POD_INFRA_CONTAINER_ID=808736aa9807f44016256ad7e368d5805041910512efa8731f76387b642e9e6a;K8S_POD_UID=0ec00f11-942e-4b18-91f5-5efd88fe3f3a" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"46:73:81:65:ee:b9\",\"name\":\"808736aa9807f44\"},{\"mac\":\"0a:58:0a:d9:00:3c\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/83e4399a-34d3-482b-94a2-9f08403e7535\"}],\"ips\":[{\"address\":\"10.217.0.60/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:47:23.149504842+00:00 stderr F I1208 17:47:23.145092 16167 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:47:23.149504842+00:00 stderr F I1208 17:47:23.145220 16167 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:47:23.149504842+00:00 stderr F I1208 17:47:23.145232 16167 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:47:23.149504842+00:00 stderr F I1208 17:47:23.145239 16167 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:47:23.149504842+00:00 stderr F I1208 17:47:23.145246 16167 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:47:23.149862813+00:00 stderr F 2025-12-08T17:47:23Z [verbose] Add: openshift-controller-manager:controller-manager-6cd9c44569-vhg58:6f2d1606-a6a8-49a3-87d3-56c3b28b41e0:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"a691a901a94f282","mac":"02:f3:cb:61:75:b2"},{"name":"eth0","mac":"0a:58:0a:d9:00:3d","sandbox":"/var/run/netns/8607005b-ad90-44ab-8cad-e4128dab0a3d"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.61/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:47:23.150284886+00:00 stderr F I1208 17:47:23.150234 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-controller-manager", Name:"controller-manager-6cd9c44569-vhg58", UID:"6f2d1606-a6a8-49a3-87d3-56c3b28b41e0", APIVersion:"v1", ResourceVersion:"39187", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.61/23] from ovn-kubernetes 2025-12-08T17:47:23.164428322+00:00 stderr F 2025-12-08T17:47:23Z [verbose] ADD finished CNI request ContainerID:"a691a901a94f282e4180b4900b8d8b9c89e8509d34eae05f4127639012410520" Netns:"/var/run/netns/8607005b-ad90-44ab-8cad-e4128dab0a3d" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-6cd9c44569-vhg58;K8S_POD_INFRA_CONTAINER_ID=a691a901a94f282e4180b4900b8d8b9c89e8509d34eae05f4127639012410520;K8S_POD_UID=6f2d1606-a6a8-49a3-87d3-56c3b28b41e0" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"02:f3:cb:61:75:b2\",\"name\":\"a691a901a94f282\"},{\"mac\":\"0a:58:0a:d9:00:3d\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/8607005b-ad90-44ab-8cad-e4128dab0a3d\"}],\"ips\":[{\"address\":\"10.217.0.61/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:47:24.189339865+00:00 stderr F 2025-12-08T17:47:24Z [verbose] DEL starting CNI request ContainerID:"a691a901a94f282e4180b4900b8d8b9c89e8509d34eae05f4127639012410520" Netns:"/var/run/netns/8607005b-ad90-44ab-8cad-e4128dab0a3d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-6cd9c44569-vhg58;K8S_POD_INFRA_CONTAINER_ID=a691a901a94f282e4180b4900b8d8b9c89e8509d34eae05f4127639012410520;K8S_POD_UID=6f2d1606-a6a8-49a3-87d3-56c3b28b41e0" Path:"" 2025-12-08T17:47:24.189339865+00:00 stderr F 2025-12-08T17:47:24Z [verbose] Del: openshift-controller-manager:controller-manager-6cd9c44569-vhg58:6f2d1606-a6a8-49a3-87d3-56c3b28b41e0:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:47:24.332335656+00:00 stderr F 2025-12-08T17:47:24Z [verbose] DEL finished CNI request ContainerID:"a691a901a94f282e4180b4900b8d8b9c89e8509d34eae05f4127639012410520" Netns:"/var/run/netns/8607005b-ad90-44ab-8cad-e4128dab0a3d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-6cd9c44569-vhg58;K8S_POD_INFRA_CONTAINER_ID=a691a901a94f282e4180b4900b8d8b9c89e8509d34eae05f4127639012410520;K8S_POD_UID=6f2d1606-a6a8-49a3-87d3-56c3b28b41e0" Path:"", result: "", err: 2025-12-08T17:47:24.795195077+00:00 stderr F 2025-12-08T17:47:24Z [verbose] ADD starting CNI request ContainerID:"68baf2802c3eddd9fabce6ce9c52c39ba9ed05121945d576775172a9cf19ae30" Netns:"/var/run/netns/7a1b97d3-9b16-4a10-9051-75e36604f9de" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-5cb6f9d449-mjxkv;K8S_POD_INFRA_CONTAINER_ID=68baf2802c3eddd9fabce6ce9c52c39ba9ed05121945d576775172a9cf19ae30;K8S_POD_UID=bb242c6c-f6d4-4c20-b143-aaf339af083f" Path:"" 2025-12-08T17:47:24.945221930+00:00 stderr F I1208 17:47:24.937260 16323 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:47:24.945221930+00:00 stderr F I1208 17:47:24.937751 16323 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:47:24.945221930+00:00 stderr F I1208 17:47:24.937767 16323 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:47:24.945221930+00:00 stderr F I1208 17:47:24.937775 16323 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:47:24.945221930+00:00 stderr F I1208 17:47:24.937782 16323 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:47:24.945664773+00:00 stderr F 2025-12-08T17:47:24Z [verbose] Add: 
openshift-controller-manager:controller-manager-5cb6f9d449-mjxkv:bb242c6c-f6d4-4c20-b143-aaf339af083f:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"68baf2802c3eddd","mac":"ea:0a:a6:13:65:2c"},{"name":"eth0","mac":"0a:58:0a:d9:00:3e","sandbox":"/var/run/netns/7a1b97d3-9b16-4a10-9051-75e36604f9de"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.62/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:47:24.945817788+00:00 stderr F I1208 17:47:24.945776 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-controller-manager", Name:"controller-manager-5cb6f9d449-mjxkv", UID:"bb242c6c-f6d4-4c20-b143-aaf339af083f", APIVersion:"v1", ResourceVersion:"39269", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.62/23] from ovn-kubernetes 2025-12-08T17:47:24.958497758+00:00 stderr F 2025-12-08T17:47:24Z [verbose] ADD finished CNI request ContainerID:"68baf2802c3eddd9fabce6ce9c52c39ba9ed05121945d576775172a9cf19ae30" Netns:"/var/run/netns/7a1b97d3-9b16-4a10-9051-75e36604f9de" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-controller-manager;K8S_POD_NAME=controller-manager-5cb6f9d449-mjxkv;K8S_POD_INFRA_CONTAINER_ID=68baf2802c3eddd9fabce6ce9c52c39ba9ed05121945d576775172a9cf19ae30;K8S_POD_UID=bb242c6c-f6d4-4c20-b143-aaf339af083f" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"ea:0a:a6:13:65:2c\",\"name\":\"68baf2802c3eddd\"},{\"mac\":\"0a:58:0a:d9:00:3e\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/7a1b97d3-9b16-4a10-9051-75e36604f9de\"}],\"ips\":[{\"address\":\"10.217.0.62/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:48:03.407661360+00:00 stderr F 2025-12-08T17:48:03Z [verbose] DEL starting CNI request ContainerID:"808736aa9807f44016256ad7e368d5805041910512efa8731f76387b642e9e6a" Netns:"/var/run/netns/83e4399a-34d3-482b-94a2-9f08403e7535" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-route-controller-manager;K8S_POD_NAME=route-controller-manager-6975b9f87f-8vkdj;K8S_POD_INFRA_CONTAINER_ID=808736aa9807f44016256ad7e368d5805041910512efa8731f76387b642e9e6a;K8S_POD_UID=0ec00f11-942e-4b18-91f5-5efd88fe3f3a" Path:"" 2025-12-08T17:48:03.408715082+00:00 stderr F 2025-12-08T17:48:03Z [verbose] Del: openshift-route-controller-manager:route-controller-manager-6975b9f87f-8vkdj:0ec00f11-942e-4b18-91f5-5efd88fe3f3a:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:48:03.630529862+00:00 stderr F 2025-12-08T17:48:03Z [verbose] DEL finished CNI request ContainerID:"808736aa9807f44016256ad7e368d5805041910512efa8731f76387b642e9e6a" Netns:"/var/run/netns/83e4399a-34d3-482b-94a2-9f08403e7535" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-route-controller-manager;K8S_POD_NAME=route-controller-manager-6975b9f87f-8vkdj;K8S_POD_INFRA_CONTAINER_ID=808736aa9807f44016256ad7e368d5805041910512efa8731f76387b642e9e6a;K8S_POD_UID=0ec00f11-942e-4b18-91f5-5efd88fe3f3a" Path:"", result: "", err: 2025-12-08T17:48:04.627015452+00:00 stderr F 2025-12-08T17:48:04Z [verbose] ADD starting CNI request ContainerID:"eccbf9022c048c422f4d063c310e16f26237a2cd496817eb6b591884401187ef" Netns:"/var/run/netns/aca3706e-8d81-42d1-9c61-78e81f18ff16" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-route-controller-manager;K8S_POD_NAME=route-controller-manager-7dd6d6d8c8-wfznc;K8S_POD_INFRA_CONTAINER_ID=eccbf9022c048c422f4d063c310e16f26237a2cd496817eb6b591884401187ef;K8S_POD_UID=0b1ea033-2c13-4941-a658-0129d8822fb2" Path:"" 2025-12-08T17:48:04.991845120+00:00 stderr F I1208 17:48:04.981950 17248 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:48:04.991845120+00:00 stderr F I1208 17:48:04.982690 17248 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:48:04.991845120+00:00 stderr F I1208 17:48:04.982721 17248 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:48:04.991845120+00:00 stderr F I1208 17:48:04.982739 17248 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:48:04.991845120+00:00 stderr F I1208 17:48:04.982756 17248 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:48:04.992242302+00:00 stderr F 2025-12-08T17:48:04Z [verbose] Add: openshift-route-controller-manager:route-controller-manager-7dd6d6d8c8-wfznc:0b1ea033-2c13-4941-a658-0129d8822fb2:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"eccbf9022c048c4","mac":"b6:cf:b2:c0:58:fa"},{"name":"eth0","mac":"0a:58:0a:d9:00:3f","sandbox":"/var/run/netns/aca3706e-8d81-42d1-9c61-78e81f18ff16"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.63/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:48:04.992485619+00:00 stderr F I1208 17:48:04.992432 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-route-controller-manager", Name:"route-controller-manager-7dd6d6d8c8-wfznc", UID:"0b1ea033-2c13-4941-a658-0129d8822fb2", APIVersion:"v1", ResourceVersion:"39433", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.63/23] from ovn-kubernetes 2025-12-08T17:48:05.017277329+00:00 stderr F 2025-12-08T17:48:05Z [verbose] ADD finished CNI request ContainerID:"eccbf9022c048c422f4d063c310e16f26237a2cd496817eb6b591884401187ef" Netns:"/var/run/netns/aca3706e-8d81-42d1-9c61-78e81f18ff16" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-route-controller-manager;K8S_POD_NAME=route-controller-manager-7dd6d6d8c8-wfznc;K8S_POD_INFRA_CONTAINER_ID=eccbf9022c048c422f4d063c310e16f26237a2cd496817eb6b591884401187ef;K8S_POD_UID=0b1ea033-2c13-4941-a658-0129d8822fb2" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"b6:cf:b2:c0:58:fa\",\"name\":\"eccbf9022c048c4\"},{\"mac\":\"0a:58:0a:d9:00:3f\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/aca3706e-8d81-42d1-9c61-78e81f18ff16\"}],\"ips\":[{\"address\":\"10.217.0.63/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:48:09.485910810+00:00 stderr F 2025-12-08T17:48:09Z [verbose] DEL starting CNI request ContainerID:"08a30309aab05f724b4d90b3610f7ad6b5bae8633f9e5f0e956fb4a55ca08d5c" Netns:"/var/run/netns/fa0e2d4a-9766-43dd-8bba-302637083e90" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-lxwl6;K8S_POD_INFRA_CONTAINER_ID=08a30309aab05f724b4d90b3610f7ad6b5bae8633f9e5f0e956fb4a55ca08d5c;K8S_POD_UID=fe8486ce-b0ff-43e5-b2a4-3e1d81feeebf" Path:"" 2025-12-08T17:48:09.486602352+00:00 stderr F 2025-12-08T17:48:09Z [verbose] Del: 
openshift-marketplace:certified-operators-lxwl6:fe8486ce-b0ff-43e5-b2a4-3e1d81feeebf:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:48:09.504325628+00:00 stderr F 2025-12-08T17:48:09Z [verbose] DEL starting CNI request ContainerID:"ad18d5cd7629954fa392ecb15a908995ca8664574c76fffd455810a5c533b257" Netns:"/var/run/netns/3cb2bf20-d369-4245-90f8-b4dae15ed896" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-r22jf;K8S_POD_INFRA_CONTAINER_ID=ad18d5cd7629954fa392ecb15a908995ca8664574c76fffd455810a5c533b257;K8S_POD_UID=cb8303fe-2019-44f4-a124-af174b28cc02" Path:"" 2025-12-08T17:48:09.504928936+00:00 stderr F 2025-12-08T17:48:09Z [verbose] Del: openshift-marketplace:community-operators-r22jf:cb8303fe-2019-44f4-a124-af174b28cc02:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:48:09.533535311+00:00 stderr F 2025-12-08T17:48:09Z [verbose] DEL starting CNI request ContainerID:"300fcfe62cb2a2236d7576185a01858472eaf4d7b3901f788ba4cb7d1721d434" Netns:"/var/run/netns/8531667c-3d6c-48a5-9fbe-09c00b96fc75" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-rvglb;K8S_POD_INFRA_CONTAINER_ID=300fcfe62cb2a2236d7576185a01858472eaf4d7b3901f788ba4cb7d1721d434;K8S_POD_UID=fe467668-8954-4465-87ca-ef1d5f933d43" Path:"" 2025-12-08T17:48:09.533535311+00:00 stderr F 2025-12-08T17:48:09Z [verbose] Del: openshift-marketplace:redhat-marketplace-rvglb:fe467668-8954-4465-87ca-ef1d5f933d43:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:48:09.536988496+00:00 stderr F 2025-12-08T17:48:09Z [verbose] DEL starting CNI request ContainerID:"3635ccac4190e9ac4d7e71077ab9092bae6db0a6613f789211d0b6f919a4a49e" Netns:"/var/run/netns/dda686e6-d5b9-4505-8d5e-2bbd0eb93a28" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-547dbd544d-85wdh;K8S_POD_INFRA_CONTAINER_ID=3635ccac4190e9ac4d7e71077ab9092bae6db0a6613f789211d0b6f919a4a49e;K8S_POD_UID=9af82654-06bc-4376-bff5-d6adacce9785" Path:"" 2025-12-08T17:48:09.536988496+00:00 stderr F 2025-12-08T17:48:09Z [verbose] Del: openshift-marketplace:marketplace-operator-547dbd544d-85wdh:9af82654-06bc-4376-bff5-d6adacce9785:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:48:09.548507085+00:00 stderr F 2025-12-08T17:48:09Z [verbose] DEL starting CNI request ContainerID:"bad7cc15753758580e7b5d15966ebb1082d0a9a66fb5c9a65077ce2b2db411b6" Netns:"/var/run/netns/c6721d99-c53d-40b4-8b98-83142d159b17" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-zfv6j;K8S_POD_INFRA_CONTAINER_ID=bad7cc15753758580e7b5d15966ebb1082d0a9a66fb5c9a65077ce2b2db411b6;K8S_POD_UID=e2c92d64-3525-4675-bbe9-38bfe6dd4504" Path:"" 2025-12-08T17:48:09.548709621+00:00 stderr F 2025-12-08T17:48:09Z [verbose] Del: openshift-marketplace:redhat-operators-zfv6j:e2c92d64-3525-4675-bbe9-38bfe6dd4504:ovn-kubernetes:eth0 {"cniVersion":"0.4.0","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{},"logFile":"/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log","logLevel":"4","logfile-maxsize":100,"logfile-maxbackups":5,"logfile-maxage":0,"runtimeConfig":{}} 2025-12-08T17:48:09.678716674+00:00 stderr F 2025-12-08T17:48:09Z [verbose] DEL finished CNI request ContainerID:"08a30309aab05f724b4d90b3610f7ad6b5bae8633f9e5f0e956fb4a55ca08d5c" Netns:"/var/run/netns/fa0e2d4a-9766-43dd-8bba-302637083e90" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-lxwl6;K8S_POD_INFRA_CONTAINER_ID=08a30309aab05f724b4d90b3610f7ad6b5bae8633f9e5f0e956fb4a55ca08d5c;K8S_POD_UID=fe8486ce-b0ff-43e5-b2a4-3e1d81feeebf" Path:"", result: "", err: 2025-12-08T17:48:09.749939689+00:00 stderr F 2025-12-08T17:48:09Z [verbose] DEL finished CNI request ContainerID:"ad18d5cd7629954fa392ecb15a908995ca8664574c76fffd455810a5c533b257" Netns:"/var/run/netns/3cb2bf20-d369-4245-90f8-b4dae15ed896" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-r22jf;K8S_POD_INFRA_CONTAINER_ID=ad18d5cd7629954fa392ecb15a908995ca8664574c76fffd455810a5c533b257;K8S_POD_UID=cb8303fe-2019-44f4-a124-af174b28cc02" Path:"", result: "", err: 2025-12-08T17:48:09.781668849+00:00 stderr P 2025-12-08T17:48:09Z [verbose] 2025-12-08T17:48:09.781711940+00:00 stderr P DEL finished CNI request ContainerID:"300fcfe62cb2a2236d7576185a01858472eaf4d7b3901f788ba4cb7d1721d434" Netns:"/var/run/netns/8531667c-3d6c-48a5-9fbe-09c00b96fc75" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-rvglb;K8S_POD_INFRA_CONTAINER_ID=300fcfe62cb2a2236d7576185a01858472eaf4d7b3901f788ba4cb7d1721d434;K8S_POD_UID=fe467668-8954-4465-87ca-ef1d5f933d43" Path:"", result: "", err: 2025-12-08T17:48:09.781729881+00:00 stderr F 2025-12-08T17:48:09.832210018+00:00 stderr F 2025-12-08T17:48:09Z [verbose] ADD starting CNI request ContainerID:"023ae68f913e3364024f45b9ef33e12f3f6e2e37229c46bb927a5eb359d414c6" Netns:"/var/run/netns/d254caf2-650e-4785-ad29-14d3a071a4fa" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-547dbd544d-6bbtn;K8S_POD_INFRA_CONTAINER_ID=023ae68f913e3364024f45b9ef33e12f3f6e2e37229c46bb927a5eb359d414c6;K8S_POD_UID=c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1" Path:"" 2025-12-08T17:48:09.840356675+00:00 stderr F 2025-12-08T17:48:09Z [verbose] DEL finished CNI request ContainerID:"3635ccac4190e9ac4d7e71077ab9092bae6db0a6613f789211d0b6f919a4a49e" Netns:"/var/run/netns/dda686e6-d5b9-4505-8d5e-2bbd0eb93a28" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-547dbd544d-85wdh;K8S_POD_INFRA_CONTAINER_ID=3635ccac4190e9ac4d7e71077ab9092bae6db0a6613f789211d0b6f919a4a49e;K8S_POD_UID=9af82654-06bc-4376-bff5-d6adacce9785" Path:"", result: "", err: 2025-12-08T17:48:09.849151231+00:00 stderr P 2025-12-08T17:48:09Z [verbose] 2025-12-08T17:48:09.849193922+00:00 stderr P DEL finished CNI request 
ContainerID:"bad7cc15753758580e7b5d15966ebb1082d0a9a66fb5c9a65077ce2b2db411b6" Netns:"/var/run/netns/c6721d99-c53d-40b4-8b98-83142d159b17" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-zfv6j;K8S_POD_INFRA_CONTAINER_ID=bad7cc15753758580e7b5d15966ebb1082d0a9a66fb5c9a65077ce2b2db411b6;K8S_POD_UID=e2c92d64-3525-4675-bbe9-38bfe6dd4504" Path:"", result: "", err: 2025-12-08T17:48:09.849212183+00:00 stderr F 2025-12-08T17:48:10.180321270+00:00 stderr F I1208 17:48:10.175196 17733 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:48:10.180321270+00:00 stderr F I1208 17:48:10.175788 17733 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:48:10.180321270+00:00 stderr F I1208 17:48:10.175805 17733 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:48:10.180321270+00:00 stderr F I1208 17:48:10.175812 17733 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:48:10.180321270+00:00 stderr F I1208 17:48:10.175818 17733 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:48:10.180681961+00:00 stderr F 2025-12-08T17:48:10Z [verbose] Add: openshift-marketplace:marketplace-operator-547dbd544d-6bbtn:c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"023ae68f913e336","mac":"46:d1:1c:d1:49:55"},{"name":"eth0","mac":"0a:58:0a:d9:00:40","sandbox":"/var/run/netns/d254caf2-650e-4785-ad29-14d3a071a4fa"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.64/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:48:10.180923228+00:00 stderr F I1208 17:48:10.180858 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"marketplace-operator-547dbd544d-6bbtn", UID:"c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1", APIVersion:"v1", ResourceVersion:"39490", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.64/23] from ovn-kubernetes 2025-12-08T17:48:10.193412207+00:00 stderr F 2025-12-08T17:48:10Z [verbose] ADD finished CNI request ContainerID:"023ae68f913e3364024f45b9ef33e12f3f6e2e37229c46bb927a5eb359d414c6" Netns:"/var/run/netns/d254caf2-650e-4785-ad29-14d3a071a4fa" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-547dbd544d-6bbtn;K8S_POD_INFRA_CONTAINER_ID=023ae68f913e3364024f45b9ef33e12f3f6e2e37229c46bb927a5eb359d414c6;K8S_POD_UID=c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"46:d1:1c:d1:49:55\",\"name\":\"023ae68f913e336\"},{\"mac\":\"0a:58:0a:d9:00:40\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/d254caf2-650e-4785-ad29-14d3a071a4fa\"}],\"ips\":[{\"address\":\"10.217.0.64/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:48:11.549374132+00:00 stderr F 2025-12-08T17:48:11Z [verbose] ADD starting CNI request ContainerID:"6de3e2a5fd64a82fb7314939a1915d8ecbf9ba4a8c0d8b9710455241d403b89e" Netns:"/var/run/netns/4235b3be-8bf4-44af-ba5c-cc6d2373cc5d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-58d6l;K8S_POD_INFRA_CONTAINER_ID=6de3e2a5fd64a82fb7314939a1915d8ecbf9ba4a8c0d8b9710455241d403b89e;K8S_POD_UID=af364a45-2b54-442a-b71a-4032d578bc89" Path:"" 
2025-12-08T17:48:11.892398260+00:00 stderr F I1208 17:48:11.887212 17877 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:48:11.892398260+00:00 stderr F I1208 17:48:11.887676 17877 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:48:11.892398260+00:00 stderr F I1208 17:48:11.887690 17877 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:48:11.892398260+00:00 stderr F I1208 17:48:11.887696 17877 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:48:11.892398260+00:00 stderr F I1208 17:48:11.887703 17877 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:48:11.893148423+00:00 stderr F 2025-12-08T17:48:11Z [verbose] Add: openshift-marketplace:certified-operators-58d6l:af364a45-2b54-442a-b71a-4032d578bc89:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"6de3e2a5fd64a82","mac":"56:55:c6:65:a9:65"},{"name":"eth0","mac":"0a:58:0a:d9:00:41","sandbox":"/var/run/netns/4235b3be-8bf4-44af-ba5c-cc6d2373cc5d"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.65/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:48:11.893357090+00:00 stderr F I1208 17:48:11.893313 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"certified-operators-58d6l", UID:"af364a45-2b54-442a-b71a-4032d578bc89", APIVersion:"v1", ResourceVersion:"39546", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.65/23] from ovn-kubernetes 2025-12-08T17:48:11.911056135+00:00 stderr F 2025-12-08T17:48:11Z [verbose] ADD finished CNI request ContainerID:"6de3e2a5fd64a82fb7314939a1915d8ecbf9ba4a8c0d8b9710455241d403b89e" Netns:"/var/run/netns/4235b3be-8bf4-44af-ba5c-cc6d2373cc5d" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=certified-operators-58d6l;K8S_POD_INFRA_CONTAINER_ID=6de3e2a5fd64a82fb7314939a1915d8ecbf9ba4a8c0d8b9710455241d403b89e;K8S_POD_UID=af364a45-2b54-442a-b71a-4032d578bc89" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"56:55:c6:65:a9:65\",\"name\":\"6de3e2a5fd64a82\"},{\"mac\":\"0a:58:0a:d9:00:41\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/4235b3be-8bf4-44af-ba5c-cc6d2373cc5d\"}],\"ips\":[{\"address\":\"10.217.0.65/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:48:12.520953948+00:00 stderr F 2025-12-08T17:48:12Z [verbose] ADD starting CNI request ContainerID:"c11f84302bfe3264cf3e55e89a65907964bdd273130b6ff7fe1c6969648837c5" Netns:"/var/run/netns/5e56a3a7-fb3b-4a87-a3ef-744f5fbb3910" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-xp5vr;K8S_POD_INFRA_CONTAINER_ID=c11f84302bfe3264cf3e55e89a65907964bdd273130b6ff7fe1c6969648837c5;K8S_POD_UID=c9416e49-5134-45de-9eeb-a15be7fdbf63" Path:"" 2025-12-08T17:48:12.724824125+00:00 stderr F I1208 17:48:12.719270 17993 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:48:12.724824125+00:00 stderr F I1208 17:48:12.719631 17993 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:48:12.724824125+00:00 stderr F I1208 17:48:12.719646 17993 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:48:12.724824125+00:00 stderr F I1208 
17:48:12.719661 17993 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:48:12.724824125+00:00 stderr F I1208 17:48:12.719669 17993 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:48:12.725159596+00:00 stderr F 2025-12-08T17:48:12Z [verbose] Add: openshift-marketplace:redhat-marketplace-xp5vr:c9416e49-5134-45de-9eeb-a15be7fdbf63:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"c11f84302bfe326","mac":"8a:c2:6e:19:41:c4"},{"name":"eth0","mac":"0a:58:0a:d9:00:42","sandbox":"/var/run/netns/5e56a3a7-fb3b-4a87-a3ef-744f5fbb3910"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.66/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:48:12.725335702+00:00 stderr F I1208 17:48:12.725308 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"redhat-marketplace-xp5vr", UID:"c9416e49-5134-45de-9eeb-a15be7fdbf63", APIVersion:"v1", ResourceVersion:"39564", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.66/23] from ovn-kubernetes 2025-12-08T17:48:12.735506979+00:00 stderr P 2025-12-08T17:48:12Z [verbose] 2025-12-08T17:48:12.735551280+00:00 stderr P ADD finished CNI request ContainerID:"c11f84302bfe3264cf3e55e89a65907964bdd273130b6ff7fe1c6969648837c5" Netns:"/var/run/netns/5e56a3a7-fb3b-4a87-a3ef-744f5fbb3910" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-marketplace-xp5vr;K8S_POD_INFRA_CONTAINER_ID=c11f84302bfe3264cf3e55e89a65907964bdd273130b6ff7fe1c6969648837c5;K8S_POD_UID=c9416e49-5134-45de-9eeb-a15be7fdbf63" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"8a:c2:6e:19:41:c4\",\"name\":\"c11f84302bfe326\"},{\"mac\":\"0a:58:0a:d9:00:42\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/5e56a3a7-fb3b-4a87-a3ef-744f5fbb3910\"}],\"ips\":[{\"address\":\"10.217.0.66/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:48:12.735569611+00:00 stderr F 2025-12-08T17:48:13.953376546+00:00 stderr F 2025-12-08T17:48:13Z [verbose] ADD starting CNI request ContainerID:"2e369e900be26b57b9f7a1bc5cab886fe858f0af35227f2d72416c136d57cef3" Netns:"/var/run/netns/39aa02e8-89fc-4af3-ab90-f05a01788d06" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-xpnf9;K8S_POD_INFRA_CONTAINER_ID=2e369e900be26b57b9f7a1bc5cab886fe858f0af35227f2d72416c136d57cef3;K8S_POD_UID=259174f2-efbe-4b44-ae95-b0d2f2865ab9" Path:"" 2025-12-08T17:48:14.310790310+00:00 stderr F I1208 17:48:14.304930 18115 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:48:14.310790310+00:00 stderr F I1208 17:48:14.306147 18115 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:48:14.310790310+00:00 stderr F I1208 17:48:14.306165 18115 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:48:14.310790310+00:00 stderr F I1208 17:48:14.306173 18115 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:48:14.310790310+00:00 stderr F I1208 17:48:14.306180 18115 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:48:14.311317336+00:00 stderr F 2025-12-08T17:48:14Z [verbose] Add: 
openshift-marketplace:redhat-operators-xpnf9:259174f2-efbe-4b44-ae95-b0d2f2865ab9:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"2e369e900be26b5","mac":"82:9e:ce:45:25:20"},{"name":"eth0","mac":"0a:58:0a:d9:00:43","sandbox":"/var/run/netns/39aa02e8-89fc-4af3-ab90-f05a01788d06"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.67/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:48:14.311708548+00:00 stderr F I1208 17:48:14.311674 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-marketplace", Name:"redhat-operators-xpnf9", UID:"259174f2-efbe-4b44-ae95-b0d2f2865ab9", APIVersion:"v1", ResourceVersion:"39584", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.67/23] from ovn-kubernetes 2025-12-08T17:48:14.326295479+00:00 stderr F 2025-12-08T17:48:14Z [verbose] ADD finished CNI request ContainerID:"2e369e900be26b57b9f7a1bc5cab886fe858f0af35227f2d72416c136d57cef3" Netns:"/var/run/netns/39aa02e8-89fc-4af3-ab90-f05a01788d06" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=redhat-operators-xpnf9;K8S_POD_INFRA_CONTAINER_ID=2e369e900be26b57b9f7a1bc5cab886fe858f0af35227f2d72416c136d57cef3;K8S_POD_UID=259174f2-efbe-4b44-ae95-b0d2f2865ab9" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"82:9e:ce:45:25:20\",\"name\":\"2e369e900be26b5\"},{\"mac\":\"0a:58:0a:d9:00:43\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/39aa02e8-89fc-4af3-ab90-f05a01788d06\"}],\"ips\":[{\"address\":\"10.217.0.67/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:48:14.918949480+00:00 stderr F 2025-12-08T17:48:14Z [verbose] ADD starting CNI request ContainerID:"8808a88a8b0d81bcfadb7a2fb65038f9c08c99f71348b280e6aee0991c20edd9" Netns:"/var/run/netns/4298caa4-8f0b-4ac8-9bf0-777f8e9f9506" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-zdvxg;K8S_POD_INFRA_CONTAINER_ID=8808a88a8b0d81bcfadb7a2fb65038f9c08c99f71348b280e6aee0991c20edd9;K8S_POD_UID=a52a5ff3-1e70-4b19-b013-95206cae40fc" Path:"" 2025-12-08T17:48:15.052926424+00:00 stderr F I1208 17:48:15.043113 18247 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:48:15.052926424+00:00 stderr F I1208 17:48:15.043532 18247 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:48:15.052926424+00:00 stderr F I1208 17:48:15.043542 18247 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:48:15.052926424+00:00 stderr F I1208 17:48:15.043550 18247 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:48:15.052926424+00:00 stderr F I1208 17:48:15.043559 18247 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:48:15.052926424+00:00 stderr F 2025-12-08T17:48:15Z [verbose] Add: openshift-marketplace:community-operators-zdvxg:a52a5ff3-1e70-4b19-b013-95206cae40fc:ovn-kubernetes(ovn-kubernetes):eth0 {"cniVersion":"0.4.0","interfaces":[{"name":"8808a88a8b0d81b","mac":"da:c9:cb:cd:4c:80"},{"name":"eth0","mac":"0a:58:0a:d9:00:44","sandbox":"/var/run/netns/4298caa4-8f0b-4ac8-9bf0-777f8e9f9506"}],"ips":[{"version":"4","interface":1,"address":"10.217.0.68/23","gateway":"10.217.0.1"}],"dns":{}} 2025-12-08T17:48:15.052926424+00:00 stderr F I1208 17:48:15.050586 6625 event.go:364] Event(v1.ObjectReference{Kind:"Pod", 
Namespace:"openshift-marketplace", Name:"community-operators-zdvxg", UID:"a52a5ff3-1e70-4b19-b013-95206cae40fc", APIVersion:"v1", ResourceVersion:"39609", FieldPath:""}): type: 'Normal' reason: 'AddedInterface' Add eth0 [10.217.0.68/23] from ovn-kubernetes 2025-12-08T17:48:15.065005079+00:00 stderr F 2025-12-08T17:48:15Z [verbose] ADD finished CNI request ContainerID:"8808a88a8b0d81bcfadb7a2fb65038f9c08c99f71348b280e6aee0991c20edd9" Netns:"/var/run/netns/4298caa4-8f0b-4ac8-9bf0-777f8e9f9506" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=community-operators-zdvxg;K8S_POD_INFRA_CONTAINER_ID=8808a88a8b0d81bcfadb7a2fb65038f9c08c99f71348b280e6aee0991c20edd9;K8S_POD_UID=a52a5ff3-1e70-4b19-b013-95206cae40fc" Path:"", result: "{\"Result\":{\"cniVersion\":\"1.1.0\",\"interfaces\":[{\"mac\":\"da:c9:cb:cd:4c:80\",\"name\":\"8808a88a8b0d81b\"},{\"mac\":\"0a:58:0a:d9:00:44\",\"name\":\"eth0\",\"sandbox\":\"/var/run/netns/4298caa4-8f0b-4ac8-9bf0-777f8e9f9506\"}],\"ips\":[{\"address\":\"10.217.0.68/23\",\"gateway\":\"10.217.0.1\",\"interface\":1}]}}", err: 2025-12-08T17:53:41.377734982+00:00 stderr F 2025-12-08T17:53:41Z [verbose] readiness indicator file is gone. restart multus-daemon ././@LongLink0000644000000000000000000000025100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_pe0000755000175000017500000000000015115611513033151 5ustar zuulzuul././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83/perses-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_pe0000755000175000017500000000000015115611520033147 5ustar zuulzuul././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83/perses-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_pe0000644000175000017500000000653615115611513033165 0ustar zuulzuul2025-12-08T17:55:34.323968837+00:00 stderr F 2025-12-08T17:55:34Z INFO setup starting manager 2025-12-08T17:55:34.323968837+00:00 stderr F 2025-12-08T17:55:34Z INFO controller-runtime.metrics Starting metrics server 2025-12-08T17:55:34.323968837+00:00 stderr F 2025-12-08T17:55:34Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8082", "secure": false} 2025-12-08T17:55:34.323968837+00:00 stderr F 2025-12-08T17:55:34Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2025-12-08T17:55:34.323968837+00:00 stderr F 2025-12-08T17:55:34Z INFO starting server {"name": "pprof", "addr": "127.0.0.1:8083"} 2025-12-08T17:55:34.323968837+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "persesdatasource", "controllerGroup": "perses.dev", "controllerKind": "PersesDatasource", "source": "kind source: *v1alpha1.PersesDatasource"} 2025-12-08T17:55:34.327226456+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "persesdashboard", "controllerGroup": 
"perses.dev", "controllerKind": "PersesDashboard", "source": "kind source: *v1alpha1.PersesDashboard"} 2025-12-08T17:55:34.327226456+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "perses", "controllerGroup": "perses.dev", "controllerKind": "Perses", "source": "kind source: *v1.Service"} 2025-12-08T17:55:34.327226456+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "perses", "controllerGroup": "perses.dev", "controllerKind": "Perses", "source": "kind source: *v1.StatefulSet"} 2025-12-08T17:55:34.327226456+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "perses", "controllerGroup": "perses.dev", "controllerKind": "Perses", "source": "kind source: *v1.ConfigMap"} 2025-12-08T17:55:34.327226456+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "perses", "controllerGroup": "perses.dev", "controllerKind": "Perses", "source": "kind source: *v1alpha1.Perses"} 2025-12-08T17:55:34.327226456+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "perses", "controllerGroup": "perses.dev", "controllerKind": "Perses", "source": "kind source: *v1.Deployment"} 2025-12-08T17:55:34.798222170+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting Controller {"controller": "persesdashboard", "controllerGroup": "perses.dev", "controllerKind": "PersesDashboard"} 2025-12-08T17:55:34.798222170+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting workers {"controller": "persesdashboard", "controllerGroup": "perses.dev", "controllerKind": "PersesDashboard", "worker count": 1} 2025-12-08T17:55:34.800451922+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting Controller {"controller": "persesdatasource", "controllerGroup": "perses.dev", "controllerKind": "PersesDatasource"} 2025-12-08T17:55:34.800451922+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting workers {"controller": "persesdatasource", "controllerGroup": "perses.dev", "controllerKind": "PersesDatasource", "worker count": 1} 2025-12-08T17:55:34.800451922+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting Controller {"controller": "perses", "controllerGroup": "perses.dev", "controllerKind": "Perses"} 2025-12-08T17:55:34.800451922+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting workers {"controller": "perses", "controllerGroup": "perses.dev", "controllerKind": "Perses", "worker count": 1} ././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611513033052 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1/collect-profiles/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611520033050 5ustar zuulzuul././@LongLink0000644000000000000000000000031700000000000011604 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1/collect-profiles/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000644000175000017500000000042115115611513033051 0ustar zuulzuul2025-12-08T17:44:19.687247463+00:00 stderr F time="2025-12-08T17:44:19Z" level=info msg="error verifying provided cert and key: certificate has expired" 2025-12-08T17:44:19.687247463+00:00 stderr F time="2025-12-08T17:44:19Z" level=info msg="generating a new cert and key" ././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611514033077 5ustar zuulzuul././@LongLink0000644000000000000000000000032200000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/extract/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000032700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/extract/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000116615115611514033105 0ustar zuulzuul2025-12-08T17:56:33.512362982+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Using in-cluster kube client config" 2025-12-08T17:56:33.526680285+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Reading file" file=/bundle/manifests/interconnect-operator.v1.10.x.clusterserviceversion.yaml 2025-12-08T17:56:33.527780734+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Reading file" file=/bundle/manifests/interconnect-operator_v1alpha1_interconnect_crd.yaml 2025-12-08T17:56:33.528255486+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Reading file" file=/bundle/metadata/annotations.yaml ././@LongLink0000644000000000000000000000031700000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/util/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000032400000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/util/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000010715115611514033077 0ustar 
zuulzuul2025-12-08T17:56:31.085283209+00:00 stdout F '/bin/cpb' -> '/util/cpb' ././@LongLink0000644000000000000000000000031700000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/pull/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000032400000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/pull/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000430315115611514033101 0ustar zuulzuul2025-12-08T17:56:33.197309194+00:00 stdout F skipping a dir without errors: / 2025-12-08T17:56:33.197309194+00:00 stdout F skipping a dir without errors: /bundle 2025-12-08T17:56:33.197470598+00:00 stdout F skipping all files in the dir: /dev 2025-12-08T17:56:33.197470598+00:00 stdout F skipping a dir without errors: /etc 2025-12-08T17:56:33.197470598+00:00 stdout F skipping a dir without errors: /manifests 2025-12-08T17:56:33.197470598+00:00 stdout F skipping a dir without errors: /metadata 2025-12-08T17:56:33.197636523+00:00 stdout F skipping all files in the dir: /proc 2025-12-08T17:56:33.197661303+00:00 stdout F skipping a dir without errors: /root 2025-12-08T17:56:33.197690744+00:00 stdout F skipping a dir without errors: /root/buildinfo 2025-12-08T17:56:33.197728405+00:00 stdout F skipping a dir without errors: /root/buildinfo/content_manifests 2025-12-08T17:56:33.197763926+00:00 stdout F skipping a dir without errors: /run 2025-12-08T17:56:33.197790597+00:00 stdout F skipping a dir without errors: /run/secrets 2025-12-08T17:56:33.197811487+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm 2025-12-08T17:56:33.197832558+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/ca 2025-12-08T17:56:33.197864100+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/syspurpose 2025-12-08T17:56:33.197970672+00:00 stdout F skipping all files in the dir: /sys 2025-12-08T17:56:33.197981383+00:00 stdout F skipping a dir without errors: /util 2025-12-08T17:56:33.198010643+00:00 stdout F skipping a dir without errors: /var 2025-12-08T17:56:33.198032214+00:00 stdout F skipping a dir without errors: /var/run 2025-12-08T17:56:33.198056375+00:00 stdout F skipping a dir without errors: /var/run/secrets 2025-12-08T17:56:33.198081535+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io 2025-12-08T17:56:33.198112306+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io/serviceaccount 2025-12-08T17:56:33.198137267+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io/serviceaccount/..2025_12_08_17_56_30.2134724141 2025-12-08T17:56:33.198210638+00:00 stdout F &{metadata/annotations.yaml manifests/} ././@LongLink0000644000000000000000000000027600000000000011610 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611513033101 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/oauth-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611520033077 5ustar zuulzuul././@LongLink0000644000000000000000000000031700000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/oauth-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000271115115611513033104 0ustar zuulzuul2025-12-08T17:58:20.303005930+00:00 stderr F 2025/12/08 17:58:20 provider.go:129: Defaulting client-id to system:serviceaccount:service-telemetry:smart-gateway 2025-12-08T17:58:20.303005930+00:00 stderr F 2025/12/08 17:58:20 provider.go:134: Defaulting client-secret to service account token /var/run/secrets/kubernetes.io/serviceaccount/token 2025-12-08T17:58:20.303775440+00:00 stderr F 2025/12/08 17:58:20 provider.go:358: Delegation of authentication and authorization to OpenShift is enabled for bearer tokens and client certificates. 
2025-12-08T17:58:20.327267547+00:00 stderr F 2025/12/08 17:58:20 oauthproxy.go:210: mapping path "/" => upstream "http://localhost:8081/" 2025-12-08T17:58:20.327267547+00:00 stderr F 2025/12/08 17:58:20 oauthproxy.go:237: OAuthProxy configured for Client ID: system:serviceaccount:service-telemetry:smart-gateway 2025-12-08T17:58:20.327267547+00:00 stderr F 2025/12/08 17:58:20 oauthproxy.go:247: Cookie settings: name:_oauth_proxy secure(https):true httponly:true expiry:168h0m0s domain: samesite: refresh:disabled 2025-12-08T17:58:20.329379932+00:00 stderr F 2025/12/08 17:58:20 http.go:64: HTTP: listening on 127.0.0.1:4180 2025-12-08T17:58:20.330201903+00:00 stderr F I1208 17:58:20.329647 1 dynamic_serving_content.go:135] "Starting controller" name="serving::/etc/tls/private/tls.crt::/etc/tls/private/tls.key" 2025-12-08T17:58:20.330201903+00:00 stderr F 2025/12/08 17:58:20 http.go:110: HTTPS: listening on [::]:8083 ././@LongLink0000644000000000000000000000030600000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/sg-core/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611520033077 5ustar zuulzuul././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/sg-core/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000250015115611513033100 0ustar zuulzuul2025-12-08T17:58:34.477868418+00:00 stdout F 2025-12-08 17:58:34 [INFO] initialized handler [transport pair: socket0, handler: sensubility-metrics] 2025-12-08T17:58:34.477868418+00:00 stdout F 2025-12-08 17:58:34 [INFO] loaded transport [transport: socket0] 2025-12-08T17:58:34.493930394+00:00 stdout F 2025-12-08 17:58:34 [INFO] loaded application plugin [application: prometheus] 2025-12-08T17:58:34.493930394+00:00 stdout F 2025-12-08 17:58:34 [INFO] metric server at : 127.0.0.1:8081 [plugin: Prometheus] 2025-12-08T17:58:34.493930394+00:00 stdout F 2025-12-08 17:58:34 [INFO] socket listening on /tmp/smartgateway [plugin: socket] 2025-12-08T17:58:35.500944551+00:00 stdout F 2025-12-08 17:58:35 [INFO] registered collector tracking metrics with 1 label [plugin: Prometheus] 2025-12-08T17:58:35.500944551+00:00 stdout F 2025-12-08 17:58:35 [INFO] registered expiry process for metrics with interval 0s [plugin: Prometheus] 2025-12-08T17:59:30.659475915+00:00 stdout F 2025-12-08 17:59:30 [INFO] registered collector tracking metrics with 2 labels [plugin: Prometheus] 2025-12-08T17:59:30.659475915+00:00 stdout F 2025-12-08 17:59:30 [INFO] registered expiry process for metrics with interval 10s [plugin: Prometheus] 2025-12-08T18:00:35.497167284+00:00 stdout F 2025-12-08 18:00:35 [WARN] prometheus collector expired [plugin: Prometheus] ././@LongLink0000644000000000000000000000030500000000000011601 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/bridge/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611520033077 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/bridge/2.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000202215115611513033077 0ustar zuulzuul2025-12-08T17:58:54.021301516+00:00 stdout F bridge-349 ==> (/tmp/smartgateway) 2025-12-08T17:58:54.028051673+00:00 stdout F bridge-349 ==> (amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/sensubility/cloud1-telemetry) 2025-12-08T17:59:54.070940868+00:00 stdout F in: 8(0), amqp_overrun: 0(0), out: 8(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:00:53.098747062+00:00 stdout F in: 10(0), amqp_overrun: 0(0), out: 10(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:01:52.110788446+00:00 stdout F in: 10(0), amqp_overrun: 0(0), out: 10(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:02:51.118241184+00:00 stdout F in: 10(0), amqp_overrun: 0(0), out: 10(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:03:50.143109302+00:00 stdout F in: 10(0), amqp_overrun: 0(0), out: 10(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:04:49.157138997+00:00 stdout F in: 10(0), amqp_overrun: 0(0), out: 10(0), sock_overrun: 0(0), link_credit_average: -nan ././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/bridge/1.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000112415115611513033101 0ustar zuulzuul2025-12-08T17:58:36.094466181+00:00 stdout F bridge-368 ==> (/tmp/smartgateway) 2025-12-08T17:58:36.103172966+00:00 stderr F PN_TRANSPORT_CLOSED: proton:io: Connection refused - disconnected default-interconnect.service-telemetry.svc.cluster.local:5673 2025-12-08T17:58:36.103172966+00:00 stderr F Exit AMQP RCV thread... 2025-12-08T17:58:37.094639991+00:00 stdout F Joining amqp_rcv_th... 2025-12-08T17:58:37.094639991+00:00 stdout F Cancel socket_snd_th... 2025-12-08T17:58:37.094639991+00:00 stdout F Joining socket_snd_th... 2025-12-08T17:58:37.094720273+00:00 stderr F Exit SOCKET thread... 
././@LongLink0000644000000000000000000000027700000000000011611 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authenticati0000755000175000017500000000000015115611514033132 5ustar zuulzuul././@LongLink0000644000000000000000000000032700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371/authentication-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authenticati0000755000175000017500000000000015115611521033130 5ustar zuulzuul././@LongLink0000644000000000000000000000033400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371/authentication-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authenticati0000644000175000017500000155071215115611514033147 0ustar zuulzuul2025-12-08T17:44:21.710161282+00:00 stdout F Copying system trust bundle 2025-12-08T17:44:22.732012436+00:00 stderr F W1208 17:44:22.730453 1 cmd.go:167] Unable to read initial content of "/tmp/terminate": open /tmp/terminate: no such file or directory 2025-12-08T17:44:22.732012436+00:00 stderr F I1208 17:44:22.731559 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:22.735962383+00:00 stderr F I1208 17:44:22.735575 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:44:22.735962383+00:00 stderr F I1208 17:44:22.735613 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 
2025-12-08T17:44:22.755904618+00:00 stderr F I1208 17:44:22.737360 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:22.757785748+00:00 stderr F I1208 17:44:22.756999 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:22.757785748+00:00 stderr F I1208 17:44:22.757382 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:22.757785748+00:00 stderr F I1208 17:44:22.757389 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:22.757785748+00:00 stderr F I1208 17:44:22.757395 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:22.757785748+00:00 stderr F I1208 17:44:22.757400 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:22.887916048+00:00 stderr F I1208 17:44:22.869857 1 builder.go:304] cluster-authentication-operator version - 2025-12-08T17:44:22.887916048+00:00 stderr F I1208 17:44:22.873267 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:23.579127412+00:00 stderr F I1208 17:44:23.578510 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:44:23.597093002+00:00 stderr F I1208 17:44:23.597008 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:44:23.597093002+00:00 stderr F I1208 17:44:23.597031 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:44:23.597093002+00:00 stderr F I1208 17:44:23.597059 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400 2025-12-08T17:44:23.597093002+00:00 stderr F I1208 17:44:23.597064 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200 2025-12-08T17:44:23.623290466+00:00 stderr F I1208 17:44:23.623239 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:23.632146658+00:00 stderr F W1208 17:44:23.631773 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:23.632903999+00:00 stderr F W1208 17:44:23.632856 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:23.632944390+00:00 stderr F W1208 17:44:23.632934 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:23.632965501+00:00 stderr F W1208 17:44:23.632957 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:23.632985071+00:00 stderr F W1208 17:44:23.632977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:23.633004422+00:00 stderr F W1208 17:44:23.632996 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 
2025-12-08T17:44:23.633341421+00:00 stderr F I1208 17:44:23.626843 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:44:23.633398423+00:00 stderr F I1208 17:44:23.628473 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:23.634138382+00:00 stderr F I1208 17:44:23.634106 1 leaderelection.go:257] attempting to acquire leader lease openshift-authentication-operator/cluster-authentication-operator-lock... 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.637198 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.637235 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.637519 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.637557 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.637569 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.637856 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-authentication-operator.svc\" [serving] validServingFor=[metrics.openshift-authentication-operator.svc,metrics.openshift-authentication-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:09 +0000 UTC to 2027-11-02 07:52:10 +0000 UTC (now=2025-12-08 17:44:23.637833703 +0000 UTC))" 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.637938 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.637954 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.638153 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:44:23.638131031 +0000 UTC))" 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.638169 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.638189 1 genericapiserver.go:696] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:44:23.640223998+00:00 stderr F I1208 17:44:23.638201 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:23.643637331+00:00 stderr F I1208 17:44:23.643226 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" 
reflector="k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:209" 2025-12-08T17:44:23.643637331+00:00 stderr F I1208 17:44:23.643504 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:209" 2025-12-08T17:44:23.643744484+00:00 stderr F I1208 17:44:23.643697 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go:183" 2025-12-08T17:44:23.646236432+00:00 stderr F I1208 17:44:23.645600 1 leaderelection.go:271] successfully acquired lease openshift-authentication-operator/cluster-authentication-operator-lock 2025-12-08T17:44:23.647036875+00:00 stderr F I1208 17:44:23.646963 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-authentication-operator", Name:"cluster-authentication-operator-lock", UID:"3dbd319f-80dc-4bd9-bf2b-95668b17141c", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37416", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' authentication-operator-7f5c659b84-5scww_dd36d592-8bf7-4823-9267-a5f432dabf9d became leader 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.738830 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.738900 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739099 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:23.739077554 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739305 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-authentication-operator.svc\" [serving] validServingFor=[metrics.openshift-authentication-operator.svc,metrics.openshift-authentication-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:09 +0000 UTC to 2027-11-02 07:52:10 +0000 UTC (now=2025-12-08 17:44:23.739291491 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739433 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:44:23.739422145 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739451 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739658 1 tlsconfig.go:181] "Loaded client CA" index=0 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:23.739645001 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739672 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:23.739663971 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739683 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:23.739676252 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739695 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:23.739687872 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739706 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:23.739698782 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739732 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:23.739710542 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739748 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:23.739738133 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739767 1 tlsconfig.go:181] "Loaded client CA" index=7 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:23.739752004 +0000 UTC))" 2025-12-08T17:44:23.739952719+00:00 stderr F I1208 17:44:23.739926 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-authentication-operator.svc\" [serving] validServingFor=[metrics.openshift-authentication-operator.svc,metrics.openshift-authentication-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:09 +0000 UTC to 2027-11-02 07:52:10 +0000 UTC (now=2025-12-08 17:44:23.739913998 +0000 UTC))" 2025-12-08T17:44:23.740897454+00:00 stderr F I1208 17:44:23.740054 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:44:23.740046091 +0000 UTC))" 2025-12-08T17:44:23.876425382+00:00 stderr F I1208 17:44:23.876130 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:23.878971710+00:00 stderr F I1208 17:44:23.877548 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:23.887629767+00:00 stderr F I1208 17:44:23.887476 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:23.926154037+00:00 stderr F I1208 17:44:23.925148 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:23.926788666+00:00 stderr F I1208 17:44:23.926726 1 reflector.go:430] "Caches populated" type="*v1.APIServer" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:23.931517384+00:00 stderr F I1208 17:44:23.931431 1 reflector.go:430] "Caches populated" type="*v1.OAuth" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:23.937168968+00:00 stderr F I1208 17:44:23.935148 1 reflector.go:430] "Caches populated" type="*v1.Console" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:23.937168968+00:00 stderr F I1208 17:44:23.935943 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:23.937168968+00:00 stderr F I1208 17:44:23.936235 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:23.937168968+00:00 stderr F I1208 17:44:23.937068 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to 
featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:23.937233750+00:00 stderr F I1208 17:44:23.937141 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", 
"NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:23.950629245+00:00 stderr F I1208 17:44:23.948574 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:23.956198467+00:00 stderr F I1208 17:44:23.956076 1 base_controller.go:76] Waiting for caches to sync for openshift-authentication-PayloadConfig 2025-12-08T17:44:23.960938187+00:00 stderr F I1208 17:44:23.960665 1 base_controller.go:76] Waiting for caches to sync for openshift-oauth-apiserver-EncryptionKey 2025-12-08T17:44:23.960938187+00:00 stderr F I1208 17:44:23.960706 1 base_controller.go:76] Waiting for caches to sync for RouterCertsDomainValidationController 2025-12-08T17:44:23.960938187+00:00 stderr F I1208 17:44:23.960718 1 base_controller.go:76] Waiting for caches to sync for openshift-authentication-ServiceCA 2025-12-08T17:44:23.960938187+00:00 stderr F I1208 17:44:23.960848 1 base_controller.go:76] Waiting for caches to sync for OpenshiftAuthenticationStaticResources-StaticResources 2025-12-08T17:44:23.960938187+00:00 stderr F I1208 17:44:23.960858 1 base_controller.go:76] Waiting for caches to sync for WellKnownReadyController 2025-12-08T17:44:23.960938187+00:00 stderr F I1208 17:44:23.960886 1 base_controller.go:76] Waiting for caches to sync for OAuthServerRouteEndpointAccessibleController 2025-12-08T17:44:23.960938187+00:00 stderr 
F I1208 17:44:23.960915 1 base_controller.go:76] Waiting for caches to sync for OAuthServerServiceEndpointAccessibleController 2025-12-08T17:44:23.960938187+00:00 stderr F I1208 17:44:23.960930 1 base_controller.go:76] Waiting for caches to sync for OAuthServerServiceEndpointsEndpointAccessibleController 2025-12-08T17:44:23.960995278+00:00 stderr F I1208 17:44:23.960940 1 base_controller.go:76] Waiting for caches to sync for openshift-authentication-IngressNodesAvailable 2025-12-08T17:44:23.960995278+00:00 stderr F I1208 17:44:23.960950 1 base_controller.go:76] Waiting for caches to sync for ProxyConfigController 2025-12-08T17:44:23.960995278+00:00 stderr F I1208 17:44:23.960960 1 base_controller.go:76] Waiting for caches to sync for CustomRouteController 2025-12-08T17:44:23.960995278+00:00 stderr F I1208 17:44:23.960970 1 base_controller.go:76] Waiting for caches to sync for TrustDistributionController 2025-12-08T17:44:23.960995278+00:00 stderr F I1208 17:44:23.960982 1 base_controller.go:76] Waiting for caches to sync for openshift-authentication-RemoveStaleConditions 2025-12-08T17:44:23.960995278+00:00 stderr F I1208 17:44:23.960990 1 base_controller.go:76] Waiting for caches to sync for openshift-authentication-IngressState 2025-12-08T17:44:23.961007199+00:00 stderr F I1208 17:44:23.960997 1 base_controller.go:76] Waiting for caches to sync for ConfigObserver 2025-12-08T17:44:23.961041300+00:00 stderr F I1208 17:44:23.961020 1 base_controller.go:76] Waiting for caches to sync for OpenShiftAuthenticatorCertRequester 2025-12-08T17:44:23.961041300+00:00 stderr F I1208 17:44:23.961033 1 base_controller.go:76] Waiting for caches to sync for WebhookAuthenticatorController 2025-12-08T17:44:23.961051750+00:00 stderr F I1208 17:44:23.961043 1 base_controller.go:76] Waiting for caches to sync for WebhookAuthenticatorCertApprover_OpenShiftAuthenticator 2025-12-08T17:44:23.961060230+00:00 stderr F I1208 17:44:23.961055 1 base_controller.go:76] Waiting for caches to sync for auditPolicyController 2025-12-08T17:44:23.961070520+00:00 stderr F I1208 17:44:23.961064 1 base_controller.go:76] Waiting for caches to sync for openshift-oauth-apiserver-EncryptionPrune 2025-12-08T17:44:23.961080831+00:00 stderr F I1208 17:44:23.961075 1 base_controller.go:76] Waiting for caches to sync for SecretRevisionPruneController 2025-12-08T17:44:23.961101331+00:00 stderr F I1208 17:44:23.961084 1 base_controller.go:76] Waiting for caches to sync for OAuthAPIServerController-WorkloadWorkloadController 2025-12-08T17:44:23.961110491+00:00 stderr F I1208 17:44:23.961094 1 base_controller.go:76] Waiting for caches to sync for openshift-oauth-apiserver-EncryptionState 2025-12-08T17:44:23.961118852+00:00 stderr F I1208 17:44:23.961110 1 base_controller.go:76] Waiting for caches to sync for openshift-oauth-apiserver-EncryptionCondition 2025-12-08T17:44:23.961127092+00:00 stderr F I1208 17:44:23.961119 1 base_controller.go:76] Waiting for caches to sync for openshift-apiserver-APIService 2025-12-08T17:44:23.964553895+00:00 stderr F I1208 17:44:23.961183 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_authentication 2025-12-08T17:44:23.964553895+00:00 stderr F I1208 17:44:23.961203 1 base_controller.go:76] Waiting for caches to sync for RevisionController 2025-12-08T17:44:23.964553895+00:00 stderr F I1208 17:44:23.961212 1 base_controller.go:76] Waiting for caches to sync for openshift-oauth-apiserver-EncryptionMigration 2025-12-08T17:44:23.964553895+00:00 stderr F I1208 17:44:23.961221 1 
base_controller.go:76] Waiting for caches to sync for NamespaceFinalizerController_openshift-oauth-apiserver 2025-12-08T17:44:23.964553895+00:00 stderr F I1208 17:44:23.961275 1 base_controller.go:76] Waiting for caches to sync for APIServerStaticResources-StaticResources 2025-12-08T17:44:23.964553895+00:00 stderr F I1208 17:44:23.962084 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:23.964553895+00:00 stderr F I1208 17:44:23.962208 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:23.970087846+00:00 stderr F I1208 17:44:23.967660 1 reflector.go:430] "Caches populated" type="*v1.DaemonSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:23.970087846+00:00 stderr F I1208 17:44:23.969453 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:23.984011886+00:00 stderr F I1208 17:44:23.983968 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:23.987516521+00:00 stderr F I1208 17:44:23.984653 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.015089644+00:00 stderr F I1208 17:44:24.015045 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.020552233+00:00 stderr F I1208 17:44:24.020520 1 base_controller.go:76] Waiting for caches to sync for oauth-server 2025-12-08T17:44:24.020620014+00:00 stderr F I1208 17:44:24.020610 1 base_controller.go:76] Waiting for caches to sync for openshift-authentication-UnsupportedConfigOverrides 2025-12-08T17:44:24.020655495+00:00 stderr F I1208 17:44:24.020646 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:24.020686706+00:00 stderr F I1208 17:44:24.020678 1 base_controller.go:76] Waiting for caches to sync for ConfigObserver 2025-12-08T17:44:24.020713107+00:00 stderr F I1208 17:44:24.020705 1 base_controller.go:76] Waiting for caches to sync for OAuthServer-WorkloadWorkloadController 2025-12-08T17:44:24.020762148+00:00 stderr F I1208 17:44:24.020752 1 base_controller.go:76] Waiting for caches to sync for authentication-ManagementState 2025-12-08T17:44:24.020788549+00:00 stderr F I1208 17:44:24.020780 1 base_controller.go:76] Waiting for caches to sync for openshift-authentication-Metadata 2025-12-08T17:44:24.020820780+00:00 stderr F I1208 17:44:24.020812 1 base_controller.go:76] Waiting for caches to sync for OAuthClientsController_SwitchedController 2025-12-08T17:44:24.025580510+00:00 stderr F I1208 17:44:24.025551 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.031730898+00:00 stderr F I1208 17:44:24.031693 1 reflector.go:430] "Caches populated" type="*v1.Authentication" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.039070048+00:00 stderr F I1208 17:44:24.039029 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.039755867+00:00 stderr F I1208 17:44:24.039722 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=authentications" 
reflector="k8s.io/client-go/dynamic/dynamicinformer/informer.go:108" 2025-12-08T17:44:24.069203679+00:00 stderr F I1208 17:44:24.064147 1 base_controller.go:82] Caches are synced for openshift-authentication-RemoveStaleConditions 2025-12-08T17:44:24.069203679+00:00 stderr F I1208 17:44:24.064175 1 base_controller.go:119] Starting #1 worker of openshift-authentication-RemoveStaleConditions controller ... 2025-12-08T17:44:24.069203679+00:00 stderr F I1208 17:44:24.064238 1 base_controller.go:82] Caches are synced for APIServerStaticResources-StaticResources 2025-12-08T17:44:24.069203679+00:00 stderr F I1208 17:44:24.064246 1 base_controller.go:119] Starting #1 worker of APIServerStaticResources-StaticResources controller ... 2025-12-08T17:44:24.069203679+00:00 stderr F I1208 17:44:24.064510 1 reflector.go:430] "Caches populated" type="*v1.KubeAPIServer" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.069711194+00:00 stderr F I1208 17:44:24.069685 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.109801837+00:00 stderr F I1208 17:44:24.109092 1 reflector.go:430] "Caches populated" type="*v1.IngressController" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.115226265+00:00 stderr F I1208 17:44:24.114404 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.115226265+00:00 stderr F I1208 17:44:24.114916 1 reflector.go:430] "Caches populated" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.117567849+00:00 stderr F I1208 17:44:24.115921 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.119679476+00:00 stderr F I1208 17:44:24.118842 1 reflector.go:430] "Caches populated" type="*v1.APIService" reflector="k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go:141" 2025-12-08T17:44:24.120994133+00:00 stderr F I1208 17:44:24.120975 1 base_controller.go:82] Caches are synced for openshift-authentication-UnsupportedConfigOverrides 2025-12-08T17:44:24.121027733+00:00 stderr F I1208 17:44:24.121018 1 base_controller.go:119] Starting #1 worker of openshift-authentication-UnsupportedConfigOverrides controller ... 2025-12-08T17:44:24.121942838+00:00 stderr F I1208 17:44:24.121914 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:24.121942838+00:00 stderr F I1208 17:44:24.121935 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 2025-12-08T17:44:24.122497493+00:00 stderr F I1208 17:44:24.122472 1 base_controller.go:82] Caches are synced for authentication-ManagementState 2025-12-08T17:44:24.122497493+00:00 stderr F I1208 17:44:24.122482 1 base_controller.go:119] Starting #1 worker of authentication-ManagementState controller ... 2025-12-08T17:44:24.171193632+00:00 stderr F I1208 17:44:24.171145 1 base_controller.go:82] Caches are synced for WebhookAuthenticatorCertApprover_OpenShiftAuthenticator 2025-12-08T17:44:24.171193632+00:00 stderr F I1208 17:44:24.171172 1 base_controller.go:119] Starting #1 worker of WebhookAuthenticatorCertApprover_OpenShiftAuthenticator controller ... 
2025-12-08T17:44:24.180173387+00:00 stderr F E1208 17:44:24.180133 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" reflector="github.com/openshift/client-go/route/informers/externalversions/factory.go:125" type="*v1.Route" 2025-12-08T17:44:24.181762160+00:00 stderr F I1208 17:44:24.181711 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.183390294+00:00 stderr F I1208 17:44:24.183371 1 reflector.go:430] "Caches populated" type="*v1alpha1.StorageVersionMigration" reflector="sigs.k8s.io/kube-storage-version-migrator/pkg/clients/informer/factory.go:132" 2025-12-08T17:44:24.189432749+00:00 stderr F I1208 17:44:24.189383 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.222941903+00:00 stderr F I1208 17:44:24.222617 1 base_controller.go:82] Caches are synced for OAuthClientsController_SwitchedController 2025-12-08T17:44:24.222941903+00:00 stderr F I1208 17:44:24.222646 1 base_controller.go:119] Starting #1 worker of OAuthClientsController_SwitchedController controller ... 2025-12-08T17:44:24.223609571+00:00 stderr F I1208 17:44:24.223587 1 base_controller.go:76] Waiting for caches to sync for OAuthClientsController 2025-12-08T17:44:24.227151968+00:00 stderr F E1208 17:44:24.227117 1 reflector.go:200] "Failed to watch" err="failed to list *v1.OAuthClient: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io)" reflector="github.com/openshift/cluster-authentication-operator/pkg/controllers/oauthclientscontroller/oauthclientscontroller.go:57" type="*v1.OAuthClient" 2025-12-08T17:44:24.311948661+00:00 stderr F I1208 17:44:24.301961 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.362173981+00:00 stderr F I1208 17:44:24.361637 1 base_controller.go:82] Caches are synced for StatusSyncer_authentication 2025-12-08T17:44:24.362173981+00:00 stderr F I1208 17:44:24.361748 1 base_controller.go:119] Starting #1 worker of StatusSyncer_authentication controller ... 
2025-12-08T17:44:24.364290129+00:00 stderr F I1208 17:44:24.363096 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused\nOAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused\nOAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready","reason":"APIServerDeployment_UnavailablePod::CustomRouteController_SyncError::IngressStateEndpoints_NonReadyEndpoints::OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError::OAuthServerServiceEndpointAccessibleController_SyncError::OAuthServerServiceEndpointsEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: PreconditionNotReady\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused\nOAuthServerServiceEndpointAccessibleControllerAvailable: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused","reason":"APIServerDeployment_NoPod::APIServices_PreconditionNotReady::OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable::OAuthServerServiceEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:24.377648983+00:00 stderr F I1208 17:44:24.373529 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:24.377648983+00:00 stderr F I1208 17:44:24.373622 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.377648983+00:00 stderr F I1208 17:44:24.376936 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:24.511048822+00:00 stderr F I1208 
17:44:24.509944 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded changed from False to True ("APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused\nOAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused\nOAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready") 2025-12-08T17:44:24.601604082+00:00 stderr F I1208 17:44:24.601555 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.774006785+00:00 stderr F I1208 17:44:24.773464 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.861950793+00:00 stderr F I1208 17:44:24.860957 1 base_controller.go:82] Caches are synced for RouterCertsDomainValidationController 2025-12-08T17:44:24.861950793+00:00 stderr F I1208 17:44:24.860983 1 base_controller.go:119] Starting #1 worker of RouterCertsDomainValidationController controller ... 2025-12-08T17:44:24.861950793+00:00 stderr F I1208 17:44:24.861268 1 base_controller.go:82] Caches are synced for TrustDistributionController 2025-12-08T17:44:24.861950793+00:00 stderr F I1208 17:44:24.861311 1 base_controller.go:119] Starting #1 worker of TrustDistributionController controller ... 2025-12-08T17:44:24.968726285+00:00 stderr F I1208 17:44:24.968438 1 request.go:752] "Waited before sending request" delay="1.011254914s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/secrets?limit=500&resourceVersion=0" 2025-12-08T17:44:24.973526597+00:00 stderr F I1208 17:44:24.972642 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.064110167+00:00 stderr F I1208 17:44:25.064052 1 base_controller.go:82] Caches are synced for ConfigObserver 2025-12-08T17:44:25.064110167+00:00 stderr F I1208 17:44:25.064077 1 base_controller.go:119] Starting #1 worker of ConfigObserver controller ... 2025-12-08T17:44:25.064110167+00:00 stderr F I1208 17:44:25.064105 1 base_controller.go:82] Caches are synced for OpenShiftAuthenticatorCertRequester 2025-12-08T17:44:25.064152149+00:00 stderr F I1208 17:44:25.064109 1 base_controller.go:119] Starting #1 worker of OpenShiftAuthenticatorCertRequester controller ... 
2025-12-08T17:44:25.175835285+00:00 stderr F I1208 17:44:25.174984 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.267924567+00:00 stderr F E1208 17:44:25.264434 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" reflector="github.com/openshift/client-go/route/informers/externalversions/factory.go:125" type="*v1.Route" 2025-12-08T17:44:25.383432927+00:00 stderr F I1208 17:44:25.379747 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.465198439+00:00 stderr F E1208 17:44:25.462103 1 reflector.go:200] "Failed to watch" err="failed to list *v1.OAuthClient: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io)" reflector="github.com/openshift/cluster-authentication-operator/pkg/controllers/oauthclientscontroller/oauthclientscontroller.go:57" type="*v1.OAuthClient" 2025-12-08T17:44:25.569917935+00:00 stderr F I1208 17:44:25.569622 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.662463079+00:00 stderr F I1208 17:44:25.662336 1 base_controller.go:82] Caches are synced for auditPolicyController 2025-12-08T17:44:25.662463079+00:00 stderr F I1208 17:44:25.662368 1 base_controller.go:119] Starting #1 worker of auditPolicyController controller ... 2025-12-08T17:44:25.662463079+00:00 stderr F I1208 17:44:25.662412 1 base_controller.go:82] Caches are synced for RevisionController 2025-12-08T17:44:25.662463079+00:00 stderr F I1208 17:44:25.662416 1 base_controller.go:119] Starting #1 worker of RevisionController controller ... 2025-12-08T17:44:25.771755550+00:00 stderr F I1208 17:44:25.770096 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.864350876+00:00 stderr F I1208 17:44:25.863981 1 base_controller.go:82] Caches are synced for SecretRevisionPruneController 2025-12-08T17:44:25.864350876+00:00 stderr F I1208 17:44:25.864320 1 base_controller.go:119] Starting #1 worker of SecretRevisionPruneController controller ... 2025-12-08T17:44:25.864416627+00:00 stderr F I1208 17:44:25.864105 1 base_controller.go:82] Caches are synced for OAuthAPIServerController-WorkloadWorkloadController 2025-12-08T17:44:25.864416627+00:00 stderr F I1208 17:44:25.864406 1 base_controller.go:119] Starting #1 worker of OAuthAPIServerController-WorkloadWorkloadController controller ... 2025-12-08T17:44:25.866646308+00:00 stderr F I1208 17:44:25.864150 1 base_controller.go:82] Caches are synced for NamespaceFinalizerController_openshift-oauth-apiserver 2025-12-08T17:44:25.866646308+00:00 stderr F I1208 17:44:25.866320 1 base_controller.go:119] Starting #1 worker of NamespaceFinalizerController_openshift-oauth-apiserver controller ... 
2025-12-08T17:44:25.969909695+00:00 stderr F I1208 17:44:25.969612 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:25.969909695+00:00 stderr F I1208 17:44:25.969708 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.974504190+00:00 stderr F I1208 17:44:25.971783 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:26.167869994+00:00 stderr F I1208 17:44:26.167817 1 request.go:752] "Waited before sending request" delay="2.209166518s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services?limit=500&resourceVersion=0" 2025-12-08T17:44:26.170958580+00:00 stderr F I1208 17:44:26.170923 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:26.267839122+00:00 stderr F I1208 17:44:26.267362 1 base_controller.go:82] Caches are synced for WebhookAuthenticatorController 2025-12-08T17:44:26.267839122+00:00 stderr F I1208 17:44:26.267822 1 base_controller.go:119] Starting #1 worker of WebhookAuthenticatorController controller ... 2025-12-08T17:44:26.267953605+00:00 stderr F I1208 17:44:26.267869 1 base_controller.go:82] Caches are synced for openshift-apiserver-APIService 2025-12-08T17:44:26.267953605+00:00 stderr F I1208 17:44:26.267947 1 base_controller.go:119] Starting #1 worker of openshift-apiserver-APIService controller ... 2025-12-08T17:44:26.373946076+00:00 stderr F I1208 17:44:26.373447 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:26.575113863+00:00 stderr F I1208 17:44:26.574559 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:26.783274291+00:00 stderr F I1208 17:44:26.783216 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:26.981188710+00:00 stderr F I1208 17:44:26.981145 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:27.061562923+00:00 stderr F I1208 17:44:27.061489 1 base_controller.go:82] Caches are synced for openshift-authentication-ServiceCA 2025-12-08T17:44:27.061562923+00:00 stderr F I1208 17:44:27.061522 1 base_controller.go:119] Starting #1 worker of openshift-authentication-ServiceCA controller ... 2025-12-08T17:44:27.061562923+00:00 stderr F I1208 17:44:27.061555 1 base_controller.go:82] Caches are synced for OAuthServerServiceEndpointAccessibleController 2025-12-08T17:44:27.061562923+00:00 stderr F I1208 17:44:27.061559 1 base_controller.go:119] Starting #1 worker of OAuthServerServiceEndpointAccessibleController controller ... 
2025-12-08T17:44:27.107409393+00:00 stderr F I1208 17:44:27.107369 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused\nOAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused\nOAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready","reason":"APIServerDeployment_UnavailablePod::CustomRouteController_SyncError::IngressStateEndpoints_NonReadyEndpoints::OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError::OAuthServerServiceEndpointAccessibleController_SyncError::OAuthServerServiceEndpointsEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: PreconditionNotReady\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"APIServerDeployment_NoPod::APIServices_PreconditionNotReady::OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:27.114741152+00:00 stderr F I1208 17:44:27.114592 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Available message changed from "APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: PreconditionNotReady\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 
10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused\nOAuthServerServiceEndpointAccessibleControllerAvailable: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused" to "APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: PreconditionNotReady\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:27.165069815+00:00 stderr F I1208 17:44:27.159396 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused\nOAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready","reason":"APIServerDeployment_UnavailablePod::CustomRouteController_SyncError::IngressStateEndpoints_NonReadyEndpoints::OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError::OAuthServerServiceEndpointsEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: PreconditionNotReady\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"APIServerDeployment_NoPod::APIServices_PreconditionNotReady::OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:27.167918483+00:00 stderr F I1208 17:44:27.167896 1 request.go:752] "Waited before sending request" delay="3.206485463s" reason="client-side throttling, not priority and fairness" verb="GET" 
URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints?limit=500&resourceVersion=0" 2025-12-08T17:44:27.168955902+00:00 stderr F I1208 17:44:27.168939 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:27.169058444+00:00 stderr F I1208 17:44:27.169047 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:27.170070052+00:00 stderr F I1208 17:44:27.170055 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:27.178591444+00:00 stderr F I1208 17:44:27.178544 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused\nOAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused\nOAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready" to "APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused\nOAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready" 2025-12-08T17:44:27.261593848+00:00 stderr F I1208 17:44:27.261544 1 base_controller.go:82] Caches are synced for OAuthServerServiceEndpointsEndpointAccessibleController 2025-12-08T17:44:27.261650430+00:00 stderr F I1208 17:44:27.261639 1 base_controller.go:119] Starting #1 worker of OAuthServerServiceEndpointsEndpointAccessibleController controller ... 2025-12-08T17:44:27.263640865+00:00 stderr F I1208 17:44:27.263619 1 base_controller.go:82] Caches are synced for openshift-authentication-IngressState 2025-12-08T17:44:27.263683066+00:00 stderr F I1208 17:44:27.263673 1 base_controller.go:119] Starting #1 worker of openshift-authentication-IngressState controller ... 
2025-12-08T17:44:27.333161620+00:00 stderr F I1208 17:44:27.333083 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"APIServerDeployment_UnavailablePod::CustomRouteController_SyncError::IngressStateEndpoints_NonReadyEndpoints::OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: PreconditionNotReady\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"APIServerDeployment_NoPod::APIServices_PreconditionNotReady::OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:27.401285479+00:00 stderr F I1208 17:44:27.400222 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused\nOAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service 
endpoints are not ready" to "APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:27.401285479+00:00 stderr F I1208 17:44:27.400924 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:27.463855765+00:00 stderr F I1208 17:44:27.461039 1 base_controller.go:82] Caches are synced for OpenshiftAuthenticationStaticResources-StaticResources 2025-12-08T17:44:27.463855765+00:00 stderr F I1208 17:44:27.461080 1 base_controller.go:119] Starting #1 worker of OpenshiftAuthenticationStaticResources-StaticResources controller ... 2025-12-08T17:44:27.496288330+00:00 stderr F E1208 17:44:27.487799 1 reflector.go:200] "Failed to watch" err="failed to list *v1.OAuthClient: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io)" reflector="github.com/openshift/cluster-authentication-operator/pkg/controllers/oauthclientscontroller/oauthclientscontroller.go:57" type="*v1.OAuthClient" 2025-12-08T17:44:27.595827895+00:00 stderr F I1208 17:44:27.594493 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:27.601653074+00:00 stderr F E1208 17:44:27.600352 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" reflector="github.com/openshift/client-go/route/informers/externalversions/factory.go:125" type="*v1.Route" 2025-12-08T17:44:27.773426820+00:00 stderr F I1208 17:44:27.773060 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:27.862188970+00:00 stderr F I1208 17:44:27.862105 1 base_controller.go:82] Caches are synced for openshift-oauth-apiserver-EncryptionMigration 2025-12-08T17:44:27.862188970+00:00 stderr F I1208 17:44:27.862146 1 base_controller.go:119] Starting #1 worker of openshift-oauth-apiserver-EncryptionMigration controller ... 2025-12-08T17:44:27.862247412+00:00 stderr F I1208 17:44:27.862188 1 base_controller.go:82] Caches are synced for openshift-oauth-apiserver-EncryptionKey 2025-12-08T17:44:27.862247412+00:00 stderr F I1208 17:44:27.862193 1 base_controller.go:119] Starting #1 worker of openshift-oauth-apiserver-EncryptionKey controller ... 2025-12-08T17:44:27.862247412+00:00 stderr F I1208 17:44:27.862221 1 base_controller.go:82] Caches are synced for openshift-authentication-IngressNodesAvailable 2025-12-08T17:44:27.862247412+00:00 stderr F I1208 17:44:27.862227 1 base_controller.go:119] Starting #1 worker of openshift-authentication-IngressNodesAvailable controller ... 
2025-12-08T17:44:27.862259202+00:00 stderr F I1208 17:44:27.862250 1 base_controller.go:82] Caches are synced for openshift-oauth-apiserver-EncryptionPrune 2025-12-08T17:44:27.862259202+00:00 stderr F I1208 17:44:27.862255 1 base_controller.go:119] Starting #1 worker of openshift-oauth-apiserver-EncryptionPrune controller ... 2025-12-08T17:44:27.862283803+00:00 stderr F I1208 17:44:27.862275 1 base_controller.go:82] Caches are synced for openshift-oauth-apiserver-EncryptionState 2025-12-08T17:44:27.862293093+00:00 stderr F I1208 17:44:27.862281 1 base_controller.go:119] Starting #1 worker of openshift-oauth-apiserver-EncryptionState controller ... 2025-12-08T17:44:27.862305053+00:00 stderr F I1208 17:44:27.862297 1 base_controller.go:82] Caches are synced for openshift-oauth-apiserver-EncryptionCondition 2025-12-08T17:44:27.862305053+00:00 stderr F I1208 17:44:27.862301 1 base_controller.go:119] Starting #1 worker of openshift-oauth-apiserver-EncryptionCondition controller ... 2025-12-08T17:44:27.971902313+00:00 stderr F I1208 17:44:27.971840 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:28.168634710+00:00 stderr F I1208 17:44:28.168122 1 request.go:752] "Waited before sending request" delay="4.205477632s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/default/services?limit=500&resourceVersion=0" 2025-12-08T17:44:28.170096559+00:00 stderr F I1208 17:44:28.169668 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:28.370625539+00:00 stderr F I1208 17:44:28.370195 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:28.570819059+00:00 stderr F I1208 17:44:28.570783 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:28.686452884+00:00 stderr F E1208 17:44:28.686375 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: apiservices.apiregistration.k8s.io/v1.oauth.openshift.io: not available: endpoints for service/api in \"openshift-oauth-apiserver\" have no addresses with port name \"https\"" 2025-12-08T17:44:28.690378611+00:00 stderr F I1208 17:44:28.689683 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection 
refused","reason":"APIServerDeployment_UnavailablePod::CustomRouteController_SyncError::IngressStateEndpoints_NonReadyEndpoints::OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: apiservices.apiregistration.k8s.io/v1.oauth.openshift.io: not available: endpoints for service/api in \"openshift-oauth-apiserver\" have no addresses with port name \"https\"\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"APIServerDeployment_NoPod::APIServices_Error::OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:28.707622911+00:00 stderr F I1208 17:44:28.707464 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Available message changed from "APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: PreconditionNotReady\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" to "APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: apiservices.apiregistration.k8s.io/v1.oauth.openshift.io: not available: endpoints for service/api in \"openshift-oauth-apiserver\" have no addresses with port name \"https\"\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:28.715213029+00:00 stderr F I1208 17:44:28.715151 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'OpenShiftAPICheckFailed' "oauth.openshift.io.v1" failed with an attempt failed with statusCode = 503, err = the server is currently 
unable to handle the request 2025-12-08T17:44:28.758110208+00:00 stderr F E1208 17:44:28.758066 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: \"oauth.openshift.io.v1\" is not ready: an attempt failed with statusCode = 503, err = the server is currently unable to handle the request" 2025-12-08T17:44:28.763802894+00:00 stderr F I1208 17:44:28.763746 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"APIServerDeployment_UnavailablePod::CustomRouteController_SyncError::IngressStateEndpoints_NonReadyEndpoints::OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: \"oauth.openshift.io.v1\" is not ready: an attempt failed with statusCode = 503, err = the server is currently unable to handle the request\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"APIServerDeployment_NoPod::APIServices_Error::OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:28.771797772+00:00 stderr F I1208 17:44:28.771721 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:28.784943061+00:00 stderr F I1208 17:44:28.784820 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Available message changed from "APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: 
apiservices.apiregistration.k8s.io/v1.oauth.openshift.io: not available: endpoints for service/api in \"openshift-oauth-apiserver\" have no addresses with port name \"https\"\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" to "APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: \"oauth.openshift.io.v1\" is not ready: an attempt failed with statusCode = 503, err = the server is currently unable to handle the request\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:28.826676579+00:00 stderr F I1208 17:44:28.826421 1 base_controller.go:82] Caches are synced for oauth-server 2025-12-08T17:44:28.826676579+00:00 stderr F I1208 17:44:28.826455 1 base_controller.go:119] Starting #1 worker of oauth-server controller ... 2025-12-08T17:44:28.826726431+00:00 stderr F I1208 17:44:28.826702 1 base_controller.go:82] Caches are synced for ConfigObserver 2025-12-08T17:44:28.826726431+00:00 stderr F I1208 17:44:28.826708 1 base_controller.go:119] Starting #1 worker of ConfigObserver controller ... 2025-12-08T17:44:28.834838872+00:00 stderr F I1208 17:44:28.833670 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"APIServerDeployment_UnavailablePod::CustomRouteController_SyncError::IngressStateEndpoints_NonReadyEndpoints::OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection 
refused","reason":"APIServerDeployment_NoPod::OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:28.848276568+00:00 stderr F I1208 17:44:28.847569 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Available message changed from "APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nAPIServicesAvailable: \"oauth.openshift.io.v1\" is not ready: an attempt failed with statusCode = 503, err = the server is currently unable to handle the request\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" to "APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:29.168905744+00:00 stderr F I1208 17:44:29.168834 1 request.go:752] "Waited before sending request" delay="4.658841458s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events" 2025-12-08T17:44:29.773097404+00:00 stderr F I1208 17:44:29.771543 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'NoValidCertificateFound' No valid client certificate for OpenShiftAuthenticatorCertRequester is found: part of the certificate is expired: sub: CN=system:serviceaccount:openshift-oauth-apiserver:openshift-authenticator, notAfter: 2025-12-03 08:35:50 +0000 UTC 2025-12-08T17:44:29.774820051+00:00 stderr F I1208 17:44:29.774760 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CSRApproval' The CSR "system:openshift:openshift-authenticator-c6q8r" has been approved 2025-12-08T17:44:29.774869072+00:00 stderr F I1208 17:44:29.774817 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CSRCreated' A csr "system:openshift:openshift-authenticator-c6q8r" is created for OpenShiftAuthenticatorCertRequester 
2025-12-08T17:44:30.370311115+00:00 stderr F I1208 17:44:30.369997 1 request.go:752] "Waited before sending request" delay="4.502742751s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/secrets/etcd-client" 2025-12-08T17:44:30.621927087+00:00 stderr F I1208 17:44:30.621855 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.621819064 +0000 UTC))" 2025-12-08T17:44:30.623337306+00:00 stderr F I1208 17:44:30.623311 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.623291115 +0000 UTC))" 2025-12-08T17:44:30.623354947+00:00 stderr F I1208 17:44:30.623335 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.623325176 +0000 UTC))" 2025-12-08T17:44:30.623383777+00:00 stderr F I1208 17:44:30.623367 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.623351437 +0000 UTC))" 2025-12-08T17:44:30.623391378+00:00 stderr F I1208 17:44:30.623384 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.623375457 +0000 UTC))" 2025-12-08T17:44:30.623435619+00:00 stderr F I1208 17:44:30.623410 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.623390398 +0000 UTC))" 2025-12-08T17:44:30.623454959+00:00 stderr F I1208 17:44:30.623442 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] 
issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.623429429 +0000 UTC))" 2025-12-08T17:44:30.623477570+00:00 stderr F I1208 17:44:30.623460 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.623448819 +0000 UTC))" 2025-12-08T17:44:30.623505971+00:00 stderr F I1208 17:44:30.623490 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.62347094 +0000 UTC))" 2025-12-08T17:44:30.623526181+00:00 stderr F I1208 17:44:30.623511 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.623502031 +0000 UTC))" 2025-12-08T17:44:30.623764898+00:00 stderr F I1208 17:44:30.623737 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-authentication-operator.svc\" [serving] validServingFor=[metrics.openshift-authentication-operator.svc,metrics.openshift-authentication-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:09 +0000 UTC to 2027-11-02 07:52:10 +0000 UTC (now=2025-12-08 17:44:30.623713416 +0000 UTC))" 2025-12-08T17:44:30.623937132+00:00 stderr F I1208 17:44:30.623914 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:44:30.623902161 +0000 UTC))" 2025-12-08T17:44:30.970935717+00:00 stderr F I1208 17:44:30.970614 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:31.396236149+00:00 stderr F E1208 17:44:31.396191 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" reflector="github.com/openshift/client-go/route/informers/externalversions/factory.go:125" type="*v1.Route" 2025-12-08T17:44:31.562752070+00:00 stderr F I1208 17:44:31.562700 1 reflector.go:430] "Caches populated" type="*v1.OAuthClient" reflector="github.com/openshift/cluster-authentication-operator/pkg/controllers/oauthclientscontroller/oauthclientscontroller.go:57" 2025-12-08T17:44:31.568855346+00:00 stderr F I1208 17:44:31.568813 1 
request.go:752] "Waited before sending request" delay="2.587257761s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api" 2025-12-08T17:44:32.768903001+00:00 stderr F I1208 17:44:32.768827 1 request.go:752] "Waited before sending request" delay="2.993811712s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events" 2025-12-08T17:44:32.861441985+00:00 stderr F I1208 17:44:32.861351 1 annotations.go:76] Updating "auth.openshift.io/certificate-not-before" annotation for openshift-authenticator-certs/openshift-oauth-apiserver, diff:   string( 2025-12-08T17:44:32.861441985+00:00 stderr F -  "2025-11-03T08:35:50Z", 2025-12-08T17:44:32.861441985+00:00 stderr F +  "2025-12-08T17:39:29Z", 2025-12-08T17:44:32.861441985+00:00 stderr F   ) 2025-12-08T17:44:32.861441985+00:00 stderr F I1208 17:44:32.861410 1 annotations.go:82] Updating "auth.openshift.io/certificate-not-after" annotation for openshift-authenticator-certs/openshift-oauth-apiserver, diff:   string( 2025-12-08T17:44:32.861441985+00:00 stderr F -  "2025-12-03T08:35:50Z", 2025-12-08T17:44:32.861441985+00:00 stderr F +  "2026-01-07T17:39:29Z", 2025-12-08T17:44:32.861441985+00:00 stderr F   ) 2025-12-08T17:44:33.972934803+00:00 stderr F I1208 17:44:33.969074 1 request.go:752] "Waited before sending request" delay="2.998053848s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-ztdrc" 2025-12-08T17:44:34.016454420+00:00 stderr F I1208 17:44:34.016393 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"APIServerDeployment_UnavailablePod::CustomRouteController_SyncError::OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection 
refused","reason":"APIServerDeployment_NoPod::OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:34.035153590+00:00 stderr F I1208 17:44:34.034897 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" to "APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:35.167973500+00:00 stderr F I1208 17:44:35.167832 1 request.go:752] "Waited before sending request" delay="2.79614248s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events" 2025-12-08T17:44:35.372204861+00:00 stderr F I1208 17:44:35.372128 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClientCertificateCreated' A new client certificate for OpenShiftAuthenticatorCertRequester is available 2025-12-08T17:44:35.606701628+00:00 stderr F I1208 17:44:35.606617 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"CustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: 
read: connection refused","reason":"CustomRouteController_SyncError::OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:35.617922594+00:00 stderr F I1208 17:44:35.615853 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-oauth-apiserver ()\nCustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" to "CustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused",Available message changed from "APIServerDeploymentAvailable: no apiserver.openshift-oauth-apiserver pods available on any node.\nOAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" to "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" 
2025-12-08T17:44:36.168606134+00:00 stderr F I1208 17:44:36.168336 1 request.go:752] "Waited before sending request" delay="2.152092373s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/configmaps/audit" 2025-12-08T17:44:37.168473111+00:00 stderr F I1208 17:44:37.168410 1 request.go:752] "Waited before sending request" delay="2.197803963s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/oauth-serving-cert" 2025-12-08T17:44:38.368746430+00:00 stderr F I1208 17:44:38.368361 1 request.go:752] "Waited before sending request" delay="1.991586076s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication" 2025-12-08T17:44:39.368752105+00:00 stderr F I1208 17:44:39.368704 1 request.go:752] "Waited before sending request" delay="1.59785049s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/configmaps/etcd-serving-ca" 2025-12-08T17:44:39.573997105+00:00 stderr F I1208 17:44:39.573916 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/webhook-authentication-integrated-oauth -n openshift-config because it changed 2025-12-08T17:44:39.635993530+00:00 stderr F I1208 17:44:39.635932 1 reflector.go:430] "Caches populated" type="*v1.Route" reflector="github.com/openshift/client-go/route/informers/externalversions/factory.go:125" 2025-12-08T17:44:39.656284945+00:00 stderr F I1208 17:44:39.656210 1 base_controller.go:82] Caches are synced for openshift-authentication-PayloadConfig 2025-12-08T17:44:39.656284945+00:00 stderr F I1208 17:44:39.656236 1 base_controller.go:119] Starting #1 worker of openshift-authentication-PayloadConfig controller ... 2025-12-08T17:44:39.661557501+00:00 stderr F I1208 17:44:39.661519 1 base_controller.go:82] Caches are synced for CustomRouteController 2025-12-08T17:44:39.661557501+00:00 stderr F I1208 17:44:39.661532 1 base_controller.go:82] Caches are synced for OAuthServerRouteEndpointAccessibleController 2025-12-08T17:44:39.661557501+00:00 stderr F I1208 17:44:39.661538 1 base_controller.go:119] Starting #1 worker of CustomRouteController controller ... 2025-12-08T17:44:39.661557501+00:00 stderr F I1208 17:44:39.661544 1 base_controller.go:119] Starting #1 worker of OAuthServerRouteEndpointAccessibleController controller ... 2025-12-08T17:44:39.661591752+00:00 stderr F I1208 17:44:39.661560 1 base_controller.go:82] Caches are synced for ProxyConfigController 2025-12-08T17:44:39.661591752+00:00 stderr F I1208 17:44:39.661564 1 base_controller.go:119] Starting #1 worker of ProxyConfigController controller ... 2025-12-08T17:44:39.661591752+00:00 stderr F I1208 17:44:39.661565 1 base_controller.go:82] Caches are synced for WellKnownReadyController 2025-12-08T17:44:39.661591752+00:00 stderr F I1208 17:44:39.661571 1 base_controller.go:119] Starting #1 worker of WellKnownReadyController controller ... 
2025-12-08T17:44:39.715513633+00:00 stderr F I1208 17:44:39.715446 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"CustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"CustomRouteController_SyncError::OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.","reason":"OAuthServerDeployment_NoPod","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:39.722701703+00:00 stderr F I1208 17:44:39.722647 1 base_controller.go:82] Caches are synced for openshift-authentication-Metadata 2025-12-08T17:44:39.722701703+00:00 stderr F I1208 17:44:39.722670 1 base_controller.go:119] Starting #1 worker of openshift-authentication-Metadata controller ... 2025-12-08T17:44:39.722701703+00:00 stderr F I1208 17:44:39.722692 1 base_controller.go:82] Caches are synced for OAuthServer-WorkloadWorkloadController 2025-12-08T17:44:39.722750664+00:00 stderr F I1208 17:44:39.722697 1 base_controller.go:119] Starting #1 worker of OAuthServer-WorkloadWorkloadController controller ... 2025-12-08T17:44:39.723738971+00:00 stderr F I1208 17:44:39.723712 1 base_controller.go:82] Caches are synced for OAuthClientsController 2025-12-08T17:44:39.723782473+00:00 stderr F I1208 17:44:39.723769 1 base_controller.go:119] Starting #1 worker of OAuthClientsController controller ... 2025-12-08T17:44:39.734416939+00:00 stderr F I1208 17:44:39.734368 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Available message changed from "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" to "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node." 
2025-12-08T17:44:39.738818422+00:00 stderr F I1208 17:44:39.738734 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"OAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"OAuthServerDeployment_UnavailablePod::OAuthServerRouteEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:46Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.","reason":"OAuthServerDeployment_NoPod","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:39.749249832+00:00 stderr F I1208 17:44:39.748725 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "CustomRouteControllerDegraded: the server is currently unable to handle the request (get routes.route.openshift.io oauth-openshift)\nOAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" to "OAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:39.768197459+00:00 stderr F I1208 17:44:39.768146 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"OAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540-\u003e10.217.4.10:53: read: connection refused","reason":"OAuthServerRouteEndpointAccessibleController_SyncError","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is 
well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:39.778850135+00:00 stderr F I1208 17:44:39.778775 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "OAuthServerDeploymentDegraded: 1 of 1 requested instances are unavailable for oauth-openshift.openshift-authentication ()\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused" to "OAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": dial tcp: lookup oauth-openshift.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.16:55540->10.217.4.10:53: read: connection refused",Available changed from False to True ("All is well") 2025-12-08T17:44:39.800631591+00:00 stderr F I1208 17:44:39.800576 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:26Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:39.808149381+00:00 stderr F I1208 17:44:39.807344 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded changed from True to False ("All is well") 2025-12-08T17:44:40.569379181+00:00 stderr F I1208 17:44:40.569302 1 request.go:752] "Waited before sending request" delay="1.395890771s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/secrets/openshift-authenticator-certs" 2025-12-08T17:44:41.769853375+00:00 stderr F I1208 17:44:41.767947 1 request.go:752] "Waited before sending request" delay="1.992471381s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift" 2025-12-08T17:44:42.769643444+00:00 stderr F I1208 17:44:42.768173 1 request.go:752] "Waited before sending request" delay="2.373876312s" reason="client-side throttling, not priority and fairness" verb="GET" 
URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-service-ca" 2025-12-08T17:44:43.769954668+00:00 stderr F I1208 17:44:43.768261 1 request.go:752] "Waited before sending request" delay="2.386381531s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/configmaps/audit" 2025-12-08T17:44:44.769293905+00:00 stderr F I1208 17:44:44.768941 1 request.go:752] "Waited before sending request" delay="2.393432707s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/secrets/v4-0-config-system-router-certs" 2025-12-08T17:44:45.968447831+00:00 stderr F I1208 17:44:45.968155 1 request.go:752] "Waited before sending request" delay="2.394556169s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-cliconfig" 2025-12-08T17:44:47.168917804+00:00 stderr F I1208 17:44:47.168541 1 request.go:752] "Waited before sending request" delay="2.197582728s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/oauth-serving-cert" 2025-12-08T17:44:48.370120167+00:00 stderr F I1208 17:44:48.370060 1 request.go:752] "Waited before sending request" delay="2.188067063s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-metadata" 2025-12-08T17:44:49.568208884+00:00 stderr F I1208 17:44:49.568155 1 request.go:752] "Waited before sending request" delay="1.989206769s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config/secrets/webhook-authentication-integrated-oauth" 2025-12-08T17:44:50.569025351+00:00 stderr F I1208 17:44:50.568934 1 request.go:752] "Waited before sending request" delay="1.795574371s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events" 2025-12-08T17:44:51.768799075+00:00 stderr F I1208 17:44:51.768688 1 request.go:752] "Waited before sending request" delay="1.795826948s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-cliconfig" 2025-12-08T17:44:52.969041383+00:00 stderr F I1208 17:44:52.968381 1 request.go:752] "Waited before sending request" delay="1.885567825s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/secrets/etcd-client" 2025-12-08T17:44:53.968568644+00:00 stderr F I1208 17:44:53.968495 1 request.go:752] "Waited before sending request" delay="1.979183401s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-metadata" 2025-12-08T17:44:54.970842042+00:00 stderr F I1208 17:44:54.968639 1 request.go:752] "Waited before sending request" delay="1.996956044s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/configmaps/etcd-serving-ca" 
2025-12-08T17:44:55.969070188+00:00 stderr F I1208 17:44:55.968788 1 request.go:752] "Waited before sending request" delay="1.986769112s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-metadata" 2025-12-08T17:44:56.969273559+00:00 stderr F I1208 17:44:56.968858 1 request.go:752] "Waited before sending request" delay="1.7890597s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-service-ca" 2025-12-08T17:45:01.369714740+00:00 stderr F I1208 17:45:01.368741 1 request.go:752] "Waited before sending request" delay="1.136659947s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-service-ca" 2025-12-08T17:45:02.569020060+00:00 stderr F I1208 17:45:02.568271 1 request.go:752] "Waited before sending request" delay="1.390352496s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-metadata" 2025-12-08T17:45:07.575152396+00:00 stderr F I1208 17:45:07.574503 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/v4-0-config-user-idp-0-file-data -n openshift-authentication because it changed 2025-12-08T17:45:08.768664297+00:00 stderr F I1208 17:45:08.768224 1 request.go:752] "Waited before sending request" delay="1.19286721s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events" 2025-12-08T17:45:16.043555669+00:00 stderr F I1208 17:45:16.043282 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.043243611 +0000 UTC))" 2025-12-08T17:45:16.043555669+00:00 stderr F I1208 17:45:16.043547 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.043529829 +0000 UTC))" 2025-12-08T17:45:16.043586680+00:00 stderr F I1208 17:45:16.043567 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.043555129 +0000 UTC))" 2025-12-08T17:45:16.043596471+00:00 stderr F I1208 17:45:16.043585 1 tlsconfig.go:181] "Loaded client CA" index=3 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.04357325 +0000 UTC))" 2025-12-08T17:45:16.043609091+00:00 stderr F I1208 17:45:16.043601 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.0435898 +0000 UTC))" 2025-12-08T17:45:16.043645002+00:00 stderr F I1208 17:45:16.043621 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.043608831 +0000 UTC))" 2025-12-08T17:45:16.043662372+00:00 stderr F I1208 17:45:16.043643 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.043631491 +0000 UTC))" 2025-12-08T17:45:16.043671673+00:00 stderr F I1208 17:45:16.043660 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.043650212 +0000 UTC))" 2025-12-08T17:45:16.043682603+00:00 stderr F I1208 17:45:16.043675 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.043665502 +0000 UTC))" 2025-12-08T17:45:16.043725504+00:00 stderr F I1208 17:45:16.043702 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.043683593 +0000 UTC))" 2025-12-08T17:45:16.043735754+00:00 stderr F I1208 17:45:16.043724 1 tlsconfig.go:181] "Loaded client CA" index=10 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.043713754 +0000 UTC))" 2025-12-08T17:45:16.044015793+00:00 stderr F I1208 17:45:16.043986 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-authentication-operator.svc\" [serving] validServingFor=[metrics.openshift-authentication-operator.svc,metrics.openshift-authentication-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:09 +0000 UTC to 2027-11-02 07:52:10 +0000 UTC (now=2025-12-08 17:45:16.043967042 +0000 UTC))" 2025-12-08T17:45:16.044203578+00:00 stderr F I1208 17:45:16.044172 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:45:16.044155847 +0000 UTC))" 2025-12-08T17:45:22.047436978+00:00 stderr F I1208 17:45:22.047363 1 apps.go:155] Deployment "openshift-authentication/oauth-openshift" changes: {"metadata":{"annotations":{"operator.openshift.io/rvs-hash":"JawKVs2XFtN7teSQkxkR5qsUENnSBh-px1jrMN_9enjFZ8Ho24bcZRYveLP9eFJr_1bHOMYwCz8UeNXcjHw_1g","operator.openshift.io/spec-hash":"7fae9a00fcd38d33ab093f9eddf2f7349428b1c40ca338dcd94a8d42a0854869"}},"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"metadata":{"annotations":{"operator.openshift.io/rvs-hash":"JawKVs2XFtN7teSQkxkR5qsUENnSBh-px1jrMN_9enjFZ8Ho24bcZRYveLP9eFJr_1bHOMYwCz8UeNXcjHw_1g"}},"spec":{"containers":[{"args":["if [ -s /var/config/system/configmaps/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt ]; then\n echo \"Copying system trust bundle\"\n cp -f /var/config/system/configmaps/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem\nfi\nexec oauth-server osinserver \\\n--config=/var/config/system/configmaps/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig \\\n--v=2 \\\n--audit-log-format=json \\\n--audit-log-maxbackup=10 \\\n--audit-log-maxsize=100 \\\n--audit-log-path=/var/log/oauth-server/audit.log 
\\\n--audit-policy-file=/var/run/configmaps/audit/audit.yaml\n"],"command":["/bin/bash","-ec"],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c","lifecycle":{"preStop":{"exec":{"command":["sleep","25"]}}},"livenessProbe":{"failureThreshold":3,"httpGet":{"path":"/healthz","port":6443,"scheme":"HTTPS"},"initialDelaySeconds":30,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1},"name":"oauth-openshift","ports":[{"containerPort":6443,"name":"https","protocol":"TCP"}],"readinessProbe":{"failureThreshold":3,"httpGet":{"path":"/healthz","port":6443,"scheme":"HTTPS"},"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1},"resources":{"requests":{"cpu":"10m","memory":"50Mi"}},"securityContext":{"privileged":true,"readOnlyRootFilesystem":false,"runAsUser":0},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/audit","name":"audit-policies"},{"mountPath":"/var/log/oauth-server","name":"audit-dir"},{"mountPath":"/var/config/system/secrets/v4-0-config-system-session","name":"v4-0-config-system-session","readOnly":true},{"mountPath":"/var/config/system/configmaps/v4-0-config-system-cliconfig","name":"v4-0-config-system-cliconfig","readOnly":true},{"mountPath":"/var/config/system/secrets/v4-0-config-system-serving-cert","name":"v4-0-config-system-serving-cert","readOnly":true},{"mountPath":"/var/config/system/configmaps/v4-0-config-system-service-ca","name":"v4-0-config-system-service-ca","readOnly":true},{"mountPath":"/var/config/system/secrets/v4-0-config-system-router-certs","name":"v4-0-config-system-router-certs","readOnly":true},{"mountPath":"/var/config/system/secrets/v4-0-config-system-ocp-branding-template","name":"v4-0-config-system-ocp-branding-template","readOnly":true},{"mountPath":"/var/config/user/template/secret/v4-0-config-user-template-login","name":"v4-0-config-user-template-login","readOnly":true},{"mountPath":"/var/config/user/template/secret/v4-0-config-user-template-provider-selection","name":"v4-0-config-user-template-provider-selection","readOnly":true},{"mountPath":"/var/config/user/template/secret/v4-0-config-user-template-error","name":"v4-0-config-user-template-error","readOnly":true},{"mountPath":"/var/config/system/configmaps/v4-0-config-system-trusted-ca-bundle","name":"v4-0-config-system-trusted-ca-bundle","readOnly":true},{"mountPath":"/var/config/user/idp/0/secret/v4-0-config-user-idp-0-file-data","name":"v4-0-config-user-idp-0-file-data","readOnly":true}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"securityContext":null,"serviceAccount":null,"volumes":[{"configMap":{"name":"audit"},"name":"audit-policies"},{"hostPath":{"path":"/var/log/oauth-server"},"name":"audit-dir"},{"name":"v4-0-config-system-session","secret":{"secretName":"v4-0-config-system-session"}},{"configMap":{"name":"v4-0-config-system-cliconfig"},"name":"v4-0-config-system-cliconfig"},{"name":"v4-0-config-system-serving-cert","secret":{"secretName":"v4-0-config-system-serving-cert"}},{"configMap":{"name":"v4-0-config-system-service-ca"},"name":"v4-0-config-system-service-ca"},{"name":"v4-0-config-system-router-certs","secret":{"secretName":"v4-0-config-system-router-certs"}},{"name":"v4-0-config-system-ocp-branding-template","secret":{"secretName":"v4-0-config-system-ocp-branding-template"}},{"name":"v4-0-config-user-template-login","secret":{"optional":true,"secretName":"v4-0-config-user-template-login"}},{"name":"v4-0-config-user-template-provide
r-selection","secret":{"optional":true,"secretName":"v4-0-config-user-template-provider-selection"}},{"name":"v4-0-config-user-template-error","secret":{"optional":true,"secretName":"v4-0-config-user-template-error"}},{"configMap":{"name":"v4-0-config-system-trusted-ca-bundle","optional":true},"name":"v4-0-config-system-trusted-ca-bundle"},{"name":"v4-0-config-user-idp-0-file-data","secret":{"items":[{"key":"htpasswd","path":"htpasswd"}],"secretName":"v4-0-config-user-idp-0-file-data"}}]}}}} 2025-12-08T17:45:22.059631947+00:00 stderr F I1208 17:45:22.059497 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:45:22.060133661+00:00 stderr F I1208 17:45:22.060021 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/oauth-openshift -n openshift-authentication because it changed 2025-12-08T17:45:22.116941721+00:00 stderr F I1208 17:45:22.113138 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:22.118188917+00:00 stderr F I1208 17:45:22.118150 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: observed generation is 5, desired generation is 6.","reason":"OAuthServerDeployment_NewGeneration","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:22.142968956+00:00 stderr F I1208 17:45:22.142227 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Progressing changed from False to True ("OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: observed generation is 5, desired generation is 6.") 2025-12-08T17:45:22.182632060+00:00 stderr F E1208 17:45:22.182581 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:22.187648269+00:00 stderr F I1208 17:45:22.187531 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: observed generation is 5, desired 
generation is 6.","reason":"OAuthServerDeployment_NewGeneration","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:22.197938935+00:00 stderr F I1208 17:45:22.196800 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "All is well" to "OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready" 2025-12-08T17:45:22.223562188+00:00 stderr F E1208 17:45:22.223493 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:22.227025895+00:00 stderr F E1208 17:45:22.226977 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:22.231533780+00:00 stderr F E1208 17:45:22.231467 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:22.233293799+00:00 stderr F I1208 17:45:22.233245 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: observed generation is 5, desired generation is 6.","reason":"OAuthServerDeployment_NewGeneration","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:22.240733727+00:00 stderr F E1208 17:45:22.238605 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:22.240733727+00:00 stderr F I1208 17:45:22.240317 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Available changed from True to False 
("OAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF") 2025-12-08T17:45:22.258362207+00:00 stderr F E1208 17:45:22.258293 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:22.263939042+00:00 stderr F I1208 17:45:22.263894 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: observed generation is 5, desired generation is 6.","reason":"OAuthServerDeployment_NewGeneration","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:22.270213986+00:00 stderr F E1208 17:45:22.270152 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:22.273823816+00:00 stderr F I1208 17:45:22.273049 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready" to "OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:22.290436639+00:00 stderr F E1208 17:45:22.290381 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:22.302389532+00:00 stderr F E1208 17:45:22.302356 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:22.314471778+00:00 stderr F E1208 17:45:22.314425 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:22.325036162+00:00 stderr F E1208 17:45:22.324998 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController 
reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:22.333301552+00:00 stderr F E1208 17:45:22.333280 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:22.427213515+00:00 stderr F E1208 17:45:22.427146 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:22.602739159+00:00 stderr F E1208 17:45:22.602679 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:22.637998700+00:00 stderr F I1208 17:45:22.637851 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:22.648341128+00:00 stderr F E1208 17:45:22.648220 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:22.691307383+00:00 stderr F I1208 17:45:22.691199 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nIngressStateEndpointsDegraded: No subsets found for the endpoints of oauth-server\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: observed generation is 5, desired generation is 6.","reason":"OAuthServerDeployment_NewGeneration","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:22.692462256+00:00 stderr F E1208 17:45:22.692414 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:22.703385690+00:00 stderr F E1208 17:45:22.700675 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:22.703385690+00:00 stderr F I1208 17:45:22.701408 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth 
service endpoints are not ready\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" to "OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nIngressStateEndpointsDegraded: No subsets found for the endpoints of oauth-server\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:22.948702945+00:00 stderr F E1208 17:45:22.948603 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:23.237205513+00:00 stderr F I1208 17:45:23.236559 1 request.go:752] "Waited before sending request" delay="1.097102817s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events" 2025-12-08T17:45:23.296575515+00:00 stderr F E1208 17:45:23.296497 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:24.239250965+00:00 stderr F I1208 17:45:24.239185 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:24.249012776+00:00 stderr F E1208 17:45:24.248919 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:24.435964228+00:00 stderr F I1208 17:45:24.435903 1 request.go:752] "Waited before sending request" delay="1.594405094s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-cliconfig" 2025-12-08T17:45:25.438973257+00:00 stderr F I1208 17:45:25.436362 1 request.go:752] "Waited before sending request" delay="1.595473954s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/configmaps/audit" 2025-12-08T17:45:25.863200782+00:00 stderr F E1208 17:45:25.862753 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:26.636671623+00:00 stderr F I1208 17:45:26.636223 1 request.go:752] "Waited before sending request" delay="1.385466721s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication" 2025-12-08T17:45:26.835334531+00:00 stderr F E1208 17:45:26.835289 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:27.835923955+00:00 stderr F I1208 17:45:27.835805 1 request.go:752] "Waited before sending request" delay="1.392504199s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api" 2025-12-08T17:45:29.001767204+00:00 stderr F E1208 17:45:29.001668 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are 
not ready" 2025-12-08T17:45:30.991320105+00:00 stderr F E1208 17:45:30.991232 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:31.982461234+00:00 stderr F E1208 17:45:31.982336 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:32.426976584+00:00 stderr F I1208 17:45:32.425295 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nIngressStateEndpointsDegraded: No subsets found for the endpoints of oauth-server\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:32.433783033+00:00 stderr F E1208 17:45:32.433693 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:32.445858809+00:00 stderr F I1208 17:45:32.445787 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Progressing message changed from "OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: observed generation is 5, desired generation is 6." 
to "OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available",Available message changed from "OAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" to "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:32.453171072+00:00 stderr F E1208 17:45:32.453120 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:33.532305149+00:00 stderr F I1208 17:45:33.532203 1 request.go:752] "Waited before sending request" delay="1.019584099s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift" 2025-12-08T17:45:39.936198749+00:00 stderr F E1208 17:45:39.935466 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:42.238091461+00:00 stderr F E1208 17:45:42.238015 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:47.563316511+00:00 stderr F I1208 17:45:47.561232 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:47.568464455+00:00 stderr F I1208 17:45:47.568399 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:47.576725163+00:00 stderr F I1208 17:45:47.576668 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:47.596914339+00:00 stderr F I1208 17:45:47.596837 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:48.100290389+00:00 stderr F I1208 17:45:48.099582 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:48.339932791+00:00 stderr F I1208 17:45:48.339836 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:48.343685934+00:00 stderr F I1208 17:45:48.343659 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:49.327052390+00:00 stderr F I1208 17:45:49.325230 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:49.331858274+00:00 stderr F I1208 17:45:49.331788 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:49.337240506+00:00 stderr F E1208 17:45:49.335975 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointsEndpointAccessibleController reconciliation failed: oauth service endpoints are not ready" 2025-12-08T17:45:49.372766103+00:00 stderr F I1208 17:45:49.372692 1 status_controller.go:230] clusteroperator/authentication diff 
{"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:49.376795903+00:00 stderr F I1208 17:45:49.376243 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:45:49.388948899+00:00 stderr F I1208 17:45:49.385822 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nIngressStateEndpointsDegraded: No subsets found for the endpoints of oauth-server\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" to "OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:49.406399652+00:00 stderr F E1208 17:45:49.400223 1 base_controller.go:279] "Unhandled Error" err="OAuthServerRouteEndpointAccessibleController reconciliation failed: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:49.457920688+00:00 stderr F I1208 17:45:49.455689 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation 
and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:49.521518878+00:00 stderr F I1208 17:45:49.521464 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" to "OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:49.531804057+00:00 stderr F I1208 17:45:49.531736 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF\nOAuthServerServiceEndpointAccessibleControllerAvailable: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused","reason":"OAuthServerDeployment_NoPod::OAuthServerRouteEndpointAccessibleController_EndpointUnavailable::OAuthServerServiceEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:49.541896309+00:00 stderr F I1208 17:45:49.541805 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", 
ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" to "OAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF",Available message changed from "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" to "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF\nOAuthServerServiceEndpointAccessibleControllerAvailable: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused" 2025-12-08T17:45:49.560333663+00:00 stderr F I1208 17:45:49.560282 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerServiceEndpointAccessibleControllerAvailable: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused","reason":"OAuthServerDeployment_NoPod::OAuthServerServiceEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:49.571692354+00:00 stderr F I1208 17:45:49.571639 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Available message changed from "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerRouteEndpointAccessibleControllerAvailable: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF\nOAuthServerServiceEndpointAccessibleControllerAvailable: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused" to "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerServiceEndpointAccessibleControllerAvailable: Get \"https://10.217.5.136:443/healthz\": dial tcp 
10.217.5.136:443: connect: connection refused" 2025-12-08T17:45:49.584499808+00:00 stderr F E1208 17:45:49.584445 1 base_controller.go:279] "Unhandled Error" err="OAuthServerServiceEndpointAccessibleController reconciliation failed: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused" 2025-12-08T17:45:49.586175228+00:00 stderr F I1208 17:45:49.586130 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerServiceEndpointAccessibleControllerAvailable: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused","reason":"OAuthServerDeployment_NoPod::OAuthServerServiceEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:49.596550480+00:00 stderr F I1208 17:45:49.596249 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "OAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" to "OAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" 2025-12-08T17:45:49.615483977+00:00 stderr F I1208 17:45:49.615417 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods 
available on any node.\nOAuthServerServiceEndpointAccessibleControllerAvailable: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused","reason":"OAuthServerDeployment_NoPod::OAuthServerServiceEndpointAccessibleController_EndpointUnavailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:49.631429466+00:00 stderr F I1208 17:45:49.631364 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "OAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \"https://oauth-openshift.apps-crc.testing/healthz\": EOF" to "OAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused" 2025-12-08T17:45:49.657095627+00:00 stderr F I1208 17:45:49.657013 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"OAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.","reason":"OAuthServerDeployment_NoPod","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:49.666292893+00:00 stderr F I1208 17:45:49.665839 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Available message changed from "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nOAuthServerServiceEndpointAccessibleControllerAvailable: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused" to "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node." 
2025-12-08T17:45:49.677008614+00:00 stderr F I1208 17:45:49.676856 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.","reason":"OAuthServerDeployment_NoPod","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:49.685851450+00:00 stderr F I1208 17:45:49.685660 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "OAuthServerServiceEndpointAccessibleControllerDegraded: Get \"https://10.217.5.136:443/healthz\": dial tcp 10.217.5.136:443: connect: connection refused" to "All is well" 2025-12-08T17:45:50.313125388+00:00 stderr F I1208 17:45:50.313027 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys 
correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:50.324262602+00:00 stderr F W1208 17:45:50.324209 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:50.324262602+00:00 stderr F E1208 17:45:50.324228 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:50.324262602+00:00 stderr F E1208 17:45:50.324252 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:50.329271912+00:00 stderr F E1208 17:45:50.329232 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-Metadata reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-Metadata\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-Metadata&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:50.332349825+00:00 stderr F I1208 17:45:50.332303 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods 
available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:50.333142599+00:00 stderr F E1208 17:45:50.333110 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:50.336587172+00:00 stderr F W1208 17:45:50.336543 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:50.336630614+00:00 stderr F E1208 17:45:50.336604 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:50.345992034+00:00 stderr F I1208 17:45:50.345959 1 status_controller.go:230] 
clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:50.346601652+00:00 stderr F E1208 17:45:50.346566 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:50.355485550+00:00 stderr F W1208 17:45:50.355431 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:50.355540602+00:00 stderr F E1208 17:45:50.355503 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), 
Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:50.369337016+00:00 stderr F I1208 17:45:50.369279 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:50.370563642+00:00 stderr F E1208 17:45:50.370532 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:50.381806240+00:00 stderr F W1208 17:45:50.381781 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection 
refused 2025-12-08T17:45:50.381832490+00:00 stderr F E1208 17:45:50.381812 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:50.413283184+00:00 stderr F I1208 17:45:50.413238 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:50.414316356+00:00 stderr F E1208 17:45:50.414288 1 base_controller.go:279] "Unhandled Error" 
err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:50.496917835+00:00 stderr F I1208 17:45:50.496696 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:50.498165513+00:00 stderr F E1208 17:45:50.498120 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:50.521597586+00:00 stderr F I1208 17:45:50.521497 1 request.go:752] "Waited before sending request" delay="1.125105271s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api" 2025-12-08T17:45:50.537513764+00:00 stderr F W1208 17:45:50.537411 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:50.537560955+00:00 
stderr F E1208 17:45:50.537540 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:50.661626309+00:00 stderr F I1208 17:45:50.661529 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:50.663278038+00:00 stderr F E1208 17:45:50.663221 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: 
Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:50.737500096+00:00 stderr F W1208 17:45:50.737415 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:50.737580868+00:00 stderr F E1208 17:45:50.737495 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:50.938011935+00:00 stderr F E1208 17:45:50.937947 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:50.986313004+00:00 stderr F I1208 17:45:50.986263 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, 
DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:50.987900482+00:00 stderr F E1208 17:45:50.987830 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:51.138001127+00:00 stderr F W1208 17:45:51.137937 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:51.138046699+00:00 stderr F E1208 17:45:51.138033 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:51.322638449+00:00 stderr F E1208 17:45:51.322572 1 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events\": dial tcp 10.217.4.1:443: connect: connection refused" event="&Event{ObjectMeta:{authentication-operator.187f4e8db7142807 openshift-authentication-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Deployment,Namespace:openshift-authentication-operator,Name:authentication-operator,UID:,APIVersion:,ResourceVersion:,FieldPath:,},Reason:OperatorStatusChanged,Message:Status 
for clusteroperator/authentication changed: Degraded message changed from \"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \\\"https://oauth-openshift.apps-crc.testing/healthz\\\": EOF\" to \"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \\\"https://oauth-openshift.apps-crc.testing/healthz\\\": EOF\",Source:EventSource{Component:oauth-apiserver-status-controller-statussyncer_authentication,Host:,},FirstTimestamp:2025-12-08 17:45:49.521430535 +0000 UTC m=+87.683420416,LastTimestamp:2025-12-08 17:45:49.521430535 +0000 UTC m=+87.683420416,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:oauth-apiserver-status-controller-statussyncer_authentication,ReportingInstance:,}" 2025-12-08T17:45:51.337697131+00:00 stderr F E1208 17:45:51.337623 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:51.536778737+00:00 stderr F W1208 17:45:51.536724 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:51.536821598+00:00 stderr F E1208 17:45:51.536779 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:51.631495990+00:00 stderr F I1208 17:45:51.631410 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is 
well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:51.632939924+00:00 stderr F E1208 17:45:51.632832 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:51.721352897+00:00 stderr F I1208 17:45:51.721299 1 request.go:752] "Waited before sending request" delay="1.597713106s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/configmaps/trusted-ca-bundle" 2025-12-08T17:45:51.737869733+00:00 stderr F W1208 17:45:51.737811 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:51.737947745+00:00 stderr F E1208 17:45:51.737904 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:51.941057942+00:00 stderr F E1208 17:45:51.940996 1 
base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:52.138824007+00:00 stderr F E1208 17:45:52.138720 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:45:52.391727298+00:00 stderr F E1208 17:45:52.391653 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:52.547089152+00:00 stderr F W1208 17:45:52.546844 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:52.547177535+00:00 stderr F E1208 17:45:52.547164 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), 
ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:52.737617781+00:00 stderr F E1208 17:45:52.737563 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:45:52.916443919+00:00 stderr F I1208 17:45:52.916374 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 
0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:52.918899813+00:00 stderr F E1208 17:45:52.918790 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:52.938042736+00:00 stderr F W1208 17:45:52.937957 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:52.938098939+00:00 stderr F E1208 17:45:52.938032 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:53.137863425+00:00 stderr F E1208 17:45:53.137790 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:53.338339952+00:00 stderr F E1208 17:45:53.338274 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:53.537044406+00:00 stderr F E1208 17:45:53.536998 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:53.835263788+00:00 stderr F W1208 17:45:53.834867 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection 
refused 2025-12-08T17:45:53.835384991+00:00 stderr F E1208 17:45:53.835366 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:53.937576308+00:00 stderr F W1208 17:45:53.937496 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:53.937576308+00:00 stderr F E1208 17:45:53.937558 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:54.320948356+00:00 stderr F I1208 17:45:54.320817 1 request.go:752] "Waited before sending request" delay="1.178243436s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/secrets/etcd-client" 2025-12-08T17:45:54.327648757+00:00 stderr F E1208 17:45:54.327617 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:54.529117904+00:00 stderr F E1208 17:45:54.529023 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.127007291+00:00 stderr F W1208 17:45:55.126923 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch 
"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:55.127007291+00:00 stderr F E1208 17:45:55.126994 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.321416396+00:00 stderr F I1208 17:45:55.321311 1 request.go:752] "Waited before sending request" delay="1.198943028s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa" 2025-12-08T17:45:55.326361814+00:00 stderr F E1208 17:45:55.326280 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:45:55.481650535+00:00 stderr F I1208 17:45:55.481551 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are 
available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:55.482799970+00:00 stderr F E1208 17:45:55.482742 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.529013157+00:00 stderr F E1208 17:45:55.528594 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.724583857+00:00 stderr F E1208 17:45:55.724272 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.927051074+00:00 stderr F E1208 17:45:55.926993 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:56.134134840+00:00 stderr F E1208 
17:45:56.134062 1 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events\": dial tcp 10.217.4.1:443: connect: connection refused" event="&Event{ObjectMeta:{authentication-operator.187f4e8db7142807 openshift-authentication-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Deployment,Namespace:openshift-authentication-operator,Name:authentication-operator,UID:,APIVersion:,ResourceVersion:,FieldPath:,},Reason:OperatorStatusChanged,Message:Status for clusteroperator/authentication changed: Degraded message changed from \"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \\\"https://oauth-openshift.apps-crc.testing/healthz\\\": EOF\" to \"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \\\"https://oauth-openshift.apps-crc.testing/healthz\\\": EOF\",Source:EventSource{Component:oauth-apiserver-status-controller-statussyncer_authentication,Host:,},FirstTimestamp:2025-12-08 17:45:49.521430535 +0000 UTC m=+87.683420416,LastTimestamp:2025-12-08 17:45:49.521430535 +0000 UTC m=+87.683420416,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:oauth-apiserver-status-controller-statussyncer_authentication,ReportingInstance:,}" 2025-12-08T17:45:56.327284537+00:00 stderr F E1208 17:45:56.327241 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using 
fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:45:56.404021341+00:00 stderr F W1208 17:45:56.403963 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:56.404081053+00:00 stderr F E1208 17:45:56.404018 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:56.521498468+00:00 stderr F I1208 17:45:56.520687 1 request.go:752] "Waited before sending request" delay="1.391488616s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs" 2025-12-08T17:45:56.525374724+00:00 stderr F W1208 17:45:56.525317 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:56.525413885+00:00 stderr F E1208 17:45:56.525382 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:56.926825303+00:00 stderr F E1208 17:45:56.926767 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection 
refused" 2025-12-08T17:45:57.125218988+00:00 stderr F E1208 17:45:57.125146 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:57.520794692+00:00 stderr F I1208 17:45:57.520702 1 request.go:752] "Waited before sending request" delay="1.191408301s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication" 2025-12-08T17:45:57.724381193+00:00 stderr F W1208 17:45:57.724282 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:57.724381193+00:00 stderr F E1208 17:45:57.724318 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:57.971644085+00:00 stderr F W1208 17:45:57.971552 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:57.971703786+00:00 stderr F E1208 17:45:57.971641 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:45:58.126270217+00:00 stderr F E1208 17:45:58.126174 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch 
\"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:58.345600690+00:00 stderr F E1208 17:45:58.345516 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:58.525286863+00:00 stderr F E1208 17:45:58.525184 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:58.721491082+00:00 stderr F I1208 17:45:58.721417 1 request.go:752] "Waited before sending request" delay="1.11372066s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift" 2025-12-08T17:45:58.923981840+00:00 stderr F W1208 17:45:58.923820 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:45:58.924065983+00:00 stderr F E1208 17:45:58.923993 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:59.129756337+00:00 stderr F E1208 17:45:59.129668 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:45:59.325743220+00:00 stderr F E1208 17:45:59.325682 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:59.528020061+00:00 stderr F E1208 17:45:59.527764 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:59.921543773+00:00 stderr F I1208 17:45:59.921443 1 request.go:752] "Waited before sending request" delay="1.198751542s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift" 2025-12-08T17:45:59.940304566+00:00 stderr F E1208 17:45:59.940215 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection 
refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:00.128778823+00:00 stderr F E1208 17:46:00.128314 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:00.325781787+00:00 stderr F W1208 17:46:00.325695 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:00.325781787+00:00 stderr F E1208 17:46:00.325758 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:00.606514683+00:00 stderr F I1208 17:46:00.606415 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", 
Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:00.608633437+00:00 stderr F E1208 17:46:00.608559 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:00.725508324+00:00 stderr F E1208 17:46:00.725427 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:00.930773656+00:00 stderr F E1208 17:46:00.930669 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:01.121208082+00:00 stderr F I1208 17:46:01.121143 1 request.go:752] "Waited before sending request" delay="1.398445786s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/secrets/v4-0-config-system-session" 2025-12-08T17:46:01.128427688+00:00 stderr F E1208 17:46:01.128376 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:01.526758184+00:00 stderr F E1208 17:46:01.526667 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:01.533242700+00:00 stderr F W1208 17:46:01.533178 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch 
"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:01.533297761+00:00 stderr F E1208 17:46:01.533252 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:46:01.727585233+00:00 stderr F W1208 17:46:01.727499 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:01.727631494+00:00 stderr F E1208 17:46:01.727573 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:02.126425974+00:00 stderr F E1208 17:46:02.126335 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:02.321340654+00:00 stderr F I1208 17:46:02.321241 1 request.go:752] "Waited before sending request" delay="1.388567608s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/configmaps/audit" 2025-12-08T17:46:02.328680465+00:00 stderr F E1208 17:46:02.328625 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:02.961774066+00:00 stderr F E1208 17:46:02.961322 1 base_controller.go:279] "Unhandled 
Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:03.125789499+00:00 stderr F W1208 17:46:03.125701 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:03.125789499+00:00 stderr F E1208 17:46:03.125767 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:03.321479003+00:00 stderr F I1208 17:46:03.321393 1 request.go:752] "Waited before sending request" delay="1.399622339s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa" 2025-12-08T17:46:03.328855715+00:00 stderr F E1208 17:46:03.328799 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 
2025-12-08T17:46:03.525380103+00:00 stderr F E1208 17:46:03.525301 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:03.726961044+00:00 stderr F E1208 17:46:03.726907 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:03.931477053+00:00 stderr F E1208 17:46:03.931017 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:04.128629580+00:00 stderr F E1208 17:46:04.128550 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch 
\"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:04.334338495+00:00 stderr F E1208 17:46:04.334278 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:04.523904344+00:00 stderr F I1208 17:46:04.523009 1 request.go:752] "Waited before sending request" delay="1.395685192s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs" 2025-12-08T17:46:04.531038169+00:00 stderr F W1208 17:46:04.530665 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:04.531038169+00:00 stderr F E1208 17:46:04.530696 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:04.927589401+00:00 stderr F E1208 17:46:04.927318 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.531064725+00:00 stderr F E1208 17:46:05.531023 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.721924984+00:00 stderr F I1208 17:46:05.720594 1 request.go:752] "Waited before sending request" delay="1.188265476s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs" 2025-12-08T17:46:05.725915333+00:00 stderr F W1208 17:46:05.725526 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: 
connect: connection refused 2025-12-08T17:46:05.725915333+00:00 stderr F E1208 17:46:05.725583 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.125532248+00:00 stderr F E1208 17:46:06.125418 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.330010706+00:00 stderr F E1208 17:46:06.329954 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.721534908+00:00 stderr F I1208 17:46:06.721174 1 request.go:752] "Waited before sending request" delay="1.107267796s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-service-ca" 2025-12-08T17:46:06.727791196+00:00 stderr F E1208 17:46:06.727721 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.928085287+00:00 stderr F W1208 17:46:06.928019 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.928284074+00:00 stderr F E1208 17:46:06.928220 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:07.129575545+00:00 stderr F E1208 17:46:07.129511 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" 
(string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:07.325499456+00:00 stderr F E1208 17:46:07.325413 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:07.522416537+00:00 stderr F E1208 17:46:07.522309 1 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events\": dial tcp 10.217.4.1:443: connect: connection refused" event="&Event{ObjectMeta:{authentication-operator.187f4e8db7142807 openshift-authentication-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Deployment,Namespace:openshift-authentication-operator,Name:authentication-operator,UID:,APIVersion:,ResourceVersion:,FieldPath:,},Reason:OperatorStatusChanged,Message:Status for clusteroperator/authentication changed: Degraded message changed from \"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \\\"https://oauth-openshift.apps-crc.testing/healthz\\\": EOF\" to \"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \\\"https://oauth-openshift.apps-crc.testing/healthz\\\": EOF\",Source:EventSource{Component:oauth-apiserver-status-controller-statussyncer_authentication,Host:,},FirstTimestamp:2025-12-08 17:45:49.521430535 +0000 UTC m=+87.683420416,LastTimestamp:2025-12-08 17:45:49.521430535 +0000 UTC m=+87.683420416,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:oauth-apiserver-status-controller-statussyncer_authentication,ReportingInstance:,}" 2025-12-08T17:46:07.920971180+00:00 stderr F I1208 17:46:07.920606 1 request.go:752] "Waited before sending request" delay="1.397806937s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift" 2025-12-08T17:46:07.931447605+00:00 stderr F E1208 17:46:07.931379 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:08.125997724+00:00 stderr F E1208 17:46:08.125906 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:08.526697111+00:00 stderr F E1208 17:46:08.526617 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 
10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:09.129235636+00:00 stderr F E1208 17:46:09.129146 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:09.736280198+00:00 stderr F E1208 17:46:09.736187 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:09.926259040+00:00 stderr F E1208 17:46:09.926163 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:10.132395257+00:00 stderr F E1208 17:46:10.132312 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:10.536962041+00:00 stderr F E1208 17:46:10.536450 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:10.733087868+00:00 stderr F W1208 17:46:10.732984 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:10.733087868+00:00 stderr F E1208 17:46:10.733030 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get \"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:10.744519461+00:00 stderr F W1208 17:46:10.744446 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:10.744519461+00:00 stderr F E1208 17:46:10.744490 1 base_controller.go:279] "Unhandled Error" 
err="CustomRouteController reconciliation failed: Get \"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:10.760077967+00:00 stderr F W1208 17:46:10.760001 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:10.760117778+00:00 stderr F E1208 17:46:10.760072 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get \"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:10.786996355+00:00 stderr F W1208 17:46:10.785989 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:10.786996355+00:00 stderr F E1208 17:46:10.786046 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get \"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:10.832747698+00:00 stderr F W1208 17:46:10.832661 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:10.832798800+00:00 stderr F E1208 17:46:10.832736 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get \"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:10.851334377+00:00 stderr F I1208 17:46:10.851283 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, 
ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:10.852320486+00:00 stderr F E1208 17:46:10.852270 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:10.921705479+00:00 stderr F W1208 17:46:10.921598 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:10.921787361+00:00 stderr F E1208 17:46:10.921698 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get \"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:11.092417833+00:00 stderr F W1208 17:46:11.091913 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:11.092417833+00:00 stderr F E1208 17:46:11.091975 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get \"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:11.326918452+00:00 stderr F E1208 17:46:11.325494 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": 
dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:11.418628854+00:00 stderr F W1208 17:46:11.418572 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:11.418628854+00:00 stderr F E1208 17:46:11.418615 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get \"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:11.727066043+00:00 stderr F E1208 17:46:11.727013 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:12.065995186+00:00 stderr F W1208 17:46:12.065931 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:12.065995186+00:00 stderr F E1208 17:46:12.065979 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get 
\"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:12.129185322+00:00 stderr F E1208 17:46:12.129108 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:12.523817488+00:00 stderr F E1208 17:46:12.523765 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:12.743921445+00:00 stderr F E1208 17:46:12.740185 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:13.353794700+00:00 stderr F W1208 17:46:13.353433 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": 
Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:13.353861342+00:00 stderr F E1208 17:46:13.353785 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get \"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:13.529602507+00:00 stderr F E1208 17:46:13.529513 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:13.728650631+00:00 stderr F E1208 17:46:13.728577 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get 
\"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:14.926219407+00:00 stderr F E1208 17:46:14.926122 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:15.131959532+00:00 stderr F E1208 17:46:15.131832 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch 
\"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:15.327076099+00:00 stderr F E1208 17:46:15.326976 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:15.528306578+00:00 stderr F E1208 17:46:15.528078 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:15.920833661+00:00 stderr F W1208 17:46:15.920505 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:15.920833661+00:00 stderr F E1208 17:46:15.920801 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get 
\"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:16.326833876+00:00 stderr F W1208 17:46:16.326750 1 base_controller.go:242] Updating status of "RouterCertsDomainValidationController" failed: unable to ApplyStatus for operator using fieldManager "RouterCertsDomainValidationController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=RouterCertsDomainValidationController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:16.327043014+00:00 stderr F E1208 17:46:16.327015 1 base_controller.go:279] "Unhandled Error" err="RouterCertsDomainValidationController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/secrets/router-certs\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:16.728158073+00:00 stderr F E1208 17:46:16.728084 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:16.929115244+00:00 stderr F E1208 17:46:16.929030 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:17.129004104+00:00 stderr F E1208 17:46:17.128917 1 
base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:17.922201822+00:00 stderr F E1208 17:46:17.922101 1 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events\": dial tcp 10.217.4.1:443: connect: connection refused" event="&Event{ObjectMeta:{authentication-operator.187f4e8db7142807 openshift-authentication-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Deployment,Namespace:openshift-authentication-operator,Name:authentication-operator,UID:,APIVersion:,ResourceVersion:,FieldPath:,},Reason:OperatorStatusChanged,Message:Status for clusteroperator/authentication changed: Degraded message changed from \"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \\\"https://oauth-openshift.apps-crc.testing/healthz\\\": EOF\" to \"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \\\"https://oauth-openshift.apps-crc.testing/healthz\\\": EOF\",Source:EventSource{Component:oauth-apiserver-status-controller-statussyncer_authentication,Host:,},FirstTimestamp:2025-12-08 17:45:49.521430535 +0000 UTC 
m=+87.683420416,LastTimestamp:2025-12-08 17:45:49.521430535 +0000 UTC m=+87.683420416,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:oauth-apiserver-status-controller-statussyncer_authentication,ReportingInstance:,}" 2025-12-08T17:46:18.128986749+00:00 stderr F E1208 17:46:18.128860 1 base_controller.go:279] "Unhandled Error" err="OAuthAPIServerController-WorkloadWorkloadController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"OAuthAPIServerController-Workload\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OAuthAPIServerController-Workload&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:18.527368967+00:00 stderr F E1208 17:46:18.527080 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:18.732223605+00:00 stderr F E1208 17:46:18.732022 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:18.922256929+00:00 stderr F E1208 17:46:18.922175 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:19.122376305+00:00 stderr F E1208 17:46:19.122254 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:19.322603426+00:00 stderr F E1208 17:46:19.322511 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:19.723034695+00:00 stderr F E1208 17:46:19.722535 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:20.321741406+00:00 stderr F E1208 17:46:20.321699 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:20.532377008+00:00 stderr F E1208 17:46:20.532301 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete 
\"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:20.922439586+00:00 stderr F E1208 17:46:20.922380 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:21.052279052+00:00 stderr F W1208 17:46:21.052223 1 base_controller.go:242] Updating status of "CustomRouteController" failed: unable to ApplyStatus for operator using fieldManager "CustomRouteController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=CustomRouteController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:21.052279052+00:00 stderr F E1208 17:46:21.052268 1 base_controller.go:279] "Unhandled Error" err="CustomRouteController reconciliation failed: Get \"https://10.217.4.1:443/apis/route.openshift.io/v1/namespaces/openshift-authentication/routes/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:21.128718778+00:00 stderr F E1208 17:46:21.128624 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-ServiceCA reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-ServiceCA\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-ServiceCA&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:21.328448102+00:00 stderr F E1208 17:46:21.328366 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: 
connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:21.522674222+00:00 stderr F E1208 17:46:21.522586 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:21.846100660+00:00 stderr F E1208 17:46:21.846022 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.025276208+00:00 stderr F W1208 17:46:22.025204 1 base_controller.go:242] Updating status of "WellKnownReadyController" failed: unable to ApplyStatus for operator using fieldManager "WellKnownReadyController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WellKnownReadyController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:22.025351111+00:00 stderr F E1208 17:46:22.025274 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), 
DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:46:22.445058518+00:00 stderr F W1208 17:46:22.444848 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:22.445058518+00:00 stderr F E1208 17:46:22.444956 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.458498971+00:00 stderr F W1208 17:46:22.458409 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:22.458498971+00:00 stderr F E1208 17:46:22.458474 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.476459721+00:00 stderr F W1208 17:46:22.476029 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:22.476459721+00:00 stderr F E1208 17:46:22.476073 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.488815972+00:00 stderr F E1208 17:46:22.488719 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.503120031+00:00 stderr F W1208 17:46:22.503053 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch 
"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:22.503170782+00:00 stderr F E1208 17:46:22.503125 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.550334848+00:00 stderr F W1208 17:46:22.550254 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:22.550334848+00:00 stderr F E1208 17:46:22.550316 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.640827604+00:00 stderr F W1208 17:46:22.640694 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:22.640827604+00:00 stderr F E1208 17:46:22.640758 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.808338492+00:00 stderr F W1208 17:46:22.808276 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:22.808338492+00:00 stderr F E1208 17:46:22.808317 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.133957626+00:00 stderr F W1208 17:46:23.133856 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:23.134020748+00:00 stderr F E1208 17:46:23.133960 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get 
\"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.459654292+00:00 stderr F E1208 17:46:23.459304 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.660397547+00:00 stderr F E1208 17:46:23.660291 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-authentication-operator/leases/cluster-authentication-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:23.661953584+00:00 stderr F E1208 17:46:23.661917 1 leaderelection.go:436] error retrieving resource lock openshift-authentication-operator/cluster-authentication-operator-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-authentication-operator/leases/cluster-authentication-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:23.772686158+00:00 stderr F E1208 17:46:23.772603 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.782143042+00:00 stderr F W1208 17:46:23.782083 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:23.782279536+00:00 stderr F E1208 17:46:23.782254 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.079781086+00:00 stderr F E1208 17:46:24.079404 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"oauth-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/oauth-apiserver-pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-oauth-apiserver/poddisruptionbudgets/oauth-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/sa.yaml\" (string): 
Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/serviceaccounts/oauth-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_binding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-apiserver/RBAC/useroauthaccesstokens_clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:useroauthaccesstoken-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:24.365922894+00:00 stderr F I1208 17:46:24.365789 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:24.367090530+00:00 stderr F E1208 17:46:24.366969 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Put \"https://10.217.4.1:443/apis/config.openshift.io/v1/clusteroperators/authentication/status\": dial tcp 10.217.4.1:443: connect: connection refused" 
2025-12-08T17:46:25.074812762+00:00 stderr F W1208 17:46:25.074748 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.075040349+00:00 stderr F E1208 17:46:25.075003 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.176735712+00:00 stderr F E1208 17:46:25.176656 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-PayloadConfig reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-authentication-PayloadConfig\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-authentication-PayloadConfig&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.671149342+00:00 stderr F E1208 17:46:25.670587 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"oauth-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=oauth-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.869736022+00:00 stderr F E1208 17:46:25.869559 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-oauth-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.878444924+00:00 stderr F E1208 17:46:25.878390 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-oauth-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.890863377+00:00 stderr F E1208 17:46:25.890808 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-oauth-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.913513087+00:00 stderr F E1208 17:46:25.913457 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-oauth-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.956318831+00:00 stderr F E1208 17:46:25.956244 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-oauth-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.039361194+00:00 stderr F E1208 17:46:26.039272 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-oauth-apiserver reconciliation failed: Get 
\"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.202609104+00:00 stderr F E1208 17:46:26.202541 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-oauth-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.275222974+00:00 stderr F E1208 17:46:26.275159 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.286438320+00:00 stderr F E1208 17:46:26.286372 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.303016908+00:00 stderr F E1208 17:46:26.302939 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.328803782+00:00 stderr F E1208 17:46:26.328744 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.334533554+00:00 stderr F E1208 17:46:26.334488 1 base_controller.go:279] "Unhandled Error" err="openshift-authentication-IngressState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/endpoints/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.374419971+00:00 stderr F E1208 17:46:26.374319 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.524762754+00:00 stderr F E1208 17:46:26.524671 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-oauth-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.677469197+00:00 stderr F E1208 17:46:26.677379 1 base_controller.go:279] "Unhandled Error" 
err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.076739832+00:00 stderr F E1208 17:46:27.076673 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.167618079+00:00 stderr F E1208 17:46:27.167507 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-oauth-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.482935134+00:00 stderr F E1208 17:46:27.479128 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.540634475+00:00 stderr F E1208 17:46:27.540546 1 base_controller.go:279] "Unhandled Error" err="OpenshiftAuthenticationStaticResources-StaticResources reconciliation failed: [\"oauth-openshift/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authorization.openshift.io_rolebindingrestrictions.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/rolebindingrestrictions.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/authentication-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-authentication\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/oauth-service.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/roles/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, \"oauth-openshift/trust_distribution_rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-config-managed/rolebindings/system:openshift:oauth-servercert-trust\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager 
\"OpenshiftAuthenticationStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=OpenshiftAuthenticationStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:27.641067060+00:00 stderr F W1208 17:46:27.640990 1 base_controller.go:242] Updating status of "WebhookAuthenticatorController" failed: unable to ApplyStatus for operator using fieldManager "WebhookAuthenticatorController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=WebhookAuthenticatorController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:27.641136692+00:00 stderr F E1208 17:46:27.641069 1 base_controller.go:279] "Unhandled Error" err="WebhookAuthenticatorController reconciliation failed: Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/authentications/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.924548959+00:00 stderr F E1208 17:46:27.924430 1 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events\": dial tcp 10.217.4.1:443: connect: connection refused" event="&Event{ObjectMeta:{authentication-operator.187f4e8db7142807 openshift-authentication-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Deployment,Namespace:openshift-authentication-operator,Name:authentication-operator,UID:,APIVersion:,ResourceVersion:,FieldPath:,},Reason:OperatorStatusChanged,Message:Status for clusteroperator/authentication changed: Degraded message changed from \"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\\nIngressStateEndpointsDegraded: All 1 endpoints for oauth-server are reporting 'not ready'\\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \\\"https://oauth-openshift.apps-crc.testing/healthz\\\": EOF\" to \"OAuthServerServiceEndpointsEndpointAccessibleControllerDegraded: oauth service endpoints are not ready\\nOAuthServerRouteEndpointAccessibleControllerDegraded: Get \\\"https://oauth-openshift.apps-crc.testing/healthz\\\": EOF\",Source:EventSource{Component:oauth-apiserver-status-controller-statussyncer_authentication,Host:,},FirstTimestamp:2025-12-08 17:45:49.521430535 +0000 UTC m=+87.683420416,LastTimestamp:2025-12-08 17:45:49.521430535 +0000 UTC m=+87.683420416,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:oauth-apiserver-status-controller-statussyncer_authentication,ReportingInstance:,}" 2025-12-08T17:46:28.128892552+00:00 stderr F E1208 17:46:28.128793 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.450506176+00:00 stderr F E1208 17:46:28.449910 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-oauth-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver\": dial tcp 10.217.4.1:443: connect: 
connection refused" 2025-12-08T17:46:29.415933864+00:00 stderr F E1208 17:46:29.415329 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/authentications/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:31.275960704+00:00 stderr F I1208 17:46:31.275852 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:31.338944134+00:00 stderr F I1208 17:46:31.335207 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:31.343691127+00:00 stderr F I1208 17:46:31.343636 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Available message changed from "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node." 
to "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:46:31.458994358+00:00 stderr F I1208 17:46:31.458931 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:32.335830506+00:00 stderr F I1208 17:46:32.334941 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:35.041566881+00:00 stderr F I1208 17:46:35.041482 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:38.628946089+00:00 stderr F I1208 17:46:38.627351 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} 
(check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:38.636541997+00:00 stderr F E1208 17:46:38.636468 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:38.643988531+00:00 stderr F I1208 17:46:38.643860 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:38.651454735+00:00 stderr F E1208 17:46:38.651386 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:38.664030182+00:00 stderr F I1208 17:46:38.663971 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is 
well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:38.669676581+00:00 stderr F E1208 17:46:38.669626 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:38.692500926+00:00 stderr F I1208 17:46:38.692433 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, 
CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:38.698194097+00:00 stderr F E1208 17:46:38.698086 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:38.741656542+00:00 stderr F I1208 17:46:38.741218 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is 
well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:38.749928601+00:00 stderr F E1208 17:46:38.749701 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:38.831785657+00:00 stderr F I1208 17:46:38.831710 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:38.844345344+00:00 stderr F E1208 17:46:38.844260 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:39.008819641+00:00 stderr F I1208 17:46:39.008725 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 
1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:39.018220314+00:00 stderr F E1208 17:46:39.018151 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:39.341272360+00:00 stderr F I1208 17:46:39.341161 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), 
Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:39.347338042+00:00 stderr F E1208 17:46:39.347260 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:39.991065074+00:00 stderr F I1208 17:46:39.990966 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:39.998251349+00:00 stderr F E1208 17:46:39.998191 1 
base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:41.282262690+00:00 stderr F I1208 17:46:41.281823 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:41.291578809+00:00 stderr F E1208 17:46:41.291490 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:43.855978481+00:00 stderr F I1208 17:46:43.855644 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are 
available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:43.864671313+00:00 stderr F E1208 17:46:43.864591 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:48.987655217+00:00 stderr F I1208 17:46:48.987318 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), 
OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:48.992260452+00:00 stderr F E1208 17:46:48.992221 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:57.860082703+00:00 stderr F I1208 17:46:57.859515 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:57.900599158+00:00 stderr F E1208 17:46:57.900312 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:46:57.937286303+00:00 stderr F E1208 17:46:57.937228 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", 
FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:46:59.234652193+00:00 stderr F I1208 17:46:59.234558 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating::WellKnownReadyController","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"OAuthServerDeployment_NoPod::WellKnown_NotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:46:59.241247110+00:00 stderr F E1208 17:46:59.241194 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_authentication reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"authentication\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:59.346063570+00:00 stderr F I1208 17:46:59.345978 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:59.426768421+00:00 stderr F I1208 17:46:59.426682 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:00.053776628+00:00 stderr F I1208 17:47:00.052803 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:02.928954226+00:00 stderr F I1208 17:47:02.928220 1 reflector.go:430] "Caches populated" type="*v1.KubeAPIServer" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125" 2025-12-08T17:47:03.017497813+00:00 stderr F E1208 
17:47:03.017448 1 base_controller.go:279] "Unhandled Error" err="WellKnownReadyController reconciliation failed: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" 2025-12-08T17:47:03.802933308+00:00 stderr F I1208 17:47:03.802496 1 reflector.go:430] "Caches populated" type="*v1.APIService" reflector="k8s.io/kube-aggregator/pkg/client/informers/externalversions/factory.go:141" 2025-12-08T17:47:03.907237642+00:00 stderr F I1208 17:47:03.907149 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:05.339829348+00:00 stderr F I1208 17:47:05.339762 1 reflector.go:430] "Caches populated" type="*v1.APIServer" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:05.449394438+00:00 stderr F I1208 17:47:05.449345 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:05.449586814+00:00 stderr F I1208 17:47:05.449567 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:05.451007928+00:00 stderr F I1208 17:47:05.450944 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:06.097284473+00:00 stderr F I1208 17:47:06.097189 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:06.097382976+00:00 stderr F I1208 17:47:06.097335 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:06.100982269+00:00 stderr F I1208 17:47:06.099212 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:06.133329717+00:00 stderr F I1208 17:47:06.133227 1 reflector.go:430] "Caches populated" type="*v1alpha1.StorageVersionMigration" reflector="sigs.k8s.io/kube-storage-version-migrator/pkg/clients/informer/factory.go:132" 2025-12-08T17:47:07.179748157+00:00 stderr F I1208 17:47:07.179696 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:07.785924210+00:00 stderr F I1208 17:47:07.785808 1 reflector.go:430] "Caches populated" type="*v1.Console" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:08.203987920+00:00 stderr F I1208 17:47:08.203861 1 
reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:08.215287996+00:00 stderr F I1208 17:47:08.215206 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:08.724577138+00:00 stderr F I1208 17:47:08.724237 1 reflector.go:430] "Caches populated" type="*v1.IngressController" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125" 2025-12-08T17:47:08.986799002+00:00 stderr F I1208 17:47:08.986685 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:09.970157517+00:00 stderr F I1208 17:47:09.970112 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:11.168741498+00:00 stderr F I1208 17:47:11.168362 1 reflector.go:430] "Caches populated" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:13.711737679+00:00 stderr F I1208 17:47:13.711616 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:14.147359242+00:00 stderr F I1208 17:47:14.147287 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:16.582127497+00:00 stderr F I1208 17:47:16.581572 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:16.745871341+00:00 stderr F I1208 17:47:16.745747 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:17.746112588+00:00 stderr F I1208 17:47:17.745992 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=authentications" reflector="k8s.io/client-go/dynamic/dynamicinformer/informer.go:108" 2025-12-08T17:47:17.750851507+00:00 stderr F I1208 17:47:17.750774 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"WellKnownReadyControllerDegraded: failed to get API server IPs: unable to find kube api server endpointLister port: \u0026v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:\u003cnil\u003e, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 
pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.","reason":"OAuthServerDeployment_NoPod","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:17.762121382+00:00 stderr F I1208 17:47:17.761763 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "All is well" to "WellKnownReadyControllerDegraded: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)",Available message changed from "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node.\nWellKnownAvailable: The well-known endpoint is not yet available: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" to "OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods available on any node." 
2025-12-08T17:47:18.781245024+00:00 stderr F I1208 17:47:18.780733 1 request.go:752] "Waited before sending request" delay="1.027160824s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/secrets/etcd-client" 2025-12-08T17:47:19.583704444+00:00 stderr F I1208 17:47:19.583632 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:19.583993093+00:00 stderr F I1208 17:47:19.583963 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:19.585912193+00:00 stderr F I1208 17:47:19.585583 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:19.980454893+00:00 stderr F I1208 17:47:19.980336 1 request.go:752] "Waited before sending request" delay="1.493175004s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/serviceaccounts/oauth-openshift" 2025-12-08T17:47:20.288972125+00:00 stderr F I1208 17:47:20.288848 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:20.489646472+00:00 stderr F I1208 17:47:20.489569 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:20.783912885+00:00 stderr F I1208 17:47:20.783786 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:20.981454384+00:00 stderr F I1208 17:47:20.980948 1 request.go:752] "Waited before sending request" delay="1.325045721s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/secrets?resourceVersion=38756" 2025-12-08T17:47:20.996406114+00:00 stderr F I1208 17:47:20.996328 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.119779408+00:00 stderr F I1208 17:47:21.117820 1 reflector.go:430] "Caches populated" type="*v1.OAuth" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.185321242+00:00 stderr F I1208 17:47:21.185208 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:23.617727491+00:00 stderr F I1208 17:47:23.617208 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:23.667974843+00:00 stderr F I1208 17:47:23.667699 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentProgressing: deployment/oauth-openshift.openshift-authentication: 1/1 pods have been updated to the latest generation and 0/1 pods are available","reason":"OAuthServerDeployment_PodsUpdating","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:45:22Z","message":"OAuthServerDeploymentAvailable: no oauth-openshift.openshift-authentication pods 
available on any node.","reason":"OAuthServerDeployment_NoPod","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:23.680152036+00:00 stderr F I1208 17:47:23.680073 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "WellKnownReadyControllerDegraded: failed to get API server IPs: unable to find kube api server endpointLister port: &v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kubernetes\", GenerateName:\"\", Namespace:\"default\", SelfLink:\"\", UID:\"9f2c22a8-5026-4436-90a9-f4bd3fd80926\", ResourceVersion:\"38832\", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 7, 39, 5, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"endpointslice.kubernetes.io/skip-mirror\":\"true\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:\"kube-apiserver\", Operation:\"Update\", APIVersion:\"v1\", Time:time.Date(2025, time.November, 3, 8, 57, 18, 0, time.Local), FieldsType:\"FieldsV1\", FieldsV1:(*v1.FieldsV1)(0xc002b5c228), Subresource:\"\"}}}, Subsets:[]v1.EndpointSubset(nil)} (check kube-apiserver that it deploys correctly)" to "All is well" 2025-12-08T17:47:23.902921609+00:00 stderr F I1208 17:47:23.899653 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:23.961480793+00:00 stderr F I1208 17:47:23.961409 1 status_controller.go:230] clusteroperator/authentication diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:47:23Z","message":"AuthenticatorCertKeyProgressing: All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:47:23Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:58Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:58Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:23.971486157+00:00 stderr F I1208 17:47:23.971401 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Progressing changed from True to False ("AuthenticatorCertKeyProgressing: All is well"),Available changed from False to True ("All is well") 2025-12-08T17:47:24.780840355+00:00 stderr F I1208 17:47:24.780474 1 request.go:752] "Waited before sending request" delay="1.092965146s" reason="client-side throttling, not priority and 
fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/services/api" 2025-12-08T17:47:25.781801684+00:00 stderr F I1208 17:47:25.781253 1 request.go:752] "Waited before sending request" delay="1.394978883s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication-operator/events" 2025-12-08T17:47:26.981219641+00:00 stderr F I1208 17:47:26.980504 1 request.go:752] "Waited before sending request" delay="1.316043407s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/configmaps/audit" 2025-12-08T17:47:28.183082585+00:00 stderr F I1208 17:47:28.182684 1 request.go:752] "Waited before sending request" delay="1.40001973s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/configmaps/etcd-serving-ca" 2025-12-08T17:47:28.990096459+00:00 stderr F I1208 17:47:28.989994 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:30.391489923+00:00 stderr F I1208 17:47:30.391384 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:31.580849513+00:00 stderr F I1208 17:47:31.580774 1 request.go:752] "Waited before sending request" delay="1.188711359s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/oauth-serving-cert" 2025-12-08T17:47:32.781089655+00:00 stderr F I1208 17:47:32.780745 1 request.go:752] "Waited before sending request" delay="1.597137646s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-oauth-apiserver/configmaps/trusted-ca-bundle" 2025-12-08T17:47:33.585324092+00:00 stderr F I1208 17:47:33.584758 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:33.638294799+00:00 stderr F I1208 17:47:33.638210 1 reflector.go:430] "Caches populated" type="*v1.DaemonSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:33.981184833+00:00 stderr F I1208 17:47:33.981114 1 request.go:752] "Waited before sending request" delay="1.796379318s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/services/oauth-openshift" 2025-12-08T17:47:34.174678684+00:00 stderr F I1208 17:47:34.174619 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:209" 2025-12-08T17:47:36.077026438+00:00 stderr F I1208 17:47:36.076232 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:42.450908711+00:00 stderr F I1208 17:47:42.450301 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:43.529685120+00:00 stderr F I1208 17:47:43.529381 1 reflector.go:430] "Caches populated" type="*v1.Authentication" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:44.312976247+00:00 stderr F I1208 17:47:44.312918 1 reflector.go:430] "Caches 
populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:45.113647521+00:00 stderr F I1208 17:47:45.113533 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:46.911244148+00:00 stderr F I1208 17:47:46.910758 1 request.go:752] "Waited before sending request" delay="1.083434186s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication" 2025-12-08T17:47:48.110421597+00:00 stderr F I1208 17:47:48.110333 1 request.go:752] "Waited before sending request" delay="1.597782896s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/secrets/v4-0-config-system-session" 2025-12-08T17:47:48.514226428+00:00 stderr F I1208 17:47:48.514142 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:49.111144629+00:00 stderr F I1208 17:47:49.110663 1 request.go:752] "Waited before sending request" delay="1.783785712s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-metadata" 2025-12-08T17:47:49.642077433+00:00 stderr F I1208 17:47:49.641971 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go:183" 2025-12-08T17:47:50.111433387+00:00 stderr F I1208 17:47:50.111317 1 request.go:752] "Waited before sending request" delay="1.962129915s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config/secrets?resourceVersion=38756" 2025-12-08T17:47:50.114085400+00:00 stderr F I1208 17:47:50.113994 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:50.717139814+00:00 stderr F I1208 17:47:50.717037 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:51.714234081+00:00 stderr F I1208 17:47:51.713811 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:51.910929824+00:00 stderr F I1208 17:47:51.910846 1 request.go:752] "Waited before sending request" delay="1.064340635s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/oauth-serving-cert" 2025-12-08T17:47:52.911171653+00:00 stderr F I1208 17:47:52.911042 1 request.go:752] "Waited before sending request" delay="1.79761068s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/secrets/v4-0-config-system-session" 2025-12-08T17:47:53.125516548+00:00 stderr F I1208 17:47:53.125439 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:53.916070676+00:00 stderr F I1208 17:47:53.915981 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:54.110477509+00:00 stderr F I1208 17:47:54.110395 1 request.go:752] "Waited before sending request" 
delay="1.191600733s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-authentication/configmaps/v4-0-config-system-cliconfig" 2025-12-08T17:47:55.113035512+00:00 stderr F I1208 17:47:55.112965 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:55.912715776+00:00 stderr F I1208 17:47:55.912609 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:57.461309940+00:00 stderr F I1208 17:47:57.461217 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:209" 2025-12-08T17:47:59.204975705+00:00 stderr F I1208 17:47:59.204348 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:59.204975705+00:00 stderr F I1208 17:47:59.204684 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:59.208417020+00:00 stderr F I1208 17:47:59.208285 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:48:07.045392651+00:00 stderr F I1208 17:48:07.044797 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:48:08.350188969+00:00 stderr F I1208 17:48:08.349710 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:48:10.333497365+00:00 stderr F I1208 17:48:10.332919 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:48:11.221338947+00:00 stderr F I1208 17:48:11.221010 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:50:01.865266827+00:00 stderr F I1208 17:50:01.864801 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:51:53.397051424+00:00 stderr F I1208 17:51:53.396473 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:52:30.454491702+00:00 stderr F I1208 17:52:30.453949 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:53:31.210606375+00:00 stderr F I1208 17:53:31.209955 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:53:44.930824861+00:00 stderr F I1208 17:53:44.930209 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:55:14.587324037+00:00 stderr F I1208 17:55:14.586765 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:55:36.460037480+00:00 stderr F I1208 17:55:36.459375 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:56:59.102468872+00:00 stderr F I1208 17:56:59.101986 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:57:19.590964112+00:00 stderr F I1208 17:57:19.590233 1 warnings.go:110] "Warning: v1 Endpoints is deprecated 
in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:57:27.990195192+00:00 stderr F I1208 17:57:27.989545 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:57:31.593378530+00:00 stderr F I1208 17:57:31.592778 1 request.go:752] "Waited before sending request" delay="1.184876644s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config/secrets/webhook-authentication-integrated-oauth" 2025-12-08T17:57:32.792373597+00:00 stderr F I1208 17:57:32.791828 1 request.go:752] "Waited before sending request" delay="1.189267814s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-config/secrets/webhook-authentication-integrated-oauth" 2025-12-08T17:57:55.117022392+00:00 stderr F I1208 17:57:55.116472 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:59:19.520462760+00:00 stderr F I1208 17:59:19.519889 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:00:42.457110425+00:00 stderr F I1208 18:00:42.456165 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:01:11.052734335+00:00 stderr F I1208 18:01:11.052073 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:02:07.213165589+00:00 stderr F I1208 18:02:07.212446 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:02:17.105035492+00:00 stderr F I1208 18:02:17.104288 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:03:02.583936065+00:00 stderr F I1208 18:03:02.583469 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:04:50.590442348+00:00 stderr F I1208 18:04:50.589901 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:04:54.114156898+00:00 stderr F I1208 18:04:54.113439 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" ././@LongLink0000644000000000000000000000024600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-oper0000755000175000017500000000000015115611514033104 5ustar zuulzuul././@LongLink0000644000000000000000000000026700000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc/iptables-alerter/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-oper0000755000175000017500000000000015115611521033102 5ustar zuulzuul././@LongLink0000644000000000000000000000027400000000000011606 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc/iptables-alerter/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-oper0000644000175000017500000000017015115611514033104 0ustar zuulzuul2025-12-08T17:44:05.809354796+00:00 stdout F Mon Dec 8 17:44:05 UTC 2025 2025-12-08T17:44:15.511517013+00:00 stdout F ././@LongLink0000644000000000000000000000027700000000000011611 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000755000175000017500000000000015115611513033044 5ustar zuulzuul././@LongLink0000644000000000000000000000032700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300/kube-apiserver-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000755000175000017500000000000015115611520033042 5ustar zuulzuul././@LongLink0000644000000000000000000000033400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300/kube-apiserver-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000644000175000017500000102776515115611513033070 0ustar zuulzuul2025-12-08T17:44:19.695331924+00:00 stderr F I1208 17:44:19.691384 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:44:19.695331924+00:00 stderr F I1208 17:44:19.691852 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:19.695331924+00:00 stderr F I1208 17:44:19.693021 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:19.734921904+00:00 stderr F I1208 17:44:19.733906 1 builder.go:304] kube-apiserver-operator version v0.0.0-unknown-c3d9642-c3d9642 2025-12-08T17:44:20.800075618+00:00 stderr F I1208 17:44:20.795413 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:20.800075618+00:00 stderr F W1208 17:44:20.797115 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:20.800075618+00:00 stderr F W1208 17:44:20.797124 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:20.800075618+00:00 stderr F W1208 17:44:20.797130 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:20.800075618+00:00 stderr F W1208 17:44:20.797133 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:20.800075618+00:00 stderr F W1208 17:44:20.797137 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 
2025-12-08T17:44:20.800075618+00:00 stderr F W1208 17:44:20.797140 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:44:20.804989742+00:00 stderr F I1208 17:44:20.804767 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:20.804989742+00:00 stderr F I1208 17:44:20.804818 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:20.804989742+00:00 stderr F I1208 17:44:20.804891 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:20.804989742+00:00 stderr F I1208 17:44:20.804902 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:20.804989742+00:00 stderr F I1208 17:44:20.804916 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:20.804989742+00:00 stderr F I1208 17:44:20.804923 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:20.808913610+00:00 stderr F I1208 17:44:20.805079 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:20.808913610+00:00 stderr F I1208 17:44:20.805133 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:20.808913610+00:00 stderr F I1208 17:44:20.805258 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:20.808913610+00:00 stderr F I1208 17:44:20.807120 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:20.808913610+00:00 stderr F I1208 17:44:20.807557 1 leaderelection.go:257] attempting to acquire leader lease openshift-kube-apiserver-operator/kube-apiserver-operator-lock... 
2025-12-08T17:44:20.827298091+00:00 stderr F I1208 17:44:20.820788 1 leaderelection.go:271] successfully acquired lease openshift-kube-apiserver-operator/kube-apiserver-operator-lock 2025-12-08T17:44:20.827298091+00:00 stderr F I1208 17:44:20.821257 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator-lock", UID:"bb542d24-f3f6-4756-8267-0f35b4b06688", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37052", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' kube-apiserver-operator-575994946d-bhk9x_1bea7b6f-c806-4c6a-b390-e1d358fcde59 became leader 2025-12-08T17:44:20.827298091+00:00 stderr F I1208 17:44:20.822428 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:20.830925069+00:00 stderr F I1208 17:44:20.829437 1 starter.go:164] FeatureGates initialized: knownFeatureGates=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:20.830925069+00:00 stderr F I1208 17:44:20.829969 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 
'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:20.845019084+00:00 stderr F E1208 17:44:20.843029 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.846917366+00:00 stderr F E1208 17:44:20.846369 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.849221519+00:00 stderr F E1208 17:44:20.847632 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 
2025-12-08T17:44:20.854245836+00:00 stderr F E1208 17:44:20.852289 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.858623165+00:00 stderr F E1208 17:44:20.854293 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.858623165+00:00 stderr F E1208 17:44:20.856120 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.874935270+00:00 stderr F E1208 17:44:20.872973 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.898251266+00:00 stderr F E1208 17:44:20.895373 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.898251266+00:00 stderr F E1208 17:44:20.896768 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.898298117+00:00 stderr F E1208 17:44:20.898289 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.912131794+00:00 stderr F E1208 17:44:20.911691 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.912131794+00:00 stderr F I1208 17:44:20.911714 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:20.912131794+00:00 stderr F I1208 17:44:20.911752 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:20.912131794+00:00 stderr F I1208 17:44:20.911735 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:20.916334530+00:00 stderr F E1208 17:44:20.913124 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:20.942926974+00:00 stderr F E1208 17:44:20.939336 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 
2025-12-08T17:44:20.944527288+00:00 stderr F I1208 17:44:20.944439 1 certrotationcontroller.go:148] Setting monthPeriod to 720h0m0s, yearPeriod to 8760h0m0s, tenMonthPeriod to 7008h0m0s 2025-12-08T17:44:20.993116193+00:00 stderr F I1208 17:44:20.992810 1 base_controller.go:76] Waiting for caches to sync for SCCReconcileController 2025-12-08T17:44:20.993867055+00:00 stderr F I1208 17:44:20.993686 1 base_controller.go:76] Waiting for caches to sync for MissingStaticPodController 2025-12-08T17:44:20.993867055+00:00 stderr F I1208 17:44:20.993706 1 base_controller.go:76] Waiting for caches to sync for kube-apiserver 2025-12-08T17:44:20.993867055+00:00 stderr F I1208 17:44:20.993831 1 base_controller.go:76] Waiting for caches to sync for KubeAPIServerStaticResources-StaticResources 2025-12-08T17:44:20.993867055+00:00 stderr F I1208 17:44:20.993860 1 base_controller.go:76] Waiting for caches to sync for TargetConfigController 2025-12-08T17:44:20.993906396+00:00 stderr F I1208 17:44:20.993899 1 base_controller.go:76] Waiting for caches to sync for NodeKubeconfigController 2025-12-08T17:44:20.993918346+00:00 stderr F I1208 17:44:20.993913 1 base_controller.go:76] Waiting for caches to sync for ConfigObserver 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.993964 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_kube-apiserver 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.993982 1 certrotationcontroller.go:919] Starting CertRotation 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.993986 1 certrotationcontroller.go:884] Waiting for CertRotation 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994031 1 base_controller.go:76] Waiting for caches to sync for openshift-kube-apiserver-EncryptionCondition 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994044 1 base_controller.go:76] Waiting for caches to sync for CertRotationTimeUpgradeableController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994063 1 termination_observer.go:145] Starting TerminationObserver 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994074 1 base_controller.go:76] Waiting for caches to sync for EventWatchController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994098 1 base_controller.go:76] Waiting for caches to sync for BoundSATokenSignerController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994386 1 base_controller.go:76] Waiting for caches to sync for auditPolicyController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994399 1 base_controller.go:76] Waiting for caches to sync for kube-apiserver-RemoveStaleConditions 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994411 1 base_controller.go:76] Waiting for caches to sync for ConnectivityCheckController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994429 1 base_controller.go:76] Waiting for caches to sync for KubeletVersionSkewController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994438 1 base_controller.go:76] Waiting for caches to sync for WorkerLatencyProfile 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994446 1 base_controller.go:76] Waiting for caches to sync for webhookSupportabilityController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994455 1 base_controller.go:76] Waiting for caches to sync for ServiceAccountIssuerController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994469 1 base_controller.go:76] Waiting for caches to sync for 
PodSecurityReadinessController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994475 1 base_controller.go:82] Caches are synced for PodSecurityReadinessController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994483 1 base_controller.go:119] Starting #1 worker of PodSecurityReadinessController controller ... 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994506 1 base_controller.go:76] Waiting for caches to sync for highCPUUsageAlertController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994528 1 base_controller.go:76] Waiting for caches to sync for RevisionController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994583 1 base_controller.go:76] Waiting for caches to sync for Installer 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994593 1 base_controller.go:76] Waiting for caches to sync for kube-apiserver-InstallerState 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994602 1 base_controller.go:76] Waiting for caches to sync for kube-apiserver-StaticPodState 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994610 1 base_controller.go:76] Waiting for caches to sync for PruneController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994621 1 base_controller.go:76] Waiting for caches to sync for kube-apiserver-StartupMonitorPodCondition 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994631 1 base_controller.go:76] Waiting for caches to sync for kube-apiserver-StaticPodStateFallback 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994640 1 base_controller.go:76] Waiting for caches to sync for kube-apiserver-Node 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994688 1 base_controller.go:76] Waiting for caches to sync for BackingResourceController-StaticResources 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994697 1 base_controller.go:76] Waiting for caches to sync for cluster-kube-apiserver-operator-UnsupportedConfigOverrides 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994705 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994716 1 base_controller.go:76] Waiting for caches to sync for openshift-kube-apiserver-EncryptionPrune 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994726 1 base_controller.go:76] Waiting for caches to sync for openshift-kube-apiserver-EncryptionMigration 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994737 1 base_controller.go:76] Waiting for caches to sync for openshift-kube-apiserver-EncryptionState 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994785 1 base_controller.go:76] Waiting for caches to sync for GuardController 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.994840 1 base_controller.go:76] Waiting for caches to sync for openshift-kube-apiserver-EncryptionKey 2025-12-08T17:44:20.997906544+00:00 stderr F I1208 17:44:20.995221 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:20.999914169+00:00 stderr F I1208 17:44:20.999792 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:21.039983632+00:00 stderr F I1208 17:44:21.039397 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:21.046042517+00:00 stderr F I1208 17:44:21.045538 1 
warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.093588 1 base_controller.go:82] Caches are synced for SCCReconcileController 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.093612 1 base_controller.go:119] Starting #1 worker of SCCReconcileController controller ... 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.094803 1 base_controller.go:82] Caches are synced for StatusSyncer_kube-apiserver 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.094817 1 base_controller.go:119] Starting #1 worker of StatusSyncer_kube-apiserver controller ... 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.094872 1 certrotationcontroller.go:902] Finished waiting for CertRotation 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.094928 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.094960 1 base_controller.go:82] Caches are synced for kube-apiserver-RemoveStaleConditions 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.094966 1 base_controller.go:119] Starting #1 worker of kube-apiserver-RemoveStaleConditions controller ... 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.094992 1 base_controller.go:82] Caches are synced for ServiceAccountIssuerController 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.094997 1 base_controller.go:119] Starting #1 worker of ServiceAccountIssuerController controller ... 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.095010 1 base_controller.go:82] Caches are synced for highCPUUsageAlertController 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.095015 1 base_controller.go:119] Starting #1 worker of highCPUUsageAlertController controller ... 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.095053 1 base_controller.go:82] Caches are synced for cluster-kube-apiserver-operator-UnsupportedConfigOverrides 2025-12-08T17:44:21.096649828+00:00 stderr F I1208 17:44:21.095060 1 base_controller.go:119] Starting #1 worker of cluster-kube-apiserver-operator-UnsupportedConfigOverrides controller ... 2025-12-08T17:44:21.096900394+00:00 stderr F I1208 17:44:21.096779 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.096900394+00:00 stderr F I1208 17:44:21.096814 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:21.096900394+00:00 stderr F I1208 17:44:21.096821 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 
2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097458 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097484 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097515 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097549 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097562 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097574 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097628 1 servicehostname.go:46] syncing servicenetwork hostnames: [10.217.4.1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local openshift openshift.default openshift.default.svc openshift.default.svc.cluster.local] 2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097679 1 externalloadbalancer.go:27] syncing external loadbalancer hostnames: api.crc.testing 2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097699 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097714 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.097901402+00:00 stderr F I1208 17:44:21.097732 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.098083797+00:00 stderr F I1208 17:44:21.097968 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:44:21.100823502+00:00 stderr F I1208 17:44:21.100526 1 status_controller.go:230] clusteroperator/kube-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:21Z","message":"NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)","reason":"NodeController_MasterNodesReady","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:29Z","message":"NodeInstallerProgressing: 1 node is at revision 11","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:08:37Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 11","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:28Z","message":"KubeletMinorVersionUpgradeable: Kubelet and API server minor versions are synced.","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:29Z","message":"PodSecurityInconclusiveEvaluationConditionsDetected: Could not evaluate violations for namespaces: [kube-node-lease openshift openshift-apiserver-operator openshift-cloud-network-config-controller openshift-cluster-samples-operator openshift-cluster-storage-operator openshift-config-managed openshift-config-operator openshift-console openshift-console-operator openshift-console-user-settings openshift-controller-manager openshift-controller-manager-operator openshift-dns-operator openshift-host-network openshift-ingress-canary 
openshift-ingress-operator openshift-kube-controller-manager-operator openshift-kube-storage-version-migrator openshift-kube-storage-version-migrator-operator openshift-network-console openshift-network-diagnostics openshift-node openshift-route-controller-manager openshift-service-ca openshift-service-ca-operator openshift-user-workload-monitoring]","reason":"PodSecurityInconclusive_PSViolationDecisionInconclusive","status":"True","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:21.200907191+00:00 stderr F I1208 17:44:21.198667 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:21.237450469+00:00 stderr F I1208 17:44:21.237385 1 reflector.go:430] "Caches populated" type="*v1.Image" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:21.239046742+00:00 stderr F I1208 17:44:21.238001 1 reflector.go:430] "Caches populated" type="*v1.CustomResourceDefinition" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:21.295040289+00:00 stderr F I1208 17:44:21.294605 1 base_controller.go:82] Caches are synced for CertRotationTimeUpgradeableController 2025-12-08T17:44:21.295040289+00:00 stderr F I1208 17:44:21.295008 1 base_controller.go:119] Starting #1 worker of CertRotationTimeUpgradeableController controller ... 2025-12-08T17:44:21.295095041+00:00 stderr F I1208 17:44:21.295060 1 base_controller.go:82] Caches are synced for ConnectivityCheckController 2025-12-08T17:44:21.295095041+00:00 stderr F I1208 17:44:21.295084 1 base_controller.go:119] Starting #1 worker of ConnectivityCheckController controller ... 2025-12-08T17:44:21.312922367+00:00 stderr F E1208 17:44:21.312503 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" kube-node-lease="(MISSING)" 2025-12-08T17:44:21.312922367+00:00 stderr F E1208 17:44:21.312574 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift="(MISSING)" 2025-12-08T17:44:21.312922367+00:00 stderr F E1208 17:44:21.312665 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-apiserver-operator="(MISSING)" 2025-12-08T17:44:21.312922367+00:00 stderr F E1208 17:44:21.312772 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-cloud-network-config-controller="(MISSING)" 2025-12-08T17:44:21.312922367+00:00 stderr F E1208 17:44:21.312891 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-cluster-samples-operator="(MISSING)" 2025-12-08T17:44:21.313014539+00:00 stderr F E1208 17:44:21.312977 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-cluster-storage-operator="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313048 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" 
openshift-config-managed="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313140 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-config-operator="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313224 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-console="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313306 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-console-operator="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313379 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-console-user-settings="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313453 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-controller-manager="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313562 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-controller-manager-operator="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313647 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-dns-operator="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313721 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-host-network="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313775 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-ingress-canary="(MISSING)" 2025-12-08T17:44:21.313918084+00:00 stderr F E1208 17:44:21.313908 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-ingress-operator="(MISSING)" 2025-12-08T17:44:21.321923813+00:00 stderr F E1208 17:44:21.318042 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-kube-controller-manager-operator="(MISSING)" 2025-12-08T17:44:21.321923813+00:00 stderr F E1208 17:44:21.318104 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-kube-storage-version-migrator="(MISSING)" 2025-12-08T17:44:21.321923813+00:00 stderr F E1208 17:44:21.318161 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" 
openshift-kube-storage-version-migrator-operator="(MISSING)" 2025-12-08T17:44:21.321923813+00:00 stderr F E1208 17:44:21.318205 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-network-console="(MISSING)" 2025-12-08T17:44:21.321923813+00:00 stderr F E1208 17:44:21.318308 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-network-diagnostics="(MISSING)" 2025-12-08T17:44:21.321923813+00:00 stderr F E1208 17:44:21.318343 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-node="(MISSING)" 2025-12-08T17:44:21.321923813+00:00 stderr F E1208 17:44:21.318386 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-route-controller-manager="(MISSING)" 2025-12-08T17:44:21.321923813+00:00 stderr F E1208 17:44:21.318420 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-service-ca="(MISSING)" 2025-12-08T17:44:21.321923813+00:00 stderr F E1208 17:44:21.318475 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-service-ca-operator="(MISSING)" 2025-12-08T17:44:21.321923813+00:00 stderr F E1208 17:44:21.318536 1 podsecurityreadinesscontroller.go:88] "namespace:" err="unable to determine if the namespace is violating because no appropriate labels or annotations were found" openshift-user-workload-monitoring="(MISSING)" 2025-12-08T17:44:21.392676363+00:00 stderr F I1208 17:44:21.392539 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:21.425971141+00:00 stderr F I1208 17:44:21.425486 1 reflector.go:430] "Caches populated" type="*v1.Scheduler" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:21.637085909+00:00 stderr F I1208 17:44:21.634149 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:21.637085909+00:00 stderr F I1208 17:44:21.634514 1 reflector.go:430] "Caches populated" type="*v1.OAuth" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:21.789554839+00:00 stderr F I1208 17:44:21.789487 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:21.835721807+00:00 stderr F I1208 17:44:21.835563 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-apiserver changed: Degraded changed from False to True ("NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)") 
2025-12-08T17:44:21.990091918+00:00 stderr F I1208 17:44:21.989375 1 request.go:752] "Waited before sending request" delay="1.000301265s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces?limit=500&resourceVersion=0" 2025-12-08T17:44:21.999851845+00:00 stderr F I1208 17:44:21.998894 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:22.193566269+00:00 stderr F I1208 17:44:22.193499 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:22.196022476+00:00 stderr F I1208 17:44:22.195000 1 base_controller.go:82] Caches are synced for BackingResourceController-StaticResources 2025-12-08T17:44:22.196022476+00:00 stderr F I1208 17:44:22.195031 1 base_controller.go:119] Starting #1 worker of BackingResourceController-StaticResources controller ... 2025-12-08T17:44:22.394373305+00:00 stderr F I1208 17:44:22.390104 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:22.394998723+00:00 stderr F I1208 17:44:22.394969 1 base_controller.go:82] Caches are synced for openshift-kube-apiserver-EncryptionMigration 2025-12-08T17:44:22.394998723+00:00 stderr F I1208 17:44:22.394993 1 base_controller.go:119] Starting #1 worker of openshift-kube-apiserver-EncryptionMigration controller ... 2025-12-08T17:44:22.395046434+00:00 stderr F I1208 17:44:22.395030 1 base_controller.go:82] Caches are synced for kube-apiserver-InstallerState 2025-12-08T17:44:22.395046434+00:00 stderr F I1208 17:44:22.395038 1 base_controller.go:119] Starting #1 worker of kube-apiserver-InstallerState controller ... 2025-12-08T17:44:22.395055005+00:00 stderr F I1208 17:44:22.395048 1 base_controller.go:82] Caches are synced for kube-apiserver-StaticPodState 2025-12-08T17:44:22.395055005+00:00 stderr F I1208 17:44:22.395052 1 base_controller.go:119] Starting #1 worker of kube-apiserver-StaticPodState controller ... 2025-12-08T17:44:22.395079295+00:00 stderr F I1208 17:44:22.395063 1 base_controller.go:82] Caches are synced for kube-apiserver-StartupMonitorPodCondition 2025-12-08T17:44:22.395079295+00:00 stderr F I1208 17:44:22.395071 1 base_controller.go:119] Starting #1 worker of kube-apiserver-StartupMonitorPodCondition controller ... 2025-12-08T17:44:22.395087116+00:00 stderr F I1208 17:44:22.395080 1 base_controller.go:82] Caches are synced for kube-apiserver-StaticPodStateFallback 2025-12-08T17:44:22.395087116+00:00 stderr F I1208 17:44:22.395083 1 base_controller.go:119] Starting #1 worker of kube-apiserver-StaticPodStateFallback controller ... 2025-12-08T17:44:22.395103426+00:00 stderr F I1208 17:44:22.395095 1 base_controller.go:82] Caches are synced for openshift-kube-apiserver-EncryptionPrune 2025-12-08T17:44:22.395103426+00:00 stderr F I1208 17:44:22.395098 1 base_controller.go:119] Starting #1 worker of openshift-kube-apiserver-EncryptionPrune controller ... 2025-12-08T17:44:22.395135127+00:00 stderr F I1208 17:44:22.395122 1 base_controller.go:82] Caches are synced for openshift-kube-apiserver-EncryptionCondition 2025-12-08T17:44:22.395161288+00:00 stderr F I1208 17:44:22.395152 1 base_controller.go:119] Starting #1 worker of openshift-kube-apiserver-EncryptionCondition controller ... 
2025-12-08T17:44:22.402958871+00:00 stderr F I1208 17:44:22.402909 1 base_controller.go:82] Caches are synced for openshift-kube-apiserver-EncryptionState 2025-12-08T17:44:22.403008522+00:00 stderr F I1208 17:44:22.402998 1 base_controller.go:119] Starting #1 worker of openshift-kube-apiserver-EncryptionState controller ... 2025-12-08T17:44:22.403043163+00:00 stderr F I1208 17:44:22.403034 1 base_controller.go:82] Caches are synced for openshift-kube-apiserver-EncryptionKey 2025-12-08T17:44:22.403064333+00:00 stderr F I1208 17:44:22.403056 1 base_controller.go:119] Starting #1 worker of openshift-kube-apiserver-EncryptionKey controller ... 2025-12-08T17:44:22.613836942+00:00 stderr F I1208 17:44:22.613771 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:22.698612635+00:00 stderr F I1208 17:44:22.698492 1 base_controller.go:82] Caches are synced for MissingStaticPodController 2025-12-08T17:44:22.698612635+00:00 stderr F I1208 17:44:22.698517 1 base_controller.go:119] Starting #1 worker of MissingStaticPodController controller ... 2025-12-08T17:44:22.698612635+00:00 stderr F I1208 17:44:22.698575 1 base_controller.go:82] Caches are synced for auditPolicyController 2025-12-08T17:44:22.698612635+00:00 stderr F I1208 17:44:22.698580 1 base_controller.go:119] Starting #1 worker of auditPolicyController controller ... 2025-12-08T17:44:22.698612635+00:00 stderr F I1208 17:44:22.698596 1 base_controller.go:82] Caches are synced for WorkerLatencyProfile 2025-12-08T17:44:22.698612635+00:00 stderr F I1208 17:44:22.698600 1 base_controller.go:119] Starting #1 worker of WorkerLatencyProfile controller ... 2025-12-08T17:44:22.698659726+00:00 stderr F I1208 17:44:22.698615 1 base_controller.go:82] Caches are synced for RevisionController 2025-12-08T17:44:22.698659726+00:00 stderr F I1208 17:44:22.698619 1 base_controller.go:119] Starting #1 worker of RevisionController controller ... 2025-12-08T17:44:22.698659726+00:00 stderr F I1208 17:44:22.698636 1 base_controller.go:82] Caches are synced for Installer 2025-12-08T17:44:22.698659726+00:00 stderr F I1208 17:44:22.698640 1 base_controller.go:119] Starting #1 worker of Installer controller ... 2025-12-08T17:44:22.699290973+00:00 stderr F I1208 17:44:22.699241 1 base_controller.go:82] Caches are synced for PruneController 2025-12-08T17:44:22.699290973+00:00 stderr F I1208 17:44:22.699276 1 base_controller.go:119] Starting #1 worker of PruneController controller ... 2025-12-08T17:44:22.788919988+00:00 stderr F I1208 17:44:22.788275 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:22.794046658+00:00 stderr F I1208 17:44:22.793954 1 base_controller.go:82] Caches are synced for KubeAPIServerStaticResources-StaticResources 2025-12-08T17:44:22.794046658+00:00 stderr F I1208 17:44:22.793979 1 base_controller.go:119] Starting #1 worker of KubeAPIServerStaticResources-StaticResources controller ... 2025-12-08T17:44:23.003406898+00:00 stderr F I1208 17:44:23.002976 1 reflector.go:430] "Caches populated" type="*v1.Event" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.097851455+00:00 stderr F I1208 17:44:23.096423 1 base_controller.go:82] Caches are synced for EventWatchController 2025-12-08T17:44:23.097851455+00:00 stderr F I1208 17:44:23.097310 1 base_controller.go:119] Starting #1 worker of EventWatchController controller ... 
2025-12-08T17:44:23.187452779+00:00 stderr F I1208 17:44:23.187363 1 request.go:752] "Waited before sending request" delay="2.197900352s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver-operator/configmaps?limit=500&resourceVersion=0" 2025-12-08T17:44:23.197409590+00:00 stderr F I1208 17:44:23.197360 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.202913840+00:00 stderr F I1208 17:44:23.202252 1 base_controller.go:82] Caches are synced for TargetConfigController 2025-12-08T17:44:23.202913840+00:00 stderr F I1208 17:44:23.202277 1 base_controller.go:119] Starting #1 worker of TargetConfigController controller ... 2025-12-08T17:44:23.401243920+00:00 stderr F I1208 17:44:23.400636 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.494785861+00:00 stderr F I1208 17:44:23.494535 1 base_controller.go:82] Caches are synced for kube-apiserver 2025-12-08T17:44:23.494785861+00:00 stderr F I1208 17:44:23.494559 1 base_controller.go:119] Starting #1 worker of kube-apiserver controller ... 2025-12-08T17:44:23.494785861+00:00 stderr F I1208 17:44:23.494586 1 base_controller.go:82] Caches are synced for NodeKubeconfigController 2025-12-08T17:44:23.494785861+00:00 stderr F I1208 17:44:23.494591 1 base_controller.go:119] Starting #1 worker of NodeKubeconfigController controller ... 2025-12-08T17:44:23.494785861+00:00 stderr F I1208 17:44:23.494610 1 base_controller.go:82] Caches are synced for ConfigObserver 2025-12-08T17:44:23.494785861+00:00 stderr F I1208 17:44:23.494614 1 base_controller.go:119] Starting #1 worker of ConfigObserver controller ... 2025-12-08T17:44:23.494785861+00:00 stderr F I1208 17:44:23.494623 1 base_controller.go:82] Caches are synced for BoundSATokenSignerController 2025-12-08T17:44:23.494785861+00:00 stderr F I1208 17:44:23.494628 1 base_controller.go:119] Starting #1 worker of BoundSATokenSignerController controller ... 2025-12-08T17:44:23.495681586+00:00 stderr F I1208 17:44:23.495650 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.495681586+00:00 stderr F I1208 17:44:23.495670 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:23.497840094+00:00 stderr F I1208 17:44:23.497047 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.497840094+00:00 stderr F I1208 17:44:23.497059 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:23.497840094+00:00 stderr F I1208 17:44:23.497548 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.497840094+00:00 stderr F I1208 17:44:23.497557 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:23.497840094+00:00 stderr F I1208 17:44:23.497566 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.497840094+00:00 stderr F I1208 17:44:23.497570 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 
2025-12-08T17:44:23.497840094+00:00 stderr F I1208 17:44:23.497579 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.497840094+00:00 stderr F I1208 17:44:23.497582 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:23.497840094+00:00 stderr F I1208 17:44:23.497590 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.497840094+00:00 stderr F I1208 17:44:23.497593 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:23.498054120+00:00 stderr F I1208 17:44:23.498033 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.498100511+00:00 stderr F I1208 17:44:23.498083 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:23.498173254+00:00 stderr F I1208 17:44:23.498103 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.498203695+00:00 stderr F I1208 17:44:23.498193 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:23.499789258+00:00 stderr F I1208 17:44:23.498109 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.499831899+00:00 stderr F I1208 17:44:23.499819 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:23.523140844+00:00 stderr F I1208 17:44:23.509910 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.523140844+00:00 stderr F I1208 17:44:23.509929 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:23.523140844+00:00 stderr F I1208 17:44:23.520996 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.523140844+00:00 stderr F I1208 17:44:23.521023 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:23.523140844+00:00 stderr F I1208 17:44:23.521061 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:44:23.523140844+00:00 stderr F I1208 17:44:23.521065 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 
2025-12-08T17:44:23.525520409+00:00 stderr F I1208 17:44:23.525459 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SignerUpdateRequired' "kube-control-plane-signer" in "openshift-kube-apiserver-operator" requires a new signing cert/key pair: past its refresh time 2025-12-02 07:34:10 +0000 UTC 2025-12-08T17:44:23.525520409+00:00 stderr F I1208 17:44:23.525491 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SignerUpdateRequired' "kube-control-plane-signer" in "openshift-kube-apiserver-operator" requires a new signing cert/key pair: past its refresh time 2025-12-02 07:34:10 +0000 UTC 2025-12-08T17:44:23.525520409+00:00 stderr F I1208 17:44:23.525499 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SignerUpdateRequired' "kube-apiserver-to-kubelet-signer" in "openshift-kube-apiserver-operator" requires a new signing cert/key pair: past its refresh time 2025-12-02 07:34:11 +0000 UTC 2025-12-08T17:44:23.525520409+00:00 stderr F I1208 17:44:23.525506 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SignerUpdateRequired' "kube-control-plane-signer" in "openshift-kube-apiserver-operator" requires a new signing cert/key pair: past its refresh time 2025-12-02 07:34:10 +0000 UTC 2025-12-08T17:44:23.603220769+00:00 stderr F I1208 17:44:23.602593 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.707017200+00:00 stderr F I1208 17:44:23.697009 1 base_controller.go:82] Caches are synced for webhookSupportabilityController 2025-12-08T17:44:23.707017200+00:00 stderr F I1208 17:44:23.697369 1 base_controller.go:119] Starting #1 worker of webhookSupportabilityController controller ... 2025-12-08T17:44:23.808314033+00:00 stderr F I1208 17:44:23.791047 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.808314033+00:00 stderr F I1208 17:44:23.794719 1 base_controller.go:82] Caches are synced for kube-apiserver-Node 2025-12-08T17:44:23.808314033+00:00 stderr F I1208 17:44:23.794735 1 base_controller.go:119] Starting #1 worker of kube-apiserver-Node controller ... 2025-12-08T17:44:23.808314033+00:00 stderr F I1208 17:44:23.799996 1 base_controller.go:82] Caches are synced for KubeletVersionSkewController 2025-12-08T17:44:23.808314033+00:00 stderr F I1208 17:44:23.800020 1 base_controller.go:119] Starting #1 worker of KubeletVersionSkewController controller ... 
2025-12-08T17:44:23.808314033+00:00 stderr F I1208 17:44:23.800639 1 base_controller.go:82] Caches are synced for GuardController 2025-12-08T17:44:23.808314033+00:00 stderr F I1208 17:44:23.800645 1 base_controller.go:119] Starting #1 worker of GuardController controller ... 2025-12-08T17:44:23.899291505+00:00 stderr F W1208 17:44:23.898363 1 degraded_webhook.go:147] failed to connect to webhook "controlplanemachineset.machine.openshift.io" via service "control-plane-machine-set-operator.openshift-machine-api.svc:9443": dial tcp: lookup control-plane-machine-set-operator.openshift-machine-api.svc on 10.217.4.10:53: read udp 10.217.0.22:40435->10.217.4.10:53: read: connection refused 2025-12-08T17:44:23.927420753+00:00 stderr F I1208 17:44:23.927365 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'MasterNodesReadyChanged' All master nodes are ready 2025-12-08T17:44:23.970676122+00:00 stderr F I1208 17:44:23.962376 1 status_controller.go:230] clusteroperator/kube-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:29Z","message":"NodeInstallerProgressing: 1 node is at revision 11","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:08:37Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 11","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:28Z","message":"KubeletMinorVersionUpgradeable: Kubelet and API server minor versions are synced.","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:29Z","message":"PodSecurityInconclusiveEvaluationConditionsDetected: Could not evaluate violations for namespaces: [kube-node-lease openshift openshift-apiserver-operator openshift-cloud-network-config-controller openshift-cluster-samples-operator openshift-cluster-storage-operator openshift-config-managed openshift-config-operator openshift-console openshift-console-operator openshift-console-user-settings openshift-controller-manager openshift-controller-manager-operator openshift-dns-operator openshift-host-network openshift-ingress-canary openshift-ingress-operator openshift-kube-controller-manager-operator openshift-kube-storage-version-migrator openshift-kube-storage-version-migrator-operator openshift-network-console openshift-network-diagnostics openshift-node openshift-route-controller-manager openshift-service-ca openshift-service-ca-operator openshift-user-workload-monitoring]","reason":"PodSecurityInconclusive_PSViolationDecisionInconclusive","status":"True","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:24.045315478+00:00 stderr F I1208 17:44:24.042145 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-apiserver changed: Degraded changed from True to False ("NodeControllerDegraded: All master nodes are ready") 2025-12-08T17:44:24.085046212+00:00 stderr 
F I1208 17:44:24.078076 1 annotations.go:76] Updating "auth.openshift.io/certificate-not-before" annotation for kube-control-plane-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.085046212+00:00 stderr F - "2025-11-02T07:34:10Z", 2025-12-08T17:44:24.085046212+00:00 stderr F + "2025-12-08T17:44:23Z", 2025-12-08T17:44:24.085046212+00:00 stderr F ) 2025-12-08T17:44:24.085046212+00:00 stderr F I1208 17:44:24.078112 1 annotations.go:82] Updating "auth.openshift.io/certificate-not-after" annotation for kube-control-plane-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.085046212+00:00 stderr F - "2026-11-02T07:34:10Z", 2025-12-08T17:44:24.085046212+00:00 stderr F + "2026-02-06T17:44:24Z", 2025-12-08T17:44:24.085046212+00:00 stderr F ) 2025-12-08T17:44:24.085046212+00:00 stderr F I1208 17:44:24.078139 1 annotations.go:88] Updating "certificates.openshift.io/refresh-period" annotation for kube-control-plane-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.085046212+00:00 stderr F - "", 2025-12-08T17:44:24.085046212+00:00 stderr F + "720h0m0s", 2025-12-08T17:44:24.085046212+00:00 stderr F ) 2025-12-08T17:44:24.140011571+00:00 stderr F I1208 17:44:24.139529 1 annotations.go:76] Updating "auth.openshift.io/certificate-not-before" annotation for kube-control-plane-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.140011571+00:00 stderr F - "2025-11-02T07:34:10Z", 2025-12-08T17:44:24.140011571+00:00 stderr F + "2025-12-08T17:44:23Z", 2025-12-08T17:44:24.140011571+00:00 stderr F ) 2025-12-08T17:44:24.140011571+00:00 stderr F I1208 17:44:24.139570 1 annotations.go:82] Updating "auth.openshift.io/certificate-not-after" annotation for kube-control-plane-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.140011571+00:00 stderr F - "2026-11-02T07:34:10Z", 2025-12-08T17:44:24.140011571+00:00 stderr F + "2026-02-06T17:44:24Z", 2025-12-08T17:44:24.140011571+00:00 stderr F ) 2025-12-08T17:44:24.140011571+00:00 stderr F I1208 17:44:24.139583 1 annotations.go:88] Updating "certificates.openshift.io/refresh-period" annotation for kube-control-plane-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.140011571+00:00 stderr F - "", 2025-12-08T17:44:24.140011571+00:00 stderr F + "720h0m0s", 2025-12-08T17:44:24.140011571+00:00 stderr F ) 2025-12-08T17:44:24.187857566+00:00 stderr F I1208 17:44:24.187651 1 request.go:752] "Waited before sending request" delay="1.79015267s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller" 2025-12-08T17:44:24.287795792+00:00 stderr F I1208 17:44:24.285945 1 annotations.go:76] Updating "auth.openshift.io/certificate-not-before" annotation for kube-apiserver-to-kubelet-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.287795792+00:00 stderr F - "2025-11-02T07:34:11Z", 2025-12-08T17:44:24.287795792+00:00 stderr F + "2025-12-08T17:44:23Z", 2025-12-08T17:44:24.287795792+00:00 stderr F ) 2025-12-08T17:44:24.287795792+00:00 stderr F I1208 17:44:24.286261 1 annotations.go:82] Updating "auth.openshift.io/certificate-not-after" annotation for kube-apiserver-to-kubelet-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.287795792+00:00 stderr F - "2026-11-02T07:34:11Z", 2025-12-08T17:44:24.287795792+00:00 stderr F + "2026-12-08T17:44:24Z", 
2025-12-08T17:44:24.287795792+00:00 stderr F ) 2025-12-08T17:44:24.287795792+00:00 stderr F I1208 17:44:24.286276 1 annotations.go:88] Updating "certificates.openshift.io/refresh-period" annotation for kube-apiserver-to-kubelet-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.287795792+00:00 stderr F - "", 2025-12-08T17:44:24.287795792+00:00 stderr F + "720h0m0s", 2025-12-08T17:44:24.287795792+00:00 stderr F ) 2025-12-08T17:44:24.315914429+00:00 stderr F I1208 17:44:24.311946 1 annotations.go:76] Updating "auth.openshift.io/certificate-not-before" annotation for kube-control-plane-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.315914429+00:00 stderr F - "2025-11-02T07:34:10Z", 2025-12-08T17:44:24.315914429+00:00 stderr F + "2025-12-08T17:44:23Z", 2025-12-08T17:44:24.315914429+00:00 stderr F ) 2025-12-08T17:44:24.315914429+00:00 stderr F I1208 17:44:24.312202 1 annotations.go:82] Updating "auth.openshift.io/certificate-not-after" annotation for kube-control-plane-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.315914429+00:00 stderr F - "2026-11-02T07:34:10Z", 2025-12-08T17:44:24.315914429+00:00 stderr F + "2026-02-06T17:44:24Z", 2025-12-08T17:44:24.315914429+00:00 stderr F ) 2025-12-08T17:44:24.315914429+00:00 stderr F I1208 17:44:24.312214 1 annotations.go:88] Updating "certificates.openshift.io/refresh-period" annotation for kube-control-plane-signer/openshift-kube-apiserver-operator, diff: string( 2025-12-08T17:44:24.315914429+00:00 stderr F - "", 2025-12-08T17:44:24.315914429+00:00 stderr F + "720h0m0s", 2025-12-08T17:44:24.315914429+00:00 stderr F ) 2025-12-08T17:44:24.932442946+00:00 stderr F W1208 17:44:24.931988 1 degraded_webhook.go:147] failed to connect to webhook "controlplanemachineset.machine.openshift.io" via service "control-plane-machine-set-operator.openshift-machine-api.svc:9443": dial tcp: lookup control-plane-machine-set-operator.openshift-machine-api.svc on 10.217.4.10:53: read udp 10.217.0.22:60769->10.217.4.10:53: read: connection refused 2025-12-08T17:44:25.189926079+00:00 stderr F I1208 17:44:25.187649 1 request.go:752] "Waited before sending request" delay="2.39318208s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver" 2025-12-08T17:44:25.995528844+00:00 stderr F I1208 17:44:25.994553 1 signer.go:123] Updated secret openshift-kube-apiserver-operator/kube-control-plane-signer 2025-12-08T17:44:25.997551239+00:00 stderr F I1208 17:44:25.997522 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/kube-control-plane-signer -n openshift-kube-apiserver-operator because it changed 2025-12-08T17:44:25.997601001+00:00 stderr F I1208 17:44:25.997583 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CABundleUpdateRequired' "kube-control-plane-signer-ca" in "openshift-kube-apiserver-operator" requires a new cert: signer update openshift-config-managed/kube-scheduler-client-cert-key 2025-12-08T17:44:25.997664762+00:00 stderr F I1208 17:44:25.997646 1 
event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CABundleUpdateRequired' "kube-control-plane-signer-ca" in "openshift-kube-apiserver-operator" requires a new cert: signer update openshift-kube-apiserver/check-endpoints-client-cert-key 2025-12-08T17:44:26.390935409+00:00 stderr F I1208 17:44:26.387110 1 request.go:752] "Waited before sending request" delay="2.247408803s" reason="client-side throttling, not priority and fairness" verb="PUT" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver-operator/secrets/kube-control-plane-signer" 2025-12-08T17:44:26.591310055+00:00 stderr F I1208 17:44:26.591131 1 signer.go:123] Updated secret openshift-kube-apiserver-operator/kube-apiserver-to-kubelet-signer 2025-12-08T17:44:26.598857921+00:00 stderr F I1208 17:44:26.598723 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/kube-apiserver-to-kubelet-signer -n openshift-kube-apiserver-operator because it changed 2025-12-08T17:44:26.598857921+00:00 stderr F I1208 17:44:26.598746 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CABundleUpdateRequired' "kube-apiserver-to-kubelet-client-ca" in "openshift-kube-apiserver-operator" requires a new cert: signer update openshift-kube-apiserver/kubelet-client 2025-12-08T17:44:26.963954210+00:00 stderr F W1208 17:44:26.963315 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.22:47849->10.217.4.10:53: read: connection refused 2025-12-08T17:44:27.590550891+00:00 stderr F I1208 17:44:27.589516 1 request.go:752] "Waited before sending request" delay="2.592739633s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods" 2025-12-08T17:44:27.612664864+00:00 stderr F I1208 17:44:27.612605 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'PodCreated' Created Pod/revision-pruner-11-crc -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:28.011437432+00:00 stderr F W1208 17:44:28.011092 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.22:59592->10.217.4.10:53: read: connection refused 2025-12-08T17:44:28.787899561+00:00 stderr F I1208 17:44:28.787321 1 request.go:752] "Waited before sending request" delay="2.790420905s" reason="client-side 
throttling, not priority and fairness" verb="PUT" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver-operator/configmaps/kube-control-plane-signer-ca" 2025-12-08T17:44:28.794925012+00:00 stderr F I1208 17:44:28.794848 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'TargetUpdateRequired' "kube-scheduler-client-cert-key" in "openshift-config-managed" requires a new target cert/key pair: issuer "openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863", not in ca bundle: 2025-12-08T17:44:29.003155663+00:00 stderr F I1208 17:44:29.000622 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'TargetUpdateRequired' "check-endpoints-client-cert-key" in "openshift-kube-apiserver" requires a new target cert/key pair: issuer "kube-control-plane-signer", not in ca bundle: 2025-12-08T17:44:29.112058043+00:00 stderr F I1208 17:44:29.111952 1 annotations.go:76] Updating "auth.openshift.io/certificate-not-before" annotation for check-endpoints-client-cert-key/openshift-kube-apiserver, diff: string( 2025-12-08T17:44:29.112058043+00:00 stderr F - "2025-11-02T07:51:37Z", 2025-12-08T17:44:29.112058043+00:00 stderr F + "2025-12-08T17:44:28Z", 2025-12-08T17:44:29.112058043+00:00 stderr F ) 2025-12-08T17:44:29.112058043+00:00 stderr F I1208 17:44:29.111999 1 annotations.go:82] Updating "auth.openshift.io/certificate-not-after" annotation for check-endpoints-client-cert-key/openshift-kube-apiserver, diff: string( 2025-12-08T17:44:29.112058043+00:00 stderr F - "2026-11-02T07:34:10Z", 2025-12-08T17:44:29.112058043+00:00 stderr F + "2026-02-06T17:44:24Z", 2025-12-08T17:44:29.112058043+00:00 stderr F ) 2025-12-08T17:44:29.191571722+00:00 stderr F I1208 17:44:29.191491 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'TargetUpdateRequired' "kubelet-client" in "openshift-kube-apiserver" requires a new target cert/key pair: issuer "kube-apiserver-to-kubelet-signer", not in ca bundle: 2025-12-08T17:44:29.384522335+00:00 stderr F I1208 17:44:29.384469 1 annotations.go:76] Updating "auth.openshift.io/certificate-not-before" annotation for kubelet-client/openshift-kube-apiserver, diff: string( 2025-12-08T17:44:29.384522335+00:00 stderr F - "2025-11-02T07:51:35Z", 2025-12-08T17:44:29.384522335+00:00 stderr F + "2025-12-08T17:44:28Z", 2025-12-08T17:44:29.384522335+00:00 stderr F ) 2025-12-08T17:44:29.384522335+00:00 stderr F I1208 17:44:29.384500 1 annotations.go:82] Updating "auth.openshift.io/certificate-not-after" annotation for kubelet-client/openshift-kube-apiserver, diff: string( 2025-12-08T17:44:29.384522335+00:00 stderr F - "2026-11-02T07:34:11Z", 2025-12-08T17:44:29.384522335+00:00 stderr F + "2026-12-08T17:44:24Z", 2025-12-08T17:44:29.384522335+00:00 stderr F ) 2025-12-08T17:44:29.417200866+00:00 stderr F I1208 17:44:29.417155 1 annotations.go:76] Updating "auth.openshift.io/certificate-not-before" annotation for 
kube-scheduler-client-cert-key/openshift-config-managed, diff: string( 2025-12-08T17:44:29.417200866+00:00 stderr F - "2025-12-08T17:44:25Z", 2025-12-08T17:44:29.417200866+00:00 stderr F + "2025-12-08T17:44:28Z", 2025-12-08T17:44:29.417200866+00:00 stderr F ) 2025-12-08T17:44:29.788047402+00:00 stderr F I1208 17:44:29.787996 1 request.go:752] "Waited before sending request" delay="2.186800029s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/revision-pruner-11-crc" 2025-12-08T17:44:30.019523225+00:00 stderr F E1208 17:44:30.019483 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.22:51761->10.217.4.10:53: read: connection refused 2025-12-08T17:44:30.031285867+00:00 stderr F W1208 17:44:30.031208 1 degraded_webhook.go:147] failed to connect to webhook "multus-validating-config.k8s.io" via service "multus-admission-controller.openshift-multus.svc:443": dial tcp: lookup multus-admission-controller.openshift-multus.svc on 10.217.4.10:53: read udp 10.217.0.22:40879->10.217.4.10:53: read: connection refused 2025-12-08T17:44:30.598055987+00:00 stderr F I1208 17:44:30.597266 1 targetconfigcontroller.go:419] Updated client CA bundle configmap openshift-kube-apiserver/client-ca 2025-12-08T17:44:30.600010340+00:00 stderr F I1208 17:44:30.597406 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapUpdated' Updated ConfigMap/client-ca -n openshift-kube-apiserver because it changed 2025-12-08T17:44:30.608447890+00:00 stderr F I1208 17:44:30.608400 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.608366138 +0000 UTC))" 2025-12-08T17:44:30.608510732+00:00 stderr F I1208 17:44:30.608498 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.608483171 +0000 UTC))" 2025-12-08T17:44:30.608543323+00:00 stderr F I1208 17:44:30.608534 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.608525052 +0000 UTC))" 2025-12-08T17:44:30.608573874+00:00 stderr F I1208 17:44:30.608565 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] 
issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.608556373 +0000 UTC))" 2025-12-08T17:44:30.608608225+00:00 stderr F I1208 17:44:30.608599 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.608588594 +0000 UTC))" 2025-12-08T17:44:30.608640396+00:00 stderr F I1208 17:44:30.608631 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.608621215 +0000 UTC))" 2025-12-08T17:44:30.610693791+00:00 stderr F I1208 17:44:30.610619 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.610599478 +0000 UTC))" 2025-12-08T17:44:30.610742912+00:00 stderr F I1208 17:44:30.610733 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.610719622 +0000 UTC))" 2025-12-08T17:44:30.610775903+00:00 stderr F I1208 17:44:30.610767 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.610756543 +0000 UTC))" 2025-12-08T17:44:30.610812004+00:00 stderr F I1208 17:44:30.610803 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.610791194 +0000 UTC))" 2025-12-08T17:44:30.611175165+00:00 stderr F I1208 17:44:30.611160 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-kube-apiserver-operator.svc\" [serving] 
validServingFor=[metrics.openshift-kube-apiserver-operator.svc,metrics.openshift-kube-apiserver-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:13 +0000 UTC to 2027-11-02 07:52:14 +0000 UTC (now=2025-12-08 17:44:30.611145294 +0000 UTC))" 2025-12-08T17:44:30.611620457+00:00 stderr F I1208 17:44:30.611357 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215860\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215860\" (2025-12-08 16:44:19 +0000 UTC to 2028-12-08 16:44:19 +0000 UTC (now=2025-12-08 17:44:30.611343079 +0000 UTC))" 2025-12-08T17:44:30.990959234+00:00 stderr F I1208 17:44:30.990913 1 request.go:752] "Waited before sending request" delay="1.878787517s" reason="client-side throttling, not priority and fairness" verb="PUT" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/secrets/check-endpoints-client-cert-key" 2025-12-08T17:44:30.995452346+00:00 stderr F I1208 17:44:30.995396 1 target.go:150] Updated secret openshift-kube-apiserver/check-endpoints-client-cert-key 2025-12-08T17:44:30.998831859+00:00 stderr F I1208 17:44:30.998776 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/check-endpoints-client-cert-key -n openshift-kube-apiserver because it changed 2025-12-08T17:44:31.039645692+00:00 stderr F W1208 17:44:31.039585 1 degraded_webhook.go:147] failed to connect to webhook "multus-validating-config.k8s.io" via service "multus-admission-controller.openshift-multus.svc:443": dial tcp: lookup multus-admission-controller.openshift-multus.svc on 10.217.4.10:53: read udp 10.217.0.22:54353->10.217.4.10:53: read: connection refused 2025-12-08T17:44:31.192095230+00:00 stderr F I1208 17:44:31.192042 1 target.go:150] Updated secret openshift-kube-apiserver/kubelet-client 2025-12-08T17:44:31.193710495+00:00 stderr F I1208 17:44:31.193681 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/kubelet-client -n openshift-kube-apiserver because it changed 2025-12-08T17:44:31.591517936+00:00 stderr F I1208 17:44:31.591458 1 target.go:150] Updated secret openshift-config-managed/kube-scheduler-client-cert-key 2025-12-08T17:44:31.592148593+00:00 stderr F I1208 17:44:31.592122 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/kube-scheduler-client-cert-key -n openshift-config-managed because it changed 2025-12-08T17:44:32.187342827+00:00 stderr F I1208 17:44:32.187284 1 request.go:752] "Waited before sending request" delay="1.996293912s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver" 2025-12-08T17:44:32.861346742+00:00 stderr P I1208 
17:44:32.861268 1 core.go:352] ConfigMap "openshift-config-managed/kube-apiserver-client-ca" changes: {"data":{"ca-bundle.crt":"-----BEGIN CERTIFICATE-----\nMIIDMDCCAhigAwIBAgIIIzF/30wVgUkwDQYJKoZIhvcNAQELBQAwNjESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSAwHgYDVQQDExdhZG1pbi1rdWJlY29uZmlnLXNpZ25lcjAe\nFw0yNTExMDIwNzM0MDdaFw0zNTEwMzEwNzM0MDdaMDYxEjAQBgNVBAsTCW9wZW5z\naGlmdDEgMB4GA1UEAxMXYWRtaW4ta3ViZWNvbmZpZy1zaWduZXIwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDEaFvaxE/Ah0Q+4T67KuL6N5MoncMcfqtm\njKd8txx/b3t8o2WCAMF0IKDNMDDobraupmimcAQwOWen0WJzp3DqjVAIKabrG/DZ\nXqsx3xVHxhSvFOKEFQbiFu6HL0FvXs1bsMkm5YAcM/voHkGHefR+5YEgpgTuhZ6a\n9muG9cxUjlZ/BmMP3UwsgmRfxQ7TG3Ixf/mp++cLxi114b8ld8S4XtVuG//82BzB\nvk3J6+7tnRjli/AHSm0fx7ZvgRPY1b1IGSvGUMc6Qrc+nim/Ufd017TeFlkwKIRP\nPnUGuz0S/5Rz9XMoWJ/OHi/vB0eQs3pyqHBDPgTYCt1NZUO9nN7tAgMBAAGjQjBA\nMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBT9Y6Mp\nr1Yg0NUI8hFWKTlX1CJd6zANBgkqhkiG9w0BAQsFAAOCAQEAWD6f4r0mm7D7+uVn\nCs3w3zq0JIVBUtBTXYuSbgpnpmSVALRCSnRyguNx37SHU2qXVF7+hsaWYuO/B6iR\nZ5uZ6fkLEEFI5YN7q1HBEPBRaBFtJ7fSOBl9iSsHI11DX53+wRhJR319P3fZ18eq\nGwTdUHTy+L9ec1NjaJvOz2eJEVB3O2A9ySh+Rhdv75mFqTbNvxyf5fjw7OHDd5ti\nWPCT1UzyXUXpE8ET6HA59gQO3Ix/VPzZTpNWX1FAXDYpYFkK1t9Ifzjdqf3/P+uP\nvwMtUNixJg8RYhfRNZ4RbfULWU9Y0DpadRVX5WppGBTRNAAgmNGBYPPR7HuxVGx1\nReJ2Bg==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDXTCCAkWgAwIBAgIIHuh6I9HTH+QwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM1WhcNMjYx\nMDI4MDgxNzM2WjAmMSQwIgYDVQQDDBtrdWJlLWNzci1zaWduZXJfQDE3NjIwNzE0\nNTUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb91UVWMxGg+Q7+o+5\ntt2z/Oq+7Mc1SNGJ4oF0Y99WM1Y7tIo5NOKWiIXE16rGuzY4+D9O53fh14ngzJm3\nWh3GjCGvr8/2W7J1BblXwuKwDuRl+OfJvyPtETUeME42Y9V6XP/B60iaaEYhm5t7\nTDf7TmmjEpqeWix43KqHnpcW9Zr8tM+tHLrGwcHnb+z3LvGkQA7mbXsaHiuryCVx\nudxQYNKWgtAkw3OOtuVyJ2gGD7iYVni1jg7nc9ZhQOYBoYRbAw3zh36CY50dZA89\nhDKYsVVourd9xfAdwSmrcgtsVo1X0ucCpEEUYKEz3/udgk+Dgf2hy3flIMcg9kcI\nS8xzAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0G\nA1UdDgQWBBSGMLijow+n1t5asMgXimhNhoMUvDAfBgNVHSMEGDAWgBQwDwrhoAwS\n6q2GA5CnoIQZVVim5jANBgkqhkiG9w0BAQsFAAOCAQEAhDT7ncsDiLG3QyVtffPp\nTjWscZowuHbVwUjwpZt5OxQFonSJxNCnqrj4MlIqqxraH5nnsrW5FqWyWWeMmXpz\nbFkiVhPFCVGjmRN9V1LXjHDc4lufe3ixq+MvgtU1YL/WJBmUxxw5dPifTT15IApJ\n6stLJ0QtHBNeEIIroUFpDB+O7OJYZ85ed6o6gT4n/v9nxBaLsZNpO2TzaWfI0Bst\nFEoPsfPKgBvwg9+2GijlP/VyKmP2gFdFm25PWeROU1VZzPrEhaOliO+/YXHt32YU\nJkxTF/smrLzxRRbb507cuvWEilzud93YbHmAhAj1h0CpDdzjxYDr5zRYJSP7BV+6\nUA==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDiTCCAnGgAwIBAgIIOsA0z3vOTSgwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM0WhcNMjcx\nMDIzMDgxNzM1WjBSMVAwTgYDVQQDDEdvcGVuc2hpZnQta3ViZS1jb250cm9sbGVy\nLW1hbmFnZXItb3BlcmF0b3JfY3NyLXNpZ25lci1zaWduZXJAMTc2MjA3MTQ1NTCC\nASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPY8aATxrjWpRKfnFU7BgmD1\nkNEst9RVY3fzGsARc5tF3x+zhl9BbsoJ9NAncCj+KnkAIYk4pJMU6BHIWIakpvqV\nLsnlPM45VU8ocwOWb5Z7g88YdqHGRWeZqZt/rPXmH/846iVGDstB0YQWgKKKK97X\nvjKMsq9ALSVj8gRWai7B7MVP/bZ4FgeqsYq6zIH9XKdPO8ev20qffrob4nmLGHdJ\nikAliwIy6nkVYrATKOS8t56votMD7xuFQ8rM6uQ0YVejA5rE/Tmq4/NsFpfFfkCP\nMU0vO2XwqCBV81XKD00SmW9MXIxbTq5lU9YjjE1sDFREtN4uZL4nEDa2+wnvIxEC\nAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O\nBBYEFDAPCuGgDBLqrYYDkKeghBlVWKbmMB8GA1UdIwQYMBaAFDAPCuGgDBLqrYYD\nkKeghBlVWKbmMA0GCSqGSIb3DQEBCwUAA4IBAQDS31/o3/4/9DBLFVcLvIwOQCK1\n0ZwJUa/8iK5GaMlMH6cNfvVZBrb4SwTVx0WXI5wrIXrlvYc+PtXL0MJeyIJmMpoU\nRyQAJZsh8cckeQjghV2Pf7wMfEbHudKTp8uoQDUBntkfNhJa4pPxmNWuhOrlvdB5\nEF/6IGviKAdSy0jcNpscvD3W0oSpCYRW0Ki/25LaFvIqP2Xy/cNJlWhzJWqZbK6k\nR9I4knhvIv/JYmppOVXw1rEvP+8Pn8UF2oSfFXcN5W+j4YIIhrAUnjAbaflpyX8k\nAUEKdtgVNNe7RlW9nQrhO8GqFJItitBbNtVuSkw9XIlQ0gc40E7mgB7Mjnbu\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDQjCCAiqgAwIBAgIICN23rtJ7PrYwDQYJKoZIhvcNAQELBQAwPzESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSkwJwYDVQQDEyBrdWJlLWFwaXNlcnZlci10by1rdWJlbGV0\nLXNpZ25lcjAeFw0yNTExMDIwNzM0MTFaFw0yNjExMDIwNzM0MTFaMD8xEjAQBgNV\nBAsTCW9wZW5zaGlmdDEpMCcGA1UEAxMga3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxl\ndC1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDzRlKEKNEy\nCyd+PPBkdzQZd3BeUb0b2qi1h8fbUiSNENrOpafKxxAHcr3a4KWB6sKhi28r14mF\nxcJ2Yb92/jLpkS15p629AUrGXxKuL8QtkBsY3dH0CqKMBedO6oxodva9Avc+3DMI\nvvYBJFy+4on/0JbM54fduvDmcEmhBtgRItK3Z87VbhemVPj7uDi9EV381uRMlmq4\ncgtD5mfS1yeRu0ut5IIr7/PN1G+93slLGQkHveqWlsFrDYd8Qm5PqirRBYy+18RC\nmEuNirFX3yPrEGwMvRlJyFia0RKuK69bFL2vduI5Wu7h/6VKP0/vEpEYqI6bJYoV\nbUjA2vqrV/1VAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD\nAQH/MB0GA1UdDgQWBBRNj9dsSIhfwKZ491Q/XZYt2lRWLzANBgkqhkiG9w0BAQsF\nAAOCAQEANmt7a5N+zch83D5+YeC4VQoFvpkHPKEYYT0EM/ykjch3453RmQWQEwkj\nVvstV1U16YpnEI61l1s347RHi64SwtlwV6tiNCpopDF2u3Bb+eiJqrlIC69EFnZE\n1426AVmZZq3sWu3eKB0HgT5u6B1rErSTl3c4hK4SiDsWWlVktBSN0BS4cD+urSAF\nc673/wLKCjq2I+9i3Wv2K7Ton3w5oaETE7lgQyImbKOVhJhFrPGu9fKXaeWlyXGY\nj7tz68vNTvecRynKrmzUJ9BBMfAXTrCowitzjBjanFitgXK4DnQMkb+8lv2Txb/n\nkB7RzcFDyIVd3g5XWBujR3fkQFWsNQ==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDlTCCAn2gAwIBAgIIV5i/4m8WRp0wDQYJKoZIhvcNAQELBQAwWDFWMFQGA1UE\nAwxNb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtYXBpc2Vy\ndmVyLXRvLWt1YmVsZXQtc2lnbmVyQDE3NjUyMTU4NjMwHhcNMjUxMjA4MTc0NDIz\nWhcNMjYxMjA4MTc0NDI0WjBYMVYwVAYDVQQDDE1vcGVuc2hpZnQta3ViZS1hcGlz\nZXJ2ZXItb3BlcmF0b3Jfa3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxldC1zaWduZXJA\nMTc2NTIxNTg2MzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMQk29Lc\nviCBF+ZN194ChQgHyYo1iN6wW+jZDEEIQpkmIfgrPnEOPL8+9d3SN92BqqYGdwnp\n5TdyDJBFBjrM8iKKvrq6x+EcyQJU6/Q+41bpPSLsziclImlDUUE29OYj6poxfNi1\nQBeFL1q4j9/ks+AfMnpjEbiGjxjJ8cV8++3NERSB1jJLft1rYcnQvgBuE64jqipO\nbNczVjMjcq0g+H+qpZknHlFueBqi5F/Nj/hC7QZbS96VThCxM123zqORBAfU5Fj0\ndMk3XqYTM1mpfyQHihtlyG3vsPXI/CBZgno6CI+KuXZJ46IjNNmiImyVJNKe7tXS\niWxbKKtEZHAvMcsCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQF\nMAMBAf8wHQYDVR0OBBYEFKzukDY3Odtmp3C+ncqem+63g2NuMB8GA1UdIwQYMBaA\nFKzukDY3Odtmp3C+ncqem+63g2NuMA0GCSqGSIb3DQEBCwUAA4IBAQBC2NLbh36L\n05mNq0+V4avx/2/xXvih+RtebPhiF8w8WG7WWRiIlK/yn8+iToFX/07+HWbBSK3g\nu5Yqac0eh8iKLkG+eIFiXpZR4B4Ha3ZRoU4N6dBMohIChZNugHGtjhfFjDpjFY8N\n9jMoZmTtjtK7RW2tu1qRyJcNSk8ou6nYNo/fB9PHWP5E12cWdg2ZQyESq+zE2dFo\n/dNjvb2y+GneObWzG9nclr6L7f6jI4LSOujO9ZA28xW4lf2EmosQ2HOeun48vA3O\n0C9lO1/SqcPkA6TtMHsoXZDSRv+mH62ugEZkDn8lgOizTm3l+jU9UA4RSvRD7ghR\nuXScj56hVynp\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDNDCCAhygAwIBAgIILvKlXd2YBKIwDQYJKoZIhvcNAQELBQAwODESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVy\nMB4XDTI1MTEwMjA3MzQxMFoXDTI2MTEwMjA3MzQxMFowODESMBAGA1UECxMJb3Bl\nbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOnFEdxnqqE7l+5NxN82TBBycC2F\n5cUboCcUVr211lubqmjCusnJzzz+6rKpO8uFqqGeu4C7rGpudOaK52IZhG0WP8b7\n0raQhnM4DV8t2SHDV4GhFUiuNE+b4FJrZ6jiljQo2g9ZeeCZgdmmBrIFHBXDFzEc\nA1RPScqOtvBbbH064Zd267gOmVPJnWmxDXo6X/RGYCm1YUS6FQ2WWpl707ComvgZ\nAvWGSA4H1sZirMQ+ug3bctkLb+SiXUzf+tLnGIHPeqDNfMrNUhxBl6dDhlMbUIzY\nrVxgDD8y3i7eg5i5HG8yntl8epgs2gn47wfavfjLqATBlciJPZY6Qv3BFQIDAQAB\no0IwQDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\nWi6wmZ50AVdwxaiO3WjTvp82+sEwDQYJKoZIhvcNAQELBQADggEBAA4siMWwOGL5\nCiCxbiJsuKMbCLlFR6JaD43FWnUCZDoe7np7W76Oh4py4Zht7XlZKotcXrrfRYVO\ndVht66PCbSujy375p/B6c3isG4h/1cNSGDm1uhAkHXGZ88S2wSjKT5YJ/HUAkvyj\nadQgZeO7Q60YBSDE/67Ldq1zqvBrMF2k8pF49p1AdAtf4OSDzIaGGPUQJTFExA0E\n03xMlOPNhYZ8MgFT2XE6nRT74lCAfK9krAsZLtFuAtp/14t013PD0FqTTQRUmuSj\nO6pJKDTH8tZ3ieXxSzRR+j4p5hkHaehgQVyUbwiw8WVXkd6NcWR6yQcSeqIsTSCD\neMDdTmmKyuo=\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDhzCCAm+gAwIBAgIIcuJfJWKJ/NEwDQYJKoZIhvcNAQELBQAwUTFPME0GA1UE\nAwxGb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtY29udHJv\nbC1wbGFuZS1zaWduZXJAMTc2NTIxNTg2MzAeFw0yNTEyMDgxNzQ0MjNaFw0yNjAy\nMDYxNzQ0MjRaMFExTzBNBgNVBAMMRm9wZW5zaGlmdC1rdWJlLWFwaXNlcnZlci1v\ncGVyYXRvcl9rdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyQDE3NjUyMTU4NjMwggEi\nMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC86U3wU8BYhyUyqYM3Vuy/Kvfu\nQlUYxAqiVxA50CLa488sawRtfVN03+NKfPtzoj6xg5nYxR0a/+IP95n2YltFsU5k\nyumfMqcMWP1gZeUuqq0tHgy/GYvD4uF2IWLRMYMdYrsbJlOPWRCnRfWtXN7LJHAY\nBQwKW01c7MOm8AMOT5sGCw7 2025-12-08T17:44:32.861408654+00:00 stderr F 
z1GwROdLkjebZSAWeWP+uho5ubO7R9yFVrMJGzBum\nXUceaUrjiVyDCVdMBMttbZtjYYwW1NqDl4P4CgtW+CRONRTW8FNDdldzjm2fo/HL\n/frz934yfHA6c6xDWRI4+BEKJpecqxBUoC6xeGNdPd3KFmqPFRp7N/oERpH5AgMB\nAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW\nBBRrr6Rca+ABepsTcpAiBlwqNlkzNzAfBgNVHSMEGDAWgBRrr6Rca+ABepsTcpAi\nBlwqNlkzNzANBgkqhkiG9w0BAQsFAAOCAQEArD1l55HNxEi+lDb8LV+9Zzmb+gxB\nDq27GP6pZD+v8cHdoet3SgTFXeYKrd/Aw34+ZJceKPQrhoLtGkl+UW9T50ymZmVx\nENwuX+8e/OxAYAcKZdAwlCmPBV2A+puager7UZ6cE35W22ZqqijJ3J+nB7BmCtQ7\nqooWmH+OcHkw9Eoa8BbWCAH8nItf7bglCui0yQb4MCbrGMCHOVKwInTpI2biAdb6\nvQwXe1ofL4bVZt0eiPk2tuhljglLjV23q/aaFqTXC7T6UIKtb0olqNjGO10Aasew\ntAxUmbhL/uOz2X2JztYbjYPfVWbeUefTtX8tXV8oqflB6auskk/m2wMUbw==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDSDCCAjCgAwIBAgIIV//3Qv2OxM4wDQYJKoZIhvcNAQELBQAwQjESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSwwKgYDVQQDEyNrdWJlbGV0LWJvb3RzdHJhcC1rdWJlY29u\nZmlnLXNpZ25lcjAeFw0yNTExMDIwNzM0MDhaFw0zNTEwMzEwNzM0MDhaMEIxEjAQ\nBgNVBAsTCW9wZW5zaGlmdDEsMCoGA1UEAxMja3ViZWxldC1ib290c3RyYXAta3Vi\nZWNvbmZpZy1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDc\nZBn9WZ5wLPck6+g3f4p0NBGyE6LaXnWavXx9m0sVdTVQooknndmKufeYkXGZ5Lb+\nfbMAp/6swgSJ1DjdBj06rCqJEZfdZl3uZoD/Th4ha2Phl12bXaNYLiuOu5BOZ3UW\n08y1Wab9Y9zc0o4Z71pHH4o9TH3QPNT6BqAz4kkgD6t1r/R7E7lrZbx+7e+0JBAW\nRgufaFOX1AYU5B4+pSM21eJY7oP1P9I4DMeeJW39opCCHAuUQgHpOV1YPtRqEPJ4\n9matas8qm5qIMIPbGEGFckSJqgny9YCfHaLezJtZMIHJgz5LW4H91gQCGvfSbLtH\nYxYO/PcTXQCnDYNwqf29AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBSCg7Y8QkzypoNAqYVPNK5YWMVIXzANBgkqhkiG\n9w0BAQsFAAOCAQEAsP1NR5ZgC7F5FvoU2CXla4FufdeYTk4bjjNoFJMqnvI5gcY6\nJDxP1Rl2YBHCpRxngtBFxw3Xoe8UnBKzZWsS5wLUYRloprhGVBSLM0vqJJOvP7M0\njt3SLuB7h0dG2GO9yQ4y10+xVWxP5Os9wcbQcRgTQKL3gHmCq4aQN1cqJSxyJ/ut\nlbfYlM/xBcfLMY5Leeas6y2FPCFIEONh1U9FJZlF3YkhPp+XD7aePtC4tJqsokMc\nP80IwPn54aDT9akRPsOteB6C+xSAz2TlfWaJ/l/x9yXK+HJrRhMartqyN511SeEd\nDpNcMW9qPTjJzBj+N3f0ZfvbTmhVSvV65ZEtAw==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDhTCCAm2gAwIBAgIIfksp5LDEMMgwDQYJKoZIhvcNAQELBQAwUDFOMEwGA1UE\nAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX25vZGUtc3lzdGVt\nLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MB4XDTI1MTEwMjA3NTEyN1oXDTI4MTEw\nMTA3NTEyOFowUDFOMEwGA1UEAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9w\nZXJhdG9yX25vZGUtc3lzdGVtLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA402A+B9GaYmzBVLJEyrGvytCY0Vb\nvvIKHW2kxl9IFN3+LNZHW/mKeJfz/hBTm2bs+6uRCDlMSyONDlVUWVsuE+q0+F42\n0n3VyxWRSrDZ2ur5oNxmoBSsHRM+PxccQ6X3JTZyO397LHNOzxAs/Es+St8A8sbY\nGLc1lNqeOLvwAOT5d2PrFlYCAfXYs/UVIaio846jidKKN1f8Z6W5pgdAHuTXbyBQ\nLDQh6s43TBPhww1KszmcwURjEBDCT6KlhsM/quMd9XlMU0ZEAMf6XxsqvW8ia8C8\nF+RNAaGkwmiS4qZ+hJ4KIUnWM84j+bsyNBqlHFKi1e7LsKRyjnQ288FqIQIDAQAB\no2MwYTAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\n9O+dX0K7dzD13bshuR70sHbUAeAwHwYDVR0jBBgwFoAU9O+dX0K7dzD13bshuR70\nsHbUAeAwDQYJKoZIhvcNAQELBQADggEBADPRGSn1U/YwBUkpU7vTzsxqLaVJW21o\n6hV/W2IjjGNGqp6c2kSH/3ZGSEjNwIJqKRFpC2gmTPgAqnC4nDosOHx5F5HXTmrU\n1l2Ivcm1Ep+t/zBgNHjBi3yommx8n2iTTdakpQaq7/u1s0I4UiRqXydjoGXp7H1C\naAsmRlK8ovgEAWzItjeMBzy65wqiStPBK+XAIddqznHCxrRyH5xk3HcnyMG4GDWl\nrogdK8yTGCuZVCvGfe9Hwm8tyYrxDRNvRLTc0ssTonAwnR/7IzaVVc9Pp0svCynJ\n6VX3FGhgWwDVWeajj8yrXeR42az/Rr1TAAOZtJMW+4hIkaU0/+msvgw=\n-----END 
CERTIFICATE-----\n"},"metadata":{"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:ca-bundle.crt":{}},"f:metadata":{"f:annotations":{".":{},"f:openshift.io/owning-component":{}}}},"manager":"cluster-kube-apiserver-operator","operation":"Update","time":"2025-12-08T17:44:30Z"}],"resourceVersion":null,"uid":"e7bc0b2c-2af6-488e-bf6f-25875798350a"}} 2025-12-08T17:44:32.861563308+00:00 stderr F I1208 17:44:32.861509 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapUpdated' Updated ConfigMap/kube-apiserver-client-ca -n openshift-config-managed: 2025-12-08T17:44:32.861563308+00:00 stderr F cause by changes in data.ca-bundle.crt 2025-12-08T17:44:33.188007162+00:00 stderr F I1208 17:44:33.187692 1 request.go:752] "Waited before sending request" delay="2.589437421s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/trusted-ca-bundle" 2025-12-08T17:44:34.387469891+00:00 stderr F I1208 17:44:34.387150 1 request.go:752] "Waited before sending request" delay="1.995742848s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller" 2025-12-08T17:44:35.587739980+00:00 stderr F I1208 17:44:35.587648 1 request.go:752] "Waited before sending request" delay="1.986194207s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" 2025-12-08T17:44:39.586868204+00:00 stderr F I1208 17:44:39.586215 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/webhook-authenticator -n openshift-kube-apiserver because it changed 2025-12-08T17:44:39.603603149+00:00 stderr F I1208 17:44:39.603517 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'StartingNewRevision' new revision 12 triggered by "optional secret/webhook-authenticator has changed" 2025-12-08T17:44:40.993537814+00:00 stderr F I1208 17:44:40.993413 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' Created ConfigMap/kube-apiserver-pod-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:41.587340007+00:00 stderr F I1208 17:44:41.586769 1 request.go:752] "Waited before sending request" delay="1.195764163s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/revision-pruner-11-crc" 2025-12-08T17:44:42.393781946+00:00 stderr F I1208 17:44:42.392103 1 event.go:377] 
Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' Created ConfigMap/config-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:42.587410964+00:00 stderr F I1208 17:44:42.587322 1 request.go:752] "Waited before sending request" delay="1.395747477s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" 2025-12-08T17:44:43.787684861+00:00 stderr F I1208 17:44:43.787309 1 request.go:752] "Waited before sending request" delay="1.395530051s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps" 2025-12-08T17:44:43.795791607+00:00 stderr F I1208 17:44:43.792832 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' Created ConfigMap/kube-apiserver-cert-syncer-kubeconfig-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:44.987589868+00:00 stderr F I1208 17:44:44.987068 1 request.go:752] "Waited before sending request" delay="1.395855219s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/secrets/localhost-recovery-client-token" 2025-12-08T17:44:45.194316550+00:00 stderr F I1208 17:44:45.193192 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' Created ConfigMap/oauth-metadata-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:46.187297641+00:00 stderr F I1208 17:44:46.186965 1 request.go:752] "Waited before sending request" delay="1.347201626s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver" 2025-12-08T17:44:46.592152285+00:00 stderr F I1208 17:44:46.591714 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' Created ConfigMap/bound-sa-token-signing-certs-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:47.187941233+00:00 stderr F I1208 17:44:47.187673 1 request.go:752] "Waited before sending request" delay="1.397372702s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/revision-pruner-11-crc" 2025-12-08T17:44:47.995015910+00:00 stderr F I1208 17:44:47.992105 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' 
Created ConfigMap/etcd-serving-ca-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:48.387820720+00:00 stderr F I1208 17:44:48.387751 1 request.go:752] "Waited before sending request" delay="1.382772315s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/secrets/node-kubeconfigs" 2025-12-08T17:44:49.399164000+00:00 stderr F I1208 17:44:49.397509 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' Created ConfigMap/kube-apiserver-server-ca-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:49.587345786+00:00 stderr F I1208 17:44:49.587284 1 request.go:752] "Waited before sending request" delay="1.390650645s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" 2025-12-08T17:44:50.786767781+00:00 stderr F I1208 17:44:50.786704 1 request.go:752] "Waited before sending request" delay="1.389408421s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps" 2025-12-08T17:44:50.793019395+00:00 stderr F I1208 17:44:50.792853 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' Created ConfigMap/kubelet-serving-ca-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:51.787829605+00:00 stderr F I1208 17:44:51.787739 1 request.go:752] "Waited before sending request" delay="1.398084201s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/services/apiserver" 2025-12-08T17:44:52.196937028+00:00 stderr F I1208 17:44:52.195178 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' Created ConfigMap/sa-token-signing-certs-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:52.987868526+00:00 stderr F I1208 17:44:52.987218 1 request.go:752] "Waited before sending request" delay="1.39441124s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-audit-policies" 2025-12-08T17:44:53.593794397+00:00 stderr F I1208 17:44:53.593704 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' Created ConfigMap/kube-apiserver-audit-policies-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:53.987546372+00:00 stderr F I1208 17:44:53.987457 1 request.go:752] "Waited before sending request" delay="1.383467815s" reason="client-side 
throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/secrets/node-kubeconfigs" 2025-12-08T17:44:54.997797482+00:00 stderr F I1208 17:44:54.997696 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretCreated' Created Secret/etcd-client-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:55.187022857+00:00 stderr F I1208 17:44:55.186863 1 request.go:752] "Waited before sending request" delay="1.39553153s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" 2025-12-08T17:44:56.187393202+00:00 stderr F I1208 17:44:56.187329 1 request.go:752] "Waited before sending request" delay="1.394948994s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/trusted-ca-bundle" 2025-12-08T17:44:57.402429571+00:00 stderr F I1208 17:44:57.402354 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretCreated' Created Secret/localhost-recovery-serving-certkey-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:58.197515314+00:00 stderr F I1208 17:44:58.196758 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretCreated' Created Secret/localhost-recovery-client-token-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:44:59.194347431+00:00 stderr F I1208 17:44:59.191796 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretCreated' Created Secret/webhook-authenticator-12 -n openshift-kube-apiserver because it was missing 2025-12-08T17:45:00.193032979+00:00 stderr F I1208 17:45:00.192969 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RevisionTriggered' new revision 12 triggered by "optional secret/webhook-authenticator has changed" 2025-12-08T17:45:01.388579355+00:00 stderr F I1208 17:45:01.387931 1 request.go:752] "Waited before sending request" delay="1.195054943s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-audit-policies" 2025-12-08T17:45:02.588256786+00:00 stderr F I1208 17:45:02.586836 1 request.go:752] "Waited before sending request" delay="1.58849624s" reason="client-side throttling, not priority and fairness" verb="GET" 
URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/localhost-recovery-client" 2025-12-08T17:45:03.796010951+00:00 stderr F I1208 17:45:03.794575 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'PodCreated' Created Pod/revision-pruner-12-crc -n openshift-kube-apiserver because it was missing 2025-12-08T17:45:04.987573437+00:00 stderr F I1208 17:45:04.986859 1 request.go:752] "Waited before sending request" delay="1.187191564s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver" 2025-12-08T17:45:05.988053936+00:00 stderr F I1208 17:45:05.987539 1 request.go:752] "Waited before sending request" delay="1.195165496s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller" 2025-12-08T17:45:06.391240944+00:00 stderr F I1208 17:45:06.391173 1 installer_controller.go:562] node crc with revision 11 is the oldest and needs new revision 12 2025-12-08T17:45:06.391297516+00:00 stderr F I1208 17:45:06.391234 1 installer_controller.go:570] "crc" moving to (v1.NodeStatus) { 2025-12-08T17:45:06.391297516+00:00 stderr F NodeName: (string) (len=3) "crc", 2025-12-08T17:45:06.391297516+00:00 stderr F CurrentRevision: (int32) 11, 2025-12-08T17:45:06.391297516+00:00 stderr F TargetRevision: (int32) 12, 2025-12-08T17:45:06.391297516+00:00 stderr F LastFailedRevision: (int32) 11, 2025-12-08T17:45:06.391297516+00:00 stderr F LastFailedTime: (*v1.Time)(0xc00679f848)(2025-11-03 08:56:07 +0000 UTC), 2025-12-08T17:45:06.391297516+00:00 stderr F LastFailedReason: (string) (len=15) "InstallerFailed", 2025-12-08T17:45:06.391297516+00:00 stderr F LastFailedCount: (int) 1, 2025-12-08T17:45:06.391297516+00:00 stderr F LastFallbackCount: (int) 0, 2025-12-08T17:45:06.391297516+00:00 stderr F LastFailedRevisionErrors: ([]string) (len=1 cap=1) { 2025-12-08T17:45:06.391297516+00:00 stderr F (string) (len=73) "installer: The container could not be located when the pod was terminated" 2025-12-08T17:45:06.391297516+00:00 stderr F } 2025-12-08T17:45:06.391297516+00:00 stderr F } 2025-12-08T17:45:06.422590297+00:00 stderr F I1208 17:45:06.422521 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'NodeTargetRevisionChanged' Updating node "crc" from revision 11 to 12 because node crc with revision 11 is the oldest 2025-12-08T17:45:06.434401915+00:00 stderr F I1208 17:45:06.434308 1 status_controller.go:230] clusteroperator/kube-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:06Z","message":"NodeInstallerProgressing: 1 node is at revision 11; 0 nodes have achieved new revision 12","reason":"NodeInstaller","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:08:37Z","message":"StaticPodsAvailable: 1 nodes are 
active; 1 node is at revision 11; 0 nodes have achieved new revision 12","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:28Z","message":"KubeletMinorVersionUpgradeable: Kubelet and API server minor versions are synced.","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:29Z","message":"PodSecurityInconclusiveEvaluationConditionsDetected: Could not evaluate violations for namespaces: [kube-node-lease openshift openshift-apiserver-operator openshift-cloud-network-config-controller openshift-cluster-samples-operator openshift-cluster-storage-operator openshift-config-managed openshift-config-operator openshift-console openshift-console-operator openshift-console-user-settings openshift-controller-manager openshift-controller-manager-operator openshift-dns-operator openshift-host-network openshift-ingress-canary openshift-ingress-operator openshift-kube-controller-manager-operator openshift-kube-storage-version-migrator openshift-kube-storage-version-migrator-operator openshift-network-console openshift-network-diagnostics openshift-node openshift-route-controller-manager openshift-service-ca openshift-service-ca-operator openshift-user-workload-monitoring]","reason":"PodSecurityInconclusive_PSViolationDecisionInconclusive","status":"True","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:45:06.446492002+00:00 stderr F I1208 17:45:06.445137 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-apiserver changed: Progressing changed from False to True ("NodeInstallerProgressing: 1 node is at revision 11; 0 nodes have achieved new revision 12"),Available message changed from "StaticPodsAvailable: 1 nodes are active; 1 node is at revision 11" to "StaticPodsAvailable: 1 nodes are active; 1 node is at revision 11; 0 nodes have achieved new revision 12" 2025-12-08T17:45:07.187252123+00:00 stderr F I1208 17:45:07.187185 1 request.go:752] "Waited before sending request" delay="1.194205609s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller" 2025-12-08T17:45:08.188354639+00:00 stderr F I1208 17:45:08.187812 1 request.go:752] "Waited before sending request" delay="1.747507335s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-audit-policies" 2025-12-08T17:45:09.386764235+00:00 stderr F I1208 17:45:09.386699 1 request.go:752] "Waited before sending request" delay="1.994035854s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver" 2025-12-08T17:45:10.388112638+00:00 stderr F I1208 17:45:10.388027 1 request.go:752] "Waited before sending request" delay="1.196264246s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller" 2025-12-08T17:45:10.594285094+00:00 stderr F I1208 17:45:10.594235 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", 
Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'PodCreated' Created Pod/installer-12-crc -n openshift-kube-apiserver because it was missing 2025-12-08T17:45:11.190970497+00:00 stderr F I1208 17:45:11.189353 1 installer_controller.go:550] "crc" is in transition to 12, but has not made progress because installer is not finished, but in Pending phase 2025-12-08T17:45:11.790511669+00:00 stderr F I1208 17:45:11.790047 1 request.go:752] "Waited before sending request" delay="1.19530301s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" 2025-12-08T17:45:14.187591668+00:00 stderr F I1208 17:45:14.187068 1 request.go:752] "Waited before sending request" delay="1.104089641s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-audit-policies" 2025-12-08T17:45:14.393256640+00:00 stderr F I1208 17:45:14.393148 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapCreated' Created ConfigMap/user-client-ca -n openshift-kube-apiserver because it was missing 2025-12-08T17:45:14.590088368+00:00 stderr F I1208 17:45:14.590041 1 installer_controller.go:550] "crc" is in transition to 12, but has not made progress because installer is not finished, but in Running phase 2025-12-08T17:45:15.386999511+00:00 stderr F I1208 17:45:15.386947 1 request.go:752] "Waited before sending request" delay="1.397216367s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/secrets/localhost-recovery-client-token" 2025-12-08T17:45:16.042819789+00:00 stderr F I1208 17:45:16.042515 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.04247097 +0000 UTC))" 2025-12-08T17:45:16.042967133+00:00 stderr F I1208 17:45:16.042942 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.042916472 +0000 UTC))" 2025-12-08T17:45:16.043085147+00:00 stderr F I1208 17:45:16.043042 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.043010264 +0000 UTC))" 2025-12-08T17:45:16.043205710+00:00 stderr F I1208 17:45:16.043174 1 
tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.043137818 +0000 UTC))" 2025-12-08T17:45:16.043295462+00:00 stderr F I1208 17:45:16.043273 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.043245411 +0000 UTC))" 2025-12-08T17:45:16.043367544+00:00 stderr F I1208 17:45:16.043347 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.043325463 +0000 UTC))" 2025-12-08T17:45:16.043436676+00:00 stderr F I1208 17:45:16.043417 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.043397255 +0000 UTC))" 2025-12-08T17:45:16.043504548+00:00 stderr F I1208 17:45:16.043485 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.043464707 +0000 UTC))" 2025-12-08T17:45:16.043571110+00:00 stderr F I1208 17:45:16.043552 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.043532709 +0000 UTC))" 2025-12-08T17:45:16.043655222+00:00 stderr F I1208 17:45:16.043634 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.043614861 +0000 UTC))" 2025-12-08T17:45:16.043724154+00:00 stderr F I1208 17:45:16.043705 1 tlsconfig.go:181] "Loaded client CA" index=10 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.043685953 +0000 UTC))" 2025-12-08T17:45:16.044170637+00:00 stderr F I1208 17:45:16.044142 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-kube-apiserver-operator.svc\" [serving] validServingFor=[metrics.openshift-kube-apiserver-operator.svc,metrics.openshift-kube-apiserver-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:13 +0000 UTC to 2027-11-02 07:52:14 +0000 UTC (now=2025-12-08 17:45:16.044115366 +0000 UTC))" 2025-12-08T17:45:16.057806227+00:00 stderr F I1208 17:45:16.057757 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215860\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215860\" (2025-12-08 16:44:19 +0000 UTC to 2028-12-08 16:44:19 +0000 UTC (now=2025-12-08 17:45:16.057709933 +0000 UTC))" 2025-12-08T17:45:16.387740326+00:00 stderr F I1208 17:45:16.387651 1 request.go:752] "Waited before sending request" delay="1.788083663s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" 2025-12-08T17:45:16.998608314+00:00 stderr P I1208 17:45:16.998525 1 core.go:352] ConfigMap "openshift-config-managed/kube-apiserver-client-ca" changes: {"data":{"ca-bundle.crt":"-----BEGIN 
CERTIFICATE-----\nMIIFWzCCA0OgAwIBAgIUGCGM8Q3O0omYhECixt5AvIY+d4owDQYJKoZIhvcNAQEL\nBQAwPTESMBAGA1UECwwJb3BlbnNoaWZ0MScwJQYDVQQDDB5hZG1pbi1rdWJlY29u\nZmlnLXNpZ25lci1jdXN0b20wHhcNMjUxMjA4MTc0NTA5WhcNMzUxMjA2MTc0NTA5\nWjA9MRIwEAYDVQQLDAlvcGVuc2hpZnQxJzAlBgNVBAMMHmFkbWluLWt1YmVjb25m\naWctc2lnbmVyLWN1c3RvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB\nANpMZjkbItUcAdd+J4eSy87mk6EfZSZD8WMH+LJpdHgeZFUqJZfH8qVDAn3QyEkE\nPn1W+6zV/EUmgAx76hvaAfYq9U7ic5RYK8jJt2j6Tb0SMG+/kvaEohwCnX5GDSek\nzzSRKc6aHZwjkR3d4QpY8BOzMx8lBIIl/px2xsw3QGtihaeBbnYa7CcbWznR/V0b\nfJ/o/oMd5okhZtJZkc0w6o4codNaSIFu1MbPPBCK6OwVfoD43uq+y/Wcinv3M1sw\nKKFaW9gaMFAkStevvcQcFFSSRej8CuZK+o2H+2OxTVi19P4WmIDn9A22MPrlIGno\nOcQPfFayfIczLMiUNe6bjueCMkVEIfTszMKUALNlzHPQ1W15CC3Bqg4xqnRL9JpL\nE1DBQwhuq4lvAxFItsJhQCagWlHgyinbVZHOB/QS+RZ4Vo2DcIkTcXRxZ7KUz/mj\nitF8kCdDz6aUiPeDNGm2M4fKBdWqrgHLUqfATGq3Qh545HpZ6QqYffvLLNLuKxM0\nim+qD5wCgoJPROitdK5plsPfe/C4zjoYc7oFKlXM389DNj0KxwRvMUE6kZoptjUo\nd676JxYQF3XrZnIpZ+PlIqXt2R+ahpuz0BvBMAlwrqEhDP9CsCHx8sRXNrw3OSSp\n9LZ5CRFampF4RoHWikdd8uybWY05f7Eis/o2gEPJrCUnAgMBAAGjUzBRMB0GA1Ud\nDgQWBBScadKZBJR7Ydm+yfS6UsTlrSxg1DAfBgNVHSMEGDAWgBScadKZBJR7Ydm+\nyfS6UsTlrSxg1DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBp\nOh3vzzZhIkTOOVScU0gxIIl6mpSRScsJkK3TfSdvQWCrNlYJmPcO1aD/YAJv6fmT\nfih+NkBD00qrrkzcdOsaDJXsT2oaB4h8VJQrTDcal3V4F3jWS8j0bn8QukgwWtIq\nevP2sTS8oIzx59k/e2EhjdCgDjnTEBDajXfn9UXRjh+3ZHqmFtZYdz/uZWmBxeLK\n6Kqi0GtFwP6dfylQzg1IXB0C4D2xqVoHEimKIrBQyak8RmDKzleRxvIOUSj3o5DM\nVEyajcJQ6XaD+IwMGh1/DVLxN4uTMbMZDwv+gWl3TvK++f+TSSTMhy+92A2WecDO\nPNLD7xiX5wc6ge5Dh9AzzoOW3tP1iiB9Y0iCmxuj4SUhR0hfgQlRY6sxF40E8xWO\nNNQYbDo+rEwE7frnykHMfqclzJ/a8ax3+lzfM4CvYOmj97909M+2pc0d8Dnbkg75\nncxbob8nQ2UTmQ4nu3qFCZ+5ssDtQaDBXCzSbrSUiFpYtZ1vDZMXcBcoPtri29Ih\ndhUSPKLUHmHzvcEK1n8PPRcfKHjES8s0ankZfnKkcU11Yjhx8eeKUT+s2Iq+Tl6e\ndHKDccvC42BF9X0NarLfvMJcrQu1mPjBYs6WX9a2v9uvg0DG4OAZnu6oQ+gw25LT\n31bGdVPDcEEpNdcMmlY/LsOs00DNYQmW8rdLjfhjrQ==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDXTCCAkWgAwIBAgIIHuh6I9HTH+QwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM1WhcNMjYx\nMDI4MDgxNzM2WjAmMSQwIgYDVQQDDBtrdWJlLWNzci1zaWduZXJfQDE3NjIwNzE0\nNTUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb91UVWMxGg+Q7+o+5\ntt2z/Oq+7Mc1SNGJ4oF0Y99WM1Y7tIo5NOKWiIXE16rGuzY4+D9O53fh14ngzJm3\nWh3GjCGvr8/2W7J1BblXwuKwDuRl+OfJvyPtETUeME42Y9V6XP/B60iaaEYhm5t7\nTDf7TmmjEpqeWix43KqHnpcW9Zr8tM+tHLrGwcHnb+z3LvGkQA7mbXsaHiuryCVx\nudxQYNKWgtAkw3OOtuVyJ2gGD7iYVni1jg7nc9ZhQOYBoYRbAw3zh36CY50dZA89\nhDKYsVVourd9xfAdwSmrcgtsVo1X0ucCpEEUYKEz3/udgk+Dgf2hy3flIMcg9kcI\nS8xzAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0G\nA1UdDgQWBBSGMLijow+n1t5asMgXimhNhoMUvDAfBgNVHSMEGDAWgBQwDwrhoAwS\n6q2GA5CnoIQZVVim5jANBgkqhkiG9w0BAQsFAAOCAQEAhDT7ncsDiLG3QyVtffPp\nTjWscZowuHbVwUjwpZt5OxQFonSJxNCnqrj4MlIqqxraH5nnsrW5FqWyWWeMmXpz\nbFkiVhPFCVGjmRN9V1LXjHDc4lufe3ixq+MvgtU1YL/WJBmUxxw5dPifTT15IApJ\n6stLJ0QtHBNeEIIroUFpDB+O7OJYZ85ed6o6gT4n/v9nxBaLsZNpO2TzaWfI0Bst\nFEoPsfPKgBvwg9+2GijlP/VyKmP2gFdFm25PWeROU1VZzPrEhaOliO+/YXHt32YU\nJkxTF/smrLzxRRbb507cuvWEilzud93YbHmAhAj1h0CpDdzjxYDr5zRYJSP7BV+6\nUA==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDiTCCAnGgAwIBAgIIOsA0z3vOTSgwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM0WhcNMjcx\nMDIzMDgxNzM1WjBSMVAwTgYDVQQDDEdvcGVuc2hpZnQta3ViZS1jb250cm9sbGVy\nLW1hbmFnZXItb3BlcmF0b3JfY3NyLXNpZ25lci1zaWduZXJAMTc2MjA3MTQ1NTCC\nASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPY8aATxrjWpRKfnFU7BgmD1\nkNEst9RVY3fzGsARc5tF3x+zhl9BbsoJ9NAncCj+KnkAIYk4pJMU6BHIWIakpvqV\nLsnlPM45VU8ocwOWb5Z7g88YdqHGRWeZqZt/rPXmH/846iVGDstB0YQWgKKKK97X\nvjKMsq9ALSVj8gRWai7B7MVP/bZ4FgeqsYq6zIH9XKdPO8ev20qffrob4nmLGHdJ\nikAliwIy6nkVYrATKOS8t56votMD7xuFQ8rM6uQ0YVejA5rE/Tmq4/NsFpfFfkCP\nMU0vO2XwqCBV81XKD00SmW9MXIxbTq5lU9YjjE1sDFREtN4uZL4nEDa2+wnvIxEC\nAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O\nBBYEFDAPCuGgDBLqrYYDkKeghBlVWKbmMB8GA1UdIwQYMBaAFDAPCuGgDBLqrYYD\nkKeghBlVWKbmMA0GCSqGSIb3DQEBCwUAA4IBAQDS31/o3/4/9DBLFVcLvIwOQCK1\n0ZwJUa/8iK5GaMlMH6cNfvVZBrb4SwTVx0WXI5wrIXrlvYc+PtXL0MJeyIJmMpoU\nRyQAJZsh8cckeQjghV2Pf7wMfEbHudKTp8uoQDUBntkfNhJa4pPxmNWuhOrlvdB5\nEF/6IGviKAdSy0jcNpscvD3W0oSpCYRW0Ki/25LaFvIqP2Xy/cNJlWhzJWqZbK6k\nR9I4knhvIv/JYmppOVXw1rEvP+8Pn8UF2oSfFXcN5W+j4YIIhrAUnjAbaflpyX8k\nAUEKdtgVNNe7RlW9nQrhO8GqFJItitBbNtVuSkw9XIlQ0gc40E7mgB7Mjnbu\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDQjCCAiqgAwIBAgIICN23rtJ7PrYwDQYJKoZIhvcNAQELBQAwPzESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSkwJwYDVQQDEyBrdWJlLWFwaXNlcnZlci10by1rdWJlbGV0\nLXNpZ25lcjAeFw0yNTExMDIwNzM0MTFaFw0yNjExMDIwNzM0MTFaMD8xEjAQBgNV\nBAsTCW9wZW5zaGlmdDEpMCcGA1UEAxMga3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxl\ndC1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDzRlKEKNEy\nCyd+PPBkdzQZd3BeUb0b2qi1h8fbUiSNENrOpafKxxAHcr3a4KWB6sKhi28r14mF\nxcJ2Yb92/jLpkS15p629AUrGXxKuL8QtkBsY3dH0CqKMBedO6oxodva9Avc+3DMI\nvvYBJFy+4on/0JbM54fduvDmcEmhBtgRItK3Z87VbhemVPj7uDi9EV381uRMlmq4\ncgtD5mfS1yeRu0ut5IIr7/PN1G+93slLGQkHveqWlsFrDYd8Qm5PqirRBYy+18RC\nmEuNirFX3yPrEGwMvRlJyFia0RKuK69bFL2vduI5Wu7h/6VKP0/vEpEYqI6bJYoV\nbUjA2vqrV/1VAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD\nAQH/MB0GA1UdDgQWBBRNj9dsSIhfwKZ491Q/XZYt2lRWLzANBgkqhkiG9w0BAQsF\nAAOCAQEANmt7a5N+zch83D5+YeC4VQoFvpkHPKEYYT0EM/ykjch3453RmQWQEwkj\nVvstV1U16YpnEI61l1s347RHi64SwtlwV6tiNCpopDF2u3Bb+eiJqrlIC69EFnZE\n1426AVmZZq3sWu3eKB0HgT5u6B1rErSTl3c4hK4SiDsWWlVktBSN0BS4cD+urSAF\nc673/wLKCjq2I+9i3Wv2K7Ton3w5oaETE7lgQyImbKOVhJhFrPGu9fKXaeWlyXGY\nj7tz68vNTvecRynKrmzUJ9BBMfAXTrCowitzjBjanFitgXK4DnQMkb+8lv2Txb/n\nkB7RzcFDyIVd3g5XWBujR3fkQFWsNQ==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDlTCCAn2gAwIBAgIIV5i/4m8WRp0wDQYJKoZIhvcNAQELBQAwWDFWMFQGA1UE\nAwxNb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtYXBpc2Vy\ndmVyLXRvLWt1YmVsZXQtc2lnbmVyQDE3NjUyMTU4NjMwHhcNMjUxMjA4MTc0NDIz\nWhcNMjYxMjA4MTc0NDI0WjBYMVYwVAYDVQQDDE1vcGVuc2hpZnQta3ViZS1hcGlz\nZXJ2ZXItb3BlcmF0b3Jfa3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxldC1zaWduZXJA\nMTc2NTIxNTg2MzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMQk29Lc\nviCBF+ZN194ChQgHyYo1iN6wW+jZDEEIQpkmIfgrPnEOPL8+9d3SN92BqqYGdwnp\n5TdyDJBFBjrM8iKKvrq6x+EcyQJU6/Q+41bpPSLsziclImlDUUE29OYj6poxfNi1\nQBeFL1q4j9/ks+AfMnpjEbiGjxjJ8cV8++3NERSB1jJLft1rYcnQvgBuE64jqipO\nbNczVjMjcq0g+H+qpZknHlFueBqi5F/Nj/hC7QZbS96VThCxM123zqORBAfU5Fj0\ndMk3XqYTM1mpfyQHihtlyG3vsPXI/CBZgno6CI+KuXZJ46IjNNmiImyVJNKe7tXS\niWxbKKtEZHAvMcsCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQF\nMAMBAf8wHQYDVR0OBBYEFKzukDY3Odtmp3C+ncqem+63g2NuMB8GA1UdIwQYMBaA\nFKzukDY3Odtmp3C+ncqem+63g2NuMA0GCSqGSIb3DQEBCwUAA4IBAQBC2NLbh36L\n05mNq0+V4avx/2/xXvih+RtebPhiF8w8WG7WWRiIlK/yn8+iToFX/07+HWbBSK3g\nu5Yqac0eh8iKLkG+eIFiXpZR4B4Ha3ZRoU4N6dBMohIChZNugHGtjhfFjDpjFY8N\n9jMoZmTtjtK7RW2tu1qRyJcNSk8ou6nYNo/fB9PHWP5E12cWdg2ZQyESq+zE2dFo\n/dNjvb2y+GneObWzG9nclr6L7f6jI4LSOujO9ZA28xW4lf2EmosQ2HOeun48vA3O\n0C9lO1/SqcPkA6TtMHsoXZDSRv+mH62ugEZkDn8lgOizTm3l+jU9UA4RSvRD7ghR\nuXScj56hVynp\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDNDCCAhygAwIBAgIILvKlXd2YBKIwDQYJKoZIhvcNAQELBQAwODESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVy\nMB4XDTI1MTEwMjA3MzQxMFoXDTI2MTEwMjA3MzQxMFowODESMBAGA1UECxMJb3Bl\nbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOnFEdxnqqE7l+5NxN82TBBycC2F\n5cUboCcUVr211lubqmjCusnJzzz+6rKpO8uFqqGeu4C7rGpudOaK52IZhG0WP8b7\n0raQhnM4DV8t2SHDV4GhFUiuNE+b4FJrZ6jiljQo2g9ZeeCZgdmmBrIFHBXDFzEc\nA1RPScqOtvBbbH064Zd267gOmVPJnWmxDXo6X/RGYCm1YUS6FQ2WWpl707ComvgZ\nAvWGSA4H1sZirMQ+ug3bctkLb+SiXUzf+tLnGIHPeqDNfMrNUhxBl6dDhlMbUIzY\nrVxgDD8y3i7eg5i5HG8yntl8epgs2gn47wfavfjLqATBlciJPZY6Qv3BFQIDAQAB\no0IwQDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\nWi6wmZ50AVdwxaiO3WjTvp82+sEwDQYJKoZIhvcNAQELBQADggEBAA4siMWwOGL5\nCiCxbiJsuKMbCLlFR6JaD43FWnUCZDoe7np7W76Oh4py4Zht7XlZKotcXrrfRYVO\ndVht66PCbSujy375p/B6c3isG4h/1cNSGDm1uhAkHXGZ88S2wSjKT5YJ/HUAkvyj\nadQgZeO7Q60YBSDE/67Ldq1zqvBrMF2k8pF49p1AdAtf4OSDzIaGGPUQJ 2025-12-08T17:45:16.998668145+00:00 stderr F TFExA0E\n03xMlOPNhYZ8MgFT2XE6nRT74lCAfK9krAsZLtFuAtp/14t013PD0FqTTQRUmuSj\nO6pJKDTH8tZ3ieXxSzRR+j4p5hkHaehgQVyUbwiw8WVXkd6NcWR6yQcSeqIsTSCD\neMDdTmmKyuo=\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDhzCCAm+gAwIBAgIIcuJfJWKJ/NEwDQYJKoZIhvcNAQELBQAwUTFPME0GA1UE\nAwxGb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtY29udHJv\nbC1wbGFuZS1zaWduZXJAMTc2NTIxNTg2MzAeFw0yNTEyMDgxNzQ0MjNaFw0yNjAy\nMDYxNzQ0MjRaMFExTzBNBgNVBAMMRm9wZW5zaGlmdC1rdWJlLWFwaXNlcnZlci1v\ncGVyYXRvcl9rdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyQDE3NjUyMTU4NjMwggEi\nMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC86U3wU8BYhyUyqYM3Vuy/Kvfu\nQlUYxAqiVxA50CLa488sawRtfVN03+NKfPtzoj6xg5nYxR0a/+IP95n2YltFsU5k\nyumfMqcMWP1gZeUuqq0tHgy/GYvD4uF2IWLRMYMdYrsbJlOPWRCnRfWtXN7LJHAY\nBQwKW01c7MOm8AMOT5sGCw7z1GwROdLkjebZSAWeWP+uho5ubO7R9yFVrMJGzBum\nXUceaUrjiVyDCVdMBMttbZtjYYwW1NqDl4P4CgtW+CRONRTW8FNDdldzjm2fo/HL\n/frz934yfHA6c6xDWRI4+BEKJpecqxBUoC6xeGNdPd3KFmqPFRp7N/oERpH5AgMB\nAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW\nBBRrr6Rca+ABepsTcpAiBlwqNlkzNzAfBgNVHSMEGDAWgBRrr6Rca+ABepsTcpAi\nBlwqNlkzNzANBgkqhkiG9w0BAQsFAAOCAQEArD1l55HNxEi+lDb8LV+9Zzmb+gxB\nDq27GP6pZD+v8cHdoet3SgTFXeYKrd/Aw34+ZJceKPQrhoLtGkl+UW9T50ymZmVx\nENwuX+8e/OxAYAcKZdAwlCmPBV2A+puager7UZ6cE35W22ZqqijJ3J+nB7BmCtQ7\nqooWmH+OcHkw9Eoa8BbWCAH8nItf7bglCui0yQb4MCbrGMCHOVKwInTpI2biAdb6\nvQwXe1ofL4bVZt0eiPk2tuhljglLjV23q/aaFqTXC7T6UIKtb0olqNjGO10Aasew\ntAxUmbhL/uOz2X2JztYbjYPfVWbeUefTtX8tXV8oqflB6auskk/m2wMUbw==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDSDCCAjCgAwIBAgIIV//3Qv2OxM4wDQYJKoZIhvcNAQELBQAwQjESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSwwKgYDVQQDEyNrdWJlbGV0LWJvb3RzdHJhcC1rdWJlY29u\nZmlnLXNpZ25lcjAeFw0yNTExMDIwNzM0MDhaFw0zNTEwMzEwNzM0MDhaMEIxEjAQ\nBgNVBAsTCW9wZW5zaGlmdDEsMCoGA1UEAxMja3ViZWxldC1ib290c3RyYXAta3Vi\nZWNvbmZpZy1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDc\nZBn9WZ5wLPck6+g3f4p0NBGyE6LaXnWavXx9m0sVdTVQooknndmKufeYkXGZ5Lb+\nfbMAp/6swgSJ1DjdBj06rCqJEZfdZl3uZoD/Th4ha2Phl12bXaNYLiuOu5BOZ3UW\n08y1Wab9Y9zc0o4Z71pHH4o9TH3QPNT6BqAz4kkgD6t1r/R7E7lrZbx+7e+0JBAW\nRgufaFOX1AYU5B4+pSM21eJY7oP1P9I4DMeeJW39opCCHAuUQgHpOV1YPtRqEPJ4\n9matas8qm5qIMIPbGEGFckSJqgny9YCfHaLezJtZMIHJgz5LW4H91gQCGvfSbLtH\nYxYO/PcTXQCnDYNwqf29AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBSCg7Y8QkzypoNAqYVPNK5YWMVIXzANBgkqhkiG\n9w0BAQsFAAOCAQEAsP1NR5ZgC7F5FvoU2CXla4FufdeYTk4bjjNoFJMqnvI5gcY6\nJDxP1Rl2YBHCpRxngtBFxw3Xoe8UnBKzZWsS5wLUYRloprhGVBSLM0vqJJOvP7M0\njt3SLuB7h0dG2GO9yQ4y10+xVWxP5Os9wcbQcRgTQKL3gHmCq4aQN1cqJSxyJ/ut\nlbfYlM/xBcfLMY5Leeas6y2FPCFIEONh1U9FJZlF3YkhPp+XD7aePtC4tJqsokMc\nP80IwPn54aDT9akRPsOteB6C+xSAz2TlfWaJ/l/x9yXK+HJrRhMartqyN511SeEd\nDpNcMW9qPTjJzBj+N3f0ZfvbTmhVSvV65ZEtAw==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDhTCCAm2gAwIBAgIIfksp5LDEMMgwDQYJKoZIhvcNAQELBQAwUDFOMEwGA1UE\nAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX25vZGUtc3lzdGVt\nLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MB4XDTI1MTEwMjA3NTEyN1oXDTI4MTEw\nMTA3NTEyOFowUDFOMEwGA1UEAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9w\nZXJhdG9yX25vZGUtc3lzdGVtLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA402A+B9GaYmzBVLJEyrGvytCY0Vb\nvvIKHW2kxl9IFN3+LNZHW/mKeJfz/hBTm2bs+6uRCDlMSyONDlVUWVsuE+q0+F42\n0n3VyxWRSrDZ2ur5oNxmoBSsHRM+PxccQ6X3JTZyO397LHNOzxAs/Es+St8A8sbY\nGLc1lNqeOLvwAOT5d2PrFlYCAfXYs/UVIaio846jidKKN1f8Z6W5pgdAHuTXbyBQ\nLDQh6s43TBPhww1KszmcwURjEBDCT6KlhsM/quMd9XlMU0ZEAMf6XxsqvW8ia8C8\nF+RNAaGkwmiS4qZ+hJ4KIUnWM84j+bsyNBqlHFKi1e7LsKRyjnQ288FqIQIDAQAB\no2MwYTAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\n9O+dX0K7dzD13bshuR70sHbUAeAwHwYDVR0jBBgwFoAU9O+dX0K7dzD13bshuR70\nsHbUAeAwDQYJKoZIhvcNAQELBQADggEBADPRGSn1U/YwBUkpU7vTzsxqLaVJW21o\n6hV/W2IjjGNGqp6c2kSH/3ZGSEjNwIJqKRFpC2gmTPgAqnC4nDosOHx5F5HXTmrU\n1l2Ivcm1Ep+t/zBgNHjBi3yommx8n2iTTdakpQaq7/u1s0I4UiRqXydjoGXp7H1C\naAsmRlK8ovgEAWzItjeMBzy65wqiStPBK+XAIddqznHCxrRyH5xk3HcnyMG4GDWl\nrogdK8yTGCuZVCvGfe9Hwm8tyYrxDRNvRLTc0ssTonAwnR/7IzaVVc9Pp0svCynJ\n6VX3FGhgWwDVWeajj8yrXeR42az/Rr1TAAOZtJMW+4hIkaU0/+msvgw=\n-----END CERTIFICATE-----\n"},"metadata":{"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:ca-bundle.crt":{}},"f:metadata":{"f:annotations":{".":{},"f:openshift.io/owning-component":{}}}},"manager":"cluster-kube-apiserver-operator","operation":"Update","time":"2025-12-08T17:45:16Z"}],"resourceVersion":null,"uid":"e7bc0b2c-2af6-488e-bf6f-25875798350a"}} 2025-12-08T17:45:17.000910338+00:00 stderr F I1208 17:45:17.000857 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapUpdated' Updated ConfigMap/kube-apiserver-client-ca -n openshift-config-managed: 2025-12-08T17:45:17.000910338+00:00 stderr F cause by changes in data.ca-bundle.crt 2025-12-08T17:45:17.587136149+00:00 stderr F I1208 17:45:17.587076 1 request.go:752] "Waited before sending request" delay="1.388527825s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/secrets/node-kubeconfigs" 2025-12-08T17:45:18.390322398+00:00 stderr F I1208 17:45:18.390257 1 installer_controller.go:550] "crc" is in transition to 12, but has not made progress because installer is not finished, but in Running phase 2025-12-08T17:45:20.190933051+00:00 stderr F I1208 17:45:20.189474 1 installer_controller.go:550] "crc" is in transition to 12, but has not made progress because installer is not finished, but in Running phase 2025-12-08T17:45:50.268311912+00:00 stderr F I1208 17:45:50.267755 1 termination_observer.go:236] Observed event "TerminationPreShutdownHooksFinished" for API server pod "kube-apiserver-crc" (last termination at 2025-12-08 17:44:01 +0000 UTC) at 2025-12-08 17:45:50 +0000 UTC 2025-12-08T17:45:52.300413968+00:00 stderr F I1208 17:45:52.299722 1 termination_observer.go:236] Observed event "TerminationGracefulTerminationFinished" for API server pod "kube-apiserver-crc" (last termination at 2025-12-08 17:44:01 +0000 UTC) at 2025-12-08 17:45:52 +0000 UTC 2025-12-08T17:45:55.279450766+00:00 stderr F E1208 17:45:55.278948 1 base_controller.go:279] "Unhandled 
Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.279521828+00:00 stderr F E1208 17:45:55.278954 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.284010113+00:00 stderr F E1208 17:45:55.283953 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.287138547+00:00 stderr F E1208 17:45:55.287082 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.287138547+00:00 stderr F E1208 17:45:55.287127 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.289388965+00:00 stderr F E1208 17:45:55.289331 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.291411876+00:00 stderr F E1208 17:45:55.291312 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.300032864+00:00 stderr F E1208 17:45:55.299974 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.300080106+00:00 stderr F E1208 17:45:55.300038 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.302001343+00:00 stderr F E1208 17:45:55.301937 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 
2025-12-08T17:45:55.304292542+00:00 stderr F E1208 17:45:55.304251 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.320734765+00:00 stderr F E1208 17:45:55.320692 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.322341974+00:00 stderr F E1208 17:45:55.322308 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.327685335+00:00 stderr F E1208 17:45:55.327646 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.370602572+00:00 stderr F E1208 17:45:55.370539 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.454922483+00:00 stderr F E1208 17:45:55.454545 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.478780409+00:00 stderr F E1208 17:45:55.478720 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.617828403+00:00 stderr F E1208 17:45:55.617771 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.682347959+00:00 stderr F E1208 17:45:55.682276 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.878281951+00:00 stderr F E1208 17:45:55.878216 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:55.942063765+00:00 stderr F E1208 17:45:55.942001 1 base_controller.go:279] 
"Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:56.078984735+00:00 stderr F E1208 17:45:56.078947 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:56.282652838+00:00 stderr F E1208 17:45:56.282590 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:56.478567758+00:00 stderr F E1208 17:45:56.478512 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:56.585297592+00:00 stderr F E1208 17:45:56.585223 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:56.678785358+00:00 stderr F E1208 17:45:56.678711 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:56.882293896+00:00 stderr F E1208 17:45:56.882230 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:57.078983541+00:00 stderr F E1208 17:45:57.078528 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:57.279869610+00:00 stderr F E1208 17:45:57.279806 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:57.482436261+00:00 stderr F E1208 17:45:57.482369 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch 
\"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:57.679470145+00:00 stderr F E1208 17:45:57.679410 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:57.869928892+00:00 stderr F E1208 17:45:57.869801 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:57.878616612+00:00 stderr F E1208 17:45:57.878576 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:58.085146562+00:00 stderr F E1208 17:45:58.085050 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:58.341605549+00:00 stderr F E1208 17:45:58.341557 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:58.521440668+00:00 stderr F E1208 17:45:58.521390 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:58.732946606+00:00 stderr F E1208 17:45:58.732275 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:59.624728924+00:00 stderr F E1208 17:45:59.624644 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:45:59.804464409+00:00 stderr F E1208 17:45:59.804338 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 
2025-12-08T17:46:00.021363709+00:00 stderr F E1208 17:46:00.021273 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:00.433638824+00:00 stderr F E1208 17:46:00.433567 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:02.187687302+00:00 stderr F E1208 17:46:02.187221 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:02.367330164+00:00 stderr F E1208 17:46:02.366919 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:02.588428990+00:00 stderr F E1208 17:46:02.588311 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.557199529+00:00 stderr F E1208 17:46:05.556735 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:07.311707092+00:00 stderr F E1208 17:46:07.311412 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:07.490100467+00:00 stderr F E1208 17:46:07.490000 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:07.716623786+00:00 stderr F E1208 17:46:07.716243 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:15.802010974+00:00 stderr F E1208 17:46:15.801256 1 base_controller.go:279] "Unhandled Error" err="Installer reconciliation failed: Get 
\"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:17.555748773+00:00 stderr F E1208 17:46:17.555642 1 termination_observer.go:175] "Unhandled Error" err="key failed with : unable to list pods in \"openshift-kube-apiserver\" namespace: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:17.733865619+00:00 stderr F E1208 17:46:17.733775 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:17.962137220+00:00 stderr F E1208 17:46:17.962079 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:20.840088554+00:00 stderr F E1208 17:46:20.839287 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-apiserver-operator/leases/kube-apiserver-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:20.841222208+00:00 stderr F E1208 17:46:20.841155 1 leaderelection.go:436] error retrieving resource lock openshift-kube-apiserver-operator/kube-apiserver-operator-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-apiserver-operator/leases/kube-apiserver-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:22.207022914+00:00 stderr F E1208 17:46:22.206972 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:22.222063825+00:00 stderr F E1208 17:46:22.221997 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:22.253155538+00:00 stderr F E1208 17:46:22.253097 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:22.295236631+00:00 stderr F E1208 17:46:22.295180 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:22.344631694+00:00 stderr F E1208 17:46:22.344584 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:22.401522341+00:00 stderr F E1208 17:46:22.401431 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-InstallerState reconciliation 
failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.406092749+00:00 stderr F E1208 17:46:22.406045 1 base_controller.go:279] "Unhandled Error" err="kube-apiserver-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.435964225+00:00 stderr F E1208 17:46:22.435852 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:22.605092312+00:00 stderr F E1208 17:46:22.605027 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:22.709653400+00:00 stderr F E1208 17:46:22.709600 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.721658900+00:00 stderr F E1208 17:46:22.721585 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.741581878+00:00 stderr F E1208 17:46:22.741518 1 base_controller.go:279] "Unhandled Error" 
err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.801592760+00:00 stderr F E1208 17:46:22.801530 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.203091091+00:00 stderr F E1208 17:46:23.203007 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.405378093+00:00 stderr F E1208 17:46:23.405317 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:24.002512596+00:00 stderr F E1208 17:46:24.001809 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.217756107+00:00 stderr F E1208 17:46:24.217608 1 base_controller.go:279] "Unhandled Error" err="KubeAPIServerStaticResources-StaticResources reconciliation failed: [\"assets/kube-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/services/apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:check-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrole-node-reader.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:check-endpoints-node-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrole-crd-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:check-endpoints-crd-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrolebinding-auth-delegator.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:kube-apiserver-check-endpoints-auth-delegator\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrolebinding-node-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:kube-apiserver-check-endpoints-node-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrolebinding-crd-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:kube-apiserver-check-endpoints-crd-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-rolebinding-kube-system.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:controller:kube-apiserver-check-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-apiserver/rolebindings/system:openshift:controller:check-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/delegated-incluster-authentication-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/authentication-reader-for-authenticated-users\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/localhost-recovery-client-crb.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-apiserver-recovery\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/localhost-recovery-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/localhost-recovery-client\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/apiserver.openshift.io_apirequestcount.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/apirequestcounts.apiserver.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/storage-version-migration-flowschema.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-flowschema-storage-version-migration\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/storage-version-migration-prioritylevelconfiguration.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-prioritylevel-storage-version-migration\": dial tcp 10.217.4.1:443: connect: connection refused, 
\"assets/kube-apiserver/storage-version-migration-flowschema-v1beta3.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-flowschema-storage-version-migration-v1beta3\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/storage-version-migration-prioritylevelconfiguration-v1beta3.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-prioritylevel-storage-version-migration-v1beta3\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/api-usage.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/api-usage\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/audit-errors.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/audit-errors\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/kube-apiserver-requests.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/kube-apiserver-requests\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/kube-apiserver-slos-basic.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/kube-apiserver-slos-basic\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/podsecurity-violations.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/podsecurity\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/kube-apiserver-slos-extended.yaml\" (string): Delete \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/kube-apiserver-slos-extended\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeAPIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=KubeAPIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:24.603368212+00:00 stderr F E1208 17:46:24.603276 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:24.800656053+00:00 stderr F E1208 17:46:24.800571 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager 
\"kube-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.601015776+00:00 stderr F E1208 17:46:25.600946 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.800259097+00:00 stderr F E1208 17:46:25.800188 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.204657486+00:00 stderr F E1208 17:46:26.204584 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:26.431002239+00:00 stderr F E1208 17:46:26.430939 1 base_controller.go:279] "Unhandled Error" err="KubeAPIServerStaticResources-StaticResources reconciliation failed: [\"assets/kube-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/services/apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:check-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrole-node-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:check-endpoints-node-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrole-crd-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:check-endpoints-crd-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrolebinding-auth-delegator.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:kube-apiserver-check-endpoints-auth-delegator\": dial tcp 10.217.4.1:443: 
connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrolebinding-node-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:kube-apiserver-check-endpoints-node-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrolebinding-crd-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:kube-apiserver-check-endpoints-crd-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-rolebinding-kube-system.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:controller:kube-apiserver-check-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-apiserver/rolebindings/system:openshift:controller:check-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/delegated-incluster-authentication-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/authentication-reader-for-authenticated-users\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/localhost-recovery-client-crb.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-apiserver-recovery\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/localhost-recovery-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/localhost-recovery-client\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/apiserver.openshift.io_apirequestcount.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/apirequestcounts.apiserver.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/storage-version-migration-flowschema.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-flowschema-storage-version-migration\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/storage-version-migration-prioritylevelconfiguration.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-prioritylevel-storage-version-migration\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/storage-version-migration-flowschema-v1beta3.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-flowschema-storage-version-migration-v1beta3\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/storage-version-migration-prioritylevelconfiguration-v1beta3.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-prioritylevel-storage-version-migration-v1beta3\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/api-usage.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/api-usage\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/audit-errors.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/audit-errors\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/kube-apiserver-requests.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/kube-apiserver-requests\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/kube-apiserver-slos-basic.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/kube-apiserver-slos-basic\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/podsecurity-violations.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/podsecurity\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/kube-apiserver-slos-extended.yaml\" (string): Delete \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/kube-apiserver-slos-extended\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeAPIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=KubeAPIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:26.800981994+00:00 stderr F E1208 17:46:26.800923 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.599783851+00:00 stderr F E1208 17:46:27.599721 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.400369091+00:00 stderr F E1208 17:46:28.400280 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=kube-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.599422746+00:00 stderr F E1208 17:46:28.599078 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.816602545+00:00 stderr F E1208 17:46:28.816195 1 base_controller.go:279] "Unhandled Error" err="KubeAPIServerStaticResources-StaticResources reconciliation failed: [\"assets/kube-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver\": dial tcp 
10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/services/apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:check-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrole-node-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:check-endpoints-node-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrole-crd-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:controller:check-endpoints-crd-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrolebinding-auth-delegator.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:kube-apiserver-check-endpoints-auth-delegator\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrolebinding-node-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:kube-apiserver-check-endpoints-node-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-clusterrolebinding-crd-reader.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:controller:kube-apiserver-check-endpoints-crd-reader\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-rolebinding-kube-system.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:controller:kube-apiserver-check-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/check-endpoints-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-apiserver/rolebindings/system:openshift:controller:check-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/delegated-incluster-authentication-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/authentication-reader-for-authenticated-users\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/localhost-recovery-client-crb.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-apiserver-recovery\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/localhost-recovery-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/localhost-recovery-client\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/apiserver.openshift.io_apirequestcount.yaml\" (string): Get \"https://10.217.4.1:443/apis/apiextensions.k8s.io/v1/customresourcedefinitions/apirequestcounts.apiserver.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, 
\"assets/kube-apiserver/storage-version-migration-flowschema.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-flowschema-storage-version-migration\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/storage-version-migration-prioritylevelconfiguration.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-prioritylevel-storage-version-migration\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/storage-version-migration-flowschema-v1beta3.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-flowschema-storage-version-migration-v1beta3\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-apiserver/storage-version-migration-prioritylevelconfiguration-v1beta3.yaml\" (string): Get \"https://10.217.4.1:443/apis/migration.k8s.io/v1alpha1/storageversionmigrations/flowcontrol-prioritylevel-storage-version-migration-v1beta3\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/api-usage.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/api-usage\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/audit-errors.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/audit-errors\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/kube-apiserver-requests.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/kube-apiserver-requests\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/kube-apiserver-slos-basic.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/kube-apiserver-slos-basic\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/podsecurity-violations.yaml\" (string): Get \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/podsecurity\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/alerts/kube-apiserver-slos-extended.yaml\" (string): Delete \"https://10.217.4.1:443/apis/monitoring.coreos.com/v1/namespaces/openshift-kube-apiserver/prometheusrules/kube-apiserver-slos-extended\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeAPIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=KubeAPIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:29.201635371+00:00 stderr F E1208 17:46:29.201576 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-apiserver-installer\": dial tcp 10.217.4.1:443: connect: connection 
refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeapiservers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:30.683039537+00:00 stderr F E1208 17:46:30.682377 1 base_controller.go:279] "Unhandled Error" err="KubeAPIServerStaticResources-StaticResources reconciliation failed: \"assets/kube-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:32.928242799+00:00 stderr F I1208 17:46:32.928157 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:35.207517882+00:00 stderr F I1208 17:46:35.207441 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:38.041066154+00:00 stderr F I1208 17:46:38.040392 1 termination_observer.go:130] Observed termination of API server pod "kube-apiserver-crc" at 2025-12-08 17:46:10 +0000 UTC 2025-12-08T17:46:59.633397015+00:00 stderr F I1208 17:46:59.632665 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:59.888508915+00:00 stderr F I1208 17:46:59.888196 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:00.810901252+00:00 stderr F I1208 17:47:00.810449 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:01.599908119+00:00 stderr F I1208 17:47:01.599757 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:01.976719521+00:00 stderr F I1208 17:47:01.976296 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:02.678698078+00:00 stderr F I1208 17:47:02.678599 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:03.821355558+00:00 stderr F I1208 17:47:03.820382 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:03.997309337+00:00 stderr F I1208 17:47:03.997236 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:05.212269003+00:00 stderr F I1208 17:47:05.212140 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:08.761573492+00:00 stderr F I1208 17:47:08.761492 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:09.628027537+00:00 stderr F I1208 17:47:09.627704 1 reflector.go:430] "Caches populated" type="*v1.Image" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:10.041499844+00:00 stderr F I1208 17:47:10.041169 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 
2025-12-08T17:47:10.610107963+00:00 stderr F I1208 17:47:10.610046 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:10.610227277+00:00 stderr F I1208 17:47:10.610198 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:10.611654341+00:00 stderr F I1208 17:47:10.611519 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:11.017193088+00:00 stderr F I1208 17:47:11.017029 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:11.809918202+00:00 stderr F I1208 17:47:11.809769 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:13.435174773+00:00 stderr F I1208 17:47:13.435091 1 reflector.go:430] "Caches populated" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:13.810334563+00:00 stderr F I1208 17:47:13.810270 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:14.010920678+00:00 stderr F I1208 17:47:14.010848 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:14.231320225+00:00 stderr F I1208 17:47:14.231241 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:14.231453439+00:00 stderr F I1208 17:47:14.231409 1 externalloadbalancer.go:27] syncing external loadbalancer hostnames: api.crc.testing 2025-12-08T17:47:14.611907306+00:00 stderr F I1208 17:47:14.611766 1 reflector.go:430] "Caches populated" type="*v1.CustomResourceDefinition" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:14.633343301+00:00 stderr F W1208 17:47:14.633276 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:15.097515133+00:00 stderr F I1208 17:47:15.097447 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:15.637114208+00:00 stderr F W1208 17:47:15.636760 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:16.913018193+00:00 stderr F I1208 17:47:16.912623 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:17.096555530+00:00 stderr F I1208 17:47:17.096430 1 reflector.go:430] "Caches populated" type="*v1.KubeAPIServer" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:17.641124403+00:00 stderr F E1208 17:47:17.641053 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 
2025-12-08T17:47:17.700467241+00:00 stderr F I1208 17:47:17.700383 1 helpers.go:188] lister was stale at resourceVersion=38622, live get showed resourceVersion=38946 2025-12-08T17:47:17.723556788+00:00 stderr F E1208 17:47:17.723481 1 base_controller.go:279] "Unhandled Error" err="webhookSupportabilityController reconciliation failed: KubeAPIServer.operator.openshift.io \"cluster\" is invalid: status.nodeStatuses[0].currentRevision: Invalid value: \"integer\": must only increase" 2025-12-08T17:47:17.742562866+00:00 stderr F W1208 17:47:17.742501 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:18.746779138+00:00 stderr F W1208 17:47:18.746685 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:19.982976022+00:00 stderr F I1208 17:47:19.982406 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:20.555820575+00:00 stderr F I1208 17:47:20.555416 1 reflector.go:430] "Caches populated" type="*v1.Authentication" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:20.752163616+00:00 stderr F E1208 17:47:20.752070 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:20.816436159+00:00 stderr F I1208 17:47:20.816339 1 helpers.go:188] lister was stale at resourceVersion=38622, live get showed resourceVersion=38946 2025-12-08T17:47:20.840770395+00:00 stderr F E1208 17:47:20.840654 1 base_controller.go:279] "Unhandled Error" err="webhookSupportabilityController reconciliation failed: KubeAPIServer.operator.openshift.io \"cluster\" is invalid: status.nodeStatuses[0].currentRevision: Invalid value: \"integer\": must only increase" 2025-12-08T17:47:20.869187300+00:00 stderr F W1208 17:47:20.869089 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:21.085810999+00:00 stderr F I1208 17:47:21.085712 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:21.085902432+00:00 stderr F I1208 17:47:21.085830 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:21.087216383+00:00 stderr F I1208 17:47:21.087140 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:21.312436433+00:00 stderr F I1208 17:47:21.312345 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:21.873940708+00:00 stderr F W1208 17:47:21.873696 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": 
dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:23.879912435+00:00 stderr F E1208 17:47:23.878311 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:23.945895261+00:00 stderr F I1208 17:47:23.945018 1 helpers.go:188] lister was stale at resourceVersion=38622, live get showed resourceVersion=38946 2025-12-08T17:47:23.974902014+00:00 stderr F E1208 17:47:23.974359 1 base_controller.go:279] "Unhandled Error" err="webhookSupportabilityController reconciliation failed: KubeAPIServer.operator.openshift.io \"cluster\" is invalid: status.nodeStatuses[0].currentRevision: Invalid value: \"integer\": must only increase" 2025-12-08T17:47:24.000896523+00:00 stderr F W1208 17:47:24.000223 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:25.003641199+00:00 stderr F W1208 17:47:25.003082 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:27.009028817+00:00 stderr F E1208 17:47:27.007757 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:27.057548464+00:00 stderr F I1208 17:47:27.057476 1 helpers.go:188] lister was stale at resourceVersion=38622, live get showed resourceVersion=38946 2025-12-08T17:47:27.079546706+00:00 stderr F E1208 17:47:27.079472 1 base_controller.go:279] "Unhandled Error" err="webhookSupportabilityController reconciliation failed: KubeAPIServer.operator.openshift.io \"cluster\" is invalid: status.nodeStatuses[0].currentRevision: Invalid value: \"integer\": must only increase" 2025-12-08T17:47:27.129529140+00:00 stderr F W1208 17:47:27.129453 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:28.133699130+00:00 stderr F W1208 17:47:28.133242 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:30.137411824+00:00 stderr F E1208 17:47:30.137346 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:30.177027591+00:00 stderr F I1208 17:47:30.176951 1 helpers.go:188] lister was stale at resourceVersion=38622, live get showed resourceVersion=38946 2025-12-08T17:47:30.197526007+00:00 stderr F E1208 17:47:30.197495 1 base_controller.go:279] "Unhandled Error" err="webhookSupportabilityController reconciliation failed: KubeAPIServer.operator.openshift.io \"cluster\" is invalid: status.nodeStatuses[0].currentRevision: Invalid value: 
\"integer\": must only increase" 2025-12-08T17:47:30.286561700+00:00 stderr F W1208 17:47:30.286516 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:31.289823652+00:00 stderr F W1208 17:47:31.289734 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:33.295264812+00:00 stderr F E1208 17:47:33.294413 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:33.342927592+00:00 stderr F I1208 17:47:33.342767 1 helpers.go:188] lister was stale at resourceVersion=38622, live get showed resourceVersion=38946 2025-12-08T17:47:33.367852436+00:00 stderr F E1208 17:47:33.367765 1 base_controller.go:279] "Unhandled Error" err="webhookSupportabilityController reconciliation failed: KubeAPIServer.operator.openshift.io \"cluster\" is invalid: status.nodeStatuses[0].currentRevision: Invalid value: \"integer\": must only increase" 2025-12-08T17:47:33.540299535+00:00 stderr F W1208 17:47:33.540207 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:34.544695382+00:00 stderr F W1208 17:47:34.544584 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:35.074338225+00:00 stderr F I1208 17:47:35.073558 1 reflector.go:430] "Caches populated" type="*v1.PodDisruptionBudget" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:36.549213552+00:00 stderr F E1208 17:47:36.549130 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:36.602203640+00:00 stderr F I1208 17:47:36.601605 1 helpers.go:188] lister was stale at resourceVersion=38622, live get showed resourceVersion=38946 2025-12-08T17:47:36.635064005+00:00 stderr F E1208 17:47:36.634988 1 base_controller.go:279] "Unhandled Error" err="webhookSupportabilityController reconciliation failed: KubeAPIServer.operator.openshift.io \"cluster\" is invalid: status.nodeStatuses[0].currentRevision: Invalid value: \"integer\": must only increase" 2025-12-08T17:47:36.964329089+00:00 stderr F W1208 17:47:36.964270 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:37.160387941+00:00 stderr F I1208 17:47:37.160319 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 
2025-12-08T17:47:37.319291824+00:00 stderr F I1208 17:47:37.319192 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:37.969524212+00:00 stderr F W1208 17:47:37.968967 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:38.438109132+00:00 stderr F I1208 17:47:38.437560 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:39.841017564+00:00 stderr F I1208 17:47:39.840397 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:39.972499503+00:00 stderr F E1208 17:47:39.972425 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:40.021242407+00:00 stderr F I1208 17:47:40.021172 1 helpers.go:188] lister was stale at resourceVersion=38622, live get showed resourceVersion=38946 2025-12-08T17:47:40.053556065+00:00 stderr F E1208 17:47:40.053481 1 base_controller.go:279] "Unhandled Error" err="webhookSupportabilityController reconciliation failed: KubeAPIServer.operator.openshift.io \"cluster\" is invalid: status.nodeStatuses[0].currentRevision: Invalid value: \"integer\": must only increase" 2025-12-08T17:47:40.059214153+00:00 stderr F W1208 17:47:40.058636 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:40.912214794+00:00 stderr F I1208 17:47:40.912062 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:41.063562479+00:00 stderr F W1208 17:47:41.063467 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:41.699304051+00:00 stderr F I1208 17:47:41.699205 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:43.067980566+00:00 stderr F E1208 17:47:43.067869 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:43.127642634+00:00 stderr F I1208 17:47:43.127527 1 helpers.go:188] lister was stale at resourceVersion=38622, live get showed resourceVersion=38946 2025-12-08T17:47:43.159509168+00:00 stderr F E1208 17:47:43.159396 1 base_controller.go:279] "Unhandled Error" err="webhookSupportabilityController reconciliation failed: KubeAPIServer.operator.openshift.io \"cluster\" is invalid: status.nodeStatuses[0].currentRevision: Invalid value: \"integer\": must only increase" 2025-12-08T17:47:43.174158259+00:00 stderr F W1208 17:47:43.174062 1 degraded_webhook.go:147] failed to connect to webhook 
"monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:43.333209646+00:00 stderr F I1208 17:47:43.333127 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:43.418102438+00:00 stderr F I1208 17:47:43.418009 1 reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:43.418296324+00:00 stderr F I1208 17:47:43.418206 1 servicehostname.go:46] syncing servicenetwork hostnames: [10.217.4.1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local openshift openshift.default openshift.default.svc openshift.default.svc.cluster.local] 2025-12-08T17:47:44.178646228+00:00 stderr F W1208 17:47:44.178374 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:44.754651470+00:00 stderr F I1208 17:47:44.754576 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:45.597158752+00:00 stderr F I1208 17:47:45.596820 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=kubeapiservers" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:45.604655058+00:00 stderr F I1208 17:47:45.604584 1 status_controller.go:230] clusteroperator/kube-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeAPIServerStaticResourcesDegraded: \"assets/kube-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeAPIServerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap/config\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/kube-apiserver-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-pod\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:06Z","message":"NodeInstallerProgressing: 1 node is at revision 11; 0 nodes have achieved new revision 12","reason":"NodeInstaller","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:08:37Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 11; 0 nodes have achieved new revision 12","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:28Z","message":"KubeletMinorVersionUpgradeable: Kubelet and API server minor versions are synced.","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:29Z","message":"PodSecurityInconclusiveEvaluationConditionsDetected: Could not evaluate violations for namespaces: [kube-node-lease openshift openshift-apiserver-operator openshift-cloud-network-config-controller 
openshift-cluster-samples-operator openshift-cluster-storage-operator openshift-config-managed openshift-config-operator openshift-console openshift-console-operator openshift-console-user-settings openshift-controller-manager openshift-controller-manager-operator openshift-dns-operator openshift-host-network openshift-ingress-canary openshift-ingress-operator openshift-kube-controller-manager-operator openshift-kube-storage-version-migrator openshift-kube-storage-version-migrator-operator openshift-network-console openshift-network-diagnostics openshift-node openshift-route-controller-manager openshift-service-ca openshift-service-ca-operator openshift-user-workload-monitoring]","reason":"PodSecurityInconclusive_PSViolationDecisionInconclusive","status":"True","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:45.617411339+00:00 stderr F I1208 17:47:45.617318 1 status_controller.go:230] clusteroperator/kube-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeAPIServerStaticResourcesDegraded: \"assets/kube-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeAPIServerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap/config\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/kube-apiserver-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-pod\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:45:06Z","message":"NodeInstallerProgressing: 1 node is at revision 11; 0 nodes have achieved new revision 12","reason":"NodeInstaller","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:08:37Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 11; 0 nodes have achieved new revision 12","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:28Z","message":"KubeletMinorVersionUpgradeable: Kubelet and API server minor versions are synced.","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:29Z","message":"PodSecurityInconclusiveEvaluationConditionsDetected: Could not evaluate violations for namespaces: [kube-node-lease openshift openshift-apiserver-operator openshift-cloud-network-config-controller openshift-cluster-samples-operator openshift-cluster-storage-operator openshift-config-managed openshift-config-operator openshift-console openshift-console-operator openshift-console-user-settings openshift-controller-manager openshift-controller-manager-operator openshift-dns-operator openshift-host-network openshift-ingress-canary openshift-ingress-operator openshift-kube-controller-manager-operator openshift-kube-storage-version-migrator openshift-kube-storage-version-migrator-operator openshift-network-console openshift-network-diagnostics openshift-node openshift-route-controller-manager openshift-service-ca openshift-service-ca-operator openshift-user-workload-monitoring]","reason":"PodSecurityInconclusive_PSViolationDecisionInconclusive","status":"True","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:45.617483571+00:00 stderr F 
I1208 17:47:45.617460 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-apiserver changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready" to "NodeControllerDegraded: All master nodes are ready\nKubeAPIServerStaticResourcesDegraded: \"assets/kube-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeAPIServerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap/config\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/kube-apiserver-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-pod\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:45.624741430+00:00 stderr F E1208 17:47:45.624669 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-apiserver reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-apiserver\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:45.834967968+00:00 stderr F I1208 17:47:45.831118 1 status_controller.go:230] clusteroperator/kube-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeAPIServerStaticResourcesDegraded: \"assets/kube-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeAPIServerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap/config\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/kube-apiserver-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-pod\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:47:45Z","message":"NodeInstallerProgressing: 1 node is at revision 12","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:08:37Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 12","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:28Z","message":"KubeletMinorVersionUpgradeable: Kubelet and API server minor versions are synced.","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:29Z","message":"PodSecurityInconclusiveEvaluationConditionsDetected: Could not evaluate violations for namespaces: [kube-node-lease openshift openshift-apiserver-operator openshift-cloud-network-config-controller openshift-cluster-samples-operator openshift-cluster-storage-operator openshift-config-managed openshift-config-operator openshift-console openshift-console-operator openshift-console-user-settings openshift-controller-manager 
openshift-controller-manager-operator openshift-dns-operator openshift-host-network openshift-ingress-canary openshift-ingress-operator openshift-kube-controller-manager-operator openshift-kube-storage-version-migrator openshift-kube-storage-version-migrator-operator openshift-network-console openshift-network-diagnostics openshift-node openshift-route-controller-manager openshift-service-ca openshift-service-ca-operator openshift-user-workload-monitoring]","reason":"PodSecurityInconclusive_PSViolationDecisionInconclusive","status":"True","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:45.843951290+00:00 stderr F I1208 17:47:45.843834 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-apiserver changed: Progressing changed from True to False ("NodeInstallerProgressing: 1 node is at revision 12"),Available message changed from "StaticPodsAvailable: 1 nodes are active; 1 node is at revision 11; 0 nodes have achieved new revision 12" to "StaticPodsAvailable: 1 nodes are active; 1 node is at revision 12" 2025-12-08T17:47:45.972572429+00:00 stderr F I1208 17:47:45.972507 1 reflector.go:430] "Caches populated" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:46.183140558+00:00 stderr F E1208 17:47:46.183029 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:46.202249189+00:00 stderr F I1208 17:47:46.202174 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:46.229193837+00:00 stderr F W1208 17:47:46.228998 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:46.470808444+00:00 stderr F I1208 17:47:46.470722 1 status_controller.go:230] clusteroperator/kube-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready\nTargetConfigControllerDegraded: \"configmap/config\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/kube-apiserver-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-pod\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:47:45Z","message":"NodeInstallerProgressing: 1 node is at revision 12","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:08:37Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 12","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:28Z","message":"KubeletMinorVersionUpgradeable: Kubelet and API server minor versions are 
synced.","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:29Z","message":"PodSecurityInconclusiveEvaluationConditionsDetected: Could not evaluate violations for namespaces: [kube-node-lease openshift openshift-apiserver-operator openshift-cloud-network-config-controller openshift-cluster-samples-operator openshift-cluster-storage-operator openshift-config-managed openshift-config-operator openshift-console openshift-console-operator openshift-console-user-settings openshift-controller-manager openshift-controller-manager-operator openshift-dns-operator openshift-host-network openshift-ingress-canary openshift-ingress-operator openshift-kube-controller-manager-operator openshift-kube-storage-version-migrator openshift-kube-storage-version-migrator-operator openshift-network-console openshift-network-diagnostics openshift-node openshift-route-controller-manager openshift-service-ca openshift-service-ca-operator openshift-user-workload-monitoring]","reason":"PodSecurityInconclusive_PSViolationDecisionInconclusive","status":"True","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:46.481236851+00:00 stderr F I1208 17:47:46.481142 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-apiserver changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready\nKubeAPIServerStaticResourcesDegraded: \"assets/kube-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeAPIServerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap/config\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/kube-apiserver-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-pod\": dial tcp 10.217.4.1:443: connect: connection refused" to "NodeControllerDegraded: All master nodes are ready\nTargetConfigControllerDegraded: \"configmap/config\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/kube-apiserver-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-pod\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:46.998670980+00:00 stderr F I1208 17:47:46.998572 1 request.go:752] "Waited before sending request" delay="1.162942038s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" 2025-12-08T17:47:47.234154772+00:00 stderr F W1208 17:47:47.234048 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:47.249507116+00:00 stderr F I1208 17:47:47.249445 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" 
reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:48.198790279+00:00 stderr F I1208 17:47:48.198696 1 request.go:752] "Waited before sending request" delay="1.9918058s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods?labelSelector=app%3Dopenshift-kube-apiserver" 2025-12-08T17:47:48.822217594+00:00 stderr F I1208 17:47:48.822117 1 reflector.go:430] "Caches populated" type="*v1alpha1.StorageVersionMigration" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:49.238467927+00:00 stderr F E1208 17:47:49.238402 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:47:49.398797204+00:00 stderr F I1208 17:47:49.398719 1 request.go:752] "Waited before sending request" delay="1.995230228s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/revision-pruner-12-crc" 2025-12-08T17:47:49.516421127+00:00 stderr F I1208 17:47:49.516317 1 reflector.go:430] "Caches populated" type="*v1.Scheduler" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:50.851426841+00:00 stderr F I1208 17:47:50.850947 1 status_controller.go:230] clusteroperator/kube-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"NodeControllerDegraded: All master nodes are ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:47:45Z","message":"NodeInstallerProgressing: 1 node is at revision 12","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T08:08:37Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 12","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:28Z","message":"KubeletMinorVersionUpgradeable: Kubelet and API server minor versions are synced.","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:29Z","message":"PodSecurityInconclusiveEvaluationConditionsDetected: Could not evaluate violations for namespaces: [kube-node-lease openshift openshift-apiserver-operator openshift-cloud-network-config-controller openshift-cluster-samples-operator openshift-cluster-storage-operator openshift-config-managed openshift-config-operator openshift-console openshift-console-operator openshift-console-user-settings openshift-controller-manager openshift-controller-manager-operator openshift-dns-operator openshift-host-network openshift-ingress-canary openshift-ingress-operator openshift-kube-controller-manager-operator openshift-kube-storage-version-migrator openshift-kube-storage-version-migrator-operator openshift-network-console openshift-network-diagnostics openshift-node openshift-route-controller-manager openshift-service-ca openshift-service-ca-operator openshift-user-workload-monitoring]","reason":"PodSecurityInconclusive_PSViolationDecisionInconclusive","status":"True","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:50.887052612+00:00 stderr F I1208 17:47:50.885242 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-apiserver-operator", Name:"kube-apiserver-operator", UID:"3a8705c5-b62b-40a4-8e43-30f0569fa490", APIVersion:"apps/v1", 
ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-apiserver changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready\nTargetConfigControllerDegraded: \"configmap/config\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/kube-apiserver-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/configmaps/kube-apiserver-pod\": dial tcp 10.217.4.1:443: connect: connection refused" to "NodeControllerDegraded: All master nodes are ready" 2025-12-08T17:47:50.998419028+00:00 stderr F I1208 17:47:50.998328 1 request.go:752] "Waited before sending request" delay="1.161854244s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/kube-system/configmaps?resourceVersion=38689" 2025-12-08T17:47:51.000773613+00:00 stderr F I1208 17:47:51.000710 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:52.198649930+00:00 stderr F I1208 17:47:52.198532 1 request.go:752] "Waited before sending request" delay="1.336534614s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" 2025-12-08T17:47:54.401579876+00:00 stderr F I1208 17:47:54.401495 1 reflector.go:430] "Caches populated" type="*v1.OAuth" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:55.820232948+00:00 stderr F I1208 17:47:55.820126 1 reflector.go:430] "Caches populated" type="*v1.SecurityContextConstraints" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:59.549170909+00:00 stderr F I1208 17:47:59.548851 1 reflector.go:430] "Caches populated" type="monitoring.coreos.com/v1, Resource=prometheusrules" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:06.477717206+00:00 stderr F I1208 17:48:06.477151 1 reflector.go:430] "Caches populated" type="*v1.APIServer" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:09.607453318+00:00 stderr F I1208 17:48:09.606610 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:53:46.089042612+00:00 stderr F I1208 17:53:46.088470 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:54:35.511568799+00:00 stderr F I1208 17:54:35.510960 1 reflector.go:430] "Caches populated" type="*v1.Event" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:54:50.580505539+00:00 stderr F W1208 17:54:50.579832 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:54:51.584540348+00:00 stderr F W1208 17:54:51.584228 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 
2025-12-08T17:54:53.588415885+00:00 stderr F E1208 17:54:53.588106 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:10.616984352+00:00 stderr F I1208 17:55:10.615090 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:55:10.671795886+00:00 stderr F W1208 17:55:10.671733 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:11.676943786+00:00 stderr F W1208 17:55:11.676307 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:13.694505440+00:00 stderr F E1208 17:55:13.693417 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:13.743291424+00:00 stderr F W1208 17:55:13.743212 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:14.751044463+00:00 stderr F W1208 17:55:14.750701 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:16.764513918+00:00 stderr F E1208 17:55:16.762813 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:16.810015183+00:00 stderr F W1208 17:55:16.809868 1 degraded_webhook.go:147] failed to connect to webhook "prometheusrules.monitoring.rhobs" via service "obo-prometheus-operator-admission-webhook-service.openshift-operators.svc:443": dial tcp 10.217.4.107:443: connect: connection refused 2025-12-08T17:55:17.814152945+00:00 stderr F W1208 17:55:17.813573 1 degraded_webhook.go:147] failed to connect to webhook "prometheusrules.monitoring.rhobs" via service "obo-prometheus-operator-admission-webhook-service.openshift-operators.svc:443": dial tcp 10.217.4.107:443: connect: connection refused 2025-12-08T17:55:19.834448353+00:00 stderr F E1208 17:55:19.833312 1 degraded_webhook.go:68] prometheusrules.monitoring.rhobs: dial tcp 10.217.4.107:443: connect: connection refused 2025-12-08T17:55:19.864915563+00:00 stderr F W1208 17:55:19.864318 1 degraded_webhook.go:147] failed to connect to webhook "elastic-agent-validation-v1alpha1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:20.893510494+00:00 stderr F W1208 17:55:20.893442 1 degraded_webhook.go:147] failed to connect to webhook "elastic-agent-validation-v1alpha1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: 
connect: connection refused 2025-12-08T17:55:22.897848293+00:00 stderr F E1208 17:55:22.897468 1 degraded_webhook.go:68] elastic-agent-validation-v1alpha1.k8s.elastic.co: dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:22.907715558+00:00 stderr F W1208 17:55:22.907665 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:23.919934973+00:00 stderr F W1208 17:55:23.919853 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:25.923960872+00:00 stderr F E1208 17:55:25.923640 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:25.931661974+00:00 stderr F W1208 17:55:25.931607 1 degraded_webhook.go:147] failed to connect to webhook "alertmanagerconfigs.monitoring.rhobs" via service "obo-prometheus-operator-admission-webhook-service.openshift-operators.svc:443": dial tcp 10.217.4.107:443: connect: connection refused 2025-12-08T17:55:26.938917092+00:00 stderr F W1208 17:55:26.937568 1 degraded_webhook.go:147] failed to connect to webhook "alertmanagerconfigs.monitoring.rhobs" via service "obo-prometheus-operator-admission-webhook-service.openshift-operators.svc:443": dial tcp 10.217.4.107:443: connect: connection refused 2025-12-08T17:55:28.945732319+00:00 stderr F E1208 17:55:28.945669 1 degraded_webhook.go:68] alertmanagerconfigs.monitoring.rhobs: dial tcp 10.217.4.107:443: connect: connection refused 2025-12-08T17:55:28.949037370+00:00 stderr F W1208 17:55:28.948935 1 degraded_webhook.go:147] failed to connect to webhook "elastic-beat-validation-v1beta1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:29.952891665+00:00 stderr F W1208 17:55:29.952810 1 degraded_webhook.go:147] failed to connect to webhook "elastic-beat-validation-v1beta1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:31.955992950+00:00 stderr F E1208 17:55:31.955933 1 degraded_webhook.go:68] elastic-beat-validation-v1beta1.k8s.elastic.co: dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:31.957845011+00:00 stderr F W1208 17:55:31.957800 1 degraded_webhook.go:147] failed to connect to webhook "elastic-ent-validation-v1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:32.962921630+00:00 stderr F W1208 17:55:32.962063 1 degraded_webhook.go:147] failed to connect to webhook "elastic-ent-validation-v1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:34.966919969+00:00 stderr F E1208 17:55:34.966358 1 degraded_webhook.go:68] elastic-ent-validation-v1.k8s.elastic.co: dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:34.984960813+00:00 stderr F W1208 17:55:34.984586 1 
degraded_webhook.go:147] failed to connect to webhook "elastic-agent-validation-v1alpha1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:35.989070817+00:00 stderr F W1208 17:55:35.989010 1 degraded_webhook.go:147] failed to connect to webhook "elastic-agent-validation-v1alpha1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:38.000163821+00:00 stderr F E1208 17:55:37.997781 1 degraded_webhook.go:68] elastic-agent-validation-v1alpha1.k8s.elastic.co: dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:38.004734465+00:00 stderr F W1208 17:55:38.002479 1 degraded_webhook.go:147] failed to connect to webhook "elastic-apm-validation-v1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:39.014000530+00:00 stderr F W1208 17:55:39.012963 1 degraded_webhook.go:147] failed to connect to webhook "elastic-apm-validation-v1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:41.025994258+00:00 stderr F E1208 17:55:41.025696 1 degraded_webhook.go:68] elastic-apm-validation-v1.k8s.elastic.co: dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:41.028861357+00:00 stderr F W1208 17:55:41.028817 1 degraded_webhook.go:147] failed to connect to webhook "elastic-es-validation-v1.k8s.elastic.co" via service "elastic-operator-service.service-telemetry.svc:443": dial tcp 10.217.5.72:443: connect: connection refused 2025-12-08T17:55:42.099594818+00:00 stderr F W1208 17:55:42.099063 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:43.108297066+00:00 stderr F W1208 17:55:43.107751 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:45.112142582+00:00 stderr F E1208 17:55:45.111963 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:45.145113196+00:00 stderr F W1208 17:55:45.144963 1 degraded_webhook.go:147] failed to connect to webhook "webhook.cert-manager.io" via service "cert-manager-webhook.cert-manager.svc:443": dial tcp 10.217.5.149:443: connect: connection refused 2025-12-08T17:55:46.149985360+00:00 stderr F W1208 17:55:46.149315 1 degraded_webhook.go:147] failed to connect to webhook "webhook.cert-manager.io" via service "cert-manager-webhook.cert-manager.svc:443": dial tcp 10.217.5.149:443: connect: connection refused 2025-12-08T17:55:48.238335523+00:00 stderr F W1208 17:55:48.238283 1 degraded_webhook.go:147] failed to connect to webhook "webhook.cert-manager.io" via service "cert-manager-webhook.cert-manager.svc:443": dial tcp 10.217.5.149:443: connect: connection refused 2025-12-08T17:55:49.244184854+00:00 stderr F W1208 17:55:49.244143 1 degraded_webhook.go:147] failed to connect 
to webhook "webhook.cert-manager.io" via service "cert-manager-webhook.cert-manager.svc:443": dial tcp 10.217.5.149:443: connect: connection refused 2025-12-08T17:55:51.256762908+00:00 stderr F W1208 17:55:51.256689 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:52.262379812+00:00 stderr F W1208 17:55:52.262272 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:54.266366261+00:00 stderr F E1208 17:55:54.265832 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:55:54.349570794+00:00 stderr F W1208 17:55:54.349492 1 degraded_webhook.go:147] failed to connect to webhook "webhook.cert-manager.io" via service "cert-manager-webhook.cert-manager.svc:443": dial tcp 10.217.5.149:443: connect: connection refused 2025-12-08T17:55:55.353698037+00:00 stderr F W1208 17:55:55.353640 1 degraded_webhook.go:147] failed to connect to webhook "webhook.cert-manager.io" via service "cert-manager-webhook.cert-manager.svc:443": dial tcp 10.217.5.149:443: connect: connection refused 2025-12-08T17:55:57.401981892+00:00 stderr F W1208 17:55:57.401863 1 degraded_webhook.go:147] failed to connect to webhook "webhook.cert-manager.io" via service "cert-manager-webhook.cert-manager.svc:443": dial tcp 10.217.5.149:443: connect: connection refused 2025-12-08T17:55:58.406807424+00:00 stderr F W1208 17:55:58.406331 1 degraded_webhook.go:147] failed to connect to webhook "webhook.cert-manager.io" via service "cert-manager-webhook.cert-manager.svc:443": dial tcp 10.217.5.149:443: connect: connection refused 2025-12-08T17:56:00.426734030+00:00 stderr F W1208 17:56:00.426657 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:56:01.431982884+00:00 stderr F W1208 17:56:01.431424 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:56:03.443005356+00:00 stderr F E1208 17:56:03.441845 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:56:03.496249307+00:00 stderr F W1208 17:56:03.496135 1 degraded_webhook.go:147] failed to connect to webhook "webhook.cert-manager.io" via service "cert-manager-webhook.cert-manager.svc:443": dial tcp 10.217.5.149:443: connect: connection refused 2025-12-08T17:56:04.562241008+00:00 stderr F W1208 17:56:04.562007 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 
10.217.4.10:53: no such host 2025-12-08T17:56:05.568952332+00:00 stderr F W1208 17:56:05.565533 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:56:07.569418774+00:00 stderr F E1208 17:56:07.569347 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:56:07.914012480+00:00 stderr F W1208 17:56:07.913954 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:56:08.919959183+00:00 stderr F W1208 17:56:08.918203 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:56:10.923872259+00:00 stderr F E1208 17:56:10.923412 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:13.536980952+00:00 stderr F W1208 17:57:13.536375 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:14.236929039+00:00 stderr F I1208 17:57:14.236802 1 externalloadbalancer.go:27] syncing external loadbalancer hostnames: api.crc.testing 2025-12-08T17:57:14.542568820+00:00 stderr F W1208 17:57:14.542469 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:16.546657801+00:00 stderr F E1208 17:57:16.546288 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:16.623371942+00:00 stderr F W1208 17:57:16.623283 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:17.630493329+00:00 stderr F W1208 17:57:17.630132 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:19.635704279+00:00 stderr F E1208 17:57:19.635619 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:28.607723628+00:00 stderr F W1208 17:57:28.607175 1 degraded_webhook.go:147] failed to connect 
to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:29.613498740+00:00 stderr F W1208 17:57:29.612827 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:31.618678940+00:00 stderr F E1208 17:57:31.618622 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:37.635701147+00:00 stderr F W1208 17:57:37.635026 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:38.643525434+00:00 stderr F W1208 17:57:38.643455 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:40.648678329+00:00 stderr F E1208 17:57:40.648000 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:40.740705707+00:00 stderr F W1208 17:57:40.740620 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:41.744636453+00:00 stderr F W1208 17:57:41.744565 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:43.418751842+00:00 stderr F I1208 17:57:43.418323 1 servicehostname.go:46] syncing servicenetwork hostnames: [10.217.4.1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local openshift openshift.default openshift.default.svc openshift.default.svc.cluster.local] 2025-12-08T17:57:43.749811379+00:00 stderr F E1208 17:57:43.749460 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:45.988100679+00:00 stderr F W1208 17:57:45.988018 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:46.991946975+00:00 stderr F W1208 17:57:46.991846 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no 
such host 2025-12-08T17:57:49.009969001+00:00 stderr F E1208 17:57:49.009506 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:49.128544616+00:00 stderr F W1208 17:57:49.128473 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:50.137823232+00:00 stderr F W1208 17:57:50.137619 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:52.142290698+00:00 stderr F E1208 17:57:52.142069 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:52.307773625+00:00 stderr F W1208 17:57:52.307704 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:53.312768200+00:00 stderr F W1208 17:57:53.312185 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:55.316455196+00:00 stderr F E1208 17:57:55.316260 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:55.911711032+00:00 stderr F W1208 17:57:55.911638 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:56.917205530+00:00 stderr F W1208 17:57:56.916609 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:58.922357483+00:00 stderr F E1208 17:57:58.922172 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:57:58.996104180+00:00 stderr F W1208 17:57:58.996049 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:00.000695284+00:00 stderr F W1208 17:58:00.000619 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup 
cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:02.009776170+00:00 stderr F E1208 17:58:02.009116 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:07.278236887+00:00 stderr F W1208 17:58:07.276957 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:08.282484223+00:00 stderr F W1208 17:58:08.282372 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:10.291313572+00:00 stderr F E1208 17:58:10.291058 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:10.383489705+00:00 stderr F W1208 17:58:10.383411 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:11.387590976+00:00 stderr F W1208 17:58:11.387339 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:13.398622273+00:00 stderr F E1208 17:58:13.398395 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:14.308677054+00:00 stderr F W1208 17:58:14.308430 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:15.318137314+00:00 stderr F W1208 17:58:15.317255 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:17.322199729+00:00 stderr F E1208 17:58:17.322157 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:17.342015401+00:00 stderr F W1208 17:58:17.341973 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:18.345856137+00:00 stderr F W1208 17:58:18.345791 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service 
"cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:58:20.350067086+00:00 stderr F E1208 17:58:20.349600 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:59:05.615767465+00:00 stderr F W1208 17:59:05.615399 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:59:06.621446860+00:00 stderr F W1208 17:59:06.619870 1 degraded_webhook.go:147] failed to connect to webhook "monitoringconfigmaps.openshift.io" via service "cluster-monitoring-operator.openshift-monitoring.svc:8443": dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T17:59:08.625546359+00:00 stderr F E1208 17:59:08.625026 1 degraded_webhook.go:68] monitoringconfigmaps.openshift.io: dial tcp: lookup cluster-monitoring-operator.openshift-monitoring.svc on 10.217.4.10:53: no such host 2025-12-08T18:02:09.093663538+00:00 stderr F I1208 18:02:09.091808 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:02:59.619252836+00:00 stderr F I1208 18:02:59.618551 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" ././@LongLink0000644000000000000000000000027300000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611513033052 5ustar zuulzuul././@LongLink0000644000000000000000000000031400000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427/catalog-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611521033051 5ustar zuulzuul././@LongLink0000644000000000000000000000032100000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427/catalog-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000644000175000017500000337133015115611513033066 0ustar zuulzuul2025-12-08T17:44:20.340957834+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="log level info" 2025-12-08T17:44:20.340957834+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="TLS keys set, using https for metrics" 2025-12-08T17:44:20.341152781+00:00 stderr F W1208 17:44:20.341128 1 client_config.go:667] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 
2025-12-08T17:44:20.342842886+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="Using in-cluster kube client config" 2025-12-08T17:44:20.352540601+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="Using in-cluster kube client config" 2025-12-08T17:44:20.355447460+00:00 stderr F W1208 17:44:20.354958 1 client_config.go:667] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 2025-12-08T17:44:20.382537179+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="skipping irrelevant gvr" gvr="apps/v1, Resource=deployments" 2025-12-08T17:44:20.382586740+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="skipping irrelevant gvr" gvr="rbac.authorization.k8s.io/v1, Resource=clusterroles" 2025-12-08T17:44:20.382623251+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="skipping irrelevant gvr" gvr="rbac.authorization.k8s.io/v1, Resource=clusterrolebindings" 2025-12-08T17:44:20.434461885+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="detected ability to filter informers" canFilter=true 2025-12-08T17:44:20.440142600+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="registering owner reference fixer" gvr="/v1, Resource=services" 2025-12-08T17:44:20.440142600+00:00 stderr F W1208 17:44:20.438519 1 client_config.go:667] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 2025-12-08T17:44:20.440425408+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="connection established. cluster-version: v1.33.5" 2025-12-08T17:44:20.440465289+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="operator ready" 2025-12-08T17:44:20.440485660+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="starting informers..." 2025-12-08T17:44:20.440552191+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="informers started" 2025-12-08T17:44:20.440573622+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="waiting for caches to sync..." 2025-12-08T17:44:20.542329247+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="starting workers..." 2025-12-08T17:44:20.544013794+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="connection established. cluster-version: v1.33.5" 2025-12-08T17:44:20.544053645+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="operator ready" 2025-12-08T17:44:20.544053645+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="starting informers..." 2025-12-08T17:44:20.544088586+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="informers started" 2025-12-08T17:44:20.544111526+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="waiting for caches to sync..." 
2025-12-08T17:44:20.546890382+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="resolving sources" id=7AN9M namespace=default 2025-12-08T17:44:20.546890382+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="checking if subscriptions need update" id=7AN9M namespace=default 2025-12-08T17:44:20.546890382+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="resolving sources" id=uBQW8 namespace=hostpath-provisioner 2025-12-08T17:44:20.546890382+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="checking if subscriptions need update" id=uBQW8 namespace=hostpath-provisioner 2025-12-08T17:44:20.561833189+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="No subscriptions were found in namespace hostpath-provisioner" id=uBQW8 namespace=hostpath-provisioner 2025-12-08T17:44:20.561833189+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="resolving sources" id=zaRMM namespace=kube-node-lease 2025-12-08T17:44:20.561833189+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="checking if subscriptions need update" id=zaRMM namespace=kube-node-lease 2025-12-08T17:44:20.565944322+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=ctJSW 2025-12-08T17:44:20.565944322+00:00 stderr F time="2025-12-08T17:44:20Z" level=error msg="registry service not healthy: one or more required resources are missing" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=ctJSW isCurrentServiceAccountNil=false isServiceNil=false numCurrentPods=0 2025-12-08T17:44:20.565944322+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="No subscriptions were found in namespace default" id=7AN9M namespace=default 2025-12-08T17:44:20.565944322+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="resolving sources" id=YQv/i namespace=kube-public 2025-12-08T17:44:20.565944322+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="checking if subscriptions need update" id=YQv/i namespace=kube-public 2025-12-08T17:44:20.569051137+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=Objd1 2025-12-08T17:44:20.569051137+00:00 stderr F time="2025-12-08T17:44:20Z" level=error msg="registry service not healthy: one or more required resources are missing" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=Objd1 isCurrentServiceAccountNil=false isServiceNil=false numCurrentPods=0 2025-12-08T17:44:20.647868936+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="starting workers..." 
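The "registry service not healthy: one or more required resources are missing" errors above are logged while the catalog source registry pods in openshift-marketplace are still being created, and they normally clear once those pods pass their readiness probes. A rough way to confirm the catalogs settle, assuming the usual OLM conventions; the olm.catalogSource pod label and the connectionState field on the CatalogSource status are assumptions here, not something shown in this log:

# Registry pods backing a given catalog source (label assumed to be set by OLM)
oc -n openshift-marketplace get pods -l olm.catalogSource=community-operators
# Connection state reported on the CatalogSource itself (field name assumed)
oc -n openshift-marketplace get catalogsource community-operators -o jsonpath='{.status.connectionState.lastObservedState}{"\n"}'
# The value here should mirror the state.Key/state.State lines that appear later in this log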
2025-12-08T17:44:20.662099274+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="No subscriptions were found in namespace kube-node-lease" id=zaRMM namespace=kube-node-lease 2025-12-08T17:44:20.662854765+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="resolving sources" id=CEchG namespace=kube-system 2025-12-08T17:44:20.662854765+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="No subscriptions were found in namespace kube-public" id=YQv/i namespace=kube-public 2025-12-08T17:44:20.662887816+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="resolving sources" id=kHt9+ namespace=openshift 2025-12-08T17:44:20.662887816+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="checking if subscriptions need update" id=kHt9+ namespace=openshift 2025-12-08T17:44:20.663121212+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="checking if subscriptions need update" id=CEchG namespace=kube-system 2025-12-08T17:44:20.761636320+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="No subscriptions were found in namespace openshift" id=kHt9+ namespace=openshift 2025-12-08T17:44:20.761636320+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="resolving sources" id=24Ikw namespace=openshift-apiserver 2025-12-08T17:44:20.761636320+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="checking if subscriptions need update" id=24Ikw namespace=openshift-apiserver 2025-12-08T17:44:20.761636320+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="No subscriptions were found in namespace kube-system" id=CEchG namespace=kube-system 2025-12-08T17:44:20.761636320+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="resolving sources" id=X/6aM namespace=openshift-apiserver-operator 2025-12-08T17:44:20.761636320+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="checking if subscriptions need update" id=X/6aM namespace=openshift-apiserver-operator 2025-12-08T17:44:20.868998368+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="No subscriptions were found in namespace openshift-apiserver" id=24Ikw namespace=openshift-apiserver 2025-12-08T17:44:20.868998368+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="resolving sources" id=rpxuy namespace=openshift-authentication 2025-12-08T17:44:20.868998368+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="checking if subscriptions need update" id=rpxuy namespace=openshift-authentication 2025-12-08T17:44:20.958266343+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="No subscriptions were found in namespace openshift-apiserver-operator" id=X/6aM namespace=openshift-apiserver-operator 2025-12-08T17:44:20.958266343+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="resolving sources" id=ePLhx namespace=openshift-authentication-operator 2025-12-08T17:44:20.958266343+00:00 stderr F time="2025-12-08T17:44:20Z" level=info msg="checking if subscriptions need update" id=ePLhx namespace=openshift-authentication-operator 2025-12-08T17:44:21.064159092+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="No subscriptions were found in namespace openshift-authentication" id=rpxuy namespace=openshift-authentication 2025-12-08T17:44:21.064212963+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="resolving sources" id=fiZcC namespace=openshift-cloud-network-config-controller 2025-12-08T17:44:21.064212963+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="checking if subscriptions need update" id=fiZcC 
namespace=openshift-cloud-network-config-controller 2025-12-08T17:44:21.259951523+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="No subscriptions were found in namespace openshift-authentication-operator" id=ePLhx namespace=openshift-authentication-operator 2025-12-08T17:44:21.259951523+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="resolving sources" id=gAkza namespace=openshift-cloud-platform-infra 2025-12-08T17:44:21.259951523+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="checking if subscriptions need update" id=gAkza namespace=openshift-cloud-platform-infra 2025-12-08T17:44:21.460290507+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=ctJSW 2025-12-08T17:44:21.460290507+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="registry pods invalid, need to overwrite" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=ctJSW 2025-12-08T17:44:21.460369759+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="creating desired pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=ctJSW pod.name= pod.namespace=openshift-marketplace 2025-12-08T17:44:21.461913652+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="No subscriptions were found in namespace openshift-cloud-network-config-controller" id=fiZcC namespace=openshift-cloud-network-config-controller 2025-12-08T17:44:21.461913652+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="resolving sources" id=XjZUa namespace=openshift-cluster-machine-approver 2025-12-08T17:44:21.461913652+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="checking if subscriptions need update" id=XjZUa namespace=openshift-cluster-machine-approver 2025-12-08T17:44:21.660312692+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="No subscriptions were found in namespace openshift-cloud-platform-infra" id=gAkza namespace=openshift-cloud-platform-infra 2025-12-08T17:44:21.660312692+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="resolving sources" id=61ZXu namespace=openshift-cluster-samples-operator 2025-12-08T17:44:21.660312692+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="checking if subscriptions need update" id=61ZXu namespace=openshift-cluster-samples-operator 2025-12-08T17:44:21.660631322+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=Objd1 2025-12-08T17:44:21.660631322+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="registry pods invalid, need to overwrite" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=Objd1 2025-12-08T17:44:21.660715034+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="creating desired pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=Objd1 pod.name= pod.namespace=openshift-marketplace 2025-12-08T17:44:21.854188672+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="No subscriptions were found in namespace openshift-cluster-machine-approver" id=XjZUa namespace=openshift-cluster-machine-approver 2025-12-08T17:44:21.854226993+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="resolving sources" id=MZJ6l 
namespace=openshift-cluster-storage-operator 2025-12-08T17:44:21.854226993+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="checking if subscriptions need update" id=MZJ6l namespace=openshift-cluster-storage-operator 2025-12-08T17:44:21.878026891+00:00 stderr F I1208 17:44:21.876929 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:21.878026891+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="catalog update required at 2025-12-08 17:44:21.877341283 +0000 UTC m=+1.839775755" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=ctJSW 2025-12-08T17:44:22.053231661+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="No subscriptions were found in namespace openshift-cluster-samples-operator" id=61ZXu namespace=openshift-cluster-samples-operator 2025-12-08T17:44:22.053231661+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="resolving sources" id=XGz5m namespace=openshift-cluster-version 2025-12-08T17:44:22.053231661+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="checking if subscriptions need update" id=XGz5m namespace=openshift-cluster-version 2025-12-08T17:44:22.065452124+00:00 stderr F I1208 17:44:22.064142 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:22.065452124+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="catalog update required at 2025-12-08 17:44:22.064224411 +0000 UTC m=+2.026658883" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=Objd1 2025-12-08T17:44:22.260210647+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="No subscriptions were found in namespace openshift-cluster-storage-operator" id=MZJ6l namespace=openshift-cluster-storage-operator 2025-12-08T17:44:22.260210647+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="resolving sources" id=9rmFC namespace=openshift-config 2025-12-08T17:44:22.260210647+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="checking if subscriptions need update" id=9rmFC namespace=openshift-config 2025-12-08T17:44:22.287936482+00:00 stderr F I1208 17:44:22.287031 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:22.287936482+00:00 stderr F time="2025-12-08T17:44:22Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=ctJSW 2025-12-08T17:44:22.287936482+00:00 stderr F time="2025-12-08T17:44:22Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=ctJSW 2025-12-08T17:44:22.287936482+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=ctJSW 2025-12-08T17:44:22.463343217+00:00 stderr F time="2025-12-08T17:44:22Z" 
level=info msg="No subscriptions were found in namespace openshift-cluster-version" id=XGz5m namespace=openshift-cluster-version 2025-12-08T17:44:22.463343217+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="resolving sources" id=kFdwA namespace=openshift-config-managed 2025-12-08T17:44:22.463343217+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="checking if subscriptions need update" id=kFdwA namespace=openshift-config-managed 2025-12-08T17:44:22.463343217+00:00 stderr F I1208 17:44:22.461017 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:22.463343217+00:00 stderr F time="2025-12-08T17:44:22Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=Objd1 2025-12-08T17:44:22.463343217+00:00 stderr F time="2025-12-08T17:44:22Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=Objd1 2025-12-08T17:44:22.463343217+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=Objd1 2025-12-08T17:44:22.654955584+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="No subscriptions were found in namespace openshift-config" id=9rmFC namespace=openshift-config 2025-12-08T17:44:22.654955584+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="resolving sources" id=ETetH namespace=openshift-config-operator 2025-12-08T17:44:22.654955584+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="checking if subscriptions need update" id=ETetH namespace=openshift-config-operator 2025-12-08T17:44:22.878191493+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=25IcJ 2025-12-08T17:44:22.878191493+00:00 stderr F time="2025-12-08T17:44:22Z" level=error msg="registry service not healthy: one or more required resources are missing" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=25IcJ isCurrentServiceAccountNil=false isServiceNil=false numCurrentPods=0 2025-12-08T17:44:23.052205179+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="No subscriptions were found in namespace openshift-config-managed" id=kFdwA namespace=openshift-config-managed 2025-12-08T17:44:23.052205179+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="resolving sources" id=JQKMO namespace=openshift-console 2025-12-08T17:44:23.052205179+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="checking if subscriptions need update" id=JQKMO namespace=openshift-console 2025-12-08T17:44:23.459507779+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="No subscriptions were found in namespace openshift-config-operator" id=ETetH namespace=openshift-config-operator 2025-12-08T17:44:23.459507779+00:00 stderr F 
time="2025-12-08T17:44:23Z" level=info msg="resolving sources" id=BeNyj namespace=openshift-console-operator 2025-12-08T17:44:23.459507779+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="checking if subscriptions need update" id=BeNyj namespace=openshift-console-operator 2025-12-08T17:44:23.653568903+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="No subscriptions were found in namespace openshift-console" id=JQKMO namespace=openshift-console 2025-12-08T17:44:23.653568903+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="resolving sources" id=UAgwQ namespace=openshift-console-user-settings 2025-12-08T17:44:23.653568903+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="checking if subscriptions need update" id=UAgwQ namespace=openshift-console-user-settings 2025-12-08T17:44:23.675901462+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=25IcJ 2025-12-08T17:44:23.675901462+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="registry pods invalid, need to overwrite" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=25IcJ 2025-12-08T17:44:23.675901462+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="creating desired pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=25IcJ pod.name= pod.namespace=openshift-marketplace 2025-12-08T17:44:23.853397863+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="No subscriptions were found in namespace openshift-console-operator" id=BeNyj namespace=openshift-console-operator 2025-12-08T17:44:23.853397863+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="resolving sources" id=W/wlt namespace=openshift-controller-manager 2025-12-08T17:44:23.853397863+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="checking if subscriptions need update" id=W/wlt namespace=openshift-controller-manager 2025-12-08T17:44:23.875300130+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=CPF7f 2025-12-08T17:44:23.875300130+00:00 stderr F time="2025-12-08T17:44:23Z" level=error msg="registry service not healthy: one or more required resources are missing" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=CPF7f isCurrentServiceAccountNil=false isServiceNil=false numCurrentPods=0 2025-12-08T17:44:24.053036169+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="No subscriptions were found in namespace openshift-console-user-settings" id=UAgwQ namespace=openshift-console-user-settings 2025-12-08T17:44:24.053036169+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="resolving sources" id=1Tauu namespace=openshift-controller-manager-operator 2025-12-08T17:44:24.053036169+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="checking if subscriptions need update" id=1Tauu namespace=openshift-controller-manager-operator 2025-12-08T17:44:24.076576071+00:00 stderr F I1208 17:44:24.069868 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:24.076576071+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="catalog update required at 
2025-12-08 17:44:24.069981321 +0000 UTC m=+4.032415793" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=25IcJ 2025-12-08T17:44:24.262025899+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="No subscriptions were found in namespace openshift-controller-manager" id=W/wlt namespace=openshift-controller-manager 2025-12-08T17:44:24.262025899+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="resolving sources" id=btv1w namespace=openshift-dns 2025-12-08T17:44:24.262025899+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="checking if subscriptions need update" id=btv1w namespace=openshift-dns 2025-12-08T17:44:24.470736283+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="No subscriptions were found in namespace openshift-controller-manager-operator" id=1Tauu namespace=openshift-controller-manager-operator 2025-12-08T17:44:24.470833885+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="resolving sources" id=xqd82 namespace=openshift-dns-operator 2025-12-08T17:44:24.470867806+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="checking if subscriptions need update" id=xqd82 namespace=openshift-dns-operator 2025-12-08T17:44:24.470947398+00:00 stderr F I1208 17:44:24.470816 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:24.471067341+00:00 stderr F time="2025-12-08T17:44:24Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=25IcJ 2025-12-08T17:44:24.471099182+00:00 stderr F time="2025-12-08T17:44:24Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=25IcJ 2025-12-08T17:44:24.471161034+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=25IcJ 2025-12-08T17:44:24.664531109+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="No subscriptions were found in namespace openshift-dns" id=btv1w namespace=openshift-dns 2025-12-08T17:44:24.664610701+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="resolving sources" id=0YZXU namespace=openshift-etcd 2025-12-08T17:44:24.664653572+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="checking if subscriptions need update" id=0YZXU namespace=openshift-etcd 2025-12-08T17:44:24.849974137+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="No subscriptions were found in namespace openshift-dns-operator" id=xqd82 namespace=openshift-dns-operator 2025-12-08T17:44:24.850082690+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="resolving sources" id=ecVs1 namespace=openshift-etcd-operator 2025-12-08T17:44:24.850134141+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="checking if subscriptions need update" id=ecVs1 namespace=openshift-etcd-operator 2025-12-08T17:44:24.860076073+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="of 0 pods matching 
label selector, 0 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=CPF7f 2025-12-08T17:44:24.860135904+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="registry pods invalid, need to overwrite" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=CPF7f 2025-12-08T17:44:24.860254587+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="creating desired pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=CPF7f pod.name= pod.namespace=openshift-marketplace 2025-12-08T17:44:25.076191027+00:00 stderr F I1208 17:44:25.074606 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:25.076191027+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="catalog update required at 2025-12-08 17:44:25.075165619 +0000 UTC m=+5.037600091" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=CPF7f 2025-12-08T17:44:25.267915247+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="No subscriptions were found in namespace openshift-etcd" id=0YZXU namespace=openshift-etcd 2025-12-08T17:44:25.267915247+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="resolving sources" id=qdz/m namespace=openshift-host-network 2025-12-08T17:44:25.267915247+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="checking if subscriptions need update" id=qdz/m namespace=openshift-host-network 2025-12-08T17:44:25.451523345+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="No subscriptions were found in namespace openshift-etcd-operator" id=ecVs1 namespace=openshift-etcd-operator 2025-12-08T17:44:25.451523345+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="resolving sources" id=FiEX7 namespace=openshift-image-registry 2025-12-08T17:44:25.451523345+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="checking if subscriptions need update" id=FiEX7 namespace=openshift-image-registry 2025-12-08T17:44:25.475615802+00:00 stderr F I1208 17:44:25.467604 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:25.475615802+00:00 stderr F time="2025-12-08T17:44:25Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=CPF7f 2025-12-08T17:44:25.475615802+00:00 stderr F time="2025-12-08T17:44:25Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=CPF7f 2025-12-08T17:44:25.475615802+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=CPF7f 2025-12-08T17:44:25.650705518+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="No subscriptions were found in namespace openshift-host-network" id=qdz/m namespace=openshift-host-network 
2025-12-08T17:44:25.650705518+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="resolving sources" id=jv8co namespace=openshift-infra 2025-12-08T17:44:25.650705518+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="checking if subscriptions need update" id=jv8co namespace=openshift-infra 2025-12-08T17:44:25.664588137+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=fLg45 2025-12-08T17:44:25.664588137+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=fLg45 2025-12-08T17:44:25.854132918+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="No subscriptions were found in namespace openshift-image-registry" id=FiEX7 namespace=openshift-image-registry 2025-12-08T17:44:25.854132918+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="resolving sources" id=DwuE1 namespace=openshift-ingress 2025-12-08T17:44:25.854132918+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="checking if subscriptions need update" id=DwuE1 namespace=openshift-ingress 2025-12-08T17:44:26.255785613+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="No subscriptions were found in namespace openshift-infra" id=jv8co namespace=openshift-infra 2025-12-08T17:44:26.255785613+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="resolving sources" id=QmIA8 namespace=openshift-ingress-canary 2025-12-08T17:44:26.255785613+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="checking if subscriptions need update" id=QmIA8 namespace=openshift-ingress-canary 2025-12-08T17:44:26.257997404+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=fLg45 2025-12-08T17:44:26.257997404+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=fLg45 2025-12-08T17:44:26.257997404+00:00 stderr F time="2025-12-08T17:44:26Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=fLg45 2025-12-08T17:44:26.257997404+00:00 stderr F time="2025-12-08T17:44:26Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=fLg45 
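The "update pod community-operators-sb7gg has not yet reported ready" messages above come from the catalog polling path: a fresh registry pod is created alongside the currently serving one and is only swapped in once it reports ready, which matches the "registry pods invalid, need to overwrite" and "creating desired pod" entries earlier in this log. If the loop persists, inspecting the update pod directly is the usual next step; the pod name below is the one from this log, the rest is a sketch:

# Watch the update pod come up alongside the serving pod
oc -n openshift-marketplace get pods -w | grep community-operators
# Readiness probe failures and image pull problems surface in the pod events
oc -n openshift-marketplace describe pod community-operators-sb7gg
oc -n openshift-marketplace logs community-operators-sb7gg --tail=50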
2025-12-08T17:44:26.257997404+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=fLg45 2025-12-08T17:44:26.257997404+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=community-operators state.State=CONNECTING" 2025-12-08T17:44:26.349426187+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=community-operators state.State=TRANSIENT_FAILURE" 2025-12-08T17:44:26.456193049+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="No subscriptions were found in namespace openshift-ingress" id=DwuE1 namespace=openshift-ingress 2025-12-08T17:44:26.456269571+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="resolving sources" id=2/x0G namespace=openshift-ingress-operator 2025-12-08T17:44:26.456293082+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="checking if subscriptions need update" id=2/x0G namespace=openshift-ingress-operator 2025-12-08T17:44:26.653690287+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="No subscriptions were found in namespace openshift-ingress-canary" id=QmIA8 namespace=openshift-ingress-canary 2025-12-08T17:44:26.653726098+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="resolving sources" id=E8pz5 namespace=openshift-kni-infra 2025-12-08T17:44:26.653726098+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="checking if subscriptions need update" id=E8pz5 namespace=openshift-kni-infra 2025-12-08T17:44:26.656968816+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=XbTcT 2025-12-08T17:44:26.656968816+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=XbTcT 2025-12-08T17:44:27.054244283+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="No subscriptions were found in namespace openshift-ingress-operator" id=2/x0G namespace=openshift-ingress-operator 2025-12-08T17:44:27.054244283+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="resolving sources" id=e+3g9 namespace=openshift-kube-apiserver 2025-12-08T17:44:27.054244283+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="checking if subscriptions need update" id=e+3g9 namespace=openshift-kube-apiserver 2025-12-08T17:44:27.252187882+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="No subscriptions were found in namespace openshift-kni-infra" id=E8pz5 namespace=openshift-kni-infra 2025-12-08T17:44:27.252293855+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="resolving sources" id=VK2W0 namespace=openshift-kube-apiserver-operator 2025-12-08T17:44:27.252318256+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="checking if subscriptions need update" id=VK2W0 namespace=openshift-kube-apiserver-operator 2025-12-08T17:44:27.464914254+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="No 
subscriptions were found in namespace openshift-kube-apiserver" id=e+3g9 namespace=openshift-kube-apiserver 2025-12-08T17:44:27.464914254+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="resolving sources" id=d8wXW namespace=openshift-kube-controller-manager 2025-12-08T17:44:27.464914254+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="checking if subscriptions need update" id=d8wXW namespace=openshift-kube-controller-manager 2025-12-08T17:44:27.464914254+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=o6u7o 2025-12-08T17:44:27.464914254+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=o6u7o 2025-12-08T17:44:27.650035524+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="No subscriptions were found in namespace openshift-kube-apiserver-operator" id=VK2W0 namespace=openshift-kube-apiserver-operator 2025-12-08T17:44:27.650149437+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="resolving sources" id=+F4U0 namespace=openshift-kube-controller-manager-operator 2025-12-08T17:44:27.650190088+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="checking if subscriptions need update" id=+F4U0 namespace=openshift-kube-controller-manager-operator 2025-12-08T17:44:27.664554330+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=XbTcT 2025-12-08T17:44:27.664637342+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=XbTcT 2025-12-08T17:44:27.664716214+00:00 stderr F time="2025-12-08T17:44:27Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=XbTcT 2025-12-08T17:44:27.664746495+00:00 stderr F time="2025-12-08T17:44:27Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=XbTcT 2025-12-08T17:44:27.664782536+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=XbTcT 2025-12-08T17:44:27.665371552+00:00 stderr F 
time="2025-12-08T17:44:27Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=certified-operators state.State=CONNECTING" 2025-12-08T17:44:27.688997556+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=certified-operators state.State=TRANSIENT_FAILURE" 2025-12-08T17:44:27.865318355+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="No subscriptions were found in namespace openshift-kube-controller-manager" id=d8wXW namespace=openshift-kube-controller-manager 2025-12-08T17:44:27.865318355+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="resolving sources" id=joNOY namespace=openshift-kube-scheduler 2025-12-08T17:44:27.865318355+00:00 stderr F time="2025-12-08T17:44:27Z" level=info msg="checking if subscriptions need update" id=joNOY namespace=openshift-kube-scheduler 2025-12-08T17:44:28.055841142+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="No subscriptions were found in namespace openshift-kube-controller-manager-operator" id=+F4U0 namespace=openshift-kube-controller-manager-operator 2025-12-08T17:44:28.055841142+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="resolving sources" id=BMByR namespace=openshift-kube-scheduler-operator 2025-12-08T17:44:28.055841142+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="checking if subscriptions need update" id=BMByR namespace=openshift-kube-scheduler-operator 2025-12-08T17:44:28.259608791+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=o6u7o 2025-12-08T17:44:28.259608791+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=o6u7o 2025-12-08T17:44:28.259693153+00:00 stderr F time="2025-12-08T17:44:28Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=o6u7o 2025-12-08T17:44:28.259693153+00:00 stderr F time="2025-12-08T17:44:28Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=o6u7o 2025-12-08T17:44:28.259703953+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=o6u7o 2025-12-08T17:44:28.260091594+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-marketplace state.State=CONNECTING" 2025-12-08T17:44:28.267183438+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="state.Key.Namespace=openshift-marketplace 
state.Key.Name=redhat-marketplace state.State=TRANSIENT_FAILURE" 2025-12-08T17:44:28.458915887+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="No subscriptions were found in namespace openshift-kube-scheduler" id=joNOY namespace=openshift-kube-scheduler 2025-12-08T17:44:28.458915887+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="resolving sources" id=UQdII namespace=openshift-kube-storage-version-migrator 2025-12-08T17:44:28.458915887+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="checking if subscriptions need update" id=UQdII namespace=openshift-kube-storage-version-migrator 2025-12-08T17:44:28.654030180+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="No subscriptions were found in namespace openshift-kube-scheduler-operator" id=BMByR namespace=openshift-kube-scheduler-operator 2025-12-08T17:44:28.654030180+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="resolving sources" id=4QfdU namespace=openshift-kube-storage-version-migrator-operator 2025-12-08T17:44:28.654030180+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="checking if subscriptions need update" id=4QfdU namespace=openshift-kube-storage-version-migrator-operator 2025-12-08T17:44:28.658154442+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=1RG8H 2025-12-08T17:44:28.658154442+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=1RG8H 2025-12-08T17:44:29.062925113+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="No subscriptions were found in namespace openshift-kube-storage-version-migrator" id=UQdII namespace=openshift-kube-storage-version-migrator 2025-12-08T17:44:29.062925113+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="resolving sources" id=EPpvN namespace=openshift-machine-api 2025-12-08T17:44:29.062925113+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="checking if subscriptions need update" id=EPpvN namespace=openshift-machine-api 2025-12-08T17:44:29.258956190+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="No subscriptions were found in namespace openshift-kube-storage-version-migrator-operator" id=4QfdU namespace=openshift-kube-storage-version-migrator-operator 2025-12-08T17:44:29.258956190+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="resolving sources" id=gPJdp namespace=openshift-machine-config-operator 2025-12-08T17:44:29.258956190+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="checking if subscriptions need update" id=gPJdp namespace=openshift-machine-config-operator 2025-12-08T17:44:29.451986535+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="No subscriptions were found in namespace openshift-machine-api" id=EPpvN namespace=openshift-machine-api 2025-12-08T17:44:29.451986535+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="resolving sources" id=eu1AP namespace=openshift-marketplace 2025-12-08T17:44:29.451986535+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="checking if subscriptions need update" id=eu1AP 
namespace=openshift-marketplace 2025-12-08T17:44:29.458747050+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=2TBIp 2025-12-08T17:44:29.458747050+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=2TBIp 2025-12-08T17:44:29.650274984+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="No subscriptions were found in namespace openshift-machine-config-operator" id=gPJdp namespace=openshift-machine-config-operator 2025-12-08T17:44:29.650274984+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="resolving sources" id=C4B7p namespace=openshift-monitoring 2025-12-08T17:44:29.650274984+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="checking if subscriptions need update" id=C4B7p namespace=openshift-monitoring 2025-12-08T17:44:29.655592999+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=1RG8H 2025-12-08T17:44:29.655592999+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=1RG8H 2025-12-08T17:44:29.814124473+00:00 stderr F time="2025-12-08T17:44:29Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=1RG8H 2025-12-08T17:44:29.814124473+00:00 stderr F time="2025-12-08T17:44:29Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=1RG8H 2025-12-08T17:44:29.814124473+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=1RG8H 2025-12-08T17:44:29.814548675+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-operators state.State=CONNECTING" 2025-12-08T17:44:29.827000795+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-operators state.State=TRANSIENT_FAILURE" 2025-12-08T17:44:29.851586105+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="No subscriptions were found in namespace openshift-marketplace" id=eu1AP 
namespace=openshift-marketplace 2025-12-08T17:44:29.851586105+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="resolving sources" id=qm0uI namespace=openshift-multus 2025-12-08T17:44:29.851586105+00:00 stderr F time="2025-12-08T17:44:29Z" level=info msg="checking if subscriptions need update" id=qm0uI namespace=openshift-multus 2025-12-08T17:44:30.050934913+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="No subscriptions were found in namespace openshift-monitoring" id=C4B7p namespace=openshift-monitoring 2025-12-08T17:44:30.050934913+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="resolving sources" id=K5EdJ namespace=openshift-network-console 2025-12-08T17:44:30.050934913+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="checking if subscriptions need update" id=K5EdJ namespace=openshift-network-console 2025-12-08T17:44:30.256484650+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=2TBIp 2025-12-08T17:44:30.256484650+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=2TBIp 2025-12-08T17:44:30.256484650+00:00 stderr F time="2025-12-08T17:44:30Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=2TBIp 2025-12-08T17:44:30.256484650+00:00 stderr F time="2025-12-08T17:44:30Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=2TBIp 2025-12-08T17:44:30.256484650+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=2TBIp 2025-12-08T17:44:30.450918323+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="No subscriptions were found in namespace openshift-multus" id=qm0uI namespace=openshift-multus 2025-12-08T17:44:30.450963884+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="resolving sources" id=xIn+z namespace=openshift-network-diagnostics 2025-12-08T17:44:30.450963884+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="checking if subscriptions need update" id=xIn+z namespace=openshift-network-diagnostics 2025-12-08T17:44:30.656267145+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="No subscriptions were found in namespace openshift-network-console" id=K5EdJ namespace=openshift-network-console 2025-12-08T17:44:30.656267145+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="resolving sources" id=mKtq2 namespace=openshift-network-node-identity 2025-12-08T17:44:30.656267145+00:00 stderr F 
time="2025-12-08T17:44:30Z" level=info msg="checking if subscriptions need update" id=mKtq2 namespace=openshift-network-node-identity 2025-12-08T17:44:30.663617254+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=yPQ8E 2025-12-08T17:44:30.663617254+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=yPQ8E 2025-12-08T17:44:31.049803369+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="No subscriptions were found in namespace openshift-network-diagnostics" id=xIn+z namespace=openshift-network-diagnostics 2025-12-08T17:44:31.049831079+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="resolving sources" id=E0MBv namespace=openshift-network-operator 2025-12-08T17:44:31.049831079+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="checking if subscriptions need update" id=E0MBv namespace=openshift-network-operator 2025-12-08T17:44:31.254687717+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="No subscriptions were found in namespace openshift-network-node-identity" id=mKtq2 namespace=openshift-network-node-identity 2025-12-08T17:44:31.254718678+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="resolving sources" id=Jsdgn namespace=openshift-node 2025-12-08T17:44:31.254718678+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="checking if subscriptions need update" id=Jsdgn namespace=openshift-node 2025-12-08T17:44:31.450118158+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="No subscriptions were found in namespace openshift-network-operator" id=E0MBv namespace=openshift-network-operator 2025-12-08T17:44:31.450149319+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="resolving sources" id=IXQ/Y namespace=openshift-nutanix-infra 2025-12-08T17:44:31.450149319+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="checking if subscriptions need update" id=IXQ/Y namespace=openshift-nutanix-infra 2025-12-08T17:44:31.457481909+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=Xy8Lb 2025-12-08T17:44:31.457481909+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=Xy8Lb 2025-12-08T17:44:31.650612047+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="No subscriptions were found in namespace openshift-node" id=Jsdgn namespace=openshift-node 2025-12-08T17:44:31.650646108+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="resolving sources" id=/OhQ/ namespace=openshift-oauth-apiserver 2025-12-08T17:44:31.650646108+00:00 stderr F time="2025-12-08T17:44:31Z" 
level=info msg="checking if subscriptions need update" id=/OhQ/ namespace=openshift-oauth-apiserver 2025-12-08T17:44:31.656081677+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=yPQ8E 2025-12-08T17:44:31.656081677+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=yPQ8E 2025-12-08T17:44:31.656081677+00:00 stderr F time="2025-12-08T17:44:31Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=yPQ8E 2025-12-08T17:44:31.656081677+00:00 stderr F time="2025-12-08T17:44:31Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=yPQ8E 2025-12-08T17:44:31.656081677+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=yPQ8E 2025-12-08T17:44:31.850611382+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="No subscriptions were found in namespace openshift-nutanix-infra" id=IXQ/Y namespace=openshift-nutanix-infra 2025-12-08T17:44:31.850611382+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="resolving sources" id=c85tX namespace=openshift-openstack-infra 2025-12-08T17:44:31.850611382+00:00 stderr F time="2025-12-08T17:44:31Z" level=info msg="checking if subscriptions need update" id=c85tX namespace=openshift-openstack-infra 2025-12-08T17:44:32.049719923+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="No subscriptions were found in namespace openshift-oauth-apiserver" id=/OhQ/ namespace=openshift-oauth-apiserver 2025-12-08T17:44:32.049797055+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="resolving sources" id=srXA4 namespace=openshift-operator-lifecycle-manager 2025-12-08T17:44:32.049820886+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="checking if subscriptions need update" id=srXA4 namespace=openshift-operator-lifecycle-manager 2025-12-08T17:44:32.255793665+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=Xy8Lb 2025-12-08T17:44:32.255793665+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true 
current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=Xy8Lb 2025-12-08T17:44:32.255828376+00:00 stderr F time="2025-12-08T17:44:32Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=Xy8Lb 2025-12-08T17:44:32.255837466+00:00 stderr F time="2025-12-08T17:44:32Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=Xy8Lb 2025-12-08T17:44:32.255846056+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=Xy8Lb 2025-12-08T17:44:32.450844715+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="No subscriptions were found in namespace openshift-openstack-infra" id=c85tX namespace=openshift-openstack-infra 2025-12-08T17:44:32.450844715+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="resolving sources" id=5qCJo namespace=openshift-operators 2025-12-08T17:44:32.450844715+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="checking if subscriptions need update" id=5qCJo namespace=openshift-operators 2025-12-08T17:44:32.649637267+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="No subscriptions were found in namespace openshift-operator-lifecycle-manager" id=srXA4 namespace=openshift-operator-lifecycle-manager 2025-12-08T17:44:32.649668318+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="resolving sources" id=jAfbg namespace=openshift-ovirt-infra 2025-12-08T17:44:32.649668318+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="checking if subscriptions need update" id=jAfbg namespace=openshift-ovirt-infra 2025-12-08T17:44:32.656137515+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=2Acti 2025-12-08T17:44:32.656137515+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=2Acti 2025-12-08T17:44:33.053910805+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="No subscriptions were found in namespace openshift-operators" id=5qCJo namespace=openshift-operators 2025-12-08T17:44:33.053959226+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="resolving sources" id=wuWgp namespace=openshift-ovn-kubernetes 2025-12-08T17:44:33.053959226+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="checking if subscriptions need update" id=wuWgp namespace=openshift-ovn-kubernetes 2025-12-08T17:44:33.250702023+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="No subscriptions were found in namespace 
openshift-ovirt-infra" id=jAfbg namespace=openshift-ovirt-infra 2025-12-08T17:44:33.250702023+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="resolving sources" id=lL/Rh namespace=openshift-route-controller-manager 2025-12-08T17:44:33.250702023+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="checking if subscriptions need update" id=lL/Rh namespace=openshift-route-controller-manager 2025-12-08T17:44:33.450531933+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="No subscriptions were found in namespace openshift-ovn-kubernetes" id=wuWgp namespace=openshift-ovn-kubernetes 2025-12-08T17:44:33.450563054+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="resolving sources" id=gohnh namespace=openshift-service-ca 2025-12-08T17:44:33.450563054+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="checking if subscriptions need update" id=gohnh namespace=openshift-service-ca 2025-12-08T17:44:33.456154537+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=lgU8X 2025-12-08T17:44:33.456154537+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=lgU8X 2025-12-08T17:44:33.650221940+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="No subscriptions were found in namespace openshift-route-controller-manager" id=lL/Rh namespace=openshift-route-controller-manager 2025-12-08T17:44:33.650265151+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="resolving sources" id=wmMtu namespace=openshift-service-ca-operator 2025-12-08T17:44:33.650265151+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="checking if subscriptions need update" id=wmMtu namespace=openshift-service-ca-operator 2025-12-08T17:44:33.655864544+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=2Acti 2025-12-08T17:44:33.655864544+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=2Acti 2025-12-08T17:44:33.655921205+00:00 stderr F time="2025-12-08T17:44:33Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=2Acti 2025-12-08T17:44:33.655921205+00:00 stderr F time="2025-12-08T17:44:33Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: 
redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=2Acti 2025-12-08T17:44:33.655939856+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=2Acti 2025-12-08T17:44:33.850255537+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="No subscriptions were found in namespace openshift-service-ca" id=gohnh namespace=openshift-service-ca 2025-12-08T17:44:33.850340719+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="resolving sources" id=BNWUy namespace=openshift-user-workload-monitoring 2025-12-08T17:44:33.850368340+00:00 stderr F time="2025-12-08T17:44:33Z" level=info msg="checking if subscriptions need update" id=BNWUy namespace=openshift-user-workload-monitoring 2025-12-08T17:44:34.050570430+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="No subscriptions were found in namespace openshift-service-ca-operator" id=wmMtu namespace=openshift-service-ca-operator 2025-12-08T17:44:34.050570430+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="resolving sources" id=0K0ky namespace=openshift-vsphere-infra 2025-12-08T17:44:34.050570430+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="checking if subscriptions need update" id=0K0ky namespace=openshift-vsphere-infra 2025-12-08T17:44:34.255617044+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=lgU8X 2025-12-08T17:44:34.255617044+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=lgU8X 2025-12-08T17:44:34.255692526+00:00 stderr F time="2025-12-08T17:44:34Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=lgU8X 2025-12-08T17:44:34.255692526+00:00 stderr F time="2025-12-08T17:44:34Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=lgU8X 2025-12-08T17:44:34.255701626+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=lgU8X 2025-12-08T17:44:34.452404152+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="No subscriptions were found in namespace openshift-user-workload-monitoring" id=BNWUy namespace=openshift-user-workload-monitoring 2025-12-08T17:44:34.651512753+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="No subscriptions were found in namespace 
openshift-vsphere-infra" id=0K0ky namespace=openshift-vsphere-infra 2025-12-08T17:44:34.855908908+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=3A+a6 2025-12-08T17:44:34.855908908+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=3A+a6 2025-12-08T17:44:35.057134137+00:00 stderr F time="2025-12-08T17:44:35Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=gVs28 2025-12-08T17:44:35.057134137+00:00 stderr F time="2025-12-08T17:44:35Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=gVs28 2025-12-08T17:44:36.055921851+00:00 stderr F time="2025-12-08T17:44:36Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=3A+a6 2025-12-08T17:44:36.055921851+00:00 stderr F time="2025-12-08T17:44:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=3A+a6 2025-12-08T17:44:36.056051374+00:00 stderr F time="2025-12-08T17:44:36Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=3A+a6 2025-12-08T17:44:36.056051374+00:00 stderr F time="2025-12-08T17:44:36Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=3A+a6 2025-12-08T17:44:36.056051374+00:00 stderr F time="2025-12-08T17:44:36Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=3A+a6 2025-12-08T17:44:36.256993965+00:00 stderr F time="2025-12-08T17:44:36Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb 
current-pod.namespace=openshift-marketplace id=gVs28 2025-12-08T17:44:36.256993965+00:00 stderr F time="2025-12-08T17:44:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=gVs28 2025-12-08T17:44:36.257137049+00:00 stderr F time="2025-12-08T17:44:36Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=gVs28 2025-12-08T17:44:36.257137049+00:00 stderr F time="2025-12-08T17:44:36Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=gVs28 2025-12-08T17:44:36.257137049+00:00 stderr F time="2025-12-08T17:44:36Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=gVs28 2025-12-08T17:44:36.655418943+00:00 stderr F time="2025-12-08T17:44:36Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=Gqga4 2025-12-08T17:44:36.655418943+00:00 stderr F time="2025-12-08T17:44:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=Gqga4 2025-12-08T17:44:37.256450790+00:00 stderr F time="2025-12-08T17:44:37Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=Gqga4 2025-12-08T17:44:37.256522582+00:00 stderr F time="2025-12-08T17:44:37Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=Gqga4 2025-12-08T17:44:37.256634505+00:00 stderr F time="2025-12-08T17:44:37Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=Gqga4 2025-12-08T17:44:37.256670086+00:00 stderr F time="2025-12-08T17:44:37Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not 
ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=Gqga4 2025-12-08T17:44:37.256699167+00:00 stderr F time="2025-12-08T17:44:37Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=Gqga4 2025-12-08T17:44:42.773156002+00:00 stderr F time="2025-12-08T17:44:42Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=FZh7K 2025-12-08T17:44:42.773156002+00:00 stderr F time="2025-12-08T17:44:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=FZh7K 2025-12-08T17:44:42.782017808+00:00 stderr F time="2025-12-08T17:44:42Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=FZh7K 2025-12-08T17:44:42.782017808+00:00 stderr F time="2025-12-08T17:44:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=FZh7K 2025-12-08T17:44:42.782017808+00:00 stderr F time="2025-12-08T17:44:42Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=FZh7K 2025-12-08T17:44:42.782064140+00:00 stderr F time="2025-12-08T17:44:42Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=FZh7K 2025-12-08T17:44:42.782064140+00:00 stderr F time="2025-12-08T17:44:42Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=FZh7K 2025-12-08T17:44:42.791921204+00:00 stderr F time="2025-12-08T17:44:42Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=wj6+a 2025-12-08T17:44:42.791921204+00:00 stderr F time="2025-12-08T17:44:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=wj6+a 
2025-12-08T17:44:42.799869026+00:00 stderr F time="2025-12-08T17:44:42Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=wj6+a 2025-12-08T17:44:42.799869026+00:00 stderr F time="2025-12-08T17:44:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=wj6+a 2025-12-08T17:44:42.799947018+00:00 stderr F time="2025-12-08T17:44:42Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=wj6+a 2025-12-08T17:44:42.799947018+00:00 stderr F time="2025-12-08T17:44:42Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=wj6+a 2025-12-08T17:44:42.799956648+00:00 stderr F time="2025-12-08T17:44:42Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=wj6+a 2025-12-08T17:44:43.364216798+00:00 stderr F time="2025-12-08T17:44:43Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=/gg2F 2025-12-08T17:44:43.364216798+00:00 stderr F time="2025-12-08T17:44:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=/gg2F 2025-12-08T17:44:43.563085682+00:00 stderr F time="2025-12-08T17:44:43Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=DPDWp 2025-12-08T17:44:43.563085682+00:00 stderr F time="2025-12-08T17:44:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=DPDWp 2025-12-08T17:44:44.563855428+00:00 stderr F time="2025-12-08T17:44:44Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=/gg2F 
2025-12-08T17:44:44.563855428+00:00 stderr F time="2025-12-08T17:44:44Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=/gg2F 2025-12-08T17:44:44.563855428+00:00 stderr F time="2025-12-08T17:44:44Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=/gg2F 2025-12-08T17:44:44.563855428+00:00 stderr F time="2025-12-08T17:44:44Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=/gg2F 2025-12-08T17:44:44.563855428+00:00 stderr F time="2025-12-08T17:44:44Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=/gg2F 2025-12-08T17:44:44.762730892+00:00 stderr F time="2025-12-08T17:44:44Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=DPDWp 2025-12-08T17:44:44.762730892+00:00 stderr F time="2025-12-08T17:44:44Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=DPDWp 2025-12-08T17:44:44.762730892+00:00 stderr F time="2025-12-08T17:44:44Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=DPDWp 2025-12-08T17:44:44.762730892+00:00 stderr F time="2025-12-08T17:44:44Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=DPDWp 2025-12-08T17:44:44.762730892+00:00 stderr F time="2025-12-08T17:44:44Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=DPDWp 2025-12-08T17:44:45.365402461+00:00 stderr F time="2025-12-08T17:44:45Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=9O6Cx 2025-12-08T17:44:45.365402461+00:00 stderr 
F time="2025-12-08T17:44:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=9O6Cx 2025-12-08T17:44:45.564474970+00:00 stderr F time="2025-12-08T17:44:45Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=pRC3r 2025-12-08T17:44:45.564474970+00:00 stderr F time="2025-12-08T17:44:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=pRC3r 2025-12-08T17:44:46.566719918+00:00 stderr F time="2025-12-08T17:44:46Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=9O6Cx 2025-12-08T17:44:46.566719918+00:00 stderr F time="2025-12-08T17:44:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=9O6Cx 2025-12-08T17:44:46.566780279+00:00 stderr F time="2025-12-08T17:44:46Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=9O6Cx 2025-12-08T17:44:46.566780279+00:00 stderr F time="2025-12-08T17:44:46Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=9O6Cx 2025-12-08T17:44:46.566780279+00:00 stderr F time="2025-12-08T17:44:46Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=9O6Cx 2025-12-08T17:44:46.763574365+00:00 stderr F time="2025-12-08T17:44:46Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=pRC3r 2025-12-08T17:44:46.763574365+00:00 stderr F time="2025-12-08T17:44:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=pRC3r 2025-12-08T17:44:46.763645197+00:00 stderr F 
time="2025-12-08T17:44:46Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=pRC3r 2025-12-08T17:44:46.763645197+00:00 stderr F time="2025-12-08T17:44:46Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=pRC3r 2025-12-08T17:44:46.763645197+00:00 stderr F time="2025-12-08T17:44:46Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=pRC3r 2025-12-08T17:44:47.365118423+00:00 stderr F time="2025-12-08T17:44:47Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=r2v8F 2025-12-08T17:44:47.365118423+00:00 stderr F time="2025-12-08T17:44:47Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=r2v8F 2025-12-08T17:44:47.568468602+00:00 stderr F time="2025-12-08T17:44:47Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=QgT5A 2025-12-08T17:44:47.568468602+00:00 stderr F time="2025-12-08T17:44:47Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=QgT5A 2025-12-08T17:44:48.563761515+00:00 stderr F time="2025-12-08T17:44:48Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=r2v8F 2025-12-08T17:44:48.563761515+00:00 stderr F time="2025-12-08T17:44:48Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=r2v8F 2025-12-08T17:44:48.563761515+00:00 stderr F time="2025-12-08T17:44:48Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=r2v8F 2025-12-08T17:44:48.563761515+00:00 
stderr F time="2025-12-08T17:44:48Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=r2v8F 2025-12-08T17:44:48.563761515+00:00 stderr F time="2025-12-08T17:44:48Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=r2v8F 2025-12-08T17:44:48.763069611+00:00 stderr F time="2025-12-08T17:44:48Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=QgT5A 2025-12-08T17:44:48.763069611+00:00 stderr F time="2025-12-08T17:44:48Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=QgT5A 2025-12-08T17:44:48.763069611+00:00 stderr F time="2025-12-08T17:44:48Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=QgT5A 2025-12-08T17:44:48.763069611+00:00 stderr F time="2025-12-08T17:44:48Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=QgT5A 2025-12-08T17:44:48.763069611+00:00 stderr F time="2025-12-08T17:44:48Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=QgT5A 2025-12-08T17:44:49.364379583+00:00 stderr F time="2025-12-08T17:44:49Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=7Dtal 2025-12-08T17:44:49.364379583+00:00 stderr F time="2025-12-08T17:44:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=7Dtal 2025-12-08T17:44:49.563583285+00:00 stderr F time="2025-12-08T17:44:49Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=utc8B 2025-12-08T17:44:49.563583285+00:00 stderr F time="2025-12-08T17:44:49Z" level=info msg="of 1 pods matching label selector, 1 have 
the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=utc8B 2025-12-08T17:44:50.565035900+00:00 stderr F time="2025-12-08T17:44:50Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=7Dtal 2025-12-08T17:44:50.565035900+00:00 stderr F time="2025-12-08T17:44:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=7Dtal 2025-12-08T17:44:50.565035900+00:00 stderr F time="2025-12-08T17:44:50Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=7Dtal 2025-12-08T17:44:50.565035900+00:00 stderr F time="2025-12-08T17:44:50Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-w7jrs has not yet reported ready" id=7Dtal 2025-12-08T17:44:50.565035900+00:00 stderr F time="2025-12-08T17:44:50Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=7Dtal 2025-12-08T17:44:50.763920865+00:00 stderr F time="2025-12-08T17:44:50Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=utc8B 2025-12-08T17:44:50.763920865+00:00 stderr F time="2025-12-08T17:44:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=utc8B 2025-12-08T17:44:50.763967356+00:00 stderr F time="2025-12-08T17:44:50Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=utc8B 2025-12-08T17:44:50.763978266+00:00 stderr F time="2025-12-08T17:44:50Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-marketplace not ready for update: update pod redhat-marketplace-6m6rs has not yet reported ready" id=utc8B 2025-12-08T17:44:50.763988447+00:00 stderr F time="2025-12-08T17:44:50Z" level=info 
msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=utc8B 2025-12-08T17:44:51.163532385+00:00 stderr F time="2025-12-08T17:44:51Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=xoONl 2025-12-08T17:44:51.163532385+00:00 stderr F time="2025-12-08T17:44:51Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=xoONl 2025-12-08T17:44:51.763864107+00:00 stderr F time="2025-12-08T17:44:51Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=xoONl 2025-12-08T17:44:51.763963040+00:00 stderr F time="2025-12-08T17:44:51Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=xoONl 2025-12-08T17:44:51.764116624+00:00 stderr F time="2025-12-08T17:44:51Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=xoONl 2025-12-08T17:44:51.764156247+00:00 stderr F time="2025-12-08T17:44:51Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-n5vp7 has not yet reported ready" id=xoONl 2025-12-08T17:44:51.764187497+00:00 stderr F time="2025-12-08T17:44:51Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=xoONl 2025-12-08T17:44:52.293553336+00:00 stderr F time="2025-12-08T17:44:52Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=WAezo 2025-12-08T17:44:52.293669601+00:00 stderr F time="2025-12-08T17:44:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=WAezo 2025-12-08T17:44:52.964436834+00:00 stderr F time="2025-12-08T17:44:52Z" level=info msg="evaluating current pod" catalogsource.name=community-operators 
catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=WAezo 2025-12-08T17:44:52.964436834+00:00 stderr F time="2025-12-08T17:44:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=WAezo 2025-12-08T17:44:52.964487606+00:00 stderr F time="2025-12-08T17:44:52Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=WAezo 2025-12-08T17:44:52.964487606+00:00 stderr F time="2025-12-08T17:44:52Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-sb7gg has not yet reported ready" id=WAezo 2025-12-08T17:44:52.964495706+00:00 stderr F time="2025-12-08T17:44:52Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=WAezo 2025-12-08T17:44:53.163316818+00:00 stderr F time="2025-12-08T17:44:53Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=lxtyb 2025-12-08T17:44:53.163316818+00:00 stderr F time="2025-12-08T17:44:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=lxtyb 2025-12-08T17:44:53.764425284+00:00 stderr F time="2025-12-08T17:44:53Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=kAst8 2025-12-08T17:44:53.764425284+00:00 stderr F time="2025-12-08T17:44:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=kAst8 2025-12-08T17:44:54.364954454+00:00 stderr F time="2025-12-08T17:44:54Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=lxtyb 2025-12-08T17:44:54.364954454+00:00 stderr F time="2025-12-08T17:44:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" 
catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=lxtyb 2025-12-08T17:44:54.964971289+00:00 stderr F time="2025-12-08T17:44:54Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=kAst8 2025-12-08T17:44:54.964971289+00:00 stderr F time="2025-12-08T17:44:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=kAst8 2025-12-08T17:44:55.565077467+00:00 stderr F time="2025-12-08T17:44:55Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=lOJO4 2025-12-08T17:44:55.567213076+00:00 stderr F time="2025-12-08T17:44:55Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=lOJO4 2025-12-08T17:44:56.163138258+00:00 stderr F time="2025-12-08T17:44:56Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=1m+kK 2025-12-08T17:44:56.163138258+00:00 stderr F time="2025-12-08T17:44:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=1m+kK 2025-12-08T17:44:56.765220401+00:00 stderr F time="2025-12-08T17:44:56Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=lOJO4 2025-12-08T17:44:56.765220401+00:00 stderr F time="2025-12-08T17:44:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=lOJO4 2025-12-08T17:44:57.364137376+00:00 stderr F time="2025-12-08T17:44:57Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=1m+kK 2025-12-08T17:44:57.364137376+00:00 stderr F time="2025-12-08T17:44:57Z" level=info msg="of 1 pods matching label selector, 1 have the correct images 
and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=1m+kK 2025-12-08T17:44:57.965329664+00:00 stderr F time="2025-12-08T17:44:57Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=IIMyG 2025-12-08T17:44:57.965329664+00:00 stderr F time="2025-12-08T17:44:57Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=IIMyG 2025-12-08T17:44:57.994377101+00:00 stderr F time="2025-12-08T17:44:57Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-marketplace state.State=READY" 2025-12-08T17:44:57.994377101+00:00 stderr F time="2025-12-08T17:44:57Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-operators state.State=READY" 2025-12-08T17:44:57.994619959+00:00 stderr F time="2025-12-08T17:44:57Z" level=info msg="resolving sources" id=zzlK+ namespace=openshift-marketplace 2025-12-08T17:44:57.994619959+00:00 stderr F time="2025-12-08T17:44:57Z" level=info msg="checking if subscriptions need update" id=zzlK+ namespace=openshift-marketplace 2025-12-08T17:44:57.999068272+00:00 stderr F time="2025-12-08T17:44:57Z" level=info msg="No subscriptions were found in namespace openshift-marketplace" id=zzlK+ namespace=openshift-marketplace 2025-12-08T17:44:58.565163364+00:00 stderr F time="2025-12-08T17:44:58Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=zBuu5 2025-12-08T17:44:58.565163364+00:00 stderr F time="2025-12-08T17:44:58Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=zBuu5 2025-12-08T17:44:59.165971022+00:00 stderr F time="2025-12-08T17:44:59Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=IIMyG 2025-12-08T17:44:59.165971022+00:00 stderr F time="2025-12-08T17:44:59Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=IIMyG 2025-12-08T17:44:59.166066034+00:00 stderr F time="2025-12-08T17:44:59Z" level=info msg="catalog polling result: update pod community-operators-sb7gg failed to start" UpdatePod=community-operators-sb7gg 2025-12-08T17:44:59.562869695+00:00 stderr F time="2025-12-08T17:44:59Z" level=error msg="error 
ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="update pod community-operators-sb7gg in a Failed state: deleted update pod" id=IIMyG 2025-12-08T17:44:59.569049078+00:00 stderr F E1208 17:44:59.568784 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-marketplace/community-operators\" failed: couldn't ensure registry server - error ensuring updated catalog source pod: : update pod community-operators-sb7gg in a Failed state: deleted update pod" logger="UnhandledError" 2025-12-08T17:44:59.763557719+00:00 stderr F time="2025-12-08T17:44:59Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=zBuu5 2025-12-08T17:44:59.763557719+00:00 stderr F time="2025-12-08T17:44:59Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=zBuu5 2025-12-08T17:45:00.366182637+00:00 stderr F time="2025-12-08T17:45:00Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=NG+/G 2025-12-08T17:45:00.366182637+00:00 stderr F time="2025-12-08T17:45:00Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=NG+/G 2025-12-08T17:45:00.965153004+00:00 stderr F time="2025-12-08T17:45:00Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=hUj93 2025-12-08T17:45:00.965153004+00:00 stderr F time="2025-12-08T17:45:00Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=hUj93 2025-12-08T17:45:01.565460027+00:00 stderr F time="2025-12-08T17:45:01Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=NG+/G 2025-12-08T17:45:01.565551069+00:00 stderr F time="2025-12-08T17:45:01Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=NG+/G 2025-12-08T17:45:02.164167286+00:00 stderr F time="2025-12-08T17:45:02Z" level=info msg="evaluating current pod" 
catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=hUj93 2025-12-08T17:45:02.164226348+00:00 stderr F time="2025-12-08T17:45:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=hUj93 2025-12-08T17:45:02.365482228+00:00 stderr F time="2025-12-08T17:45:02Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=nmctQ 2025-12-08T17:45:02.365482228+00:00 stderr F time="2025-12-08T17:45:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=nmctQ 2025-12-08T17:45:02.965154243+00:00 stderr F time="2025-12-08T17:45:02Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=gXXhH 2025-12-08T17:45:02.965154243+00:00 stderr F time="2025-12-08T17:45:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=gXXhH 2025-12-08T17:45:03.565931730+00:00 stderr F time="2025-12-08T17:45:03Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=nmctQ 2025-12-08T17:45:03.565931730+00:00 stderr F time="2025-12-08T17:45:03Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=nmctQ 2025-12-08T17:45:04.168664861+00:00 stderr F time="2025-12-08T17:45:04Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=gXXhH 2025-12-08T17:45:04.168664861+00:00 stderr F time="2025-12-08T17:45:04Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=gXXhH 2025-12-08T17:45:04.363784740+00:00 stderr F time="2025-12-08T17:45:04Z" level=info msg="evaluating current 
pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=b3FII 2025-12-08T17:45:04.363784740+00:00 stderr F time="2025-12-08T17:45:04Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=b3FII 2025-12-08T17:45:04.963926189+00:00 stderr F time="2025-12-08T17:45:04Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=BMo2F 2025-12-08T17:45:04.963926189+00:00 stderr F time="2025-12-08T17:45:04Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=BMo2F 2025-12-08T17:45:05.563519853+00:00 stderr F time="2025-12-08T17:45:05Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=b3FII 2025-12-08T17:45:05.563519853+00:00 stderr F time="2025-12-08T17:45:05Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=b3FII 2025-12-08T17:45:06.163590270+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=BMo2F 2025-12-08T17:45:06.163590270+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=BMo2F 2025-12-08T17:45:06.266969517+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=community-operators state.State=READY" 2025-12-08T17:45:06.266969517+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="resolving sources" id=w+kf9 namespace=openshift-marketplace 2025-12-08T17:45:06.266969517+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="checking if subscriptions need update" id=w+kf9 namespace=openshift-marketplace 2025-12-08T17:45:06.269729273+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="No subscriptions were found in namespace openshift-marketplace" id=w+kf9 namespace=openshift-marketplace 2025-12-08T17:45:06.364007426+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="evaluating current 
pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=o3njm 2025-12-08T17:45:06.364007426+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=o3njm 2025-12-08T17:45:06.964415852+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=o3njm 2025-12-08T17:45:06.964415852+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=o3njm 2025-12-08T17:45:07.367287483+00:00 stderr F time="2025-12-08T17:45:07Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=YO250 2025-12-08T17:45:07.367287483+00:00 stderr F time="2025-12-08T17:45:07Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=YO250 2025-12-08T17:45:07.964209062+00:00 stderr F time="2025-12-08T17:45:07Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=YO250 2025-12-08T17:45:07.964209062+00:00 stderr F time="2025-12-08T17:45:07Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=YO250 2025-12-08T17:45:08.368955004+00:00 stderr F time="2025-12-08T17:45:08Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=st9f6 2025-12-08T17:45:08.368955004+00:00 stderr F time="2025-12-08T17:45:08Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=st9f6 2025-12-08T17:45:08.963115257+00:00 stderr F time="2025-12-08T17:45:08Z" 
level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=st9f6 2025-12-08T17:45:08.963115257+00:00 stderr F time="2025-12-08T17:45:08Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=st9f6 2025-12-08T17:45:09.786128257+00:00 stderr F time="2025-12-08T17:45:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=certified-operators state.State=READY" 2025-12-08T17:45:09.786177859+00:00 stderr F time="2025-12-08T17:45:09Z" level=info msg="resolving sources" id=edqSN namespace=openshift-marketplace 2025-12-08T17:45:09.786177859+00:00 stderr F time="2025-12-08T17:45:09Z" level=info msg="checking if subscriptions need update" id=edqSN namespace=openshift-marketplace 2025-12-08T17:45:09.788722690+00:00 stderr F time="2025-12-08T17:45:09Z" level=info msg="No subscriptions were found in namespace openshift-marketplace" id=edqSN namespace=openshift-marketplace 2025-12-08T17:45:09.791325932+00:00 stderr F time="2025-12-08T17:45:09Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=QFziP 2025-12-08T17:45:09.791325932+00:00 stderr F time="2025-12-08T17:45:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=QFziP 2025-12-08T17:45:09.963032909+00:00 stderr F time="2025-12-08T17:45:09Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=QFziP 2025-12-08T17:45:09.963032909+00:00 stderr F time="2025-12-08T17:45:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=QFziP 2025-12-08T17:45:10.363706038+00:00 stderr F time="2025-12-08T17:45:10Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=9fI57 2025-12-08T17:45:10.363706038+00:00 stderr F time="2025-12-08T17:45:10Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=9fI57 2025-12-08T17:45:10.963361934+00:00 stderr F 
time="2025-12-08T17:45:10Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=9fI57 2025-12-08T17:45:10.963361934+00:00 stderr F time="2025-12-08T17:45:10Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=9fI57 2025-12-08T17:45:11.363818006+00:00 stderr F time="2025-12-08T17:45:11Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=3uIIK 2025-12-08T17:45:11.363818006+00:00 stderr F time="2025-12-08T17:45:11Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=3uIIK 2025-12-08T17:45:11.965501608+00:00 stderr F time="2025-12-08T17:45:11Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=3uIIK 2025-12-08T17:45:11.965501608+00:00 stderr F time="2025-12-08T17:45:11Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=3uIIK 2025-12-08T17:46:59.588580614+00:00 stderr F time="2025-12-08T17:46:59Z" level=info msg="resolving sources" id=LG+Es namespace=openshift-monitoring 2025-12-08T17:46:59.588580614+00:00 stderr F time="2025-12-08T17:46:59Z" level=info msg="checking if subscriptions need update" id=LG+Es namespace=openshift-monitoring 2025-12-08T17:46:59.588754909+00:00 stderr F time="2025-12-08T17:46:59Z" level=info msg="resolving sources" id=euRK1 namespace=openshift-operator-lifecycle-manager 2025-12-08T17:46:59.588754909+00:00 stderr F time="2025-12-08T17:46:59Z" level=info msg="checking if subscriptions need update" id=euRK1 namespace=openshift-operator-lifecycle-manager 2025-12-08T17:46:59.591820867+00:00 stderr F time="2025-12-08T17:46:59Z" level=info msg="No subscriptions were found in namespace openshift-operator-lifecycle-manager" id=euRK1 namespace=openshift-operator-lifecycle-manager 2025-12-08T17:46:59.591820867+00:00 stderr F time="2025-12-08T17:46:59Z" level=info msg="resolving sources" id=1Vbqt namespace=openshift-operators 2025-12-08T17:46:59.591820867+00:00 stderr F time="2025-12-08T17:46:59Z" level=info msg="checking if subscriptions need update" id=1Vbqt namespace=openshift-operators 2025-12-08T17:46:59.591976042+00:00 stderr F time="2025-12-08T17:46:59Z" level=info msg="No subscriptions were found in namespace openshift-monitoring" id=LG+Es namespace=openshift-monitoring 2025-12-08T17:46:59.594110558+00:00 stderr F 
time="2025-12-08T17:46:59Z" level=info msg="No subscriptions were found in namespace openshift-operators" id=1Vbqt namespace=openshift-operators 2025-12-08T17:47:14.708182527+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=Zi6H9 namespace=default 2025-12-08T17:47:14.708182527+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=Zi6H9 namespace=default 2025-12-08T17:47:14.708182527+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=NXt8H namespace=hostpath-provisioner 2025-12-08T17:47:14.708182527+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=NXt8H namespace=hostpath-provisioner 2025-12-08T17:47:14.710441117+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace hostpath-provisioner" id=NXt8H namespace=hostpath-provisioner 2025-12-08T17:47:14.710441117+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=oHtMM namespace=kube-node-lease 2025-12-08T17:47:14.710441117+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=oHtMM namespace=kube-node-lease 2025-12-08T17:47:14.710605272+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace default" id=Zi6H9 namespace=default 2025-12-08T17:47:14.710605272+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=rYTsX namespace=kube-public 2025-12-08T17:47:14.710605272+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=rYTsX namespace=kube-public 2025-12-08T17:47:14.712130391+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace kube-public" id=rYTsX namespace=kube-public 2025-12-08T17:47:14.712130391+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=YkXHB namespace=kube-system 2025-12-08T17:47:14.712130391+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=YkXHB namespace=kube-system 2025-12-08T17:47:14.712346787+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace kube-node-lease" id=oHtMM namespace=kube-node-lease 2025-12-08T17:47:14.712346787+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=zrb+I namespace=openshift 2025-12-08T17:47:14.712346787+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=zrb+I namespace=openshift 2025-12-08T17:47:14.714507416+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace kube-system" id=YkXHB namespace=kube-system 2025-12-08T17:47:14.714507416+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=SpO39 namespace=openshift-apiserver 2025-12-08T17:47:14.714507416+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=SpO39 namespace=openshift-apiserver 2025-12-08T17:47:14.714675271+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace openshift" id=zrb+I namespace=openshift 2025-12-08T17:47:14.714675271+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=UWDe/ namespace=openshift-apiserver-operator 2025-12-08T17:47:14.714688801+00:00 stderr F time="2025-12-08T17:47:14Z" 
level=info msg="checking if subscriptions need update" id=UWDe/ namespace=openshift-apiserver-operator 2025-12-08T17:47:14.718258443+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace openshift-apiserver" id=SpO39 namespace=openshift-apiserver 2025-12-08T17:47:14.718258443+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=3C3Mj namespace=openshift-authentication 2025-12-08T17:47:14.718258443+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=3C3Mj namespace=openshift-authentication 2025-12-08T17:47:14.719298907+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace openshift-apiserver-operator" id=UWDe/ namespace=openshift-apiserver-operator 2025-12-08T17:47:14.719298907+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=fda0I namespace=openshift-authentication-operator 2025-12-08T17:47:14.719298907+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=fda0I namespace=openshift-authentication-operator 2025-12-08T17:47:14.719744931+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace openshift-authentication" id=3C3Mj namespace=openshift-authentication 2025-12-08T17:47:14.719744931+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=n+33u namespace=openshift-cloud-network-config-controller 2025-12-08T17:47:14.719744931+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=n+33u namespace=openshift-cloud-network-config-controller 2025-12-08T17:47:14.720699370+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace openshift-authentication-operator" id=fda0I namespace=openshift-authentication-operator 2025-12-08T17:47:14.720699370+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=iDKy0 namespace=openshift-cloud-platform-infra 2025-12-08T17:47:14.720699370+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=iDKy0 namespace=openshift-cloud-platform-infra 2025-12-08T17:47:14.912381164+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="No subscriptions were found in namespace openshift-cloud-network-config-controller" id=n+33u namespace=openshift-cloud-network-config-controller 2025-12-08T17:47:14.912417335+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="resolving sources" id=wmR8R namespace=openshift-cluster-machine-approver 2025-12-08T17:47:14.912417335+00:00 stderr F time="2025-12-08T17:47:14Z" level=info msg="checking if subscriptions need update" id=wmR8R namespace=openshift-cluster-machine-approver 2025-12-08T17:47:15.120557198+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="No subscriptions were found in namespace openshift-cloud-platform-infra" id=iDKy0 namespace=openshift-cloud-platform-infra 2025-12-08T17:47:15.120557198+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="resolving sources" id=5Wl0j namespace=openshift-cluster-samples-operator 2025-12-08T17:47:15.120557198+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="checking if subscriptions need update" id=5Wl0j namespace=openshift-cluster-samples-operator 2025-12-08T17:47:15.311685104+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="No subscriptions were found in namespace 
openshift-cluster-machine-approver" id=wmR8R namespace=openshift-cluster-machine-approver 2025-12-08T17:47:15.311685104+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="resolving sources" id=dqwu8 namespace=openshift-cluster-storage-operator 2025-12-08T17:47:15.311685104+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="checking if subscriptions need update" id=dqwu8 namespace=openshift-cluster-storage-operator 2025-12-08T17:47:15.512374462+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="No subscriptions were found in namespace openshift-cluster-samples-operator" id=5Wl0j namespace=openshift-cluster-samples-operator 2025-12-08T17:47:15.512374462+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="resolving sources" id=k+h65 namespace=openshift-cluster-version 2025-12-08T17:47:15.512440584+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="checking if subscriptions need update" id=k+h65 namespace=openshift-cluster-version 2025-12-08T17:47:15.918272959+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="No subscriptions were found in namespace openshift-cluster-storage-operator" id=dqwu8 namespace=openshift-cluster-storage-operator 2025-12-08T17:47:15.918395523+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="resolving sources" id=WyHfg namespace=openshift-config 2025-12-08T17:47:15.918436104+00:00 stderr F time="2025-12-08T17:47:15Z" level=info msg="checking if subscriptions need update" id=WyHfg namespace=openshift-config 2025-12-08T17:47:16.111217122+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="No subscriptions were found in namespace openshift-cluster-version" id=k+h65 namespace=openshift-cluster-version 2025-12-08T17:47:16.111217122+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="resolving sources" id=LewO2 namespace=openshift-config-managed 2025-12-08T17:47:16.111252833+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="checking if subscriptions need update" id=LewO2 namespace=openshift-config-managed 2025-12-08T17:47:16.312570501+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="No subscriptions were found in namespace openshift-config" id=WyHfg namespace=openshift-config 2025-12-08T17:47:16.312570501+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="resolving sources" id=+TYhV namespace=openshift-config-operator 2025-12-08T17:47:16.312570501+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="checking if subscriptions need update" id=+TYhV namespace=openshift-config-operator 2025-12-08T17:47:16.511589716+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="No subscriptions were found in namespace openshift-config-managed" id=LewO2 namespace=openshift-config-managed 2025-12-08T17:47:16.511694379+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="resolving sources" id=rf85W namespace=openshift-console 2025-12-08T17:47:16.511719680+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="checking if subscriptions need update" id=rf85W namespace=openshift-console 2025-12-08T17:47:16.711338094+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="No subscriptions were found in namespace openshift-config-operator" id=+TYhV namespace=openshift-config-operator 2025-12-08T17:47:16.711455458+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="resolving sources" id=VlUhj namespace=openshift-console-operator 2025-12-08T17:47:16.711485029+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="checking if subscriptions need update" id=VlUhj 
namespace=openshift-console-operator 2025-12-08T17:47:16.911027300+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="No subscriptions were found in namespace openshift-console" id=rf85W namespace=openshift-console 2025-12-08T17:47:16.911027300+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="resolving sources" id=dLQXs namespace=openshift-console-user-settings 2025-12-08T17:47:16.911027300+00:00 stderr F time="2025-12-08T17:47:16Z" level=info msg="checking if subscriptions need update" id=dLQXs namespace=openshift-console-user-settings 2025-12-08T17:47:17.111847902+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="No subscriptions were found in namespace openshift-console-operator" id=VlUhj namespace=openshift-console-operator 2025-12-08T17:47:17.111847902+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="resolving sources" id=sWQ/D namespace=openshift-controller-manager 2025-12-08T17:47:17.111922734+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="checking if subscriptions need update" id=sWQ/D namespace=openshift-controller-manager 2025-12-08T17:47:17.310122013+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="No subscriptions were found in namespace openshift-console-user-settings" id=dLQXs namespace=openshift-console-user-settings 2025-12-08T17:47:17.310122013+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="resolving sources" id=gO/C/ namespace=openshift-controller-manager-operator 2025-12-08T17:47:17.310161554+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="checking if subscriptions need update" id=gO/C/ namespace=openshift-controller-manager-operator 2025-12-08T17:47:17.511144001+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="No subscriptions were found in namespace openshift-controller-manager" id=sWQ/D namespace=openshift-controller-manager 2025-12-08T17:47:17.511144001+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="resolving sources" id=U+zRZ namespace=openshift-dns 2025-12-08T17:47:17.511144001+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="checking if subscriptions need update" id=U+zRZ namespace=openshift-dns 2025-12-08T17:47:17.711422196+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="No subscriptions were found in namespace openshift-controller-manager-operator" id=gO/C/ namespace=openshift-controller-manager-operator 2025-12-08T17:47:17.711422196+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="resolving sources" id=Ndzr5 namespace=openshift-dns-operator 2025-12-08T17:47:17.711422196+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="checking if subscriptions need update" id=Ndzr5 namespace=openshift-dns-operator 2025-12-08T17:47:17.915345175+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="No subscriptions were found in namespace openshift-dns" id=U+zRZ namespace=openshift-dns 2025-12-08T17:47:17.915413497+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="resolving sources" id=CASIy namespace=openshift-etcd 2025-12-08T17:47:17.915413497+00:00 stderr F time="2025-12-08T17:47:17Z" level=info msg="checking if subscriptions need update" id=CASIy namespace=openshift-etcd 2025-12-08T17:47:18.111935724+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="No subscriptions were found in namespace openshift-dns-operator" id=Ndzr5 namespace=openshift-dns-operator 2025-12-08T17:47:18.111935724+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="resolving sources" id=4x7Xp namespace=openshift-etcd-operator 
2025-12-08T17:47:18.111987896+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="checking if subscriptions need update" id=4x7Xp namespace=openshift-etcd-operator 2025-12-08T17:47:18.313476439+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="No subscriptions were found in namespace openshift-etcd" id=CASIy namespace=openshift-etcd 2025-12-08T17:47:18.313476439+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="resolving sources" id=y8G2m namespace=openshift-host-network 2025-12-08T17:47:18.313476439+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="checking if subscriptions need update" id=y8G2m namespace=openshift-host-network 2025-12-08T17:47:18.511743929+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="No subscriptions were found in namespace openshift-etcd-operator" id=4x7Xp namespace=openshift-etcd-operator 2025-12-08T17:47:18.511743929+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="resolving sources" id=kHq/H namespace=openshift-image-registry 2025-12-08T17:47:18.511743929+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="checking if subscriptions need update" id=kHq/H namespace=openshift-image-registry 2025-12-08T17:47:18.711412345+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="No subscriptions were found in namespace openshift-host-network" id=y8G2m namespace=openshift-host-network 2025-12-08T17:47:18.711412345+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="resolving sources" id=KB4bh namespace=openshift-infra 2025-12-08T17:47:18.711412345+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="checking if subscriptions need update" id=KB4bh namespace=openshift-infra 2025-12-08T17:47:18.911603967+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="No subscriptions were found in namespace openshift-image-registry" id=kHq/H namespace=openshift-image-registry 2025-12-08T17:47:18.911603967+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="resolving sources" id=8B2SD namespace=openshift-ingress 2025-12-08T17:47:18.911655628+00:00 stderr F time="2025-12-08T17:47:18Z" level=info msg="checking if subscriptions need update" id=8B2SD namespace=openshift-ingress 2025-12-08T17:47:19.111059915+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="No subscriptions were found in namespace openshift-infra" id=KB4bh namespace=openshift-infra 2025-12-08T17:47:19.111059915+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="resolving sources" id=67owW namespace=openshift-ingress-canary 2025-12-08T17:47:19.111059915+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="checking if subscriptions need update" id=67owW namespace=openshift-ingress-canary 2025-12-08T17:47:19.311489125+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="No subscriptions were found in namespace openshift-ingress" id=8B2SD namespace=openshift-ingress 2025-12-08T17:47:19.311663140+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="resolving sources" id=GwmOY namespace=openshift-ingress-operator 2025-12-08T17:47:19.311735942+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="checking if subscriptions need update" id=GwmOY namespace=openshift-ingress-operator 2025-12-08T17:47:19.511397257+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="No subscriptions were found in namespace openshift-ingress-canary" id=67owW namespace=openshift-ingress-canary 2025-12-08T17:47:19.511558052+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="resolving sources" id=Weuan 
namespace=openshift-kni-infra 2025-12-08T17:47:19.511625154+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="checking if subscriptions need update" id=Weuan namespace=openshift-kni-infra 2025-12-08T17:47:19.711448545+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="No subscriptions were found in namespace openshift-ingress-operator" id=GwmOY namespace=openshift-ingress-operator 2025-12-08T17:47:19.711595050+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="resolving sources" id=kTQCt namespace=openshift-kube-apiserver 2025-12-08T17:47:19.711653902+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="checking if subscriptions need update" id=kTQCt namespace=openshift-kube-apiserver 2025-12-08T17:47:19.912328188+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="No subscriptions were found in namespace openshift-kni-infra" id=Weuan namespace=openshift-kni-infra 2025-12-08T17:47:19.912328188+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="resolving sources" id=HxRov namespace=openshift-kube-apiserver-operator 2025-12-08T17:47:19.912328188+00:00 stderr F time="2025-12-08T17:47:19Z" level=info msg="checking if subscriptions need update" id=HxRov namespace=openshift-kube-apiserver-operator 2025-12-08T17:47:20.111521048+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="No subscriptions were found in namespace openshift-kube-apiserver" id=kTQCt namespace=openshift-kube-apiserver 2025-12-08T17:47:20.111650462+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="resolving sources" id=wHlvv namespace=openshift-kube-controller-manager 2025-12-08T17:47:20.111682903+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="checking if subscriptions need update" id=wHlvv namespace=openshift-kube-controller-manager 2025-12-08T17:47:20.312523696+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="No subscriptions were found in namespace openshift-kube-apiserver-operator" id=HxRov namespace=openshift-kube-apiserver-operator 2025-12-08T17:47:20.312523696+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="resolving sources" id=/N20Y namespace=openshift-kube-controller-manager-operator 2025-12-08T17:47:20.312587868+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="checking if subscriptions need update" id=/N20Y namespace=openshift-kube-controller-manager-operator 2025-12-08T17:47:20.511362526+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="No subscriptions were found in namespace openshift-kube-controller-manager" id=wHlvv namespace=openshift-kube-controller-manager 2025-12-08T17:47:20.511362526+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="resolving sources" id=kBO4W namespace=openshift-kube-scheduler 2025-12-08T17:47:20.511362526+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="checking if subscriptions need update" id=kBO4W namespace=openshift-kube-scheduler 2025-12-08T17:47:20.711210896+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="No subscriptions were found in namespace openshift-kube-controller-manager-operator" id=/N20Y namespace=openshift-kube-controller-manager-operator 2025-12-08T17:47:20.711210896+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="resolving sources" id=EjVYb namespace=openshift-kube-scheduler-operator 2025-12-08T17:47:20.711210896+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="checking if subscriptions need update" id=EjVYb namespace=openshift-kube-scheduler-operator 2025-12-08T17:47:20.913140264+00:00 stderr F 
time="2025-12-08T17:47:20Z" level=info msg="No subscriptions were found in namespace openshift-kube-scheduler" id=kBO4W namespace=openshift-kube-scheduler 2025-12-08T17:47:20.913140264+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="resolving sources" id=G9M3S namespace=openshift-kube-storage-version-migrator 2025-12-08T17:47:20.913140264+00:00 stderr F time="2025-12-08T17:47:20Z" level=info msg="checking if subscriptions need update" id=G9M3S namespace=openshift-kube-storage-version-migrator 2025-12-08T17:47:21.111302571+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="No subscriptions were found in namespace openshift-kube-scheduler-operator" id=EjVYb namespace=openshift-kube-scheduler-operator 2025-12-08T17:47:21.111355753+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="resolving sources" id=lhlPI namespace=openshift-kube-storage-version-migrator-operator 2025-12-08T17:47:21.111355753+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="checking if subscriptions need update" id=lhlPI namespace=openshift-kube-storage-version-migrator-operator 2025-12-08T17:47:21.312191645+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="No subscriptions were found in namespace openshift-kube-storage-version-migrator" id=G9M3S namespace=openshift-kube-storage-version-migrator 2025-12-08T17:47:21.312239307+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="resolving sources" id=wWpcS namespace=openshift-machine-api 2025-12-08T17:47:21.312239307+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="checking if subscriptions need update" id=wWpcS namespace=openshift-machine-api 2025-12-08T17:47:21.511024904+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="No subscriptions were found in namespace openshift-kube-storage-version-migrator-operator" id=lhlPI namespace=openshift-kube-storage-version-migrator-operator 2025-12-08T17:47:21.511079296+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="resolving sources" id=5tZ0V namespace=openshift-machine-config-operator 2025-12-08T17:47:21.511100296+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="checking if subscriptions need update" id=5tZ0V namespace=openshift-machine-config-operator 2025-12-08T17:47:21.711199485+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="No subscriptions were found in namespace openshift-machine-api" id=wWpcS namespace=openshift-machine-api 2025-12-08T17:47:21.711242336+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="resolving sources" id=XdrQs namespace=openshift-marketplace 2025-12-08T17:47:21.711242336+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="checking if subscriptions need update" id=XdrQs namespace=openshift-marketplace 2025-12-08T17:47:21.915297510+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="No subscriptions were found in namespace openshift-machine-config-operator" id=5tZ0V namespace=openshift-machine-config-operator 2025-12-08T17:47:21.915297510+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="resolving sources" id=c/5m0 namespace=openshift-monitoring 2025-12-08T17:47:21.915297510+00:00 stderr F time="2025-12-08T17:47:21Z" level=info msg="checking if subscriptions need update" id=c/5m0 namespace=openshift-monitoring 2025-12-08T17:47:22.113655794+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="No subscriptions were found in namespace openshift-marketplace" id=XdrQs namespace=openshift-marketplace 2025-12-08T17:47:22.113655794+00:00 stderr F time="2025-12-08T17:47:22Z" 
level=info msg="resolving sources" id=fwSxE namespace=openshift-multus 2025-12-08T17:47:22.113655794+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="checking if subscriptions need update" id=fwSxE namespace=openshift-multus 2025-12-08T17:47:22.346296289+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="No subscriptions were found in namespace openshift-monitoring" id=c/5m0 namespace=openshift-monitoring 2025-12-08T17:47:22.346296289+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="resolving sources" id=DWnql namespace=openshift-network-console 2025-12-08T17:47:22.346296289+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="checking if subscriptions need update" id=DWnql namespace=openshift-network-console 2025-12-08T17:47:22.510756965+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="No subscriptions were found in namespace openshift-multus" id=fwSxE namespace=openshift-multus 2025-12-08T17:47:22.510826027+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="resolving sources" id=i/ZB/ namespace=openshift-network-diagnostics 2025-12-08T17:47:22.510848888+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="checking if subscriptions need update" id=i/ZB/ namespace=openshift-network-diagnostics 2025-12-08T17:47:22.716501701+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=yGVz6 2025-12-08T17:47:22.716594834+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=yGVz6 2025-12-08T17:47:22.717084819+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=UoId2 2025-12-08T17:47:22.717129401+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=UoId2 2025-12-08T17:47:22.723642506+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=yGVz6 2025-12-08T17:47:22.723717759+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=yGVz6 2025-12-08T17:47:22.724383029+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="evaluating current pod" 
catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=UoId2 2025-12-08T17:47:22.724428691+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=UoId2 2025-12-08T17:47:22.915463824+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="No subscriptions were found in namespace openshift-network-console" id=DWnql namespace=openshift-network-console 2025-12-08T17:47:22.915463824+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="resolving sources" id=U6EEM namespace=openshift-network-node-identity 2025-12-08T17:47:22.915463824+00:00 stderr F time="2025-12-08T17:47:22Z" level=info msg="checking if subscriptions need update" id=U6EEM namespace=openshift-network-node-identity 2025-12-08T17:47:23.110616298+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="No subscriptions were found in namespace openshift-network-diagnostics" id=i/ZB/ namespace=openshift-network-diagnostics 2025-12-08T17:47:23.110641539+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="resolving sources" id=lDVsW namespace=openshift-network-operator 2025-12-08T17:47:23.110641539+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="checking if subscriptions need update" id=lDVsW namespace=openshift-network-operator 2025-12-08T17:47:23.114552361+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=3lc4Y 2025-12-08T17:47:23.114552361+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=3lc4Y 2025-12-08T17:47:23.516696721+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="No subscriptions were found in namespace openshift-network-node-identity" id=U6EEM namespace=openshift-network-node-identity 2025-12-08T17:47:23.516696721+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="resolving sources" id=5IOFS namespace=openshift-node 2025-12-08T17:47:23.516696721+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="checking if subscriptions need update" id=5IOFS namespace=openshift-node 2025-12-08T17:47:23.710500872+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="No subscriptions were found in namespace openshift-network-operator" id=lDVsW namespace=openshift-network-operator 2025-12-08T17:47:23.710500872+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="resolving sources" id=6Yxr0 namespace=openshift-nutanix-infra 2025-12-08T17:47:23.710500872+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="checking if subscriptions need update" id=6Yxr0 namespace=openshift-nutanix-infra 2025-12-08T17:47:23.918002523+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="evaluating current pod" 
catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=4Go1E 2025-12-08T17:47:23.918002523+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=4Go1E 2025-12-08T17:47:23.925102947+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="No subscriptions were found in namespace openshift-node" id=5IOFS namespace=openshift-node 2025-12-08T17:47:23.925139249+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="resolving sources" id=Pr3iH namespace=openshift-oauth-apiserver 2025-12-08T17:47:23.925139249+00:00 stderr F time="2025-12-08T17:47:23Z" level=info msg="checking if subscriptions need update" id=Pr3iH namespace=openshift-oauth-apiserver 2025-12-08T17:47:24.120475757+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="No subscriptions were found in namespace openshift-nutanix-infra" id=6Yxr0 namespace=openshift-nutanix-infra 2025-12-08T17:47:24.120475757+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="resolving sources" id=Kg/VK namespace=openshift-openstack-infra 2025-12-08T17:47:24.120475757+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="checking if subscriptions need update" id=Kg/VK namespace=openshift-openstack-infra 2025-12-08T17:47:24.120475757+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=3lc4Y 2025-12-08T17:47:24.120475757+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=3lc4Y 2025-12-08T17:47:24.311104478+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="No subscriptions were found in namespace openshift-oauth-apiserver" id=Pr3iH namespace=openshift-oauth-apiserver 2025-12-08T17:47:24.311104478+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="resolving sources" id=Tl3iW namespace=openshift-operator-lifecycle-manager 2025-12-08T17:47:24.311104478+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="checking if subscriptions need update" id=Tl3iW namespace=openshift-operator-lifecycle-manager 2025-12-08T17:47:24.512241409+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="No subscriptions were found in namespace openshift-openstack-infra" id=Kg/VK namespace=openshift-openstack-infra 2025-12-08T17:47:24.512241409+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="resolving sources" id=X/LKy namespace=openshift-operators 2025-12-08T17:47:24.512241409+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="checking if subscriptions need update" id=X/LKy namespace=openshift-operators 2025-12-08T17:47:24.714636251+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators 
catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=4Go1E 2025-12-08T17:47:24.714636251+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=4Go1E 2025-12-08T17:47:24.911270921+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="No subscriptions were found in namespace openshift-operator-lifecycle-manager" id=Tl3iW namespace=openshift-operator-lifecycle-manager 2025-12-08T17:47:24.911318402+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="resolving sources" id=Gk/0o namespace=openshift-ovirt-infra 2025-12-08T17:47:24.911318402+00:00 stderr F time="2025-12-08T17:47:24Z" level=info msg="checking if subscriptions need update" id=Gk/0o namespace=openshift-ovirt-infra 2025-12-08T17:47:25.110592285+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="No subscriptions were found in namespace openshift-operators" id=X/LKy namespace=openshift-operators 2025-12-08T17:47:25.110592285+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="resolving sources" id=oD52S namespace=openshift-ovn-kubernetes 2025-12-08T17:47:25.110592285+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="checking if subscriptions need update" id=oD52S namespace=openshift-ovn-kubernetes 2025-12-08T17:47:25.115930993+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=UfECE 2025-12-08T17:47:25.115930993+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=UfECE 2025-12-08T17:47:25.511660680+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="No subscriptions were found in namespace openshift-ovirt-infra" id=Gk/0o namespace=openshift-ovirt-infra 2025-12-08T17:47:25.511660680+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="resolving sources" id=4IzY2 namespace=openshift-route-controller-manager 2025-12-08T17:47:25.511660680+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="checking if subscriptions need update" id=4IzY2 namespace=openshift-route-controller-manager 2025-12-08T17:47:25.710724887+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="No subscriptions were found in namespace openshift-ovn-kubernetes" id=oD52S namespace=openshift-ovn-kubernetes 2025-12-08T17:47:25.710822600+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="resolving sources" id=RxEs9 namespace=openshift-service-ca 2025-12-08T17:47:25.710934663+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="checking if subscriptions need update" id=RxEs9 namespace=openshift-service-ca 2025-12-08T17:47:25.922496433+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace 
catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=cSbcU 2025-12-08T17:47:25.922653778+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=cSbcU 2025-12-08T17:47:25.923374161+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="No subscriptions were found in namespace openshift-route-controller-manager" id=4IzY2 namespace=openshift-route-controller-manager 2025-12-08T17:47:25.923434353+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="resolving sources" id=5ZVQi namespace=openshift-service-ca-operator 2025-12-08T17:47:25.923456693+00:00 stderr F time="2025-12-08T17:47:25Z" level=info msg="checking if subscriptions need update" id=5ZVQi namespace=openshift-service-ca-operator 2025-12-08T17:47:26.119099012+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="No subscriptions were found in namespace openshift-service-ca" id=RxEs9 namespace=openshift-service-ca 2025-12-08T17:47:26.119099012+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="resolving sources" id=K84CS namespace=openshift-user-workload-monitoring 2025-12-08T17:47:26.119099012+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="checking if subscriptions need update" id=K84CS namespace=openshift-user-workload-monitoring 2025-12-08T17:47:26.119099012+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=UfECE 2025-12-08T17:47:26.119099012+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=UfECE 2025-12-08T17:47:26.311392116+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="No subscriptions were found in namespace openshift-service-ca-operator" id=5ZVQi namespace=openshift-service-ca-operator 2025-12-08T17:47:26.311392116+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="resolving sources" id=w7eUi namespace=openshift-vsphere-infra 2025-12-08T17:47:26.311392116+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="checking if subscriptions need update" id=w7eUi namespace=openshift-vsphere-infra 2025-12-08T17:47:26.511542926+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="No subscriptions were found in namespace openshift-user-workload-monitoring" id=K84CS namespace=openshift-user-workload-monitoring 2025-12-08T17:47:26.710982744+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="No subscriptions were found in namespace openshift-vsphere-infra" id=w7eUi namespace=openshift-vsphere-infra 2025-12-08T17:47:26.916918007+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true 
current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=iPGOS 2025-12-08T17:47:26.916918007+00:00 stderr F time="2025-12-08T17:47:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=iPGOS 2025-12-08T17:47:27.115276200+00:00 stderr F time="2025-12-08T17:47:27Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=cSbcU 2025-12-08T17:47:27.115276200+00:00 stderr F time="2025-12-08T17:47:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=cSbcU 2025-12-08T17:47:27.715622949+00:00 stderr F time="2025-12-08T17:47:27Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=iPGOS 2025-12-08T17:47:27.715622949+00:00 stderr F time="2025-12-08T17:47:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=iPGOS 2025-12-08T17:47:30.636577658+00:00 stderr F time="2025-12-08T17:47:30Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=Z0OHL 2025-12-08T17:47:30.636577658+00:00 stderr F time="2025-12-08T17:47:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=Z0OHL 2025-12-08T17:47:30.636577658+00:00 stderr F time="2025-12-08T17:47:30Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=qAdo5 2025-12-08T17:47:30.636577658+00:00 stderr F time="2025-12-08T17:47:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=qAdo5 2025-12-08T17:47:30.647775810+00:00 stderr F time="2025-12-08T17:47:30Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true 
current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=Z0OHL 2025-12-08T17:47:30.647775810+00:00 stderr F time="2025-12-08T17:47:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=Z0OHL 2025-12-08T17:47:30.648622327+00:00 stderr F time="2025-12-08T17:47:30Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=qAdo5 2025-12-08T17:47:30.648622327+00:00 stderr F time="2025-12-08T17:47:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=qAdo5 2025-12-08T17:47:31.226600391+00:00 stderr F time="2025-12-08T17:47:31Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=g271V 2025-12-08T17:47:31.226679094+00:00 stderr F time="2025-12-08T17:47:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=g271V 2025-12-08T17:47:31.428220318+00:00 stderr F time="2025-12-08T17:47:31Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=Nkrlz 2025-12-08T17:47:31.428220318+00:00 stderr F time="2025-12-08T17:47:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=Nkrlz 2025-12-08T17:47:32.428703582+00:00 stderr F time="2025-12-08T17:47:32Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=g271V 2025-12-08T17:47:32.428703582+00:00 stderr F time="2025-12-08T17:47:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=g271V 2025-12-08T17:47:32.628932106+00:00 stderr F time="2025-12-08T17:47:32Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true 
correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=Nkrlz 2025-12-08T17:47:32.628988578+00:00 stderr F time="2025-12-08T17:47:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=Nkrlz 2025-12-08T17:47:49.978478912+00:00 stderr F time="2025-12-08T17:47:49Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=i1NiK 2025-12-08T17:47:49.978478912+00:00 stderr F time="2025-12-08T17:47:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=i1NiK 2025-12-08T17:47:49.978580855+00:00 stderr F time="2025-12-08T17:47:49Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=cxf5/ 2025-12-08T17:47:49.978580855+00:00 stderr F time="2025-12-08T17:47:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=cxf5/ 2025-12-08T17:47:49.989002743+00:00 stderr F time="2025-12-08T17:47:49Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=i1NiK 2025-12-08T17:47:49.989002743+00:00 stderr F time="2025-12-08T17:47:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=i1NiK 2025-12-08T17:47:50.170131385+00:00 stderr F time="2025-12-08T17:47:50Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=cxf5/ 2025-12-08T17:47:50.170131385+00:00 stderr F time="2025-12-08T17:47:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=cxf5/ 2025-12-08T17:47:50.769540103+00:00 stderr F time="2025-12-08T17:47:50Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace 
correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=KFMMO 2025-12-08T17:47:50.769540103+00:00 stderr F time="2025-12-08T17:47:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=KFMMO 2025-12-08T17:47:50.970611653+00:00 stderr F time="2025-12-08T17:47:50Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=is9NX 2025-12-08T17:47:50.970611653+00:00 stderr F time="2025-12-08T17:47:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=is9NX 2025-12-08T17:47:51.969776366+00:00 stderr F time="2025-12-08T17:47:51Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=KFMMO 2025-12-08T17:47:51.969776366+00:00 stderr F time="2025-12-08T17:47:51Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=KFMMO 2025-12-08T17:47:52.170691410+00:00 stderr F time="2025-12-08T17:47:52Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=is9NX 2025-12-08T17:47:52.170691410+00:00 stderr F time="2025-12-08T17:47:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=is9NX 2025-12-08T17:47:54.096208107+00:00 stderr F time="2025-12-08T17:47:54Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=Svrao 2025-12-08T17:47:54.096208107+00:00 stderr F time="2025-12-08T17:47:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=Svrao 2025-12-08T17:47:54.096480355+00:00 stderr F time="2025-12-08T17:47:54Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators 
catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=T6OrV 2025-12-08T17:47:54.096480355+00:00 stderr F time="2025-12-08T17:47:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=T6OrV 2025-12-08T17:47:54.169166655+00:00 stderr F time="2025-12-08T17:47:54Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=T6OrV 2025-12-08T17:47:54.169166655+00:00 stderr F time="2025-12-08T17:47:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=T6OrV 2025-12-08T17:47:54.369532546+00:00 stderr F time="2025-12-08T17:47:54Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=Svrao 2025-12-08T17:47:54.369532546+00:00 stderr F time="2025-12-08T17:47:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=Svrao 2025-12-08T17:47:54.969860470+00:00 stderr F time="2025-12-08T17:47:54Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=ivQc+ 2025-12-08T17:47:54.969860470+00:00 stderr F time="2025-12-08T17:47:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=ivQc+ 2025-12-08T17:47:55.170428318+00:00 stderr F time="2025-12-08T17:47:55Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=579kB 2025-12-08T17:47:55.170428318+00:00 stderr F time="2025-12-08T17:47:55Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=579kB 2025-12-08T17:47:56.169078583+00:00 stderr F time="2025-12-08T17:47:56Z" level=info msg="evaluating current pod" 
catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=ivQc+ 2025-12-08T17:47:56.169078583+00:00 stderr F time="2025-12-08T17:47:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=ivQc+ 2025-12-08T17:47:56.369430464+00:00 stderr F time="2025-12-08T17:47:56Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=579kB 2025-12-08T17:47:56.369430464+00:00 stderr F time="2025-12-08T17:47:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=579kB 2025-12-08T17:47:58.498313245+00:00 stderr F time="2025-12-08T17:47:58Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=7nSHg 2025-12-08T17:47:58.498313245+00:00 stderr F time="2025-12-08T17:47:58Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=7nSHg 2025-12-08T17:47:58.499992556+00:00 stderr F time="2025-12-08T17:47:58Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=fAu7Q 2025-12-08T17:47:58.499992556+00:00 stderr F time="2025-12-08T17:47:58Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=fAu7Q 2025-12-08T17:47:58.506110151+00:00 stderr F time="2025-12-08T17:47:58Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=7nSHg 2025-12-08T17:47:58.506110151+00:00 stderr F time="2025-12-08T17:47:58Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=7nSHg 2025-12-08T17:47:58.695010205+00:00 stderr F time="2025-12-08T17:47:58Z" level=info msg="evaluating 
current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=fAu7Q 2025-12-08T17:47:58.695010205+00:00 stderr F time="2025-12-08T17:47:58Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=fAu7Q 2025-12-08T17:47:59.297521825+00:00 stderr F time="2025-12-08T17:47:59Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=HfNMW 2025-12-08T17:47:59.297600317+00:00 stderr F time="2025-12-08T17:47:59Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=HfNMW 2025-12-08T17:47:59.494497015+00:00 stderr F time="2025-12-08T17:47:59Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=gVzJr 2025-12-08T17:47:59.494497015+00:00 stderr F time="2025-12-08T17:47:59Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=gVzJr 2025-12-08T17:48:00.694170022+00:00 stderr F time="2025-12-08T17:48:00Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=HfNMW 2025-12-08T17:48:00.694170022+00:00 stderr F time="2025-12-08T17:48:00Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=HfNMW 2025-12-08T17:48:00.894941456+00:00 stderr F time="2025-12-08T17:48:00Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=gVzJr 2025-12-08T17:48:00.894941456+00:00 stderr F time="2025-12-08T17:48:00Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-zfv6j current-pod.namespace=openshift-marketplace id=gVzJr 2025-12-08T17:48:09.342935745+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="evaluating 
current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=m7bnv 2025-12-08T17:48:09.342935745+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=m7bnv 2025-12-08T17:48:09.353106913+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=q61wP 2025-12-08T17:48:09.353106913+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=q61wP 2025-12-08T17:48:09.353624988+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=m7bnv 2025-12-08T17:48:09.353624988+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=m7bnv 2025-12-08T17:48:09.358416813+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=certified-operators state.State=IDLE" 2025-12-08T17:48:09.364250870+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=certified-operators state.State=CONNECTING" 2025-12-08T17:48:09.378030937+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=community-operators state.State=IDLE" 2025-12-08T17:48:09.378030937+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=community-operators state.State=CONNECTING" 2025-12-08T17:48:09.380423709+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=certified-operators state.State=TRANSIENT_FAILURE" 2025-12-08T17:48:09.399949440+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-marketplace state.State=IDLE" 2025-12-08T17:48:09.399949440+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-marketplace state.State=CONNECTING" 2025-12-08T17:48:09.401329581+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=community-operators 
state.State=TRANSIENT_FAILURE" 2025-12-08T17:48:09.408617892+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-marketplace state.State=TRANSIENT_FAILURE" 2025-12-08T17:48:09.419225253+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-operators state.State=IDLE" 2025-12-08T17:48:09.419225253+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-operators state.State=CONNECTING" 2025-12-08T17:48:09.419225253+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-operators state.State=TRANSIENT_FAILURE" 2025-12-08T17:48:09.542268465+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=OY1Eh 2025-12-08T17:48:09.542268465+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-lxwl6 current-pod.namespace=openshift-marketplace id=OY1Eh 2025-12-08T17:48:09.739139803+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=q61wP 2025-12-08T17:48:09.739139803+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-r22jf current-pod.namespace=openshift-marketplace id=q61wP 2025-12-08T17:48:10.542084386+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=4M/9Q 2025-12-08T17:48:10.542084386+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-rvglb current-pod.namespace=openshift-marketplace id=4M/9Q 2025-12-08T17:48:10.739548690+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=OY1Eh 2025-12-08T17:48:10.739548690+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="registry pods invalid, need to overwrite" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=OY1Eh 2025-12-08T17:48:10.739606532+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="creating desired pod" 
catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=OY1Eh pod.name= pod.namespace=openshift-marketplace 2025-12-08T17:48:11.149830623+00:00 stderr F I1208 17:48:11.148807 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:48:11.739402072+00:00 stderr F time="2025-12-08T17:48:11Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=4M/9Q 2025-12-08T17:48:11.739402072+00:00 stderr F time="2025-12-08T17:48:11Z" level=info msg="registry pods invalid, need to overwrite" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=4M/9Q 2025-12-08T17:48:11.739447943+00:00 stderr F time="2025-12-08T17:48:11Z" level=info msg="creating desired pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace id=4M/9Q pod.name= pod.namespace=openshift-marketplace 2025-12-08T17:48:11.940066862+00:00 stderr F time="2025-12-08T17:48:11Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=LncKd 2025-12-08T17:48:11.940066862+00:00 stderr F time="2025-12-08T17:48:11Z" level=error msg="registry service not healthy: one or more required resources are missing" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=LncKd isCurrentServiceAccountNil=false isServiceNil=false numCurrentPods=0 2025-12-08T17:48:12.145448567+00:00 stderr F I1208 17:48:12.145398 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:48:12.940134230+00:00 stderr F time="2025-12-08T17:48:12Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=1bLyS 2025-12-08T17:48:12.940134230+00:00 stderr F time="2025-12-08T17:48:12Z" level=error msg="registry service not healthy: one or more required resources are missing" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=1bLyS isCurrentServiceAccountNil=false isServiceNil=false numCurrentPods=0 2025-12-08T17:48:13.139949356+00:00 stderr F time="2025-12-08T17:48:13Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=LncKd 2025-12-08T17:48:13.139949356+00:00 stderr F time="2025-12-08T17:48:13Z" level=info msg="registry pods invalid, need to overwrite" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=LncKd 2025-12-08T17:48:13.140023428+00:00 stderr F time="2025-12-08T17:48:13Z" level=info msg="creating desired pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=LncKd pod.name= pod.namespace=openshift-marketplace 2025-12-08T17:48:13.546390153+00:00 stderr F I1208 17:48:13.546230 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:48:14.139288641+00:00 stderr F time="2025-12-08T17:48:14Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" 
catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=1bLyS 2025-12-08T17:48:14.139288641+00:00 stderr F time="2025-12-08T17:48:14Z" level=info msg="registry pods invalid, need to overwrite" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=1bLyS 2025-12-08T17:48:14.139341452+00:00 stderr F time="2025-12-08T17:48:14Z" level=info msg="creating desired pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=1bLyS pod.name= pod.namespace=openshift-marketplace 2025-12-08T17:48:14.340804288+00:00 stderr F time="2025-12-08T17:48:14Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=S4JN8 2025-12-08T17:48:14.340804288+00:00 stderr F time="2025-12-08T17:48:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=S4JN8 2025-12-08T17:48:14.543041857+00:00 stderr F I1208 17:48:14.542978 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:48:15.340182255+00:00 stderr F time="2025-12-08T17:48:15Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=iz0un 2025-12-08T17:48:15.340182255+00:00 stderr F time="2025-12-08T17:48:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=iz0un 2025-12-08T17:48:15.539576777+00:00 stderr F time="2025-12-08T17:48:15Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=S4JN8 2025-12-08T17:48:15.539576777+00:00 stderr F time="2025-12-08T17:48:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=S4JN8 2025-12-08T17:48:16.339377556+00:00 stderr F time="2025-12-08T17:48:16Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=KGRvO 2025-12-08T17:48:16.339377556+00:00 stderr F time="2025-12-08T17:48:16Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true 
current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=KGRvO 2025-12-08T17:48:16.540986336+00:00 stderr F time="2025-12-08T17:48:16Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=iz0un 2025-12-08T17:48:16.540986336+00:00 stderr F time="2025-12-08T17:48:16Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=iz0un 2025-12-08T17:48:17.341632830+00:00 stderr F time="2025-12-08T17:48:17Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=WURfz 2025-12-08T17:48:17.341632830+00:00 stderr F time="2025-12-08T17:48:17Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=WURfz 2025-12-08T17:48:17.540926210+00:00 stderr F time="2025-12-08T17:48:17Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=KGRvO 2025-12-08T17:48:17.540926210+00:00 stderr F time="2025-12-08T17:48:17Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=KGRvO 2025-12-08T17:48:18.339709627+00:00 stderr F time="2025-12-08T17:48:18Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=IyE+d 2025-12-08T17:48:18.339790820+00:00 stderr F time="2025-12-08T17:48:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=IyE+d 2025-12-08T17:48:18.543850054+00:00 stderr F time="2025-12-08T17:48:18Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=WURfz 2025-12-08T17:48:18.543850054+00:00 stderr F time="2025-12-08T17:48:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true 
correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=WURfz 2025-12-08T17:48:19.338919758+00:00 stderr F time="2025-12-08T17:48:19Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=aNHBc 2025-12-08T17:48:19.338919758+00:00 stderr F time="2025-12-08T17:48:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=aNHBc 2025-12-08T17:48:19.540084425+00:00 stderr F time="2025-12-08T17:48:19Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=IyE+d 2025-12-08T17:48:19.540084425+00:00 stderr F time="2025-12-08T17:48:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=IyE+d 2025-12-08T17:48:20.339928045+00:00 stderr F time="2025-12-08T17:48:20Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=JPeqD 2025-12-08T17:48:20.339928045+00:00 stderr F time="2025-12-08T17:48:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=JPeqD 2025-12-08T17:48:20.539022219+00:00 stderr F time="2025-12-08T17:48:20Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=aNHBc 2025-12-08T17:48:20.539022219+00:00 stderr F time="2025-12-08T17:48:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=aNHBc 2025-12-08T17:48:21.341100366+00:00 stderr F time="2025-12-08T17:48:21Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=pgq7b 2025-12-08T17:48:21.341100366+00:00 stderr F time="2025-12-08T17:48:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace 
correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=pgq7b 2025-12-08T17:48:21.539581931+00:00 stderr F time="2025-12-08T17:48:21Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=JPeqD 2025-12-08T17:48:21.539581931+00:00 stderr F time="2025-12-08T17:48:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=JPeqD 2025-12-08T17:48:22.340060070+00:00 stderr F time="2025-12-08T17:48:22Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=7Ro6X 2025-12-08T17:48:22.340060070+00:00 stderr F time="2025-12-08T17:48:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=7Ro6X 2025-12-08T17:48:22.540767702+00:00 stderr F time="2025-12-08T17:48:22Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=pgq7b 2025-12-08T17:48:22.540767702+00:00 stderr F time="2025-12-08T17:48:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=pgq7b 2025-12-08T17:48:23.338926791+00:00 stderr F time="2025-12-08T17:48:23Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=2LC/m 2025-12-08T17:48:23.338926791+00:00 stderr F time="2025-12-08T17:48:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=2LC/m 2025-12-08T17:48:23.539522821+00:00 stderr F time="2025-12-08T17:48:23Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=7Ro6X 2025-12-08T17:48:23.539522821+00:00 stderr F time="2025-12-08T17:48:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators 
catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=7Ro6X 2025-12-08T17:48:23.812185460+00:00 stderr F time="2025-12-08T17:48:23Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-marketplace state.State=READY" 2025-12-08T17:48:23.812185460+00:00 stderr F time="2025-12-08T17:48:23Z" level=info msg="resolving sources" id=JCNmq namespace=openshift-marketplace 2025-12-08T17:48:23.812185460+00:00 stderr F time="2025-12-08T17:48:23Z" level=info msg="checking if subscriptions need update" id=JCNmq namespace=openshift-marketplace 2025-12-08T17:48:23.815194991+00:00 stderr F time="2025-12-08T17:48:23Z" level=info msg="No subscriptions were found in namespace openshift-marketplace" id=JCNmq namespace=openshift-marketplace 2025-12-08T17:48:24.340108703+00:00 stderr F time="2025-12-08T17:48:24Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=HAN7Q 2025-12-08T17:48:24.340108703+00:00 stderr F time="2025-12-08T17:48:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=HAN7Q 2025-12-08T17:48:24.539801825+00:00 stderr F time="2025-12-08T17:48:24Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=2LC/m 2025-12-08T17:48:24.539801825+00:00 stderr F time="2025-12-08T17:48:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=2LC/m 2025-12-08T17:48:25.324680101+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=certified-operators state.State=READY" 2025-12-08T17:48:25.324731913+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="resolving sources" id=tzyvh namespace=openshift-marketplace 2025-12-08T17:48:25.324731913+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="checking if subscriptions need update" id=tzyvh namespace=openshift-marketplace 2025-12-08T17:48:25.328261209+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="No subscriptions were found in namespace openshift-marketplace" id=tzyvh namespace=openshift-marketplace 2025-12-08T17:48:25.342910223+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=817fB 2025-12-08T17:48:25.342910223+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators 
catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=817fB 2025-12-08T17:48:25.489375355+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=redhat-operators state.State=READY" 2025-12-08T17:48:25.489417426+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="resolving sources" id=1rU/X namespace=openshift-marketplace 2025-12-08T17:48:25.489417426+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="checking if subscriptions need update" id=1rU/X namespace=openshift-marketplace 2025-12-08T17:48:25.491982663+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="No subscriptions were found in namespace openshift-marketplace" id=1rU/X namespace=openshift-marketplace 2025-12-08T17:48:25.539097729+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=HAN7Q 2025-12-08T17:48:25.539163881+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=HAN7Q 2025-12-08T17:48:26.339481715+00:00 stderr F time="2025-12-08T17:48:26Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=2q3pd 2025-12-08T17:48:26.339570697+00:00 stderr F time="2025-12-08T17:48:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=2q3pd 2025-12-08T17:48:26.538699003+00:00 stderr F time="2025-12-08T17:48:26Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=817fB 2025-12-08T17:48:26.538823247+00:00 stderr F time="2025-12-08T17:48:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=817fB 2025-12-08T17:48:27.339530822+00:00 stderr F time="2025-12-08T17:48:27Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=eBGvy 2025-12-08T17:48:27.339608164+00:00 stderr F time="2025-12-08T17:48:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators 
catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=eBGvy 2025-12-08T17:48:27.539156532+00:00 stderr F time="2025-12-08T17:48:27Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=2q3pd 2025-12-08T17:48:27.539245465+00:00 stderr F time="2025-12-08T17:48:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=2q3pd 2025-12-08T17:48:28.339976202+00:00 stderr F time="2025-12-08T17:48:28Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=moxsR 2025-12-08T17:48:28.340036203+00:00 stderr F time="2025-12-08T17:48:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=moxsR 2025-12-08T17:48:28.540350884+00:00 stderr F time="2025-12-08T17:48:28Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=eBGvy 2025-12-08T17:48:28.540427696+00:00 stderr F time="2025-12-08T17:48:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=eBGvy 2025-12-08T17:48:29.340110821+00:00 stderr F time="2025-12-08T17:48:29Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=cioJ0 2025-12-08T17:48:29.340192554+00:00 stderr F time="2025-12-08T17:48:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=cioJ0 2025-12-08T17:48:29.540383130+00:00 stderr F time="2025-12-08T17:48:29Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=moxsR 2025-12-08T17:48:29.540383130+00:00 stderr F time="2025-12-08T17:48:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" 
catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=moxsR 2025-12-08T17:48:30.339673103+00:00 stderr F time="2025-12-08T17:48:30Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=OKf42 2025-12-08T17:48:30.339673103+00:00 stderr F time="2025-12-08T17:48:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=OKf42 2025-12-08T17:48:30.540157749+00:00 stderr F time="2025-12-08T17:48:30Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=cioJ0 2025-12-08T17:48:30.540157749+00:00 stderr F time="2025-12-08T17:48:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=cioJ0 2025-12-08T17:48:31.339420331+00:00 stderr F time="2025-12-08T17:48:31Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ZQHFL 2025-12-08T17:48:31.339420331+00:00 stderr F time="2025-12-08T17:48:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ZQHFL 2025-12-08T17:48:31.539420093+00:00 stderr F time="2025-12-08T17:48:31Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=OKf42 2025-12-08T17:48:31.539420093+00:00 stderr F time="2025-12-08T17:48:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=OKf42 2025-12-08T17:48:32.340390507+00:00 stderr F time="2025-12-08T17:48:32Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=eGaH4 2025-12-08T17:48:32.340390507+00:00 stderr F time="2025-12-08T17:48:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching 
hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=eGaH4 2025-12-08T17:48:32.539599114+00:00 stderr F time="2025-12-08T17:48:32Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ZQHFL 2025-12-08T17:48:32.539599114+00:00 stderr F time="2025-12-08T17:48:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ZQHFL 2025-12-08T17:48:33.140215895+00:00 stderr F time="2025-12-08T17:48:33Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=eGaH4 2025-12-08T17:48:33.140215895+00:00 stderr F time="2025-12-08T17:48:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=eGaH4 2025-12-08T17:48:36.582007839+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="state.Key.Namespace=openshift-marketplace state.Key.Name=community-operators state.State=READY" 2025-12-08T17:48:36.582065291+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="resolving sources" id=V6Ups namespace=openshift-marketplace 2025-12-08T17:48:36.582065291+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="checking if subscriptions need update" id=V6Ups namespace=openshift-marketplace 2025-12-08T17:48:36.584785643+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="No subscriptions were found in namespace openshift-marketplace" id=V6Ups namespace=openshift-marketplace 2025-12-08T17:48:36.586663989+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=lXAFw 2025-12-08T17:48:36.586663989+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=lXAFw 2025-12-08T17:48:36.595235079+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=lXAFw 2025-12-08T17:48:36.595235079+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and 
matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=lXAFw 2025-12-08T17:48:36.608517461+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ZxAxF 2025-12-08T17:48:36.608517461+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ZxAxF 2025-12-08T17:48:36.615953756+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ZxAxF 2025-12-08T17:48:36.616021488+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ZxAxF 2025-12-08T17:53:18.660374889+00:00 stderr F time="2025-12-08T17:53:18Z" level=info msg="resolving sources" id=ynVxA namespace=openstack 2025-12-08T17:53:18.660374889+00:00 stderr F time="2025-12-08T17:53:18Z" level=info msg="checking if subscriptions need update" id=ynVxA namespace=openstack 2025-12-08T17:53:18.669228660+00:00 stderr F time="2025-12-08T17:53:18Z" level=info msg="No subscriptions were found in namespace openstack" id=ynVxA namespace=openstack 2025-12-08T17:53:18.677273869+00:00 stderr F time="2025-12-08T17:53:18Z" level=info msg="resolving sources" id=/SauP namespace=openstack 2025-12-08T17:53:18.677273869+00:00 stderr F time="2025-12-08T17:53:18Z" level=info msg="checking if subscriptions need update" id=/SauP namespace=openstack 2025-12-08T17:53:18.761119258+00:00 stderr F time="2025-12-08T17:53:18Z" level=info msg="No subscriptions were found in namespace openstack" id=/SauP namespace=openstack 2025-12-08T17:53:18.761119258+00:00 stderr F time="2025-12-08T17:53:18Z" level=info msg="resolving sources" id=PZ56z namespace=openstack 2025-12-08T17:53:18.761119258+00:00 stderr F time="2025-12-08T17:53:18Z" level=info msg="checking if subscriptions need update" id=PZ56z namespace=openstack 2025-12-08T17:53:18.767940094+00:00 stderr F time="2025-12-08T17:53:18Z" level=info msg="No subscriptions were found in namespace openstack" id=PZ56z namespace=openstack 2025-12-08T17:53:19.342747613+00:00 stderr F time="2025-12-08T17:53:19Z" level=info msg="resolving sources" id=qzYfZ namespace=openstack-operators 2025-12-08T17:53:19.342747613+00:00 stderr F time="2025-12-08T17:53:19Z" level=info msg="checking if subscriptions need update" id=qzYfZ namespace=openstack-operators 2025-12-08T17:53:19.349335052+00:00 stderr F time="2025-12-08T17:53:19Z" level=info msg="No subscriptions were found in namespace openstack-operators" id=qzYfZ 
namespace=openstack-operators 2025-12-08T17:53:19.390251615+00:00 stderr F time="2025-12-08T17:53:19Z" level=info msg="resolving sources" id=GE248 namespace=openstack-operators 2025-12-08T17:53:19.390251615+00:00 stderr F time="2025-12-08T17:53:19Z" level=info msg="checking if subscriptions need update" id=GE248 namespace=openstack-operators 2025-12-08T17:53:19.448682804+00:00 stderr F time="2025-12-08T17:53:19Z" level=info msg="No subscriptions were found in namespace openstack-operators" id=GE248 namespace=openstack-operators 2025-12-08T17:53:19.449087765+00:00 stderr F time="2025-12-08T17:53:19Z" level=info msg="resolving sources" id=FQw7n namespace=openstack-operators 2025-12-08T17:53:19.449087765+00:00 stderr F time="2025-12-08T17:53:19Z" level=info msg="checking if subscriptions need update" id=FQw7n namespace=openstack-operators 2025-12-08T17:53:19.452108177+00:00 stderr F time="2025-12-08T17:53:19Z" level=info msg="No subscriptions were found in namespace openstack-operators" id=FQw7n namespace=openstack-operators 2025-12-08T17:54:14.397472340+00:00 stderr F time="2025-12-08T17:54:14Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=MDmEC 2025-12-08T17:54:14.397472340+00:00 stderr F time="2025-12-08T17:54:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=MDmEC 2025-12-08T17:54:14.408291434+00:00 stderr F time="2025-12-08T17:54:14Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=MDmEC 2025-12-08T17:54:14.408291434+00:00 stderr F time="2025-12-08T17:54:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=MDmEC 2025-12-08T17:54:14.421135444+00:00 stderr F time="2025-12-08T17:54:14Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=0LqNl 2025-12-08T17:54:14.421135444+00:00 stderr F time="2025-12-08T17:54:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=0LqNl 2025-12-08T17:54:14.430284392+00:00 stderr F time="2025-12-08T17:54:14Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=0LqNl 2025-12-08T17:54:14.430284392+00:00 
stderr F time="2025-12-08T17:54:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=0LqNl 2025-12-08T17:54:15.616094564+00:00 stderr F time="2025-12-08T17:54:15Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=4JHEs 2025-12-08T17:54:15.616094564+00:00 stderr F time="2025-12-08T17:54:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=4JHEs 2025-12-08T17:54:15.625314626+00:00 stderr F time="2025-12-08T17:54:15Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=4JHEs 2025-12-08T17:54:15.625314626+00:00 stderr F time="2025-12-08T17:54:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=4JHEs 2025-12-08T17:54:15.794512236+00:00 stderr F time="2025-12-08T17:54:15Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=Xq0cf 2025-12-08T17:54:15.794512236+00:00 stderr F time="2025-12-08T17:54:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=Xq0cf 2025-12-08T17:54:16.393858012+00:00 stderr F time="2025-12-08T17:54:16Z" level=info msg="evaluating current pod" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=Xq0cf 2025-12-08T17:54:16.393858012+00:00 stderr F time="2025-12-08T17:54:16Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-marketplace catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-marketplace-xp5vr current-pod.namespace=openshift-marketplace id=Xq0cf 2025-12-08T17:54:28.303950075+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="resolving sources" id=Zksfw namespace=service-telemetry 2025-12-08T17:54:28.303950075+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="checking if subscriptions need update" id=Zksfw namespace=service-telemetry 2025-12-08T17:54:28.314986842+00:00 stderr F 
time="2025-12-08T17:54:28Z" level=info msg="No subscriptions were found in namespace service-telemetry" id=Zksfw namespace=service-telemetry 2025-12-08T17:54:28.318129627+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="resolving sources" id=akFye namespace=service-telemetry 2025-12-08T17:54:28.318129627+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="checking if subscriptions need update" id=akFye namespace=service-telemetry 2025-12-08T17:54:28.399128256+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=M/Ihf 2025-12-08T17:54:28.399128256+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=M/Ihf 2025-12-08T17:54:28.408385285+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=M/Ihf 2025-12-08T17:54:28.408385285+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=M/Ihf 2025-12-08T17:54:28.408446677+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="catalog update required at 2025-12-08 17:54:28.408402336 +0000 UTC m=+608.370836818" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=M/Ihf 2025-12-08T17:54:28.409730071+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="No subscriptions were found in namespace service-telemetry" id=akFye namespace=service-telemetry 2025-12-08T17:54:28.409730071+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="resolving sources" id=HmKsB namespace=service-telemetry 2025-12-08T17:54:28.409730071+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="checking if subscriptions need update" id=HmKsB namespace=service-telemetry 2025-12-08T17:54:28.411630662+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="No subscriptions were found in namespace service-telemetry" id=HmKsB namespace=service-telemetry 2025-12-08T17:54:28.419865444+00:00 stderr F I1208 17:54:28.419819 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:54:28.420054049+00:00 stderr F time="2025-12-08T17:54:28Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=M/Ihf 2025-12-08T17:54:28.420054049+00:00 stderr F time="2025-12-08T17:54:28Z" level=error msg="error ensuring registry server: ensure update pod 
error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=M/Ihf 2025-12-08T17:54:28.420068059+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=M/Ihf 2025-12-08T17:54:28.436597074+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=gbOGT 2025-12-08T17:54:28.436597074+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=gbOGT 2025-12-08T17:54:28.602546910+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=gbOGT 2025-12-08T17:54:28.602546910+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=gbOGT 2025-12-08T17:54:28.602546910+00:00 stderr F time="2025-12-08T17:54:28Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=gbOGT 2025-12-08T17:54:28.602546910+00:00 stderr F time="2025-12-08T17:54:28Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=gbOGT 2025-12-08T17:54:28.602546910+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=gbOGT 2025-12-08T17:54:28.604496463+00:00 stderr F time="2025-12-08T17:54:28Z" level=error msg="UpdateStatus - error while setting CatalogSource status" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="Operation cannot be fulfilled on catalogsources.operators.coreos.com \"certified-operators\": the object has been modified; please apply your changes to the latest version and try again" id=gbOGT 2025-12-08T17:54:28.604599595+00:00 stderr F E1208 17:54:28.604584 1 queueinformer_operator.go:312] "Unhandled Error" 
err="sync \"openshift-marketplace/certified-operators\" failed: Operation cannot be fulfilled on catalogsources.operators.coreos.com \"certified-operators\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:54:28.998187647+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=pPUtd 2025-12-08T17:54:28.998245338+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=pPUtd 2025-12-08T17:54:29.597626009+00:00 stderr F time="2025-12-08T17:54:29Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=pPUtd 2025-12-08T17:54:29.597742942+00:00 stderr F time="2025-12-08T17:54:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=pPUtd 2025-12-08T17:54:29.598014460+00:00 stderr F time="2025-12-08T17:54:29Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=pPUtd 2025-12-08T17:54:29.598124943+00:00 stderr F time="2025-12-08T17:54:29Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=pPUtd 2025-12-08T17:54:29.598202225+00:00 stderr F time="2025-12-08T17:54:29Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=pPUtd 2025-12-08T17:54:29.997900381+00:00 stderr F time="2025-12-08T17:54:29Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=yPP8K 2025-12-08T17:54:29.997900381+00:00 stderr F time="2025-12-08T17:54:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=yPP8K 2025-12-08T17:54:30.599622244+00:00 stderr F time="2025-12-08T17:54:30Z" level=info 
msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=yPP8K 2025-12-08T17:54:30.599699756+00:00 stderr F time="2025-12-08T17:54:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=yPP8K 2025-12-08T17:54:30.599820779+00:00 stderr F time="2025-12-08T17:54:30Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=yPP8K 2025-12-08T17:54:30.599849810+00:00 stderr F time="2025-12-08T17:54:30Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=yPP8K 2025-12-08T17:54:30.599904761+00:00 stderr F time="2025-12-08T17:54:30Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=yPP8K 2025-12-08T17:54:30.999567287+00:00 stderr F time="2025-12-08T17:54:30Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=yVLid 2025-12-08T17:54:30.999567287+00:00 stderr F time="2025-12-08T17:54:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=yVLid 2025-12-08T17:54:31.597272511+00:00 stderr F time="2025-12-08T17:54:31Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=yVLid 2025-12-08T17:54:31.597272511+00:00 stderr F time="2025-12-08T17:54:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=yVLid 2025-12-08T17:54:31.597322133+00:00 stderr F time="2025-12-08T17:54:31Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=yVLid 2025-12-08T17:54:31.597333053+00:00 stderr F 
time="2025-12-08T17:54:31Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=yVLid 2025-12-08T17:54:31.597333053+00:00 stderr F time="2025-12-08T17:54:31Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=yVLid 2025-12-08T17:54:31.997805821+00:00 stderr F time="2025-12-08T17:54:31Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=OehyQ 2025-12-08T17:54:31.997805821+00:00 stderr F time="2025-12-08T17:54:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=OehyQ 2025-12-08T17:54:32.601922359+00:00 stderr F time="2025-12-08T17:54:32Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=OehyQ 2025-12-08T17:54:32.601922359+00:00 stderr F time="2025-12-08T17:54:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=OehyQ 2025-12-08T17:54:32.601922359+00:00 stderr F time="2025-12-08T17:54:32Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=OehyQ 2025-12-08T17:54:32.601922359+00:00 stderr F time="2025-12-08T17:54:32Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=OehyQ 2025-12-08T17:54:32.601922359+00:00 stderr F time="2025-12-08T17:54:32Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=OehyQ 2025-12-08T17:54:32.996798035+00:00 stderr F time="2025-12-08T17:54:32Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=u+b1z 2025-12-08T17:54:32.996798035+00:00 stderr F time="2025-12-08T17:54:32Z" level=info msg="of 1 pods matching label selector, 1 
have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=u+b1z 2025-12-08T17:54:33.598699683+00:00 stderr F time="2025-12-08T17:54:33Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=u+b1z 2025-12-08T17:54:33.598699683+00:00 stderr F time="2025-12-08T17:54:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=u+b1z 2025-12-08T17:54:33.598699683+00:00 stderr F time="2025-12-08T17:54:33Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=u+b1z 2025-12-08T17:54:33.598699683+00:00 stderr F time="2025-12-08T17:54:33Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=u+b1z 2025-12-08T17:54:33.598699683+00:00 stderr F time="2025-12-08T17:54:33Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=u+b1z 2025-12-08T17:54:33.998015209+00:00 stderr F time="2025-12-08T17:54:33Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=uJGBR 2025-12-08T17:54:33.998015209+00:00 stderr F time="2025-12-08T17:54:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=uJGBR 2025-12-08T17:54:34.601237952+00:00 stderr F time="2025-12-08T17:54:34Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=uJGBR 2025-12-08T17:54:34.601237952+00:00 stderr F time="2025-12-08T17:54:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=uJGBR 2025-12-08T17:54:34.601237952+00:00 stderr F time="2025-12-08T17:54:34Z" level=error msg="error 
ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=uJGBR 2025-12-08T17:54:34.601237952+00:00 stderr F time="2025-12-08T17:54:34Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=uJGBR 2025-12-08T17:54:34.601237952+00:00 stderr F time="2025-12-08T17:54:34Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=uJGBR 2025-12-08T17:54:38.888074684+00:00 stderr F time="2025-12-08T17:54:38Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Rwyye 2025-12-08T17:54:38.888074684+00:00 stderr F time="2025-12-08T17:54:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Rwyye 2025-12-08T17:54:38.896524541+00:00 stderr F time="2025-12-08T17:54:38Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Rwyye 2025-12-08T17:54:38.896524541+00:00 stderr F time="2025-12-08T17:54:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Rwyye 2025-12-08T17:54:38.896609543+00:00 stderr F time="2025-12-08T17:54:38Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=Rwyye 2025-12-08T17:54:38.896609543+00:00 stderr F time="2025-12-08T17:54:38Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-tkpnz has not yet reported ready" id=Rwyye 2025-12-08T17:54:38.896609543+00:00 stderr F time="2025-12-08T17:54:38Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=Rwyye 2025-12-08T17:54:39.212329920+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="evaluating current pod" 
catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Gvru5 2025-12-08T17:54:39.212329920+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Gvru5 2025-12-08T17:54:39.220482098+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Gvru5 2025-12-08T17:54:39.220482098+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Gvru5 2025-12-08T17:54:39.484644538+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=3USHs 2025-12-08T17:54:39.484644538+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=3USHs 2025-12-08T17:54:40.084650764+00:00 stderr F time="2025-12-08T17:54:40Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=3USHs 2025-12-08T17:54:40.084650764+00:00 stderr F time="2025-12-08T17:54:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=3USHs 2025-12-08T17:54:40.685462892+00:00 stderr F time="2025-12-08T17:54:40Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=IZUel 2025-12-08T17:54:40.685462892+00:00 stderr F time="2025-12-08T17:54:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=IZUel 2025-12-08T17:54:41.285447158+00:00 stderr F time="2025-12-08T17:54:41Z" level=info msg="evaluating 
current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=IZUel 2025-12-08T17:54:41.285447158+00:00 stderr F time="2025-12-08T17:54:41Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=IZUel 2025-12-08T17:54:41.286142407+00:00 stderr F time="2025-12-08T17:54:41Z" level=info msg="catalog update required at 2025-12-08 17:54:41.285543271 +0000 UTC m=+621.247977783" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=IZUel 2025-12-08T17:54:41.490232419+00:00 stderr F I1208 17:54:41.489542 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:54:41.490232419+00:00 stderr F time="2025-12-08T17:54:41Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=IZUel 2025-12-08T17:54:41.490232419+00:00 stderr F time="2025-12-08T17:54:41Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=IZUel 2025-12-08T17:54:41.490232419+00:00 stderr F time="2025-12-08T17:54:41Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=IZUel 2025-12-08T17:54:42.083439873+00:00 stderr F time="2025-12-08T17:54:42Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=YCRzu 2025-12-08T17:54:42.083439873+00:00 stderr F time="2025-12-08T17:54:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=YCRzu 2025-12-08T17:54:42.285922402+00:00 stderr F time="2025-12-08T17:54:42Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lydET 2025-12-08T17:54:42.285922402+00:00 stderr F time="2025-12-08T17:54:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lydET 2025-12-08T17:54:43.284394192+00:00 
stderr F time="2025-12-08T17:54:43Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=YCRzu 2025-12-08T17:54:43.284394192+00:00 stderr F time="2025-12-08T17:54:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=YCRzu 2025-12-08T17:54:43.284488235+00:00 stderr F time="2025-12-08T17:54:43Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=YCRzu 2025-12-08T17:54:43.284488235+00:00 stderr F time="2025-12-08T17:54:43Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=YCRzu 2025-12-08T17:54:43.284501165+00:00 stderr F time="2025-12-08T17:54:43Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=YCRzu 2025-12-08T17:54:43.484393695+00:00 stderr F time="2025-12-08T17:54:43Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lydET 2025-12-08T17:54:43.484393695+00:00 stderr F time="2025-12-08T17:54:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lydET 2025-12-08T17:54:44.083849176+00:00 stderr F time="2025-12-08T17:54:44Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=mq36p 2025-12-08T17:54:44.083849176+00:00 stderr F time="2025-12-08T17:54:44Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=mq36p 2025-12-08T17:54:44.283944201+00:00 stderr F time="2025-12-08T17:54:44Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=VYrt+ 2025-12-08T17:54:44.283944201+00:00 stderr F time="2025-12-08T17:54:44Z" level=info msg="of 1 pods matching label 
selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=VYrt+ 2025-12-08T17:54:45.284513507+00:00 stderr F time="2025-12-08T17:54:45Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=mq36p 2025-12-08T17:54:45.284513507+00:00 stderr F time="2025-12-08T17:54:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=mq36p 2025-12-08T17:54:45.284635800+00:00 stderr F time="2025-12-08T17:54:45Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=mq36p 2025-12-08T17:54:45.284669531+00:00 stderr F time="2025-12-08T17:54:45Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=mq36p 2025-12-08T17:54:45.284711762+00:00 stderr F time="2025-12-08T17:54:45Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=mq36p 2025-12-08T17:54:45.482691871+00:00 stderr F time="2025-12-08T17:54:45Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=VYrt+ 2025-12-08T17:54:45.482691871+00:00 stderr F time="2025-12-08T17:54:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=VYrt+ 2025-12-08T17:54:45.883133927+00:00 stderr F time="2025-12-08T17:54:45Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=Dvvh9 2025-12-08T17:54:45.883133927+00:00 stderr F time="2025-12-08T17:54:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=Dvvh9 2025-12-08T17:54:46.485731074+00:00 stderr F time="2025-12-08T17:54:46Z" level=info msg="evaluating current pod" 
catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=Dvvh9 2025-12-08T17:54:46.485935159+00:00 stderr F time="2025-12-08T17:54:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=Dvvh9 2025-12-08T17:54:46.486149055+00:00 stderr F time="2025-12-08T17:54:46Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=Dvvh9 2025-12-08T17:54:46.486215846+00:00 stderr F time="2025-12-08T17:54:46Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=Dvvh9 2025-12-08T17:54:46.486270748+00:00 stderr F time="2025-12-08T17:54:46Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=Dvvh9 2025-12-08T17:54:46.885139961+00:00 stderr F time="2025-12-08T17:54:46Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=BAyo+ 2025-12-08T17:54:46.885139961+00:00 stderr F time="2025-12-08T17:54:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=BAyo+ 2025-12-08T17:54:47.485300253+00:00 stderr F time="2025-12-08T17:54:47Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=BAyo+ 2025-12-08T17:54:47.485399606+00:00 stderr F time="2025-12-08T17:54:47Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=BAyo+ 2025-12-08T17:54:47.485516229+00:00 stderr F time="2025-12-08T17:54:47Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=BAyo+ 2025-12-08T17:54:47.485554880+00:00 stderr F time="2025-12-08T17:54:47Z" level=error msg="error ensuring registry server: ensure update pod error is not of type 
UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=BAyo+ 2025-12-08T17:54:47.485586501+00:00 stderr F time="2025-12-08T17:54:47Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=BAyo+ 2025-12-08T17:54:50.015424002+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="removed client for deleted catalogsource" source="{redhat-marketplace openshift-marketplace}" 2025-12-08T17:54:51.044569517+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="resolving sources" id=8gBbl namespace=service-telemetry 2025-12-08T17:54:51.044569517+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="checking if subscriptions need update" id=8gBbl namespace=service-telemetry 2025-12-08T17:54:51.078118820+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="No subscriptions were found in namespace service-telemetry" id=8gBbl namespace=service-telemetry 2025-12-08T17:54:51.078118820+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="resolving sources" id=z2tm7 namespace=service-telemetry 2025-12-08T17:54:51.078118820+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="checking if subscriptions need update" id=z2tm7 namespace=service-telemetry 2025-12-08T17:54:51.085996772+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="No subscriptions were found in namespace service-telemetry" id=z2tm7 namespace=service-telemetry 2025-12-08T17:54:51.085996772+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="resolving sources" id=hLXoX namespace=service-telemetry 2025-12-08T17:54:51.085996772+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="checking if subscriptions need update" id=hLXoX namespace=service-telemetry 2025-12-08T17:54:51.178503261+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="No subscriptions were found in namespace service-telemetry" id=hLXoX namespace=service-telemetry 2025-12-08T17:54:52.074378751+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=pkGY+ 2025-12-08T17:54:52.074378751+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=pkGY+ 2025-12-08T17:54:52.083350062+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=pkGY+ 2025-12-08T17:54:52.083350062+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 
current-pod.namespace=openshift-marketplace id=pkGY+ 2025-12-08T17:54:52.083504456+00:00 stderr F time="2025-12-08T17:54:52Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=pkGY+ 2025-12-08T17:54:52.083523597+00:00 stderr F time="2025-12-08T17:54:52Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-hl4hq has not yet reported ready" id=pkGY+ 2025-12-08T17:54:52.083538827+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=pkGY+ 2025-12-08T17:54:52.085329696+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:54:52.093075914+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:54:52.093075914+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:54:52.101012087+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:54:52.101012087+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:54:52.273157710+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:54:52.273157710+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:54:52.276984443+00:00 stderr F time="2025-12-08T17:54:52Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:54:52.276984443+00:00 stderr F E1208 17:54:52.276604 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest 
version and try again" logger="UnhandledError" 2025-12-08T17:54:52.277776254+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:54:52.870780762+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:54:52.870780762+00:00 stderr F time="2025-12-08T17:54:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:54:53.070687761+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=WveS5 2025-12-08T17:54:53.070687761+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=WveS5 2025-12-08T17:54:53.108349656+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="resolving sources" id=EJGsM namespace=cert-manager-operator 2025-12-08T17:54:53.108349656+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="checking if subscriptions need update" id=EJGsM namespace=cert-manager-operator 2025-12-08T17:54:53.114766508+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="No subscriptions were found in namespace cert-manager-operator" id=EJGsM namespace=cert-manager-operator 2025-12-08T17:54:53.167673882+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="resolving sources" id=OAW2a namespace=cert-manager-operator 2025-12-08T17:54:53.167673882+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="checking if subscriptions need update" id=OAW2a namespace=cert-manager-operator 2025-12-08T17:54:53.213460654+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="No subscriptions were found in namespace cert-manager-operator" id=OAW2a namespace=cert-manager-operator 2025-12-08T17:54:53.213460654+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="resolving sources" id=bnK36 namespace=cert-manager-operator 2025-12-08T17:54:53.213460654+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="checking if subscriptions need update" id=bnK36 namespace=cert-manager-operator 2025-12-08T17:54:53.215864559+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="No subscriptions were found in namespace cert-manager-operator" id=bnK36 namespace=cert-manager-operator 2025-12-08T17:54:53.670688219+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:54:53.670688219+00:00 stderr F time="2025-12-08T17:54:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:54:54.028377304+00:00 stderr F 
time="2025-12-08T17:54:54Z" level=info msg="resolving sources" id=gBY57 namespace=cert-manager-operator 2025-12-08T17:54:54.028377304+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="checking if subscriptions need update" id=gBY57 namespace=cert-manager-operator 2025-12-08T17:54:54.033057201+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="No subscriptions were found in namespace cert-manager-operator" id=gBY57 namespace=cert-manager-operator 2025-12-08T17:54:54.033402800+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="resolving sources" id=pBYMs namespace=cert-manager-operator 2025-12-08T17:54:54.033402800+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="checking if subscriptions need update" id=pBYMs namespace=cert-manager-operator 2025-12-08T17:54:54.131921711+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="No subscriptions were found in namespace cert-manager-operator" id=pBYMs namespace=cert-manager-operator 2025-12-08T17:54:54.131921711+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="resolving sources" id=Tah6F namespace=cert-manager-operator 2025-12-08T17:54:54.131921711+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="checking if subscriptions need update" id=Tah6F namespace=cert-manager-operator 2025-12-08T17:54:54.133455153+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="No subscriptions were found in namespace cert-manager-operator" id=Tah6F namespace=cert-manager-operator 2025-12-08T17:54:54.270447089+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=WveS5 2025-12-08T17:54:54.270447089+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=WveS5 2025-12-08T17:54:54.470841912+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:54:54.470841912+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:54:54.480809370+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="requesting snapshot for catalog source openshift-marketplace/redhat-operators" 2025-12-08T17:54:54.481046707+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="requesting snapshot for catalog source openshift-marketplace/certified-operators" 2025-12-08T17:54:54.481139829+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="requesting snapshot for catalog source openshift-marketplace/community-operators" 2025-12-08T17:54:55.068571948+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:54:55.069499633+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators 
catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=KSElN 2025-12-08T17:54:55.069499633+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=KSElN 2025-12-08T17:54:55.335004118+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:54:55.335110981+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg="resolving sources" id=N3sCS namespace=openshift-operators 2025-12-08T17:54:55.335110981+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg="checking if subscriptions need update" id=N3sCS namespace=openshift-operators 2025-12-08T17:54:55.339315763+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg="checking for existing installplan" channel=stable id=N3sCS namespace=openshift-operators pkg=cluster-observability-operator source=redhat-operators sub=cluster-observability-operator 2025-12-08T17:54:55.339315763+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg="resolving subscriptions in namespace" id=N3sCS namespace=openshift-operators 2025-12-08T17:54:55.423337195+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg="unpacking bundles" id=N3sCS namespace=openshift-operators 2025-12-08T17:54:55.671359430+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:54:55.671359430+00:00 stderr F time="2025-12-08T17:54:55Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:54:56.121061551+00:00 stderr F time="2025-12-08T17:54:56Z" level=info msg="unpacking is not complete yet, requeueing" id=N3sCS namespace=openshift-operators 2025-12-08T17:54:56.671913356+00:00 stderr F time="2025-12-08T17:54:56Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:54:56.671913356+00:00 stderr F time="2025-12-08T17:54:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:54:56.871636680+00:00 stderr F time="2025-12-08T17:54:56Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=KSElN 2025-12-08T17:54:56.871636680+00:00 stderr F time="2025-12-08T17:54:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=KSElN 
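The catalog-operator entries above repeat one cycle: the current registry pod for each CatalogSource (certified-operators-58d6l, redhat-operators-xpnf9) passes the image and hash check, but the newly created update pod (certified-operators-tkpnz, redhat-operators-hl4hq) has not reported ready, so the polling check is requeued; in parallel the cluster-observability-operator subscription in openshift-operators is resolved and its bundle unpack is still in progress. The same state can be followed from the cluster side with a few illustrative oc commands (this assumes access to the cluster that produced this log; the commands are not part of the CI job itself):

    $ oc get catalogsource -n openshift-marketplace
    $ oc get pods -n openshift-marketplace
    $ oc get subscription,installplan -n openshift-operators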
2025-12-08T17:54:57.072043404+00:00 stderr F time="2025-12-08T17:54:57Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:54:57.072043404+00:00 stderr F time="2025-12-08T17:54:57Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:54:57.872558017+00:00 stderr F time="2025-12-08T17:54:57Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:54:57.872558017+00:00 stderr F time="2025-12-08T17:54:57Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:54:58.271303567+00:00 stderr F time="2025-12-08T17:54:58Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:54:58.271303567+00:00 stderr F time="2025-12-08T17:54:58Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:54:58.913073098+00:00 stderr F time="2025-12-08T17:54:58Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"openshift-cert-manager-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:54:58.913073098+00:00 stderr F time="2025-12-08T17:54:58Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=rjojM 2025-12-08T17:54:58.913130889+00:00 stderr F time="2025-12-08T17:54:58Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=rjojM 2025-12-08T17:54:58.913130889+00:00 stderr F E1208 17:54:58.913090 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/openshift-cert-manager-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"openshift-cert-manager-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:54:58.917244781+00:00 stderr F time="2025-12-08T17:54:58Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:54:58.918145805+00:00 stderr F time="2025-12-08T17:54:58Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:54:58.918163645+00:00 stderr F time="2025-12-08T17:54:58Z" level=info 
msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:54:58.924923707+00:00 stderr F time="2025-12-08T17:54:58Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:54:58.924923707+00:00 stderr F E1208 17:54:58.924835 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:54:58.926372946+00:00 stderr F time="2025-12-08T17:54:58Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:54:59.870710959+00:00 stderr F time="2025-12-08T17:54:59Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:54:59.870710959+00:00 stderr F time="2025-12-08T17:54:59Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:00.072033807+00:00 stderr F time="2025-12-08T17:55:00Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:00.072033807+00:00 stderr F time="2025-12-08T17:55:00Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:00.470964562+00:00 stderr F time="2025-12-08T17:55:00Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=rjojM 2025-12-08T17:55:00.470964562+00:00 stderr F time="2025-12-08T17:55:00Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=rjojM 2025-12-08T17:55:01.071286738+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=L3Ebd 2025-12-08T17:55:01.071359860+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true 
current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=L3Ebd 2025-12-08T17:55:01.122324732+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="resolving sources" id=sR+Q4 namespace=openshift-operators 2025-12-08T17:55:01.122324732+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="checking if subscriptions need update" id=sR+Q4 namespace=openshift-operators 2025-12-08T17:55:01.128586470+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="checking for existing installplan" channel=stable id=sR+Q4 namespace=openshift-operators pkg=cluster-observability-operator source=redhat-operators sub=cluster-observability-operator 2025-12-08T17:55:01.128586470+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="resolving subscriptions in namespace" id=sR+Q4 namespace=openshift-operators 2025-12-08T17:55:01.214201684+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="unpacking bundles" id=sR+Q4 namespace=openshift-operators 2025-12-08T17:55:01.214367249+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="unpacking is not complete yet, requeueing" id=sR+Q4 namespace=openshift-operators 2025-12-08T17:55:01.471595661+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:01.471595661+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:01.671618494+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:01.671618494+00:00 stderr F time="2025-12-08T17:55:01Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:02.070168779+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=BY0PW 2025-12-08T17:55:02.070168779+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=BY0PW 2025-12-08T17:55:03.069747439+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:03.069747439+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:03.080347205+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg=syncing 
reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:03.080410786+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="resolving sources" id=j2Ian namespace=service-telemetry 2025-12-08T17:55:03.080410786+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="checking if subscriptions need update" id=j2Ian namespace=service-telemetry 2025-12-08T17:55:03.084279360+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="checking for existing installplan" channel=stable id=j2Ian namespace=service-telemetry pkg=elasticsearch-eck-operator-certified source=certified-operators sub=elasticsearch-eck-operator-certified 2025-12-08T17:55:03.084279360+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="resolving subscriptions in namespace" id=j2Ian namespace=service-telemetry 2025-12-08T17:55:03.160587623+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="unpacking bundles" id=j2Ian namespace=service-telemetry 2025-12-08T17:55:03.270236954+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:03.270236954+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:03.286171323+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:03.286242365+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="resolving sources" id=ayCl1 namespace=cert-manager-operator 2025-12-08T17:55:03.286242365+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="checking if subscriptions need update" id=ayCl1 namespace=cert-manager-operator 2025-12-08T17:55:03.289592275+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="checking for existing installplan" channel=stable-v1 id=ayCl1 namespace=cert-manager-operator pkg=openshift-cert-manager-operator source=redhat-operators sub=openshift-cert-manager-operator 2025-12-08T17:55:03.289592275+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="resolving subscriptions in namespace" id=ayCl1 namespace=cert-manager-operator 2025-12-08T17:55:03.319800608+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="unpacking bundles" id=ayCl1 namespace=cert-manager-operator 2025-12-08T17:55:03.470996657+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=L3Ebd 2025-12-08T17:55:03.470996657+00:00 stderr F time="2025-12-08T17:55:03Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=L3Ebd 2025-12-08T17:55:04.126093357+00:00 stderr F time="2025-12-08T17:55:04Z" level=info msg="unpacking is not complete yet, requeueing" id=j2Ian namespace=service-telemetry 2025-12-08T17:55:04.467663259+00:00 stderr F I1208 17:55:04.467295 1 request.go:752] "Waited before sending request" delay="1.147398458s" 
reason="client-side throttling, not priority and fairness" verb="POST" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-marketplace/configmaps" 2025-12-08T17:55:04.526075891+00:00 stderr F time="2025-12-08T17:55:04Z" level=info msg="unpacking is not complete yet, requeueing" id=ayCl1 namespace=cert-manager-operator 2025-12-08T17:55:04.675693797+00:00 stderr F time="2025-12-08T17:55:04Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=BY0PW 2025-12-08T17:55:04.675693797+00:00 stderr F time="2025-12-08T17:55:04Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=BY0PW 2025-12-08T17:55:04.871515667+00:00 stderr F time="2025-12-08T17:55:04Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:04.871515667+00:00 stderr F time="2025-12-08T17:55:04Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:05.269995040+00:00 stderr F time="2025-12-08T17:55:05Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:05.270092913+00:00 stderr F time="2025-12-08T17:55:05Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:05.871365904+00:00 stderr F time="2025-12-08T17:55:05Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=9PuxB 2025-12-08T17:55:05.871365904+00:00 stderr F time="2025-12-08T17:55:05Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=9PuxB 2025-12-08T17:55:06.214707223+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="resolving sources" id=6ZU61 namespace=openshift-operators 2025-12-08T17:55:06.214707223+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="checking if subscriptions need update" id=6ZU61 namespace=openshift-operators 2025-12-08T17:55:06.219074651+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="checking for existing installplan" channel=stable id=6ZU61 namespace=openshift-operators pkg=cluster-observability-operator source=redhat-operators sub=cluster-observability-operator 2025-12-08T17:55:06.219074651+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="resolving subscriptions in namespace" id=6ZU61 namespace=openshift-operators 
2025-12-08T17:55:06.272865048+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=qvKhx 2025-12-08T17:55:06.272865048+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=qvKhx 2025-12-08T17:55:06.322803372+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="unpacking bundles" id=6ZU61 namespace=openshift-operators 2025-12-08T17:55:06.355751569+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=cluster-observability-operator.clusterserviceversion.yaml 2025-12-08T17:55:06.394360009+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_alertmanagerconfigs.yaml 2025-12-08T17:55:06.448630409+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_alertmanagers.yaml 2025-12-08T17:55:06.454332162+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_monitoringstacks.yaml 2025-12-08T17:55:06.458273298+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_podmonitors.yaml 2025-12-08T17:55:06.462485952+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_probes.yaml 2025-12-08T17:55:06.471012961+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:06.471012961+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:06.500344570+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheusagents.yaml 2025-12-08T17:55:06.548962149+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 
key=monitoring.rhobs_prometheuses.yaml 2025-12-08T17:55:06.549823612+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheusrules.yaml 2025-12-08T17:55:06.605412858+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_scrapeconfigs.yaml 2025-12-08T17:55:06.610056243+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_servicemonitors.yaml 2025-12-08T17:55:06.610342081+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_thanosqueriers.yaml 2025-12-08T17:55:06.639663619+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_thanosrulers.yaml 2025-12-08T17:55:06.639663619+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=PodDisruptionBudget" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator-admission-webhook_policy_v1_poddisruptionbudget.yaml 2025-12-08T17:55:06.639663619+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator-admission-webhook_v1_service.yaml 2025-12-08T17:55:06.639663619+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator_v1_service.yaml 2025-12-08T17:55:06.640604694+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=PrometheusRule" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_monitoring.coreos.com_v1_prometheusrule.yaml 2025-12-08T17:55:06.640604694+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=RoleBinding" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_rbac.authorization.k8s.io_v1_rolebinding.yaml 2025-12-08T17:55:06.640604694+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_v1_service.yaml 2025-12-08T17:55:06.641358195+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability.openshift.io_observabilityinstallers.yaml 2025-12-08T17:55:06.642473925+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" 
configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability.openshift.io_uiplugins.yaml 2025-12-08T17:55:06.652416873+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_perses.yaml 2025-12-08T17:55:06.653372839+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_persesdashboards.yaml 2025-12-08T17:55:06.654374895+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_persesdatasources.yaml 2025-12-08T17:55:06.654420316+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=ServiceAccount" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses_v1_serviceaccount.yaml 2025-12-08T17:55:06.654541980+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdashboard-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:06.654661074+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdashboard-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:06.654783357+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdatasource-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:06.654886610+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdatasource-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:06.871684094+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:06.871819657+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:07.001927248+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="resolution caused subscription changes, creating installplan" id=6ZU61 namespace=openshift-operators 2025-12-08T17:55:07.007564400+00:00 stderr F time="2025-12-08T17:55:07Z" level=warning msg="no installplan found with matching generation, creating new one" id=6ZU61 namespace=openshift-operators 2025-12-08T17:55:07.012421931+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg=syncing id=ZDzEQ ip=install-5nc8t namespace=openshift-operators phase= 2025-12-08T17:55:07.012421931+00:00 stderr F time="2025-12-08T17:55:07Z" level=info 
msg="skip processing installplan without status - subscription sync responsible for initial status" id=ZDzEQ ip=install-5nc8t namespace=openshift-operators phase= 2025-12-08T17:55:07.047980398+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg=syncing id=C7wZ6 ip=install-5nc8t namespace=openshift-operators phase=Installing 2025-12-08T17:55:07.094693575+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=cluster-observability-operator.clusterserviceversion.yaml 2025-12-08T17:55:07.131968278+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_alertmanagerconfigs.yaml 2025-12-08T17:55:07.161604545+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_alertmanagers.yaml 2025-12-08T17:55:07.167295279+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_monitoringstacks.yaml 2025-12-08T17:55:07.171239005+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_podmonitors.yaml 2025-12-08T17:55:07.175462448+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_probes.yaml 2025-12-08T17:55:07.228578728+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheusagents.yaml 2025-12-08T17:55:07.281583884+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheuses.yaml 2025-12-08T17:55:07.282505480+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheusrules.yaml 2025-12-08T17:55:07.320803050+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_scrapeconfigs.yaml 2025-12-08T17:55:07.324860109+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_servicemonitors.yaml 2025-12-08T17:55:07.325509397+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" 
configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_thanosqueriers.yaml 2025-12-08T17:55:07.374671110+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_thanosrulers.yaml 2025-12-08T17:55:07.374904806+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=PodDisruptionBudget" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator-admission-webhook_policy_v1_poddisruptionbudget.yaml 2025-12-08T17:55:07.375081311+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator-admission-webhook_v1_service.yaml 2025-12-08T17:55:07.375261026+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator_v1_service.yaml 2025-12-08T17:55:07.375461381+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=PrometheusRule" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_monitoring.coreos.com_v1_prometheusrule.yaml 2025-12-08T17:55:07.375585094+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=RoleBinding" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_rbac.authorization.k8s.io_v1_rolebinding.yaml 2025-12-08T17:55:07.375750669+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_v1_service.yaml 2025-12-08T17:55:07.378182444+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability.openshift.io_observabilityinstallers.yaml 2025-12-08T17:55:07.379746216+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability.openshift.io_uiplugins.yaml 2025-12-08T17:55:07.390551547+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_perses.yaml 2025-12-08T17:55:07.391700528+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_persesdashboards.yaml 2025-12-08T17:55:07.392746876+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_persesdatasources.yaml 2025-12-08T17:55:07.392820468+00:00 stderr F time="2025-12-08T17:55:07Z" 
level=info msg="added to bundle, Kind=ServiceAccount" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses_v1_serviceaccount.yaml 2025-12-08T17:55:07.392968972+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdashboard-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:07.393090765+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdashboard-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:07.393212358+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdatasource-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:07.393349382+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdatasource-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:07.812217695+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="resolving sources" id=ILPZH namespace=openshift-operators 2025-12-08T17:55:07.812217695+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="checking if subscriptions need update" id=ILPZH namespace=openshift-operators 2025-12-08T17:55:07.837254629+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="subscriptions were updated, wait for a new resolution" id=ILPZH namespace=openshift-operators 2025-12-08T17:55:07.837254629+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="resolving sources" id=c2KY8 namespace=openshift-operators 2025-12-08T17:55:07.837254629+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="checking if subscriptions need update" id=c2KY8 namespace=openshift-operators 2025-12-08T17:55:07.845737577+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="resolving subscriptions in namespace" id=c2KY8 namespace=openshift-operators 2025-12-08T17:55:07.860606297+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="no subscriptions were updated" id=c2KY8 namespace=openshift-operators 2025-12-08T17:55:08.037180668+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="resolving sources" id=EszgY namespace=openshift-operators 2025-12-08T17:55:08.037180668+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="checking if subscriptions need update" id=EszgY namespace=openshift-operators 2025-12-08T17:55:08.074437561+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:08.074437561+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:08.272507321+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace 
correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=9PuxB 2025-12-08T17:55:08.272507321+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=9PuxB 2025-12-08T17:55:08.418634884+00:00 stderr F time="2025-12-08T17:55:08Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:08.418634884+00:00 stderr F E1208 17:55:08.417845 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:08.421995664+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:08.471926668+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:08.471926668+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:08.614275739+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="resolving subscriptions in namespace" id=EszgY namespace=openshift-operators 2025-12-08T17:55:08.671206360+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=qvKhx 2025-12-08T17:55:08.671206360+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=qvKhx 2025-12-08T17:55:08.810993672+00:00 stderr F time="2025-12-08T17:55:08Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:08.810993672+00:00 stderr F E1208 17:55:08.810194 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com 
\"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:08.816581073+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:09.021251221+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="no subscriptions were updated" id=EszgY namespace=openshift-operators 2025-12-08T17:55:09.132920305+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="resolving sources" id=O8N/d namespace=service-telemetry 2025-12-08T17:55:09.132920305+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="checking if subscriptions need update" id=O8N/d namespace=service-telemetry 2025-12-08T17:55:09.281928845+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=aq4DC 2025-12-08T17:55:09.281928845+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=aq4DC 2025-12-08T17:55:09.471919348+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:09.471919348+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:09.611949086+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="checking for existing installplan" channel=stable id=O8N/d namespace=service-telemetry pkg=elasticsearch-eck-operator-certified source=certified-operators sub=elasticsearch-eck-operator-certified 2025-12-08T17:55:09.611949086+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="resolving subscriptions in namespace" id=O8N/d namespace=service-telemetry 2025-12-08T17:55:09.810660084+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="resolving sources" id=8TfkK namespace=cert-manager-operator 2025-12-08T17:55:09.810660084+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="checking if subscriptions need update" id=8TfkK namespace=cert-manager-operator 2025-12-08T17:55:10.273940780+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="unpacking bundles" id=O8N/d namespace=service-telemetry 2025-12-08T17:55:10.281086993+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:10.281086993+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:10.282783479+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to 
bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=agents.agent.k8s.elastic.co.crd.yaml 2025-12-08T17:55:10.286929060+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=apmservers.apm.k8s.elastic.co.crd.yaml 2025-12-08T17:55:10.288461772+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=beats.beat.k8s.elastic.co.crd.yaml 2025-12-08T17:55:10.290250769+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticmapsservers.maps.k8s.elastic.co.crd.yaml 2025-12-08T17:55:10.294227457+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticsearch-eck-operator-certified.clusterserviceversion.yaml 2025-12-08T17:55:10.294509024+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticsearchautoscalers.autoscaling.k8s.elastic.co.crd.yaml 2025-12-08T17:55:10.303827735+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticsearches.elasticsearch.k8s.elastic.co.crd.yaml 2025-12-08T17:55:10.307532264+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=enterprisesearches.enterprisesearch.k8s.elastic.co.crd.yaml 2025-12-08T17:55:10.312468747+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=kibanas.kibana.k8s.elastic.co.crd.yaml 2025-12-08T17:55:10.315819787+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=logstashes.logstash.k8s.elastic.co.crd.yaml 2025-12-08T17:55:10.319977970+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=stackconfigpolicies.stackconfigpolicy.k8s.elastic.co.crd.yaml 2025-12-08T17:55:10.419113637+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="checking for existing installplan" channel=stable-v1 id=8TfkK namespace=cert-manager-operator pkg=openshift-cert-manager-operator source=redhat-operators sub=openshift-cert-manager-operator 2025-12-08T17:55:10.419113637+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="resolving subscriptions in namespace" id=8TfkK namespace=cert-manager-operator 2025-12-08T17:55:10.440867902+00:00 stderr F 
time="2025-12-08T17:55:10Z" level=info msg="resolution caused subscription changes, creating installplan" id=O8N/d namespace=service-telemetry 2025-12-08T17:55:10.672044574+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:10.672044574+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:10.835820041+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="unpacking bundles" id=8TfkK namespace=cert-manager-operator 2025-12-08T17:55:10.836116719+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="unpacking is not complete yet, requeueing" id=8TfkK namespace=cert-manager-operator 2025-12-08T17:55:11.071115683+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=aq4DC 2025-12-08T17:55:11.071215146+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=aq4DC 2025-12-08T17:55:11.416936549+00:00 stderr F time="2025-12-08T17:55:11Z" level=warning msg="no installplan found with matching generation, creating new one" id=O8N/d namespace=service-telemetry 2025-12-08T17:55:11.471289272+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:11.471289272+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:11.671652973+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:11.671652973+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:11.806255266+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg=syncing id=sbifA ip=install-sk9l5 namespace=service-telemetry phase= 2025-12-08T17:55:11.806255266+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="skip processing installplan without status - subscription sync responsible for initial status" id=sbifA ip=install-sk9l5 namespace=service-telemetry phase= 2025-12-08T17:55:12.229305381+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:12.229380683+00:00 stderr F 
time="2025-12-08T17:55:12Z" level=info msg="resolving sources" id=W0KP4 namespace=cert-manager-operator 2025-12-08T17:55:12.229380683+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="checking if subscriptions need update" id=W0KP4 namespace=cert-manager-operator 2025-12-08T17:55:12.270830208+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:12.270933501+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:12.423768755+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg=syncing id=RKGNQ ip=install-sk9l5 namespace=service-telemetry phase=Installing 2025-12-08T17:55:12.478298102+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=N/dMg 2025-12-08T17:55:12.478298102+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=N/dMg 2025-12-08T17:55:12.810944294+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="checking for existing installplan" channel=stable-v1 id=W0KP4 namespace=cert-manager-operator pkg=openshift-cert-manager-operator source=redhat-operators sub=openshift-cert-manager-operator 2025-12-08T17:55:12.810944294+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="resolving subscriptions in namespace" id=W0KP4 namespace=cert-manager-operator 2025-12-08T17:55:13.009836696+00:00 stderr F time="2025-12-08T17:55:13Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:13.009989090+00:00 stderr F E1208 17:55:13.009975 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:13.011255874+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:13.071009152+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:13.071009152+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l 
current-pod.namespace=openshift-marketplace 2025-12-08T17:55:13.428119702+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=agents.agent.k8s.elastic.co.crd.yaml 2025-12-08T17:55:13.451653146+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=apmservers.apm.k8s.elastic.co.crd.yaml 2025-12-08T17:55:13.453533275+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=beats.beat.k8s.elastic.co.crd.yaml 2025-12-08T17:55:13.455524979+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticmapsservers.maps.k8s.elastic.co.crd.yaml 2025-12-08T17:55:13.461081759+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticsearch-eck-operator-certified.clusterserviceversion.yaml 2025-12-08T17:55:13.463283148+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticsearchautoscalers.autoscaling.k8s.elastic.co.crd.yaml 2025-12-08T17:55:13.477646425+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticsearches.elasticsearch.k8s.elastic.co.crd.yaml 2025-12-08T17:55:13.482977358+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=enterprisesearches.enterprisesearch.k8s.elastic.co.crd.yaml 2025-12-08T17:55:13.488666321+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=kibanas.kibana.k8s.elastic.co.crd.yaml 2025-12-08T17:55:13.495959418+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=logstashes.logstash.k8s.elastic.co.crd.yaml 2025-12-08T17:55:13.495959418+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=stackconfigpolicies.stackconfigpolicy.k8s.elastic.co.crd.yaml 2025-12-08T17:55:13.885952683+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=N/dMg 2025-12-08T17:55:13.885952683+00:00 
stderr F time="2025-12-08T17:55:13Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=N/dMg 2025-12-08T17:55:13.938953169+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="unpacking bundles" id=W0KP4 namespace=cert-manager-operator 2025-12-08T17:55:13.938953169+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="unpacking is not complete yet, requeueing" id=W0KP4 namespace=cert-manager-operator 2025-12-08T17:55:13.938953169+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="resolving sources" id=nZkTZ namespace=openshift-operators 2025-12-08T17:55:13.938953169+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="checking if subscriptions need update" id=nZkTZ namespace=openshift-operators 2025-12-08T17:55:14.016122876+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="resolving sources" id=JN+ho namespace=service-telemetry 2025-12-08T17:55:14.016122876+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="checking if subscriptions need update" id=JN+ho namespace=service-telemetry 2025-12-08T17:55:14.069389639+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:14.069389639+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:14.271226490+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:14.271226490+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:15.091840544+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=K15Y7 2025-12-08T17:55:15.091972797+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=K15Y7 2025-12-08T17:55:15.270821300+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:15.270821300+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg 
current-pod.namespace=openshift-marketplace 2025-12-08T17:55:15.413834989+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="resolving subscriptions in namespace" id=nZkTZ namespace=openshift-operators 2025-12-08T17:55:15.473220107+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:15.473220107+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:15.815538230+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="error updating InstallPlan status" id=C7wZ6 ip=install-5nc8t namespace=openshift-operators phase=Installing updateError="Operation cannot be fulfilled on installplans.operators.coreos.com \"install-5nc8t\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:15.815593092+00:00 stderr F E1208 17:55:15.815565 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/install-5nc8t\" failed: error updating InstallPlan status: Operation cannot be fulfilled on installplans.operators.coreos.com \"install-5nc8t\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:15.818635533+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg=syncing id=BCLLf ip=install-5nc8t namespace=openshift-operators phase=Installing 2025-12-08T17:55:16.236988832+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="no subscriptions were updated" id=nZkTZ namespace=openshift-operators 2025-12-08T17:55:16.270103173+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:16.270103173+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:16.287404608+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:16.408469546+00:00 stderr F time="2025-12-08T17:55:16Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:16.408519357+00:00 stderr F E1208 17:55:16.408468 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:16.409626738+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:16.476861277+00:00 stderr F 
time="2025-12-08T17:55:16Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=K15Y7 2025-12-08T17:55:16.476861277+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=K15Y7 2025-12-08T17:55:16.616598898+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="subscriptions were updated, wait for a new resolution" id=JN+ho namespace=service-telemetry 2025-12-08T17:55:16.616598898+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="resolving sources" id=ZeAAi namespace=cert-manager-operator 2025-12-08T17:55:16.616598898+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="checking if subscriptions need update" id=ZeAAi namespace=cert-manager-operator 2025-12-08T17:55:16.906918940+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=cluster-observability-operator.clusterserviceversion.yaml 2025-12-08T17:55:16.967928942+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_alertmanagerconfigs.yaml 2025-12-08T17:55:17.004084125+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_alertmanagers.yaml 2025-12-08T17:55:17.009990364+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_monitoringstacks.yaml 2025-12-08T17:55:17.011844564+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="error updating InstallPlan status" id=RKGNQ ip=install-sk9l5 namespace=service-telemetry phase=Installing updateError="Operation cannot be fulfilled on installplans.operators.coreos.com \"install-sk9l5\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:17.011913386+00:00 stderr F E1208 17:55:17.011868 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/install-sk9l5\" failed: error updating InstallPlan status: Operation cannot be fulfilled on installplans.operators.coreos.com \"install-sk9l5\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:17.013849357+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg=syncing id=Uox9O ip=install-sk9l5 namespace=service-telemetry phase=Installing 2025-12-08T17:55:17.018156294+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_podmonitors.yaml 2025-12-08T17:55:17.024518815+00:00 
stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_probes.yaml 2025-12-08T17:55:17.062472537+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheusagents.yaml 2025-12-08T17:55:17.114661061+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheuses.yaml 2025-12-08T17:55:17.115563456+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheusrules.yaml 2025-12-08T17:55:17.166015903+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_scrapeconfigs.yaml 2025-12-08T17:55:17.172940399+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_servicemonitors.yaml 2025-12-08T17:55:17.172940399+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_thanosqueriers.yaml 2025-12-08T17:55:17.216064780+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_thanosrulers.yaml 2025-12-08T17:55:17.216064780+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=PodDisruptionBudget" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator-admission-webhook_policy_v1_poddisruptionbudget.yaml 2025-12-08T17:55:17.216064780+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator-admission-webhook_v1_service.yaml 2025-12-08T17:55:17.216064780+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator_v1_service.yaml 2025-12-08T17:55:17.219040380+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=PrometheusRule" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_monitoring.coreos.com_v1_prometheusrule.yaml 2025-12-08T17:55:17.219040380+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=RoleBinding" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 
key=observability-operator_rbac.authorization.k8s.io_v1_rolebinding.yaml 2025-12-08T17:55:17.219040380+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_v1_service.yaml 2025-12-08T17:55:17.221688102+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability.openshift.io_observabilityinstallers.yaml 2025-12-08T17:55:17.224793135+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability.openshift.io_uiplugins.yaml 2025-12-08T17:55:17.274318497+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_perses.yaml 2025-12-08T17:55:17.274318497+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_persesdashboards.yaml 2025-12-08T17:55:17.275072758+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_persesdatasources.yaml 2025-12-08T17:55:17.275072758+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=ServiceAccount" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses_v1_serviceaccount.yaml 2025-12-08T17:55:17.277076521+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdashboard-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:17.277076521+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdashboard-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:17.277076521+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdatasource-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:17.277076521+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdatasource-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:17.402472926+00:00 stderr F I1208 17:55:17.402252 1 request.go:752] "Waited before sending request" delay="1.114679138s" reason="client-side throttling, not priority and fairness" verb="PUT" URL="https://10.217.4.1:443/apis/operators.coreos.com/v1alpha1/namespaces/openshift-operators/subscriptions/cluster-observability-operator/status" 2025-12-08T17:55:17.420529613+00:00 stderr 
F time="2025-12-08T17:55:17Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:17.420529613+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:17.427854979+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:17.427854979+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:17.607193735+00:00 stderr F time="2025-12-08T17:55:17Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:17.607232136+00:00 stderr F E1208 17:55:17.607219 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:17.608412579+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:17.669053960+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:17.669053960+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:17.806089918+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="checking for existing installplan" channel=stable-v1 id=ZeAAi namespace=cert-manager-operator pkg=openshift-cert-manager-operator source=redhat-operators sub=openshift-cert-manager-operator 2025-12-08T17:55:17.806089918+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="resolving subscriptions in namespace" id=ZeAAi namespace=cert-manager-operator 2025-12-08T17:55:18.029976803+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=agents.agent.k8s.elastic.co.crd.yaml 2025-12-08T17:55:18.038078431+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=apmservers.apm.k8s.elastic.co.crd.yaml 2025-12-08T17:55:18.043420545+00:00 stderr F 
time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=beats.beat.k8s.elastic.co.crd.yaml 2025-12-08T17:55:18.047323900+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticmapsservers.maps.k8s.elastic.co.crd.yaml 2025-12-08T17:55:18.055524240+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticsearch-eck-operator-certified.clusterserviceversion.yaml 2025-12-08T17:55:18.066825564+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticsearchautoscalers.autoscaling.k8s.elastic.co.crd.yaml 2025-12-08T17:55:18.078343404+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=elasticsearches.elasticsearch.k8s.elastic.co.crd.yaml 2025-12-08T17:55:18.095958989+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=enterprisesearches.enterprisesearch.k8s.elastic.co.crd.yaml 2025-12-08T17:55:18.101647592+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=kibanas.kibana.k8s.elastic.co.crd.yaml 2025-12-08T17:55:18.109579556+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=logstashes.logstash.k8s.elastic.co.crd.yaml 2025-12-08T17:55:18.111430335+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 key=stackconfigpolicies.stackconfigpolicy.k8s.elastic.co.crd.yaml 2025-12-08T17:55:18.433936114+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:18.433936114+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:18.474922227+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:18.474922227+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true 
current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:18.630863714+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:18.871069357+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="unpacking bundles" id=ZeAAi namespace=cert-manager-operator 2025-12-08T17:55:18.908530256+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=acme.cert-manager.io_challenges.yaml 2025-12-08T17:55:18.909429620+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=acme.cert-manager.io_orders.yaml 2025-12-08T17:55:18.909542953+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager-operator-controller-manager-metrics-service_v1_service.yaml 2025-12-08T17:55:18.909682987+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:18.912688248+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager-operator.clusterserviceversion.yaml 2025-12-08T17:55:18.913921181+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_certificaterequests.yaml 2025-12-08T17:55:18.916479600+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_certificates.yaml 2025-12-08T17:55:18.933300783+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_clusterissuers.yaml 2025-12-08T17:55:18.977951305+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_issuers.yaml 2025-12-08T17:55:18.983916825+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=operator.openshift.io_certmanagers.yaml 2025-12-08T17:55:19.011232809+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=operator.openshift.io_istiocsrs.yaml 2025-12-08T17:55:19.071968904+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="evaluating current 
pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:19.071968904+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:19.176501727+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="resolution caused subscription changes, creating installplan" id=ZeAAi namespace=cert-manager-operator 2025-12-08T17:55:19.209531606+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="resolving sources" id=NE086 namespace=openshift-operators 2025-12-08T17:55:19.209531606+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="checking if subscriptions need update" id=NE086 namespace=openshift-operators 2025-12-08T17:55:19.271924195+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:19.271924195+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:19.411400888+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:19.870007520+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:19.870083322+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:20.020021407+00:00 stderr F time="2025-12-08T17:55:20Z" level=warning msg="no installplan found with matching generation, creating new one" id=ZeAAi namespace=cert-manager-operator 2025-12-08T17:55:20.072151450+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:20.072151450+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:20.235761584+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="resolving subscriptions in namespace" id=NE086 namespace=openshift-operators 2025-12-08T17:55:20.630783814+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="no subscriptions were updated" id=NE086 namespace=openshift-operators 2025-12-08T17:55:20.671946361+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:20.671946361+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images 
and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:20.689922426+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:20.882918019+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:20.882918019+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:21.410870237+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="resolving sources" id=Cw20H namespace=service-telemetry 2025-12-08T17:55:21.410870237+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="checking if subscriptions need update" id=Cw20H namespace=service-telemetry 2025-12-08T17:55:21.471743935+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:21.471743935+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:21.612959275+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="resolving sources" id=kzSny namespace=openshift-operators 2025-12-08T17:55:21.612959275+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="checking if subscriptions need update" id=kzSny namespace=openshift-operators 2025-12-08T17:55:21.670893464+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:21.670893464+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:21.672786625+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:22.069463940+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:22.069463940+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:22.420894808+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="resolving subscriptions in namespace" id=Cw20H namespace=service-telemetry 2025-12-08T17:55:22.612490284+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="resolving subscriptions in namespace" id=kzSny namespace=openshift-operators 2025-12-08T17:55:22.670259839+00:00 stderr F 
time="2025-12-08T17:55:22Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:22.670259839+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:22.681510572+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:22.870252581+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:22.870252581+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:23.008296225+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="no subscriptions were updated" id=Cw20H namespace=service-telemetry 2025-12-08T17:55:23.222585232+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="no subscriptions were updated" id=kzSny namespace=openshift-operators 2025-12-08T17:55:23.270955684+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:23.270955684+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:23.876072979+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:23.876072979+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:24.070013110+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:24.070013110+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:24.471442946+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:24.471442946+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg 
current-pod.namespace=openshift-marketplace 2025-12-08T17:55:24.608673241+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="resolving sources" id=B5Y0j namespace=cert-manager-operator 2025-12-08T17:55:24.608673241+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="checking if subscriptions need update" id=B5Y0j namespace=cert-manager-operator 2025-12-08T17:55:24.819128097+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:24.870658790+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:24.870658790+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:24.884050518+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:25.414428941+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg=syncing id=DImu+ ip=install-l78bn namespace=cert-manager-operator phase=Installing 2025-12-08T17:55:25.469515623+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:25.469515623+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:25.669043048+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:25.669043048+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:26.242383930+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=acme.cert-manager.io_challenges.yaml 2025-12-08T17:55:26.243168702+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=acme.cert-manager.io_orders.yaml 2025-12-08T17:55:26.243268644+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager-operator-controller-manager-metrics-service_v1_service.yaml 2025-12-08T17:55:26.243341536+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml 
2025-12-08T17:55:26.245745482+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager-operator.clusterserviceversion.yaml 2025-12-08T17:55:26.246617596+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_certificaterequests.yaml 2025-12-08T17:55:26.248772855+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_certificates.yaml 2025-12-08T17:55:26.261389161+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_clusterissuers.yaml 2025-12-08T17:55:26.273619647+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_issuers.yaml 2025-12-08T17:55:26.273937856+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:26.273937856+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:26.277834082+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=operator.openshift.io_certmanagers.yaml 2025-12-08T17:55:26.283559849+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=operator.openshift.io_istiocsrs.yaml 2025-12-08T17:55:26.469362488+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:26.469362488+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:26.808438043+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="resolving sources" id=3ezB5 namespace=service-telemetry 2025-12-08T17:55:26.808438043+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="checking if subscriptions need update" id=3ezB5 namespace=service-telemetry 2025-12-08T17:55:27.010948639+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="subscriptions were updated, wait for a new resolution" id=B5Y0j namespace=cert-manager-operator 2025-12-08T17:55:27.010948639+00:00 stderr F time="2025-12-08T17:55:27Z" 
level=info msg="resolving sources" id=Oeu6B namespace=openshift-operators 2025-12-08T17:55:27.010948639+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="checking if subscriptions need update" id=Oeu6B namespace=openshift-operators 2025-12-08T17:55:27.075741447+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:27.075741447+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:27.081219667+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:27.281920595+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:27.281920595+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:27.315918728+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:27.671443113+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:27.671443113+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:28.070571035+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:28.070571035+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:28.420234260+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="resolving subscriptions in namespace" id=3ezB5 namespace=service-telemetry 2025-12-08T17:55:28.673929021+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:28.673929021+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:28.676276635+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="resolving subscriptions in namespace" id=Oeu6B namespace=openshift-operators 2025-12-08T17:55:28.871935354+00:00 stderr F 
time="2025-12-08T17:55:28Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:28.871935354+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:28.878414932+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:29.010085705+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="no subscriptions were updated" id=3ezB5 namespace=service-telemetry 2025-12-08T17:55:29.216913250+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="no subscriptions were updated" id=Oeu6B namespace=openshift-operators 2025-12-08T17:55:29.269397220+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:29.269397220+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:29.669173030+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:29.669244832+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:29.671187026+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:30.271628731+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:30.271628731+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:30.412124476+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg=syncing id=TwH/W ip=install-sk9l5 namespace=service-telemetry phase=Complete 2025-12-08T17:55:30.414210784+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg=syncing id=kTAwv ip=install-l78bn namespace=cert-manager-operator phase=Installing 2025-12-08T17:55:30.472543975+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:30.472543975+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 
2025-12-08T17:55:30.609618776+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="resolving sources" id=ofWv5 namespace=cert-manager-operator 2025-12-08T17:55:30.609682698+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="checking if subscriptions need update" id=ofWv5 namespace=cert-manager-operator 2025-12-08T17:55:31.011039031+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="resolving sources" id=pkXq+ namespace=service-telemetry 2025-12-08T17:55:31.011039031+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="checking if subscriptions need update" id=pkXq+ namespace=service-telemetry 2025-12-08T17:55:31.070453411+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:31.070453411+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:31.246432300+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=acme.cert-manager.io_challenges.yaml 2025-12-08T17:55:31.247267652+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=acme.cert-manager.io_orders.yaml 2025-12-08T17:55:31.247377505+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager-operator-controller-manager-metrics-service_v1_service.yaml 2025-12-08T17:55:31.247637172+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:31.250302196+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager-operator.clusterserviceversion.yaml 2025-12-08T17:55:31.251379416+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_certificaterequests.yaml 2025-12-08T17:55:31.255107068+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_certificates.yaml 2025-12-08T17:55:31.271846047+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:31.271846047+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true 
correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:31.273181953+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_clusterissuers.yaml 2025-12-08T17:55:31.288492724+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=cert-manager.io_issuers.yaml 2025-12-08T17:55:31.292250817+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=operator.openshift.io_certmanagers.yaml 2025-12-08T17:55:31.296955876+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 key=operator.openshift.io_istiocsrs.yaml 2025-12-08T17:55:31.871914143+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:31.871914143+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:32.009651242+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="resolving subscriptions in namespace" id=ofWv5 namespace=cert-manager-operator 2025-12-08T17:55:32.071375856+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:32.071375856+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:32.073185406+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:32.215980704+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg=syncing id=QN2jm ip=install-l78bn namespace=cert-manager-operator phase=Complete 2025-12-08T17:55:32.409084613+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="resolving subscriptions in namespace" id=pkXq+ namespace=service-telemetry 2025-12-08T17:55:32.511162944+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:32.511215885+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:32.606668905+00:00 stderr F time="2025-12-08T17:55:32Z" level=warning msg="an error was encountered during reconciliation" 
error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:32.606838429+00:00 stderr F E1208 17:55:32.606781 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:32.608146015+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:32.813999344+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="no subscriptions were updated" id=ofWv5 namespace=cert-manager-operator 2025-12-08T17:55:32.873647510+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:32.873647510+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:33.010970858+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="no subscriptions were updated" id=pkXq+ namespace=service-telemetry 2025-12-08T17:55:33.281429189+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:33.281429189+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:33.323929486+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:33.670794673+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:33.670794673+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:34.071936351+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:34.071936351+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:34.209663740+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="resolving sources" id=MiwqA namespace=openshift-operators 
2025-12-08T17:55:34.209663740+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="checking if subscriptions need update" id=MiwqA namespace=openshift-operators 2025-12-08T17:55:34.433478612+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg=syncing id=RZxgW ip=install-5nc8t namespace=openshift-operators phase=Installing 2025-12-08T17:55:34.607778934+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="resolving sources" id=nP0bq namespace=cert-manager-operator 2025-12-08T17:55:34.607778934+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="checking if subscriptions need update" id=nP0bq namespace=cert-manager-operator 2025-12-08T17:55:34.670051333+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:34.670051333+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:34.873925027+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:34.873925027+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:34.873925027+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:35.057270728+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=cluster-observability-operator.clusterserviceversion.yaml 2025-12-08T17:55:35.101670006+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_alertmanagerconfigs.yaml 2025-12-08T17:55:35.139105183+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_alertmanagers.yaml 2025-12-08T17:55:35.145519100+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_monitoringstacks.yaml 2025-12-08T17:55:35.151302919+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_podmonitors.yaml 2025-12-08T17:55:35.155809962+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_probes.yaml 2025-12-08T17:55:35.191789039+00:00 stderr F 
time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheusagents.yaml 2025-12-08T17:55:35.252918527+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheuses.yaml 2025-12-08T17:55:35.253898263+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_prometheusrules.yaml 2025-12-08T17:55:35.269948614+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:35.269948614+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:35.293294115+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_scrapeconfigs.yaml 2025-12-08T17:55:35.298397864+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_servicemonitors.yaml 2025-12-08T17:55:35.299290659+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_thanosqueriers.yaml 2025-12-08T17:55:35.336922672+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=monitoring.rhobs_thanosrulers.yaml 2025-12-08T17:55:35.336922672+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=PodDisruptionBudget" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator-admission-webhook_policy_v1_poddisruptionbudget.yaml 2025-12-08T17:55:35.336922672+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator-admission-webhook_v1_service.yaml 2025-12-08T17:55:35.336922672+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=obo-prometheus-operator_v1_service.yaml 2025-12-08T17:55:35.336922672+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=PrometheusRule" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_monitoring.coreos.com_v1_prometheusrule.yaml 
2025-12-08T17:55:35.336922672+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=RoleBinding" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_rbac.authorization.k8s.io_v1_rolebinding.yaml 2025-12-08T17:55:35.336922672+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=Service" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability-operator_v1_service.yaml 2025-12-08T17:55:35.341179879+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability.openshift.io_observabilityinstallers.yaml 2025-12-08T17:55:35.341179879+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=observability.openshift.io_uiplugins.yaml 2025-12-08T17:55:35.347775640+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_perses.yaml 2025-12-08T17:55:35.360420816+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_persesdashboards.yaml 2025-12-08T17:55:35.365225888+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses.dev_persesdatasources.yaml 2025-12-08T17:55:35.365259919+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=ServiceAccount" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=perses_v1_serviceaccount.yaml 2025-12-08T17:55:35.365412313+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdashboard-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:35.365544797+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdashboard-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:35.365698031+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdatasource-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:35.365830385+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="added to bundle, Kind=ClusterRole" configmap=openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 key=persesdatasource-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:35.413003489+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="resolving subscriptions in namespace" id=MiwqA namespace=openshift-operators 
2025-12-08T17:55:35.413143983+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg=syncing id=TM/c+ ip=install-l78bn namespace=cert-manager-operator phase=Complete 2025-12-08T17:55:35.669674372+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:35.669674372+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:35.820936293+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="resolving subscriptions in namespace" id=nP0bq namespace=cert-manager-operator 2025-12-08T17:55:36.018227146+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="no subscriptions were updated" id=MiwqA namespace=openshift-operators 2025-12-08T17:55:36.069699789+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:36.069699789+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:36.228431274+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:36.423034425+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="no subscriptions were updated" id=nP0bq namespace=cert-manager-operator 2025-12-08T17:55:36.477528789+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:36.477528789+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:36.871186632+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:36.871186632+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:36.890288516+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:37.215757457+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="resolving sources" id=GMuiD namespace=service-telemetry 2025-12-08T17:55:37.215757457+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="checking if subscriptions need update" id=GMuiD namespace=service-telemetry 2025-12-08T17:55:37.287722151+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l 
current-pod.namespace=openshift-marketplace 2025-12-08T17:55:37.287722151+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:37.413922525+00:00 stderr F time="2025-12-08T17:55:37Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:37.413922525+00:00 stderr F E1208 17:55:37.413567 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:37.414828809+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:37.608946645+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="resolving sources" id=SIQJr namespace=openshift-operators 2025-12-08T17:55:37.608946645+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="checking if subscriptions need update" id=SIQJr namespace=openshift-operators 2025-12-08T17:55:37.869951887+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:37.869951887+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:38.071347103+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:38.071347103+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:38.217242217+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="resolving subscriptions in namespace" id=GMuiD namespace=service-telemetry 2025-12-08T17:55:38.424926815+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="resolving subscriptions in namespace" id=SIQJr namespace=openshift-operators 2025-12-08T17:55:38.611863475+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="no subscriptions were updated" id=GMuiD namespace=service-telemetry 2025-12-08T17:55:38.671921734+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:38.671996456+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" 
correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:38.676621022+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:38.827471971+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="no subscriptions were updated" id=SIQJr namespace=openshift-operators 2025-12-08T17:55:38.869229067+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:38.869229067+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:39.282018114+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:39.282018114+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:39.310060623+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:39.609153591+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="resolving sources" id=X0f0Z namespace=cert-manager-operator 2025-12-08T17:55:39.609153591+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="checking if subscriptions need update" id=X0f0Z namespace=cert-manager-operator 2025-12-08T17:55:39.809451487+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="resolving sources" id=mfqL3 namespace=cert-manager 2025-12-08T17:55:39.809451487+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="checking if subscriptions need update" id=mfqL3 namespace=cert-manager 2025-12-08T17:55:39.873087583+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:39.873141694+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:40.071687602+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:40.071687602+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:40.204015863+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="No subscriptions were found in namespace cert-manager" id=mfqL3 namespace=cert-manager 2025-12-08T17:55:40.204108917+00:00 stderr F time="2025-12-08T17:55:40Z" level=info 
msg="resolving sources" id=eZUEs namespace=service-telemetry 2025-12-08T17:55:40.204134107+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="checking if subscriptions need update" id=eZUEs namespace=service-telemetry 2025-12-08T17:55:40.424372420+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="resolving subscriptions in namespace" id=X0f0Z namespace=cert-manager-operator 2025-12-08T17:55:40.672768336+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:40.672839968+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:40.815867723+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="no subscriptions were updated" id=X0f0Z namespace=cert-manager-operator 2025-12-08T17:55:40.871295524+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:40.871295524+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:41.009405123+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="resolving subscriptions in namespace" id=eZUEs namespace=service-telemetry 2025-12-08T17:55:41.410490579+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="no subscriptions were updated" id=eZUEs namespace=service-telemetry 2025-12-08T17:55:41.470866716+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:41.470866716+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:41.669742403+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:41.669742403+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:41.671750157+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:42.206343967+00:00 stderr F time="2025-12-08T17:55:42Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:42.206375188+00:00 stderr F 
E1208 17:55:42.206342 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"cluster-observability-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:42.207496159+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:42.413170212+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:42.413170212+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:42.472129440+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:42.472129440+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:42.821192689+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="resolving sources" id=GK3gV namespace=service-telemetry 2025-12-08T17:55:42.821192689+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="checking if subscriptions need update" id=GK3gV namespace=service-telemetry 2025-12-08T17:55:42.872305661+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:42.872305661+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:42.901361628+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:42.901361628+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="resolving sources" id=X5Tcx namespace=cert-manager-operator 2025-12-08T17:55:42.901361628+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="checking if subscriptions need update" id=X5Tcx namespace=cert-manager-operator 2025-12-08T17:55:43.470217787+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:43.470217787+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:43.671046448+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="evaluating current pod" correctHash=true correctImages=true 
current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:43.671046448+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:44.010420900+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="resolving subscriptions in namespace" id=GK3gV namespace=service-telemetry 2025-12-08T17:55:44.209794031+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="resolving subscriptions in namespace" id=X5Tcx namespace=cert-manager-operator 2025-12-08T17:55:44.270087326+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:44.270087326+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:44.475280296+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:44.475280296+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:44.615210846+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="no subscriptions were updated" id=GK3gV namespace=service-telemetry 2025-12-08T17:55:44.815503272+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="no subscriptions were updated" id=X5Tcx namespace=cert-manager-operator 2025-12-08T17:55:45.070019585+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:45.070019585+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:45.272623505+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:45.272623505+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:45.608206354+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="resolving sources" id=zG1AZ namespace=cert-manager-operator 2025-12-08T17:55:45.608206354+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="checking if subscriptions need update" id=zG1AZ namespace=cert-manager-operator 2025-12-08T17:55:45.820262412+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg=syncing 
reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:45.820567020+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="resolving sources" id=5WrzD namespace=openshift-operators 2025-12-08T17:55:45.820594961+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="checking if subscriptions need update" id=5WrzD namespace=openshift-operators 2025-12-08T17:55:45.828053456+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:45.828053456+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:46.069204243+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:46.069270054+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:46.471382219+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:46.471450581+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:46.610519316+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="resolving subscriptions in namespace" id=zG1AZ namespace=cert-manager-operator 2025-12-08T17:55:46.815222223+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="resolving subscriptions in namespace" id=5WrzD namespace=openshift-operators 2025-12-08T17:55:47.019549400+00:00 stderr F time="2025-12-08T17:55:47Z" level=info msg="no subscriptions were updated" id=zG1AZ namespace=cert-manager-operator 2025-12-08T17:55:47.228400301+00:00 stderr F time="2025-12-08T17:55:47Z" level=info msg="no subscriptions were updated" id=5WrzD namespace=openshift-operators 2025-12-08T17:55:48.224966417+00:00 stderr F time="2025-12-08T17:55:48Z" level=info msg="resolving sources" id=KZ64C namespace=openshift-operators 2025-12-08T17:55:48.224966417+00:00 stderr F time="2025-12-08T17:55:48Z" level=info msg="checking if subscriptions need update" id=KZ64C namespace=openshift-operators 2025-12-08T17:55:48.815170481+00:00 stderr F time="2025-12-08T17:55:48Z" level=info msg="resolving subscriptions in namespace" id=KZ64C namespace=openshift-operators 2025-12-08T17:55:49.047174018+00:00 stderr F time="2025-12-08T17:55:49Z" level=info msg="no subscriptions were updated" id=KZ64C namespace=openshift-operators 2025-12-08T17:55:50.690941242+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="error updating InstallPlan status" id=RZxgW ip=install-5nc8t namespace=openshift-operators phase=Installing updateError="Operation cannot be fulfilled on installplans.operators.coreos.com \"install-5nc8t\": the object has been 
modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:50.690941242+00:00 stderr F E1208 17:55:50.688159 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/install-5nc8t\" failed: error updating InstallPlan status: Operation cannot be fulfilled on installplans.operators.coreos.com \"install-5nc8t\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:50.690941242+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:55:50.690941242+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg=syncing id=38t2H ip=install-5nc8t namespace=openshift-operators phase=Complete 2025-12-08T17:55:50.698311385+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg=syncing id=WbKl8 ip=install-5nc8t namespace=openshift-operators phase=Complete 2025-12-08T17:55:50.698311385+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:50.698311385+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:55:50.706021016+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:50.706021016+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:55:50.711257150+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:50.711257150+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:55:50.719313321+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="resolving sources" id=c6bup namespace=openshift-operators 2025-12-08T17:55:50.719313321+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="checking if subscriptions need update" id=c6bup namespace=openshift-operators 2025-12-08T17:55:50.731718882+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="resolving subscriptions in namespace" id=c6bup namespace=openshift-operators 2025-12-08T17:55:50.742666801+00:00 stderr F time="2025-12-08T17:55:50Z" level=info msg="no subscriptions were updated" id=c6bup namespace=openshift-operators 2025-12-08T17:56:05.620704752+00:00 stderr F time="2025-12-08T17:56:05Z" level=info msg="resolving sources" id=75oti namespace=service-telemetry 2025-12-08T17:56:05.620704752+00:00 stderr F time="2025-12-08T17:56:05Z" level=info msg="checking if subscriptions need update" id=75oti namespace=service-telemetry 2025-12-08T17:56:05.632050573+00:00 stderr F 
time="2025-12-08T17:56:05Z" level=info msg="resolving subscriptions in namespace" id=75oti namespace=service-telemetry 2025-12-08T17:56:05.637069271+00:00 stderr F time="2025-12-08T17:56:05Z" level=info msg="no subscriptions were updated" id=75oti namespace=service-telemetry 2025-12-08T17:56:07.069347573+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:07.074844763+00:00 stderr F time="2025-12-08T17:56:07Z" level=error msg="registry service not healthy: could not get service account" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="serviceaccounts \"infrawatch-operators\" not found" id=W0kc6 2025-12-08T17:56:07.074884474+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="registry service status invalid, need to overwrite" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=W0kc6 2025-12-08T17:56:07.075363498+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:07.075363498+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:07.085640059+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:07.085926757+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:07.091562632+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:07.091562632+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:07.093345601+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="of 0 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=W0kc6 2025-12-08T17:56:07.093345601+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="registry pods invalid, need to overwrite" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=W0kc6 2025-12-08T17:56:07.093413103+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="creating desired pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=W0kc6 pod.name= pod.namespace=service-telemetry 2025-12-08T17:56:07.479796225+00:00 stderr F I1208 17:56:07.479368 1 warnings.go:110] "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"registry-server\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities 
(container \"registry-server\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"registry-server\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"registry-server\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")" 2025-12-08T17:56:07.673861160+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="evaluating current pod" correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry 2025-12-08T17:56:08.102766609+00:00 stderr F time="2025-12-08T17:56:08Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:08.273159964+00:00 stderr F time="2025-12-08T17:56:08Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry id=A9sc1 2025-12-08T17:56:08.674259590+00:00 stderr F time="2025-12-08T17:56:08Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:08.674340243+00:00 stderr F time="2025-12-08T17:56:08Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:09.072639403+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:09.072639403+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:09.380200871+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="pod spec diff: v1.PodSpec{\n- \tVolumes: []v1.Volume{\n- \t\t{\n- \t\t\tName: \"kube-api-access-vbzcn\",\n- \t\t\tVolumeSource: v1.VolumeSource{Projected: s\"&ProjectedVolumeSource{Sources:[\"...},\n- \t\t},\n- \t},\n+ \tVolumes: nil,\n \tInitContainers: nil,\n \tContainers: []v1.Container{\n \t\t{\n \t\t\t... 
// 3 identical fields\n \t\t\tArgs: nil,\n \t\t\tWorkingDir: \"\",\n \t\t\tPorts: []v1.ContainerPort{\n \t\t\t\t{\n \t\t\t\t\tName: \"grpc\",\n \t\t\t\t\tHostPort: 0,\n \t\t\t\t\tContainerPort: 50051,\n- \t\t\t\t\tProtocol: \"TCP\",\n+ \t\t\t\t\tProtocol: \"\",\n \t\t\t\t\tHostIP: \"\",\n \t\t\t\t},\n \t\t\t},\n \t\t\tEnvFrom: nil,\n \t\t\tEnv: nil,\n \t\t\tResources: {Requests: {s\"cpu\": {i: {...}, s: \"10m\", Format: \"DecimalSI\"}, s\"memory\": {i: {...}, s: \"50Mi\", Format: \"BinarySI\"}}},\n \t\t\tResizePolicy: nil,\n \t\t\tRestartPolicy: nil,\n- \t\t\tVolumeMounts: []v1.VolumeMount{\n- \t\t\t\t{\n- \t\t\t\t\tName: \"kube-api-access-vbzcn\",\n- \t\t\t\t\tReadOnly: true,\n- \t\t\t\t\tMountPath: \"/var/run/secrets/kubernetes.io/serviceaccount\",\n- \t\t\t\t},\n- \t\t\t},\n+ \t\t\tVolumeMounts: nil,\n \t\t\tVolumeDevices: nil,\n \t\t\tLivenessProbe: &v1.Probe{\n \t\t\t\tProbeHandler: {Exec: &{Command: {\"grpc_health_probe\", \"-addr=:50051\"}}},\n \t\t\t\tInitialDelaySeconds: 10,\n \t\t\t\tTimeoutSeconds: 5,\n- \t\t\t\tPeriodSeconds: 10,\n+ \t\t\t\tPeriodSeconds: 0,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n- \t\t\t\tFailureThreshold: 3,\n+ \t\t\t\tFailureThreshold: 0,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tReadinessProbe: &v1.Probe{\n \t\t\t\tProbeHandler: {Exec: &{Command: {\"grpc_health_probe\", \"-addr=:50051\"}}},\n \t\t\t\tInitialDelaySeconds: 5,\n \t\t\t\tTimeoutSeconds: 5,\n- \t\t\t\tPeriodSeconds: 10,\n+ \t\t\t\tPeriodSeconds: 0,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n- \t\t\t\tFailureThreshold: 3,\n+ \t\t\t\tFailureThreshold: 0,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tStartupProbe: &v1.Probe{\n \t\t\t\t... // 2 identical fields\n \t\t\t\tTimeoutSeconds: 5,\n \t\t\t\tPeriodSeconds: 10,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n \t\t\t\tFailureThreshold: 10,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tLifecycle: nil,\n- \t\t\tTerminationMessagePath: \"/dev/termination-log\",\n+ \t\t\tTerminationMessagePath: \"\",\n \t\t\tTerminationMessagePolicy: \"FallbackToLogsOnError\",\n \t\t\tImagePullPolicy: \"Always\",\n \t\t\tSecurityContext: &v1.SecurityContext{\n- \t\t\t\tCapabilities: s\"&Capabilities{Add:[],Drop:[MKNOD],}\",\n+ \t\t\t\tCapabilities: nil,\n \t\t\t\tPrivileged: nil,\n \t\t\t\tSELinuxOptions: nil,\n \t\t\t\t... 
// 9 identical fields\n \t\t\t},\n \t\t\tStdin: false,\n \t\t\tStdinOnce: false,\n \t\t\tTTY: false,\n \t\t},\n \t},\n \tEphemeralContainers: nil,\n- \tRestartPolicy: \"Always\",\n+ \tRestartPolicy: \"\",\n- \tTerminationGracePeriodSeconds: &30,\n+ \tTerminationGracePeriodSeconds: nil,\n \tActiveDeadlineSeconds: nil,\n- \tDNSPolicy: \"ClusterFirst\",\n+ \tDNSPolicy: \"\",\n \tNodeSelector: {\"kubernetes.io/os\": \"linux\"},\n \tServiceAccountName: \"infrawatch-operators\",\n- \tDeprecatedServiceAccount: \"infrawatch-operators\",\n+ \tDeprecatedServiceAccount: \"\",\n \tAutomountServiceAccountToken: nil,\n- \tNodeName: \"crc\",\n+ \tNodeName: \"\",\n \tHostNetwork: false,\n \tHostPID: false,\n \tHostIPC: false,\n \tShareProcessNamespace: nil,\n- \tSecurityContext: s\"&PodSecurityContext{SELinuxOptions:&SELinuxOptions{User:,Role:,Type:,Level:s0:c26,c10,},RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmo\"...,\n+ \tSecurityContext: nil,\n \tImagePullSecrets: {{Name: \"infrawatch-operators-dockercfg-bcx4t\"}},\n \tHostname: \"\",\n \tSubdomain: \"\",\n \tAffinity: nil,\n- \tSchedulerName: \"default-scheduler\",\n+ \tSchedulerName: \"\",\n- \tTolerations: []v1.Toleration{\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/not-ready\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoExecute\",\n- \t\t\tTolerationSeconds: &300,\n- \t\t},\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/unreachable\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoExecute\",\n- \t\t\tTolerationSeconds: &300,\n- \t\t},\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/memory-pressure\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoSchedule\",\n- \t\t},\n- \t},\n+ \tTolerations: nil,\n \tHostAliases: nil,\n \tPriorityClassName: \"\",\n- \tPriority: &0,\n+ \tPriority: nil,\n \tDNSConfig: nil,\n \tReadinessGates: nil,\n \tRuntimeClassName: nil,\n- \tEnableServiceLinks: &true,\n+ \tEnableServiceLinks: nil,\n- \tPreemptionPolicy: &\"PreemptLowerPriority\",\n+ \tPreemptionPolicy: nil,\n \tOverhead: nil,\n \tTopologySpreadConstraints: nil,\n \t... // 6 identical fields\n }\n" correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry 2025-12-08T17:56:09.380200871+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="of 1 pods matching label selector, 0 have the correct images and matching hash" correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry 2025-12-08T17:56:09.380200871+00:00 stderr F time="2025-12-08T17:56:09Z" level=error msg="registry service not healthy: one or more required resources are missing" isCurrentServiceAccountNil=false isServiceNil=true numCurrentPods=0 2025-12-08T17:56:09.384926631+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="pod spec diff: v1.PodSpec{\n- \tVolumes: []v1.Volume{\n- \t\t{\n- \t\t\tName: \"kube-api-access-vbzcn\",\n- \t\t\tVolumeSource: v1.VolumeSource{Projected: s\"&ProjectedVolumeSource{Sources:[\"...},\n- \t\t},\n- \t},\n+ \tVolumes: nil,\n \tInitContainers: nil,\n \tContainers: []v1.Container{\n \t\t{\n \t\t\t... 
// 3 identical fields\n \t\t\tArgs: nil,\n \t\t\tWorkingDir: \"\",\n \t\t\tPorts: []v1.ContainerPort{\n \t\t\t\t{\n \t\t\t\t\tName: \"grpc\",\n \t\t\t\t\tHostPort: 0,\n \t\t\t\t\tContainerPort: 50051,\n- \t\t\t\t\tProtocol: \"TCP\",\n+ \t\t\t\t\tProtocol: \"\",\n \t\t\t\t\tHostIP: \"\",\n \t\t\t\t},\n \t\t\t},\n \t\t\tEnvFrom: nil,\n \t\t\tEnv: nil,\n \t\t\tResources: {Requests: {s\"cpu\": {i: {...}, s: \"10m\", Format: \"DecimalSI\"}, s\"memory\": {i: {...}, s: \"50Mi\", Format: \"BinarySI\"}}},\n \t\t\tResizePolicy: nil,\n \t\t\tRestartPolicy: nil,\n- \t\t\tVolumeMounts: []v1.VolumeMount{\n- \t\t\t\t{\n- \t\t\t\t\tName: \"kube-api-access-vbzcn\",\n- \t\t\t\t\tReadOnly: true,\n- \t\t\t\t\tMountPath: \"/var/run/secrets/kubernetes.io/serviceaccount\",\n- \t\t\t\t},\n- \t\t\t},\n+ \t\t\tVolumeMounts: nil,\n \t\t\tVolumeDevices: nil,\n \t\t\tLivenessProbe: &v1.Probe{\n \t\t\t\tProbeHandler: {Exec: &{Command: {\"grpc_health_probe\", \"-addr=:50051\"}}},\n \t\t\t\tInitialDelaySeconds: 10,\n \t\t\t\tTimeoutSeconds: 5,\n- \t\t\t\tPeriodSeconds: 10,\n+ \t\t\t\tPeriodSeconds: 0,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n- \t\t\t\tFailureThreshold: 3,\n+ \t\t\t\tFailureThreshold: 0,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tReadinessProbe: &v1.Probe{\n \t\t\t\tProbeHandler: {Exec: &{Command: {\"grpc_health_probe\", \"-addr=:50051\"}}},\n \t\t\t\tInitialDelaySeconds: 5,\n \t\t\t\tTimeoutSeconds: 5,\n- \t\t\t\tPeriodSeconds: 10,\n+ \t\t\t\tPeriodSeconds: 0,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n- \t\t\t\tFailureThreshold: 3,\n+ \t\t\t\tFailureThreshold: 0,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tStartupProbe: &v1.Probe{\n \t\t\t\t... // 2 identical fields\n \t\t\t\tTimeoutSeconds: 5,\n \t\t\t\tPeriodSeconds: 10,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n \t\t\t\tFailureThreshold: 10,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tLifecycle: nil,\n- \t\t\tTerminationMessagePath: \"/dev/termination-log\",\n+ \t\t\tTerminationMessagePath: \"\",\n \t\t\tTerminationMessagePolicy: \"FallbackToLogsOnError\",\n \t\t\tImagePullPolicy: \"Always\",\n \t\t\tSecurityContext: &v1.SecurityContext{\n- \t\t\t\tCapabilities: s\"&Capabilities{Add:[],Drop:[MKNOD],}\",\n+ \t\t\t\tCapabilities: nil,\n \t\t\t\tPrivileged: nil,\n \t\t\t\tSELinuxOptions: nil,\n \t\t\t\t... 
// 9 identical fields\n \t\t\t},\n \t\t\tStdin: false,\n \t\t\tStdinOnce: false,\n \t\t\tTTY: false,\n \t\t},\n \t},\n \tEphemeralContainers: nil,\n- \tRestartPolicy: \"Always\",\n+ \tRestartPolicy: \"\",\n- \tTerminationGracePeriodSeconds: &30,\n+ \tTerminationGracePeriodSeconds: nil,\n \tActiveDeadlineSeconds: nil,\n- \tDNSPolicy: \"ClusterFirst\",\n+ \tDNSPolicy: \"\",\n \tNodeSelector: {\"kubernetes.io/os\": \"linux\"},\n \tServiceAccountName: \"infrawatch-operators\",\n- \tDeprecatedServiceAccount: \"infrawatch-operators\",\n+ \tDeprecatedServiceAccount: \"\",\n \tAutomountServiceAccountToken: nil,\n- \tNodeName: \"crc\",\n+ \tNodeName: \"\",\n \tHostNetwork: false,\n \tHostPID: false,\n \tHostIPC: false,\n \tShareProcessNamespace: nil,\n- \tSecurityContext: s\"&PodSecurityContext{SELinuxOptions:&SELinuxOptions{User:,Role:,Type:,Level:s0:c26,c10,},RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmo\"...,\n+ \tSecurityContext: nil,\n \tImagePullSecrets: {{Name: \"infrawatch-operators-dockercfg-bcx4t\"}},\n \tHostname: \"\",\n \tSubdomain: \"\",\n \tAffinity: nil,\n- \tSchedulerName: \"default-scheduler\",\n+ \tSchedulerName: \"\",\n- \tTolerations: []v1.Toleration{\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/not-ready\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoExecute\",\n- \t\t\tTolerationSeconds: &300,\n- \t\t},\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/unreachable\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoExecute\",\n- \t\t\tTolerationSeconds: &300,\n- \t\t},\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/memory-pressure\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoSchedule\",\n- \t\t},\n- \t},\n+ \tTolerations: nil,\n \tHostAliases: nil,\n \tPriorityClassName: \"\",\n- \tPriority: &0,\n+ \tPriority: nil,\n \tDNSConfig: nil,\n \tReadinessGates: nil,\n \tRuntimeClassName: nil,\n- \tEnableServiceLinks: &true,\n+ \tEnableServiceLinks: nil,\n- \tPreemptionPolicy: &\"PreemptLowerPriority\",\n+ \tPreemptionPolicy: nil,\n \tOverhead: nil,\n \tTopologySpreadConstraints: nil,\n \t... 
// 6 identical fields\n }\n" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry id=A9sc1 2025-12-08T17:56:09.384926631+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="of 1 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry id=A9sc1 2025-12-08T17:56:09.384926631+00:00 stderr F time="2025-12-08T17:56:09Z" level=error msg="registry service not healthy: one or more required resources are missing" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=A9sc1 isCurrentServiceAccountNil=false isServiceNil=false numCurrentPods=0 2025-12-08T17:56:09.391250544+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:09.391347157+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="resolving sources" id=Hffeh namespace=service-telemetry 2025-12-08T17:56:09.391347157+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="checking if subscriptions need update" id=Hffeh namespace=service-telemetry 2025-12-08T17:56:09.402668848+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="checking for existing installplan" channel=unstable id=Hffeh namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:09.402668848+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="resolving subscriptions in namespace" id=Hffeh namespace=service-telemetry 2025-12-08T17:56:09.416716453+00:00 stderr F I1208 17:56:09.416663 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators 2025-12-08T17:56:09.428141718+00:00 stderr F E1208 17:56:09.428015 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators" logger="UnhandledError" 2025-12-08T17:56:09.434572124+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="resolving sources" id=4ecvK namespace=service-telemetry 2025-12-08T17:56:09.434637865+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="checking if subscriptions need update" id=4ecvK namespace=service-telemetry 2025-12-08T17:56:09.444487665+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="checking for existing installplan" channel=unstable id=4ecvK namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:09.444545397+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="resolving subscriptions in namespace" id=4ecvK namespace=service-telemetry 2025-12-08T17:56:09.474944671+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="evaluating current pod" correctHash=true correctImages=true 
current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:09.475011243+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:09.587870520+00:00 stderr F I1208 17:56:09.587821 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators 2025-12-08T17:56:09.587971383+00:00 stderr F E1208 17:56:09.587952 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators" logger="UnhandledError" 2025-12-08T17:56:09.599421777+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="resolving sources" id=GZZ11 namespace=service-telemetry 2025-12-08T17:56:09.599465818+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="checking if subscriptions need update" id=GZZ11 namespace=service-telemetry 2025-12-08T17:56:09.987207497+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="checking for existing installplan" channel=unstable id=GZZ11 namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:09.987283140+00:00 stderr F time="2025-12-08T17:56:09Z" level=info msg="resolving subscriptions in namespace" id=GZZ11 namespace=service-telemetry 2025-12-08T17:56:10.189198060+00:00 stderr F E1208 17:56:10.188754 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators" logger="UnhandledError" 2025-12-08T17:56:10.189455617+00:00 stderr F I1208 17:56:10.189415 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators 2025-12-08T17:56:10.210895326+00:00 stderr F time="2025-12-08T17:56:10Z" level=info msg="resolving sources" id=8WlLA namespace=service-telemetry 2025-12-08T17:56:10.210983668+00:00 stderr F time="2025-12-08T17:56:10Z" level=info msg="checking if subscriptions need update" id=8WlLA namespace=service-telemetry 2025-12-08T17:56:10.590584454+00:00 stderr F time="2025-12-08T17:56:10Z" level=info msg="checking for existing installplan" channel=unstable id=8WlLA namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:10.590584454+00:00 stderr F time="2025-12-08T17:56:10Z" level=info msg="resolving subscriptions in namespace" id=8WlLA namespace=service-telemetry 2025-12-08T17:56:10.673579061+00:00 stderr F time="2025-12-08T17:56:10Z" level=info 
msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:10.673579061+00:00 stderr F time="2025-12-08T17:56:10Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:10.786429148+00:00 stderr F E1208 17:56:10.786189 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators" logger="UnhandledError" 2025-12-08T17:56:10.786429148+00:00 stderr F I1208 17:56:10.786232 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators 2025-12-08T17:56:10.828036019+00:00 stderr F time="2025-12-08T17:56:10Z" level=info msg="resolving sources" id=mL93m namespace=service-telemetry 2025-12-08T17:56:10.828036019+00:00 stderr F time="2025-12-08T17:56:10Z" level=info msg="checking if subscriptions need update" id=mL93m namespace=service-telemetry 2025-12-08T17:56:11.071852670+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="evaluating current pod" correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry 2025-12-08T17:56:11.073212708+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="pod spec diff: v1.PodSpec{\n- \tVolumes: []v1.Volume{\n- \t\t{\n- \t\t\tName: \"kube-api-access-vbzcn\",\n- \t\t\tVolumeSource: v1.VolumeSource{Projected: s\"&ProjectedVolumeSource{Sources:[\"...},\n- \t\t},\n- \t},\n+ \tVolumes: nil,\n \tInitContainers: nil,\n \tContainers: []v1.Container{\n \t\t{\n \t\t\t... 
// 3 identical fields\n \t\t\tArgs: nil,\n \t\t\tWorkingDir: \"\",\n \t\t\tPorts: []v1.ContainerPort{\n \t\t\t\t{\n \t\t\t\t\tName: \"grpc\",\n \t\t\t\t\tHostPort: 0,\n \t\t\t\t\tContainerPort: 50051,\n- \t\t\t\t\tProtocol: \"TCP\",\n+ \t\t\t\t\tProtocol: \"\",\n \t\t\t\t\tHostIP: \"\",\n \t\t\t\t},\n \t\t\t},\n \t\t\tEnvFrom: nil,\n \t\t\tEnv: nil,\n \t\t\tResources: {Requests: {s\"cpu\": {i: {...}, s: \"10m\", Format: \"DecimalSI\"}, s\"memory\": {i: {...}, s: \"50Mi\", Format: \"BinarySI\"}}},\n \t\t\tResizePolicy: nil,\n \t\t\tRestartPolicy: nil,\n- \t\t\tVolumeMounts: []v1.VolumeMount{\n- \t\t\t\t{\n- \t\t\t\t\tName: \"kube-api-access-vbzcn\",\n- \t\t\t\t\tReadOnly: true,\n- \t\t\t\t\tMountPath: \"/var/run/secrets/kubernetes.io/serviceaccount\",\n- \t\t\t\t},\n- \t\t\t},\n+ \t\t\tVolumeMounts: nil,\n \t\t\tVolumeDevices: nil,\n \t\t\tLivenessProbe: &v1.Probe{\n \t\t\t\tProbeHandler: {Exec: &{Command: {\"grpc_health_probe\", \"-addr=:50051\"}}},\n \t\t\t\tInitialDelaySeconds: 10,\n \t\t\t\tTimeoutSeconds: 5,\n- \t\t\t\tPeriodSeconds: 10,\n+ \t\t\t\tPeriodSeconds: 0,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n- \t\t\t\tFailureThreshold: 3,\n+ \t\t\t\tFailureThreshold: 0,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tReadinessProbe: &v1.Probe{\n \t\t\t\tProbeHandler: {Exec: &{Command: {\"grpc_health_probe\", \"-addr=:50051\"}}},\n \t\t\t\tInitialDelaySeconds: 5,\n \t\t\t\tTimeoutSeconds: 5,\n- \t\t\t\tPeriodSeconds: 10,\n+ \t\t\t\tPeriodSeconds: 0,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n- \t\t\t\tFailureThreshold: 3,\n+ \t\t\t\tFailureThreshold: 0,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tStartupProbe: &v1.Probe{\n \t\t\t\t... // 2 identical fields\n \t\t\t\tTimeoutSeconds: 5,\n \t\t\t\tPeriodSeconds: 10,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n \t\t\t\tFailureThreshold: 10,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tLifecycle: nil,\n- \t\t\tTerminationMessagePath: \"/dev/termination-log\",\n+ \t\t\tTerminationMessagePath: \"\",\n \t\t\tTerminationMessagePolicy: \"FallbackToLogsOnError\",\n \t\t\tImagePullPolicy: \"Always\",\n \t\t\tSecurityContext: &v1.SecurityContext{\n- \t\t\t\tCapabilities: s\"&Capabilities{Add:[],Drop:[MKNOD],}\",\n+ \t\t\t\tCapabilities: nil,\n \t\t\t\tPrivileged: nil,\n \t\t\t\tSELinuxOptions: nil,\n \t\t\t\t... 
// 9 identical fields\n \t\t\t},\n \t\t\tStdin: false,\n \t\t\tStdinOnce: false,\n \t\t\tTTY: false,\n \t\t},\n \t},\n \tEphemeralContainers: nil,\n- \tRestartPolicy: \"Always\",\n+ \tRestartPolicy: \"\",\n- \tTerminationGracePeriodSeconds: &30,\n+ \tTerminationGracePeriodSeconds: nil,\n \tActiveDeadlineSeconds: nil,\n- \tDNSPolicy: \"ClusterFirst\",\n+ \tDNSPolicy: \"\",\n \tNodeSelector: {\"kubernetes.io/os\": \"linux\"},\n \tServiceAccountName: \"infrawatch-operators\",\n- \tDeprecatedServiceAccount: \"infrawatch-operators\",\n+ \tDeprecatedServiceAccount: \"\",\n \tAutomountServiceAccountToken: nil,\n- \tNodeName: \"crc\",\n+ \tNodeName: \"\",\n \tHostNetwork: false,\n \tHostPID: false,\n \tHostIPC: false,\n \tShareProcessNamespace: nil,\n- \tSecurityContext: s\"&PodSecurityContext{SELinuxOptions:&SELinuxOptions{User:,Role:,Type:,Level:s0:c26,c10,},RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmo\"...,\n+ \tSecurityContext: nil,\n \tImagePullSecrets: {{Name: \"infrawatch-operators-dockercfg-bcx4t\"}},\n \tHostname: \"\",\n \tSubdomain: \"\",\n \tAffinity: nil,\n- \tSchedulerName: \"default-scheduler\",\n+ \tSchedulerName: \"\",\n- \tTolerations: []v1.Toleration{\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/not-ready\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoExecute\",\n- \t\t\tTolerationSeconds: &300,\n- \t\t},\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/unreachable\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoExecute\",\n- \t\t\tTolerationSeconds: &300,\n- \t\t},\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/memory-pressure\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoSchedule\",\n- \t\t},\n- \t},\n+ \tTolerations: nil,\n \tHostAliases: nil,\n \tPriorityClassName: \"\",\n- \tPriority: &0,\n+ \tPriority: nil,\n \tDNSConfig: nil,\n \tReadinessGates: nil,\n \tRuntimeClassName: nil,\n- \tEnableServiceLinks: &true,\n+ \tEnableServiceLinks: nil,\n- \tPreemptionPolicy: &\"PreemptLowerPriority\",\n+ \tPreemptionPolicy: nil,\n \tOverhead: nil,\n \tTopologySpreadConstraints: nil,\n \t... 
// 6 identical fields\n }\n" correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry 2025-12-08T17:56:11.073212708+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="of 1 pods matching label selector, 0 have the correct images and matching hash" correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry 2025-12-08T17:56:11.073212708+00:00 stderr F time="2025-12-08T17:56:11Z" level=error msg="registry service not healthy: one or more required resources are missing" isCurrentServiceAccountNil=false isServiceNil=false numCurrentPods=0 2025-12-08T17:56:11.191480193+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="checking for existing installplan" channel=unstable id=mL93m namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:11.191480193+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="resolving subscriptions in namespace" id=mL93m namespace=service-telemetry 2025-12-08T17:56:11.272955188+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry id=A9sc1 2025-12-08T17:56:11.273962595+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="pod spec diff: v1.PodSpec{\n- \tVolumes: []v1.Volume{\n- \t\t{\n- \t\t\tName: \"kube-api-access-vbzcn\",\n- \t\t\tVolumeSource: v1.VolumeSource{Projected: s\"&ProjectedVolumeSource{Sources:[\"...},\n- \t\t},\n- \t},\n+ \tVolumes: nil,\n \tInitContainers: nil,\n \tContainers: []v1.Container{\n \t\t{\n \t\t\t... 
// 3 identical fields\n \t\t\tArgs: nil,\n \t\t\tWorkingDir: \"\",\n \t\t\tPorts: []v1.ContainerPort{\n \t\t\t\t{\n \t\t\t\t\tName: \"grpc\",\n \t\t\t\t\tHostPort: 0,\n \t\t\t\t\tContainerPort: 50051,\n- \t\t\t\t\tProtocol: \"TCP\",\n+ \t\t\t\t\tProtocol: \"\",\n \t\t\t\t\tHostIP: \"\",\n \t\t\t\t},\n \t\t\t},\n \t\t\tEnvFrom: nil,\n \t\t\tEnv: nil,\n \t\t\tResources: {Requests: {s\"cpu\": {i: {...}, s: \"10m\", Format: \"DecimalSI\"}, s\"memory\": {i: {...}, s: \"50Mi\", Format: \"BinarySI\"}}},\n \t\t\tResizePolicy: nil,\n \t\t\tRestartPolicy: nil,\n- \t\t\tVolumeMounts: []v1.VolumeMount{\n- \t\t\t\t{\n- \t\t\t\t\tName: \"kube-api-access-vbzcn\",\n- \t\t\t\t\tReadOnly: true,\n- \t\t\t\t\tMountPath: \"/var/run/secrets/kubernetes.io/serviceaccount\",\n- \t\t\t\t},\n- \t\t\t},\n+ \t\t\tVolumeMounts: nil,\n \t\t\tVolumeDevices: nil,\n \t\t\tLivenessProbe: &v1.Probe{\n \t\t\t\tProbeHandler: {Exec: &{Command: {\"grpc_health_probe\", \"-addr=:50051\"}}},\n \t\t\t\tInitialDelaySeconds: 10,\n \t\t\t\tTimeoutSeconds: 5,\n- \t\t\t\tPeriodSeconds: 10,\n+ \t\t\t\tPeriodSeconds: 0,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n- \t\t\t\tFailureThreshold: 3,\n+ \t\t\t\tFailureThreshold: 0,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tReadinessProbe: &v1.Probe{\n \t\t\t\tProbeHandler: {Exec: &{Command: {\"grpc_health_probe\", \"-addr=:50051\"}}},\n \t\t\t\tInitialDelaySeconds: 5,\n \t\t\t\tTimeoutSeconds: 5,\n- \t\t\t\tPeriodSeconds: 10,\n+ \t\t\t\tPeriodSeconds: 0,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n- \t\t\t\tFailureThreshold: 3,\n+ \t\t\t\tFailureThreshold: 0,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tStartupProbe: &v1.Probe{\n \t\t\t\t... // 2 identical fields\n \t\t\t\tTimeoutSeconds: 5,\n \t\t\t\tPeriodSeconds: 10,\n- \t\t\t\tSuccessThreshold: 1,\n+ \t\t\t\tSuccessThreshold: 0,\n \t\t\t\tFailureThreshold: 10,\n \t\t\t\tTerminationGracePeriodSeconds: nil,\n \t\t\t},\n \t\t\tLifecycle: nil,\n- \t\t\tTerminationMessagePath: \"/dev/termination-log\",\n+ \t\t\tTerminationMessagePath: \"\",\n \t\t\tTerminationMessagePolicy: \"FallbackToLogsOnError\",\n \t\t\tImagePullPolicy: \"Always\",\n \t\t\tSecurityContext: &v1.SecurityContext{\n- \t\t\t\tCapabilities: s\"&Capabilities{Add:[],Drop:[MKNOD],}\",\n+ \t\t\t\tCapabilities: nil,\n \t\t\t\tPrivileged: nil,\n \t\t\t\tSELinuxOptions: nil,\n \t\t\t\t... 
// 9 identical fields\n \t\t\t},\n \t\t\tStdin: false,\n \t\t\tStdinOnce: false,\n \t\t\tTTY: false,\n \t\t},\n \t},\n \tEphemeralContainers: nil,\n- \tRestartPolicy: \"Always\",\n+ \tRestartPolicy: \"\",\n- \tTerminationGracePeriodSeconds: &30,\n+ \tTerminationGracePeriodSeconds: nil,\n \tActiveDeadlineSeconds: nil,\n- \tDNSPolicy: \"ClusterFirst\",\n+ \tDNSPolicy: \"\",\n \tNodeSelector: {\"kubernetes.io/os\": \"linux\"},\n \tServiceAccountName: \"infrawatch-operators\",\n- \tDeprecatedServiceAccount: \"infrawatch-operators\",\n+ \tDeprecatedServiceAccount: \"\",\n \tAutomountServiceAccountToken: nil,\n- \tNodeName: \"crc\",\n+ \tNodeName: \"\",\n \tHostNetwork: false,\n \tHostPID: false,\n \tHostIPC: false,\n \tShareProcessNamespace: nil,\n- \tSecurityContext: s\"&PodSecurityContext{SELinuxOptions:&SELinuxOptions{User:,Role:,Type:,Level:s0:c26,c10,},RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmo\"...,\n+ \tSecurityContext: nil,\n \tImagePullSecrets: {{Name: \"infrawatch-operators-dockercfg-bcx4t\"}},\n \tHostname: \"\",\n \tSubdomain: \"\",\n \tAffinity: nil,\n- \tSchedulerName: \"default-scheduler\",\n+ \tSchedulerName: \"\",\n- \tTolerations: []v1.Toleration{\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/not-ready\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoExecute\",\n- \t\t\tTolerationSeconds: &300,\n- \t\t},\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/unreachable\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoExecute\",\n- \t\t\tTolerationSeconds: &300,\n- \t\t},\n- \t\t{\n- \t\t\tKey: \"node.kubernetes.io/memory-pressure\",\n- \t\t\tOperator: \"Exists\",\n- \t\t\tEffect: \"NoSchedule\",\n- \t\t},\n- \t},\n+ \tTolerations: nil,\n \tHostAliases: nil,\n \tPriorityClassName: \"\",\n- \tPriority: &0,\n+ \tPriority: nil,\n \tDNSConfig: nil,\n \tReadinessGates: nil,\n \tRuntimeClassName: nil,\n- \tEnableServiceLinks: &true,\n+ \tEnableServiceLinks: nil,\n- \tPreemptionPolicy: &\"PreemptLowerPriority\",\n+ \tPreemptionPolicy: nil,\n \tOverhead: nil,\n \tTopologySpreadConstraints: nil,\n \t... 
// 6 identical fields\n }\n" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry id=A9sc1 2025-12-08T17:56:11.273962595+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="of 1 pods matching label selector, 0 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=false correctImages=true current-pod.name=infrawatch-operators-xmhcm current-pod.namespace=service-telemetry id=A9sc1 2025-12-08T17:56:11.273962595+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="registry pods invalid, need to overwrite" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=A9sc1 2025-12-08T17:56:11.274965654+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="deleting current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=A9sc1 pod.name=infrawatch-operators-xmhcm pod.namespace=service-telemetry 2025-12-08T17:56:11.385613579+00:00 stderr F time="2025-12-08T17:56:11Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"service-telemetry-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:11.385664411+00:00 stderr F E1208 17:56:11.385607 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"service-telemetry-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:11.386963537+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:11.586382539+00:00 stderr F E1208 17:56:11.586278 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators" logger="UnhandledError" 2025-12-08T17:56:11.586382539+00:00 stderr F I1208 17:56:11.586325 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators 2025-12-08T17:56:11.668009138+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="resolving sources" id=cTUwd namespace=service-telemetry 2025-12-08T17:56:11.668009138+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="checking if subscriptions need update" id=cTUwd namespace=service-telemetry 2025-12-08T17:56:11.876021776+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="creating desired pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=A9sc1 pod.name= pod.namespace=service-telemetry 2025-12-08T17:56:11.990217380+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="checking for existing installplan" channel=unstable id=cTUwd 
namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:11.990217380+00:00 stderr F time="2025-12-08T17:56:11Z" level=info msg="resolving subscriptions in namespace" id=cTUwd namespace=service-telemetry 2025-12-08T17:56:12.191202895+00:00 stderr F E1208 17:56:12.191135 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators" logger="UnhandledError" 2025-12-08T17:56:12.191449732+00:00 stderr F I1208 17:56:12.191216 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators 2025-12-08T17:56:12.275923979+00:00 stderr F time="2025-12-08T17:56:12Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:12.275923979+00:00 stderr F time="2025-12-08T17:56:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:12.353976031+00:00 stderr F time="2025-12-08T17:56:12Z" level=info msg="resolving sources" id=/W6/d namespace=service-telemetry 2025-12-08T17:56:12.353976031+00:00 stderr F time="2025-12-08T17:56:12Z" level=info msg="checking if subscriptions need update" id=/W6/d namespace=service-telemetry 2025-12-08T17:56:12.590970675+00:00 stderr F time="2025-12-08T17:56:12Z" level=info msg="checking for existing installplan" channel=unstable id=/W6/d namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:12.590970675+00:00 stderr F time="2025-12-08T17:56:12Z" level=info msg="resolving subscriptions in namespace" id=/W6/d namespace=service-telemetry 2025-12-08T17:56:12.678093285+00:00 stderr F I1208 17:56:12.677991 1 warnings.go:110] "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"registry-server\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"registry-server\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"registry-server\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"registry-server\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")" 2025-12-08T17:56:12.678221309+00:00 stderr F time="2025-12-08T17:56:12Z" level=info msg="multiple pods found for selector" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=A9sc1 selector="olm.catalogSource=infrawatch-operators" 2025-12-08T17:56:12.788837023+00:00 stderr F E1208 17:56:12.788732 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource 
service-telemetry/infrawatch-operators" logger="UnhandledError" 2025-12-08T17:56:12.788837023+00:00 stderr F I1208 17:56:12.788801 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators 2025-12-08T17:56:12.873033244+00:00 stderr F time="2025-12-08T17:56:12Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:12.873033244+00:00 stderr F time="2025-12-08T17:56:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:13.110409037+00:00 stderr F time="2025-12-08T17:56:13Z" level=info msg="resolving sources" id=4i4FZ namespace=service-telemetry 2025-12-08T17:56:13.110409037+00:00 stderr F time="2025-12-08T17:56:13Z" level=info msg="checking if subscriptions need update" id=4i4FZ namespace=service-telemetry 2025-12-08T17:56:13.400957560+00:00 stderr F time="2025-12-08T17:56:13Z" level=info msg="checking for existing installplan" channel=unstable id=4i4FZ namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:13.400957560+00:00 stderr F time="2025-12-08T17:56:13Z" level=info msg="resolving subscriptions in namespace" id=4i4FZ namespace=service-telemetry 2025-12-08T17:56:13.592188217+00:00 stderr F E1208 17:56:13.592126 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators" logger="UnhandledError" 2025-12-08T17:56:13.592248669+00:00 stderr F I1208 17:56:13.592174 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators 2025-12-08T17:56:13.872357675+00:00 stderr F time="2025-12-08T17:56:13Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:13.872357675+00:00 stderr F time="2025-12-08T17:56:13Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:14.237826673+00:00 stderr F time="2025-12-08T17:56:14Z" level=info msg="resolving sources" id=6cMrX namespace=service-telemetry 2025-12-08T17:56:14.237826673+00:00 stderr F time="2025-12-08T17:56:14Z" level=info msg="checking if subscriptions need update" id=6cMrX namespace=service-telemetry 2025-12-08T17:56:14.247171440+00:00 stderr F time="2025-12-08T17:56:14Z" level=info msg="checking for existing 
installplan" channel=unstable id=6cMrX namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:14.247171440+00:00 stderr F time="2025-12-08T17:56:14Z" level=info msg="resolving subscriptions in namespace" id=6cMrX namespace=service-telemetry 2025-12-08T17:56:14.252136027+00:00 stderr F I1208 17:56:14.252091 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: no registry client established for catalogsource service-telemetry/infrawatch-operators 2025-12-08T17:56:14.273117592+00:00 stderr F time="2025-12-08T17:56:14Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:14.273117592+00:00 stderr F time="2025-12-08T17:56:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:14.473779038+00:00 stderr F time="2025-12-08T17:56:14Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=Km1M5 2025-12-08T17:56:14.473779038+00:00 stderr F time="2025-12-08T17:56:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=Km1M5 2025-12-08T17:56:15.472578205+00:00 stderr F time="2025-12-08T17:56:15Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:15.472578205+00:00 stderr F time="2025-12-08T17:56:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:15.477442039+00:00 stderr F time="2025-12-08T17:56:15Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:15.477442039+00:00 stderr F E1208 17:56:15.477411 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:15.478663822+00:00 stderr F time="2025-12-08T17:56:15Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:15.873773513+00:00 stderr F 
time="2025-12-08T17:56:15Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:15.873773513+00:00 stderr F time="2025-12-08T17:56:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:16.872447787+00:00 stderr F time="2025-12-08T17:56:16Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=Km1M5 2025-12-08T17:56:16.872447787+00:00 stderr F time="2025-12-08T17:56:16Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=Km1M5 2025-12-08T17:56:16.873008642+00:00 stderr F time="2025-12-08T17:56:16Z" level=info msg="state.Key.Namespace=service-telemetry state.Key.Name=infrawatch-operators state.State=CONNECTING" 2025-12-08T17:56:16.878527994+00:00 stderr F time="2025-12-08T17:56:16Z" level=info msg="state.Key.Namespace=service-telemetry state.Key.Name=infrawatch-operators state.State=TRANSIENT_FAILURE" 2025-12-08T17:56:17.072776343+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:17.072776343+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:17.275128587+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:17.275128587+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:17.279711482+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:17.279906407+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:17.279906407+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="resolving sources" id=BO6Yq namespace=service-telemetry 2025-12-08T17:56:17.279906407+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="checking if subscriptions need update" id=BO6Yq namespace=service-telemetry 2025-12-08T17:56:17.297257194+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="checking for existing installplan" channel=unstable id=BO6Yq namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:17.297257194+00:00 stderr F 
time="2025-12-08T17:56:17Z" level=info msg="resolving subscriptions in namespace" id=BO6Yq namespace=service-telemetry 2025-12-08T17:56:17.303948147+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:17.308331977+00:00 stderr F I1208 17:56:17.308234 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:17.323540335+00:00 stderr F E1208 17:56:17.323379 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:17.330008052+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="resolving sources" id=AS/FL namespace=service-telemetry 2025-12-08T17:56:17.330008052+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="checking if subscriptions need update" id=AS/FL namespace=service-telemetry 2025-12-08T17:56:17.414210763+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="checking for existing installplan" channel=unstable id=AS/FL namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:17.414210763+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="resolving subscriptions in namespace" id=AS/FL namespace=service-telemetry 2025-12-08T17:56:17.477178441+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:17.480069940+00:00 stderr F E1208 17:56:17.480040 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:17.480107941+00:00 stderr F I1208 17:56:17.480083 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:17.492233614+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="resolving sources" id=r9WzN namespace=service-telemetry 2025-12-08T17:56:17.492233614+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="checking if subscriptions need update" id=r9WzN namespace=service-telemetry 2025-12-08T17:56:17.889282009+00:00 stderr F time="2025-12-08T17:56:17Z" 
level=info msg="checking for existing installplan" channel=unstable id=r9WzN namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:17.889282009+00:00 stderr F time="2025-12-08T17:56:17Z" level=info msg="resolving subscriptions in namespace" id=r9WzN namespace=service-telemetry 2025-12-08T17:56:18.077723209+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:18.080385532+00:00 stderr F E1208 17:56:18.080255 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:18.080385532+00:00 stderr F I1208 17:56:18.080355 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:18.101747429+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="resolving sources" id=e0ApI namespace=service-telemetry 2025-12-08T17:56:18.101747429+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="checking if subscriptions need update" id=e0ApI namespace=service-telemetry 2025-12-08T17:56:18.273631135+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=EHo7m 2025-12-08T17:56:18.273631135+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=EHo7m 2025-12-08T17:56:18.475061592+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:18.475061592+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:18.482664791+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="checking for existing installplan" channel=unstable id=e0ApI namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:18.482664791+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="resolving subscriptions in namespace" id=e0ApI namespace=service-telemetry 2025-12-08T17:56:18.672006546+00:00 stderr F 
time="2025-12-08T17:56:18Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:18.672006546+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:18.678108973+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:18.680503879+00:00 stderr F E1208 17:56:18.680452 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:18.680558841+00:00 stderr F I1208 17:56:18.680521 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:18.722021679+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="resolving sources" id=IJjrF namespace=service-telemetry 2025-12-08T17:56:18.722021679+00:00 stderr F time="2025-12-08T17:56:18Z" level=info msg="checking if subscriptions need update" id=IJjrF namespace=service-telemetry 2025-12-08T17:56:19.082854040+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="checking for existing installplan" channel=unstable id=IJjrF namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:19.082854040+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="resolving subscriptions in namespace" id=IJjrF namespace=service-telemetry 2025-12-08T17:56:19.279604699+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:19.283910927+00:00 stderr F E1208 17:56:19.283812 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:19.284049441+00:00 stderr F I1208 17:56:19.283987 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 
2025-12-08T17:56:19.365693201+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="resolving sources" id=f3Gin namespace=service-telemetry 2025-12-08T17:56:19.365693201+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="checking if subscriptions need update" id=f3Gin namespace=service-telemetry 2025-12-08T17:56:19.674182166+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:19.674182166+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:19.682383981+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="checking for existing installplan" channel=unstable id=f3Gin namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:19.682459413+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="resolving subscriptions in namespace" id=f3Gin namespace=service-telemetry 2025-12-08T17:56:19.874298527+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:19.874378459+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:19.881054833+00:00 stderr F time="2025-12-08T17:56:19Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:19.881054833+00:00 stderr F E1208 17:56:19.880512 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:19.881054833+00:00 stderr F I1208 17:56:19.880729 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:20.042269186+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg="resolving sources" id=VXSPc namespace=service-telemetry 2025-12-08T17:56:20.042351298+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg="checking if subscriptions need update" id=VXSPc namespace=service-telemetry 2025-12-08T17:56:20.073403351+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j 
current-pod.namespace=service-telemetry id=EHo7m 2025-12-08T17:56:20.073403351+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=EHo7m 2025-12-08T17:56:20.483480753+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg="checking for existing installplan" channel=unstable id=VXSPc namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:20.483587116+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg="resolving subscriptions in namespace" id=VXSPc namespace=service-telemetry 2025-12-08T17:56:20.673619010+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:20.673751804+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:20.682262908+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:20.684147980+00:00 stderr F E1208 17:56:20.684104 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:20.684453008+00:00 stderr F I1208 17:56:20.684359 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:20.882997986+00:00 stderr F time="2025-12-08T17:56:20Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:20.883050047+00:00 stderr F E1208 17:56:20.882997 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:20.884317042+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:21.006138374+00:00 stderr F 
time="2025-12-08T17:56:21Z" level=info msg="resolving sources" id=98CTu namespace=service-telemetry 2025-12-08T17:56:21.006138374+00:00 stderr F time="2025-12-08T17:56:21Z" level=info msg="checking if subscriptions need update" id=98CTu namespace=service-telemetry 2025-12-08T17:56:21.075574000+00:00 stderr F time="2025-12-08T17:56:21Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:21.075574000+00:00 stderr F time="2025-12-08T17:56:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:21.273635094+00:00 stderr F time="2025-12-08T17:56:21Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=OCAIk 2025-12-08T17:56:21.273635094+00:00 stderr F time="2025-12-08T17:56:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=OCAIk 2025-12-08T17:56:21.282489107+00:00 stderr F time="2025-12-08T17:56:21Z" level=info msg="checking for existing installplan" channel=unstable id=98CTu namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:21.282489107+00:00 stderr F time="2025-12-08T17:56:21Z" level=info msg="resolving subscriptions in namespace" id=98CTu namespace=service-telemetry 2025-12-08T17:56:21.478013472+00:00 stderr F time="2025-12-08T17:56:21Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:21.480071359+00:00 stderr F E1208 17:56:21.480029 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:21.480103340+00:00 stderr F I1208 17:56:21.480085 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:22.073447360+00:00 stderr F time="2025-12-08T17:56:22Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:22.073447360+00:00 stderr F time="2025-12-08T17:56:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true 
current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:22.122072705+00:00 stderr F time="2025-12-08T17:56:22Z" level=info msg="resolving sources" id=/h0lT namespace=service-telemetry 2025-12-08T17:56:22.122072705+00:00 stderr F time="2025-12-08T17:56:22Z" level=info msg="checking if subscriptions need update" id=/h0lT namespace=service-telemetry 2025-12-08T17:56:22.147970016+00:00 stderr F time="2025-12-08T17:56:22Z" level=info msg="checking for existing installplan" channel=unstable id=/h0lT namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:22.147970016+00:00 stderr F time="2025-12-08T17:56:22Z" level=info msg="resolving subscriptions in namespace" id=/h0lT namespace=service-telemetry 2025-12-08T17:56:22.152272044+00:00 stderr F time="2025-12-08T17:56:22Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:22.157656502+00:00 stderr F I1208 17:56:22.157568 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:22.273854090+00:00 stderr F time="2025-12-08T17:56:22Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:22.273854090+00:00 stderr F time="2025-12-08T17:56:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:22.282223190+00:00 stderr F time="2025-12-08T17:56:22Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"service-telemetry-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:22.282267241+00:00 stderr F E1208 17:56:22.282224 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"service-telemetry-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:22.283583257+00:00 stderr F time="2025-12-08T17:56:22Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:23.077189474+00:00 stderr F time="2025-12-08T17:56:23Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=OCAIk 2025-12-08T17:56:23.077189474+00:00 stderr F time="2025-12-08T17:56:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators 
catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=OCAIk 2025-12-08T17:56:23.273294365+00:00 stderr F time="2025-12-08T17:56:23Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:23.273294365+00:00 stderr F time="2025-12-08T17:56:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:23.473026165+00:00 stderr F time="2025-12-08T17:56:23Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:23.473026165+00:00 stderr F time="2025-12-08T17:56:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:24.273860560+00:00 stderr F time="2025-12-08T17:56:24Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:24.273860560+00:00 stderr F time="2025-12-08T17:56:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:24.472428679+00:00 stderr F time="2025-12-08T17:56:24Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=Ma7UG 2025-12-08T17:56:24.472428679+00:00 stderr F time="2025-12-08T17:56:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=Ma7UG 2025-12-08T17:56:24.672380685+00:00 stderr F time="2025-12-08T17:56:24Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:24.672380685+00:00 stderr F time="2025-12-08T17:56:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:25.472663945+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:25.472663945+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:25.483998116+00:00 stderr F 
time="2025-12-08T17:56:25Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:25.483998116+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="resolving sources" id=IbDwB namespace=service-telemetry 2025-12-08T17:56:25.483998116+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="checking if subscriptions need update" id=IbDwB namespace=service-telemetry 2025-12-08T17:56:25.500675114+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="checking for existing installplan" channel=unstable id=IbDwB namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:25.500675114+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="resolving subscriptions in namespace" id=IbDwB namespace=service-telemetry 2025-12-08T17:56:25.509294750+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:25.514519114+00:00 stderr F E1208 17:56:25.514456 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:25.514625307+00:00 stderr F I1208 17:56:25.514562 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:25.521024072+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="resolving sources" id=ku8Ss namespace=service-telemetry 2025-12-08T17:56:25.521024072+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="checking if subscriptions need update" id=ku8Ss namespace=service-telemetry 2025-12-08T17:56:25.610493757+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="checking for existing installplan" channel=unstable id=ku8Ss namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:25.610493757+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="resolving subscriptions in namespace" id=ku8Ss namespace=service-telemetry 2025-12-08T17:56:25.612993466+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:25.614980391+00:00 stderr F E1208 17:56:25.614909 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:25.615015602+00:00 stderr F I1208 17:56:25.614973 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", 
APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:25.626686542+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="resolving sources" id=ilAOQ namespace=service-telemetry 2025-12-08T17:56:25.626686542+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="checking if subscriptions need update" id=ilAOQ namespace=service-telemetry 2025-12-08T17:56:25.651603726+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="checking for existing installplan" channel=unstable id=ilAOQ namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:25.651603726+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="resolving subscriptions in namespace" id=ilAOQ namespace=service-telemetry 2025-12-08T17:56:25.656189901+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:25.658142545+00:00 stderr F E1208 17:56:25.658116 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:25.658207706+00:00 stderr F I1208 17:56:25.658180 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:25.680231891+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="resolving sources" id=CSjVV namespace=service-telemetry 2025-12-08T17:56:25.680231891+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="checking if subscriptions need update" id=CSjVV namespace=service-telemetry 2025-12-08T17:56:25.875077278+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:25.875077278+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:25.882990154+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="checking for existing installplan" channel=unstable id=CSjVV namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:25.882990154+00:00 stderr F time="2025-12-08T17:56:25Z" level=info msg="resolving subscriptions in namespace" id=CSjVV namespace=service-telemetry 2025-12-08T17:56:26.079254670+00:00 stderr F 
time="2025-12-08T17:56:26Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:26.084039861+00:00 stderr F E1208 17:56:26.083952 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"/service-telemetry\" failed: error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" logger="UnhandledError" 2025-12-08T17:56:26.084118334+00:00 stderr F I1208 17:56:26.084071 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"", Name:"service-telemetry", UID:"c400eb0e-dca7-4430-a24c-7b131f28cf1c", APIVersion:"v1", ResourceVersion:"43418", FieldPath:""}): type: 'Warning' reason: 'ResolutionFailed' error using catalogsource service-telemetry/infrawatch-operators: failed to list bundles: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:26.126300711+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="resolving sources" id=caKh/ namespace=service-telemetry 2025-12-08T17:56:26.126300711+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="checking if subscriptions need update" id=caKh/ namespace=service-telemetry 2025-12-08T17:56:26.193402042+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="state.Key.Namespace=service-telemetry state.Key.Name=infrawatch-operators state.State=READY" 2025-12-08T17:56:26.272688368+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=Ma7UG 2025-12-08T17:56:26.272688368+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=Ma7UG 2025-12-08T17:56:26.674297479+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:26.674297479+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:26.684859109+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="checking for existing installplan" channel=unstable id=caKh/ namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:26.684859109+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="resolving subscriptions in namespace" id=caKh/ namespace=service-telemetry 2025-12-08T17:56:26.873282279+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:26.873282279+00:00 stderr F 
time="2025-12-08T17:56:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:26.876102617+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="requesting snapshot for catalog source service-telemetry/infrawatch-operators" 2025-12-08T17:56:27.092316779+00:00 stderr F time="2025-12-08T17:56:27Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:27.261276525+00:00 stderr F time="2025-12-08T17:56:27Z" level=info msg="unpacking bundles" id=caKh/ namespace=service-telemetry 2025-12-08T17:56:27.473370826+00:00 stderr F time="2025-12-08T17:56:27Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=UEBjM 2025-12-08T17:56:27.473437297+00:00 stderr F time="2025-12-08T17:56:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=UEBjM 2025-12-08T17:56:28.073945705+00:00 stderr F time="2025-12-08T17:56:28Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:28.073996816+00:00 stderr F time="2025-12-08T17:56:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:28.473763545+00:00 stderr F time="2025-12-08T17:56:28Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:28.473763545+00:00 stderr F time="2025-12-08T17:56:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:29.517975331+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="unpacking is not complete yet, requeueing" id=caKh/ namespace=service-telemetry 2025-12-08T17:56:29.517975331+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="resolving sources" id=54o0k namespace=service-telemetry 2025-12-08T17:56:29.517975331+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="checking if subscriptions need update" id=54o0k namespace=service-telemetry 2025-12-08T17:56:29.529980113+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="checking for existing installplan" channel=unstable id=54o0k namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:29.529980113+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="resolving subscriptions in namespace" id=54o0k namespace=service-telemetry 2025-12-08T17:56:29.673042745+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="evaluating current pod" correctHash=true correctImages=true 
current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:29.673118227+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:29.803014665+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="unpacking bundles" id=54o0k namespace=service-telemetry 2025-12-08T17:56:29.803229041+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="unpacking is not complete yet, requeueing" id=54o0k namespace=service-telemetry 2025-12-08T17:56:29.872814406+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=UEBjM 2025-12-08T17:56:29.872814406+00:00 stderr F time="2025-12-08T17:56:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=UEBjM 2025-12-08T17:56:29.876230095+00:00 stderr F time="2025-12-08T17:56:29Z" level=error msg="UpdateStatus - error while setting CatalogSource status" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="Operation cannot be fulfilled on catalogsources.operators.coreos.com \"infrawatch-operators\": the object has been modified; please apply your changes to the latest version and try again" id=UEBjM 2025-12-08T17:56:29.876287756+00:00 stderr F E1208 17:56:29.876255 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/infrawatch-operators\" failed: Operation cannot be fulfilled on catalogsources.operators.coreos.com \"infrawatch-operators\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:30.090292958+00:00 stderr F time="2025-12-08T17:56:30Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:30.090292958+00:00 stderr F time="2025-12-08T17:56:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:31.073784549+00:00 stderr F time="2025-12-08T17:56:31Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=v/mkp 2025-12-08T17:56:31.073784549+00:00 stderr F time="2025-12-08T17:56:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=v/mkp 2025-12-08T17:56:31.273202670+00:00 stderr F time="2025-12-08T17:56:31Z" level=info msg="evaluating current 
pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:31.273202670+00:00 stderr F time="2025-12-08T17:56:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:31.279456744+00:00 stderr F time="2025-12-08T17:56:31Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:31.279500265+00:00 stderr F E1208 17:56:31.279477 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"elasticsearch-eck-operator-certified\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:31.280740557+00:00 stderr F time="2025-12-08T17:56:31Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:31.473815893+00:00 stderr F time="2025-12-08T17:56:31Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=U9/RK 2025-12-08T17:56:31.473815893+00:00 stderr F time="2025-12-08T17:56:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=U9/RK 2025-12-08T17:56:31.673578422+00:00 stderr F time="2025-12-08T17:56:31Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:31.673578422+00:00 stderr F time="2025-12-08T17:56:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:32.874181527+00:00 stderr F time="2025-12-08T17:56:32Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:32.874181527+00:00 stderr F time="2025-12-08T17:56:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:33.273670677+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:33.273670677+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true 
correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:33.273807820+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:33.273861622+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="resolving sources" id=D0P1J namespace=service-telemetry 2025-12-08T17:56:33.273861622+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="checking if subscriptions need update" id=D0P1J namespace=service-telemetry 2025-12-08T17:56:33.292359583+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="checking for existing installplan" channel=unstable id=D0P1J namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:33.292359583+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="resolving subscriptions in namespace" id=D0P1J namespace=service-telemetry 2025-12-08T17:56:33.446620117+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="unpacking bundles" id=D0P1J namespace=service-telemetry 2025-12-08T17:56:33.447007797+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="unpacking is not complete yet, requeueing" id=D0P1J namespace=service-telemetry 2025-12-08T17:56:33.474172856+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=v/mkp 2025-12-08T17:56:33.474172856+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=v/mkp 2025-12-08T17:56:33.872438893+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=U9/RK 2025-12-08T17:56:33.872438893+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=U9/RK 2025-12-08T17:56:34.473729786+00:00 stderr F time="2025-12-08T17:56:34Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:34.473729786+00:00 stderr F time="2025-12-08T17:56:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:34.518677099+00:00 stderr F time="2025-12-08T17:56:34Z" level=info msg="resolving sources" id=yY0Et namespace=service-telemetry 2025-12-08T17:56:34.518724230+00:00 stderr F time="2025-12-08T17:56:34Z" level=info msg="checking if subscriptions need update" id=yY0Et namespace=service-telemetry 
2025-12-08T17:56:34.528262548+00:00 stderr F time="2025-12-08T17:56:34Z" level=info msg="checking for existing installplan" channel=unstable id=yY0Et namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:34.528262548+00:00 stderr F time="2025-12-08T17:56:34Z" level=info msg="resolving subscriptions in namespace" id=yY0Et namespace=service-telemetry 2025-12-08T17:56:34.677478101+00:00 stderr F time="2025-12-08T17:56:34Z" level=info msg="unpacking bundles" id=yY0Et namespace=service-telemetry 2025-12-08T17:56:34.677819640+00:00 stderr F time="2025-12-08T17:56:34Z" level=info msg="unpacking is not complete yet, requeueing" id=yY0Et namespace=service-telemetry 2025-12-08T17:56:34.873220136+00:00 stderr F time="2025-12-08T17:56:34Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:34.873285168+00:00 stderr F time="2025-12-08T17:56:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:35.073189451+00:00 stderr F time="2025-12-08T17:56:35Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=9L3sX 2025-12-08T17:56:35.073189451+00:00 stderr F time="2025-12-08T17:56:35Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=9L3sX 2025-12-08T17:56:35.473308967+00:00 stderr F time="2025-12-08T17:56:35Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=rTVLK 2025-12-08T17:56:35.473308967+00:00 stderr F time="2025-12-08T17:56:35Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=rTVLK 2025-12-08T17:56:36.074599779+00:00 stderr F time="2025-12-08T17:56:36Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:36.074699732+00:00 stderr F time="2025-12-08T17:56:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:36.473380811+00:00 stderr F time="2025-12-08T17:56:36Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:36.473380811+00:00 stderr F time="2025-12-08T17:56:36Z" level=info msg="of 1 pods matching label 
selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:37.472229402+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=9L3sX 2025-12-08T17:56:37.472229402+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=9L3sX 2025-12-08T17:56:37.674669903+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:37.674669903+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:37.690802853+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:37.690863064+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="resolving sources" id=YfrXV namespace=service-telemetry 2025-12-08T17:56:37.690917296+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="checking if subscriptions need update" id=YfrXV namespace=service-telemetry 2025-12-08T17:56:37.705457196+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="checking for existing installplan" channel=unstable id=YfrXV namespace=service-telemetry pkg=service-telemetry-operator source=infrawatch-operators sub=service-telemetry-operator 2025-12-08T17:56:37.705457196+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="resolving subscriptions in namespace" id=YfrXV namespace=service-telemetry 2025-12-08T17:56:37.873549920+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=rTVLK 2025-12-08T17:56:37.873549920+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=rTVLK 2025-12-08T17:56:37.877769160+00:00 stderr F time="2025-12-08T17:56:37Z" level=error msg="UpdateStatus - error while setting CatalogSource status" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="Operation cannot be fulfilled on catalogsources.operators.coreos.com \"infrawatch-operators\": the object has been modified; please apply your changes to the latest version and try again" id=rTVLK 2025-12-08T17:56:37.877819341+00:00 stderr F E1208 17:56:37.877790 1 queueinformer_operator.go:312] "Unhandled Error" err="sync 
\"service-telemetry/infrawatch-operators\" failed: Operation cannot be fulfilled on catalogsources.operators.coreos.com \"infrawatch-operators\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:37.890616525+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="unpacking bundles" id=YfrXV namespace=service-telemetry 2025-12-08T17:56:37.893189312+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf key=interconnect-operator.v1.10.x.clusterserviceversion.yaml 2025-12-08T17:56:37.894593038+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf key=interconnect-operator_v1alpha1_interconnect_crd.yaml 2025-12-08T17:56:37.901249562+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 key=infra.watch_servicetelemetrys.yaml 2025-12-08T17:56:37.903177202+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 key=service-telemetry-operator.clusterserviceversion.yaml 2025-12-08T17:56:37.909767535+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 key=smart-gateway-operator.clusterserviceversion.yaml 2025-12-08T17:56:37.910559505+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 key=smartgateway.infra.watch_smartgateways.yaml 2025-12-08T17:56:37.913000389+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="resolution caused subscription changes, creating installplan" id=YfrXV namespace=service-telemetry 2025-12-08T17:56:37.921852580+00:00 stderr F time="2025-12-08T17:56:37Z" level=warning msg="no installplan found with matching generation, creating new one" id=YfrXV namespace=service-telemetry 2025-12-08T17:56:37.925705420+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg=syncing id=Q1o+g ip=install-s8bl7 namespace=service-telemetry phase= 2025-12-08T17:56:37.925705420+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg="skip processing installplan without status - subscription sync responsible for initial status" id=Q1o+g ip=install-s8bl7 namespace=service-telemetry phase= 2025-12-08T17:56:37.942148089+00:00 stderr F time="2025-12-08T17:56:37Z" level=info msg=syncing id=6o9lN ip=install-s8bl7 namespace=service-telemetry phase=Installing 2025-12-08T17:56:38.073125655+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:38.073125655+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 
current-pod.namespace=openshift-marketplace 2025-12-08T17:56:38.494485135+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf key=interconnect-operator.v1.10.x.clusterserviceversion.yaml 2025-12-08T17:56:38.495819660+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf key=interconnect-operator_v1alpha1_interconnect_crd.yaml 2025-12-08T17:56:38.572338545+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="resolving sources" id=tBpr/ namespace=service-telemetry 2025-12-08T17:56:38.572338545+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="checking if subscriptions need update" id=tBpr/ namespace=service-telemetry 2025-12-08T17:56:38.879553929+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="checking for existing installplan" channel=1.10.x id=tBpr/ namespace=service-telemetry pkg=amq7-interconnect-operator source=redhat-operators sub=amq7-interconnect-operator-1.10.x-redhat-operators-openshift-marketplace 2025-12-08T17:56:39.073076786+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=aCpmp 2025-12-08T17:56:39.073076786+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=aCpmp 2025-12-08T17:56:39.275066044+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:39.275066044+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:39.473226283+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=UPgNx 2025-12-08T17:56:39.473226283+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=UPgNx 2025-12-08T17:56:39.500407441+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 key=infra.watch_servicetelemetrys.yaml 2025-12-08T17:56:39.503266505+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="added to bundle, 
Kind=ClusterServiceVersion" configmap=service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 key=service-telemetry-operator.clusterserviceversion.yaml 2025-12-08T17:56:39.674938494+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:39.674938494+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:40.079480655+00:00 stderr F time="2025-12-08T17:56:40Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"service-telemetry-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:40.079480655+00:00 stderr F E1208 17:56:40.079425 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"service-telemetry-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:40.080638574+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:40.672357408+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:40.672357408+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:40.897812198+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 key=smart-gateway-operator.clusterserviceversion.yaml 2025-12-08T17:56:40.898571998+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 key=smartgateway.infra.watch_smartgateways.yaml 2025-12-08T17:56:41.272986664+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=aCpmp 2025-12-08T17:56:41.272986664+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=aCpmp 2025-12-08T17:56:41.484925932+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="checking for existing installplan" 
channel=unstable id=tBpr/ namespace=service-telemetry pkg=smart-gateway-operator source=infrawatch-operators sub=smart-gateway-operator-unstable-infrawatch-operators-service-telemetry 2025-12-08T17:56:41.672405032+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:41.672405032+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:41.871916065+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=UPgNx 2025-12-08T17:56:41.871916065+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=UPgNx 2025-12-08T17:56:42.071681435+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:42.071681435+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:42.484766140+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="error updating InstallPlan status" id=6o9lN ip=install-s8bl7 namespace=service-telemetry phase=Installing updateError="Operation cannot be fulfilled on installplans.operators.coreos.com \"install-s8bl7\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:56:42.484821351+00:00 stderr F E1208 17:56:42.484795 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/install-s8bl7\" failed: error updating InstallPlan status: Operation cannot be fulfilled on installplans.operators.coreos.com \"install-s8bl7\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:42.486034292+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg=syncing id=Uh8ma ip=install-s8bl7 namespace=service-telemetry phase=Installing 2025-12-08T17:56:42.685282589+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="subscriptions were updated, wait for a new resolution" id=tBpr/ namespace=service-telemetry 2025-12-08T17:56:42.685282589+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="resolving sources" id=BUPOO namespace=service-telemetry 2025-12-08T17:56:42.685282589+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="checking if subscriptions need update" id=BUPOO namespace=service-telemetry 2025-12-08T17:56:42.880092740+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="evaluating current pod" correctHash=true correctImages=true 
current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:42.880092740+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:42.928770650+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf key=interconnect-operator.v1.10.x.clusterserviceversion.yaml 2025-12-08T17:56:42.931079970+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf key=interconnect-operator_v1alpha1_interconnect_crd.yaml 2025-12-08T17:56:43.073160266+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=3t7yr 2025-12-08T17:56:43.073160266+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=3t7yr 2025-12-08T17:56:43.273119841+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:43.273178072+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:43.275848462+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:43.872667938+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:43.872667938+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:44.316123034+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 key=infra.watch_servicetelemetrys.yaml 2025-12-08T17:56:44.322120390+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 key=service-telemetry-operator.clusterserviceversion.yaml 2025-12-08T17:56:44.672544381+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="evaluating current pod" 
catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=3t7yr 2025-12-08T17:56:44.672579622+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=3t7yr 2025-12-08T17:56:44.878246806+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:44.878246806+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:44.883190095+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="resolving subscriptions in namespace" id=BUPOO namespace=service-telemetry 2025-12-08T17:56:45.073179410+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:45.073179410+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:45.472872425+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:45.472872425+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:45.479169300+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="no subscriptions were updated" id=BUPOO namespace=service-telemetry 2025-12-08T17:56:45.484923030+00:00 stderr F time="2025-12-08T17:56:45Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"amq7-interconnect-operator-1.10.x-redhat-operators-openshift-marketplace\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:45.484970191+00:00 stderr F E1208 17:56:45.484952 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator-1.10.x-redhat-operators-openshift-marketplace\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"amq7-interconnect-operator-1.10.x-redhat-operators-openshift-marketplace\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:45.486237383+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" 
selflink= 2025-12-08T17:56:45.872594990+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:45.872594990+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:46.271983468+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:46.271983468+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:46.675186233+00:00 stderr F I1208 17:56:46.675125 1 request.go:752] "Waited before sending request" delay="1.188666823s" reason="client-side throttling, not priority and fairness" verb="PUT" URL="https://10.217.4.1:443/apis/operators.coreos.com/v1alpha1/namespaces/service-telemetry/subscriptions/service-telemetry-operator/status" 2025-12-08T17:56:46.689051445+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:46.689051445+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:46.691523459+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 key=smart-gateway-operator.clusterserviceversion.yaml 2025-12-08T17:56:46.692450894+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 key=smartgateway.infra.watch_smartgateways.yaml 2025-12-08T17:56:47.072050794+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:47.072050794+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:47.282604796+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:47.472258503+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:47.472258503+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching 
hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:48.073012771+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:48.073012771+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:48.272354761+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:48.272354761+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:48.672343033+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:48.672343033+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:48.880297368+00:00 stderr F time="2025-12-08T17:56:48Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"service-telemetry-operator\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:48.880297368+00:00 stderr F E1208 17:56:48.880239 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"service-telemetry-operator\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:48.881434777+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:49.073030474+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:49.073030474+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:49.080156740+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="resolving sources" id=Gnx1R namespace=service-telemetry 2025-12-08T17:56:49.080156740+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="checking if subscriptions need update" id=Gnx1R namespace=service-telemetry 2025-12-08T17:56:49.472797621+00:00 stderr F time="2025-12-08T17:56:49Z" level=info 
msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:49.472797621+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:49.474744942+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:49.872303041+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:49.872303041+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:49.883064102+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="error updating InstallPlan status" id=Uh8ma ip=install-s8bl7 namespace=service-telemetry phase=Installing updateError="Operation cannot be fulfilled on installplans.operators.coreos.com \"install-s8bl7\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:56:49.883109783+00:00 stderr F E1208 17:56:49.883090 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/install-s8bl7\" failed: error updating InstallPlan status: Operation cannot be fulfilled on installplans.operators.coreos.com \"install-s8bl7\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:49.884321654+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg=syncing id=qt5cL ip=install-s8bl7 namespace=service-telemetry phase=Installing 2025-12-08T17:56:50.272845278+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:50.272845278+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:50.517767266+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf key=interconnect-operator.v1.10.x.clusterserviceversion.yaml 2025-12-08T17:56:50.519515592+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf key=interconnect-operator_v1alpha1_interconnect_crd.yaml 2025-12-08T17:56:50.872445846+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:50.872445846+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="of 1 pods matching label selector, 1 have the 
correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:51.074824875+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:51.074824875+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:51.480448684+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="resolving subscriptions in namespace" id=Gnx1R namespace=service-telemetry 2025-12-08T17:56:51.672434321+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:51.672434321+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:51.735346893+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 key=infra.watch_servicetelemetrys.yaml 2025-12-08T17:56:51.737826267+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199 key=service-telemetry-operator.clusterserviceversion.yaml 2025-12-08T17:56:51.876816842+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:51.876816842+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:52.067459694+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="no subscriptions were updated" id=Gnx1R namespace=service-telemetry 2025-12-08T17:56:52.272642047+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:52.272642047+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:52.291112528+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:52.672674699+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:52.672674699+00:00 stderr F 
time="2025-12-08T17:56:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:53.488634052+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:53.488634052+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:53.492062121+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:53.492062121+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:53.872386031+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:53.872439962+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:53.879240839+00:00 stderr F time="2025-12-08T17:56:53Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"smart-gateway-operator-unstable-infrawatch-operators-service-telemetry\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:53.879335942+00:00 stderr F E1208 17:56:53.879318 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/smart-gateway-operator-unstable-infrawatch-operators-service-telemetry\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"smart-gateway-operator-unstable-infrawatch-operators-service-telemetry\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:53.880600605+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:54.473407816+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:54.473407816+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:54.673563627+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="evaluating current pod" correctHash=true 
correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:54.673563627+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:54.769331455+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="added to bundle, Kind=ClusterServiceVersion" configmap=service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 key=smart-gateway-operator.clusterserviceversion.yaml 2025-12-08T17:56:54.770207857+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="added to bundle, Kind=CustomResourceDefinition" configmap=service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73 key=smartgateway.infra.watch_smartgateways.yaml 2025-12-08T17:56:54.883120662+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:55.072863161+00:00 stderr F time="2025-12-08T17:56:55Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:55.072863161+00:00 stderr F time="2025-12-08T17:56:55Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:55.483850081+00:00 stderr F time="2025-12-08T17:56:55Z" level=info msg="resolving sources" id=Mf2UL namespace=service-telemetry 2025-12-08T17:56:55.483850081+00:00 stderr F time="2025-12-08T17:56:55Z" level=info msg="checking if subscriptions need update" id=Mf2UL namespace=service-telemetry 2025-12-08T17:56:55.675912990+00:00 stderr F time="2025-12-08T17:56:55Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:55.675912990+00:00 stderr F time="2025-12-08T17:56:55Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:55.873099623+00:00 stderr F time="2025-12-08T17:56:55Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:55.873163115+00:00 stderr F time="2025-12-08T17:56:55Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:56.472105217+00:00 stderr F time="2025-12-08T17:56:56Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:56.472178579+00:00 stderr F time="2025-12-08T17:56:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:56.673129719+00:00 
stderr F time="2025-12-08T17:56:56Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:56.673129719+00:00 stderr F time="2025-12-08T17:56:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:56.677565695+00:00 stderr F time="2025-12-08T17:56:56Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:57.072383273+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:57.072383273+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:57.079946591+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="resolving subscriptions in namespace" id=Mf2UL namespace=service-telemetry 2025-12-08T17:56:57.289284570+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg=syncing id=ZDuw3 ip=install-s8bl7 namespace=service-telemetry phase=Complete 2025-12-08T17:56:57.632593424+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="no subscriptions were updated" id=Mf2UL namespace=service-telemetry 2025-12-08T17:56:57.678457300+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:57.678520542+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:57.872575314+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:57.872575314+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:58.272323610+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:58.272382901+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:56:58.672491447+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:58.672491447+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="of 1 pods 
matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:56:58.690086866+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:59.272290061+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:59.272290061+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:56:59.472685308+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:59.472740739+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:56:59.478867749+00:00 stderr F time="2025-12-08T17:56:59Z" level=warning msg="an error was encountered during reconciliation" error="Operation cannot be fulfilled on subscriptions.operators.coreos.com \"smart-gateway-operator-unstable-infrawatch-operators-service-telemetry\": the object has been modified; please apply your changes to the latest version and try again" reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:59.478968652+00:00 stderr F E1208 17:56:59.478955 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/smart-gateway-operator-unstable-infrawatch-operators-service-telemetry\" failed: Operation cannot be fulfilled on subscriptions.operators.coreos.com \"smart-gateway-operator-unstable-infrawatch-operators-service-telemetry\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:59.480184253+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:56:59.882320912+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="resolving sources" id=nvZSL namespace=service-telemetry 2025-12-08T17:56:59.882320912+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="checking if subscriptions need update" id=nvZSL namespace=service-telemetry 2025-12-08T17:57:00.073201910+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:00.073201910+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:00.273210077+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 
2025-12-08T17:57:00.273210077+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:00.876439270+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:00.876439270+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:00.887740445+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="resolving subscriptions in namespace" id=nvZSL namespace=service-telemetry 2025-12-08T17:57:01.073485029+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:01.073485029+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:01.255148558+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="no subscriptions were updated" id=nvZSL namespace=service-telemetry 2025-12-08T17:57:01.672619866+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:01.672619866+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:01.871930424+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:01.871930424+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:02.272581245+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:02.272581245+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:02.276313603+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:02.483933058+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:02.672628429+00:00 
stderr F time="2025-12-08T17:57:02Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:02.672628429+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:03.072212041+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:03.072212041+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:03.480075318+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="resolving sources" id=zV2oi namespace=service-telemetry 2025-12-08T17:57:03.480075318+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="checking if subscriptions need update" id=zV2oi namespace=service-telemetry 2025-12-08T17:57:03.672246981+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:03.672246981+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:03.873134871+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:03.873134871+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:04.473600452+00:00 stderr F time="2025-12-08T17:57:04Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:04.473600452+00:00 stderr F time="2025-12-08T17:57:04Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:04.479970357+00:00 stderr F time="2025-12-08T17:57:04Z" level=info msg="resolving subscriptions in namespace" id=zV2oi namespace=service-telemetry 2025-12-08T17:57:04.671846932+00:00 stderr F time="2025-12-08T17:57:04Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:04.671846932+00:00 stderr F time="2025-12-08T17:57:04Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg 
current-pod.namespace=openshift-marketplace 2025-12-08T17:57:04.684097111+00:00 stderr F time="2025-12-08T17:57:04Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:05.060308043+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="no subscriptions were updated" id=zV2oi namespace=service-telemetry 2025-12-08T17:57:05.272686833+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:05.272686833+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:05.473484280+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:05.473484280+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:06.072793972+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:06.072793972+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:06.272622223+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:06.272622223+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:06.482285802+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="resolving sources" id=/Q/xF namespace=service-telemetry 2025-12-08T17:57:06.482285802+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="checking if subscriptions need update" id=/Q/xF namespace=service-telemetry 2025-12-08T17:57:06.672310038+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:06.672310038+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:06.696040177+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:07.283924860+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="evaluating current pod" correctHash=true 
correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:07.283924860+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:07.472162739+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:07.472162739+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:07.481095712+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:07.884927876+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="resolving subscriptions in namespace" id=/Q/xF namespace=service-telemetry 2025-12-08T17:57:08.073157235+00:00 stderr F time="2025-12-08T17:57:08Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:08.073157235+00:00 stderr F time="2025-12-08T17:57:08Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:08.258348695+00:00 stderr F time="2025-12-08T17:57:08Z" level=info msg="no subscriptions were updated" id=/Q/xF namespace=service-telemetry 2025-12-08T17:57:08.272397022+00:00 stderr F time="2025-12-08T17:57:08Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:08.272397022+00:00 stderr F time="2025-12-08T17:57:08Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:08.873324964+00:00 stderr F time="2025-12-08T17:57:08Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:08.873324964+00:00 stderr F time="2025-12-08T17:57:08Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:09.073200288+00:00 stderr F time="2025-12-08T17:57:09Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:09.073200288+00:00 stderr F time="2025-12-08T17:57:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:09.672736526+00:00 stderr F 
time="2025-12-08T17:57:09Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:09.672736526+00:00 stderr F time="2025-12-08T17:57:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:09.680398336+00:00 stderr F time="2025-12-08T17:57:09Z" level=info msg="resolving sources" id=43a7b namespace=service-telemetry 2025-12-08T17:57:09.680398336+00:00 stderr F time="2025-12-08T17:57:09Z" level=info msg="checking if subscriptions need update" id=43a7b namespace=service-telemetry 2025-12-08T17:57:09.873160963+00:00 stderr F time="2025-12-08T17:57:09Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:09.873160963+00:00 stderr F time="2025-12-08T17:57:09Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:09.882310742+00:00 stderr F time="2025-12-08T17:57:09Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:10.473753648+00:00 stderr F time="2025-12-08T17:57:10Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:10.473753648+00:00 stderr F time="2025-12-08T17:57:10Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:10.673183549+00:00 stderr F time="2025-12-08T17:57:10Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:10.673183549+00:00 stderr F time="2025-12-08T17:57:10Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:10.696563288+00:00 stderr F time="2025-12-08T17:57:10Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:11.081250442+00:00 stderr F time="2025-12-08T17:57:11Z" level=info msg="resolving subscriptions in namespace" id=43a7b namespace=service-telemetry 2025-12-08T17:57:11.272755187+00:00 stderr F time="2025-12-08T17:57:11Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:11.272755187+00:00 stderr F time="2025-12-08T17:57:11Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:11.462619509+00:00 stderr F time="2025-12-08T17:57:11Z" level=info msg="no subscriptions were updated" id=43a7b namespace=service-telemetry 2025-12-08T17:57:11.473201885+00:00 stderr F 
time="2025-12-08T17:57:11Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:11.473201885+00:00 stderr F time="2025-12-08T17:57:11Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:12.073508282+00:00 stderr F time="2025-12-08T17:57:12Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:12.073508282+00:00 stderr F time="2025-12-08T17:57:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:12.273035136+00:00 stderr F time="2025-12-08T17:57:12Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:12.273035136+00:00 stderr F time="2025-12-08T17:57:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:12.873743774+00:00 stderr F time="2025-12-08T17:57:12Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:12.873840227+00:00 stderr F time="2025-12-08T17:57:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:12.886586959+00:00 stderr F time="2025-12-08T17:57:12Z" level=info msg="resolving sources" id=Lkfgl namespace=service-telemetry 2025-12-08T17:57:12.886671171+00:00 stderr F time="2025-12-08T17:57:12Z" level=info msg="checking if subscriptions need update" id=Lkfgl namespace=service-telemetry 2025-12-08T17:57:13.073388301+00:00 stderr F time="2025-12-08T17:57:13Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:13.073388301+00:00 stderr F time="2025-12-08T17:57:13Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:13.082674033+00:00 stderr F time="2025-12-08T17:57:13Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:13.672114747+00:00 stderr F time="2025-12-08T17:57:13Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:13.672114747+00:00 stderr F time="2025-12-08T17:57:13Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 
2025-12-08T17:57:13.873146390+00:00 stderr F time="2025-12-08T17:57:13Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:13.873146390+00:00 stderr F time="2025-12-08T17:57:13Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:13.882144405+00:00 stderr F time="2025-12-08T17:57:13Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:14.280328931+00:00 stderr F time="2025-12-08T17:57:14Z" level=info msg="resolving subscriptions in namespace" id=Lkfgl namespace=service-telemetry 2025-12-08T17:57:14.474491095+00:00 stderr F time="2025-12-08T17:57:14Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:14.474491095+00:00 stderr F time="2025-12-08T17:57:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:14.649088178+00:00 stderr F time="2025-12-08T17:57:14Z" level=info msg="no subscriptions were updated" id=Lkfgl namespace=service-telemetry 2025-12-08T17:57:14.673740302+00:00 stderr F time="2025-12-08T17:57:14Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:14.673740302+00:00 stderr F time="2025-12-08T17:57:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:15.275988609+00:00 stderr F time="2025-12-08T17:57:15Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:15.275988609+00:00 stderr F time="2025-12-08T17:57:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:15.473916152+00:00 stderr F time="2025-12-08T17:57:15Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:15.473916152+00:00 stderr F time="2025-12-08T17:57:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:16.073418588+00:00 stderr F time="2025-12-08T17:57:16Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:16.073418588+00:00 stderr F time="2025-12-08T17:57:16Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true 
current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:16.081467978+00:00 stderr F time="2025-12-08T17:57:16Z" level=info msg="resolving sources" id=GALul namespace=service-telemetry 2025-12-08T17:57:16.081467978+00:00 stderr F time="2025-12-08T17:57:16Z" level=info msg="checking if subscriptions need update" id=GALul namespace=service-telemetry 2025-12-08T17:57:16.273320922+00:00 stderr F time="2025-12-08T17:57:16Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:16.273320922+00:00 stderr F time="2025-12-08T17:57:16Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:16.304322700+00:00 stderr F time="2025-12-08T17:57:16Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:16.873602428+00:00 stderr F time="2025-12-08T17:57:16Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:16.873602428+00:00 stderr F time="2025-12-08T17:57:16Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:16.874209225+00:00 stderr F time="2025-12-08T17:57:16Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:17.072930617+00:00 stderr F time="2025-12-08T17:57:17Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:17.072930617+00:00 stderr F time="2025-12-08T17:57:17Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:17.280825810+00:00 stderr F time="2025-12-08T17:57:17Z" level=info msg="resolving subscriptions in namespace" id=GALul namespace=service-telemetry 2025-12-08T17:57:17.670649447+00:00 stderr F time="2025-12-08T17:57:17Z" level=info msg="no subscriptions were updated" id=GALul namespace=service-telemetry 2025-12-08T17:57:17.673404469+00:00 stderr F time="2025-12-08T17:57:17Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:17.673404469+00:00 stderr F time="2025-12-08T17:57:17Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:17.873193020+00:00 stderr F time="2025-12-08T17:57:17Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:17.873193020+00:00 stderr F time="2025-12-08T17:57:17Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true 
current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:18.472560852+00:00 stderr F time="2025-12-08T17:57:18Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:18.472560852+00:00 stderr F time="2025-12-08T17:57:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:18.673041692+00:00 stderr F time="2025-12-08T17:57:18Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:18.673041692+00:00 stderr F time="2025-12-08T17:57:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:19.081902456+00:00 stderr F time="2025-12-08T17:57:19Z" level=info msg="resolving sources" id=kLbWs namespace=service-telemetry 2025-12-08T17:57:19.081902456+00:00 stderr F time="2025-12-08T17:57:19Z" level=info msg="checking if subscriptions need update" id=kLbWs namespace=service-telemetry 2025-12-08T17:57:19.273060181+00:00 stderr F time="2025-12-08T17:57:19Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:19.273060181+00:00 stderr F time="2025-12-08T17:57:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:19.472359889+00:00 stderr F time="2025-12-08T17:57:19Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:19.472359889+00:00 stderr F time="2025-12-08T17:57:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:19.682366327+00:00 stderr F time="2025-12-08T17:57:19Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:19.872658430+00:00 stderr F time="2025-12-08T17:57:19Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:19.872658430+00:00 stderr F time="2025-12-08T17:57:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:20.272647813+00:00 stderr F time="2025-12-08T17:57:20Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:20.272647813+00:00 stderr F time="2025-12-08T17:57:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching 
hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:20.280087436+00:00 stderr F time="2025-12-08T17:57:20Z" level=info msg="resolving subscriptions in namespace" id=kLbWs namespace=service-telemetry 2025-12-08T17:57:20.619179110+00:00 stderr F time="2025-12-08T17:57:20Z" level=info msg="no subscriptions were updated" id=kLbWs namespace=service-telemetry 2025-12-08T17:57:20.672810929+00:00 stderr F time="2025-12-08T17:57:20Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:20.672810929+00:00 stderr F time="2025-12-08T17:57:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:21.073057598+00:00 stderr F time="2025-12-08T17:57:21Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:21.073057598+00:00 stderr F time="2025-12-08T17:57:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:21.474250783+00:00 stderr F time="2025-12-08T17:57:21Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:21.474250783+00:00 stderr F time="2025-12-08T17:57:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:21.487756154+00:00 stderr F time="2025-12-08T17:57:21Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:21.872370577+00:00 stderr F time="2025-12-08T17:57:21Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:21.872370577+00:00 stderr F time="2025-12-08T17:57:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:22.082811215+00:00 stderr F time="2025-12-08T17:57:22Z" level=info msg="resolving sources" id=PU5rv namespace=service-telemetry 2025-12-08T17:57:22.082904237+00:00 stderr F time="2025-12-08T17:57:22Z" level=info msg="checking if subscriptions need update" id=PU5rv namespace=service-telemetry 2025-12-08T17:57:22.274053593+00:00 stderr F time="2025-12-08T17:57:22Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:22.274053593+00:00 stderr F time="2025-12-08T17:57:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 
2025-12-08T17:57:22.673824520+00:00 stderr F time="2025-12-08T17:57:22Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:22.673987254+00:00 stderr F time="2025-12-08T17:57:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:23.073445712+00:00 stderr F time="2025-12-08T17:57:23Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:23.073445712+00:00 stderr F time="2025-12-08T17:57:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:23.083534426+00:00 stderr F time="2025-12-08T17:57:23Z" level=info msg="resolving subscriptions in namespace" id=PU5rv namespace=service-telemetry 2025-12-08T17:57:23.281364876+00:00 stderr F time="2025-12-08T17:57:23Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:23.472258995+00:00 stderr F time="2025-12-08T17:57:23Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:23.472258995+00:00 stderr F time="2025-12-08T17:57:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:23.644099096+00:00 stderr F time="2025-12-08T17:57:23Z" level=info msg="no subscriptions were updated" id=PU5rv namespace=service-telemetry 2025-12-08T17:57:23.872814772+00:00 stderr F time="2025-12-08T17:57:23Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:23.872814772+00:00 stderr F time="2025-12-08T17:57:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:24.272930168+00:00 stderr F time="2025-12-08T17:57:24Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:24.272930168+00:00 stderr F time="2025-12-08T17:57:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:24.672409567+00:00 stderr F time="2025-12-08T17:57:24Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:24.672409567+00:00 stderr F time="2025-12-08T17:57:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true 
current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:24.672554400+00:00 stderr F time="2025-12-08T17:57:24Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:25.074898465+00:00 stderr F time="2025-12-08T17:57:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:25.074898465+00:00 stderr F time="2025-12-08T17:57:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:25.082894273+00:00 stderr F time="2025-12-08T17:57:25Z" level=info msg="resolving sources" id=ie0Fz namespace=service-telemetry 2025-12-08T17:57:25.082894273+00:00 stderr F time="2025-12-08T17:57:25Z" level=info msg="checking if subscriptions need update" id=ie0Fz namespace=service-telemetry 2025-12-08T17:57:25.473535512+00:00 stderr F time="2025-12-08T17:57:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:25.473535512+00:00 stderr F time="2025-12-08T17:57:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:25.873210796+00:00 stderr F time="2025-12-08T17:57:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:25.873210796+00:00 stderr F time="2025-12-08T17:57:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:26.083189663+00:00 stderr F time="2025-12-08T17:57:26Z" level=info msg="resolving subscriptions in namespace" id=ie0Fz namespace=service-telemetry 2025-12-08T17:57:26.272629583+00:00 stderr F time="2025-12-08T17:57:26Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:26.272629583+00:00 stderr F time="2025-12-08T17:57:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:26.426722222+00:00 stderr F time="2025-12-08T17:57:26Z" level=info msg="no subscriptions were updated" id=ie0Fz namespace=service-telemetry 2025-12-08T17:57:26.481842700+00:00 stderr F time="2025-12-08T17:57:26Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:26.674084145+00:00 stderr F time="2025-12-08T17:57:26Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:26.674084145+00:00 stderr F time="2025-12-08T17:57:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true 
current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:27.073282056+00:00 stderr F time="2025-12-08T17:57:27Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:27.073282056+00:00 stderr F time="2025-12-08T17:57:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:27.472832178+00:00 stderr F time="2025-12-08T17:57:27Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:27.472832178+00:00 stderr F time="2025-12-08T17:57:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:27.874831322+00:00 stderr F time="2025-12-08T17:57:27Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:27.874831322+00:00 stderr F time="2025-12-08T17:57:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:27.875034627+00:00 stderr F time="2025-12-08T17:57:27Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:28.082782837+00:00 stderr F time="2025-12-08T17:57:28Z" level=info msg="resolving sources" id=bxonU namespace=service-telemetry 2025-12-08T17:57:28.082782837+00:00 stderr F time="2025-12-08T17:57:28Z" level=info msg="checking if subscriptions need update" id=bxonU namespace=service-telemetry 2025-12-08T17:57:28.271801236+00:00 stderr F time="2025-12-08T17:57:28Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:28.271801236+00:00 stderr F time="2025-12-08T17:57:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:28.672918608+00:00 stderr F time="2025-12-08T17:57:28Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:28.672918608+00:00 stderr F time="2025-12-08T17:57:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:29.075001805+00:00 stderr F time="2025-12-08T17:57:29Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:29.075001805+00:00 stderr F time="2025-12-08T17:57:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and 
matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:29.083248311+00:00 stderr F time="2025-12-08T17:57:29Z" level=info msg="resolving subscriptions in namespace" id=bxonU namespace=service-telemetry 2025-12-08T17:57:29.451148946+00:00 stderr F time="2025-12-08T17:57:29Z" level=info msg="no subscriptions were updated" id=bxonU namespace=service-telemetry 2025-12-08T17:57:29.475031599+00:00 stderr F time="2025-12-08T17:57:29Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:29.475113401+00:00 stderr F time="2025-12-08T17:57:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:30.290125038+00:00 stderr F time="2025-12-08T17:57:30Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:30.292781877+00:00 stderr F time="2025-12-08T17:57:30Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:30.292839489+00:00 stderr F time="2025-12-08T17:57:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:30.299519683+00:00 stderr F time="2025-12-08T17:57:30Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:30.299519683+00:00 stderr F time="2025-12-08T17:57:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:30.672566323+00:00 stderr F time="2025-12-08T17:57:30Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:30.672566323+00:00 stderr F time="2025-12-08T17:57:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:31.072434142+00:00 stderr F time="2025-12-08T17:57:31Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:31.072434142+00:00 stderr F time="2025-12-08T17:57:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:31.481967684+00:00 stderr F time="2025-12-08T17:57:31Z" level=info msg="resolving sources" id=mgWQ3 namespace=service-telemetry 2025-12-08T17:57:31.481967684+00:00 stderr F time="2025-12-08T17:57:31Z" level=info msg="checking if subscriptions need update" id=mgWQ3 namespace=service-telemetry 
2025-12-08T17:57:31.481967684+00:00 stderr F time="2025-12-08T17:57:31Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:31.487690593+00:00 stderr F time="2025-12-08T17:57:31Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:31.487690593+00:00 stderr F time="2025-12-08T17:57:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:31.871793821+00:00 stderr F time="2025-12-08T17:57:31Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:31.871793821+00:00 stderr F time="2025-12-08T17:57:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:32.273133138+00:00 stderr F time="2025-12-08T17:57:32Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:32.273133138+00:00 stderr F time="2025-12-08T17:57:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:32.479456360+00:00 stderr F time="2025-12-08T17:57:32Z" level=info msg="resolving subscriptions in namespace" id=mgWQ3 namespace=service-telemetry 2025-12-08T17:57:32.672969637+00:00 stderr F time="2025-12-08T17:57:32Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:32.672969637+00:00 stderr F time="2025-12-08T17:57:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:32.844692299+00:00 stderr F time="2025-12-08T17:57:32Z" level=info msg="no subscriptions were updated" id=mgWQ3 namespace=service-telemetry 2025-12-08T17:57:32.881775728+00:00 stderr F time="2025-12-08T17:57:32Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:33.073558554+00:00 stderr F time="2025-12-08T17:57:33Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:33.073558554+00:00 stderr F time="2025-12-08T17:57:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:33.473407659+00:00 stderr F time="2025-12-08T17:57:33Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:33.473407659+00:00 stderr F time="2025-12-08T17:57:33Z" level=info 
msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:33.873263494+00:00 stderr F time="2025-12-08T17:57:33Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:33.873263494+00:00 stderr F time="2025-12-08T17:57:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:34.274589346+00:00 stderr F time="2025-12-08T17:57:34Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:34.274589346+00:00 stderr F time="2025-12-08T17:57:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:34.274589346+00:00 stderr F time="2025-12-08T17:57:34Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:34.484497251+00:00 stderr F time="2025-12-08T17:57:34Z" level=info msg="resolving sources" id=debqd namespace=service-telemetry 2025-12-08T17:57:34.484497251+00:00 stderr F time="2025-12-08T17:57:34Z" level=info msg="checking if subscriptions need update" id=debqd namespace=service-telemetry 2025-12-08T17:57:34.673404563+00:00 stderr F time="2025-12-08T17:57:34Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:34.673404563+00:00 stderr F time="2025-12-08T17:57:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:35.073156986+00:00 stderr F time="2025-12-08T17:57:35Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:35.073156986+00:00 stderr F time="2025-12-08T17:57:35Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:35.473564885+00:00 stderr F time="2025-12-08T17:57:35Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:35.473564885+00:00 stderr F time="2025-12-08T17:57:35Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:35.483267875+00:00 stderr F time="2025-12-08T17:57:35Z" level=info msg="resolving subscriptions in namespace" id=debqd namespace=service-telemetry 2025-12-08T17:57:35.873826840+00:00 stderr F time="2025-12-08T17:57:35Z" level=info msg="evaluating current 
pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:35.873969233+00:00 stderr F time="2025-12-08T17:57:35Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:35.888909649+00:00 stderr F time="2025-12-08T17:57:35Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:35.913398272+00:00 stderr F time="2025-12-08T17:57:35Z" level=info msg="no subscriptions were updated" id=debqd namespace=service-telemetry 2025-12-08T17:57:36.272516194+00:00 stderr F time="2025-12-08T17:57:36Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:36.272516194+00:00 stderr F time="2025-12-08T17:57:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:36.673116188+00:00 stderr F time="2025-12-08T17:57:36Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:36.673116188+00:00 stderr F time="2025-12-08T17:57:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:37.072702776+00:00 stderr F time="2025-12-08T17:57:37Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:37.072702776+00:00 stderr F time="2025-12-08T17:57:37Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:37.472131968+00:00 stderr F time="2025-12-08T17:57:37Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:37.472131968+00:00 stderr F time="2025-12-08T17:57:37Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:37.472233871+00:00 stderr F time="2025-12-08T17:57:37Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:37.481645255+00:00 stderr F time="2025-12-08T17:57:37Z" level=info msg="resolving sources" id=dNsTI namespace=service-telemetry 2025-12-08T17:57:37.481645255+00:00 stderr F time="2025-12-08T17:57:37Z" level=info msg="checking if subscriptions need update" id=dNsTI namespace=service-telemetry 2025-12-08T17:57:37.873229525+00:00 stderr F time="2025-12-08T17:57:37Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:37.873229525+00:00 stderr F 
time="2025-12-08T17:57:37Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:38.273061869+00:00 stderr F time="2025-12-08T17:57:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:38.273061869+00:00 stderr F time="2025-12-08T17:57:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:38.483756685+00:00 stderr F time="2025-12-08T17:57:38Z" level=info msg="resolving subscriptions in namespace" id=dNsTI namespace=service-telemetry 2025-12-08T17:57:38.673182431+00:00 stderr F time="2025-12-08T17:57:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:38.673182431+00:00 stderr F time="2025-12-08T17:57:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:38.825807065+00:00 stderr F time="2025-12-08T17:57:38Z" level=info msg="no subscriptions were updated" id=dNsTI namespace=service-telemetry 2025-12-08T17:57:39.073316773+00:00 stderr F time="2025-12-08T17:57:39Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:39.073316773+00:00 stderr F time="2025-12-08T17:57:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:39.887086765+00:00 stderr F time="2025-12-08T17:57:39Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:57:39.889410225+00:00 stderr F time="2025-12-08T17:57:39Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:39.889475716+00:00 stderr F time="2025-12-08T17:57:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:57:39.894634690+00:00 stderr F time="2025-12-08T17:57:39Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:39.894716872+00:00 stderr F time="2025-12-08T17:57:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:57:40.272833315+00:00 stderr F time="2025-12-08T17:57:40Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 
current-pod.namespace=openshift-marketplace 2025-12-08T17:57:40.272990269+00:00 stderr F time="2025-12-08T17:57:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:57:40.484232468+00:00 stderr F time="2025-12-08T17:57:40Z" level=info msg="resolving sources" id=Ub2pX namespace=service-telemetry 2025-12-08T17:57:40.484232468+00:00 stderr F time="2025-12-08T17:57:40Z" level=info msg="checking if subscriptions need update" id=Ub2pX namespace=service-telemetry 2025-12-08T17:57:40.674547947+00:00 stderr F time="2025-12-08T17:57:40Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:40.674614849+00:00 stderr F time="2025-12-08T17:57:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:57:41.481266526+00:00 stderr F time="2025-12-08T17:57:41Z" level=info msg="resolving subscriptions in namespace" id=Ub2pX namespace=service-telemetry 2025-12-08T17:57:41.851392354+00:00 stderr F time="2025-12-08T17:57:41Z" level=info msg="no subscriptions were updated" id=Ub2pX namespace=service-telemetry 2025-12-08T17:57:43.281783682+00:00 stderr F time="2025-12-08T17:57:43Z" level=info msg="resolving sources" id=gi1So namespace=service-telemetry 2025-12-08T17:57:43.281783682+00:00 stderr F time="2025-12-08T17:57:43Z" level=info msg="checking if subscriptions need update" id=gi1So namespace=service-telemetry 2025-12-08T17:57:44.282711662+00:00 stderr F time="2025-12-08T17:57:44Z" level=info msg="resolving subscriptions in namespace" id=gi1So namespace=service-telemetry 2025-12-08T17:57:44.662936839+00:00 stderr F time="2025-12-08T17:57:44Z" level=info msg="no subscriptions were updated" id=gi1So namespace=service-telemetry 2025-12-08T17:59:25.935248626+00:00 stderr F time="2025-12-08T17:59:25Z" level=info msg="resolving sources" id=cDdFL namespace=openshift-operator-lifecycle-manager 2025-12-08T17:59:25.935248626+00:00 stderr F time="2025-12-08T17:59:25Z" level=info msg="checking if subscriptions need update" id=cDdFL namespace=openshift-operator-lifecycle-manager 2025-12-08T17:59:25.935528033+00:00 stderr F time="2025-12-08T17:59:25Z" level=info msg="resolving sources" id=0NouQ namespace=openshift-operators 2025-12-08T17:59:25.935540623+00:00 stderr F time="2025-12-08T17:59:25Z" level=info msg="checking if subscriptions need update" id=0NouQ namespace=openshift-operators 2025-12-08T17:59:25.938409339+00:00 stderr F time="2025-12-08T17:59:25Z" level=info msg="No subscriptions were found in namespace openshift-operator-lifecycle-manager" id=cDdFL namespace=openshift-operator-lifecycle-manager 2025-12-08T17:59:25.938427109+00:00 stderr F time="2025-12-08T17:59:25Z" level=info msg="resolving sources" id=4FwwV namespace=service-telemetry 2025-12-08T17:59:25.938434420+00:00 stderr F time="2025-12-08T17:59:25Z" level=info msg="checking if subscriptions need update" id=4FwwV namespace=service-telemetry 2025-12-08T17:59:25.949489311+00:00 stderr F time="2025-12-08T17:59:25Z" level=info msg="resolving subscriptions in namespace" id=0NouQ namespace=openshift-operators 2025-12-08T17:59:25.960235294+00:00 stderr F time="2025-12-08T17:59:25Z" 
level=info msg="resolving subscriptions in namespace" id=4FwwV namespace=service-telemetry 2025-12-08T17:59:25.960606044+00:00 stderr F time="2025-12-08T17:59:25Z" level=info msg="no subscriptions were updated" id=0NouQ namespace=openshift-operators 2025-12-08T17:59:26.113049271+00:00 stderr F time="2025-12-08T17:59:26Z" level=info msg="no subscriptions were updated" id=4FwwV namespace=service-telemetry 2025-12-08T17:59:27.143327515+00:00 stderr F time="2025-12-08T17:59:27Z" level=info msg="resolving sources" id=oaHig namespace=cert-manager-operator 2025-12-08T17:59:27.143327515+00:00 stderr F time="2025-12-08T17:59:27Z" level=info msg="checking if subscriptions need update" id=oaHig namespace=cert-manager-operator 2025-12-08T17:59:27.942547280+00:00 stderr F time="2025-12-08T17:59:27Z" level=info msg="resolving sources" id=FL1P8 namespace=openshift-monitoring 2025-12-08T17:59:27.942547280+00:00 stderr F time="2025-12-08T17:59:27Z" level=info msg="checking if subscriptions need update" id=FL1P8 namespace=openshift-monitoring 2025-12-08T17:59:28.338442933+00:00 stderr F time="2025-12-08T17:59:28Z" level=info msg="No subscriptions were found in namespace openshift-monitoring" id=FL1P8 namespace=openshift-monitoring 2025-12-08T17:59:28.545136781+00:00 stderr F time="2025-12-08T17:59:28Z" level=info msg="resolving subscriptions in namespace" id=oaHig namespace=cert-manager-operator 2025-12-08T17:59:28.747847353+00:00 stderr F time="2025-12-08T17:59:28Z" level=info msg="no subscriptions were updated" id=oaHig namespace=cert-manager-operator 2025-12-08T17:59:38.454327473+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg=syncing id=Ctqxf ip=install-5nc8t namespace=openshift-operators phase=Complete 2025-12-08T17:59:38.454327473+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg=syncing id=s5TAV ip=install-l78bn namespace=cert-manager-operator phase=Complete 2025-12-08T17:59:38.454385085+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg=syncing id=iebmi ip=install-s8bl7 namespace=service-telemetry phase=Complete 2025-12-08T17:59:38.454385085+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:59:38.455076863+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg=syncing id=Jrg02 ip=install-sk9l5 namespace=service-telemetry phase=Complete 2025-12-08T17:59:38.455076863+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:59:38.467422179+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.467422179+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.469152144+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.469152144+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l 
current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.475655635+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.475655635+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.478151131+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.478151131+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.658321130+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.658321130+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.674857706+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="resolving sources" id=vhFbA namespace=openshift-operators 2025-12-08T17:59:38.674857706+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="checking if subscriptions need update" id=vhFbA namespace=openshift-operators 2025-12-08T17:59:38.675167804+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:59:38.696002573+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="resolving subscriptions in namespace" id=vhFbA namespace=openshift-operators 2025-12-08T17:59:38.715726842+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="no subscriptions were updated" id=vhFbA namespace=openshift-operators 2025-12-08T17:59:38.859030169+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:38.859030169+00:00 stderr F time="2025-12-08T17:59:38Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:39.458397196+00:00 stderr F time="2025-12-08T17:59:39Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:39.458397196+00:00 stderr F time="2025-12-08T17:59:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:39.657726659+00:00 stderr F time="2025-12-08T17:59:39Z" level=info msg="evaluating 
current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:39.657726659+00:00 stderr F time="2025-12-08T17:59:39Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:39.661606592+00:00 stderr F time="2025-12-08T17:59:39Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:59:39.661697624+00:00 stderr F time="2025-12-08T17:59:39Z" level=info msg="resolving sources" id=Mz4q/ namespace=service-telemetry 2025-12-08T17:59:39.661734155+00:00 stderr F time="2025-12-08T17:59:39Z" level=info msg="checking if subscriptions need update" id=Mz4q/ namespace=service-telemetry 2025-12-08T17:59:39.706006432+00:00 stderr F time="2025-12-08T17:59:39Z" level=info msg="resolving subscriptions in namespace" id=Mz4q/ namespace=service-telemetry 2025-12-08T17:59:39.908680994+00:00 stderr F time="2025-12-08T17:59:39Z" level=info msg="no subscriptions were updated" id=Mz4q/ namespace=service-telemetry 2025-12-08T17:59:40.259862869+00:00 stderr F time="2025-12-08T17:59:40Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:40.259862869+00:00 stderr F time="2025-12-08T17:59:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:40.458439262+00:00 stderr F time="2025-12-08T17:59:40Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:40.458790652+00:00 stderr F time="2025-12-08T17:59:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:41.057948173+00:00 stderr F time="2025-12-08T17:59:41Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:41.058056566+00:00 stderr F time="2025-12-08T17:59:41Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:41.083139967+00:00 stderr F time="2025-12-08T17:59:41Z" level=info msg="resolving sources" id=O2AMj namespace=cert-manager-operator 2025-12-08T17:59:41.083139967+00:00 stderr F time="2025-12-08T17:59:41Z" level=info msg="checking if subscriptions need update" id=O2AMj namespace=cert-manager-operator 2025-12-08T17:59:41.095539744+00:00 stderr F time="2025-12-08T17:59:41Z" level=info msg="resolving subscriptions in namespace" id=O2AMj namespace=cert-manager-operator 2025-12-08T17:59:41.113363954+00:00 stderr F time="2025-12-08T17:59:41Z" level=info msg="no subscriptions were updated" id=O2AMj namespace=cert-manager-operator 2025-12-08T17:59:41.265964506+00:00 stderr F time="2025-12-08T17:59:41Z" level=info msg="evaluating current pod" correctHash=true 
correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:41.265964506+00:00 stderr F time="2025-12-08T17:59:41Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:41.657416732+00:00 stderr F time="2025-12-08T17:59:41Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:41.657416732+00:00 stderr F time="2025-12-08T17:59:41Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:42.060896666+00:00 stderr F time="2025-12-08T17:59:42Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:42.060896666+00:00 stderr F time="2025-12-08T17:59:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:42.060896666+00:00 stderr F time="2025-12-08T17:59:42Z" level=info msg="resolving sources" id=a2rRL namespace=service-telemetry 2025-12-08T17:59:42.060896666+00:00 stderr F time="2025-12-08T17:59:42Z" level=info msg="checking if subscriptions need update" id=a2rRL namespace=service-telemetry 2025-12-08T17:59:42.482084738+00:00 stderr F time="2025-12-08T17:59:42Z" level=info msg="resolving subscriptions in namespace" id=a2rRL namespace=service-telemetry 2025-12-08T17:59:42.840962025+00:00 stderr F time="2025-12-08T17:59:42Z" level=info msg="no subscriptions were updated" id=a2rRL namespace=service-telemetry 2025-12-08T17:59:49.064000087+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:59:49.064000087+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:59:49.070003785+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:49.070003785+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:49.070003785+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lYPKj 2025-12-08T17:59:49.070003785+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l 
current-pod.namespace=openshift-marketplace id=lYPKj 2025-12-08T17:59:49.070003785+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Ut40Z 2025-12-08T17:59:49.070003785+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Ut40Z 2025-12-08T17:59:49.070003785+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:49.070003785+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:49.663806941+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:49.663806941+00:00 stderr F time="2025-12-08T17:59:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:50.263378768+00:00 stderr F time="2025-12-08T17:59:50Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:50.263378768+00:00 stderr F time="2025-12-08T17:59:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:50.664202849+00:00 stderr F time="2025-12-08T17:59:50Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lYPKj 2025-12-08T17:59:50.664202849+00:00 stderr F time="2025-12-08T17:59:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lYPKj 2025-12-08T17:59:50.863813159+00:00 stderr F time="2025-12-08T17:59:50Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Ut40Z 2025-12-08T17:59:50.863813159+00:00 stderr F time="2025-12-08T17:59:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and 
matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Ut40Z 2025-12-08T17:59:50.863813159+00:00 stderr F time="2025-12-08T17:59:50Z" level=info msg="catalog update required at 2025-12-08 17:59:50.863684365 +0000 UTC m=+930.826118837" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=Ut40Z 2025-12-08T17:59:51.263827027+00:00 stderr F time="2025-12-08T17:59:51Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:51.263827027+00:00 stderr F time="2025-12-08T17:59:51Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:51.674796625+00:00 stderr F I1208 17:59:51.674719 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:59:51.674922419+00:00 stderr F time="2025-12-08T17:59:51Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-jlbqc has not yet reported ready" id=Ut40Z 2025-12-08T17:59:51.674922419+00:00 stderr F time="2025-12-08T17:59:51Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-jlbqc has not yet reported ready" id=Ut40Z 2025-12-08T17:59:51.674934279+00:00 stderr F time="2025-12-08T17:59:51Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=Ut40Z 2025-12-08T17:59:51.864774871+00:00 stderr F time="2025-12-08T17:59:51Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:51.864774871+00:00 stderr F time="2025-12-08T17:59:51Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:52.266385003+00:00 stderr F time="2025-12-08T17:59:52Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=PV+c+ 2025-12-08T17:59:52.266385003+00:00 stderr F time="2025-12-08T17:59:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=PV+c+ 2025-12-08T17:59:52.863967648+00:00 stderr F 
time="2025-12-08T17:59:52Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:52.863967648+00:00 stderr F time="2025-12-08T17:59:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:52.866826274+00:00 stderr F time="2025-12-08T17:59:52Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:59:52.866953257+00:00 stderr F time="2025-12-08T17:59:52Z" level=info msg="resolving sources" id=tEXCw namespace=service-telemetry 2025-12-08T17:59:52.866953257+00:00 stderr F time="2025-12-08T17:59:52Z" level=info msg="checking if subscriptions need update" id=tEXCw namespace=service-telemetry 2025-12-08T17:59:52.897441589+00:00 stderr F time="2025-12-08T17:59:52Z" level=info msg="resolving subscriptions in namespace" id=tEXCw namespace=service-telemetry 2025-12-08T17:59:53.066844183+00:00 stderr F time="2025-12-08T17:59:53Z" level=info msg="no subscriptions were updated" id=tEXCw namespace=service-telemetry 2025-12-08T17:59:53.263845544+00:00 stderr F time="2025-12-08T17:59:53Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=n9lAq 2025-12-08T17:59:53.263845544+00:00 stderr F time="2025-12-08T17:59:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=n9lAq 2025-12-08T17:59:53.463927296+00:00 stderr F time="2025-12-08T17:59:53Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:53.463927296+00:00 stderr F time="2025-12-08T17:59:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:53.471505406+00:00 stderr F time="2025-12-08T17:59:53Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T17:59:53.678486518+00:00 stderr F time="2025-12-08T17:59:53Z" level=info msg="resolving sources" id=mq2Wq namespace=service-telemetry 2025-12-08T17:59:53.678486518+00:00 stderr F time="2025-12-08T17:59:53Z" level=info msg="checking if subscriptions need update" id=mq2Wq namespace=service-telemetry 2025-12-08T17:59:54.463903593+00:00 stderr F time="2025-12-08T17:59:54Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:54.463903593+00:00 stderr F time="2025-12-08T17:59:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:54.662982999+00:00 stderr F time="2025-12-08T17:59:54Z" level=info 
msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=PV+c+ 2025-12-08T17:59:54.662982999+00:00 stderr F time="2025-12-08T17:59:54Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=PV+c+ 2025-12-08T17:59:54.673454484+00:00 stderr F time="2025-12-08T17:59:54Z" level=info msg="resolving subscriptions in namespace" id=mq2Wq namespace=service-telemetry 2025-12-08T17:59:55.051306450+00:00 stderr F time="2025-12-08T17:59:55Z" level=info msg="no subscriptions were updated" id=mq2Wq namespace=service-telemetry 2025-12-08T17:59:55.064450506+00:00 stderr F time="2025-12-08T17:59:55Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:55.064515378+00:00 stderr F time="2025-12-08T17:59:55Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T17:59:55.665332538+00:00 stderr F time="2025-12-08T17:59:55Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=n9lAq 2025-12-08T17:59:55.665332538+00:00 stderr F time="2025-12-08T17:59:55Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=n9lAq 2025-12-08T17:59:56.072258129+00:00 stderr F time="2025-12-08T17:59:56Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:56.072258129+00:00 stderr F time="2025-12-08T17:59:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:56.263951720+00:00 stderr F time="2025-12-08T17:59:56Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8BQ8e 2025-12-08T17:59:56.263951720+00:00 stderr F time="2025-12-08T17:59:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8BQ8e 2025-12-08T17:59:56.663705293+00:00 stderr F time="2025-12-08T17:59:56Z" level=info msg="evaluating current pod" correctHash=true 
correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:56.663705293+00:00 stderr F time="2025-12-08T17:59:56Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T17:59:57.265728115+00:00 stderr F time="2025-12-08T17:59:57Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=t0j7F 2025-12-08T17:59:57.265728115+00:00 stderr F time="2025-12-08T17:59:57Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=t0j7F 2025-12-08T17:59:57.663574258+00:00 stderr F time="2025-12-08T17:59:57Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:57.663574258+00:00 stderr F time="2025-12-08T17:59:57Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:58.264925032+00:00 stderr F time="2025-12-08T17:59:58Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:58.264925032+00:00 stderr F time="2025-12-08T17:59:58Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T17:59:58.666192364+00:00 stderr F time="2025-12-08T17:59:58Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8BQ8e 2025-12-08T17:59:58.666192364+00:00 stderr F time="2025-12-08T17:59:58Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8BQ8e 2025-12-08T17:59:59.265719821+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:59.265719821+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:59.265719821+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="resolving sources" id=QoBqr namespace=service-telemetry 
2025-12-08T17:59:59.265719821+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="checking if subscriptions need update" id=QoBqr namespace=service-telemetry 2025-12-08T17:59:59.292997038+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="resolving subscriptions in namespace" id=QoBqr namespace=service-telemetry 2025-12-08T17:59:59.437112728+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="no subscriptions were updated" id=QoBqr namespace=service-telemetry 2025-12-08T17:59:59.462922796+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=t0j7F 2025-12-08T17:59:59.462922796+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=t0j7F 2025-12-08T17:59:59.462972598+00:00 stderr F time="2025-12-08T17:59:59Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-jlbqc has not yet reported ready" id=t0j7F 2025-12-08T17:59:59.462993438+00:00 stderr F time="2025-12-08T17:59:59Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-jlbqc has not yet reported ready" id=t0j7F 2025-12-08T17:59:59.462993438+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=t0j7F 2025-12-08T17:59:59.667052115+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T17:59:59.667052115+00:00 stderr F time="2025-12-08T17:59:59Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:00:00.077078717+00:00 stderr F time="2025-12-08T18:00:00Z" level=info msg="resolving sources" id=o0DP6 namespace=service-telemetry 2025-12-08T18:00:00.077078717+00:00 stderr F time="2025-12-08T18:00:00Z" level=info msg="checking if subscriptions need update" id=o0DP6 namespace=service-telemetry 2025-12-08T18:00:01.071218741+00:00 stderr F time="2025-12-08T18:00:01Z" level=info msg="resolving subscriptions in namespace" id=o0DP6 namespace=service-telemetry 2025-12-08T18:00:01.433929330+00:00 stderr F time="2025-12-08T18:00:01Z" level=info msg="no subscriptions were updated" id=o0DP6 namespace=service-telemetry 2025-12-08T18:00:02.097277475+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="evaluating current pod" catalogsource.name=community-operators 
catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=6Td43 2025-12-08T18:00:02.097277475+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=6Td43 2025-12-08T18:00:02.106275451+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=6Td43 2025-12-08T18:00:02.106275451+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=6Td43 2025-12-08T18:00:02.106357653+00:00 stderr F time="2025-12-08T18:00:02Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-jlbqc has not yet reported ready" id=6Td43 2025-12-08T18:00:02.106357653+00:00 stderr F time="2025-12-08T18:00:02Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-jlbqc has not yet reported ready" id=6Td43 2025-12-08T18:00:02.106379194+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=6Td43 2025-12-08T18:00:02.356929683+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ejSQk 2025-12-08T18:00:02.356929683+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ejSQk 2025-12-08T18:00:02.365074097+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ejSQk 2025-12-08T18:00:02.365074097+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" 
catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=ejSQk 2025-12-08T18:00:02.694730066+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Duwc6 2025-12-08T18:00:02.694730066+00:00 stderr F time="2025-12-08T18:00:02Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Duwc6 2025-12-08T18:00:03.294666023+00:00 stderr F time="2025-12-08T18:00:03Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Duwc6 2025-12-08T18:00:03.294666023+00:00 stderr F time="2025-12-08T18:00:03Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Duwc6 2025-12-08T18:00:04.690036088+00:00 stderr F time="2025-12-08T18:00:04Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=HuBQJ 2025-12-08T18:00:04.690036088+00:00 stderr F time="2025-12-08T18:00:04Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=HuBQJ 2025-12-08T18:00:04.696031776+00:00 stderr F time="2025-12-08T18:00:04Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=HuBQJ 2025-12-08T18:00:04.696031776+00:00 stderr F time="2025-12-08T18:00:04Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=HuBQJ 2025-12-08T18:00:04.696080847+00:00 stderr F time="2025-12-08T18:00:04Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-jlbqc has not yet reported ready" id=HuBQJ 2025-12-08T18:00:04.696080847+00:00 stderr F time="2025-12-08T18:00:04Z" level=error msg="error 
ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace error="catalog polling: community-operators not ready for update: update pod community-operators-jlbqc has not yet reported ready" id=HuBQJ 2025-12-08T18:00:04.696080847+00:00 stderr F time="2025-12-08T18:00:04Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace id=HuBQJ 2025-12-08T18:00:05.325409697+00:00 stderr F time="2025-12-08T18:00:05Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=tSeVb 2025-12-08T18:00:05.325409697+00:00 stderr F time="2025-12-08T18:00:05Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=tSeVb 2025-12-08T18:00:05.496592549+00:00 stderr F time="2025-12-08T18:00:05Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=tSeVb 2025-12-08T18:00:05.496592549+00:00 stderr F time="2025-12-08T18:00:05Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=tSeVb 2025-12-08T18:00:05.895204741+00:00 stderr F time="2025-12-08T18:00:05Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=OmWcP 2025-12-08T18:00:05.895204741+00:00 stderr F time="2025-12-08T18:00:05Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=OmWcP 2025-12-08T18:00:06.494638095+00:00 stderr F time="2025-12-08T18:00:06Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=OmWcP 2025-12-08T18:00:06.494638095+00:00 stderr F time="2025-12-08T18:00:06Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=OmWcP 2025-12-08T18:00:21.684437503+00:00 stderr F time="2025-12-08T18:00:21Z" level=info msg="evaluating current pod" 
catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=0qNJu 2025-12-08T18:00:21.684437503+00:00 stderr F time="2025-12-08T18:00:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=0qNJu 2025-12-08T18:00:21.694357503+00:00 stderr F time="2025-12-08T18:00:21Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=0qNJu 2025-12-08T18:00:21.694357503+00:00 stderr F time="2025-12-08T18:00:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=0qNJu 2025-12-08T18:00:25.407462149+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T18:00:25.407650044+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T18:00:25.414231947+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:25.414231947+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:25.414720509+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:25.414720509+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:25.417899613+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:25.417899613+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:25.420352897+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:25.420352897+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="of 1 pods matching label selector, 1 
have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:25.421807606+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:25.421807606+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:25.434398437+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T18:00:25.434535600+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="resolving sources" id=V6Oho namespace=openshift-operators 2025-12-08T18:00:25.434535600+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="checking if subscriptions need update" id=V6Oho namespace=openshift-operators 2025-12-08T18:00:25.456849877+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="resolving subscriptions in namespace" id=V6Oho namespace=openshift-operators 2025-12-08T18:00:25.552548454+00:00 stderr F time="2025-12-08T18:00:25Z" level=info msg="no subscriptions were updated" id=V6Oho namespace=openshift-operators 2025-12-08T18:00:26.012979772+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:26.012979772+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:26.023896759+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T18:00:26.023992592+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg="resolving sources" id=VseUm namespace=cert-manager-operator 2025-12-08T18:00:26.023992592+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg="checking if subscriptions need update" id=VseUm namespace=cert-manager-operator 2025-12-08T18:00:26.041457501+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg="resolving subscriptions in namespace" id=VseUm namespace=cert-manager-operator 2025-12-08T18:00:26.065535564+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg="no subscriptions were updated" id=VseUm namespace=cert-manager-operator 2025-12-08T18:00:26.211226815+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:26.211226815+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:26.812010175+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 
2025-12-08T18:00:26.812010175+00:00 stderr F time="2025-12-08T18:00:26Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:27.010519625+00:00 stderr F time="2025-12-08T18:00:27Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:27.010519625+00:00 stderr F time="2025-12-08T18:00:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:27.612064964+00:00 stderr F time="2025-12-08T18:00:27Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:27.612064964+00:00 stderr F time="2025-12-08T18:00:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:27.811724684+00:00 stderr F time="2025-12-08T18:00:27Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:27.811724684+00:00 stderr F time="2025-12-08T18:00:27Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:28.411283892+00:00 stderr F time="2025-12-08T18:00:28Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:28.411283892+00:00 stderr F time="2025-12-08T18:00:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:28.612440192+00:00 stderr F time="2025-12-08T18:00:28Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:00:28.612440192+00:00 stderr F time="2025-12-08T18:00:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:00:28.615019350+00:00 stderr F time="2025-12-08T18:00:28Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T18:00:28.615536503+00:00 stderr F time="2025-12-08T18:00:28Z" level=info msg="resolving sources" id=Sj4kU namespace=service-telemetry 2025-12-08T18:00:28.615536503+00:00 stderr F time="2025-12-08T18:00:28Z" level=info msg="checking if subscriptions need update" id=Sj4kU namespace=service-telemetry 2025-12-08T18:00:28.667101139+00:00 stderr F time="2025-12-08T18:00:28Z" level=info msg="resolving subscriptions in namespace" id=Sj4kU namespace=service-telemetry 
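The repeated "error ensuring registry server: ... update pod community-operators-jlbqc has not yet reported ready" entries above mean the catalog-operator is polling for a refreshed registry pod and requeueing the CatalogSource until that pod's Ready condition turns true. As a minimal sketch of the condition being waited on (illustrative only, written against the standard k8s.io/api/core/v1 types rather than OLM's internal helper), a pod counts as "ready" once its PodReady condition reports ConditionTrue:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// isPodReady reports whether the pod's status carries a Ready condition
// with status "True" -- the state the catalog-operator is waiting for
// before it swaps in the update pod. (Illustrative sketch; OLM's actual
// check lives in its internal packages.)
func isPodReady(pod *corev1.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReady {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	// Hypothetical pod object standing in for community-operators-jlbqc.
	pod := &corev1.Pod{
		Status: corev1.PodStatus{
			Conditions: []corev1.PodCondition{
				{Type: corev1.PodReady, Status: corev1.ConditionFalse},
			},
		},
	}
	fmt.Println("ready:", isPodReady(pod)) // prints "ready: false"
}

While the requeue loop above repeats, the same condition can be watched directly with "oc get pod community-operators-jlbqc -n openshift-marketplace -w"; once the pod reports Ready, the "requeueing registry server" messages stop.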
2025-12-08T18:00:28.826594363+00:00 stderr F time="2025-12-08T18:00:28Z" level=info msg="no subscriptions were updated" id=Sj4kU namespace=service-telemetry 2025-12-08T18:00:29.212018240+00:00 stderr F time="2025-12-08T18:00:29Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:00:29.212018240+00:00 stderr F time="2025-12-08T18:00:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:00:29.212297297+00:00 stderr F time="2025-12-08T18:00:29Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T18:00:29.413678373+00:00 stderr F time="2025-12-08T18:00:29Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:29.413678373+00:00 stderr F time="2025-12-08T18:00:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:29.423939312+00:00 stderr F time="2025-12-08T18:00:29Z" level=info msg="resolving sources" id=YwVC/ namespace=service-telemetry 2025-12-08T18:00:29.423939312+00:00 stderr F time="2025-12-08T18:00:29Z" level=info msg="checking if subscriptions need update" id=YwVC/ namespace=service-telemetry 2025-12-08T18:00:30.011123974+00:00 stderr F time="2025-12-08T18:00:30Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:30.011123974+00:00 stderr F time="2025-12-08T18:00:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:00:30.211543184+00:00 stderr F time="2025-12-08T18:00:30Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:30.211543184+00:00 stderr F time="2025-12-08T18:00:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:30.424026752+00:00 stderr F time="2025-12-08T18:00:30Z" level=info msg="resolving subscriptions in namespace" id=YwVC/ namespace=service-telemetry 2025-12-08T18:00:30.809661623+00:00 stderr F time="2025-12-08T18:00:30Z" level=info msg="no subscriptions were updated" id=YwVC/ namespace=service-telemetry 2025-12-08T18:00:30.811157633+00:00 stderr F time="2025-12-08T18:00:30Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:00:30.811212175+00:00 stderr F time="2025-12-08T18:00:30Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg 
current-pod.namespace=openshift-marketplace 2025-12-08T18:00:31.012288221+00:00 stderr F time="2025-12-08T18:00:31Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:31.012442035+00:00 stderr F time="2025-12-08T18:00:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:31.611686034+00:00 stderr F time="2025-12-08T18:00:31Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:31.611686034+00:00 stderr F time="2025-12-08T18:00:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:00:31.812208618+00:00 stderr F time="2025-12-08T18:00:31Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:00:31.812208618+00:00 stderr F time="2025-12-08T18:00:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:00:32.211353684+00:00 stderr F time="2025-12-08T18:00:32Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:00:32.211353684+00:00 stderr F time="2025-12-08T18:00:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:00:32.225532127+00:00 stderr F time="2025-12-08T18:00:32Z" level=info msg="resolving sources" id=8Evvx namespace=service-telemetry 2025-12-08T18:00:32.225631840+00:00 stderr F time="2025-12-08T18:00:32Z" level=info msg="checking if subscriptions need update" id=8Evvx namespace=service-telemetry 2025-12-08T18:00:33.222336051+00:00 stderr F time="2025-12-08T18:00:33Z" level=info msg="resolving subscriptions in namespace" id=8Evvx namespace=service-telemetry 2025-12-08T18:00:33.628994305+00:00 stderr F time="2025-12-08T18:00:33Z" level=info msg="no subscriptions were updated" id=8Evvx namespace=service-telemetry 2025-12-08T18:01:18.211346182+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=DsLUi 2025-12-08T18:01:18.211346182+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=DsLUi 2025-12-08T18:01:18.219179051+00:00 stderr F time="2025-12-08T18:01:18Z" level=info 
msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=DsLUi 2025-12-08T18:01:18.219237293+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=DsLUi 2025-12-08T18:01:18.219313615+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="catalog update required at 2025-12-08 18:01:18.219289064 +0000 UTC m=+1018.181723546" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=DsLUi 2025-12-08T18:01:18.228221972+00:00 stderr F I1208 18:01:18.228160 1 warnings.go:110] "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"registry-server\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"registry-server\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"registry-server\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"registry-server\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")" 2025-12-08T18:01:18.228405227+00:00 stderr F time="2025-12-08T18:01:18Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="catalog polling: infrawatch-operators not ready for update: update pod infrawatch-operators-b88kp has not yet reported ready" id=DsLUi 2025-12-08T18:01:18.228461188+00:00 stderr F time="2025-12-08T18:01:18Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="catalog polling: infrawatch-operators not ready for update: update pod infrawatch-operators-b88kp has not yet reported ready" id=DsLUi 2025-12-08T18:01:18.228538930+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=DsLUi 2025-12-08T18:01:18.235283690+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T18:01:18.235283690+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T18:01:18.241013912+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:01:18.241013912+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:01:18.410667093+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l 
current-pod.namespace=openshift-marketplace 2025-12-08T18:01:18.410736984+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:01:18.614034021+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=/Ky8K 2025-12-08T18:01:18.614034021+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=/Ky8K 2025-12-08T18:01:19.409857173+00:00 stderr F time="2025-12-08T18:01:19Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:01:19.410020437+00:00 stderr F time="2025-12-08T18:01:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:01:19.610369475+00:00 stderr F time="2025-12-08T18:01:19Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:01:19.610455587+00:00 stderr F time="2025-12-08T18:01:19Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:01:20.410595344+00:00 stderr F time="2025-12-08T18:01:20Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=/Ky8K 2025-12-08T18:01:20.410595344+00:00 stderr F time="2025-12-08T18:01:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=/Ky8K 2025-12-08T18:01:20.410595344+00:00 stderr F time="2025-12-08T18:01:20Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="catalog polling: infrawatch-operators not ready for update: update pod infrawatch-operators-b88kp has not yet reported ready" id=/Ky8K 2025-12-08T18:01:20.410595344+00:00 stderr F time="2025-12-08T18:01:20Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="catalog polling: infrawatch-operators not ready for update: update pod infrawatch-operators-b88kp has not yet 
reported ready" id=/Ky8K 2025-12-08T18:01:20.410595344+00:00 stderr F time="2025-12-08T18:01:20Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=/Ky8K 2025-12-08T18:01:20.610077228+00:00 stderr F time="2025-12-08T18:01:20Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:01:20.610077228+00:00 stderr F time="2025-12-08T18:01:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:01:20.809604814+00:00 stderr F time="2025-12-08T18:01:20Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:01:20.809604814+00:00 stderr F time="2025-12-08T18:01:20Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:01:21.611835867+00:00 stderr F time="2025-12-08T18:01:21Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WXurH 2025-12-08T18:01:21.611835867+00:00 stderr F time="2025-12-08T18:01:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WXurH 2025-12-08T18:01:21.811833645+00:00 stderr F time="2025-12-08T18:01:21Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:01:21.811833645+00:00 stderr F time="2025-12-08T18:01:21Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:01:21.814656400+00:00 stderr F time="2025-12-08T18:01:21Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T18:01:21.814759423+00:00 stderr F time="2025-12-08T18:01:21Z" level=info msg="resolving sources" id=5AMdU namespace=service-telemetry 2025-12-08T18:01:21.814759423+00:00 stderr F time="2025-12-08T18:01:21Z" level=info msg="checking if subscriptions need update" id=5AMdU namespace=service-telemetry 2025-12-08T18:01:21.845313017+00:00 stderr F time="2025-12-08T18:01:21Z" level=info msg="resolving subscriptions in namespace" id=5AMdU namespace=service-telemetry 2025-12-08T18:01:22.009808850+00:00 stderr F time="2025-12-08T18:01:22Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:01:22.009808850+00:00 stderr F time="2025-12-08T18:01:22Z" level=info msg="of 1 pods matching label selector, 1 have the correct images 
and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:01:22.018410379+00:00 stderr F time="2025-12-08T18:01:22Z" level=info msg=syncing reconciling="*v1alpha1.Subscription" selflink= 2025-12-08T18:01:22.115528126+00:00 stderr F time="2025-12-08T18:01:22Z" level=info msg="no subscriptions were updated" id=5AMdU namespace=service-telemetry 2025-12-08T18:01:22.626942051+00:00 stderr F time="2025-12-08T18:01:22Z" level=info msg="resolving sources" id=E8aRa namespace=service-telemetry 2025-12-08T18:01:22.626942051+00:00 stderr F time="2025-12-08T18:01:22Z" level=info msg="checking if subscriptions need update" id=E8aRa namespace=service-telemetry 2025-12-08T18:01:23.010409528+00:00 stderr F time="2025-12-08T18:01:23Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:01:23.010409528+00:00 stderr F time="2025-12-08T18:01:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:01:23.210934119+00:00 stderr F time="2025-12-08T18:01:23Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:01:23.210934119+00:00 stderr F time="2025-12-08T18:01:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace 2025-12-08T18:01:23.410290411+00:00 stderr F time="2025-12-08T18:01:23Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WXurH 2025-12-08T18:01:23.410290411+00:00 stderr F time="2025-12-08T18:01:23Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WXurH 2025-12-08T18:01:23.410344023+00:00 stderr F time="2025-12-08T18:01:23Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="catalog polling: infrawatch-operators not ready for update: update pod infrawatch-operators-b88kp has not yet reported ready" id=WXurH 2025-12-08T18:01:23.410360513+00:00 stderr F time="2025-12-08T18:01:23Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="catalog polling: infrawatch-operators not ready for update: update pod infrawatch-operators-b88kp has not yet reported ready" id=WXurH 2025-12-08T18:01:23.410375723+00:00 stderr F time="2025-12-08T18:01:23Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=WXurH 
2025-12-08T18:01:23.619816703+00:00 stderr F time="2025-12-08T18:01:23Z" level=info msg="resolving subscriptions in namespace" id=E8aRa namespace=service-telemetry 2025-12-08T18:01:23.978097658+00:00 stderr F time="2025-12-08T18:01:23Z" level=info msg="no subscriptions were updated" id=E8aRa namespace=service-telemetry 2025-12-08T18:01:24.011460437+00:00 stderr F time="2025-12-08T18:01:24Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:01:24.011460437+00:00 stderr F time="2025-12-08T18:01:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:01:24.210459508+00:00 stderr F time="2025-12-08T18:01:24Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:01:24.210459508+00:00 stderr F time="2025-12-08T18:01:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace 2025-12-08T18:01:24.810539395+00:00 stderr F time="2025-12-08T18:01:24Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:01:24.810539395+00:00 stderr F time="2025-12-08T18:01:24Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:01:25.010919724+00:00 stderr F time="2025-12-08T18:01:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:01:25.010919724+00:00 stderr F time="2025-12-08T18:01:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace 2025-12-08T18:01:25.610799196+00:00 stderr F time="2025-12-08T18:01:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:01:25.610799196+00:00 stderr F time="2025-12-08T18:01:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:01:25.610860607+00:00 stderr F time="2025-12-08T18:01:25Z" level=info msg="resolving sources" id=rfBh5 namespace=service-telemetry 2025-12-08T18:01:25.610860607+00:00 stderr F time="2025-12-08T18:01:25Z" level=info msg="checking if subscriptions need update" id=rfBh5 namespace=service-telemetry 2025-12-08T18:01:25.810248759+00:00 stderr F time="2025-12-08T18:01:25Z" level=info msg="evaluating current pod" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:01:25.810248759+00:00 stderr F 
time="2025-12-08T18:01:25Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry 2025-12-08T18:01:26.424607367+00:00 stderr F time="2025-12-08T18:01:26Z" level=info msg="resolving subscriptions in namespace" id=rfBh5 namespace=service-telemetry 2025-12-08T18:01:26.801692833+00:00 stderr F time="2025-12-08T18:01:26Z" level=info msg="no subscriptions were updated" id=rfBh5 namespace=service-telemetry 2025-12-08T18:01:28.222233707+00:00 stderr F time="2025-12-08T18:01:28Z" level=info msg="resolving sources" id=XMVUx namespace=service-telemetry 2025-12-08T18:01:28.222233707+00:00 stderr F time="2025-12-08T18:01:28Z" level=info msg="checking if subscriptions need update" id=XMVUx namespace=service-telemetry 2025-12-08T18:01:28.642230466+00:00 stderr F time="2025-12-08T18:01:28Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=h1Qcu 2025-12-08T18:01:28.642230466+00:00 stderr F time="2025-12-08T18:01:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=h1Qcu 2025-12-08T18:01:28.653314251+00:00 stderr F time="2025-12-08T18:01:28Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=h1Qcu 2025-12-08T18:01:28.653314251+00:00 stderr F time="2025-12-08T18:01:28Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=h1Qcu 2025-12-08T18:01:28.653362142+00:00 stderr F time="2025-12-08T18:01:28Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="catalog polling: infrawatch-operators not ready for update: update pod infrawatch-operators-b88kp has not yet reported ready" id=h1Qcu 2025-12-08T18:01:28.653362142+00:00 stderr F time="2025-12-08T18:01:28Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="catalog polling: infrawatch-operators not ready for update: update pod infrawatch-operators-b88kp has not yet reported ready" id=h1Qcu 2025-12-08T18:01:28.653362142+00:00 stderr F time="2025-12-08T18:01:28Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=h1Qcu 2025-12-08T18:01:29.001108057+00:00 stderr F time="2025-12-08T18:01:29Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true 
current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WvUzz 2025-12-08T18:01:29.001108057+00:00 stderr F time="2025-12-08T18:01:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WvUzz 2025-12-08T18:01:29.009584093+00:00 stderr F time="2025-12-08T18:01:29Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WvUzz 2025-12-08T18:01:29.009584093+00:00 stderr F time="2025-12-08T18:01:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WvUzz 2025-12-08T18:01:29.222335611+00:00 stderr F time="2025-12-08T18:01:29Z" level=info msg="resolving subscriptions in namespace" id=XMVUx namespace=service-telemetry 2025-12-08T18:01:29.239479388+00:00 stderr F time="2025-12-08T18:01:29Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=NUbuI 2025-12-08T18:01:29.239479388+00:00 stderr F time="2025-12-08T18:01:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=NUbuI 2025-12-08T18:01:29.581979113+00:00 stderr F time="2025-12-08T18:01:29Z" level=info msg="no subscriptions were updated" id=XMVUx namespace=service-telemetry 2025-12-08T18:01:29.840037728+00:00 stderr F time="2025-12-08T18:01:29Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=NUbuI 2025-12-08T18:01:29.840037728+00:00 stderr F time="2025-12-08T18:01:29Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=NUbuI 2025-12-08T18:01:31.530543956+00:00 stderr F time="2025-12-08T18:01:31Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=N8sJI 2025-12-08T18:01:31.530612558+00:00 stderr F time="2025-12-08T18:01:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true 
current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=N8sJI 2025-12-08T18:01:31.542455144+00:00 stderr F time="2025-12-08T18:01:31Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=N8sJI 2025-12-08T18:01:31.542512756+00:00 stderr F time="2025-12-08T18:01:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=N8sJI 2025-12-08T18:01:31.542576727+00:00 stderr F time="2025-12-08T18:01:31Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="catalog polling: infrawatch-operators not ready for update: update pod infrawatch-operators-b88kp has not yet reported ready" id=N8sJI 2025-12-08T18:01:31.542606498+00:00 stderr F time="2025-12-08T18:01:31Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry error="catalog polling: infrawatch-operators not ready for update: update pod infrawatch-operators-b88kp has not yet reported ready" id=N8sJI 2025-12-08T18:01:31.542631089+00:00 stderr F time="2025-12-08T18:01:31Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry id=N8sJI 2025-12-08T18:01:32.027644950+00:00 stderr F time="2025-12-08T18:01:32Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=faZy0 2025-12-08T18:01:32.027644950+00:00 stderr F time="2025-12-08T18:01:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=faZy0 2025-12-08T18:01:32.038083327+00:00 stderr F time="2025-12-08T18:01:32Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=faZy0 2025-12-08T18:01:32.038083327+00:00 stderr F time="2025-12-08T18:01:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=faZy0 2025-12-08T18:01:32.439695847+00:00 stderr F time="2025-12-08T18:01:32Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j 
current-pod.namespace=service-telemetry id=a1CzI 2025-12-08T18:01:32.439695847+00:00 stderr F time="2025-12-08T18:01:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=a1CzI 2025-12-08T18:01:33.040614725+00:00 stderr F time="2025-12-08T18:01:33Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=a1CzI 2025-12-08T18:01:33.040614725+00:00 stderr F time="2025-12-08T18:01:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=a1CzI 2025-12-08T18:01:48.237604929+00:00 stderr F time="2025-12-08T18:01:48Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WyV6C 2025-12-08T18:01:48.237604929+00:00 stderr F time="2025-12-08T18:01:48Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WyV6C 2025-12-08T18:01:48.248481599+00:00 stderr F time="2025-12-08T18:01:48Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WyV6C 2025-12-08T18:01:48.248481599+00:00 stderr F time="2025-12-08T18:01:48Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=WyV6C 2025-12-08T18:02:40.514337667+00:00 stderr F time="2025-12-08T18:02:40Z" level=info msg="resolving sources" id=PQR28 namespace=openshift-must-gather-gctth 2025-12-08T18:02:40.514337667+00:00 stderr F time="2025-12-08T18:02:40Z" level=info msg="checking if subscriptions need update" id=PQR28 namespace=openshift-must-gather-gctth 2025-12-08T18:02:40.524725994+00:00 stderr F time="2025-12-08T18:02:40Z" level=info msg="No subscriptions were found in namespace openshift-must-gather-gctth" id=PQR28 namespace=openshift-must-gather-gctth 2025-12-08T18:02:40.551003593+00:00 stderr F time="2025-12-08T18:02:40Z" level=info msg="resolving sources" id=51luy namespace=openshift-must-gather-gctth 2025-12-08T18:02:40.551003593+00:00 stderr F time="2025-12-08T18:02:40Z" level=info msg="checking if subscriptions need update" id=51luy namespace=openshift-must-gather-gctth 2025-12-08T18:02:40.621908442+00:00 stderr F time="2025-12-08T18:02:40Z" level=info msg="No subscriptions were found in namespace 
openshift-must-gather-gctth" id=51luy namespace=openshift-must-gather-gctth 2025-12-08T18:02:45.855083059+00:00 stderr F time="2025-12-08T18:02:45Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=ixQxG 2025-12-08T18:02:45.855083059+00:00 stderr F time="2025-12-08T18:02:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=ixQxG 2025-12-08T18:02:45.855322775+00:00 stderr F time="2025-12-08T18:02:45Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=6T9Yo 2025-12-08T18:02:45.855322775+00:00 stderr F time="2025-12-08T18:02:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=6T9Yo 2025-12-08T18:02:45.863112512+00:00 stderr F time="2025-12-08T18:02:45Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=6T9Yo 2025-12-08T18:02:45.863112512+00:00 stderr F time="2025-12-08T18:02:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=6T9Yo 2025-12-08T18:02:45.863388460+00:00 stderr F time="2025-12-08T18:02:45Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=ixQxG 2025-12-08T18:02:45.863388460+00:00 stderr F time="2025-12-08T18:02:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=ixQxG 2025-12-08T18:02:46.453510011+00:00 stderr F time="2025-12-08T18:02:46Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=Ie8FX 2025-12-08T18:02:46.453510011+00:00 stderr F time="2025-12-08T18:02:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true 
current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=Ie8FX 2025-12-08T18:02:46.653540278+00:00 stderr F time="2025-12-08T18:02:46Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=BT+tO 2025-12-08T18:02:46.653540278+00:00 stderr F time="2025-12-08T18:02:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=BT+tO 2025-12-08T18:02:47.653466132+00:00 stderr F time="2025-12-08T18:02:47Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=Ie8FX 2025-12-08T18:02:47.653466132+00:00 stderr F time="2025-12-08T18:02:47Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=Ie8FX 2025-12-08T18:02:47.856829356+00:00 stderr F time="2025-12-08T18:02:47Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=BT+tO 2025-12-08T18:02:47.856829356+00:00 stderr F time="2025-12-08T18:02:47Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=BT+tO 2025-12-08T18:04:12.176930774+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=xbP3Y 2025-12-08T18:04:12.176930774+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=xbP3Y 2025-12-08T18:04:12.176930774+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=q5NWD 2025-12-08T18:04:12.176930774+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true 
current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=q5NWD 2025-12-08T18:04:12.187937806+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=xbP3Y 2025-12-08T18:04:12.187937806+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=xbP3Y 2025-12-08T18:04:12.191007157+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=q5NWD 2025-12-08T18:04:12.191007157+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=q5NWD 2025-12-08T18:04:12.770975888+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=eZop7 2025-12-08T18:04:12.771108892+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=eZop7 2025-12-08T18:04:12.970111879+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=qGSVh 2025-12-08T18:04:12.970193831+00:00 stderr F time="2025-12-08T18:04:12Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=qGSVh 2025-12-08T18:04:13.969981266+00:00 stderr F time="2025-12-08T18:04:13Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=eZop7 2025-12-08T18:04:13.970074688+00:00 stderr F time="2025-12-08T18:04:13Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true 
current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=eZop7 2025-12-08T18:04:14.173936035+00:00 stderr F time="2025-12-08T18:04:14Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=qGSVh 2025-12-08T18:04:14.173936035+00:00 stderr F time="2025-12-08T18:04:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=qGSVh 2025-12-08T18:04:14.770800274+00:00 stderr F time="2025-12-08T18:04:14Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=bzgMP 2025-12-08T18:04:14.770800274+00:00 stderr F time="2025-12-08T18:04:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=bzgMP 2025-12-08T18:04:14.970182531+00:00 stderr F time="2025-12-08T18:04:14Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=LL/q7 2025-12-08T18:04:14.970182531+00:00 stderr F time="2025-12-08T18:04:14Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=LL/q7 2025-12-08T18:04:15.970102900+00:00 stderr F time="2025-12-08T18:04:15Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=bzgMP 2025-12-08T18:04:15.970217023+00:00 stderr F time="2025-12-08T18:04:15Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=bzgMP 2025-12-08T18:04:16.169970540+00:00 stderr F time="2025-12-08T18:04:16Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=LL/q7 2025-12-08T18:04:16.169970540+00:00 stderr F time="2025-12-08T18:04:16Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true 
current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=LL/q7 2025-12-08T18:04:31.519346739+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=IGyWq 2025-12-08T18:04:31.519346739+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=IGyWq 2025-12-08T18:04:31.520457829+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=DfE7a 2025-12-08T18:04:31.520457829+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=DfE7a 2025-12-08T18:04:31.529991361+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=IGyWq 2025-12-08T18:04:31.529991361+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=IGyWq 2025-12-08T18:04:31.529991361+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="catalog update required at 2025-12-08 18:04:31.529217091 +0000 UTC m=+1211.491651603" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=IGyWq 2025-12-08T18:04:31.529991361+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=DfE7a 2025-12-08T18:04:31.529991361+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=DfE7a 2025-12-08T18:04:31.724531951+00:00 stderr F I1208 18:04:31.723590 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T18:04:31.724531951+00:00 stderr F time="2025-12-08T18:04:31Z" level=error msg="error ensuring registry server: could not ensure update 
pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-p8pz8 has not yet reported ready" id=IGyWq 2025-12-08T18:04:31.724531951+00:00 stderr F time="2025-12-08T18:04:31Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-p8pz8 has not yet reported ready" id=IGyWq 2025-12-08T18:04:31.724531951+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=IGyWq 2025-12-08T18:04:32.317915797+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Qc8PR 2025-12-08T18:04:32.317915797+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Qc8PR 2025-12-08T18:04:32.424454302+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=e3q1U namespace=kube-public 2025-12-08T18:04:32.424454302+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=e3q1U namespace=kube-public 2025-12-08T18:04:32.424534104+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=uDJ1x namespace=openshift-authentication 2025-12-08T18:04:32.424550525+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=uDJ1x namespace=openshift-authentication 2025-12-08T18:04:32.429436625+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace kube-public" id=e3q1U namespace=kube-public 2025-12-08T18:04:32.429473666+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=r0HNN namespace=openshift-cloud-platform-infra 2025-12-08T18:04:32.429473666+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=r0HNN namespace=openshift-cloud-platform-infra 2025-12-08T18:04:32.429559768+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift-authentication" id=uDJ1x namespace=openshift-authentication 2025-12-08T18:04:32.429635840+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=ekirr namespace=openshift-cluster-storage-operator 2025-12-08T18:04:32.429719012+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=ekirr namespace=openshift-cluster-storage-operator 2025-12-08T18:04:32.432464525+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift-cloud-platform-infra" id=r0HNN namespace=openshift-cloud-platform-infra 2025-12-08T18:04:32.432566947+00:00 stderr F 
time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=v1PL8 namespace=openshift-console 2025-12-08T18:04:32.432598058+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=v1PL8 namespace=openshift-console 2025-12-08T18:04:32.432912387+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift-cluster-storage-operator" id=ekirr namespace=openshift-cluster-storage-operator 2025-12-08T18:04:32.432970228+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=DaWBF namespace=openshift 2025-12-08T18:04:32.433003209+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=DaWBF namespace=openshift 2025-12-08T18:04:32.435273599+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift-console" id=v1PL8 namespace=openshift-console 2025-12-08T18:04:32.435416393+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=v1EGD namespace=openshift-apiserver-operator 2025-12-08T18:04:32.435474805+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=v1EGD namespace=openshift-apiserver-operator 2025-12-08T18:04:32.436915123+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift" id=DaWBF namespace=openshift 2025-12-08T18:04:32.437014306+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=Rvj5x namespace=openshift-cluster-samples-operator 2025-12-08T18:04:32.437066337+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=Rvj5x namespace=openshift-cluster-samples-operator 2025-12-08T18:04:32.438414592+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift-apiserver-operator" id=v1EGD namespace=openshift-apiserver-operator 2025-12-08T18:04:32.438695660+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=YOkv2 namespace=openshift-kube-storage-version-migrator-operator 2025-12-08T18:04:32.438812373+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=YOkv2 namespace=openshift-kube-storage-version-migrator-operator 2025-12-08T18:04:32.439165703+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift-cluster-samples-operator" id=Rvj5x namespace=openshift-cluster-samples-operator 2025-12-08T18:04:32.439270556+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=yM2GB namespace=openshift-nutanix-infra 2025-12-08T18:04:32.439322387+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=yM2GB namespace=openshift-nutanix-infra 2025-12-08T18:04:32.441167145+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift-kube-storage-version-migrator-operator" id=YOkv2 namespace=openshift-kube-storage-version-migrator-operator 2025-12-08T18:04:32.441167145+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=EugXi namespace=openshift-host-network 2025-12-08T18:04:32.441212457+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=EugXi namespace=openshift-host-network 2025-12-08T18:04:32.441436833+00:00 stderr F 
time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift-nutanix-infra" id=yM2GB namespace=openshift-nutanix-infra 2025-12-08T18:04:32.441534335+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=Ql3YG namespace=openshift-machine-config-operator 2025-12-08T18:04:32.441594227+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=Ql3YG namespace=openshift-machine-config-operator 2025-12-08T18:04:32.517857110+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=GOLgo 2025-12-08T18:04:32.517991333+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=GOLgo 2025-12-08T18:04:32.628685069+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift-host-network" id=EugXi namespace=openshift-host-network 2025-12-08T18:04:32.628685069+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=nmuzU namespace=openshift-openstack-infra 2025-12-08T18:04:32.628736120+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=nmuzU namespace=openshift-openstack-infra 2025-12-08T18:04:32.828447357+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="No subscriptions were found in namespace openshift-machine-config-operator" id=Ql3YG namespace=openshift-machine-config-operator 2025-12-08T18:04:32.828447357+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="resolving sources" id=bf3DI namespace=openshift-service-ca-operator 2025-12-08T18:04:32.828487798+00:00 stderr F time="2025-12-08T18:04:32Z" level=info msg="checking if subscriptions need update" id=bf3DI namespace=openshift-service-ca-operator 2025-12-08T18:04:33.029047466+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="No subscriptions were found in namespace openshift-openstack-infra" id=nmuzU namespace=openshift-openstack-infra 2025-12-08T18:04:33.029169080+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="resolving sources" id=qiEuY namespace=openstack-operators 2025-12-08T18:04:33.029201940+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="checking if subscriptions need update" id=qiEuY namespace=openstack-operators 2025-12-08T18:04:33.228563557+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="No subscriptions were found in namespace openshift-service-ca-operator" id=bf3DI namespace=openshift-service-ca-operator 2025-12-08T18:04:33.228749812+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="resolving sources" id=FiW0X namespace=default 2025-12-08T18:04:33.228818134+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="checking if subscriptions need update" id=FiW0X namespace=default 2025-12-08T18:04:33.428138331+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="No subscriptions were found in namespace openstack-operators" id=qiEuY namespace=openstack-operators 2025-12-08T18:04:33.428194912+00:00 stderr F time="2025-12-08T18:04:33Z" 
level=info msg="resolving sources" id=SX2/d namespace=kube-system 2025-12-08T18:04:33.428194912+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="checking if subscriptions need update" id=SX2/d namespace=kube-system 2025-12-08T18:04:33.526282434+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Qc8PR 2025-12-08T18:04:33.526416947+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=Qc8PR 2025-12-08T18:04:33.628130354+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="No subscriptions were found in namespace default" id=FiW0X namespace=default 2025-12-08T18:04:33.628232887+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="resolving sources" id=mWszX namespace=openshift-image-registry 2025-12-08T18:04:33.628258148+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="checking if subscriptions need update" id=mWszX namespace=openshift-image-registry 2025-12-08T18:04:33.716629372+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=GOLgo 2025-12-08T18:04:33.716724464+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=GOLgo 2025-12-08T18:04:33.829145445+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="No subscriptions were found in namespace kube-system" id=SX2/d namespace=kube-system 2025-12-08T18:04:33.829271809+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="resolving sources" id=AsdzO namespace=openshift-ingress-canary 2025-12-08T18:04:33.829302990+00:00 stderr F time="2025-12-08T18:04:33Z" level=info msg="checking if subscriptions need update" id=AsdzO namespace=openshift-ingress-canary 2025-12-08T18:04:34.028190404+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="No subscriptions were found in namespace openshift-image-registry" id=mWszX namespace=openshift-image-registry 2025-12-08T18:04:34.028314427+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="resolving sources" id=uG1QA namespace=openshift-operators 2025-12-08T18:04:34.028350808+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="checking if subscriptions need update" id=uG1QA namespace=openshift-operators 2025-12-08T18:04:34.116679770+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lDPV0 2025-12-08T18:04:34.116773803+00:00 stderr F time="2025-12-08T18:04:34Z" 
level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lDPV0 2025-12-08T18:04:34.228936027+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="No subscriptions were found in namespace openshift-ingress-canary" id=AsdzO namespace=openshift-ingress-canary 2025-12-08T18:04:34.229042821+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="resolving sources" id=RG/L5 namespace=openshift-route-controller-manager 2025-12-08T18:04:34.229073742+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="checking if subscriptions need update" id=RG/L5 namespace=openshift-route-controller-manager 2025-12-08T18:04:34.627940580+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="No subscriptions were found in namespace openshift-route-controller-manager" id=RG/L5 namespace=openshift-route-controller-manager 2025-12-08T18:04:34.627983761+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="resolving sources" id=qgmsS namespace=service-telemetry 2025-12-08T18:04:34.627983761+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="checking if subscriptions need update" id=qgmsS namespace=service-telemetry 2025-12-08T18:04:34.716196830+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lDPV0 2025-12-08T18:04:34.716196830+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=lDPV0 2025-12-08T18:04:34.716257861+00:00 stderr F time="2025-12-08T18:04:34Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-p8pz8 has not yet reported ready" id=lDPV0 2025-12-08T18:04:34.716257861+00:00 stderr F time="2025-12-08T18:04:34Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-p8pz8 has not yet reported ready" id=lDPV0 2025-12-08T18:04:34.716267721+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=lDPV0 2025-12-08T18:04:34.837442726+00:00 stderr F time="2025-12-08T18:04:34Z" level=info msg="resolving subscriptions in namespace" id=uG1QA namespace=openshift-operators 2025-12-08T18:04:35.240041092+00:00 stderr F time="2025-12-08T18:04:35Z" level=info msg="no subscriptions were updated" id=uG1QA namespace=openshift-operators 2025-12-08T18:04:35.316679215+00:00 stderr F time="2025-12-08T18:04:35Z" level=info 
msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=ENJ64 2025-12-08T18:04:35.316679215+00:00 stderr F time="2025-12-08T18:04:35Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=ENJ64 2025-12-08T18:04:35.517765448+00:00 stderr F time="2025-12-08T18:04:35Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8ypPc 2025-12-08T18:04:35.517765448+00:00 stderr F time="2025-12-08T18:04:35Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8ypPc 2025-12-08T18:04:36.033469625+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="resolving sources" id=2WVFh namespace=openshift-config 2025-12-08T18:04:36.033469625+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="checking if subscriptions need update" id=2WVFh namespace=openshift-config 2025-12-08T18:04:36.428556813+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="No subscriptions were found in namespace openshift-config" id=2WVFh namespace=openshift-config 2025-12-08T18:04:36.428556813+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="resolving sources" id=PcOSr namespace=openshift-console-operator 2025-12-08T18:04:36.428556813+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="checking if subscriptions need update" id=PcOSr namespace=openshift-console-operator 2025-12-08T18:04:36.519700589+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=ENJ64 2025-12-08T18:04:36.519700589+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=ENJ64 2025-12-08T18:04:36.632857281+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="resolving subscriptions in namespace" id=qgmsS namespace=service-telemetry 2025-12-08T18:04:36.716924380+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8ypPc 2025-12-08T18:04:36.716924380+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" 
catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8ypPc 2025-12-08T18:04:36.716924380+00:00 stderr F time="2025-12-08T18:04:36Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-p8pz8 has not yet reported ready" id=8ypPc 2025-12-08T18:04:36.716924380+00:00 stderr F time="2025-12-08T18:04:36Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-p8pz8 has not yet reported ready" id=8ypPc 2025-12-08T18:04:36.716924380+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=8ypPc 2025-12-08T18:04:36.829126666+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="No subscriptions were found in namespace openshift-console-operator" id=PcOSr namespace=openshift-console-operator 2025-12-08T18:04:36.829174757+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="resolving sources" id=1XymN namespace=openshift-ingress 2025-12-08T18:04:36.829174757+00:00 stderr F time="2025-12-08T18:04:36Z" level=info msg="checking if subscriptions need update" id=1XymN namespace=openshift-ingress 2025-12-08T18:04:37.115741597+00:00 stderr F time="2025-12-08T18:04:37Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=SAD0M 2025-12-08T18:04:37.115741597+00:00 stderr F time="2025-12-08T18:04:37Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=SAD0M 2025-12-08T18:04:37.181617204+00:00 stderr F time="2025-12-08T18:04:37Z" level=info msg="no subscriptions were updated" id=qgmsS namespace=service-telemetry 2025-12-08T18:04:37.226598437+00:00 stderr F time="2025-12-08T18:04:37Z" level=info msg="No subscriptions were found in namespace openshift-ingress" id=1XymN namespace=openshift-ingress 2025-12-08T18:04:37.226656388+00:00 stderr F time="2025-12-08T18:04:37Z" level=info msg="resolving sources" id=2Tjyk namespace=openshift-monitoring 2025-12-08T18:04:37.226656388+00:00 stderr F time="2025-12-08T18:04:37Z" level=info msg="checking if subscriptions need update" id=2Tjyk namespace=openshift-monitoring 2025-12-08T18:04:37.716387225+00:00 stderr F time="2025-12-08T18:04:37Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=SAD0M 2025-12-08T18:04:37.716387225+00:00 stderr F time="2025-12-08T18:04:37Z" 
level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=SAD0M 2025-12-08T18:04:37.716466977+00:00 stderr F time="2025-12-08T18:04:37Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-p8pz8 has not yet reported ready" id=SAD0M 2025-12-08T18:04:37.716466977+00:00 stderr F time="2025-12-08T18:04:37Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-p8pz8 has not yet reported ready" id=SAD0M 2025-12-08T18:04:37.716466977+00:00 stderr F time="2025-12-08T18:04:37Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=SAD0M 2025-12-08T18:04:38.229549055+00:00 stderr F time="2025-12-08T18:04:38Z" level=info msg="No subscriptions were found in namespace openshift-monitoring" id=2Tjyk namespace=openshift-monitoring 2025-12-08T18:04:38.229600897+00:00 stderr F time="2025-12-08T18:04:38Z" level=info msg="resolving sources" id=UB2Q0 namespace=openshift-network-diagnostics 2025-12-08T18:04:38.229600897+00:00 stderr F time="2025-12-08T18:04:38Z" level=info msg="checking if subscriptions need update" id=UB2Q0 namespace=openshift-network-diagnostics 2025-12-08T18:04:39.031535124+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="resolving sources" id=D23Xn namespace=openshift-authentication-operator 2025-12-08T18:04:39.031535124+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="checking if subscriptions need update" id=D23Xn namespace=openshift-authentication-operator 2025-12-08T18:04:39.228172569+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="No subscriptions were found in namespace openshift-network-diagnostics" id=UB2Q0 namespace=openshift-network-diagnostics 2025-12-08T18:04:39.228172569+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="resolving sources" id=IKZtN namespace=openshift-controller-manager 2025-12-08T18:04:39.228172569+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="checking if subscriptions need update" id=IKZtN namespace=openshift-controller-manager 2025-12-08T18:04:39.440928801+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="No subscriptions were found in namespace openshift-authentication-operator" id=D23Xn namespace=openshift-authentication-operator 2025-12-08T18:04:39.440928801+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="resolving sources" id=i3dbh namespace=openshift-dns 2025-12-08T18:04:39.440928801+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="checking if subscriptions need update" id=i3dbh namespace=openshift-dns 2025-12-08T18:04:39.628021223+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="No subscriptions were found in namespace openshift-controller-manager" id=IKZtN namespace=openshift-controller-manager 2025-12-08T18:04:39.628043244+00:00 stderr F 
time="2025-12-08T18:04:39Z" level=info msg="resolving sources" id=jNYbJ namespace=openshift-kube-controller-manager 2025-12-08T18:04:39.628043244+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="checking if subscriptions need update" id=jNYbJ namespace=openshift-kube-controller-manager 2025-12-08T18:04:39.828214202+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="No subscriptions were found in namespace openshift-dns" id=i3dbh namespace=openshift-dns 2025-12-08T18:04:39.828214202+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="resolving sources" id=wvVy0 namespace=openshift-network-node-identity 2025-12-08T18:04:39.828244892+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="checking if subscriptions need update" id=wvVy0 namespace=openshift-network-node-identity 2025-12-08T18:04:40.028304539+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="No subscriptions were found in namespace openshift-kube-controller-manager" id=jNYbJ namespace=openshift-kube-controller-manager 2025-12-08T18:04:40.028330019+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="resolving sources" id=Xaegh namespace=openstack 2025-12-08T18:04:40.028330019+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="checking if subscriptions need update" id=Xaegh namespace=openstack 2025-12-08T18:04:40.039830934+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=cdd3K 2025-12-08T18:04:40.039830934+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=cdd3K 2025-12-08T18:04:40.040426170+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=0fPIe 2025-12-08T18:04:40.040426170+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=0fPIe 2025-12-08T18:04:40.050870167+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=cdd3K 2025-12-08T18:04:40.050870167+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=cdd3K 2025-12-08T18:04:40.050967350+00:00 stderr F time="2025-12-08T18:04:40Z" level=error 
msg="error ensuring registry server: could not ensure update pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-p8pz8 has not yet reported ready" id=cdd3K 2025-12-08T18:04:40.050967350+00:00 stderr F time="2025-12-08T18:04:40Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace error="catalog polling: certified-operators not ready for update: update pod certified-operators-p8pz8 has not yet reported ready" id=cdd3K 2025-12-08T18:04:40.050967350+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace id=cdd3K 2025-12-08T18:04:40.051951525+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=0fPIe 2025-12-08T18:04:40.051951525+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=0fPIe 2025-12-08T18:04:40.226666018+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="No subscriptions were found in namespace openshift-network-node-identity" id=wvVy0 namespace=openshift-network-node-identity 2025-12-08T18:04:40.226973437+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="resolving sources" id=Uyy5y namespace=cert-manager 2025-12-08T18:04:40.226973437+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="checking if subscriptions need update" id=Uyy5y namespace=cert-manager 2025-12-08T18:04:40.427008932+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="No subscriptions were found in namespace openstack" id=Xaegh namespace=openstack 2025-12-08T18:04:40.427008932+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="resolving sources" id=XPFEy namespace=openshift-apiserver 2025-12-08T18:04:40.427008932+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="checking if subscriptions need update" id=XPFEy namespace=openshift-apiserver 2025-12-08T18:04:40.637212427+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="No subscriptions were found in namespace cert-manager" id=Uyy5y namespace=cert-manager 2025-12-08T18:04:40.637212427+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="resolving sources" id=0SwaQ namespace=openshift-cluster-machine-approver 2025-12-08T18:04:40.637212427+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="checking if subscriptions need update" id=0SwaQ namespace=openshift-cluster-machine-approver 2025-12-08T18:04:40.638865841+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=x+ENl 2025-12-08T18:04:40.638865841+00:00 stderr F 
time="2025-12-08T18:04:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=x+ENl 2025-12-08T18:04:40.832215759+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="No subscriptions were found in namespace openshift-apiserver" id=XPFEy namespace=openshift-apiserver 2025-12-08T18:04:40.832215759+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="resolving sources" id=tAPMd namespace=openshift-marketplace 2025-12-08T18:04:40.832215759+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="checking if subscriptions need update" id=tAPMd namespace=openshift-marketplace 2025-12-08T18:04:40.838059594+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=rX8kx 2025-12-08T18:04:40.838059594+00:00 stderr F time="2025-12-08T18:04:40Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=rX8kx 2025-12-08T18:04:41.029199272+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="No subscriptions were found in namespace openshift-cluster-machine-approver" id=0SwaQ namespace=openshift-cluster-machine-approver 2025-12-08T18:04:41.029269644+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="resolving sources" id=+DYnm namespace=openshift-node 2025-12-08T18:04:41.029269644+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="checking if subscriptions need update" id=+DYnm namespace=openshift-node 2025-12-08T18:04:41.229622108+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="No subscriptions were found in namespace openshift-marketplace" id=tAPMd namespace=openshift-marketplace 2025-12-08T18:04:41.229622108+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="resolving sources" id=y2Y22 namespace=openshift-controller-manager-operator 2025-12-08T18:04:41.229622108+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="checking if subscriptions need update" id=y2Y22 namespace=openshift-controller-manager-operator 2025-12-08T18:04:41.432771825+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="No subscriptions were found in namespace openshift-node" id=+DYnm namespace=openshift-node 2025-12-08T18:04:41.432771825+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="resolving sources" id=Ps+Az namespace=openshift-machine-api 2025-12-08T18:04:41.432771825+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="checking if subscriptions need update" id=Ps+Az namespace=openshift-machine-api 2025-12-08T18:04:41.627911751+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="No subscriptions were found in namespace openshift-controller-manager-operator" id=y2Y22 namespace=openshift-controller-manager-operator 2025-12-08T18:04:41.627911751+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="resolving sources" id=zLppp namespace=openshift-kube-scheduler-operator 2025-12-08T18:04:41.627911751+00:00 stderr F 
time="2025-12-08T18:04:41Z" level=info msg="checking if subscriptions need update" id=zLppp namespace=openshift-kube-scheduler-operator 2025-12-08T18:04:41.828011287+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="No subscriptions were found in namespace openshift-machine-api" id=Ps+Az namespace=openshift-machine-api 2025-12-08T18:04:41.828011287+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="resolving sources" id=QTaB2 namespace=openshift-etcd-operator 2025-12-08T18:04:41.828156041+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="checking if subscriptions need update" id=QTaB2 namespace=openshift-etcd-operator 2025-12-08T18:04:41.837200500+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="evaluating current pod" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=x+ENl 2025-12-08T18:04:41.837200500+00:00 stderr F time="2025-12-08T18:04:41Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=community-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=community-operators-zdvxg current-pod.namespace=openshift-marketplace id=x+ENl 2025-12-08T18:04:42.030916198+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="No subscriptions were found in namespace openshift-kube-scheduler-operator" id=zLppp namespace=openshift-kube-scheduler-operator 2025-12-08T18:04:42.030947719+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="resolving sources" id=t7PBh namespace=openshift-config-managed 2025-12-08T18:04:42.030982900+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="checking if subscriptions need update" id=t7PBh namespace=openshift-config-managed 2025-12-08T18:04:42.037842242+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=rX8kx 2025-12-08T18:04:42.037842242+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=rX8kx 2025-12-08T18:04:42.037919044+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="catalog update required at 2025-12-08 18:04:42.037869312 +0000 UTC m=+1222.000303794" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=rX8kx 2025-12-08T18:04:42.227833071+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="No subscriptions were found in namespace openshift-etcd-operator" id=QTaB2 namespace=openshift-etcd-operator 2025-12-08T18:04:42.227833071+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="resolving sources" id=2eYsa namespace=openshift-infra 2025-12-08T18:04:42.227865542+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="checking if subscriptions need update" id=2eYsa namespace=openshift-infra 2025-12-08T18:04:42.427415813+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="No subscriptions were found in namespace openshift-config-managed" id=t7PBh 
namespace=openshift-config-managed 2025-12-08T18:04:42.427415813+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="resolving sources" id=S2Smt namespace=kube-node-lease 2025-12-08T18:04:42.427455394+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="checking if subscriptions need update" id=S2Smt namespace=kube-node-lease 2025-12-08T18:04:42.443992573+00:00 stderr F I1208 18:04:42.443846 1 warnings.go:110] "Warning: spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T18:04:42.444086205+00:00 stderr F time="2025-12-08T18:04:42Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=rX8kx 2025-12-08T18:04:42.444086205+00:00 stderr F time="2025-12-08T18:04:42Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=rX8kx 2025-12-08T18:04:42.444086205+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=rX8kx 2025-12-08T18:04:42.629279467+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="No subscriptions were found in namespace openshift-infra" id=2eYsa namespace=openshift-infra 2025-12-08T18:04:42.629320018+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="resolving sources" id=L1Vjs namespace=openshift-dns-operator 2025-12-08T18:04:42.629320018+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="checking if subscriptions need update" id=L1Vjs namespace=openshift-dns-operator 2025-12-08T18:04:42.639036046+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=m6XNu 2025-12-08T18:04:42.639036046+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=m6XNu 2025-12-08T18:04:42.830866003+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="No subscriptions were found in namespace kube-node-lease" id=S2Smt namespace=kube-node-lease 2025-12-08T18:04:42.831075928+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="resolving sources" id=OVT3s namespace=openshift-etcd 2025-12-08T18:04:42.831142830+00:00 stderr F time="2025-12-08T18:04:42Z" level=info msg="checking if subscriptions need update" id=OVT3s namespace=openshift-etcd 2025-12-08T18:04:43.230171553+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="No subscriptions were found in namespace openshift-dns-operator" id=L1Vjs namespace=openshift-dns-operator 2025-12-08T18:04:43.230171553+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="resolving 
sources" id=jcGk3 namespace=openshift-multus 2025-12-08T18:04:43.230171553+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="checking if subscriptions need update" id=jcGk3 namespace=openshift-multus 2025-12-08T18:04:43.428948715+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="No subscriptions were found in namespace openshift-etcd" id=OVT3s namespace=openshift-etcd 2025-12-08T18:04:43.428948715+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="resolving sources" id=vy6CN namespace=openshift-kni-infra 2025-12-08T18:04:43.428948715+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="checking if subscriptions need update" id=vy6CN namespace=openshift-kni-infra 2025-12-08T18:04:43.438330913+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=m6XNu 2025-12-08T18:04:43.438330913+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=m6XNu 2025-12-08T18:04:43.628574788+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="No subscriptions were found in namespace openshift-multus" id=jcGk3 namespace=openshift-multus 2025-12-08T18:04:43.628574788+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="resolving sources" id=OpYRL namespace=openshift-kube-storage-version-migrator 2025-12-08T18:04:43.628574788+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="checking if subscriptions need update" id=OpYRL namespace=openshift-kube-storage-version-migrator 2025-12-08T18:04:43.641133731+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=5gl5/ 2025-12-08T18:04:43.641133731+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=5gl5/ 2025-12-08T18:04:43.829172448+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="No subscriptions were found in namespace openshift-kni-infra" id=vy6CN namespace=openshift-kni-infra 2025-12-08T18:04:43.829196389+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="resolving sources" id=rN0Kd namespace=openshift-oauth-apiserver 2025-12-08T18:04:43.829196389+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="checking if subscriptions need update" id=rN0Kd namespace=openshift-oauth-apiserver 2025-12-08T18:04:44.027357124+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="No subscriptions were found in namespace openshift-kube-storage-version-migrator" id=OpYRL namespace=openshift-kube-storage-version-migrator 2025-12-08T18:04:44.027357124+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="resolving sources" id=ao+dv 
namespace=openshift-cluster-version 2025-12-08T18:04:44.027357124+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="checking if subscriptions need update" id=ao+dv namespace=openshift-cluster-version 2025-12-08T18:04:44.230133522+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="No subscriptions were found in namespace openshift-oauth-apiserver" id=rN0Kd namespace=openshift-oauth-apiserver 2025-12-08T18:04:44.230133522+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="resolving sources" id=R4ovn namespace=openshift-kube-scheduler 2025-12-08T18:04:44.230133522+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="checking if subscriptions need update" id=R4ovn namespace=openshift-kube-scheduler 2025-12-08T18:04:44.241107023+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=T08r7 2025-12-08T18:04:44.241107023+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=T08r7 2025-12-08T18:04:44.428388679+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="No subscriptions were found in namespace openshift-cluster-version" id=ao+dv namespace=openshift-cluster-version 2025-12-08T18:04:44.428490412+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="resolving sources" id=mUH2z namespace=openshift-operator-lifecycle-manager 2025-12-08T18:04:44.428516043+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="checking if subscriptions need update" id=mUH2z namespace=openshift-operator-lifecycle-manager 2025-12-08T18:04:44.631439205+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="No subscriptions were found in namespace openshift-kube-scheduler" id=R4ovn namespace=openshift-kube-scheduler 2025-12-08T18:04:44.631439205+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="resolving sources" id=hUCbE namespace=openshift-kube-apiserver 2025-12-08T18:04:44.631439205+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="checking if subscriptions need update" id=hUCbE namespace=openshift-kube-apiserver 2025-12-08T18:04:44.839954094+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="No subscriptions were found in namespace openshift-operator-lifecycle-manager" id=mUH2z namespace=openshift-operator-lifecycle-manager 2025-12-08T18:04:44.839954094+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="resolving sources" id=+iQAz namespace=openshift-network-console 2025-12-08T18:04:44.839954094+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="checking if subscriptions need update" id=+iQAz namespace=openshift-network-console 2025-12-08T18:04:44.846273932+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=5gl5/ 2025-12-08T18:04:44.846273932+00:00 stderr F time="2025-12-08T18:04:44Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and 
matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=5gl5/ 2025-12-08T18:04:45.029534372+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="No subscriptions were found in namespace openshift-kube-apiserver" id=hUCbE namespace=openshift-kube-apiserver 2025-12-08T18:04:45.029688416+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="resolving sources" id=WsfVQ namespace=openshift-network-operator 2025-12-08T18:04:45.029740438+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="checking if subscriptions need update" id=WsfVQ namespace=openshift-network-operator 2025-12-08T18:04:45.228138750+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="No subscriptions were found in namespace openshift-network-console" id=+iQAz namespace=openshift-network-console 2025-12-08T18:04:45.228138750+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="resolving sources" id=M99ih namespace=openshift-user-workload-monitoring 2025-12-08T18:04:45.228138750+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="checking if subscriptions need update" id=M99ih namespace=openshift-user-workload-monitoring 2025-12-08T18:04:45.430425874+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="No subscriptions were found in namespace openshift-network-operator" id=WsfVQ namespace=openshift-network-operator 2025-12-08T18:04:45.430425874+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="resolving sources" id=cXctB namespace=openshift-config-operator 2025-12-08T18:04:45.430425874+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="checking if subscriptions need update" id=cXctB namespace=openshift-config-operator 2025-12-08T18:04:45.437399620+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=T08r7 2025-12-08T18:04:45.437399620+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=T08r7 2025-12-08T18:04:45.437459451+00:00 stderr F time="2025-12-08T18:04:45Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=T08r7 2025-12-08T18:04:45.437459451+00:00 stderr F time="2025-12-08T18:04:45Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=T08r7 2025-12-08T18:04:45.437459451+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace 
id=T08r7 2025-12-08T18:04:45.628277122+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="No subscriptions were found in namespace openshift-user-workload-monitoring" id=M99ih namespace=openshift-user-workload-monitoring 2025-12-08T18:04:45.628277122+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="resolving sources" id=Ywaxq namespace=openshift-ingress-operator 2025-12-08T18:04:45.628277122+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="checking if subscriptions need update" id=Ywaxq namespace=openshift-ingress-operator 2025-12-08T18:04:45.828709517+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="No subscriptions were found in namespace openshift-config-operator" id=cXctB namespace=openshift-config-operator 2025-12-08T18:04:45.828709517+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="resolving sources" id=98WBm namespace=openshift-kube-apiserver-operator 2025-12-08T18:04:45.828709517+00:00 stderr F time="2025-12-08T18:04:45Z" level=info msg="checking if subscriptions need update" id=98WBm namespace=openshift-kube-apiserver-operator 2025-12-08T18:04:46.028005622+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="No subscriptions were found in namespace openshift-ingress-operator" id=Ywaxq namespace=openshift-ingress-operator 2025-12-08T18:04:46.028047883+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="resolving sources" id=eoTp3 namespace=openshift-vsphere-infra 2025-12-08T18:04:46.028047883+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="checking if subscriptions need update" id=eoTp3 namespace=openshift-vsphere-infra 2025-12-08T18:04:46.038330596+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=eHZn1 2025-12-08T18:04:46.038330596+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=eHZn1 2025-12-08T18:04:46.229325421+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="No subscriptions were found in namespace openshift-kube-apiserver-operator" id=98WBm namespace=openshift-kube-apiserver-operator 2025-12-08T18:04:46.229359552+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="resolving sources" id=cSwYm namespace=openshift-must-gather-gctth 2025-12-08T18:04:46.229359552+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="checking if subscriptions need update" id=cSwYm namespace=openshift-must-gather-gctth 2025-12-08T18:04:46.238678280+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=3bNUk 2025-12-08T18:04:46.238678280+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l 
current-pod.namespace=openshift-marketplace id=3bNUk 2025-12-08T18:04:46.428539914+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="No subscriptions were found in namespace openshift-vsphere-infra" id=eoTp3 namespace=openshift-vsphere-infra 2025-12-08T18:04:46.428539914+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="resolving sources" id=A4TFR namespace=openshift-cloud-network-config-controller 2025-12-08T18:04:46.428539914+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="checking if subscriptions need update" id=A4TFR namespace=openshift-cloud-network-config-controller 2025-12-08T18:04:46.633702015+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="No subscriptions were found in namespace openshift-must-gather-gctth" id=cSwYm namespace=openshift-must-gather-gctth 2025-12-08T18:04:46.633702015+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="resolving sources" id=okqnj namespace=openshift-ovirt-infra 2025-12-08T18:04:46.633702015+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="checking if subscriptions need update" id=okqnj namespace=openshift-ovirt-infra 2025-12-08T18:04:46.828030459+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="No subscriptions were found in namespace openshift-cloud-network-config-controller" id=A4TFR namespace=openshift-cloud-network-config-controller 2025-12-08T18:04:46.828064980+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="resolving sources" id=3303s namespace=cert-manager-operator 2025-12-08T18:04:46.828064980+00:00 stderr F time="2025-12-08T18:04:46Z" level=info msg="checking if subscriptions need update" id=3303s namespace=cert-manager-operator 2025-12-08T18:04:47.027775896+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="No subscriptions were found in namespace openshift-ovirt-infra" id=okqnj namespace=openshift-ovirt-infra 2025-12-08T18:04:47.027775896+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="resolving sources" id=ZqZeo namespace=hostpath-provisioner 2025-12-08T18:04:47.027811276+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="checking if subscriptions need update" id=ZqZeo namespace=hostpath-provisioner 2025-12-08T18:04:47.238591407+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="evaluating current pod" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=eHZn1 2025-12-08T18:04:47.238591407+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=infrawatch-operators catalogsource.namespace=service-telemetry correctHash=true correctImages=true current-pod.name=infrawatch-operators-tv99j current-pod.namespace=service-telemetry id=eHZn1 2025-12-08T18:04:47.428239226+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="No subscriptions were found in namespace hostpath-provisioner" id=ZqZeo namespace=hostpath-provisioner 2025-12-08T18:04:47.428239226+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="resolving sources" id=N/Mh7 namespace=openshift-console-user-settings 2025-12-08T18:04:47.428239226+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="checking if subscriptions need update" id=N/Mh7 namespace=openshift-console-user-settings 2025-12-08T18:04:47.438741615+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="evaluating current pod" 
catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=3bNUk 2025-12-08T18:04:47.438741615+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=3bNUk 2025-12-08T18:04:47.633958262+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="resolving subscriptions in namespace" id=3303s namespace=cert-manager-operator 2025-12-08T18:04:47.827857204+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="No subscriptions were found in namespace openshift-console-user-settings" id=N/Mh7 namespace=openshift-console-user-settings 2025-12-08T18:04:47.827857204+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="resolving sources" id=0XLTG namespace=openshift-ovn-kubernetes 2025-12-08T18:04:47.827857204+00:00 stderr F time="2025-12-08T18:04:47Z" level=info msg="checking if subscriptions need update" id=0XLTG namespace=openshift-ovn-kubernetes 2025-12-08T18:04:48.038718226+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=E9771 2025-12-08T18:04:48.038718226+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=E9771 2025-12-08T18:04:48.046361499+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="no subscriptions were updated" id=3303s namespace=cert-manager-operator 2025-12-08T18:04:48.227414950+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="No subscriptions were found in namespace openshift-ovn-kubernetes" id=0XLTG namespace=openshift-ovn-kubernetes 2025-12-08T18:04:48.227414950+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="resolving sources" id=ZLVTs namespace=openshift-service-ca 2025-12-08T18:04:48.227414950+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="checking if subscriptions need update" id=ZLVTs namespace=openshift-service-ca 2025-12-08T18:04:48.237807836+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Rm4J7 2025-12-08T18:04:48.237807836+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Rm4J7 2025-12-08T18:04:48.627775298+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="No subscriptions were found in namespace 
openshift-service-ca" id=ZLVTs namespace=openshift-service-ca 2025-12-08T18:04:48.627775298+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="resolving sources" id=5PnEg namespace=openshift-kube-controller-manager-operator 2025-12-08T18:04:48.627775298+00:00 stderr F time="2025-12-08T18:04:48Z" level=info msg="checking if subscriptions need update" id=5PnEg namespace=openshift-kube-controller-manager-operator 2025-12-08T18:04:49.027765116+00:00 stderr F time="2025-12-08T18:04:49Z" level=info msg="No subscriptions were found in namespace openshift-kube-controller-manager-operator" id=5PnEg namespace=openshift-kube-controller-manager-operator 2025-12-08T18:04:49.238384371+00:00 stderr F time="2025-12-08T18:04:49Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=E9771 2025-12-08T18:04:49.238384371+00:00 stderr F time="2025-12-08T18:04:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=E9771 2025-12-08T18:04:49.238465113+00:00 stderr F time="2025-12-08T18:04:49Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=E9771 2025-12-08T18:04:49.238465113+00:00 stderr F time="2025-12-08T18:04:49Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=E9771 2025-12-08T18:04:49.238474833+00:00 stderr F time="2025-12-08T18:04:49Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=E9771 2025-12-08T18:04:49.438583751+00:00 stderr F time="2025-12-08T18:04:49Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Rm4J7 2025-12-08T18:04:49.438583751+00:00 stderr F time="2025-12-08T18:04:49Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=Rm4J7 2025-12-08T18:04:49.838799985+00:00 stderr F time="2025-12-08T18:04:49Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8cMow 2025-12-08T18:04:49.838799985+00:00 stderr F time="2025-12-08T18:04:49Z" level=info msg="of 1 pods matching label selector, 
1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8cMow 2025-12-08T18:04:50.438502039+00:00 stderr F time="2025-12-08T18:04:50Z" level=info msg="evaluating current pod" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8cMow 2025-12-08T18:04:50.438502039+00:00 stderr F time="2025-12-08T18:04:50Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=certified-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=certified-operators-58d6l current-pod.namespace=openshift-marketplace id=8cMow 2025-12-08T18:04:51.082024005+00:00 stderr F time="2025-12-08T18:04:51Z" level=info msg="resolving sources" id=FcSHG namespace=openshift-must-gather-gctth 2025-12-08T18:04:51.082024005+00:00 stderr F time="2025-12-08T18:04:51Z" level=info msg="checking if subscriptions need update" id=FcSHG namespace=openshift-must-gather-gctth 2025-12-08T18:04:51.085983710+00:00 stderr F time="2025-12-08T18:04:51Z" level=info msg="No subscriptions were found in namespace openshift-must-gather-gctth" id=FcSHG namespace=openshift-must-gather-gctth 2025-12-08T18:04:51.670554853+00:00 stderr F time="2025-12-08T18:04:51Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=/nF0c 2025-12-08T18:04:51.670554853+00:00 stderr F time="2025-12-08T18:04:51Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=/nF0c 2025-12-08T18:04:51.681610177+00:00 stderr F time="2025-12-08T18:04:51Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=/nF0c 2025-12-08T18:04:51.681610177+00:00 stderr F time="2025-12-08T18:04:51Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=/nF0c 2025-12-08T18:04:51.681610177+00:00 stderr F time="2025-12-08T18:04:51Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=/nF0c 2025-12-08T18:04:51.681610177+00:00 stderr F time="2025-12-08T18:04:51Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace 
error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=/nF0c 2025-12-08T18:04:51.681610177+00:00 stderr F time="2025-12-08T18:04:51Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=/nF0c 2025-12-08T18:04:52.683284531+00:00 stderr F time="2025-12-08T18:04:52Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=ZmJ18 2025-12-08T18:04:52.683284531+00:00 stderr F time="2025-12-08T18:04:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=ZmJ18 2025-12-08T18:04:52.693697937+00:00 stderr F time="2025-12-08T18:04:52Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=ZmJ18 2025-12-08T18:04:52.693697937+00:00 stderr F time="2025-12-08T18:04:52Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=ZmJ18 2025-12-08T18:04:52.693787220+00:00 stderr F time="2025-12-08T18:04:52Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=ZmJ18 2025-12-08T18:04:52.693787220+00:00 stderr F time="2025-12-08T18:04:52Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=ZmJ18 2025-12-08T18:04:52.693798530+00:00 stderr F time="2025-12-08T18:04:52Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=ZmJ18 2025-12-08T18:04:53.695563036+00:00 stderr F time="2025-12-08T18:04:53Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=m1Ghs 2025-12-08T18:04:53.695563036+00:00 stderr F time="2025-12-08T18:04:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=m1Ghs 
2025-12-08T18:04:53.706709282+00:00 stderr F time="2025-12-08T18:04:53Z" level=info msg="evaluating current pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=m1Ghs 2025-12-08T18:04:53.706709282+00:00 stderr F time="2025-12-08T18:04:53Z" level=info msg="of 1 pods matching label selector, 1 have the correct images and matching hash" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace correctHash=true correctImages=true current-pod.name=redhat-operators-xpnf9 current-pod.namespace=openshift-marketplace id=m1Ghs 2025-12-08T18:04:53.706709282+00:00 stderr F time="2025-12-08T18:04:53Z" level=error msg="error ensuring registry server: could not ensure update pod" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=m1Ghs 2025-12-08T18:04:53.706709282+00:00 stderr F time="2025-12-08T18:04:53Z" level=error msg="error ensuring registry server: ensure update pod error is not of type UpdateNotReadyErr" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace error="catalog polling: redhat-operators not ready for update: update pod redhat-operators-5gtms has not yet reported ready" id=m1Ghs 2025-12-08T18:04:53.706709282+00:00 stderr F time="2025-12-08T18:04:53Z" level=info msg="requeueing registry server for catalog update check: update pod not yet ready" catalogsource.name=redhat-operators catalogsource.namespace=openshift-marketplace id=m1Ghs 2025-12-08T18:04:56.891813112+00:00 stderr F time="2025-12-08T18:04:56Z" level=info msg="resolving sources" id=bpAZQ namespace=openshift-must-gather-gctth 2025-12-08T18:04:56.891813112+00:00 stderr F time="2025-12-08T18:04:56Z" level=info msg="checking if subscriptions need update" id=bpAZQ namespace=openshift-must-gather-gctth 2025-12-08T18:04:56.982017755+00:00 stderr F time="2025-12-08T18:04:56Z" level=info msg="No subscriptions were found in namespace openshift-must-gather-gctth" id=bpAZQ namespace=openshift-must-gather-gctth ././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611513033101 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/oauth-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611520033077 5ustar zuulzuul././@LongLink0000644000000000000000000000031700000000000011604 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/oauth-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000271115115611513033104 0ustar zuulzuul2025-12-08T17:58:12.584939762+00:00 stderr F 2025/12/08 17:58:12 provider.go:129: Defaulting client-id to system:serviceaccount:service-telemetry:smart-gateway 2025-12-08T17:58:12.584939762+00:00 stderr F 2025/12/08 17:58:12 provider.go:134: Defaulting client-secret to service account token /var/run/secrets/kubernetes.io/serviceaccount/token 2025-12-08T17:58:12.584939762+00:00 stderr F 2025/12/08 17:58:12 provider.go:358: Delegation of authentication and authorization to OpenShift is enabled for bearer tokens and client certificates. 2025-12-08T17:58:12.602968718+00:00 stderr F 2025/12/08 17:58:12 oauthproxy.go:210: mapping path "/" => upstream "http://localhost:8081/" 2025-12-08T17:58:12.602968718+00:00 stderr F 2025/12/08 17:58:12 oauthproxy.go:237: OAuthProxy configured for Client ID: system:serviceaccount:service-telemetry:smart-gateway 2025-12-08T17:58:12.602968718+00:00 stderr F 2025/12/08 17:58:12 oauthproxy.go:247: Cookie settings: name:_oauth_proxy secure(https):true httponly:true expiry:168h0m0s domain: samesite: refresh:disabled 2025-12-08T17:58:12.604788775+00:00 stderr F 2025/12/08 17:58:12 http.go:64: HTTP: listening on 127.0.0.1:4180 2025-12-08T17:58:12.605104674+00:00 stderr F 2025/12/08 17:58:12 http.go:110: HTTPS: listening on [::]:8083 2025-12-08T17:58:12.605163145+00:00 stderr F I1208 17:58:12.605142 1 dynamic_serving_content.go:135] "Starting controller" name="serving::/etc/tls/private/tls.crt::/etc/tls/private/tls.key" ././@LongLink0000644000000000000000000000030500000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/bridge/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611520033077 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/bridge/1.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000112315115611513033100 0ustar zuulzuul2025-12-08T17:58:36.058138882+00:00 stdout F bridge-ce ==> (/tmp/smartgateway) 2025-12-08T17:58:36.068625393+00:00 stderr F PN_TRANSPORT_CLOSED: proton:io: Connection refused - disconnected default-interconnect.service-telemetry.svc.cluster.local:5673 2025-12-08T17:58:36.068625393+00:00 stderr F Exit AMQP RCV thread... 2025-12-08T17:58:37.059076802+00:00 stdout F Joining amqp_rcv_th... 2025-12-08T17:58:37.059076802+00:00 stdout F Cancel socket_snd_th... 2025-12-08T17:58:37.059076802+00:00 stdout F Joining socket_snd_th... 2025-12-08T17:58:37.060023056+00:00 stderr F Exit SOCKET thread... 
././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/bridge/2.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000202315115611513033100 0ustar zuulzuul2025-12-08T17:58:51.231103799+00:00 stdout F bridge-ee ==> (/tmp/smartgateway) 2025-12-08T17:58:51.237683852+00:00 stdout F bridge-ee ==> (amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/anycast/ceilometer/cloud1-metering.sample) 2025-12-08T17:59:51.303293686+00:00 stdout F in: 3(0), amqp_overrun: 0(0), out: 3(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:00:50.316933550+00:00 stdout F in: 3(0), amqp_overrun: 0(0), out: 3(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:01:49.323932861+00:00 stdout F in: 3(0), amqp_overrun: 0(0), out: 3(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:02:48.330718573+00:00 stdout F in: 3(0), amqp_overrun: 0(0), out: 3(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:03:47.349034186+00:00 stdout F in: 3(0), amqp_overrun: 0(0), out: 3(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:04:46.380378907+00:00 stdout F in: 3(0), amqp_overrun: 0(0), out: 3(0), sock_overrun: 0(0), link_credit_average: -nan ././@LongLink0000644000000000000000000000030600000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/sg-core/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611520033077 5ustar zuulzuul././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/sg-core/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000250015115611513033100 0ustar zuulzuul2025-12-08T17:58:34.516087066+00:00 stdout F 2025-12-08 17:58:34 [INFO] initialized handler [transport pair: socket0, handler: ceilometer-metrics] 2025-12-08T17:58:34.516087066+00:00 stdout F 2025-12-08 17:58:34 [INFO] loaded transport [transport: socket0] 2025-12-08T17:58:34.530797176+00:00 stdout F 2025-12-08 17:58:34 [INFO] loaded application plugin [application: prometheus] 2025-12-08T17:58:34.531065863+00:00 stdout F 2025-12-08 17:58:34 [INFO] metric server at : 127.0.0.1:8081 [plugin: Prometheus] 2025-12-08T17:58:34.531511106+00:00 stdout F 2025-12-08 17:58:34 [INFO] socket listening on /tmp/smartgateway [plugin: socket] 2025-12-08T17:58:35.535629277+00:00 stdout F 2025-12-08 17:58:35 [INFO] registered collector tracking metrics with 1 label [plugin: Prometheus] 2025-12-08T17:58:35.535683669+00:00 stdout F 2025-12-08 17:58:35 [INFO] registered expiry process for metrics with interval 0s [plugin: Prometheus] 2025-12-08T17:59:31.026299783+00:00 stdout F 2025-12-08 17:59:31 [INFO] registered collector tracking metrics with 9 labels [plugin: Prometheus] 2025-12-08T17:59:31.026299783+00:00 stdout F 2025-12-08 17:59:31 [INFO] registered expiry process for metrics 
with interval 100s [plugin: Prometheus] 2025-12-08T18:02:58.552519092+00:00 stdout F 2025-12-08 18:02:58 [WARN] prometheus collector expired [plugin: Prometheus] ././@LongLink0000644000000000000000000000023300000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prom0000755000175000017500000000000015115611514033160 5ustar zuulzuul././@LongLink0000644000000000000000000000025300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/config-reloader/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prom0000755000175000017500000000000015115611521033156 5ustar zuulzuul././@LongLink0000644000000000000000000000026000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/config-reloader/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prom0000644000175000017500000000451715115611514033171 0ustar zuulzuul2025-12-08T17:58:09.609574662+00:00 stdout F ts=2025-12-08T17:58:09.6094999Z level=info caller=/workspace/cmd/prometheus-config-reloader/main.go:148 msg="Starting prometheus-config-reloader" version="(version=1.24, branch=, revision=unknown)" build_context="(go=go1.24.6 (Red Hat 1.24.6-1.el9_6), platform=linux/amd64, user=, date=20251110-21:03:26, tags=unknown)" 2025-12-08T17:58:09.610193657+00:00 stdout F ts=2025-12-08T17:58:09.610165917Z level=info caller=/workspace/internal/goruntime/cpu.go:27 msg="Updating GOMAXPROCS=1: using minimum allowed GOMAXPROCS" 2025-12-08T17:58:10.016051267+00:00 stdout F level=info ts=2025-12-08T17:58:10.015941374Z caller=reloader.go:282 msg="reloading via HTTP" 2025-12-08T17:58:10.709781128+00:00 stdout F ts=2025-12-08T17:58:10.709232264Z level=info caller=/workspace/cmd/prometheus-config-reloader/main.go:202 msg="Starting web server for metrics" listen=localhost:8080 2025-12-08T17:58:10.810569752+00:00 stdout F ts=2025-12-08T17:58:10.8104887Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/github.com/prometheus/exporter-toolkit@v0.14.1/web/tls_config.go:346 msg="Listening on" address=127.0.0.1:8080 2025-12-08T17:58:10.810569752+00:00 stdout F ts=2025-12-08T17:58:10.810531292Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/github.com/prometheus/exporter-toolkit@v0.14.1/web/tls_config.go:349 msg="TLS is disabled." 
http2=false address=127.0.0.1:8080 2025-12-08T17:58:10.909193631+00:00 stdout F level=info ts=2025-12-08T17:58:10.909101889Z caller=reloader.go:548 msg="Reload triggered" cfg_in=/etc/prometheus/config/prometheus.yaml.gz cfg_out=/etc/prometheus/config_out/prometheus.env.yaml cfg_dirs= watched_dirs=/etc/prometheus/rules/prometheus-default-rulefiles-0 2025-12-08T17:58:10.909389266+00:00 stdout F level=info ts=2025-12-08T17:58:10.909183361Z caller=reloader.go:330 msg="started watching config file and directories for changes" cfg=/etc/prometheus/config/prometheus.yaml.gz cfgDirs= out=/etc/prometheus/config_out/prometheus.env.yaml dirs=/etc/prometheus/rules/prometheus-default-rulefiles-0 2025-12-08T17:58:12.186274718+00:00 stdout F level=info ts=2025-12-08T17:58:12.185973861Z caller=reloader.go:548 msg="Reload triggered" cfg_in=/etc/prometheus/config/prometheus.yaml.gz cfg_out=/etc/prometheus/config_out/prometheus.env.yaml cfg_dirs= watched_dirs=/etc/prometheus/rules/prometheus-default-rulefiles-0 ././@LongLink0000644000000000000000000000024600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/prometheus/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prom0000755000175000017500000000000015115611521033156 5ustar zuulzuul././@LongLink0000644000000000000000000000025300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/prometheus/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prom0000644000175000017500000001437515115611514033174 0ustar zuulzuul2025-12-08T17:58:01.602742190+00:00 stderr F time=2025-12-08T17:58:01.602Z level=INFO source=main.go:1557 msg="updated GOGC" old=100 new=75 2025-12-08T17:58:01.610196173+00:00 stderr F time=2025-12-08T17:58:01.610Z level=INFO source=main.go:688 msg="Leaving GOMAXPROCS=12: CPU quota undefined" component=automaxprocs 2025-12-08T17:58:01.610981563+00:00 stderr F time=2025-12-08T17:58:01.610Z level=INFO source=memlimit.go:198 msg="GOMEMLIMIT is updated" component=automemlimit package=github.com/KimMachineGun/automemlimit/memlimit GOMEMLIMIT=30284937216 previous=9223372036854775807 2025-12-08T17:58:01.611036294+00:00 stderr F time=2025-12-08T17:58:01.610Z level=INFO source=main.go:781 msg="Starting Prometheus Server" mode=server version="(version=3.8.0, branch=HEAD, revision=e44ed351cdf0181f9fde56ba096f4d949f9e295d)" 2025-12-08T17:58:01.611103626+00:00 stderr F time=2025-12-08T17:58:01.611Z level=INFO source=main.go:786 msg="operational information" build_context="(go=go1.25.4, platform=linux/amd64, user=root@e0c39c41863e, date=20251202-09:08:25, tags=netgo,builtinassets)" host_details="(Linux 5.14.0-570.57.1.el9_6.x86_64 #1 SMP PREEMPT_DYNAMIC Sun Oct 19 22:05:48 EDT 2025 x86_64 prometheus-default-0 (none))" fd_limits="(soft=1048576, hard=1048576)" vm_limits="(soft=unlimited, hard=unlimited)" 2025-12-08T17:58:01.622358797+00:00 stderr F time=2025-12-08T17:58:01.622Z level=INFO source=web.go:663 msg="Start listening for connections" component=web address=127.0.0.1:9090 2025-12-08T17:58:01.623833365+00:00 stderr F time=2025-12-08T17:58:01.623Z level=INFO source=main.go:1301 msg="Starting TSDB ..." 
2025-12-08T17:58:01.627076299+00:00 stderr F time=2025-12-08T17:58:01.626Z level=INFO source=tls_config.go:354 msg="Listening on" component=web address=127.0.0.1:9090 2025-12-08T17:58:01.627180072+00:00 stderr F time=2025-12-08T17:58:01.627Z level=INFO source=tls_config.go:400 msg="TLS is disabled." component=web http2=false address=127.0.0.1:9090 2025-12-08T17:58:01.634147841+00:00 stderr F time=2025-12-08T17:58:01.633Z level=INFO source=head.go:666 msg="Replaying on-disk memory mappable chunks if any" component=tsdb 2025-12-08T17:58:01.634147841+00:00 stderr F time=2025-12-08T17:58:01.634Z level=INFO source=head.go:752 msg="On-disk memory mappable chunks replay completed" component=tsdb duration=9.451µs 2025-12-08T17:58:01.634147841+00:00 stderr F time=2025-12-08T17:58:01.634Z level=INFO source=head.go:760 msg="Replaying WAL, this may take a while" component=tsdb 2025-12-08T17:58:01.635079466+00:00 stderr F time=2025-12-08T17:58:01.635Z level=INFO source=head.go:833 msg="WAL segment loaded" component=tsdb segment=0 maxSegment=0 duration=862.073µs 2025-12-08T17:58:01.635092546+00:00 stderr F time=2025-12-08T17:58:01.635Z level=INFO source=head.go:870 msg="WAL replay completed" component=tsdb checkpoint_replay_duration=32.781µs wal_replay_duration=889.344µs wbl_replay_duration=190ns chunk_snapshot_load_duration=0s mmap_chunk_replay_duration=9.451µs total_replay_duration=958.106µs 2025-12-08T17:58:01.636708258+00:00 stderr F time=2025-12-08T17:58:01.636Z level=INFO source=main.go:1322 msg="filesystem information" fs_type=XFS_SUPER_MAGIC 2025-12-08T17:58:01.636708258+00:00 stderr F time=2025-12-08T17:58:01.636Z level=INFO source=main.go:1325 msg="TSDB started" 2025-12-08T17:58:01.636723228+00:00 stderr F time=2025-12-08T17:58:01.636Z level=INFO source=main.go:1510 msg="Loading configuration file" filename=/etc/prometheus/config_out/prometheus.env.yaml 2025-12-08T17:58:01.637745395+00:00 stderr F time=2025-12-08T17:58:01.637Z level=INFO source=kubernetes.go:313 msg="Using pod service account via in-cluster config" component="discovery manager notify" discovery=kubernetes config=config-0 2025-12-08T17:58:01.640132836+00:00 stderr F time=2025-12-08T17:58:01.640Z level=INFO source=main.go:1550 msg="Completed loading of configuration file" db_storage=1.23µs remote_storage=2.01µs web_handler=760ns query_engine=1.16µs scrape=227.805µs scrape_sd=1.53µs notify=253.967µs notify_sd=2.503614ms rules=32.951µs tracing=4.23µs filename=/etc/prometheus/config_out/prometheus.env.yaml totalDuration=3.404908ms 2025-12-08T17:58:01.640132836+00:00 stderr F time=2025-12-08T17:58:01.640Z level=INFO source=main.go:1286 msg="Server is ready to receive web requests." 2025-12-08T17:58:01.640213968+00:00 stderr F time=2025-12-08T17:58:01.640Z level=INFO source=manager.go:190 msg="Starting rule manager..." 
component="rule manager" 2025-12-08T17:58:01.653361188+00:00 stderr F time=2025-12-08T17:58:01.653Z level=INFO source=warnings.go:110 msg="Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" component=k8s_client_runtime 2025-12-08T17:58:01.660240916+00:00 stderr F time=2025-12-08T17:58:01.656Z level=INFO source=warnings.go:110 msg="Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" component=k8s_client_runtime 2025-12-08T17:58:10.811230349+00:00 stderr F time=2025-12-08T17:58:10.811Z level=INFO source=main.go:1510 msg="Loading configuration file" filename=/etc/prometheus/config_out/prometheus.env.yaml 2025-12-08T17:58:10.812077872+00:00 stderr F time=2025-12-08T17:58:10.812Z level=INFO source=main.go:1550 msg="Completed loading of configuration file" db_storage=1.99µs remote_storage=1.82µs web_handler=570ns query_engine=1.521µs scrape=102.632µs scrape_sd=1.54µs notify=339.769µs notify_sd=11.38µs rules=51.322µs tracing=2.28µs filename=/etc/prometheus/config_out/prometheus.env.yaml totalDuration=891.994µs 2025-12-08T17:58:12.185047867+00:00 stderr F time=2025-12-08T17:58:12.184Z level=INFO source=main.go:1510 msg="Loading configuration file" filename=/etc/prometheus/config_out/prometheus.env.yaml 2025-12-08T17:58:12.185804556+00:00 stderr F time=2025-12-08T17:58:12.185Z level=INFO source=main.go:1550 msg="Completed loading of configuration file" db_storage=1.5µs remote_storage=1.99µs web_handler=360ns query_engine=830ns scrape=58.801µs scrape_sd=60.932µs notify=352.079µs notify_sd=9.38µs rules=30.001µs tracing=2.23µs filename=/etc/prometheus/config_out/prometheus.env.yaml totalDuration=1.509799ms 2025-12-08T18:04:30.660607825+00:00 stderr F time=2025-12-08T18:04:30.660Z level=INFO source=warnings.go:110 msg="Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" component=k8s_client_runtime ././@LongLink0000644000000000000000000000026000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/init-config-reloader/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prom0000755000175000017500000000000015115611521033156 5ustar zuulzuul././@LongLink0000644000000000000000000000026500000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/init-config-reloader/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prom0000644000175000017500000000127715115611514033171 0ustar zuulzuul2025-12-08T17:57:50.609796420+00:00 stdout F ts=2025-12-08T17:57:50.609666867Z level=info caller=/workspace/cmd/prometheus-config-reloader/main.go:148 msg="Starting prometheus-config-reloader" version="(version=1.24, branch=, revision=unknown)" build_context="(go=go1.24.6 (Red Hat 1.24.6-1.el9_6), platform=linux/amd64, user=, date=20251110-21:03:26, tags=unknown)" 2025-12-08T17:57:50.809616144+00:00 stdout F ts=2025-12-08T17:57:50.809511671Z level=info caller=/workspace/internal/goruntime/cpu.go:27 msg="Updating GOMAXPROCS=1: using minimum allowed GOMAXPROCS" 2025-12-08T17:57:51.210078494+00:00 stdout F level=info ts=2025-12-08T17:57:51.20992465Z caller=reloader.go:282 msg="reloading via HTTP" ././@LongLink0000644000000000000000000000024700000000000011606 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/oauth-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prom0000755000175000017500000000000015115611521033156 5ustar zuulzuul././@LongLink0000644000000000000000000000025400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/oauth-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prom0000644000175000017500000000271315115611514033165 0ustar zuulzuul2025-12-08T17:58:10.510167279+00:00 stderr F 2025/12/08 17:58:10 provider.go:129: Defaulting client-id to system:serviceaccount:service-telemetry:prometheus-stf 2025-12-08T17:58:10.510167279+00:00 stderr F 2025/12/08 17:58:10 provider.go:134: Defaulting client-secret to service account token /var/run/secrets/kubernetes.io/serviceaccount/token 2025-12-08T17:58:10.511195985+00:00 stderr F 2025/12/08 17:58:10 provider.go:358: Delegation of authentication and authorization to OpenShift is enabled for bearer tokens and client certificates. 2025-12-08T17:58:10.527341352+00:00 stderr F 2025/12/08 17:58:10 oauthproxy.go:210: mapping path "/" => upstream "http://localhost:9090/" 2025-12-08T17:58:10.527375683+00:00 stderr F 2025/12/08 17:58:10 oauthproxy.go:237: OAuthProxy configured for Client ID: system:serviceaccount:service-telemetry:prometheus-stf 2025-12-08T17:58:10.527375683+00:00 stderr F 2025/12/08 17:58:10 oauthproxy.go:247: Cookie settings: name:_oauth_proxy secure(https):true httponly:true expiry:168h0m0s domain: samesite: refresh:disabled 2025-12-08T17:58:10.528119502+00:00 stderr F 2025/12/08 17:58:10 http.go:64: HTTP: listening on 127.0.0.1:4180 2025-12-08T17:58:10.528559223+00:00 stderr F 2025/12/08 17:58:10 http.go:110: HTTPS: listening on [::]:9092 2025-12-08T17:58:10.528609485+00:00 stderr F I1208 17:58:10.528577 1 dynamic_serving_content.go:135] "Starting controller" name="serving::/etc/tls/private/tls.crt::/etc/tls/private/tls.key" ././@LongLink0000644000000000000000000000025600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-control0000755000175000017500000000000015115611514033054 5ustar zuulzuul././@LongLink0000644000000000000000000000031000000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/cluster-policy-controller/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-control0000755000175000017500000000000015115611521033052 5ustar zuulzuul././@LongLink0000644000000000000000000000031500000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/cluster-policy-controller/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-control0000644000175000017500000075157115115611514033076 0ustar 
zuulzuul2025-12-08T17:42:24.634777286+00:00 stderr F + timeout 3m /bin/bash -exuo pipefail -c 'while [ -n "$(ss -Htanop \( sport = 10357 \))" ]; do sleep 1; done' 2025-12-08T17:42:24.638769948+00:00 stderr F ++ ss -Htanop '(' sport = 10357 ')' 2025-12-08T17:42:24.647194320+00:00 stderr F + '[' -n '' ']' 2025-12-08T17:42:24.647865619+00:00 stderr F + exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2 2025-12-08T17:42:24.820843946+00:00 stderr F I1208 17:42:24.820727 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:42:24.822102240+00:00 stderr F I1208 17:42:24.822057 1 observer_polling.go:159] Starting file observer 2025-12-08T17:42:24.845651193+00:00 stderr F I1208 17:42:24.845545 1 builder.go:304] cluster-policy-controller version 4.20.0-202510211040.p2.g47c7831.assembly.stream.el9-47c7831-47c783103216aa5e1242632127a5d8f98b8b7455 2025-12-08T17:42:24.848507576+00:00 stderr F I1208 17:42:24.848464 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:42:43.141965387+00:00 stderr F I1208 17:42:43.141909 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:42:43.147438426+00:00 stderr F I1208 17:42:43.147380 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:42:43.147438426+00:00 stderr F I1208 17:42:43.147428 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:42:43.147461537+00:00 stderr F I1208 17:42:43.147453 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400 2025-12-08T17:42:43.147469177+00:00 stderr F I1208 17:42:43.147462 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200 2025-12-08T17:42:43.158052715+00:00 stderr F I1208 17:42:43.157559 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:42:43.159673579+00:00 stderr F I1208 17:42:43.158381 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:42:43.159673579+00:00 stderr F W1208 17:42:43.159388 1 builder.go:364] unable to get control plane topology, using HA cluster values for leader election: infrastructures.config.openshift.io "cluster" is forbidden: User "system:kube-controller-manager" cannot get resource "infrastructures" in API group "config.openshift.io" at the cluster scope 2025-12-08T17:42:43.160857331+00:00 stderr F I1208 17:42:43.160795 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ControlPlaneTopology' unable to get control plane topology, using HA cluster values for leader election: infrastructures.config.openshift.io "cluster" is forbidden: User "system:kube-controller-manager" cannot get resource "infrastructures" in API group "config.openshift.io" at the cluster scope 2025-12-08T17:42:43.162464076+00:00 
stderr F I1208 17:42:43.162427 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:42:43.162482486+00:00 stderr F I1208 17:42:43.162470 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:42:43.162760614+00:00 stderr F I1208 17:42:43.162740 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:42:43.162784864+00:00 stderr F I1208 17:42:43.162761 1 leaderelection.go:257] attempting to acquire leader lease openshift-kube-controller-manager/cluster-policy-controller-lock... 2025-12-08T17:42:43.163086742+00:00 stderr F I1208 17:42:43.163051 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"kube-controller-manager.openshift-kube-controller-manager.svc\" [serving] validServingFor=[kube-controller-manager.openshift-kube-controller-manager.svc,kube-controller-manager.openshift-kube-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:42:43.16299196 +0000 UTC))" 2025-12-08T17:42:43.163137174+00:00 stderr F I1208 17:42:43.163107 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:42:43.163320539+00:00 stderr F I1208 17:42:43.163292 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215745\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215744\" (2025-12-08 16:42:24 +0000 UTC to 2028-12-08 16:42:24 +0000 UTC (now=2025-12-08 17:42:43.163246417 +0000 UTC))" 2025-12-08T17:42:43.163320539+00:00 stderr F I1208 17:42:43.163316 1 secure_serving.go:211] Serving securely on 127.0.0.1:10357 2025-12-08T17:42:43.163362010+00:00 stderr F I1208 17:42:43.163342 1 genericapiserver.go:696] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:42:43.163373660+00:00 stderr F I1208 17:42:43.163365 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:42:43.163451842+00:00 stderr F I1208 17:42:43.163426 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:42:43.163514004+00:00 stderr F I1208 17:42:43.163492 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:42:43.163525264+00:00 stderr F I1208 17:42:43.163499 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:42:43.166759163+00:00 stderr F I1208 17:42:43.166538 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.166932658+00:00 stderr F I1208 17:42:43.166847 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.167209455+00:00 
stderr F I1208 17:42:43.167146 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.167466412+00:00 stderr F I1208 17:42:43.167447 1 leaderelection.go:271] successfully acquired lease openshift-kube-controller-manager/cluster-policy-controller-lock 2025-12-08T17:42:43.167613876+00:00 stderr F I1208 17:42:43.167494 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-kube-controller-manager", Name:"cluster-policy-controller-lock", UID:"1f64f82e-dd4b-4305-8b94-092ec0886571", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"35834", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' crc_137256fd-47aa-4be9-8fd9-103db7e1cd12 became leader 2025-12-08T17:42:43.170863485+00:00 stderr F I1208 17:42:43.170478 1 policy_controller.go:78] Starting "openshift.io/cluster-quota-reconciliation" 2025-12-08T17:42:43.209480788+00:00 stderr F E1208 17:42:43.209405 1 reconciliation_controller.go:121] "Unhandled Error" err="initial discovery check failure, continuing and counting on future sync update: unable to retrieve the complete list of server APIs: apps.openshift.io/v1: stale GroupVersion discovery: apps.openshift.io/v1, authorization.openshift.io/v1: stale GroupVersion discovery: authorization.openshift.io/v1, build.openshift.io/v1: stale GroupVersion discovery: build.openshift.io/v1, image.openshift.io/v1: stale GroupVersion discovery: image.openshift.io/v1, oauth.openshift.io/v1: stale GroupVersion discovery: oauth.openshift.io/v1, packages.operators.coreos.com/v1: stale GroupVersion discovery: packages.operators.coreos.com/v1, project.openshift.io/v1: stale GroupVersion discovery: project.openshift.io/v1, quota.openshift.io/v1: stale GroupVersion discovery: quota.openshift.io/v1, route.openshift.io/v1: stale GroupVersion discovery: route.openshift.io/v1, security.openshift.io/v1: stale GroupVersion discovery: security.openshift.io/v1, template.openshift.io/v1: stale GroupVersion discovery: template.openshift.io/v1, user.openshift.io/v1: stale GroupVersion discovery: user.openshift.io/v1" 2025-12-08T17:42:43.211445392+00:00 stderr F I1208 17:42:43.211410 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="statefulsets.apps" 2025-12-08T17:42:43.211859173+00:00 stderr F I1208 17:42:43.211827 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="roles.rbac.authorization.k8s.io" 2025-12-08T17:42:43.211936175+00:00 stderr F I1208 17:42:43.211910 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="projecthelmchartrepositories.helm.openshift.io" 2025-12-08T17:42:43.211972156+00:00 stderr F I1208 17:42:43.211955 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="egressfirewalls.k8s.ovn.org" 2025-12-08T17:42:43.212025708+00:00 stderr F I1208 17:42:43.211996 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="machinehealthchecks.machine.openshift.io" 2025-12-08T17:42:43.212038838+00:00 stderr F I1208 17:42:43.212033 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="endpointslices.discovery.k8s.io" 2025-12-08T17:42:43.212094850+00:00 stderr F I1208 17:42:43.212066 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="prometheusrules.monitoring.coreos.com" 2025-12-08T17:42:43.212133371+00:00 stderr F I1208 17:42:43.212104 1 
resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="servicemonitors.monitoring.coreos.com" 2025-12-08T17:42:43.212161991+00:00 stderr F I1208 17:42:43.212144 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="alertingrules.monitoring.openshift.io" 2025-12-08T17:42:43.212229983+00:00 stderr F I1208 17:42:43.212200 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="egressrouters.network.operator.openshift.io" 2025-12-08T17:42:43.212265974+00:00 stderr F I1208 17:42:43.212246 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="catalogsources.operators.coreos.com" 2025-12-08T17:42:43.212555392+00:00 stderr F I1208 17:42:43.212517 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="jobs.batch" 2025-12-08T17:42:43.212570692+00:00 stderr F I1208 17:42:43.212562 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="rolebindings.rbac.authorization.k8s.io" 2025-12-08T17:42:43.212635814+00:00 stderr F I1208 17:42:43.212604 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="prometheuses.monitoring.coreos.com" 2025-12-08T17:42:43.212649294+00:00 stderr F I1208 17:42:43.212643 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="horizontalpodautoscalers.autoscaling" 2025-12-08T17:42:43.212902541+00:00 stderr F I1208 17:42:43.212852 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="ingresses.networking.k8s.io" 2025-12-08T17:42:43.212922032+00:00 stderr F I1208 17:42:43.212914 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="imagepolicies.config.openshift.io" 2025-12-08T17:42:43.212966853+00:00 stderr F I1208 17:42:43.212945 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="subscriptions.operators.coreos.com" 2025-12-08T17:42:43.212995554+00:00 stderr F I1208 17:42:43.212979 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="networkpolicies.networking.k8s.io" 2025-12-08T17:42:43.213052415+00:00 stderr F I1208 17:42:43.213003 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="probes.monitoring.coreos.com" 2025-12-08T17:42:43.213052415+00:00 stderr F I1208 17:42:43.213037 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="serviceaccounts" 2025-12-08T17:42:43.213064186+00:00 stderr F I1208 17:42:43.213057 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="referencegrants.gateway.networking.k8s.io" 2025-12-08T17:42:43.213093356+00:00 stderr F I1208 17:42:43.213077 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="egressqoses.k8s.ovn.org" 2025-12-08T17:42:43.213130087+00:00 stderr F I1208 17:42:43.213102 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="alertmanagers.monitoring.coreos.com" 2025-12-08T17:42:43.213130087+00:00 stderr F I1208 17:42:43.213126 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="thanosrulers.monitoring.coreos.com" 2025-12-08T17:42:43.213160128+00:00 stderr F I1208 17:42:43.213144 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="ingresscontrollers.operator.openshift.io" 
2025-12-08T17:42:43.213182699+00:00 stderr F I1208 17:42:43.213168 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="operatorgroups.operators.coreos.com" 2025-12-08T17:42:43.213212380+00:00 stderr F I1208 17:42:43.213192 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="daemonsets.apps" 2025-12-08T17:42:43.213246281+00:00 stderr F I1208 17:42:43.213225 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="podnetworkconnectivitychecks.controlplane.operator.openshift.io" 2025-12-08T17:42:43.213258231+00:00 stderr F I1208 17:42:43.213253 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="controlplanemachinesets.machine.openshift.io" 2025-12-08T17:42:43.213302472+00:00 stderr F I1208 17:42:43.213281 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="alertmanagerconfigs.monitoring.coreos.com" 2025-12-08T17:42:43.213342513+00:00 stderr F I1208 17:42:43.213322 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="operatorpkis.network.operator.openshift.io" 2025-12-08T17:42:43.213371494+00:00 stderr F I1208 17:42:43.213355 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="overlappingrangeipreservations.whereabouts.cni.cncf.io" 2025-12-08T17:42:43.213417295+00:00 stderr F I1208 17:42:43.213396 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="nodeslicepools.whereabouts.cni.cncf.io" 2025-12-08T17:42:43.213499087+00:00 stderr F I1208 17:42:43.213468 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="podtemplates" 2025-12-08T17:42:43.213499087+00:00 stderr F I1208 17:42:43.213494 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="replicasets.apps" 2025-12-08T17:42:43.213547509+00:00 stderr F I1208 17:42:43.213517 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="deployments.apps" 2025-12-08T17:42:43.213558139+00:00 stderr F I1208 17:42:43.213552 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="cronjobs.batch" 2025-12-08T17:42:43.213606130+00:00 stderr F I1208 17:42:43.213585 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="poddisruptionbudgets.policy" 2025-12-08T17:42:43.213616661+00:00 stderr F I1208 17:42:43.213609 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="clusterserviceversions.operators.coreos.com" 2025-12-08T17:42:43.214183697+00:00 stderr F I1208 17:42:43.214032 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="csistoragecapacities.storage.k8s.io" 2025-12-08T17:42:43.214183697+00:00 stderr F I1208 17:42:43.214062 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="dnsrecords.ingress.operator.openshift.io" 2025-12-08T17:42:43.214183697+00:00 stderr F I1208 17:42:43.214132 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="ipaddresses.ipam.cluster.x-k8s.io" 2025-12-08T17:42:43.214183697+00:00 stderr F I1208 17:42:43.214154 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="userdefinednetworks.k8s.ovn.org" 2025-12-08T17:42:43.214183697+00:00 stderr F I1208 17:42:43.214178 1 resource_quota_monitor.go:227] 
"QuotaMonitor created object count evaluator" resource="alertrelabelconfigs.monitoring.openshift.io" 2025-12-08T17:42:43.214235198+00:00 stderr F I1208 17:42:43.214201 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="leases.coordination.k8s.io" 2025-12-08T17:42:43.214729522+00:00 stderr F I1208 17:42:43.214693 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="grpcroutes.gateway.networking.k8s.io" 2025-12-08T17:42:43.214895096+00:00 stderr F I1208 17:42:43.214844 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="gateways.gateway.networking.k8s.io" 2025-12-08T17:42:43.216802427+00:00 stderr F I1208 17:42:43.216756 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="metal3remediationtemplates.infrastructure.cluster.x-k8s.io" 2025-12-08T17:42:43.217176208+00:00 stderr F I1208 17:42:43.217137 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="ipaddressclaims.ipam.cluster.x-k8s.io" 2025-12-08T17:42:43.217213949+00:00 stderr F I1208 17:42:43.217194 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="machines.machine.openshift.io" 2025-12-08T17:42:43.217291451+00:00 stderr F I1208 17:42:43.217261 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="controllerrevisions.apps" 2025-12-08T17:42:43.217334333+00:00 stderr F I1208 17:42:43.217309 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="egressservices.k8s.ovn.org" 2025-12-08T17:42:43.217406885+00:00 stderr F I1208 17:42:43.217377 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="machinesets.machine.openshift.io" 2025-12-08T17:42:43.217437925+00:00 stderr F I1208 17:42:43.217419 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="operatorconditions.operators.coreos.com" 2025-12-08T17:42:43.217492427+00:00 stderr F I1208 17:42:43.217466 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="ippools.whereabouts.cni.cncf.io" 2025-12-08T17:42:43.217535368+00:00 stderr F I1208 17:42:43.217514 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="httproutes.gateway.networking.k8s.io" 2025-12-08T17:42:43.217576019+00:00 stderr F I1208 17:42:43.217556 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="network-attachment-definitions.k8s.cni.cncf.io" 2025-12-08T17:42:43.217615080+00:00 stderr F I1208 17:42:43.217594 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="podmonitors.monitoring.coreos.com" 2025-12-08T17:42:43.217723133+00:00 stderr F I1208 17:42:43.217690 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="machineautoscalers.autoscaling.openshift.io" 2025-12-08T17:42:43.217734983+00:00 stderr F I1208 17:42:43.217725 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="metal3remediations.infrastructure.cluster.x-k8s.io" 2025-12-08T17:42:43.217780205+00:00 stderr F I1208 17:42:43.217759 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="ipamclaims.k8s.cni.cncf.io" 2025-12-08T17:42:43.217809055+00:00 stderr F I1208 17:42:43.217792 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" 
resource="limitranges" 2025-12-08T17:42:43.217844626+00:00 stderr F I1208 17:42:43.217819 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="endpoints" 2025-12-08T17:42:43.217908398+00:00 stderr F I1208 17:42:43.217868 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="installplans.operators.coreos.com" 2025-12-08T17:42:43.217923338+00:00 stderr F I1208 17:42:43.217909 1 policy_controller.go:88] Started "openshift.io/cluster-quota-reconciliation" 2025-12-08T17:42:43.217931319+00:00 stderr F I1208 17:42:43.217918 1 policy_controller.go:78] Starting "openshift.io/cluster-csr-approver" 2025-12-08T17:42:43.218163515+00:00 stderr F I1208 17:42:43.218126 1 clusterquotamapping.go:127] Starting ClusterQuotaMappingController controller 2025-12-08T17:42:43.218163515+00:00 stderr F I1208 17:42:43.218153 1 reconciliation_controller.go:140] Starting the cluster quota reconciliation controller 2025-12-08T17:42:43.218376031+00:00 stderr F I1208 17:42:43.218336 1 resource_quota_monitor.go:308] "QuotaMonitor running" 2025-12-08T17:42:43.226035960+00:00 stderr F I1208 17:42:43.225975 1 policy_controller.go:88] Started "openshift.io/cluster-csr-approver" 2025-12-08T17:42:43.226035960+00:00 stderr F I1208 17:42:43.226000 1 policy_controller.go:78] Starting "openshift.io/podsecurity-admission-label-syncer" 2025-12-08T17:42:43.226303717+00:00 stderr F I1208 17:42:43.226243 1 base_controller.go:76] Waiting for caches to sync for WebhookAuthenticatorCertApprover_csr-approver-controller 2025-12-08T17:42:43.233756930+00:00 stderr F I1208 17:42:43.233654 1 reconciliation_controller.go:171] error occurred GetQuotableResources err=unable to retrieve the complete list of server APIs: apps.openshift.io/v1: stale GroupVersion discovery: apps.openshift.io/v1, authorization.openshift.io/v1: stale GroupVersion discovery: authorization.openshift.io/v1, build.openshift.io/v1: stale GroupVersion discovery: build.openshift.io/v1, image.openshift.io/v1: stale GroupVersion discovery: image.openshift.io/v1, oauth.openshift.io/v1: stale GroupVersion discovery: oauth.openshift.io/v1, packages.operators.coreos.com/v1: stale GroupVersion discovery: packages.operators.coreos.com/v1, project.openshift.io/v1: stale GroupVersion discovery: project.openshift.io/v1, quota.openshift.io/v1: stale GroupVersion discovery: quota.openshift.io/v1, route.openshift.io/v1: stale GroupVersion discovery: route.openshift.io/v1, security.openshift.io/v1: stale GroupVersion discovery: security.openshift.io/v1, template.openshift.io/v1: stale GroupVersion discovery: template.openshift.io/v1, user.openshift.io/v1: stale GroupVersion discovery: user.openshift.io/v1 2025-12-08T17:42:43.233811322+00:00 stderr F E1208 17:42:43.233768 1 reconciliation_controller.go:172] "Unhandled Error" err="unable to retrieve the complete list of server APIs: apps.openshift.io/v1: stale GroupVersion discovery: apps.openshift.io/v1, authorization.openshift.io/v1: stale GroupVersion discovery: authorization.openshift.io/v1, build.openshift.io/v1: stale GroupVersion discovery: build.openshift.io/v1, image.openshift.io/v1: stale GroupVersion discovery: image.openshift.io/v1, oauth.openshift.io/v1: stale GroupVersion discovery: oauth.openshift.io/v1, packages.operators.coreos.com/v1: stale GroupVersion discovery: packages.operators.coreos.com/v1, project.openshift.io/v1: stale GroupVersion discovery: project.openshift.io/v1, quota.openshift.io/v1: stale GroupVersion discovery: quota.openshift.io/v1, 
route.openshift.io/v1: stale GroupVersion discovery: route.openshift.io/v1, security.openshift.io/v1: stale GroupVersion discovery: security.openshift.io/v1, template.openshift.io/v1: stale GroupVersion discovery: template.openshift.io/v1, user.openshift.io/v1: stale GroupVersion discovery: user.openshift.io/v1" 2025-12-08T17:42:43.235636771+00:00 stderr F I1208 17:42:43.235591 1 policy_controller.go:88] Started "openshift.io/podsecurity-admission-label-syncer" 2025-12-08T17:42:43.235636771+00:00 stderr F I1208 17:42:43.235613 1 policy_controller.go:78] Starting "openshift.io/privileged-namespaces-psa-label-syncer" 2025-12-08T17:42:43.235797336+00:00 stderr F I1208 17:42:43.235761 1 reconciliation_controller.go:207] syncing resource quota controller with updated resources from discovery: added: [/v1, Resource=configmaps /v1, Resource=endpoints /v1, Resource=events /v1, Resource=limitranges /v1, Resource=persistentvolumeclaims /v1, Resource=pods /v1, Resource=podtemplates /v1, Resource=replicationcontrollers /v1, Resource=resourcequotas /v1, Resource=secrets /v1, Resource=serviceaccounts /v1, Resource=services apps/v1, Resource=controllerrevisions apps/v1, Resource=daemonsets apps/v1, Resource=deployments apps/v1, Resource=replicasets apps/v1, Resource=statefulsets autoscaling.openshift.io/v1beta1, Resource=machineautoscalers autoscaling/v2, Resource=horizontalpodautoscalers batch/v1, Resource=cronjobs batch/v1, Resource=jobs config.openshift.io/v1, Resource=imagepolicies controlplane.operator.openshift.io/v1alpha1, Resource=podnetworkconnectivitychecks coordination.k8s.io/v1, Resource=leases discovery.k8s.io/v1, Resource=endpointslices events.k8s.io/v1, Resource=events gateway.networking.k8s.io/v1, Resource=gateways gateway.networking.k8s.io/v1, Resource=grpcroutes gateway.networking.k8s.io/v1, Resource=httproutes gateway.networking.k8s.io/v1beta1, Resource=referencegrants helm.openshift.io/v1beta1, Resource=projecthelmchartrepositories infrastructure.cluster.x-k8s.io/v1beta1, Resource=metal3remediations infrastructure.cluster.x-k8s.io/v1beta1, Resource=metal3remediationtemplates ingress.operator.openshift.io/v1, Resource=dnsrecords ipam.cluster.x-k8s.io/v1beta1, Resource=ipaddressclaims ipam.cluster.x-k8s.io/v1beta1, Resource=ipaddresses k8s.cni.cncf.io/v1, Resource=network-attachment-definitions k8s.cni.cncf.io/v1alpha1, Resource=ipamclaims k8s.ovn.org/v1, Resource=egressfirewalls k8s.ovn.org/v1, Resource=egressqoses k8s.ovn.org/v1, Resource=egressservices k8s.ovn.org/v1, Resource=userdefinednetworks machine.openshift.io/v1, Resource=controlplanemachinesets machine.openshift.io/v1beta1, Resource=machinehealthchecks machine.openshift.io/v1beta1, Resource=machines machine.openshift.io/v1beta1, Resource=machinesets monitoring.coreos.com/v1, Resource=alertmanagers monitoring.coreos.com/v1, Resource=podmonitors monitoring.coreos.com/v1, Resource=probes monitoring.coreos.com/v1, Resource=prometheuses monitoring.coreos.com/v1, Resource=prometheusrules monitoring.coreos.com/v1, Resource=servicemonitors monitoring.coreos.com/v1, Resource=thanosrulers monitoring.coreos.com/v1beta1, Resource=alertmanagerconfigs monitoring.openshift.io/v1, Resource=alertingrules monitoring.openshift.io/v1, Resource=alertrelabelconfigs network.operator.openshift.io/v1, Resource=egressrouters network.operator.openshift.io/v1, Resource=operatorpkis networking.k8s.io/v1, Resource=ingresses networking.k8s.io/v1, Resource=networkpolicies operator.openshift.io/v1, Resource=ingresscontrollers operators.coreos.com/v1, 
Resource=operatorgroups operators.coreos.com/v1alpha1, Resource=catalogsources operators.coreos.com/v1alpha1, Resource=clusterserviceversions operators.coreos.com/v1alpha1, Resource=installplans operators.coreos.com/v1alpha1, Resource=subscriptions operators.coreos.com/v2, Resource=operatorconditions policy/v1, Resource=poddisruptionbudgets rbac.authorization.k8s.io/v1, Resource=rolebindings rbac.authorization.k8s.io/v1, Resource=roles storage.k8s.io/v1, Resource=csistoragecapacities whereabouts.cni.cncf.io/v1alpha1, Resource=ippools whereabouts.cni.cncf.io/v1alpha1, Resource=nodeslicepools whereabouts.cni.cncf.io/v1alpha1, Resource=overlappingrangeipreservations], removed: [] 2025-12-08T17:42:43.235797336+00:00 stderr F I1208 17:42:43.235783 1 base_controller.go:76] Waiting for caches to sync for pod-security-admission-label-synchronization-controller 2025-12-08T17:42:43.241713727+00:00 stderr F I1208 17:42:43.241666 1 policy_controller.go:88] Started "openshift.io/privileged-namespaces-psa-label-syncer" 2025-12-08T17:42:43.241713727+00:00 stderr F I1208 17:42:43.241679 1 policy_controller.go:78] Starting "openshift.io/namespace-security-allocation" 2025-12-08T17:42:43.241839730+00:00 stderr F I1208 17:42:43.241818 1 privileged_namespaces_controller.go:75] "Starting" controller="privileged-namespaces-psa-label-syncer" 2025-12-08T17:42:43.241839730+00:00 stderr F I1208 17:42:43.241832 1 shared_informer.go:350] "Waiting for caches to sync" controller="privileged-namespaces-psa-label-syncer" 2025-12-08T17:42:43.249521550+00:00 stderr F I1208 17:42:43.249479 1 policy_controller.go:88] Started "openshift.io/namespace-security-allocation" 2025-12-08T17:42:43.249521550+00:00 stderr F I1208 17:42:43.249501 1 policy_controller.go:78] Starting "openshift.io/resourcequota" 2025-12-08T17:42:43.249549821+00:00 stderr F I1208 17:42:43.249521 1 base_controller.go:76] Waiting for caches to sync for namespace-security-allocation-controller 2025-12-08T17:42:43.263898342+00:00 stderr F I1208 17:42:43.263822 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:42:43.263982734+00:00 stderr F I1208 17:42:43.263951 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:42:43.263982734+00:00 stderr F I1208 17:42:43.263975 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:42:43.264240811+00:00 stderr F I1208 17:42:43.264211 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:42:43.264171969 +0000 UTC))" 2025-12-08T17:42:43.264607682+00:00 stderr F I1208 17:42:43.264569 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"kube-controller-manager.openshift-kube-controller-manager.svc\" [serving] validServingFor=[kube-controller-manager.openshift-kube-controller-manager.svc,kube-controller-manager.openshift-kube-controller-manager.svc.cluster.local] 
issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:42:43.264539799 +0000 UTC))" 2025-12-08T17:42:43.264940741+00:00 stderr F I1208 17:42:43.264902 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215745\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215744\" (2025-12-08 16:42:24 +0000 UTC to 2028-12-08 16:42:24 +0000 UTC (now=2025-12-08 17:42:43.264852589 +0000 UTC))" 2025-12-08T17:42:43.265173317+00:00 stderr F I1208 17:42:43.265148 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:42:43.265126886 +0000 UTC))" 2025-12-08T17:42:43.265203658+00:00 stderr F I1208 17:42:43.265183 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:42:43.265166887 +0000 UTC))" 2025-12-08T17:42:43.265233089+00:00 stderr F I1208 17:42:43.265214 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:42:43.265197608 +0000 UTC))" 2025-12-08T17:42:43.265275840+00:00 stderr F I1208 17:42:43.265245 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:42:43.265228929 +0000 UTC))" 2025-12-08T17:42:43.265300671+00:00 stderr F I1208 17:42:43.265282 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:42:43.2652645 +0000 UTC))" 2025-12-08T17:42:43.265337262+00:00 stderr F I1208 17:42:43.265314 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:42:43.265297041 +0000 UTC))" 2025-12-08T17:42:43.265367993+00:00 stderr F I1208 17:42:43.265350 1 tlsconfig.go:181] "Loaded client CA" index=6 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:42:43.265332402 +0000 UTC))" 2025-12-08T17:42:43.265402794+00:00 stderr F I1208 17:42:43.265386 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:42:43.265362843 +0000 UTC))" 2025-12-08T17:42:43.265708362+00:00 stderr F I1208 17:42:43.265682 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"kube-controller-manager.openshift-kube-controller-manager.svc\" [serving] validServingFor=[kube-controller-manager.openshift-kube-controller-manager.svc,kube-controller-manager.openshift-kube-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:42:43.265659341 +0000 UTC))" 2025-12-08T17:42:43.266036141+00:00 stderr F I1208 17:42:43.266005 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215745\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215744\" (2025-12-08 16:42:24 +0000 UTC to 2028-12-08 16:42:24 +0000 UTC (now=2025-12-08 17:42:43.265984389 +0000 UTC))" 2025-12-08T17:42:43.583163221+00:00 stderr F I1208 17:42:43.583083 1 policy_controller.go:88] Started "openshift.io/resourcequota" 2025-12-08T17:42:43.583163221+00:00 stderr F I1208 17:42:43.583108 1 policy_controller.go:91] Started Origin Controllers 2025-12-08T17:42:43.583931561+00:00 stderr F I1208 17:42:43.583899 1 resource_quota_controller.go:300] "Starting resource quota controller" 2025-12-08T17:42:43.583931561+00:00 stderr F I1208 17:42:43.583919 1 shared_informer.go:350] "Waiting for caches to sync" controller="resource quota" 2025-12-08T17:42:43.584767525+00:00 stderr F I1208 17:42:43.584737 1 resource_quota_monitor.go:308] "QuotaMonitor running" 2025-12-08T17:42:43.600292448+00:00 stderr F I1208 17:42:43.600221 1 reflector.go:430] "Caches populated" type="*v1.StatefulSet" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.603131255+00:00 stderr F I1208 17:42:43.603098 1 reflector.go:430] "Caches populated" type="*v1.Lease" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.603421073+00:00 stderr F I1208 17:42:43.603396 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.606663362+00:00 stderr F I1208 17:42:43.603950 1 reflector.go:430] "Caches populated" type="*v1.CSIStorageCapacity" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.616658384+00:00 stderr F I1208 17:42:43.616589 1 
reflector.go:430] "Caches populated" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.617385254+00:00 stderr F I1208 17:42:43.617354 1 reflector.go:430] "Caches populated" type="*v1.DaemonSet" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.617656702+00:00 stderr F I1208 17:42:43.617624 1 reflector.go:430] "Caches populated" type="*v1.ControllerRevision" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.618021402+00:00 stderr F I1208 17:42:43.617984 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.618277219+00:00 stderr F I1208 17:42:43.618233 1 reflector.go:430] "Caches populated" type="*v1.Job" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.618579947+00:00 stderr F I1208 17:42:43.618532 1 reflector.go:430] "Caches populated" type="*v1.EndpointSlice" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.618753092+00:00 stderr F E1208 17:42:43.618715 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ImageStream" 2025-12-08T17:42:43.619788990+00:00 stderr F I1208 17:42:43.619681 1 reflector.go:430] "Caches populated" type="*v1.CronJob" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.620168650+00:00 stderr F I1208 17:42:43.620096 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.620386777+00:00 stderr F I1208 17:42:43.620292 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.621144807+00:00 stderr F I1208 17:42:43.621090 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.621177928+00:00 stderr F I1208 17:42:43.621147 1 reflector.go:430] "Caches populated" type="*v1.ClusterResourceQuota" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.624410336+00:00 stderr F I1208 17:42:43.624333 1 reflector.go:430] "Caches populated" type="*v2.HorizontalPodAutoscaler" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.625990450+00:00 stderr F I1208 17:42:43.625942 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.626163924+00:00 stderr F I1208 17:42:43.626139 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "basic-user" not found 2025-12-08T17:42:43.626187095+00:00 stderr F I1208 17:42:43.626171 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-admin" not found 2025-12-08T17:42:43.626212826+00:00 stderr F I1208 17:42:43.626196 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: 
clusterrole.rbac.authorization.k8s.io "cluster-admin" not found 2025-12-08T17:42:43.626220736+00:00 stderr F I1208 17:42:43.626215 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-autoscaler" not found 2025-12-08T17:42:43.626386660+00:00 stderr F I1208 17:42:43.626358 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-autoscaler-operator" not found 2025-12-08T17:42:43.626386660+00:00 stderr F I1208 17:42:43.626382 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-monitoring-operator" not found 2025-12-08T17:42:43.626419531+00:00 stderr F I1208 17:42:43.626402 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-admin" not found 2025-12-08T17:42:43.626419531+00:00 stderr F I1208 17:42:43.626413 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-reader" not found 2025-12-08T17:42:43.626427651+00:00 stderr F I1208 17:42:43.626420 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-samples-operator" not found 2025-12-08T17:42:43.626449982+00:00 stderr F I1208 17:42:43.626433 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-samples-operator-imageconfig-reader" not found 2025-12-08T17:42:43.626449982+00:00 stderr F I1208 17:42:43.626440 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-samples-operator-proxy-reader" not found 2025-12-08T17:42:43.626449982+00:00 stderr F I1208 17:42:43.626445 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-status" not found 2025-12-08T17:42:43.626462152+00:00 stderr F I1208 17:42:43.626456 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-admin" not found 2025-12-08T17:42:43.626470423+00:00 stderr F I1208 17:42:43.626461 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "console" not found 2025-12-08T17:42:43.626478633+00:00 stderr F I1208 17:42:43.626469 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:auth-delegator" not found 2025-12-08T17:42:43.626486693+00:00 stderr F I1208 17:42:43.626480 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "console-extensions-reader" not found 2025-12-08T17:42:43.626496133+00:00 stderr F I1208 17:42:43.626486 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't 
retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "console-operator" not found 2025-12-08T17:42:43.626496133+00:00 stderr F I1208 17:42:43.626492 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:auth-delegator" not found 2025-12-08T17:42:43.626529274+00:00 stderr F I1208 17:42:43.626502 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "control-plane-machine-set-operator" not found 2025-12-08T17:42:43.626529274+00:00 stderr F I1208 17:42:43.626514 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "crc-hostpath-external-health-monitor-controller-runner" not found 2025-12-08T17:42:43.626529274+00:00 stderr F I1208 17:42:43.626522 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "crc-hostpath-external-provisioner-runner" not found 2025-12-08T17:42:43.626538044+00:00 stderr F I1208 17:42:43.626528 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "crc-hostpath-external-provisioner-runner" not found 2025-12-08T17:42:43.626544925+00:00 stderr F I1208 17:42:43.626539 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "csi-snapshot-controller-operator-clusterrole" not found 2025-12-08T17:42:43.626551775+00:00 stderr F I1208 17:42:43.626544 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-admin" not found 2025-12-08T17:42:43.626562775+00:00 stderr F I1208 17:42:43.626552 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-image-registry-operator" not found 2025-12-08T17:42:43.627413988+00:00 stderr F I1208 17:42:43.627377 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "dns-monitoring" not found 2025-12-08T17:42:43.627675685+00:00 stderr F I1208 17:42:43.627645 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "helm-chartrepos-viewer" not found 2025-12-08T17:42:43.627675685+00:00 stderr F I1208 17:42:43.627665 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "kube-apiserver" not found 2025-12-08T17:42:43.627690435+00:00 stderr F I1208 17:42:43.627675 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-admin" not found 2025-12-08T17:42:43.627690435+00:00 stderr F I1208 17:42:43.627682 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-api-controllers" not found 
2025-12-08T17:42:43.627701736+00:00 stderr F I1208 17:42:43.627695 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-api-controllers-metal3-remediation" not found 2025-12-08T17:42:43.627710306+00:00 stderr F I1208 17:42:43.627702 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-api-operator" not found 2025-12-08T17:42:43.627718766+00:00 stderr F I1208 17:42:43.627709 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-api-operator-ext-remediation" not found 2025-12-08T17:42:43.627725776+00:00 stderr F I1208 17:42:43.627719 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-config-controller" not found 2025-12-08T17:42:43.627732717+00:00 stderr F I1208 17:42:43.627726 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-config-daemon" not found 2025-12-08T17:42:43.627739547+00:00 stderr F I1208 17:42:43.627732 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-config-server" not found 2025-12-08T17:42:43.627748007+00:00 stderr F I1208 17:42:43.627742 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-os-builder" not found 2025-12-08T17:42:43.627754877+00:00 stderr F I1208 17:42:43.627749 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:openshift:scc:anyuid" not found 2025-12-08T17:42:43.627761717+00:00 stderr F I1208 17:42:43.627756 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "marketplace-operator" not found 2025-12-08T17:42:43.627791138+00:00 stderr F I1208 17:42:43.627766 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "metrics-daemon-role" not found 2025-12-08T17:42:43.628044625+00:00 stderr F I1208 17:42:43.628020 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "multus-admission-controller-webhook" not found 2025-12-08T17:42:43.628044625+00:00 stderr F I1208 17:42:43.628034 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "multus-ancillary-tools" not found 2025-12-08T17:42:43.628065056+00:00 stderr F I1208 17:42:43.628042 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "multus-ancillary-tools" not found 2025-12-08T17:42:43.628065056+00:00 stderr F I1208 17:42:43.628053 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole 
from role ref: clusterrole.rbac.authorization.k8s.io "multus" not found 2025-12-08T17:42:43.628065056+00:00 stderr F I1208 17:42:43.628060 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "multus-ancillary-tools" not found 2025-12-08T17:42:43.628080326+00:00 stderr F I1208 17:42:43.628065 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "whereabouts-cni" not found 2025-12-08T17:42:43.628080326+00:00 stderr F I1208 17:42:43.628077 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "network-diagnostics" not found 2025-12-08T17:42:43.628089596+00:00 stderr F I1208 17:42:43.628083 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "network-node-identity" not found 2025-12-08T17:42:43.628096696+00:00 stderr F I1208 17:42:43.628090 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:operator-lifecycle-manager" not found 2025-12-08T17:42:43.628105177+00:00 stderr F I1208 17:42:43.628099 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "openshift-dns" not found 2025-12-08T17:42:43.628112027+00:00 stderr F I1208 17:42:43.628107 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "openshift-dns-operator" not found 2025-12-08T17:42:43.628118747+00:00 stderr F I1208 17:42:43.628113 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:image-pruner" not found 2025-12-08T17:42:43.628149088+00:00 stderr F I1208 17:42:43.628123 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "openshift-ingress-operator" not found 2025-12-08T17:42:43.628149088+00:00 stderr F I1208 17:42:43.628135 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "openshift-ingress-router" not found 2025-12-08T17:42:43.628149088+00:00 stderr F I1208 17:42:43.628143 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "openshift-iptables-alerter" not found 2025-12-08T17:42:43.628164398+00:00 stderr F I1208 17:42:43.628149 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "openshift-ovn-kubernetes-control-plane-limited" not found 2025-12-08T17:42:43.628171328+00:00 stderr F I1208 17:42:43.628152 1 reflector.go:430] "Caches populated" type="*v1.SecurityContextConstraints" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628161 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't 
retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "openshift-ovn-kubernetes-node-limited" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628636 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "openshift-ovn-kubernetes-kube-rbac-proxy" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628650 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:auth-delegator" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628664 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "prometheus-k8s-scheduler-resources" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628671 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "registry-monitoring" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628679 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:registry" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628685 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "router-monitoring" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628697 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "self-access-reviewer" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628704 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "self-provisioner" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628715 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "cluster-admin" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628722 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:node-bootstrapper" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628731 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628739 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:basic-user" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628753 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:build-strategy-docker" not found 2025-12-08T17:42:43.630452591+00:00 stderr 
F I1208 17:42:43.628759 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:build-strategy-jenkinspipeline" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628768 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:build-strategy-source" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628778 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:attachdetach-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628786 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:certificate-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628793 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:clusterrole-aggregation-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628806 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:cronjob-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628812 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:daemon-set-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628820 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:deployment-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628830 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:disruption-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628840 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:endpoint-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628847 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:endpointslice-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628859 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:endpointslicemirroring-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628867 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:ephemeral-volume-controller" not found 
2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628895 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:expand-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628903 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:generic-garbage-collector" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628917 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:horizontal-pod-autoscaler" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.628924 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:job-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.629220 1 base_controller.go:82] Caches are synced for WebhookAuthenticatorCertApprover_csr-approver-controller 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.629237 1 base_controller.go:119] Starting #1 worker of WebhookAuthenticatorCertApprover_csr-approver-controller controller ... 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.629448 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:legacy-service-account-token-cleaner" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.629461 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:namespace-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr F I1208 17:42:43.629470 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:node-controller" not found 2025-12-08T17:42:43.630452591+00:00 stderr P I1208 17:42:43.629481 1 sccrolecache.go:466] failed to retrieve a ro 2025-12-08T17:42:43.630489982+00:00 stderr F le for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:persistent-volume-binder" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629491 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:pod-garbage-collector" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629497 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:pv-protection-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629509 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:pvc-protection-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629515 1 sccrolecache.go:466] failed to retrieve a role for a 
rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:replicaset-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629522 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:replication-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629536 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:resourcequota-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629544 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:root-ca-cert-publisher" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629551 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:route-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629565 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:selinux-warning-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629573 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:service-account-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629581 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:service-ca-cert-publisher" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629588 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:service-cidrs-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629602 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:service-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629609 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:statefulset-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629618 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:ttl-after-finished-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629642 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:ttl-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629651 1 sccrolecache.go:466] failed 
to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:controller:validatingadmissionpolicy-status-controller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629660 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:deployer" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629674 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:discovery" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629681 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:image-builder" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629687 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:image-puller" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629730 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:kube-controller-manager" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629739 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:kube-dns" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629745 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629757 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:master" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629764 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:monitoring" not found 2025-12-08T17:42:43.630489982+00:00 stderr F I1208 17:42:43.629771 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:node" not found 2025-12-08T17:42:43.637604456+00:00 stderr F I1208 17:42:43.637543 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.637863103+00:00 stderr F I1208 17:42:43.637817 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-config-controller-events" not found 2025-12-08T17:42:43.637962906+00:00 stderr F I1208 17:42:43.637915 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-config-daemon-events" not found 2025-12-08T17:42:43.637962906+00:00 stderr F I1208 17:42:43.637930 1 sccrolecache.go:466] failed to 
retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "machine-os-builder-events" not found 2025-12-08T17:42:43.637962906+00:00 stderr F I1208 17:42:43.637937 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:deployer" not found 2025-12-08T17:42:43.637962906+00:00 stderr F I1208 17:42:43.637951 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:image-builder" not found 2025-12-08T17:42:43.637962906+00:00 stderr F I1208 17:42:43.637956 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:image-puller" not found 2025-12-08T17:42:43.637979786+00:00 stderr F I1208 17:42:43.637975 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve role from role ref: role.rbac.authorization.k8s.io "external-health-monitor-controller-cfg" not found 2025-12-08T17:42:43.638220293+00:00 stderr F I1208 17:42:43.638184 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.640010742+00:00 stderr F I1208 17:42:43.639938 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "system:node-admin" not found 2025-12-08T17:42:43.640292060+00:00 stderr F I1208 17:42:43.640265 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.640303980+00:00 stderr F I1208 17:42:43.640288 1 reflector.go:430] "Caches populated" type="*v1.PodDisruptionBudget" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.640566097+00:00 stderr F E1208 17:42:43.640538 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-image-registry" should be enqueued: namespace "openshift-image-registry" not found 2025-12-08T17:42:43.640623909+00:00 stderr F I1208 17:42:43.640600 1 reflector.go:430] "Caches populated" type="*v1.NetworkPolicy" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.641218685+00:00 stderr F E1208 17:42:43.641189 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-service-ca" should be enqueued: namespace "openshift-service-ca" not found 2025-12-08T17:42:43.649922032+00:00 stderr F I1208 17:42:43.649366 1 reflector.go:430] "Caches populated" type="*v1.ReplicaSet" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.655792682+00:00 stderr F I1208 17:42:43.653257 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.774451679+00:00 stderr F I1208 17:42:43.773890 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:42:43.774451679+00:00 stderr F I1208 17:42:43.774223 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:43.777615575+00:00 stderr F I1208 17:42:43.777569 1 warnings.go:110] "Warning: v1 
Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:42:43.989924816+00:00 stderr F I1208 17:42:43.989333 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:44.000930956+00:00 stderr F I1208 17:42:44.000361 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000675 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "default" should be enqueued: namespace "default" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000683 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "default" should be enqueued: namespace "default" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000698 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "default" should be enqueued: namespace "default" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000702 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "hostpath-provisioner" should be enqueued: namespace "hostpath-provisioner" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000713 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "hostpath-provisioner" should be enqueued: namespace "hostpath-provisioner" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000722 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "hostpath-provisioner" should be enqueued: namespace "hostpath-provisioner" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000726 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "hostpath-provisioner" should be enqueued: namespace "hostpath-provisioner" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000737 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "hostpath-provisioner" should be enqueued: namespace "hostpath-provisioner" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000741 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-node-lease" should be enqueued: namespace "kube-node-lease" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000748 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-node-lease" should be enqueued: namespace "kube-node-lease" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000758 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-node-lease" should be enqueued: namespace "kube-node-lease" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000767 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-public" should be enqueued: namespace "kube-public" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000771 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-public" should be enqueued: namespace "kube-public" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000781 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-public" 
should be enqueued: namespace "kube-public" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000798 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000802 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000806 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000818 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000822 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000826 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000830 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000840 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000853 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000857 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000861 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000887 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000891 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000905 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000912 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.000930956+00:00 stderr F E1208 17:42:44.000915 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 
2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000922 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000936 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000940 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000947 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000957 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000968 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000972 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000976 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000984 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000987 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.000996 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001003248+00:00 stderr F E1208 17:42:44.001000 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001019228+00:00 stderr F E1208 17:42:44.001013 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001027779+00:00 stderr F E1208 17:42:44.001018 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001038669+00:00 stderr F E1208 17:42:44.001027 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001047159+00:00 stderr F E1208 17:42:44.001041 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001055959+00:00 stderr F E1208 17:42:44.001045 1 
podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001055959+00:00 stderr F E1208 17:42:44.001050 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001064660+00:00 stderr F E1208 17:42:44.001053 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.001072810+00:00 stderr F E1208 17:42:44.001065 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "kube-system" should be enqueued: namespace "kube-system" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001079 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-apiserver-operator" should be enqueued: namespace "openshift-apiserver-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001088 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-apiserver-operator" should be enqueued: namespace "openshift-apiserver-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001093 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-apiserver-operator" should be enqueued: namespace "openshift-apiserver-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001103 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-apiserver-operator" should be enqueued: namespace "openshift-apiserver-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001107 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-apiserver" should be enqueued: namespace "openshift-apiserver" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001119 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-apiserver" should be enqueued: namespace "openshift-apiserver" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001133 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-apiserver" should be enqueued: namespace "openshift-apiserver" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001137 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-apiserver" should be enqueued: namespace "openshift-apiserver" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001141 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-authentication-operator" should be enqueued: namespace "openshift-authentication-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001155 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-authentication-operator" should be enqueued: namespace "openshift-authentication-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001160 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-authentication-operator" should be enqueued: namespace "openshift-authentication-operator" not found 2025-12-08T17:42:44.004934416+00:00 
stderr F E1208 17:42:44.001164 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-authentication-operator" should be enqueued: namespace "openshift-authentication-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001179 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-authentication" should be enqueued: namespace "openshift-authentication" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001183 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-authentication" should be enqueued: namespace "openshift-authentication" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001187 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-authentication" should be enqueued: namespace "openshift-authentication" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001191 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-authentication" should be enqueued: namespace "openshift-authentication" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001200 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cloud-network-config-controller" should be enqueued: namespace "openshift-cloud-network-config-controller" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001213 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cloud-network-config-controller" should be enqueued: namespace "openshift-cloud-network-config-controller" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001216 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cloud-network-config-controller" should be enqueued: namespace "openshift-cloud-network-config-controller" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001221 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cloud-platform-infra" should be enqueued: namespace "openshift-cloud-platform-infra" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001234 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cloud-platform-infra" should be enqueued: namespace "openshift-cloud-platform-infra" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001238 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cloud-platform-infra" should be enqueued: namespace "openshift-cloud-platform-infra" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001250 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-machine-approver" should be enqueued: namespace "openshift-cluster-machine-approver" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001255 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-machine-approver" should be enqueued: namespace "openshift-cluster-machine-approver" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001259 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-machine-approver" should be enqueued: namespace 
"openshift-cluster-machine-approver" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001272 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-machine-approver" should be enqueued: namespace "openshift-cluster-machine-approver" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001276 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-samples-operator" should be enqueued: namespace "openshift-cluster-samples-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001281 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-samples-operator" should be enqueued: namespace "openshift-cluster-samples-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001294 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-samples-operator" should be enqueued: namespace "openshift-cluster-samples-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001298 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-samples-operator" should be enqueued: namespace "openshift-cluster-samples-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001302 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-storage-operator" should be enqueued: namespace "openshift-cluster-storage-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001336 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-storage-operator" should be enqueued: namespace "openshift-cluster-storage-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001341 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-storage-operator" should be enqueued: namespace "openshift-cluster-storage-operator" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001345 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-version" should be enqueued: namespace "openshift-cluster-version" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001349 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-version" should be enqueued: namespace "openshift-cluster-version" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001359 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-cluster-version" should be enqueued: namespace "openshift-cluster-version" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001373 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-config-managed" should be enqueued: namespace "openshift-config-managed" not found 2025-12-08T17:42:44.004934416+00:00 stderr F E1208 17:42:44.001378 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-config-managed" should be enqueued: namespace "openshift-config-managed" not found 2025-12-08T17:42:44.004934416+00:00 stderr P E1208 17:42: 2025-12-08T17:42:44.005004148+00:00 stderr F 44.001383 1 podsecurity_label_sync_controller.go:427] failed to determine 
whether namespace "openshift-config-managed" should be enqueued: namespace "openshift-config-managed" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001387 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-config-operator" should be enqueued: namespace "openshift-config-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001396 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-config-operator" should be enqueued: namespace "openshift-config-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001409 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-config-operator" should be enqueued: namespace "openshift-config-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001413 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-config-operator" should be enqueued: namespace "openshift-config-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001417 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-config" should be enqueued: namespace "openshift-config" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001421 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-config" should be enqueued: namespace "openshift-config" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001431 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-config" should be enqueued: namespace "openshift-config" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001445 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console-operator" should be enqueued: namespace "openshift-console-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001454 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console-operator" should be enqueued: namespace "openshift-console-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001458 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console-operator" should be enqueued: namespace "openshift-console-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001462 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console-operator" should be enqueued: namespace "openshift-console-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001466 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console-user-settings" should be enqueued: namespace "openshift-console-user-settings" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001475 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console-user-settings" should be enqueued: namespace "openshift-console-user-settings" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001479 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console-user-settings" should be enqueued: namespace "openshift-console-user-settings" not found 
2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001488 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console" should be enqueued: namespace "openshift-console" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001501 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console" should be enqueued: namespace "openshift-console" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001505 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console" should be enqueued: namespace "openshift-console" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001509 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-console" should be enqueued: namespace "openshift-console" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001517 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-controller-manager-operator" should be enqueued: namespace "openshift-controller-manager-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001522 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-controller-manager-operator" should be enqueued: namespace "openshift-controller-manager-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001531 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-controller-manager-operator" should be enqueued: namespace "openshift-controller-manager-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001540 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-controller-manager-operator" should be enqueued: namespace "openshift-controller-manager-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001544 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-controller-manager" should be enqueued: namespace "openshift-controller-manager" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001559 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-controller-manager" should be enqueued: namespace "openshift-controller-manager" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001563 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-controller-manager" should be enqueued: namespace "openshift-controller-manager" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001569 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-controller-manager" should be enqueued: namespace "openshift-controller-manager" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001579 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-dns-operator" should be enqueued: namespace "openshift-dns-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001589 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-dns-operator" should be enqueued: namespace "openshift-dns-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001593 1 
podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-dns-operator" should be enqueued: namespace "openshift-dns-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001597 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-dns-operator" should be enqueued: namespace "openshift-dns-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001607 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-dns" should be enqueued: namespace "openshift-dns" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001620 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-dns" should be enqueued: namespace "openshift-dns" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001624 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-dns" should be enqueued: namespace "openshift-dns" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001628 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-dns" should be enqueued: namespace "openshift-dns" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001640 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-dns" should be enqueued: namespace "openshift-dns" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001644 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-etcd-operator" should be enqueued: namespace "openshift-etcd-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001648 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-etcd-operator" should be enqueued: namespace "openshift-etcd-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001652 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-etcd-operator" should be enqueued: namespace "openshift-etcd-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr F E1208 17:42:44.001661 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-etcd-operator" should be enqueued: namespace "openshift-etcd-operator" not found 2025-12-08T17:42:44.005004148+00:00 stderr P E1208 17:42:44.001675 1 podsecurity_label_sync_controller.go:427] failed t 2025-12-08T17:42:44.005034059+00:00 stderr F o determine whether namespace "openshift-etcd" should be enqueued: namespace "openshift-etcd" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001679 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-etcd" should be enqueued: namespace "openshift-etcd" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001683 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-etcd" should be enqueued: namespace "openshift-etcd" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001695 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-etcd" should be enqueued: namespace "openshift-etcd" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001699 1 podsecurity_label_sync_controller.go:427] failed to determine whether 
namespace "openshift-etcd" should be enqueued: namespace "openshift-etcd" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001703 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-etcd" should be enqueued: namespace "openshift-etcd" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001714 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-host-network" should be enqueued: namespace "openshift-host-network" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001717 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-host-network" should be enqueued: namespace "openshift-host-network" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001727 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-host-network" should be enqueued: namespace "openshift-host-network" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001731 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-image-registry" should be enqueued: namespace "openshift-image-registry" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001740 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-image-registry" should be enqueued: namespace "openshift-image-registry" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001752 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-image-registry" should be enqueued: namespace "openshift-image-registry" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001755 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-image-registry" should be enqueued: namespace "openshift-image-registry" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001759 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-image-registry" should be enqueued: namespace "openshift-image-registry" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001773 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-image-registry" should be enqueued: namespace "openshift-image-registry" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001777 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-image-registry" should be enqueued: namespace "openshift-image-registry" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001781 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001790 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001793 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001802 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace 
"openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001810 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001815 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001823 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001836 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001840 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001844 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001856 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001860 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.001864 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.002151 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.002271 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.005034059+00:00 stderr F E1208 17:42:44.002278 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.008963 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.008985 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009008 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009013 1 
podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009016 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009020 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009031 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009035 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009053 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-infra" should be enqueued: namespace "openshift-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009057 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress-canary" should be enqueued: namespace "openshift-ingress-canary" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009063 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress-canary" should be enqueued: namespace "openshift-ingress-canary" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009067 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress-canary" should be enqueued: namespace "openshift-ingress-canary" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009077 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress-operator" should be enqueued: namespace "openshift-ingress-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009085 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress-operator" should be enqueued: namespace "openshift-ingress-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009089 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress-operator" should be enqueued: namespace "openshift-ingress-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009103 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress-operator" should be enqueued: namespace "openshift-ingress-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009106 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress" should be enqueued: namespace "openshift-ingress" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009111 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress" should be enqueued: namespace "openshift-ingress" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009120 1 
podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress" should be enqueued: namespace "openshift-ingress" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009124 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ingress" should be enqueued: namespace "openshift-ingress" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009132 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kni-infra" should be enqueued: namespace "openshift-kni-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009145 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kni-infra" should be enqueued: namespace "openshift-kni-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009152 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kni-infra" should be enqueued: namespace "openshift-kni-infra" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009156 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-apiserver-operator" should be enqueued: namespace "openshift-kube-apiserver-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009171 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-apiserver-operator" should be enqueued: namespace "openshift-kube-apiserver-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009175 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-apiserver-operator" should be enqueued: namespace "openshift-kube-apiserver-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009180 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-apiserver-operator" should be enqueued: namespace "openshift-kube-apiserver-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009191 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-apiserver" should be enqueued: namespace "openshift-kube-apiserver" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009194 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-apiserver" should be enqueued: namespace "openshift-kube-apiserver" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009203 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-apiserver" should be enqueued: namespace "openshift-kube-apiserver" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009212 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-apiserver" should be enqueued: namespace "openshift-kube-apiserver" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009215 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-apiserver" should be enqueued: namespace "openshift-kube-apiserver" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009225 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-controller-manager-operator" should be 
enqueued: namespace "openshift-kube-controller-manager-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009239 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-controller-manager-operator" should be enqueued: namespace "openshift-kube-controller-manager-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009244 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-controller-manager-operator" should be enqueued: namespace "openshift-kube-controller-manager-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009249 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-controller-manager-operator" should be enqueued: namespace "openshift-kube-controller-manager-operator" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009258 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-controller-manager" should be enqueued: namespace "openshift-kube-controller-manager" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009262 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-controller-manager" should be enqueued: namespace "openshift-kube-controller-manager" not found 2025-12-08T17:42:44.011026522+00:00 stderr F E1208 17:42:44.009278 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-controller-manager" should be enqueued: namespace "openshift-kube-controller-manager" not found 2025-12-08T17:42:44.011026522+00:00 stderr P E1208 17:42:44.009282 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-controller-manager" should be enqueued: namespace "openshift-kube-contro 2025-12-08T17:42:44.011268698+00:00 stderr F ller-manager" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009288 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-controller-manager" should be enqueued: namespace "openshift-kube-controller-manager" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009296 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-controller-manager" should be enqueued: namespace "openshift-kube-controller-manager" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009300 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-scheduler-operator" should be enqueued: namespace "openshift-kube-scheduler-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009318 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-scheduler-operator" should be enqueued: namespace "openshift-kube-scheduler-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009323 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-scheduler-operator" should be enqueued: namespace "openshift-kube-scheduler-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009328 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-scheduler-operator" should be enqueued: namespace 
"openshift-kube-scheduler-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009332 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-scheduler" should be enqueued: namespace "openshift-kube-scheduler" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009341 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-scheduler" should be enqueued: namespace "openshift-kube-scheduler" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009349 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-scheduler" should be enqueued: namespace "openshift-kube-scheduler" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009353 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-scheduler" should be enqueued: namespace "openshift-kube-scheduler" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009362 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-scheduler" should be enqueued: namespace "openshift-kube-scheduler" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009374 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-scheduler" should be enqueued: namespace "openshift-kube-scheduler" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009378 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-storage-version-migrator-operator" should be enqueued: namespace "openshift-kube-storage-version-migrator-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009383 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-storage-version-migrator-operator" should be enqueued: namespace "openshift-kube-storage-version-migrator-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009396 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-storage-version-migrator-operator" should be enqueued: namespace "openshift-kube-storage-version-migrator-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009400 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-storage-version-migrator-operator" should be enqueued: namespace "openshift-kube-storage-version-migrator-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009412 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-storage-version-migrator" should be enqueued: namespace "openshift-kube-storage-version-migrator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009417 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-storage-version-migrator" should be enqueued: namespace "openshift-kube-storage-version-migrator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009422 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-storage-version-migrator" should be enqueued: namespace "openshift-kube-storage-version-migrator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009432 
1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-kube-storage-version-migrator" should be enqueued: namespace "openshift-kube-storage-version-migrator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009436 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-api" should be enqueued: namespace "openshift-machine-api" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009444 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-api" should be enqueued: namespace "openshift-machine-api" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009452 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-api" should be enqueued: namespace "openshift-machine-api" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009456 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-api" should be enqueued: namespace "openshift-machine-api" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009493 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-api" should be enqueued: namespace "openshift-machine-api" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009497 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-api" should be enqueued: namespace "openshift-machine-api" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009505 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-api" should be enqueued: namespace "openshift-machine-api" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009516 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-api" should be enqueued: namespace "openshift-machine-api" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009520 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-api" should be enqueued: namespace "openshift-machine-api" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009528 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-config-operator" should be enqueued: namespace "openshift-machine-config-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009542 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-config-operator" should be enqueued: namespace "openshift-machine-config-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009552 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-config-operator" should be enqueued: namespace "openshift-machine-config-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009556 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-config-operator" should be enqueued: namespace "openshift-machine-config-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009560 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace 
"openshift-machine-config-operator" should be enqueued: namespace "openshift-machine-config-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009564 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-config-operator" should be enqueued: namespace "openshift-machine-config-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009573 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-config-operator" should be enqueued: namespace "openshift-machine-config-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr F E1208 17:42:44.009577 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-config-operator" should be enqueued: namespace "openshift-machine-config-operator" not found 2025-12-08T17:42:44.011268698+00:00 stderr P E1208 17:42:44.009591 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace 2025-12-08T17:42:44.011298889+00:00 stderr F "openshift-machine-config-operator" should be enqueued: namespace "openshift-machine-config-operator" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009595 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-machine-config-operator" should be enqueued: namespace "openshift-machine-config-operator" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009604 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-marketplace" should be enqueued: namespace "openshift-marketplace" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009613 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-marketplace" should be enqueued: namespace "openshift-marketplace" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009617 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-marketplace" should be enqueued: namespace "openshift-marketplace" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009626 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-marketplace" should be enqueued: namespace "openshift-marketplace" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009630 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-marketplace" should be enqueued: namespace "openshift-marketplace" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009646 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-marketplace" should be enqueued: namespace "openshift-marketplace" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009650 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-marketplace" should be enqueued: namespace "openshift-marketplace" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009654 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-marketplace" should be enqueued: namespace "openshift-marketplace" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009688 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-monitoring" should be enqueued: 
namespace "openshift-monitoring" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009693 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-monitoring" should be enqueued: namespace "openshift-monitoring" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009701 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-monitoring" should be enqueued: namespace "openshift-monitoring" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009704 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-monitoring" should be enqueued: namespace "openshift-monitoring" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009722 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-multus" should be enqueued: namespace "openshift-multus" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009727 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-multus" should be enqueued: namespace "openshift-multus" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009731 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-multus" should be enqueued: namespace "openshift-multus" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009746 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-multus" should be enqueued: namespace "openshift-multus" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009751 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-multus" should be enqueued: namespace "openshift-multus" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009755 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-multus" should be enqueued: namespace "openshift-multus" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009760 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-multus" should be enqueued: namespace "openshift-multus" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009770 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-console" should be enqueued: namespace "openshift-network-console" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009785 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-console" should be enqueued: namespace "openshift-network-console" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009789 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-console" should be enqueued: namespace "openshift-network-console" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009794 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-diagnostics" should be enqueued: namespace "openshift-network-diagnostics" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009806 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-diagnostics" should be enqueued: namespace 
"openshift-network-diagnostics" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009812 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-diagnostics" should be enqueued: namespace "openshift-network-diagnostics" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009816 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-diagnostics" should be enqueued: namespace "openshift-network-diagnostics" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009820 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-node-identity" should be enqueued: namespace "openshift-network-node-identity" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009828 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-node-identity" should be enqueued: namespace "openshift-network-node-identity" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009837 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-node-identity" should be enqueued: namespace "openshift-network-node-identity" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009841 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-node-identity" should be enqueued: namespace "openshift-network-node-identity" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009850 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-operator" should be enqueued: namespace "openshift-network-operator" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009858 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-operator" should be enqueued: namespace "openshift-network-operator" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009862 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-operator" should be enqueued: namespace "openshift-network-operator" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009871 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-operator" should be enqueued: namespace "openshift-network-operator" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009893 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-network-operator" should be enqueued: namespace "openshift-network-operator" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009897 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-node" should be enqueued: namespace "openshift-node" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009912 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-node" should be enqueued: namespace "openshift-node" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009918 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-node" should be enqueued: namespace "openshift-node" not found 2025-12-08T17:42:44.011298889+00:00 stderr F E1208 17:42:44.009922 1 
podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-nutanix-infra" should be enqueued: namespace "openshift-nutanix-infra" not found 2025-12-08T17:42:44.011298889+00:00 stderr P E1208 17:42:44.009926 1 pods 2025-12-08T17:42:44.011330700+00:00 stderr F ecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-nutanix-infra" should be enqueued: namespace "openshift-nutanix-infra" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.009936 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-nutanix-infra" should be enqueued: namespace "openshift-nutanix-infra" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.009947 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-oauth-apiserver" should be enqueued: namespace "openshift-oauth-apiserver" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.009951 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-oauth-apiserver" should be enqueued: namespace "openshift-oauth-apiserver" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.009955 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-oauth-apiserver" should be enqueued: namespace "openshift-oauth-apiserver" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.009970 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-oauth-apiserver" should be enqueued: namespace "openshift-oauth-apiserver" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.009974 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-openstack-infra" should be enqueued: namespace "openshift-openstack-infra" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.009978 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-openstack-infra" should be enqueued: namespace "openshift-openstack-infra" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.009986 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-openstack-infra" should be enqueued: namespace "openshift-openstack-infra" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.009989 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-operator-lifecycle-manager" should be enqueued: namespace "openshift-operator-lifecycle-manager" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010001 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-operator-lifecycle-manager" should be enqueued: namespace "openshift-operator-lifecycle-manager" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010005 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-operator-lifecycle-manager" should be enqueued: namespace "openshift-operator-lifecycle-manager" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010015 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-operator-lifecycle-manager" should be enqueued: namespace "openshift-operator-lifecycle-manager" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010030 1 
podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-operator-lifecycle-manager" should be enqueued: namespace "openshift-operator-lifecycle-manager" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010034 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-operators" should be enqueued: namespace "openshift-operators" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010042 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-operators" should be enqueued: namespace "openshift-operators" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010054 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-operators" should be enqueued: namespace "openshift-operators" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010059 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ovirt-infra" should be enqueued: namespace "openshift-ovirt-infra" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010063 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ovirt-infra" should be enqueued: namespace "openshift-ovirt-infra" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010073 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ovirt-infra" should be enqueued: namespace "openshift-ovirt-infra" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010077 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ovn-kubernetes" should be enqueued: namespace "openshift-ovn-kubernetes" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010086 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ovn-kubernetes" should be enqueued: namespace "openshift-ovn-kubernetes" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010093 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ovn-kubernetes" should be enqueued: namespace "openshift-ovn-kubernetes" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010097 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ovn-kubernetes" should be enqueued: namespace "openshift-ovn-kubernetes" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010105 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-ovn-kubernetes" should be enqueued: namespace "openshift-ovn-kubernetes" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010114 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-route-controller-manager" should be enqueued: namespace "openshift-route-controller-manager" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010129 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-route-controller-manager" should be enqueued: namespace "openshift-route-controller-manager" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010133 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-route-controller-manager" should be enqueued: 
namespace "openshift-route-controller-manager" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010137 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-route-controller-manager" should be enqueued: namespace "openshift-route-controller-manager" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010146 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-service-ca-operator" should be enqueued: namespace "openshift-service-ca-operator" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010150 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-service-ca-operator" should be enqueued: namespace "openshift-service-ca-operator" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010158 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-service-ca-operator" should be enqueued: namespace "openshift-service-ca-operator" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010171 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-service-ca-operator" should be enqueued: namespace "openshift-service-ca-operator" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010175 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-service-ca" should be enqueued: namespace "openshift-service-ca" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010179 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-service-ca" should be enqueued: namespace "openshift-service-ca" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010187 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-service-ca" should be enqueued: namespace "openshift-service-ca" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010191 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-service-ca" should be enqueued: namespace "openshift-service-ca" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010199 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-user-workload-monitoring" should be enqueued: namespace "openshift-user-workload-monitoring" not found 2025-12-08T17:42:44.011330700+00:00 stderr F E1208 17:42:44.010208 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-user-workload-monitoring" should be enqueued: namespace "openshift-user-workload-monitoring" not found 2025-12-08T17:42:44.011330700+00:00 stderr P E1208 17:42:44.010212 1 podsecurity_label_sync_control 2025-12-08T17:42:44.011358491+00:00 stderr F ler.go:427] failed to determine whether namespace "openshift-user-workload-monitoring" should be enqueued: namespace "openshift-user-workload-monitoring" not found 2025-12-08T17:42:44.011358491+00:00 stderr F E1208 17:42:44.010219 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-vsphere-infra" should be enqueued: namespace "openshift-vsphere-infra" not found 2025-12-08T17:42:44.011358491+00:00 stderr F E1208 17:42:44.010230 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-vsphere-infra" should be enqueued: namespace 
"openshift-vsphere-infra" not found 2025-12-08T17:42:44.011358491+00:00 stderr F E1208 17:42:44.010235 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift-vsphere-infra" should be enqueued: namespace "openshift-vsphere-infra" not found 2025-12-08T17:42:44.011358491+00:00 stderr F E1208 17:42:44.010239 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift" should be enqueued: namespace "openshift" not found 2025-12-08T17:42:44.011358491+00:00 stderr F E1208 17:42:44.010247 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift" should be enqueued: namespace "openshift" not found 2025-12-08T17:42:44.011358491+00:00 stderr F E1208 17:42:44.010250 1 podsecurity_label_sync_controller.go:427] failed to determine whether namespace "openshift" should be enqueued: namespace "openshift" not found 2025-12-08T17:42:44.183812595+00:00 stderr F I1208 17:42:44.183752 1 reflector.go:430] "Caches populated" type="*v1.ResourceQuota" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:44.386835023+00:00 stderr F I1208 17:42:44.386433 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:44.447072276+00:00 stderr F I1208 17:42:44.445913 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:44.573946997+00:00 stderr F I1208 17:42:44.573838 1 reflector.go:430] "Caches populated" type="*v1.ReplicationController" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:44.588694129+00:00 stderr F I1208 17:42:44.588592 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:44.704685943+00:00 stderr F E1208 17:42:44.704646 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ImageStream" 2025-12-08T17:42:44.771622328+00:00 stderr F I1208 17:42:44.771560 1 request.go:752] "Waited before sending request" delay="1.186820633s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/api/v1/namespaces?limit=500&resourceVersion=0" 2025-12-08T17:42:44.774104616+00:00 stderr F I1208 17:42:44.774070 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:44.787063100+00:00 stderr F I1208 17:42:44.786985 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:44.836336054+00:00 stderr F I1208 17:42:44.836268 1 base_controller.go:82] Caches are synced for pod-security-admission-label-synchronization-controller 2025-12-08T17:42:44.836336054+00:00 stderr F I1208 17:42:44.836306 1 base_controller.go:119] Starting #1 worker of pod-security-admission-label-synchronization-controller controller ... 
2025-12-08T17:42:44.842537253+00:00 stderr F I1208 17:42:44.842494 1 shared_informer.go:357] "Caches are synced" controller="privileged-namespaces-psa-label-syncer" 2025-12-08T17:42:44.849959185+00:00 stderr F I1208 17:42:44.849922 1 base_controller.go:82] Caches are synced for namespace-security-allocation-controller 2025-12-08T17:42:44.849959185+00:00 stderr F I1208 17:42:44.849941 1 base_controller.go:119] Starting #1 worker of namespace-security-allocation-controller controller ... 2025-12-08T17:42:44.850007716+00:00 stderr F I1208 17:42:44.849993 1 namespace_scc_allocation_controller.go:111] Repairing SCC UID Allocations 2025-12-08T17:42:44.986797878+00:00 stderr F I1208 17:42:44.986726 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.084279207+00:00 stderr F I1208 17:42:45.084221 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.172623757+00:00 stderr F I1208 17:42:45.172519 1 reflector.go:430] "Caches populated" type="*v1.PodTemplate" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.187778860+00:00 stderr F I1208 17:42:45.187686 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.231531834+00:00 stderr F I1208 17:42:45.231349 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.231531834+00:00 stderr F I1208 17:42:45.231369 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.231672018+00:00 stderr F I1208 17:42:45.231634 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.231672018+00:00 stderr F I1208 17:42:45.231654 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.231730489+00:00 stderr F I1208 17:42:45.231673 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.231903074+00:00 stderr F I1208 17:42:45.231859 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.232034567+00:00 stderr F I1208 17:42:45.232008 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.232034567+00:00 stderr F I1208 17:42:45.232015 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.375942432+00:00 stderr F I1208 17:42:45.375176 1 reflector.go:430] "Caches populated" type="*v1.LimitRange" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.392306009+00:00 stderr F I1208 17:42:45.392220 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.575245769+00:00 stderr F I1208 17:42:45.575185 1 
reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.587412061+00:00 stderr F I1208 17:42:45.587354 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.772065058+00:00 stderr F I1208 17:42:45.771992 1 request.go:752] "Waited before sending request" delay="2.184475986s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0" 2025-12-08T17:42:45.773787994+00:00 stderr F I1208 17:42:45.773743 1 reflector.go:430] "Caches populated" type="*v1.PersistentVolumeClaim" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.787901809+00:00 stderr F I1208 17:42:45.787833 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.981821999+00:00 stderr F I1208 17:42:45.981751 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:45.987157354+00:00 stderr F I1208 17:42:45.987027 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:46.187167621+00:00 stderr F I1208 17:42:46.187052 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:46.389689034+00:00 stderr F I1208 17:42:46.389596 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:46.403201123+00:00 stderr F I1208 17:42:46.403135 1 namespace_scc_allocation_controller.go:116] Repair complete 2025-12-08T17:42:46.587381087+00:00 stderr F I1208 17:42:46.587203 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:46.786123428+00:00 stderr F I1208 17:42:46.786054 1 request.go:752] "Waited before sending request" delay="3.199396111s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/apis/ingress.operator.openshift.io/v1/dnsrecords?limit=500&resourceVersion=0" 2025-12-08T17:42:46.787562517+00:00 stderr F I1208 17:42:46.787522 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:46.987234483+00:00 stderr F I1208 17:42:46.987172 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:47.188228556+00:00 stderr F I1208 17:42:47.188160 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:47.270843720+00:00 stderr F E1208 17:42:47.270747 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ImageStream" 2025-12-08T17:42:47.387858311+00:00 stderr F I1208 17:42:47.387766 1 reflector.go:430] "Caches populated" 
type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:47.588221957+00:00 stderr F I1208 17:42:47.588144 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:47.787586045+00:00 stderr F I1208 17:42:47.787491 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:47.986144061+00:00 stderr F I1208 17:42:47.986027 1 request.go:752] "Waited before sending request" delay="4.399216218s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/apis/operators.coreos.com/v2/operatorconditions?limit=500&resourceVersion=0" 2025-12-08T17:42:47.988784303+00:00 stderr F I1208 17:42:47.988679 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:48.190717991+00:00 stderr F I1208 17:42:48.190605 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:48.387935550+00:00 stderr F I1208 17:42:48.387853 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:48.588012589+00:00 stderr F I1208 17:42:48.587949 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:48.787350486+00:00 stderr F I1208 17:42:48.787279 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:48.988059181+00:00 stderr F I1208 17:42:48.987979 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:49.185803264+00:00 stderr F I1208 17:42:49.185739 1 request.go:752] "Waited before sending request" delay="5.598690826s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/apis/autoscaling.openshift.io/v1beta1/machineautoscalers?limit=500&resourceVersion=0" 2025-12-08T17:42:49.187749288+00:00 stderr F I1208 17:42:49.187632 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:49.388687889+00:00 stderr F I1208 17:42:49.388520 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:49.587817600+00:00 stderr F I1208 17:42:49.587632 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:49.788331429+00:00 stderr F I1208 17:42:49.788237 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:49.989308572+00:00 stderr F I1208 17:42:49.989182 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:50.188015502+00:00 stderr F I1208 17:42:50.187910 1 reflector.go:430] "Caches populated" 
type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:50.385641822+00:00 stderr F I1208 17:42:50.385499 1 request.go:752] "Waited before sending request" delay="6.784955064s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/apis/k8s.cni.cncf.io/v1/network-attachment-definitions?limit=500&resourceVersion=0" 2025-12-08T17:42:50.388538892+00:00 stderr F I1208 17:42:50.388462 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:50.587520399+00:00 stderr F I1208 17:42:50.587411 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:50.618987877+00:00 stderr F I1208 17:42:50.618872 1 reconciliation_controller.go:149] Caches are synced 2025-12-08T17:42:50.687313351+00:00 stderr F I1208 17:42:50.687195 1 reconciliation_controller.go:224] synced cluster resource quota controller 2025-12-08T17:42:51.231738182+00:00 stderr F E1208 17:42:51.231630 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ImageStream" 2025-12-08T17:42:58.888367123+00:00 stderr F E1208 17:42:58.888292 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ImageStream" 2025-12-08T17:43:18.620173201+00:00 stderr F E1208 17:43:18.620007 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ImageStream" 2025-12-08T17:43:20.697712330+00:00 stderr F I1208 17:43:20.697560 1 reconciliation_controller.go:171] error occurred GetQuotableResources err=unable to retrieve the complete list of server APIs: apps.openshift.io/v1: stale GroupVersion discovery: apps.openshift.io/v1, authorization.openshift.io/v1: stale GroupVersion discovery: authorization.openshift.io/v1, build.openshift.io/v1: stale GroupVersion discovery: build.openshift.io/v1, image.openshift.io/v1: stale GroupVersion discovery: image.openshift.io/v1, oauth.openshift.io/v1: stale GroupVersion discovery: oauth.openshift.io/v1, packages.operators.coreos.com/v1: stale GroupVersion discovery: packages.operators.coreos.com/v1, project.openshift.io/v1: stale GroupVersion discovery: project.openshift.io/v1, quota.openshift.io/v1: stale GroupVersion discovery: quota.openshift.io/v1, route.openshift.io/v1: stale GroupVersion discovery: route.openshift.io/v1, security.openshift.io/v1: stale GroupVersion discovery: security.openshift.io/v1, template.openshift.io/v1: stale GroupVersion discovery: template.openshift.io/v1, user.openshift.io/v1: stale GroupVersion discovery: user.openshift.io/v1 2025-12-08T17:43:20.697712330+00:00 stderr F E1208 17:43:20.697687 1 reconciliation_controller.go:172] "Unhandled Error" err="unable to retrieve the complete list of server APIs: apps.openshift.io/v1: stale GroupVersion discovery: apps.openshift.io/v1, authorization.openshift.io/v1: stale GroupVersion discovery: 
authorization.openshift.io/v1, build.openshift.io/v1: stale GroupVersion discovery: build.openshift.io/v1, image.openshift.io/v1: stale GroupVersion discovery: image.openshift.io/v1, oauth.openshift.io/v1: stale GroupVersion discovery: oauth.openshift.io/v1, packages.operators.coreos.com/v1: stale GroupVersion discovery: packages.operators.coreos.com/v1, project.openshift.io/v1: stale GroupVersion discovery: project.openshift.io/v1, quota.openshift.io/v1: stale GroupVersion discovery: quota.openshift.io/v1, route.openshift.io/v1: stale GroupVersion discovery: route.openshift.io/v1, security.openshift.io/v1: stale GroupVersion discovery: security.openshift.io/v1, template.openshift.io/v1: stale GroupVersion discovery: template.openshift.io/v1, user.openshift.io/v1: stale GroupVersion discovery: user.openshift.io/v1" 2025-12-08T17:43:50.709541950+00:00 stderr F I1208 17:43:50.709410 1 reconciliation_controller.go:171] error occurred GetQuotableResources err=unable to retrieve the complete list of server APIs: apps.openshift.io/v1: stale GroupVersion discovery: apps.openshift.io/v1, authorization.openshift.io/v1: stale GroupVersion discovery: authorization.openshift.io/v1, build.openshift.io/v1: stale GroupVersion discovery: build.openshift.io/v1, image.openshift.io/v1: stale GroupVersion discovery: image.openshift.io/v1, oauth.openshift.io/v1: stale GroupVersion discovery: oauth.openshift.io/v1, packages.operators.coreos.com/v1: stale GroupVersion discovery: packages.operators.coreos.com/v1, project.openshift.io/v1: stale GroupVersion discovery: project.openshift.io/v1, quota.openshift.io/v1: stale GroupVersion discovery: quota.openshift.io/v1, route.openshift.io/v1: stale GroupVersion discovery: route.openshift.io/v1, security.openshift.io/v1: stale GroupVersion discovery: security.openshift.io/v1, template.openshift.io/v1: stale GroupVersion discovery: template.openshift.io/v1, user.openshift.io/v1: stale GroupVersion discovery: user.openshift.io/v1 2025-12-08T17:43:50.709620572+00:00 stderr F E1208 17:43:50.709544 1 reconciliation_controller.go:172] "Unhandled Error" err="unable to retrieve the complete list of server APIs: apps.openshift.io/v1: stale GroupVersion discovery: apps.openshift.io/v1, authorization.openshift.io/v1: stale GroupVersion discovery: authorization.openshift.io/v1, build.openshift.io/v1: stale GroupVersion discovery: build.openshift.io/v1, image.openshift.io/v1: stale GroupVersion discovery: image.openshift.io/v1, oauth.openshift.io/v1: stale GroupVersion discovery: oauth.openshift.io/v1, packages.operators.coreos.com/v1: stale GroupVersion discovery: packages.operators.coreos.com/v1, project.openshift.io/v1: stale GroupVersion discovery: project.openshift.io/v1, quota.openshift.io/v1: stale GroupVersion discovery: quota.openshift.io/v1, route.openshift.io/v1: stale GroupVersion discovery: route.openshift.io/v1, security.openshift.io/v1: stale GroupVersion discovery: security.openshift.io/v1, template.openshift.io/v1: stale GroupVersion discovery: template.openshift.io/v1, user.openshift.io/v1: stale GroupVersion discovery: user.openshift.io/v1" 2025-12-08T17:44:04.024151092+00:00 stderr F E1208 17:44:04.024042 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ImageStream" 2025-12-08T17:44:20.718326288+00:00 stderr F I1208 17:44:20.717220 1 reconciliation_controller.go:171] 
error occurred GetQuotableResources err=unable to retrieve the complete list of server APIs: apps.openshift.io/v1: stale GroupVersion discovery: apps.openshift.io/v1, authorization.openshift.io/v1: stale GroupVersion discovery: authorization.openshift.io/v1, build.openshift.io/v1: stale GroupVersion discovery: build.openshift.io/v1, image.openshift.io/v1: stale GroupVersion discovery: image.openshift.io/v1, oauth.openshift.io/v1: stale GroupVersion discovery: oauth.openshift.io/v1, packages.operators.coreos.com/v1: stale GroupVersion discovery: packages.operators.coreos.com/v1, project.openshift.io/v1: stale GroupVersion discovery: project.openshift.io/v1, quota.openshift.io/v1: stale GroupVersion discovery: quota.openshift.io/v1, route.openshift.io/v1: stale GroupVersion discovery: route.openshift.io/v1, security.openshift.io/v1: stale GroupVersion discovery: security.openshift.io/v1, template.openshift.io/v1: stale GroupVersion discovery: template.openshift.io/v1, user.openshift.io/v1: stale GroupVersion discovery: user.openshift.io/v1 2025-12-08T17:44:20.718326288+00:00 stderr F E1208 17:44:20.717313 1 reconciliation_controller.go:172] "Unhandled Error" err="unable to retrieve the complete list of server APIs: apps.openshift.io/v1: stale GroupVersion discovery: apps.openshift.io/v1, authorization.openshift.io/v1: stale GroupVersion discovery: authorization.openshift.io/v1, build.openshift.io/v1: stale GroupVersion discovery: build.openshift.io/v1, image.openshift.io/v1: stale GroupVersion discovery: image.openshift.io/v1, oauth.openshift.io/v1: stale GroupVersion discovery: oauth.openshift.io/v1, packages.operators.coreos.com/v1: stale GroupVersion discovery: packages.operators.coreos.com/v1, project.openshift.io/v1: stale GroupVersion discovery: project.openshift.io/v1, quota.openshift.io/v1: stale GroupVersion discovery: quota.openshift.io/v1, route.openshift.io/v1: stale GroupVersion discovery: route.openshift.io/v1, security.openshift.io/v1: stale GroupVersion discovery: security.openshift.io/v1, template.openshift.io/v1: stale GroupVersion discovery: template.openshift.io/v1, user.openshift.io/v1: stale GroupVersion discovery: user.openshift.io/v1" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.604860 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.604820981 +0000 UTC))" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.604927 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.604913083 +0000 UTC))" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.604944 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 
17:44:30.604933284 +0000 UTC))" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.604960 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.604949414 +0000 UTC))" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.604977 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.604965175 +0000 UTC))" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.604994 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.604982395 +0000 UTC))" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.605025 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.604999215 +0000 UTC))" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.605044 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.605031666 +0000 UTC))" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.605071 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.605049107 +0000 UTC))" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.605093 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.605079868 +0000 UTC))" 
2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.605330 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"kube-controller-manager.openshift-kube-controller-manager.svc\" [serving] validServingFor=[kube-controller-manager.openshift-kube-controller-manager.svc,kube-controller-manager.openshift-kube-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:44:30.605313265 +0000 UTC))" 2025-12-08T17:44:30.607934506+00:00 stderr F I1208 17:44:30.605514 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215745\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215744\" (2025-12-08 16:42:24 +0000 UTC to 2028-12-08 16:42:24 +0000 UTC (now=2025-12-08 17:44:30.60549723 +0000 UTC))" 2025-12-08T17:44:37.679403839+00:00 stderr F I1208 17:44:37.679357 1 reflector.go:430] "Caches populated" type="*v1.ImageStream" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:43.669043810+00:00 stderr F I1208 17:44:43.668979 1 resource_quota_controller.go:476] "syncing resource quota controller with updated resources from discovery" diff="added: [image.openshift.io/v1, Resource=imagestreams], removed: []" 2025-12-08T17:44:43.669081671+00:00 stderr F I1208 17:44:43.669063 1 shared_informer.go:683] "Warning: resync period is smaller than resync check period and the informer has already started. Changing it to the resync check period" resyncPeriod="6m46.316157812s" resyncCheckPeriod="10m0s" 2025-12-08T17:44:43.669163613+00:00 stderr F I1208 17:44:43.669140 1 shared_informer.go:350] "Waiting for caches to sync" controller="resource quota" 2025-12-08T17:44:43.669163613+00:00 stderr F I1208 17:44:43.669152 1 shared_informer.go:357] "Caches are synced" controller="resource quota" 2025-12-08T17:44:43.669163613+00:00 stderr F I1208 17:44:43.669157 1 resource_quota_controller.go:502] "synced quota controller" 2025-12-08T17:44:43.685027075+00:00 stderr F I1208 17:44:43.684975 1 shared_informer.go:357] "Caches are synced" controller="resource quota" 2025-12-08T17:44:50.726463703+00:00 stderr F I1208 17:44:50.726401 1 reconciliation_controller.go:207] syncing resource quota controller with updated resources from discovery: added: [apps.openshift.io/v1, Resource=deploymentconfigs authorization.openshift.io/v1, Resource=rolebindingrestrictions build.openshift.io/v1, Resource=buildconfigs build.openshift.io/v1, Resource=builds image.openshift.io/v1, Resource=imagestreams route.openshift.io/v1, Resource=routes template.openshift.io/v1, Resource=templateinstances template.openshift.io/v1, Resource=templates], removed: [] 2025-12-08T17:44:50.726594236+00:00 stderr F I1208 17:44:50.726564 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="deploymentconfigs.apps.openshift.io" 2025-12-08T17:44:50.726637167+00:00 stderr F I1208 17:44:50.726621 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="templates.template.openshift.io" 2025-12-08T17:44:50.726668008+00:00 stderr F I1208 17:44:50.726652 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" 
resource="rolebindingrestrictions.authorization.openshift.io" 2025-12-08T17:44:50.726690709+00:00 stderr F I1208 17:44:50.726676 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="templateinstances.template.openshift.io" 2025-12-08T17:44:50.726698799+00:00 stderr F I1208 17:44:50.726691 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="routes.route.openshift.io" 2025-12-08T17:44:50.726721040+00:00 stderr F I1208 17:44:50.726709 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="buildconfigs.build.openshift.io" 2025-12-08T17:44:50.726736170+00:00 stderr F I1208 17:44:50.726725 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="builds.build.openshift.io" 2025-12-08T17:44:50.728831249+00:00 stderr F I1208 17:44:50.728803 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:50.731455072+00:00 stderr F I1208 17:44:50.731411 1 warnings.go:110] "Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+" 2025-12-08T17:44:50.731491713+00:00 stderr F I1208 17:44:50.731474 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:50.731594636+00:00 stderr F I1208 17:44:50.731544 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:50.731610026+00:00 stderr F I1208 17:44:50.731595 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:50.731667638+00:00 stderr F I1208 17:44:50.731549 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:50.732674145+00:00 stderr F I1208 17:44:50.732619 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:50.732872731+00:00 stderr F I1208 17:44:50.732843 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:50.733192550+00:00 stderr F I1208 17:44:50.733163 1 warnings.go:110] "Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+" 2025-12-08T17:44:50.827516615+00:00 stderr F I1208 17:44:50.827442 1 reconciliation_controller.go:224] synced cluster resource quota controller 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045543 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.045505684 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045582 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] 
issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.045564246 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045601 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.045589386 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045615 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.045605627 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045630 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.045619647 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045684 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.045634388 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045702 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.045689479 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045720 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.04570826 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045736 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 
UTC (now=2025-12-08 17:45:16.04572499 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045751 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.04574206 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045771 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.045759061 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.045999 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"kube-controller-manager.openshift-kube-controller-manager.svc\" [serving] validServingFor=[kube-controller-manager.openshift-kube-controller-manager.svc,kube-controller-manager.openshift-kube-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:45:16.045982107 +0000 UTC))" 2025-12-08T17:45:16.046329497+00:00 stderr F I1208 17:45:16.046175 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215745\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215744\" (2025-12-08 16:42:24 +0000 UTC to 2028-12-08 16:42:24 +0000 UTC (now=2025-12-08 17:45:16.046161442 +0000 UTC))" 2025-12-08T17:45:50.835572640+00:00 stderr F I1208 17:45:50.835500 1 reconciliation_controller.go:171] error occurred GetQuotableResources err=failed to discover resources: Get "https://api-int.crc.testing:6443/api": dial tcp 38.102.83.243:6443: connect: connection refused 2025-12-08T17:45:50.835572640+00:00 stderr F E1208 17:45:50.835541 1 reconciliation_controller.go:172] "Unhandled Error" err="failed to discover resources: Get \"https://api-int.crc.testing:6443/api\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:46:05.605558751+00:00 stderr F I1208 17:46:05.605494 1 cert_rotation.go:92] "Certificate rotation detected, shutting down client connections to start using new credentials" logger="tls-transport-cache" 2025-12-08T17:46:06.887488929+00:00 stderr F E1208 17:46:06.887430 1 reflector.go:200] "Failed to watch" err="an error on the server (\"Internal Server Error: \\\"/apis/template.openshift.io/v1/templates?allowWatchBookmarks=true&resourceVersion=38787&timeout=5m21s&timeoutSeconds=321&watch=true\\\": Post \\\"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\\\": dial tcp 10.217.4.1:443: connect: connection refused\") has prevented the request from succeeding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.PartialObjectMetadata" 2025-12-08T17:46:06.936571542+00:00 stderr F I1208 17:46:06.936498 1 warnings.go:110] 
"Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:12.167791301+00:00 stderr F E1208 17:46:12.167718 1 reflector.go:200] "Failed to watch" err="an error on the server (\"Internal Server Error: \\\"/apis/template.openshift.io/v1/templateinstances?allowWatchBookmarks=true&resourceVersion=38798&timeout=6m37s&timeoutSeconds=397&watch=true\\\": Post \\\"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\\\": dial tcp 10.217.4.1:443: connect: connection refused\") has prevented the request from succeeding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.PartialObjectMetadata" 2025-12-08T17:46:12.493611251+00:00 stderr F E1208 17:46:12.493523 1 reflector.go:200] "Failed to watch" err="an error on the server (\"Internal Server Error: \\\"/apis/route.openshift.io/v1/routes?allowWatchBookmarks=true&resourceVersion=38787&timeout=9m8s&timeoutSeconds=548&watch=true\\\": Post \\\"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\\\": dial tcp 10.217.4.1:443: connect: connection refused\") has prevented the request from succeeding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.PartialObjectMetadata" 2025-12-08T17:46:12.864663709+00:00 stderr F E1208 17:46:12.864563 1 reflector.go:200] "Failed to watch" err="an error on the server (\"Internal Server Error: \\\"/apis/build.openshift.io/v1/buildconfigs?allowWatchBookmarks=true&resourceVersion=38828&timeout=5m12s&timeoutSeconds=312&watch=true\\\": Post \\\"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\\\": dial tcp 10.217.4.1:443: connect: connection refused\") has prevented the request from succeeding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.PartialObjectMetadata" 2025-12-08T17:46:12.928860726+00:00 stderr F E1208 17:46:12.928795 1 reflector.go:200] "Failed to watch" err="an error on the server (\"Internal Server Error: \\\"/apis/build.openshift.io/v1/builds?allowWatchBookmarks=true&resourceVersion=38785&timeout=9m47s&timeoutSeconds=587&watch=true\\\": Post \\\"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\\\": dial tcp 10.217.4.1:443: connect: connection refused\") has prevented the request from succeeding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.PartialObjectMetadata" 2025-12-08T17:46:13.594060882+00:00 stderr F E1208 17:46:13.593968 1 reflector.go:200] "Failed to watch" err="an error on the server (\"Internal Server Error: \\\"/apis/apps.openshift.io/v1/deploymentconfigs?allowWatchBookmarks=true&resourceVersion=38801&timeout=8m0s&timeoutSeconds=480&watch=true\\\": Post \\\"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\\\": dial tcp 10.217.4.1:443: connect: connection refused\") has prevented the request from succeeding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.PartialObjectMetadata" 2025-12-08T17:46:19.907781040+00:00 stderr F E1208 17:46:19.907687 1 reflector.go:200] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: an error on the server (\"Internal Server Error: \\\"/apis/template.openshift.io/v1/templates?resourceVersion=38787\\\": Post \\\"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\\\": dial tcp 10.217.4.1:443: connect: connection refused\") has prevented the request from succeeding" 
reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.PartialObjectMetadata" 2025-12-08T17:46:20.092387462+00:00 stderr F I1208 17:46:20.092257 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:23.094825811+00:00 stderr F I1208 17:46:23.094742 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:23.399645941+00:00 stderr F I1208 17:46:23.399566 1 reflector.go:430] "Caches populated" type="*v1.NetworkPolicy" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:23.623335865+00:00 stderr F I1208 17:46:23.623265 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:23.623705036+00:00 stderr F I1208 17:46:23.623606 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve role from role ref: role.rbac.authorization.k8s.io "external-health-monitor-controller-cfg" not found 2025-12-08T17:46:23.623705036+00:00 stderr F I1208 17:46:23.623633 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve role from role ref: role.rbac.authorization.k8s.io "external-health-monitor-controller-cfg" not found 2025-12-08T17:46:24.042421885+00:00 stderr F I1208 17:46:24.042359 1 reflector.go:430] "Caches populated" type="*v1.Lease" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:24.631040812+00:00 stderr F I1208 17:46:24.630936 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:24.910840861+00:00 stderr F I1208 17:46:24.910762 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:24.922339116+00:00 stderr F I1208 17:46:24.922273 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:25.007954255+00:00 stderr F I1208 17:46:25.007829 1 reflector.go:430] "Caches populated" type="*v1.ReplicationController" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:25.063759010+00:00 stderr F I1208 17:46:25.063687 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:25.179369501+00:00 stderr F I1208 17:46:25.179298 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:25.278649610+00:00 stderr F E1208 17:46:25.278604 1 reflector.go:200] "Failed to watch" err="failed to list *v1.PartialObjectMetadata: an error on the server (\"Internal Server Error: \\\"/apis/template.openshift.io/v1/templateinstances?resourceVersion=38798\\\": Post \\\"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\\\": dial tcp 10.217.4.1:443: connect: connection refused\") has prevented the request from succeeding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.PartialObjectMetadata" 2025-12-08T17:46:25.481182549+00:00 stderr F I1208 17:46:25.480842 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" 
reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:25.498349404+00:00 stderr F I1208 17:46:25.498255 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:25.503344935+00:00 stderr F I1208 17:46:25.503282 1 reflector.go:430] "Caches populated" type="*v1.ControllerRevision" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:25.595559983+00:00 stderr F I1208 17:46:25.595508 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:25.926863767+00:00 stderr F I1208 17:46:25.926823 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:26.064584101+00:00 stderr F I1208 17:46:26.064460 1 reflector.go:430] "Caches populated" type="*v1.LimitRange" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:26.313972437+00:00 stderr F I1208 17:46:26.313866 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:26.870034037+00:00 stderr F I1208 17:46:26.869962 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:26.985288086+00:00 stderr F I1208 17:46:26.985195 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:27.103743412+00:00 stderr F I1208 17:46:27.103653 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:27.297318893+00:00 stderr F I1208 17:46:27.297211 1 reflector.go:430] "Caches populated" type="*v1.StatefulSet" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:27.341095126+00:00 stderr F I1208 17:46:27.340394 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:27.416408107+00:00 stderr F I1208 17:46:27.416348 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:27.669324818+00:00 stderr F I1208 17:46:27.669267 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:27.681312038+00:00 stderr F I1208 17:46:27.681241 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:27.838229118+00:00 stderr F I1208 17:46:27.838139 1 reflector.go:430] "Caches populated" type="*v1.CronJob" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:28.028481799+00:00 stderr F I1208 17:46:28.028337 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:28.207437500+00:00 stderr F I1208 17:46:28.207371 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:28.433941118+00:00 stderr F I1208 17:46:28.433829 1 reflector.go:430] 
"Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:28.731335225+00:00 stderr F I1208 17:46:28.731278 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:28.770305455+00:00 stderr F I1208 17:46:28.770157 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:28.839265845+00:00 stderr F I1208 17:46:28.839197 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:28.840623545+00:00 stderr F I1208 17:46:28.840564 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:28.956661009+00:00 stderr F I1208 17:46:28.956586 1 reflector.go:430] "Caches populated" type="*v2.HorizontalPodAutoscaler" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:28.979743271+00:00 stderr F I1208 17:46:28.979672 1 reflector.go:430] "Caches populated" type="*v1.Job" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:29.286846480+00:00 stderr F I1208 17:46:29.286763 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:29.305718405+00:00 stderr F I1208 17:46:29.305644 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:29.545977537+00:00 stderr F I1208 17:46:29.545806 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:29.890543310+00:00 stderr F I1208 17:46:29.890473 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:30.060746439+00:00 stderr F I1208 17:46:30.060652 1 reflector.go:430] "Caches populated" type="*v1.PodTemplate" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:30.068593454+00:00 stderr F I1208 17:46:30.068538 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:30.247710541+00:00 stderr F I1208 17:46:30.247614 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:30.267810244+00:00 stderr F I1208 17:46:30.267715 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:30.285687410+00:00 stderr F I1208 17:46:30.285622 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:30.880624678+00:00 stderr F I1208 17:46:30.880554 1 reflector.go:430] "Caches populated" type="*v1.ResourceQuota" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:30.914995340+00:00 stderr F I1208 17:46:30.914921 1 reflector.go:430] "Caches populated" type="*v1.PersistentVolumeClaim" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 
2025-12-08T17:46:31.073367073+00:00 stderr F I1208 17:46:31.073314 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:31.285697996+00:00 stderr F I1208 17:46:31.285620 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:31.303299895+00:00 stderr F I1208 17:46:31.303086 1 reflector.go:430] "Caches populated" type="*v1.ClusterResourceQuota" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:31.556468234+00:00 stderr F I1208 17:46:31.556409 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:31.563838125+00:00 stderr F I1208 17:46:31.563793 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:31.916352995+00:00 stderr F I1208 17:46:31.916292 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.202095502+00:00 stderr F I1208 17:46:32.202027 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.222784214+00:00 stderr F I1208 17:46:32.222738 1 reflector.go:430] "Caches populated" type="*v1.ReplicaSet" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.256188706+00:00 stderr F I1208 17:46:32.256108 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.291021511+00:00 stderr F I1208 17:46:32.290982 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.428850418+00:00 stderr F I1208 17:46:32.428742 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:32.429313463+00:00 stderr F I1208 17:46:32.429274 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.430590691+00:00 stderr F I1208 17:46:32.430521 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:32.544980765+00:00 stderr F I1208 17:46:32.544597 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.558529541+00:00 stderr F I1208 17:46:32.558420 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.894330950+00:00 stderr F I1208 17:46:32.893612 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.894945659+00:00 stderr F I1208 17:46:32.894757 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "crc-hostpath-external-health-monitor-controller-runner" not found 2025-12-08T17:46:32.894945659+00:00 stderr F I1208 17:46:32.894774 1 
sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "crc-hostpath-external-health-monitor-controller-runner" not found 2025-12-08T17:46:33.012422075+00:00 stderr F I1208 17:46:33.012348 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:33.034241039+00:00 stderr F I1208 17:46:33.034142 1 reflector.go:430] "Caches populated" type="*v1.SecurityContextConstraints" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:33.052247420+00:00 stderr F I1208 17:46:33.052182 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:33.153312534+00:00 stderr F I1208 17:46:33.153226 1 reflector.go:430] "Caches populated" type="*v1.EndpointSlice" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:33.251206542+00:00 stderr F I1208 17:46:33.250671 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:33.690750226+00:00 stderr F I1208 17:46:33.690664 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:34.002215064+00:00 stderr F I1208 17:46:34.002127 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:34.290429745+00:00 stderr F I1208 17:46:34.290383 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:34.389431277+00:00 stderr F I1208 17:46:34.389034 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:34.491491091+00:00 stderr F I1208 17:46:34.491427 1 reflector.go:430] "Caches populated" type="*v1.CSIStorageCapacity" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:34.594749649+00:00 stderr F I1208 17:46:34.594678 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:35.000755946+00:00 stderr F I1208 17:46:35.000688 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:35.160118299+00:00 stderr F I1208 17:46:35.160045 1 reflector.go:430] "Caches populated" type="*v1.PodDisruptionBudget" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:35.896628106+00:00 stderr F I1208 17:46:35.896559 1 reflector.go:430] "Caches populated" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:36.262386795+00:00 stderr F I1208 17:46:36.262343 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:36.844258820+00:00 stderr F I1208 17:46:36.844170 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:36.927421506+00:00 stderr F I1208 17:46:36.927317 1 
reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:37.088489481+00:00 stderr F I1208 17:46:37.088270 1 warnings.go:110] "Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+" 2025-12-08T17:46:37.088489481+00:00 stderr F I1208 17:46:37.088351 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:37.097487901+00:00 stderr F I1208 17:46:37.095286 1 warnings.go:110] "Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+" 2025-12-08T17:46:37.259215916+00:00 stderr F I1208 17:46:37.259132 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:37.711582533+00:00 stderr F I1208 17:46:37.711536 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:37.728372807+00:00 stderr F I1208 17:46:37.728208 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:37.740705907+00:00 stderr F I1208 17:46:37.740647 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:38.029389583+00:00 stderr F I1208 17:46:38.029325 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:38.961128380+00:00 stderr F I1208 17:46:38.961064 1 reflector.go:430] "Caches populated" type="*v1.DaemonSet" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:39.009897574+00:00 stderr F I1208 17:46:39.009786 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:54.558748380+00:00 stderr F I1208 17:46:54.558653 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:59.856633112+00:00 stderr F I1208 17:46:59.856567 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:53:09.100796974+00:00 stderr F I1208 17:53:09.100735 1 warnings.go:110] "Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+" 2025-12-08T17:53:18.677116484+00:00 stderr F I1208 17:53:18.676282 1 podsecurity_label_sync_controller.go:304] no service accounts were found in the "openstack" NS 2025-12-08T17:53:18.677394282+00:00 stderr F I1208 17:53:18.677288 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CreatedSCCRanges' created SCC ranges for openstack namespace 2025-12-08T17:53:19.391039386+00:00 stderr F I1208 17:53:19.390699 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CreatedSCCRanges' created SCC ranges for openstack-operators 
namespace 2025-12-08T17:54:07.436789158+00:00 stderr F I1208 17:54:07.436688 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:54:28.317563191+00:00 stderr F I1208 17:54:28.317294 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CreatedSCCRanges' created SCC ranges for service-telemetry namespace 2025-12-08T17:54:53.169565023+00:00 stderr F I1208 17:54:53.168143 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CreatedSCCRanges' created SCC ranges for cert-manager-operator namespace 2025-12-08T17:54:53.179346566+00:00 stderr F I1208 17:54:53.178547 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CreatedSCCRanges' created SCC ranges for cert-manager-operator namespace 2025-12-08T17:55:11.634720020+00:00 stderr F I1208 17:55:11.634670 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve role from role ref: role.rbac.authorization.k8s.io "observability-operator" not found 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022059 1 reconciliation_controller.go:207] syncing resource quota controller with updated resources from discovery: added: [agent.k8s.elastic.co/v1alpha1, Resource=agents apm.k8s.elastic.co/v1, Resource=apmservers autoscaling.k8s.elastic.co/v1alpha1, Resource=elasticsearchautoscalers beat.k8s.elastic.co/v1beta1, Resource=beats elasticsearch.k8s.elastic.co/v1, Resource=elasticsearches enterprisesearch.k8s.elastic.co/v1, Resource=enterprisesearches kibana.k8s.elastic.co/v1, Resource=kibanas logstash.k8s.elastic.co/v1alpha1, Resource=logstashes maps.k8s.elastic.co/v1alpha1, Resource=elasticmapsservers monitoring.rhobs/v1, Resource=alertmanagers monitoring.rhobs/v1, Resource=podmonitors monitoring.rhobs/v1, Resource=probes monitoring.rhobs/v1, Resource=prometheuses monitoring.rhobs/v1, Resource=prometheusrules monitoring.rhobs/v1, Resource=servicemonitors monitoring.rhobs/v1, Resource=thanosrulers monitoring.rhobs/v1alpha1, Resource=alertmanagerconfigs monitoring.rhobs/v1alpha1, Resource=monitoringstacks monitoring.rhobs/v1alpha1, Resource=prometheusagents monitoring.rhobs/v1alpha1, Resource=scrapeconfigs monitoring.rhobs/v1alpha1, Resource=thanosqueriers observability.openshift.io/v1alpha1, Resource=observabilityinstallers perses.dev/v1alpha1, Resource=perses perses.dev/v1alpha1, Resource=persesdashboards perses.dev/v1alpha1, Resource=persesdatasources stackconfigpolicy.k8s.elastic.co/v1alpha1, Resource=stackconfigpolicies], removed: [] 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022170 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="probes.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022186 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="perses.perses.dev" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022217 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" 
resource="stackconfigpolicies.stackconfigpolicy.k8s.elastic.co" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022234 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="podmonitors.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022255 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="thanosqueriers.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022271 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="prometheuses.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022289 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="enterprisesearches.enterprisesearch.k8s.elastic.co" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022303 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="monitoringstacks.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022317 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="alertmanagers.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022328 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="scrapeconfigs.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022342 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="persesdatasources.perses.dev" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022360 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="logstashes.logstash.k8s.elastic.co" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022373 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="apmservers.apm.k8s.elastic.co" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022386 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="elasticmapsservers.maps.k8s.elastic.co" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022400 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="agents.agent.k8s.elastic.co" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022414 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="prometheusagents.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022427 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="prometheusrules.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022438 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="servicemonitors.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022454 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="thanosrulers.monitoring.rhobs" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022473 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="persesdashboards.perses.dev" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022496 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="beats.beat.k8s.elastic.co" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022518 1 
resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="elasticsearches.elasticsearch.k8s.elastic.co" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022539 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="observabilityinstallers.observability.openshift.io" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022557 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="elasticsearchautoscalers.autoscaling.k8s.elastic.co" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022578 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="kibanas.kibana.k8s.elastic.co" 2025-12-08T17:55:21.022933436+00:00 stderr F I1208 17:55:21.022597 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="alertmanagerconfigs.monitoring.rhobs" 2025-12-08T17:55:21.028933219+00:00 stderr F I1208 17:55:21.025922 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.028933219+00:00 stderr F I1208 17:55:21.025989 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.028933219+00:00 stderr F I1208 17:55:21.025997 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.028933219+00:00 stderr F I1208 17:55:21.026071 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.028933219+00:00 stderr F I1208 17:55:21.026168 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.028933219+00:00 stderr F I1208 17:55:21.026390 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.028933219+00:00 stderr F I1208 17:55:21.026946 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.038975688+00:00 stderr F I1208 17:55:21.034355 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.046343097+00:00 stderr F I1208 17:55:21.046308 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.080065405+00:00 stderr F I1208 17:55:21.080002 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.225117498+00:00 stderr F I1208 17:55:21.225009 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.440247457+00:00 stderr F I1208 17:55:21.439794 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.642400688+00:00 stderr F I1208 17:55:21.642229 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" 
reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:21.827619612+00:00 stderr F I1208 17:55:21.827575 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:22.023917125+00:00 stderr F I1208 17:55:22.023797 1 request.go:752] "Waited before sending request" delay="1.000304219s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/apis/monitoring.rhobs/v1alpha1/thanosqueriers?limit=500&resourceVersion=0" 2025-12-08T17:55:22.034887310+00:00 stderr F I1208 17:55:22.034716 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:22.224980686+00:00 stderr F I1208 17:55:22.224892 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:22.425100851+00:00 stderr F I1208 17:55:22.425063 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:22.624419675+00:00 stderr F I1208 17:55:22.624376 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:22.825197748+00:00 stderr F I1208 17:55:22.825146 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:23.024851741+00:00 stderr F I1208 17:55:23.024806 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:23.226217930+00:00 stderr F I1208 17:55:23.223044 1 request.go:752] "Waited before sending request" delay="2.199434009s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/apis/observability.openshift.io/v1alpha1/observabilityinstallers?limit=500&resourceVersion=0" 2025-12-08T17:55:23.226217930+00:00 stderr F I1208 17:55:23.224182 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:23.425436831+00:00 stderr F I1208 17:55:23.425374 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:23.627758326+00:00 stderr F I1208 17:55:23.627697 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:23.826464067+00:00 stderr F I1208 17:55:23.826239 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:24.026509707+00:00 stderr F I1208 17:55:24.026446 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:24.224850389+00:00 stderr F I1208 17:55:24.223396 1 request.go:752] "Waited before sending request" delay="3.199702632s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/apis/kibana.k8s.elastic.co/v1/kibanas?limit=500&resourceVersion=0" 2025-12-08T17:55:24.224850389+00:00 stderr F I1208 
17:55:24.224790 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:24.323099586+00:00 stderr F I1208 17:55:24.323037 1 reconciliation_controller.go:224] synced cluster resource quota controller 2025-12-08T17:55:39.133208461+00:00 stderr F I1208 17:55:39.133146 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CreatedSCCRanges' created SCC ranges for cert-manager namespace 2025-12-08T17:55:54.330855110+00:00 stderr F I1208 17:55:54.330748 1 reconciliation_controller.go:207] syncing resource quota controller with updated resources from discovery: added: [acme.cert-manager.io/v1, Resource=challenges acme.cert-manager.io/v1, Resource=orders cert-manager.io/v1, Resource=certificaterequests cert-manager.io/v1, Resource=certificates cert-manager.io/v1, Resource=issuers operator.openshift.io/v1alpha1, Resource=istiocsrs], removed: [] 2025-12-08T17:55:54.330932302+00:00 stderr F I1208 17:55:54.330908 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="issuers.cert-manager.io" 2025-12-08T17:55:54.331008524+00:00 stderr F I1208 17:55:54.330964 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="certificaterequests.cert-manager.io" 2025-12-08T17:55:54.331008524+00:00 stderr F I1208 17:55:54.330987 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="challenges.acme.cert-manager.io" 2025-12-08T17:55:54.331008524+00:00 stderr F I1208 17:55:54.331002 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="orders.acme.cert-manager.io" 2025-12-08T17:55:54.331068046+00:00 stderr F I1208 17:55:54.331034 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="certificates.cert-manager.io" 2025-12-08T17:55:54.331090627+00:00 stderr F I1208 17:55:54.331075 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="istiocsrs.operator.openshift.io" 2025-12-08T17:55:54.333803091+00:00 stderr F I1208 17:55:54.333767 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:54.333826402+00:00 stderr F I1208 17:55:54.333813 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:54.333826402+00:00 stderr F I1208 17:55:54.333813 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:54.333915604+00:00 stderr F I1208 17:55:54.333896 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:54.334084549+00:00 stderr F I1208 17:55:54.333867 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:54.334272325+00:00 stderr F I1208 17:55:54.334257 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:55:54.431445881+00:00 stderr F I1208 17:55:54.431388 1 
reconciliation_controller.go:224] synced cluster resource quota controller 2025-12-08T17:56:23.628715678+00:00 stderr F I1208 17:56:23.628099 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve role from role ref: role.rbac.authorization.k8s.io "observability-operator" not found 2025-12-08T17:56:23.628715678+00:00 stderr F I1208 17:56:23.628682 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve role from role ref: role.rbac.authorization.k8s.io "observability-operator" not found 2025-12-08T17:56:23.628768699+00:00 stderr F I1208 17:56:23.628739 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve role from role ref: role.rbac.authorization.k8s.io "external-health-monitor-controller-cfg" not found 2025-12-08T17:56:23.628768699+00:00 stderr F I1208 17:56:23.628749 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve role from role ref: role.rbac.authorization.k8s.io "external-health-monitor-controller-cfg" not found 2025-12-08T17:56:32.900165794+00:00 stderr F I1208 17:56:32.899645 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "crc-hostpath-external-health-monitor-controller-runner" not found 2025-12-08T17:56:32.900165794+00:00 stderr F I1208 17:56:32.900125 1 sccrolecache.go:466] failed to retrieve a role for a rolebinding ref: couldn't retrieve clusterrole from role ref: clusterrole.rbac.authorization.k8s.io "crc-hostpath-external-health-monitor-controller-runner" not found 2025-12-08T17:56:54.448220809+00:00 stderr F I1208 17:56:54.448138 1 reconciliation_controller.go:207] syncing resource quota controller with updated resources from discovery: added: [infra.watch/v1beta1, Resource=servicetelemetrys interconnectedcloud.github.io/v1alpha1, Resource=interconnects smartgateway.infra.watch/v2, Resource=smartgateways], removed: [] 2025-12-08T17:56:54.448347183+00:00 stderr F I1208 17:56:54.448311 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="servicetelemetrys.infra.watch" 2025-12-08T17:56:54.448374963+00:00 stderr F I1208 17:56:54.448360 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="interconnects.interconnectedcloud.github.io" 2025-12-08T17:56:54.448386314+00:00 stderr F I1208 17:56:54.448379 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" resource="smartgateways.smartgateway.infra.watch" 2025-12-08T17:56:54.449668648+00:00 stderr F I1208 17:56:54.449647 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:56:54.449856163+00:00 stderr F I1208 17:56:54.449839 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:56:54.450082578+00:00 stderr F I1208 17:56:54.450062 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:56:54.549571973+00:00 stderr F I1208 17:56:54.548799 1 reconciliation_controller.go:224] synced cluster resource quota controller 2025-12-08T18:01:32.106424899+00:00 stderr F I1208 18:01:32.106358 1 warnings.go:110] "Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+" 
2025-12-08T18:02:09.440347209+00:00 stderr F I1208 18:02:09.440263 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice"
2025-12-08T18:02:40.556985312+00:00 stderr F I1208 18:02:40.550026 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CreatedSCCRanges' created SCC ranges for openshift-must-gather-gctth namespace
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-recovery-controller/0.log
2025-12-08T17:42:25.331815974+00:00 stderr F + timeout 3m /bin/bash -exuo pipefail -c 'while [ -n "$(ss -Htanop \( sport = 9443 \))" ]; do sleep 1; done'
2025-12-08T17:42:25.336265336+00:00 stderr F ++ ss -Htanop '(' sport = 9443 ')'
2025-12-08T17:42:25.344916938+00:00 stderr F + '[' -n '' ']'
2025-12-08T17:42:25.345626308+00:00 stderr F + exec cluster-kube-controller-manager-operator cert-recovery-controller --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-controller-cert-syncer-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager --listen=0.0.0.0:9443 -v=2
2025-12-08T17:42:25.455790784+00:00 stderr F W1208 17:42:25.455558 1 cmd.go:257] Using insecure, self-signed certificates
2025-12-08T17:42:25.456131959+00:00 stderr F I1208 17:42:25.456055 1 crypto.go:594] Generating new CA for cert-recovery-controller-signer@1765215745 cert, and key in /tmp/serving-cert-1289519157/serving-signer.crt, /tmp/serving-cert-1289519157/serving-signer.key
2025-12-08T17:42:25.456131959+00:00 stderr F Validity period of the certificate for "cert-recovery-controller-signer@1765215745" is unset, resetting to 43800h0m0s!
2025-12-08T17:42:26.007994676+00:00 stderr F I1208 17:42:26.007898 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.
2025-12-08T17:42:26.008712565+00:00 stderr F I1208 17:42:26.008673 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:42:26.008712565+00:00 stderr F I1208 17:42:26.008689 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:42:26.008712565+00:00 stderr F I1208 17:42:26.008694 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:42:26.008712565+00:00 stderr F I1208 17:42:26.008698 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:42:26.008712565+00:00 stderr F I1208 17:42:26.008701 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:42:26.009097414+00:00 stderr F I1208 17:42:26.008624 1 observer_polling.go:159] Starting file observer 2025-12-08T17:42:26.011348607+00:00 stderr F W1208 17:42:26.011300 1 builder.go:272] unable to get owner reference (falling back to namespace): Get "https://localhost:6443/api/v1/namespaces/openshift-kube-controller-manager/pods": dial tcp [::1]:6443: connect: connection refused 2025-12-08T17:42:26.011455157+00:00 stderr F I1208 17:42:26.011425 1 builder.go:304] cert-recovery-controller version v0.0.0-unknown-afdae35-afdae35 2025-12-08T17:42:26.012530743+00:00 stderr F W1208 17:42:26.012488 1 builder.go:364] unable to get control plane topology, using HA cluster values for leader election: Get "https://localhost:6443/apis/config.openshift.io/v1/infrastructures/cluster": dial tcp [::1]:6443: connect: connection refused 2025-12-08T17:42:26.012588963+00:00 stderr F I1208 17:42:26.012527 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"openshift-kube-controller-manager", Name:"openshift-kube-controller-manager", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ControlPlaneTopology' unable to get control plane topology, using HA cluster values for leader election: Get "https://localhost:6443/apis/config.openshift.io/v1/infrastructures/cluster": dial tcp [::1]:6443: connect: connection refused 2025-12-08T17:42:26.012816112+00:00 stderr F I1208 17:42:26.012776 1 leaderelection.go:257] attempting to acquire leader lease openshift-kube-controller-manager/cert-recovery-controller-lock... 
2025-12-08T17:42:26.013569349+00:00 stderr F E1208 17:42:26.013523 1 leaderelection.go:436] error retrieving resource lock openshift-kube-controller-manager/cert-recovery-controller-lock: Get "https://localhost:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cert-recovery-controller-lock?timeout=1m47s": dial tcp [::1]:6443: connect: connection refused 2025-12-08T17:42:26.017898687+00:00 stderr F E1208 17:42:26.017810 1 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://localhost:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp [::1]:6443: connect: connection refused" event="&Event{ObjectMeta:{openshift-kube-controller-manager.187f4e5e54ffa173 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Namespace,Namespace:openshift-kube-controller-manager,Name:openshift-kube-controller-manager,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:ControlPlaneTopology,Message:unable to get control plane topology, using HA cluster values for leader election: Get \"https://localhost:6443/apis/config.openshift.io/v1/infrastructures/cluster\": dial tcp [::1]:6443: connect: connection refused,Source:EventSource{Component:cert-recovery-controller,Host:,},FirstTimestamp:2025-12-08 17:42:26.012455283 +0000 UTC m=+0.662880907,LastTimestamp:2025-12-08 17:42:26.012455283 +0000 UTC m=+0.662880907,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:cert-recovery-controller,ReportingInstance:,}" 2025-12-08T17:42:57.451968841+00:00 stderr F I1208 17:42:57.451855 1 leaderelection.go:271] successfully acquired lease openshift-kube-controller-manager/cert-recovery-controller-lock 2025-12-08T17:42:57.452197497+00:00 stderr F I1208 17:42:57.452131 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-kube-controller-manager", Name:"cert-recovery-controller-lock", UID:"f1b2756b-1b18-4a56-a8df-d7fc2bbd1acf", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"36283", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' crc_8d3258e8-8dcc-41dc-b56c-cd508401927c became leader 2025-12-08T17:42:57.453921425+00:00 stderr F I1208 17:42:57.453801 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:42:57.456997268+00:00 stderr F I1208 17:42:57.456893 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.458399856+00:00 stderr F I1208 17:42:57.458280 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"openshift-kube-controller-manager", Name:"openshift-kube-controller-manager", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", 
"ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:42:57.459916618+00:00 stderr F I1208 17:42:57.458270 1 cmd.go:122] FeatureGates initialized: knownFeatureGates=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints 
GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:42:57.460457112+00:00 stderr F I1208 17:42:57.460355 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.461840710+00:00 stderr F I1208 17:42:57.461753 1 csrcontroller.go:103] Starting CSR controller 2025-12-08T17:42:57.461840710+00:00 stderr F I1208 17:42:57.461788 1 shared_informer.go:350] "Waiting for caches to sync" controller="CSRController" 2025-12-08T17:42:57.462208271+00:00 stderr F I1208 17:42:57.462142 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:42:57.463828514+00:00 stderr F I1208 17:42:57.463744 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.463828514+00:00 stderr F I1208 17:42:57.463796 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.466303112+00:00 stderr F I1208 17:42:57.466227 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.467828274+00:00 stderr F I1208 17:42:57.467714 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.470012803+00:00 stderr F I1208 17:42:57.469855 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.485511146+00:00 stderr F I1208 17:42:57.485433 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=kubecontrollermanagers" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.489148105+00:00 stderr F I1208 17:42:57.489076 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.497574395+00:00 stderr F I1208 17:42:57.497469 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.501078020+00:00 stderr F I1208 17:42:57.500961 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:42:57.562086134+00:00 stderr F I1208 17:42:57.562003 1 shared_informer.go:357] "Caches are synced" controller="CSRController" 2025-12-08T17:42:57.562086134+00:00 stderr F I1208 
17:42:57.562077 1 base_controller.go:76] Waiting for caches to sync for kube-controller-manager 2025-12-08T17:42:57.562129726+00:00 stderr F I1208 17:42:57.562086 1 base_controller.go:82] Caches are synced for kube-controller-manager 2025-12-08T17:42:57.562129726+00:00 stderr F I1208 17:42:57.562093 1 base_controller.go:119] Starting #1 worker of kube-controller-manager controller ... 2025-12-08T17:42:57.562801274+00:00 stderr F I1208 17:42:57.562741 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:42:57.562801274+00:00 stderr F I1208 17:42:57.562777 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:44:35.632035688+00:00 stderr F I1208 17:44:35.631985 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"openshift-kube-controller-manager", Name:"openshift-kube-controller-manager", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/kube-controller-manager-client-cert-key -n openshift-kube-controller-manager because it changed 2025-12-08T17:45:59.496798304+00:00 stderr F E1208 17:45:59.496636 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://localhost:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cert-recovery-controller-lock?timeout=1m47s": dial tcp [::1]:6443: connect: connection refused, falling back to slow path 2025-12-08T17:45:59.498088533+00:00 stderr F E1208 17:45:59.498013 1 leaderelection.go:436] error retrieving resource lock openshift-kube-controller-manager/cert-recovery-controller-lock: Get "https://localhost:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-controller-manager/leases/cert-recovery-controller-lock?timeout=1m47s": dial tcp [::1]:6443: connect: connection refused 2025-12-08T17:46:23.475722284+00:00 stderr F I1208 17:46:23.475631 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:24.992568014+00:00 stderr F I1208 17:46:24.992443 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:26.899477841+00:00 stderr F I1208 17:46:26.899402 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:27.755927567+00:00 stderr F I1208 17:46:27.755820 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:28.819122661+00:00 stderr F I1208 17:46:28.819055 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=kubecontrollermanagers" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:29.288729816+00:00 stderr F I1208 17:46:29.288161 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:31.364795591+00:00 stderr F I1208 17:46:31.364732 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.538545421+00:00 stderr F I1208 17:46:32.538454 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:32.701568235+00:00 stderr F I1208 17:46:32.701500 1 reflector.go:430] "Caches populated" 
type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:33.538322260+00:00 stderr F I1208 17:46:33.538222 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:37.519467798+00:00 stderr F I1208 17:46:37.519381 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" ././@LongLink0000644000000000000000000000030600000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-control0000755000175000017500000000000015115611521033052 5ustar zuulzuul././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/2.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-control0000644000175000017500000324455515115611514033100 0ustar zuulzuul2025-12-08T17:47:02.745819211+00:00 stderr F + timeout 3m /bin/bash -exuo pipefail -c 'while [ -n "$(ss -Htanop \( sport = 10257 \))" ]; do sleep 1; done' 2025-12-08T17:47:02.752006426+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:47:02.767827984+00:00 stderr F + '[' -n '' ']' 2025-12-08T17:47:02.768894228+00:00 stderr F + '[' -f /etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt ']' 2025-12-08T17:47:02.768983551+00:00 stdout F Copying system trust bundle 2025-12-08T17:47:02.768991701+00:00 stderr F + echo 'Copying system trust bundle' 2025-12-08T17:47:02.768991701+00:00 stderr F + cp -f /etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 2025-12-08T17:47:02.772494821+00:00 stderr F + '[' -f /etc/kubernetes/static-pod-resources/configmaps/cloud-config/ca-bundle.pem ']' 2025-12-08T17:47:02.772921044+00:00 stderr P + exec hyperkube kube-controller-manager --openshift-config=/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --authentication-kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --authorization-kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --client-ca-file=/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt --requestheader-client-ca-file=/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt -v=2 --tls-cert-file=/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt --tls-private-key-file=/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key --allocate-node-cidrs=false --cert-dir=/var/run/kubernetes --cloud-provider=external --cluster-cidr=10.217.0.0/22 --cluster-name=crc-rzkkk --cluster-signing-cert-file=/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt --cluster-signing-duration=720h --cluster-signing-key-file=/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key '--controllers=*' --controllers=-bootstrapsigner 
--controllers=-tokencleaner --controllers=-ttl --controllers=selinux-warning-controller --enable-dynamic-provisioning=true --feature-gates=AWSClusterHostedDNS=false --feature-gates=AWSClusterHostedDNSInstall=false --feature-gates=AWSDedicatedHosts=false --feature-gates=AWSServiceLBNetworkSecurityGroup=false --feature-gates=AdditionalRoutingCapabilities=true --feature-gates=AdminNetworkPolicy=true --feature-gates=AlibabaPlatform=true --feature-gates=AutomatedEtcdBackup=false --feature-gates=AzureClusterHostedDNSInstall=false --feature-gates=AzureDedicatedHosts=false --feature-gates=AzureMultiDisk=false --feature-gates=AzureWorkloadIdentity=true --feature-gates=BootImageSkewEnforcement=false --feature-gates=BootcNodeManagement=false --feature-gates=BuildCSIVolumes=true --feature-gates=CPMSMachineNamePrefix=true --feature-gates=ClusterAPIInstall=false --feature-gates=ClusterAPIInstallIBMCloud=false --feature-gates=ClusterMonitoringConfig=false --feature-gates=ClusterVersionOperatorConfiguration=false --feature-gates=ConsolePluginContentSecurityPolicy=true --feature-gates=DNSNameResolver=false --feature-gates=DualReplica=false --feature-gates=DyanmicServiceEndpointIBMCloud=false --feature-gates=DynamicResourceAllocation=false --feature-gates=EtcdBackendQuota=false --feature-gates=EventedPLEG=false --feature-gates=Example2=false --feature-gates=Example=false --feature-gates=ExternalOIDC=false --feature-gates=ExternalOIDCWithUIDAndExtraClaimMappings=false --feature-gates=ExternalSnapshotMetadata=false --feature-gates=GCPClusterHostedDNS=false --feature-gates=GCPClusterHostedDNSInstall=false --feature-gates=GCPCustomAPIEndpoints=false --feature-gates=GCPCustomAPIEndpointsInstall=false --feature-gates=GatewayAPI=true --feature-gates=GatewayAPIController=true --feature-gates=HighlyAvailableArbiter=true --feature-gates=ImageModeStatusReporting=false --feature-gates=ImageStreamImportMode=false --feature-gates=ImageVolume=true --feature-gates=IngressControllerDynamicConfigurationManager=false --feature-gates=IngressControllerLBSubnetsAWS=true --feature-gates=InsightsConfig=false --feature-gates=InsightsConfigAPI=false --feature-gates=InsightsOnDemandDataGather=false --feature-gates=IrreconcilableMachineConfig=false --feature-gates=KMSEncryptionProvider=false --feature-gates=KMSv1=true --feature-gates=MachineAPIMigration=false --feature-gates=MachineAPIOperatorDisableMachineHealthCheckController=false --feature-gates=MachineConfigNodes=true --feature-gates=ManagedBootImages=true --feature-gates=ManagedBootImagesAWS=true --feature-gates=ManagedBootImagesAzure=false --feature-gates=ManagedBootImagesvSphere=false --feature-gates=MaxUnavailableStatefulSet=false --feature-gates=MetricsCollectionProfiles=true --feature-gates=MinimumKubeletVersion=false --feature-gates=MixedCPUsAllocation=false --feature-gates=MultiArchInstallAzure=fals 2025-12-08T17:47:02.772949765+00:00 stderr F e --feature-gates=MultiDiskSetup=false --feature-gates=MutatingAdmissionPolicy=false --feature-gates=NetworkDiagnosticsConfig=true --feature-gates=NetworkLiveMigration=true --feature-gates=NetworkSegmentation=true --feature-gates=NewOLM=true --feature-gates=NewOLMCatalogdAPIV1Metas=false --feature-gates=NewOLMOwnSingleNamespace=false --feature-gates=NewOLMPreflightPermissionChecks=false --feature-gates=NewOLMWebhookProviderOpenshiftServiceCA=false --feature-gates=NoRegistryClusterOperations=false --feature-gates=NodeSwap=false --feature-gates=NutanixMultiSubnets=false --feature-gates=OVNObservability=false 
--feature-gates=OpenShiftPodSecurityAdmission=false --feature-gates=PinnedImages=true --feature-gates=PreconfiguredUDNAddresses=false --feature-gates=ProcMountType=true --feature-gates=RouteAdvertisements=true --feature-gates=RouteExternalCertificate=true --feature-gates=SELinuxMount=false --feature-gates=ServiceAccountTokenNodeBinding=true --feature-gates=SetEIPForNLBIngressController=true --feature-gates=ShortCertRotation=false --feature-gates=SignatureStores=false --feature-gates=SigstoreImageVerification=true --feature-gates=SigstoreImageVerificationPKI=false --feature-gates=StoragePerformantSecurityPolicy=true --feature-gates=TranslateStreamCloseWebsocketRequests=false --feature-gates=UpgradeStatus=true --feature-gates=UserNamespacesPodSecurityStandards=true --feature-gates=UserNamespacesSupport=true --feature-gates=VSphereConfigurableMaxAllowedBlockVolumesPerNode=false --feature-gates=VSphereHostVMGroupZonal=false --feature-gates=VSphereMixedNodeEnv=false --feature-gates=VSphereMultiDisk=true --feature-gates=VSphereMultiNetworks=true --feature-gates=VolumeAttributesClass=false --feature-gates=VolumeGroupSnapshot=false --flex-volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec --kube-api-burst=300 --kube-api-qps=150 --leader-elect-renew-deadline=12s --leader-elect-resource-lock=leases --leader-elect-retry-period=3s --leader-elect=true --pv-recycler-pod-template-filepath-hostpath=/etc/kubernetes/static-pod-resources/configmaps/recycler-config/recycler-pod.yaml --pv-recycler-pod-template-filepath-nfs=/etc/kubernetes/static-pod-resources/configmaps/recycler-config/recycler-pod.yaml --root-ca-file=/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt --secure-port=10257 --service-account-private-key-file=/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key --service-cluster-ip-range=10.217.4.0/23 --use-service-account-credentials=true --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 --tls-min-version=VersionTLS12 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875812 1 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875910 1 feature_gate.go:328] unrecognized feature gate: DualReplica 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875915 1 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875918 1 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875921 1 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875924 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875927 1 feature_gate.go:328] unrecognized feature gate: Example2 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875930 1 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875933 1 feature_gate.go:328] unrecognized feature gate: 
VolumeGroupSnapshot 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875936 1 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875939 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875942 1 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation 2025-12-08T17:47:02.875951248+00:00 stderr F W1208 17:47:02.875944 1 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875947 1 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875951 1 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875954 1 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875957 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875960 1 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875963 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875966 1 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875969 1 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875972 1 feature_gate.go:328] unrecognized feature gate: GatewayAPI 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875975 1 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875978 1 feature_gate.go:328] unrecognized feature gate: DNSNameResolver 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875981 1 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875985 1 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. 
2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875988 1 feature_gate.go:328] unrecognized feature gate: OVNObservability 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.875999 1 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.876002 1 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.876006 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS 2025-12-08T17:47:02.876013750+00:00 stderr F W1208 17:47:02.876008 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure 2025-12-08T17:47:02.876034670+00:00 stderr F W1208 17:47:02.876012 1 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager 2025-12-08T17:47:02.876034670+00:00 stderr F W1208 17:47:02.876015 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall 2025-12-08T17:47:02.876034670+00:00 stderr F W1208 17:47:02.876018 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk 2025-12-08T17:47:02.876034670+00:00 stderr F W1208 17:47:02.876021 1 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy 2025-12-08T17:47:02.876034670+00:00 stderr F W1208 17:47:02.876023 1 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv 2025-12-08T17:47:02.876034670+00:00 stderr F W1208 17:47:02.876026 1 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig 2025-12-08T17:47:02.876034670+00:00 stderr F W1208 17:47:02.876029 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall 2025-12-08T17:47:02.876034670+00:00 stderr F W1208 17:47:02.876032 1 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts 2025-12-08T17:47:02.876046381+00:00 stderr F W1208 17:47:02.876037 1 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration 2025-12-08T17:47:02.876046381+00:00 stderr F W1208 17:47:02.876040 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDC 2025-12-08T17:47:02.876046381+00:00 stderr F W1208 17:47:02.876042 1 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata 2025-12-08T17:47:02.876057201+00:00 stderr F W1208 17:47:02.876045 1 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting 2025-12-08T17:47:02.876057201+00:00 stderr F W1208 17:47:02.876049 1 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform 2025-12-08T17:47:02.876057201+00:00 stderr F W1208 17:47:02.876051 1 feature_gate.go:328] unrecognized feature gate: NewOLM 2025-12-08T17:47:02.876057201+00:00 stderr F W1208 17:47:02.876054 1 feature_gate.go:328] unrecognized feature gate: UpgradeStatus 2025-12-08T17:47:02.876066981+00:00 stderr F W1208 17:47:02.876057 1 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall 2025-12-08T17:47:02.876066981+00:00 stderr F W1208 17:47:02.876061 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImages 2025-12-08T17:47:02.876066981+00:00 stderr F W1208 17:47:02.876064 1 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix 2025-12-08T17:47:02.876076202+00:00 stderr F W1208 17:47:02.876067 1 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses 2025-12-08T17:47:02.876076202+00:00 stderr F W1208 17:47:02.876070 1 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota 2025-12-08T17:47:02.876076202+00:00 stderr F W1208 17:47:02.876073 1 
feature_gate.go:328] unrecognized feature gate: AzureMultiDisk 2025-12-08T17:47:02.876085292+00:00 stderr F W1208 17:47:02.876078 1 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 2025-12-08T17:47:02.876085292+00:00 stderr F W1208 17:47:02.876083 1 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace 2025-12-08T17:47:02.876094122+00:00 stderr F W1208 17:47:02.876086 1 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI 2025-12-08T17:47:02.876094122+00:00 stderr F W1208 17:47:02.876089 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints 2025-12-08T17:47:02.876102893+00:00 stderr F W1208 17:47:02.876092 1 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA 2025-12-08T17:47:02.876102893+00:00 stderr F W1208 17:47:02.876095 1 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas 2025-12-08T17:47:02.876102893+00:00 stderr F W1208 17:47:02.876099 1 feature_gate.go:328] unrecognized feature gate: InsightsConfig 2025-12-08T17:47:02.876117763+00:00 stderr F W1208 17:47:02.876102 1 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS 2025-12-08T17:47:02.876117763+00:00 stderr F W1208 17:47:02.876106 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings 2025-12-08T17:47:02.876117763+00:00 stderr F W1208 17:47:02.876109 1 feature_gate.go:328] unrecognized feature gate: GatewayAPIController 2025-12-08T17:47:02.876117763+00:00 stderr F W1208 17:47:02.876111 1 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode 2025-12-08T17:47:02.876117763+00:00 stderr F W1208 17:47:02.876114 1 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements 2025-12-08T17:47:02.876128383+00:00 stderr F W1208 17:47:02.876117 1 feature_gate.go:328] unrecognized feature gate: ShortCertRotation 2025-12-08T17:47:02.876128383+00:00 stderr F W1208 17:47:02.876120 1 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure 2025-12-08T17:47:02.876128383+00:00 stderr F W1208 17:47:02.876123 1 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities 2025-12-08T17:47:02.876128383+00:00 stderr F W1208 17:47:02.876125 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall 2025-12-08T17:47:02.876137914+00:00 stderr F W1208 17:47:02.876128 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS 2025-12-08T17:47:02.876137914+00:00 stderr F W1208 17:47:02.876131 1 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity 2025-12-08T17:47:02.876137914+00:00 stderr F W1208 17:47:02.876134 1 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup 2025-12-08T17:47:02.876147264+00:00 stderr F W1208 17:47:02.876139 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks 2025-12-08T17:47:02.876147264+00:00 stderr F W1208 17:47:02.876142 1 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation 2025-12-08T17:47:02.876147264+00:00 stderr F W1208 17:47:02.876145 1 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement 2025-12-08T17:47:02.876156434+00:00 stderr F W1208 17:47:02.876148 1 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig 2025-12-08T17:47:02.876156434+00:00 stderr F W1208 17:47:02.876151 1 feature_gate.go:328] unrecognized feature gate: PinnedImages 2025-12-08T17:47:02.876156434+00:00 stderr F W1208 17:47:02.876154 1 
feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall 2025-12-08T17:47:02.876165975+00:00 stderr F W1208 17:47:02.876157 1 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission 2025-12-08T17:47:02.876165975+00:00 stderr F W1208 17:47:02.876160 1 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud 2025-12-08T17:47:02.876165975+00:00 stderr F W1208 17:47:02.876163 1 feature_gate.go:328] unrecognized feature gate: Example 2025-12-08T17:47:02.876176765+00:00 stderr F W1208 17:47:02.876166 1 feature_gate.go:328] unrecognized feature gate: SignatureStores 2025-12-08T17:47:02.876176765+00:00 stderr F W1208 17:47:02.876169 1 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration 2025-12-08T17:47:02.876176765+00:00 stderr F W1208 17:47:02.876173 1 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup 2025-12-08T17:47:02.876187085+00:00 stderr F W1208 17:47:02.876177 1 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets 2025-12-08T17:47:02.876187085+00:00 stderr F W1208 17:47:02.876181 1 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration 2025-12-08T17:47:02.876187085+00:00 stderr F W1208 17:47:02.876184 1 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks 2025-12-08T17:47:02.876301929+00:00 stderr F I1208 17:47:02.876282 1 flags.go:64] FLAG: --allocate-node-cidrs="false" 2025-12-08T17:47:02.876301929+00:00 stderr F I1208 17:47:02.876294 1 flags.go:64] FLAG: --allow-metric-labels="[]" 2025-12-08T17:47:02.876315059+00:00 stderr F I1208 17:47:02.876300 1 flags.go:64] FLAG: --allow-metric-labels-manifest="" 2025-12-08T17:47:02.876315059+00:00 stderr F I1208 17:47:02.876305 1 flags.go:64] FLAG: --allow-untagged-cloud="false" 2025-12-08T17:47:02.876315059+00:00 stderr F I1208 17:47:02.876308 1 flags.go:64] FLAG: --attach-detach-reconcile-sync-period="1m0s" 2025-12-08T17:47:02.876324900+00:00 stderr F I1208 17:47:02.876315 1 flags.go:64] FLAG: --authentication-kubeconfig="/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig" 2025-12-08T17:47:02.876324900+00:00 stderr F I1208 17:47:02.876320 1 flags.go:64] FLAG: --authentication-skip-lookup="false" 2025-12-08T17:47:02.876347040+00:00 stderr F I1208 17:47:02.876323 1 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="10s" 2025-12-08T17:47:02.876347040+00:00 stderr F I1208 17:47:02.876327 1 flags.go:64] FLAG: --authentication-tolerate-lookup-failure="false" 2025-12-08T17:47:02.876347040+00:00 stderr F I1208 17:47:02.876330 1 flags.go:64] FLAG: --authorization-always-allow-paths="[/healthz,/readyz,/livez]" 2025-12-08T17:47:02.876347040+00:00 stderr F I1208 17:47:02.876338 1 flags.go:64] FLAG: --authorization-kubeconfig="/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig" 2025-12-08T17:47:02.876347040+00:00 stderr F I1208 17:47:02.876342 1 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="10s" 2025-12-08T17:47:02.876357471+00:00 stderr F I1208 17:47:02.876345 1 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="10s" 2025-12-08T17:47:02.876357471+00:00 stderr F I1208 17:47:02.876349 1 flags.go:64] FLAG: --bind-address="0.0.0.0" 2025-12-08T17:47:02.876357471+00:00 stderr F I1208 17:47:02.876355 1 flags.go:64] FLAG: --cert-dir="/var/run/kubernetes" 2025-12-08T17:47:02.876366561+00:00 stderr F I1208 17:47:02.876358 1 flags.go:64] FLAG: --cidr-allocator-type="RangeAllocator" 
2025-12-08T17:47:02.876366561+00:00 stderr F I1208 17:47:02.876362 1 flags.go:64] FLAG: --client-ca-file="/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 2025-12-08T17:47:02.876375311+00:00 stderr F I1208 17:47:02.876366 1 flags.go:64] FLAG: --cloud-config="" 2025-12-08T17:47:02.876375311+00:00 stderr F I1208 17:47:02.876369 1 flags.go:64] FLAG: --cloud-provider="external" 2025-12-08T17:47:02.876375311+00:00 stderr F I1208 17:47:02.876373 1 flags.go:64] FLAG: --cluster-cidr="10.217.0.0/22" 2025-12-08T17:47:02.876384581+00:00 stderr F I1208 17:47:02.876376 1 flags.go:64] FLAG: --cluster-name="crc-rzkkk" 2025-12-08T17:47:02.876384581+00:00 stderr F I1208 17:47:02.876379 1 flags.go:64] FLAG: --cluster-signing-cert-file="/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt" 2025-12-08T17:47:02.876393312+00:00 stderr F I1208 17:47:02.876383 1 flags.go:64] FLAG: --cluster-signing-duration="720h0m0s" 2025-12-08T17:47:02.876393312+00:00 stderr F I1208 17:47:02.876387 1 flags.go:64] FLAG: --cluster-signing-key-file="/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key" 2025-12-08T17:47:02.876402092+00:00 stderr F I1208 17:47:02.876391 1 flags.go:64] FLAG: --cluster-signing-kube-apiserver-client-cert-file="" 2025-12-08T17:47:02.876402092+00:00 stderr F I1208 17:47:02.876394 1 flags.go:64] FLAG: --cluster-signing-kube-apiserver-client-key-file="" 2025-12-08T17:47:02.876402092+00:00 stderr F I1208 17:47:02.876398 1 flags.go:64] FLAG: --cluster-signing-kubelet-client-cert-file="" 2025-12-08T17:47:02.876411222+00:00 stderr F I1208 17:47:02.876401 1 flags.go:64] FLAG: --cluster-signing-kubelet-client-key-file="" 2025-12-08T17:47:02.876411222+00:00 stderr F I1208 17:47:02.876404 1 flags.go:64] FLAG: --cluster-signing-kubelet-serving-cert-file="" 2025-12-08T17:47:02.876411222+00:00 stderr F I1208 17:47:02.876407 1 flags.go:64] FLAG: --cluster-signing-kubelet-serving-key-file="" 2025-12-08T17:47:02.876426943+00:00 stderr F I1208 17:47:02.876411 1 flags.go:64] FLAG: --cluster-signing-legacy-unknown-cert-file="" 2025-12-08T17:47:02.876426943+00:00 stderr F I1208 17:47:02.876414 1 flags.go:64] FLAG: --cluster-signing-legacy-unknown-key-file="" 2025-12-08T17:47:02.876426943+00:00 stderr F I1208 17:47:02.876417 1 flags.go:64] FLAG: --concurrent-cron-job-syncs="5" 2025-12-08T17:47:02.876426943+00:00 stderr F I1208 17:47:02.876421 1 flags.go:64] FLAG: --concurrent-daemonset-syncs="2" 2025-12-08T17:47:02.876436753+00:00 stderr F I1208 17:47:02.876425 1 flags.go:64] FLAG: --concurrent-deployment-syncs="5" 2025-12-08T17:47:02.876436753+00:00 stderr F I1208 17:47:02.876428 1 flags.go:64] FLAG: --concurrent-endpoint-syncs="5" 2025-12-08T17:47:02.876436753+00:00 stderr F I1208 17:47:02.876431 1 flags.go:64] FLAG: --concurrent-ephemeralvolume-syncs="5" 2025-12-08T17:47:02.876445933+00:00 stderr F I1208 17:47:02.876434 1 flags.go:64] FLAG: --concurrent-gc-syncs="20" 2025-12-08T17:47:02.876445933+00:00 stderr F I1208 17:47:02.876439 1 flags.go:64] FLAG: --concurrent-horizontal-pod-autoscaler-syncs="5" 2025-12-08T17:47:02.876445933+00:00 stderr F I1208 17:47:02.876442 1 flags.go:64] FLAG: --concurrent-job-syncs="5" 2025-12-08T17:47:02.876455074+00:00 stderr F I1208 17:47:02.876446 1 flags.go:64] FLAG: --concurrent-namespace-syncs="10" 2025-12-08T17:47:02.876455074+00:00 stderr F I1208 17:47:02.876449 1 flags.go:64] FLAG: --concurrent-rc-syncs="5" 2025-12-08T17:47:02.876455074+00:00 stderr F I1208 17:47:02.876452 1 flags.go:64] FLAG: --concurrent-replicaset-syncs="5" 
2025-12-08T17:47:02.876464394+00:00 stderr F I1208 17:47:02.876455 1 flags.go:64] FLAG: --concurrent-resource-quota-syncs="5" 2025-12-08T17:47:02.876464394+00:00 stderr F I1208 17:47:02.876459 1 flags.go:64] FLAG: --concurrent-service-endpoint-syncs="5" 2025-12-08T17:47:02.876473144+00:00 stderr F I1208 17:47:02.876462 1 flags.go:64] FLAG: --concurrent-service-syncs="1" 2025-12-08T17:47:02.876473144+00:00 stderr F I1208 17:47:02.876466 1 flags.go:64] FLAG: --concurrent-serviceaccount-token-syncs="5" 2025-12-08T17:47:02.876473144+00:00 stderr F I1208 17:47:02.876469 1 flags.go:64] FLAG: --concurrent-statefulset-syncs="5" 2025-12-08T17:47:02.876482264+00:00 stderr F I1208 17:47:02.876472 1 flags.go:64] FLAG: --concurrent-ttl-after-finished-syncs="5" 2025-12-08T17:47:02.876482264+00:00 stderr F I1208 17:47:02.876476 1 flags.go:64] FLAG: --concurrent-validating-admission-policy-status-syncs="5" 2025-12-08T17:47:02.876482264+00:00 stderr F I1208 17:47:02.876479 1 flags.go:64] FLAG: --configure-cloud-routes="true" 2025-12-08T17:47:02.876491425+00:00 stderr F I1208 17:47:02.876482 1 flags.go:64] FLAG: --contention-profiling="false" 2025-12-08T17:47:02.876491425+00:00 stderr F I1208 17:47:02.876486 1 flags.go:64] FLAG: --controller-start-interval="0s" 2025-12-08T17:47:02.876500215+00:00 stderr F I1208 17:47:02.876489 1 flags.go:64] FLAG: --controllers="[*,-bootstrapsigner,-tokencleaner,-ttl,selinux-warning-controller]" 2025-12-08T17:47:02.876500215+00:00 stderr F I1208 17:47:02.876495 1 flags.go:64] FLAG: --disable-attach-detach-reconcile-sync="false" 2025-12-08T17:47:02.876508795+00:00 stderr F I1208 17:47:02.876498 1 flags.go:64] FLAG: --disable-force-detach-on-timeout="false" 2025-12-08T17:47:02.876508795+00:00 stderr F I1208 17:47:02.876502 1 flags.go:64] FLAG: --disable-http2-serving="false" 2025-12-08T17:47:02.876517396+00:00 stderr F I1208 17:47:02.876505 1 flags.go:64] FLAG: --disabled-metrics="[]" 2025-12-08T17:47:02.876517396+00:00 stderr F I1208 17:47:02.876511 1 flags.go:64] FLAG: --emulated-version="[]" 2025-12-08T17:47:02.876531786+00:00 stderr F I1208 17:47:02.876515 1 flags.go:64] FLAG: --enable-dynamic-provisioning="true" 2025-12-08T17:47:02.876531786+00:00 stderr F I1208 17:47:02.876519 1 flags.go:64] FLAG: --enable-garbage-collector="true" 2025-12-08T17:47:02.876531786+00:00 stderr F I1208 17:47:02.876522 1 flags.go:64] FLAG: --enable-hostpath-provisioner="false" 2025-12-08T17:47:02.876531786+00:00 stderr F I1208 17:47:02.876525 1 flags.go:64] FLAG: --enable-leader-migration="false" 2025-12-08T17:47:02.876531786+00:00 stderr F I1208 17:47:02.876529 1 flags.go:64] FLAG: --endpoint-updates-batch-period="0s" 2025-12-08T17:47:02.876541886+00:00 stderr F I1208 17:47:02.876533 1 flags.go:64] FLAG: --endpointslice-updates-batch-period="0s" 2025-12-08T17:47:02.876541886+00:00 stderr F I1208 17:47:02.876536 1 flags.go:64] FLAG: --external-cloud-volume-plugin="" 2025-12-08T17:47:02.876647490+00:00 stderr F I1208 17:47:02.876539 1 flags.go:64] FLAG: 
--feature-gates=":AWSClusterHostedDNS=false,:AWSClusterHostedDNSInstall=false,:AWSDedicatedHosts=false,:AWSServiceLBNetworkSecurityGroup=false,:AdditionalRoutingCapabilities=true,:AdminNetworkPolicy=true,:AlibabaPlatform=true,:AutomatedEtcdBackup=false,:AzureClusterHostedDNSInstall=false,:AzureDedicatedHosts=false,:AzureMultiDisk=false,:AzureWorkloadIdentity=true,:BootImageSkewEnforcement=false,:BootcNodeManagement=false,:BuildCSIVolumes=true,:CPMSMachineNamePrefix=true,:ClusterAPIInstall=false,:ClusterAPIInstallIBMCloud=false,:ClusterMonitoringConfig=false,:ClusterVersionOperatorConfiguration=false,:ConsolePluginContentSecurityPolicy=true,:DNSNameResolver=false,:DualReplica=false,:DyanmicServiceEndpointIBMCloud=false,:DynamicResourceAllocation=false,:EtcdBackendQuota=false,:EventedPLEG=false,:Example2=false,:Example=false,:ExternalOIDC=false,:ExternalOIDCWithUIDAndExtraClaimMappings=false,:ExternalSnapshotMetadata=false,:GCPClusterHostedDNS=false,:GCPClusterHostedDNSInstall=false,:GCPCustomAPIEndpoints=false,:GCPCustomAPIEndpointsInstall=false,:GatewayAPI=true,:GatewayAPIController=true,:HighlyAvailableArbiter=true,:ImageModeStatusReporting=false,:ImageStreamImportMode=false,:ImageVolume=true,:IngressControllerDynamicConfigurationManager=false,:IngressControllerLBSubnetsAWS=true,:InsightsConfig=false,:InsightsConfigAPI=false,:InsightsOnDemandDataGather=false,:IrreconcilableMachineConfig=false,:KMSEncryptionProvider=false,:KMSv1=true,:MachineAPIMigration=false,:MachineAPIOperatorDisableMachineHealthCheckController=false,:MachineConfigNodes=true,:ManagedBootImages=true,:ManagedBootImagesAWS=true,:ManagedBootImagesAzure=false,:ManagedBootImagesvSphere=false,:MaxUnavailableStatefulSet=false,:MetricsCollectionProfiles=true,:MinimumKubeletVersion=false,:MixedCPUsAllocation=false,:MultiArchInstallAzure=false,:MultiDiskSetup=false,:MutatingAdmissionPolicy=false,:NetworkDiagnosticsConfig=true,:NetworkLiveMigration=true,:NetworkSegmentation=true,:NewOLM=true,:NewOLMCatalogdAPIV1Metas=false,:NewOLMOwnSingleNamespace=false,:NewOLMPreflightPermissionChecks=false,:NewOLMWebhookProviderOpenshiftServiceCA=false,:NoRegistryClusterOperations=false,:NodeSwap=false,:NutanixMultiSubnets=false,:OVNObservability=false,:OpenShiftPodSecurityAdmission=false,:PinnedImages=true,:PreconfiguredUDNAddresses=false,:ProcMountType=true,:RouteAdvertisements=true,:RouteExternalCertificate=true,:SELinuxMount=false,:ServiceAccountTokenNodeBinding=true,:SetEIPForNLBIngressController=true,:ShortCertRotation=false,:SignatureStores=false,:SigstoreImageVerification=true,:SigstoreImageVerificationPKI=false,:StoragePerformantSecurityPolicy=true,:TranslateStreamCloseWebsocketRequests=false,:UpgradeStatus=true,:UserNamespacesPodSecurityStandards=true,:UserNamespacesSupport=true,:VSphereConfigurableMaxAllowedBlockVolumesPerNode=false,:VSphereHostVMGroupZonal=false,:VSphereMixedNodeEnv=false,:VSphereMultiDisk=true,:VSphereMultiNetworks=true,:VolumeAttributesClass=false,:VolumeGroupSnapshot=false" 2025-12-08T17:47:02.876647490+00:00 stderr F I1208 17:47:02.876627 1 flags.go:64] FLAG: --flex-volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" 2025-12-08T17:47:02.876647490+00:00 stderr F I1208 17:47:02.876632 1 flags.go:64] FLAG: --help="false" 2025-12-08T17:47:02.876647490+00:00 stderr F I1208 17:47:02.876636 1 flags.go:64] FLAG: --horizontal-pod-autoscaler-cpu-initialization-period="5m0s" 2025-12-08T17:47:02.876647490+00:00 stderr F I1208 17:47:02.876640 1 flags.go:64] FLAG: 
--horizontal-pod-autoscaler-downscale-stabilization="5m0s" 2025-12-08T17:47:02.876667390+00:00 stderr F I1208 17:47:02.876645 1 flags.go:64] FLAG: --horizontal-pod-autoscaler-initial-readiness-delay="30s" 2025-12-08T17:47:02.876667390+00:00 stderr F I1208 17:47:02.876648 1 flags.go:64] FLAG: --horizontal-pod-autoscaler-sync-period="15s" 2025-12-08T17:47:02.876667390+00:00 stderr F I1208 17:47:02.876652 1 flags.go:64] FLAG: --horizontal-pod-autoscaler-tolerance="0.1" 2025-12-08T17:47:02.876667390+00:00 stderr F I1208 17:47:02.876658 1 flags.go:64] FLAG: --http2-max-streams-per-connection="0" 2025-12-08T17:47:02.876667390+00:00 stderr F I1208 17:47:02.876662 1 flags.go:64] FLAG: --kube-api-burst="300" 2025-12-08T17:47:02.876677770+00:00 stderr F I1208 17:47:02.876666 1 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" 2025-12-08T17:47:02.876677770+00:00 stderr F I1208 17:47:02.876670 1 flags.go:64] FLAG: --kube-api-qps="150" 2025-12-08T17:47:02.876686711+00:00 stderr F I1208 17:47:02.876674 1 flags.go:64] FLAG: --kubeconfig="/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig" 2025-12-08T17:47:02.876686711+00:00 stderr F I1208 17:47:02.876679 1 flags.go:64] FLAG: --large-cluster-size-threshold="50" 2025-12-08T17:47:02.876686711+00:00 stderr F I1208 17:47:02.876683 1 flags.go:64] FLAG: --leader-elect="true" 2025-12-08T17:47:02.876696051+00:00 stderr F I1208 17:47:02.876687 1 flags.go:64] FLAG: --leader-elect-lease-duration="15s" 2025-12-08T17:47:02.876696051+00:00 stderr F I1208 17:47:02.876690 1 flags.go:64] FLAG: --leader-elect-renew-deadline="12s" 2025-12-08T17:47:02.876704751+00:00 stderr F I1208 17:47:02.876694 1 flags.go:64] FLAG: --leader-elect-resource-lock="leases" 2025-12-08T17:47:02.876704751+00:00 stderr F I1208 17:47:02.876697 1 flags.go:64] FLAG: --leader-elect-resource-name="kube-controller-manager" 2025-12-08T17:47:02.876704751+00:00 stderr F I1208 17:47:02.876701 1 flags.go:64] FLAG: --leader-elect-resource-namespace="kube-system" 2025-12-08T17:47:02.876714222+00:00 stderr F I1208 17:47:02.876704 1 flags.go:64] FLAG: --leader-elect-retry-period="3s" 2025-12-08T17:47:02.876714222+00:00 stderr F I1208 17:47:02.876708 1 flags.go:64] FLAG: --leader-migration-config="" 2025-12-08T17:47:02.876714222+00:00 stderr F I1208 17:47:02.876711 1 flags.go:64] FLAG: --legacy-service-account-token-clean-up-period="8760h0m0s" 2025-12-08T17:47:02.876723312+00:00 stderr F I1208 17:47:02.876715 1 flags.go:64] FLAG: --log-flush-frequency="5s" 2025-12-08T17:47:02.876731692+00:00 stderr F I1208 17:47:02.876719 1 flags.go:64] FLAG: --log-json-info-buffer-size="0" 2025-12-08T17:47:02.876731692+00:00 stderr F I1208 17:47:02.876725 1 flags.go:64] FLAG: --log-json-split-stream="false" 2025-12-08T17:47:02.876731692+00:00 stderr F I1208 17:47:02.876729 1 flags.go:64] FLAG: --log-text-info-buffer-size="0" 2025-12-08T17:47:02.876740922+00:00 stderr F I1208 17:47:02.876732 1 flags.go:64] FLAG: --log-text-split-stream="false" 2025-12-08T17:47:02.876740922+00:00 stderr F I1208 17:47:02.876736 1 flags.go:64] FLAG: --logging-format="text" 2025-12-08T17:47:02.876749573+00:00 stderr F I1208 17:47:02.876739 1 flags.go:64] FLAG: --master="" 2025-12-08T17:47:02.876749573+00:00 stderr F I1208 17:47:02.876743 1 flags.go:64] FLAG: --max-endpoints-per-slice="100" 2025-12-08T17:47:02.876749573+00:00 stderr F I1208 17:47:02.876746 1 flags.go:64] FLAG: --min-resync-period="12h0m0s" 2025-12-08T17:47:02.876764023+00:00 stderr F I1208 17:47:02.876750 1 
flags.go:64] FLAG: --mirroring-concurrent-service-endpoint-syncs="5" 2025-12-08T17:47:02.876764023+00:00 stderr F I1208 17:47:02.876754 1 flags.go:64] FLAG: --mirroring-endpointslice-updates-batch-period="0s" 2025-12-08T17:47:02.876764023+00:00 stderr F I1208 17:47:02.876757 1 flags.go:64] FLAG: --mirroring-max-endpoints-per-subset="1000" 2025-12-08T17:47:02.876764023+00:00 stderr F I1208 17:47:02.876761 1 flags.go:64] FLAG: --namespace-sync-period="5m0s" 2025-12-08T17:47:02.876773783+00:00 stderr F I1208 17:47:02.876764 1 flags.go:64] FLAG: --node-cidr-mask-size="0" 2025-12-08T17:47:02.876773783+00:00 stderr F I1208 17:47:02.876767 1 flags.go:64] FLAG: --node-cidr-mask-size-ipv4="0" 2025-12-08T17:47:02.876782604+00:00 stderr F I1208 17:47:02.876771 1 flags.go:64] FLAG: --node-cidr-mask-size-ipv6="0" 2025-12-08T17:47:02.876782604+00:00 stderr F I1208 17:47:02.876775 1 flags.go:64] FLAG: --node-eviction-rate="0.1" 2025-12-08T17:47:02.876782604+00:00 stderr F I1208 17:47:02.876779 1 flags.go:64] FLAG: --node-monitor-grace-period="50s" 2025-12-08T17:47:02.876791804+00:00 stderr F I1208 17:47:02.876782 1 flags.go:64] FLAG: --node-monitor-period="5s" 2025-12-08T17:47:02.876791804+00:00 stderr F I1208 17:47:02.876786 1 flags.go:64] FLAG: --node-startup-grace-period="1m0s" 2025-12-08T17:47:02.876800644+00:00 stderr F I1208 17:47:02.876789 1 flags.go:64] FLAG: --node-sync-period="0s" 2025-12-08T17:47:02.876800644+00:00 stderr F I1208 17:47:02.876793 1 flags.go:64] FLAG: --openshift-config="/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml" 2025-12-08T17:47:02.876800644+00:00 stderr F I1208 17:47:02.876797 1 flags.go:64] FLAG: --permit-address-sharing="false" 2025-12-08T17:47:02.876809945+00:00 stderr F I1208 17:47:02.876801 1 flags.go:64] FLAG: --permit-port-sharing="false" 2025-12-08T17:47:02.876809945+00:00 stderr F I1208 17:47:02.876804 1 flags.go:64] FLAG: --profiling="true" 2025-12-08T17:47:02.876818745+00:00 stderr F I1208 17:47:02.876808 1 flags.go:64] FLAG: --pv-recycler-increment-timeout-nfs="30" 2025-12-08T17:47:02.876818745+00:00 stderr F I1208 17:47:02.876811 1 flags.go:64] FLAG: --pv-recycler-minimum-timeout-hostpath="60" 2025-12-08T17:47:02.876818745+00:00 stderr F I1208 17:47:02.876814 1 flags.go:64] FLAG: --pv-recycler-minimum-timeout-nfs="300" 2025-12-08T17:47:02.876827965+00:00 stderr F I1208 17:47:02.876818 1 flags.go:64] FLAG: --pv-recycler-pod-template-filepath-hostpath="/etc/kubernetes/static-pod-resources/configmaps/recycler-config/recycler-pod.yaml" 2025-12-08T17:47:02.876827965+00:00 stderr F I1208 17:47:02.876823 1 flags.go:64] FLAG: --pv-recycler-pod-template-filepath-nfs="/etc/kubernetes/static-pod-resources/configmaps/recycler-config/recycler-pod.yaml" 2025-12-08T17:47:02.876836945+00:00 stderr F I1208 17:47:02.876827 1 flags.go:64] FLAG: --pv-recycler-timeout-increment-hostpath="30" 2025-12-08T17:47:02.876836945+00:00 stderr F I1208 17:47:02.876831 1 flags.go:64] FLAG: --pvclaimbinder-sync-period="15s" 2025-12-08T17:47:02.876845676+00:00 stderr F I1208 17:47:02.876834 1 flags.go:64] FLAG: --requestheader-allowed-names="[]" 2025-12-08T17:47:02.876845676+00:00 stderr F I1208 17:47:02.876839 1 flags.go:64] FLAG: --requestheader-client-ca-file="/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:47:02.876854436+00:00 stderr F I1208 17:47:02.876843 1 flags.go:64] FLAG: --requestheader-extra-headers-prefix="[x-remote-extra-]" 2025-12-08T17:47:02.876854436+00:00 stderr F I1208 17:47:02.876848 1 flags.go:64] FLAG: 
--requestheader-group-headers="[x-remote-group]" 2025-12-08T17:47:02.876863316+00:00 stderr F I1208 17:47:02.876853 1 flags.go:64] FLAG: --requestheader-uid-headers="[]" 2025-12-08T17:47:02.876863316+00:00 stderr F I1208 17:47:02.876858 1 flags.go:64] FLAG: --requestheader-username-headers="[x-remote-user]" 2025-12-08T17:47:02.876923258+00:00 stderr F I1208 17:47:02.876862 1 flags.go:64] FLAG: --resource-quota-sync-period="5m0s" 2025-12-08T17:47:02.876923258+00:00 stderr F I1208 17:47:02.876865 1 flags.go:64] FLAG: --root-ca-file="/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt" 2025-12-08T17:47:02.876923258+00:00 stderr F I1208 17:47:02.876870 1 flags.go:64] FLAG: --route-reconciliation-period="10s" 2025-12-08T17:47:02.876923258+00:00 stderr F I1208 17:47:02.876896 1 flags.go:64] FLAG: --secondary-node-eviction-rate="0.01" 2025-12-08T17:47:02.876923258+00:00 stderr F I1208 17:47:02.876902 1 flags.go:64] FLAG: --secure-port="10257" 2025-12-08T17:47:02.876923258+00:00 stderr F I1208 17:47:02.876905 1 flags.go:64] FLAG: --service-account-private-key-file="/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key" 2025-12-08T17:47:02.876923258+00:00 stderr F I1208 17:47:02.876910 1 flags.go:64] FLAG: --service-cluster-ip-range="10.217.4.0/23" 2025-12-08T17:47:02.876923258+00:00 stderr F I1208 17:47:02.876913 1 flags.go:64] FLAG: --show-hidden-metrics-for-version="" 2025-12-08T17:47:02.876923258+00:00 stderr F I1208 17:47:02.876917 1 flags.go:64] FLAG: --terminated-pod-gc-threshold="12500" 2025-12-08T17:47:02.876940749+00:00 stderr F I1208 17:47:02.876920 1 flags.go:64] FLAG: --tls-cert-file="/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt" 2025-12-08T17:47:02.876940749+00:00 stderr F I1208 17:47:02.876926 1 flags.go:64] FLAG: --tls-cipher-suites="[TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256]" 2025-12-08T17:47:02.876940749+00:00 stderr F I1208 17:47:02.876936 1 flags.go:64] FLAG: --tls-min-version="VersionTLS12" 2025-12-08T17:47:02.876950429+00:00 stderr F I1208 17:47:02.876941 1 flags.go:64] FLAG: --tls-private-key-file="/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:47:02.876961079+00:00 stderr F I1208 17:47:02.876946 1 flags.go:64] FLAG: --tls-sni-cert-key="[]" 2025-12-08T17:47:02.876969780+00:00 stderr F I1208 17:47:02.876958 1 flags.go:64] FLAG: --unhealthy-zone-threshold="0.55" 2025-12-08T17:47:02.876969780+00:00 stderr F I1208 17:47:02.876962 1 flags.go:64] FLAG: --unsupported-kube-api-over-localhost="false" 2025-12-08T17:47:02.876969780+00:00 stderr F I1208 17:47:02.876966 1 flags.go:64] FLAG: --use-service-account-credentials="true" 2025-12-08T17:47:02.876978970+00:00 stderr F I1208 17:47:02.876969 1 flags.go:64] FLAG: --v="2" 2025-12-08T17:47:02.876978970+00:00 stderr F I1208 17:47:02.876974 1 flags.go:64] FLAG: --version="false" 2025-12-08T17:47:02.876987500+00:00 stderr F I1208 17:47:02.876979 1 flags.go:64] FLAG: --vmodule="" 2025-12-08T17:47:02.877166996+00:00 stderr F W1208 17:47:02.877135 1 feature_gate.go:328] unrecognized feature gate: PinnedImages 2025-12-08T17:47:02.877166996+00:00 stderr F W1208 17:47:02.877144 1 feature_gate.go:328] unrecognized feature gate: 
AWSClusterHostedDNSInstall 2025-12-08T17:47:02.877166996+00:00 stderr F W1208 17:47:02.877148 1 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission 2025-12-08T17:47:02.877166996+00:00 stderr F W1208 17:47:02.877151 1 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud 2025-12-08T17:47:02.877166996+00:00 stderr F W1208 17:47:02.877155 1 feature_gate.go:328] unrecognized feature gate: Example 2025-12-08T17:47:02.877166996+00:00 stderr F W1208 17:47:02.877158 1 feature_gate.go:328] unrecognized feature gate: SignatureStores 2025-12-08T17:47:02.877166996+00:00 stderr F W1208 17:47:02.877161 1 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration 2025-12-08T17:47:02.877166996+00:00 stderr F W1208 17:47:02.877164 1 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup 2025-12-08T17:47:02.877188946+00:00 stderr F W1208 17:47:02.877167 1 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets 2025-12-08T17:47:02.877188946+00:00 stderr F W1208 17:47:02.877171 1 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration 2025-12-08T17:47:02.877188946+00:00 stderr F W1208 17:47:02.877173 1 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks 2025-12-08T17:47:02.877188946+00:00 stderr F W1208 17:47:02.877177 1 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement 2025-12-08T17:47:02.877188946+00:00 stderr F W1208 17:47:02.877180 1 feature_gate.go:328] unrecognized feature gate: DualReplica 2025-12-08T17:47:02.877188946+00:00 stderr F W1208 17:47:02.877183 1 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter 2025-12-08T17:47:02.877188946+00:00 stderr F W1208 17:47:02.877186 1 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController 2025-12-08T17:47:02.877199957+00:00 stderr F W1208 17:47:02.877188 1 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles 2025-12-08T17:47:02.877199957+00:00 stderr F W1208 17:47:02.877192 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI 2025-12-08T17:47:02.877199957+00:00 stderr F W1208 17:47:02.877194 1 feature_gate.go:328] unrecognized feature gate: Example2 2025-12-08T17:47:02.877199957+00:00 stderr F W1208 17:47:02.877197 1 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig 2025-12-08T17:47:02.877209717+00:00 stderr F W1208 17:47:02.877200 1 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot 2025-12-08T17:47:02.877209717+00:00 stderr F W1208 17:47:02.877203 1 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS 2025-12-08T17:47:02.877209717+00:00 stderr F W1208 17:47:02.877207 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud 2025-12-08T17:47:02.877218907+00:00 stderr F W1208 17:47:02.877210 1 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation 2025-12-08T17:47:02.877218907+00:00 stderr F W1208 17:47:02.877214 1 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup 2025-12-08T17:47:02.877227648+00:00 stderr F W1208 17:47:02.877217 1 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather 2025-12-08T17:47:02.877227648+00:00 stderr F W1208 17:47:02.877220 1 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations 2025-12-08T17:47:02.877227648+00:00 stderr F W1208 17:47:02.877223 1 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes 2025-12-08T17:47:02.877237018+00:00 stderr F 
W1208 17:47:02.877226 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere 2025-12-08T17:47:02.877237018+00:00 stderr F W1208 17:47:02.877229 1 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController 2025-12-08T17:47:02.877237018+00:00 stderr F W1208 17:47:02.877233 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification 2025-12-08T17:47:02.877246198+00:00 stderr F W1208 17:47:02.877236 1 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode 2025-12-08T17:47:02.877246198+00:00 stderr F W1208 17:47:02.877239 1 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts 2025-12-08T17:47:02.877246198+00:00 stderr F W1208 17:47:02.877242 1 feature_gate.go:328] unrecognized feature gate: GatewayAPI 2025-12-08T17:47:02.877255408+00:00 stderr F W1208 17:47:02.877245 1 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes 2025-12-08T17:47:02.877255408+00:00 stderr F W1208 17:47:02.877248 1 feature_gate.go:328] unrecognized feature gate: DNSNameResolver 2025-12-08T17:47:02.877255408+00:00 stderr F W1208 17:47:02.877251 1 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy 2025-12-08T17:47:02.877269389+00:00 stderr F W1208 17:47:02.877256 1 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. 2025-12-08T17:47:02.877269389+00:00 stderr F W1208 17:47:02.877260 1 feature_gate.go:328] unrecognized feature gate: OVNObservability 2025-12-08T17:47:02.877269389+00:00 stderr F W1208 17:47:02.877263 1 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal 2025-12-08T17:47:02.877269389+00:00 stderr F W1208 17:47:02.877266 1 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider 2025-12-08T17:47:02.877278919+00:00 stderr F W1208 17:47:02.877269 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS 2025-12-08T17:47:02.877278919+00:00 stderr F W1208 17:47:02.877272 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure 2025-12-08T17:47:02.877278919+00:00 stderr F W1208 17:47:02.877276 1 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager 2025-12-08T17:47:02.877288059+00:00 stderr F W1208 17:47:02.877279 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall 2025-12-08T17:47:02.877288059+00:00 stderr F W1208 17:47:02.877283 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk 2025-12-08T17:47:02.877288059+00:00 stderr F W1208 17:47:02.877285 1 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy 2025-12-08T17:47:02.877297380+00:00 stderr F W1208 17:47:02.877288 1 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv 2025-12-08T17:47:02.877297380+00:00 stderr F W1208 17:47:02.877292 1 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig 2025-12-08T17:47:02.877297380+00:00 stderr F W1208 17:47:02.877295 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall 2025-12-08T17:47:02.877306620+00:00 stderr F W1208 17:47:02.877298 1 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts 2025-12-08T17:47:02.877306620+00:00 stderr F W1208 17:47:02.877302 1 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration 2025-12-08T17:47:02.877315430+00:00 stderr F W1208 17:47:02.877305 1 feature_gate.go:328] unrecognized feature gate: 
ExternalOIDC 2025-12-08T17:47:02.877315430+00:00 stderr F W1208 17:47:02.877308 1 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata 2025-12-08T17:47:02.877315430+00:00 stderr F W1208 17:47:02.877311 1 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting 2025-12-08T17:47:02.877324571+00:00 stderr F W1208 17:47:02.877314 1 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform 2025-12-08T17:47:02.877324571+00:00 stderr F W1208 17:47:02.877317 1 feature_gate.go:328] unrecognized feature gate: NewOLM 2025-12-08T17:47:02.877324571+00:00 stderr F W1208 17:47:02.877321 1 feature_gate.go:328] unrecognized feature gate: UpgradeStatus 2025-12-08T17:47:02.877333721+00:00 stderr F W1208 17:47:02.877325 1 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall 2025-12-08T17:47:02.877333721+00:00 stderr F W1208 17:47:02.877328 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImages 2025-12-08T17:47:02.877333721+00:00 stderr F W1208 17:47:02.877331 1 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix 2025-12-08T17:47:02.877342881+00:00 stderr F W1208 17:47:02.877334 1 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses 2025-12-08T17:47:02.877342881+00:00 stderr F W1208 17:47:02.877337 1 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota 2025-12-08T17:47:02.877342881+00:00 stderr F W1208 17:47:02.877340 1 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk 2025-12-08T17:47:02.877357282+00:00 stderr F W1208 17:47:02.877345 1 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 2025-12-08T17:47:02.877357282+00:00 stderr F W1208 17:47:02.877348 1 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace 2025-12-08T17:47:02.877357282+00:00 stderr F W1208 17:47:02.877352 1 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI 2025-12-08T17:47:02.877366742+00:00 stderr F W1208 17:47:02.877357 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints 2025-12-08T17:47:02.877366742+00:00 stderr F W1208 17:47:02.877360 1 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA 2025-12-08T17:47:02.877366742+00:00 stderr F W1208 17:47:02.877363 1 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas 2025-12-08T17:47:02.877375882+00:00 stderr F W1208 17:47:02.877366 1 feature_gate.go:328] unrecognized feature gate: InsightsConfig 2025-12-08T17:47:02.877375882+00:00 stderr F W1208 17:47:02.877369 1 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS 2025-12-08T17:47:02.877375882+00:00 stderr F W1208 17:47:02.877372 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings 2025-12-08T17:47:02.877384943+00:00 stderr F W1208 17:47:02.877375 1 feature_gate.go:328] unrecognized feature gate: GatewayAPIController 2025-12-08T17:47:02.877384943+00:00 stderr F W1208 17:47:02.877378 1 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode 2025-12-08T17:47:02.877384943+00:00 stderr F W1208 17:47:02.877381 1 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements 2025-12-08T17:47:02.877393973+00:00 stderr F W1208 17:47:02.877384 1 feature_gate.go:328] unrecognized feature gate: ShortCertRotation 2025-12-08T17:47:02.877393973+00:00 stderr F W1208 17:47:02.877387 1 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure 
2025-12-08T17:47:02.877393973+00:00 stderr F W1208 17:47:02.877390 1 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities 2025-12-08T17:47:02.877402993+00:00 stderr F W1208 17:47:02.877393 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall 2025-12-08T17:47:02.877402993+00:00 stderr F W1208 17:47:02.877396 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS 2025-12-08T17:47:02.877402993+00:00 stderr F W1208 17:47:02.877399 1 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity 2025-12-08T17:47:02.877412003+00:00 stderr F W1208 17:47:02.877402 1 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup 2025-12-08T17:47:02.877412003+00:00 stderr F W1208 17:47:02.877406 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks 2025-12-08T17:47:02.877412003+00:00 stderr F W1208 17:47:02.877409 1 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation 2025-12-08T17:47:02.877420964+00:00 stderr F W1208 17:47:02.877412 1 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement 2025-12-08T17:47:02.877420964+00:00 stderr F W1208 17:47:02.877415 1 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig 2025-12-08T17:47:02.879623473+00:00 stderr F I1208 17:47:02.879533 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:47:03.705103209+00:00 stderr F I1208 17:47:03.705035 1 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 2025-12-08T17:47:03.705207302+00:00 stderr F I1208 17:47:03.705114 1 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:47:03.711260002+00:00 stderr F I1208 17:47:03.711180 1 controllermanager.go:203] "Starting" version="v1.33.5" 2025-12-08T17:47:03.711260002+00:00 stderr F I1208 17:47:03.711216 1 controllermanager.go:205] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" 2025-12-08T17:47:03.713043229+00:00 stderr F I1208 17:47:03.712970 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:47:03.713069640+00:00 stderr F I1208 17:47:03.712976 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 2025-12-08T17:47:03.713369209+00:00 stderr F I1208 17:47:03.713315 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:47:03.713494653+00:00 stderr F I1208 17:47:03.713375 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:47:03.713355039 +0000 UTC))" 2025-12-08T17:47:03.713511143+00:00 stderr F I1208 
17:47:03.713485 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:47:03.713470852 +0000 UTC))" 2025-12-08T17:47:03.713571675+00:00 stderr F I1208 17:47:03.713515 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:47:03.713504893 +0000 UTC))" 2025-12-08T17:47:03.713586916+00:00 stderr F I1208 17:47:03.713564 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:47:03.713555325 +0000 UTC))" 2025-12-08T17:47:03.713602066+00:00 stderr F I1208 17:47:03.713585 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:47:03.713576285 +0000 UTC))" 2025-12-08T17:47:03.713644508+00:00 stderr F I1208 17:47:03.713608 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:47:03.713601266 +0000 UTC))" 2025-12-08T17:47:03.713675259+00:00 stderr F I1208 17:47:03.713634 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:47:03.713626127 +0000 UTC))" 2025-12-08T17:47:03.713675259+00:00 stderr F I1208 17:47:03.713657 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:47:03.713649488 +0000 UTC))" 2025-12-08T17:47:03.713695489+00:00 stderr F 
I1208 17:47:03.713680 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:47:03.713671098 +0000 UTC))" 2025-12-08T17:47:03.713757911+00:00 stderr F I1208 17:47:03.713710 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:47:03.713699859 +0000 UTC))" 2025-12-08T17:47:03.714103652+00:00 stderr F I1208 17:47:03.714041 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"kube-controller-manager.openshift-kube-controller-manager.svc\" [serving] validServingFor=[kube-controller-manager.openshift-kube-controller-manager.svc,kube-controller-manager.openshift-kube-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:47:03.714022549 +0000 UTC))" 2025-12-08T17:47:03.714353380+00:00 stderr F I1208 17:47:03.714298 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765216023\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765216023\" (2025-12-08 16:47:02 +0000 UTC to 2028-12-08 16:47:02 +0000 UTC (now=2025-12-08 17:47:03.714281727 +0000 UTC))" 2025-12-08T17:47:03.714369410+00:00 stderr F I1208 17:47:03.714348 1 secure_serving.go:211] Serving securely on [::]:10257 2025-12-08T17:47:03.714464143+00:00 stderr F I1208 17:47:03.714414 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:47:03.715007440+00:00 stderr F I1208 17:47:03.714957 1 leaderelection.go:257] attempting to acquire leader lease kube-system/kube-controller-manager... 
2025-12-08T17:47:21.585485809+00:00 stderr F I1208 17:47:21.585396 1 leaderelection.go:271] successfully acquired lease kube-system/kube-controller-manager 2025-12-08T17:47:21.586209091+00:00 stderr F I1208 17:47:21.586141 1 event.go:389] "Event occurred" object="kube-system/kube-controller-manager" fieldPath="" kind="Lease" apiVersion="coordination.k8s.io/v1" type="Normal" reason="LeaderElection" message="crc_25c703df-b3e6-4595-8ab3-3112c78d3f5b became leader" 2025-12-08T17:47:21.589608458+00:00 stderr F I1208 17:47:21.589536 1 controllermanager.go:796] "Starting controller" controller="serviceaccount-token-controller" 2025-12-08T17:47:21.591694344+00:00 stderr F I1208 17:47:21.591621 1 controllermanager.go:827] "Started controller" controller="serviceaccount-token-controller" 2025-12-08T17:47:21.591694344+00:00 stderr F I1208 17:47:21.591643 1 shared_informer.go:350] "Waiting for caches to sync" controller="tokens" 2025-12-08T17:47:21.591694344+00:00 stderr F I1208 17:47:21.591659 1 controllermanager.go:796] "Starting controller" controller="certificatesigningrequest-signing-controller" 2025-12-08T17:47:21.595962428+00:00 stderr F I1208 17:47:21.595845 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="csr-controller::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key" 2025-12-08T17:47:21.596490064+00:00 stderr F I1208 17:47:21.596402 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-serving" 2025-12-08T17:47:21.596490064+00:00 stderr F I1208 17:47:21.596431 1 shared_informer.go:350] "Waiting for caches to sync" controller="certificate-csrsigning-kubelet-serving" 2025-12-08T17:47:21.596521555+00:00 stderr F I1208 17:47:21.596465 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key" 2025-12-08T17:47:21.597143085+00:00 stderr F I1208 17:47:21.596996 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="csr-controller::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key" 2025-12-08T17:47:21.597574229+00:00 stderr F I1208 17:47:21.597498 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-client" 2025-12-08T17:47:21.597574229+00:00 stderr F I1208 17:47:21.597517 1 shared_informer.go:350] "Waiting for caches to sync" controller="certificate-csrsigning-kubelet-client" 2025-12-08T17:47:21.597574229+00:00 stderr F I1208 17:47:21.597537 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key" 2025-12-08T17:47:21.598230609+00:00 stderr F I1208 17:47:21.597928 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="csr-controller::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key" 2025-12-08T17:47:21.598951822+00:00 stderr F I1208 17:47:21.598413 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kube-apiserver-client" 2025-12-08T17:47:21.598951822+00:00 stderr F I1208 17:47:21.598423 1 
shared_informer.go:350] "Waiting for caches to sync" controller="certificate-csrsigning-kube-apiserver-client" 2025-12-08T17:47:21.598951822+00:00 stderr F I1208 17:47:21.598435 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key" 2025-12-08T17:47:21.598951822+00:00 stderr F I1208 17:47:21.598840 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="csr-controller::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key" 2025-12-08T17:47:21.599335994+00:00 stderr F I1208 17:47:21.599247 1 controllermanager.go:827] "Started controller" controller="certificatesigningrequest-signing-controller" 2025-12-08T17:47:21.599335994+00:00 stderr F I1208 17:47:21.599285 1 controllermanager.go:796] "Starting controller" controller="certificatesigningrequest-cleaner-controller" 2025-12-08T17:47:21.599541321+00:00 stderr F I1208 17:47:21.599368 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-legacy-unknown" 2025-12-08T17:47:21.599541321+00:00 stderr F I1208 17:47:21.599383 1 shared_informer.go:350] "Waiting for caches to sync" controller="certificate-csrsigning-legacy-unknown" 2025-12-08T17:47:21.599541321+00:00 stderr F I1208 17:47:21.599396 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt::/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key" 2025-12-08T17:47:21.603493965+00:00 stderr F I1208 17:47:21.603075 1 controllermanager.go:827] "Started controller" controller="certificatesigningrequest-cleaner-controller" 2025-12-08T17:47:21.603493965+00:00 stderr F I1208 17:47:21.603110 1 controllermanager.go:790] "Warning: controller is disabled" controller="token-cleaner-controller" 2025-12-08T17:47:21.603493965+00:00 stderr F I1208 17:47:21.603120 1 controllermanager.go:785] "Skipping a cloud provider controller" controller="service-lb-controller" 2025-12-08T17:47:21.603493965+00:00 stderr F I1208 17:47:21.603132 1 controllermanager.go:796] "Starting controller" controller="endpointslice-mirroring-controller" 2025-12-08T17:47:21.603493965+00:00 stderr F I1208 17:47:21.603382 1 cleaner.go:83] "Starting CSR cleaner controller" logger="certificatesigningrequest-cleaner-controller" 2025-12-08T17:47:21.607357836+00:00 stderr F I1208 17:47:21.607059 1 controllermanager.go:827] "Started controller" controller="endpointslice-mirroring-controller" 2025-12-08T17:47:21.607357836+00:00 stderr F I1208 17:47:21.607086 1 controllermanager.go:796] "Starting controller" controller="node-ipam-controller" 2025-12-08T17:47:21.607357836+00:00 stderr F I1208 17:47:21.607093 1 controllermanager.go:805] "Warning: skipping controller" controller="node-ipam-controller" 2025-12-08T17:47:21.607357836+00:00 stderr F I1208 17:47:21.607100 1 controllermanager.go:796] "Starting controller" controller="node-lifecycle-controller" 2025-12-08T17:47:21.607357836+00:00 stderr F I1208 17:47:21.607209 1 endpointslicemirroring_controller.go:227] "Starting EndpointSliceMirroring controller" logger="endpointslice-mirroring-controller" 2025-12-08T17:47:21.607357836+00:00 stderr F I1208 17:47:21.607228 1 shared_informer.go:350] "Waiting for caches to sync" controller="endpoint_slice_mirroring" 2025-12-08T17:47:21.607680756+00:00 stderr F I1208 
17:47:21.607623 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.612239920+00:00 stderr F I1208 17:47:21.612174 1 node_lifecycle_controller.go:419] "Controller will reconcile labels" logger="node-lifecycle-controller" 2025-12-08T17:47:21.612291682+00:00 stderr F I1208 17:47:21.612254 1 controllermanager.go:827] "Started controller" controller="node-lifecycle-controller" 2025-12-08T17:47:21.612291682+00:00 stderr F I1208 17:47:21.612271 1 controllermanager.go:785] "Skipping a cloud provider controller" controller="node-route-controller" 2025-12-08T17:47:21.612313893+00:00 stderr F I1208 17:47:21.612286 1 controllermanager.go:796] "Starting controller" controller="persistentvolume-protection-controller" 2025-12-08T17:47:21.612461407+00:00 stderr F I1208 17:47:21.612335 1 node_lifecycle_controller.go:453] "Sending events to api server" logger="node-lifecycle-controller" 2025-12-08T17:47:21.612461407+00:00 stderr F I1208 17:47:21.612404 1 node_lifecycle_controller.go:464] "Starting node controller" logger="node-lifecycle-controller" 2025-12-08T17:47:21.612461407+00:00 stderr F I1208 17:47:21.612421 1 shared_informer.go:350] "Waiting for caches to sync" controller="taint" 2025-12-08T17:47:21.616089911+00:00 stderr F I1208 17:47:21.615849 1 controllermanager.go:827] "Started controller" controller="persistentvolume-protection-controller" 2025-12-08T17:47:21.616089911+00:00 stderr F I1208 17:47:21.615944 1 controllermanager.go:796] "Starting controller" controller="ephemeral-volume-controller" 2025-12-08T17:47:21.616313459+00:00 stderr F I1208 17:47:21.616241 1 pv_protection_controller.go:81] "Starting PV protection controller" logger="persistentvolume-protection-controller" 2025-12-08T17:47:21.616313459+00:00 stderr F I1208 17:47:21.616280 1 shared_informer.go:350] "Waiting for caches to sync" controller="PV protection" 2025-12-08T17:47:21.619067686+00:00 stderr F I1208 17:47:21.618992 1 controllermanager.go:827] "Started controller" controller="ephemeral-volume-controller" 2025-12-08T17:47:21.619067686+00:00 stderr F I1208 17:47:21.619024 1 controllermanager.go:779] "Controller is disabled by a feature gate" controller="device-taint-eviction-controller" requiredFeatureGates=["DynamicResourceAllocation","DRADeviceTaints"] 2025-12-08T17:47:21.619106217+00:00 stderr F I1208 17:47:21.619068 1 controllermanager.go:796] "Starting controller" controller="validatingadmissionpolicy-status-controller" 2025-12-08T17:47:21.619168039+00:00 stderr F I1208 17:47:21.619129 1 controller.go:173] "Starting ephemeral volume controller" logger="ephemeral-volume-controller" 2025-12-08T17:47:21.619168039+00:00 stderr F I1208 17:47:21.619148 1 shared_informer.go:350] "Waiting for caches to sync" controller="ephemeral" 2025-12-08T17:47:21.641691608+00:00 stderr F I1208 17:47:21.641587 1 controllermanager.go:827] "Started controller" controller="validatingadmissionpolicy-status-controller" 2025-12-08T17:47:21.641691608+00:00 stderr F I1208 17:47:21.641621 1 controllermanager.go:796] "Starting controller" controller="certificatesigningrequest-approving-controller" 2025-12-08T17:47:21.641824742+00:00 stderr F I1208 17:47:21.641763 1 shared_informer.go:350] "Waiting for caches to sync" controller="validatingadmissionpolicy-status" 2025-12-08T17:47:21.644593439+00:00 stderr F I1208 17:47:21.644537 1 controllermanager.go:827] "Started controller" controller="certificatesigningrequest-approving-controller" 
2025-12-08T17:47:21.644593439+00:00 stderr F I1208 17:47:21.644567 1 controllermanager.go:796] "Starting controller" controller="persistentvolume-expander-controller" 2025-12-08T17:47:21.644634460+00:00 stderr F I1208 17:47:21.644597 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-approving-controller" name="csrapproving" 2025-12-08T17:47:21.644655711+00:00 stderr F I1208 17:47:21.644644 1 shared_informer.go:350] "Waiting for caches to sync" controller="certificate-csrapproving" 2025-12-08T17:47:21.650193565+00:00 stderr F I1208 17:47:21.650123 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" 2025-12-08T17:47:21.650244827+00:00 stderr F I1208 17:47:21.650219 1 controllermanager.go:827] "Started controller" controller="persistentvolume-expander-controller" 2025-12-08T17:47:21.650244827+00:00 stderr F I1208 17:47:21.650227 1 controllermanager.go:796] "Starting controller" controller="legacy-serviceaccount-token-cleaner-controller" 2025-12-08T17:47:21.650279158+00:00 stderr F I1208 17:47:21.650267 1 expand_controller.go:329] "Starting expand controller" logger="persistentvolume-expander-controller" 2025-12-08T17:47:21.650279158+00:00 stderr F I1208 17:47:21.650274 1 shared_informer.go:350] "Waiting for caches to sync" controller="expand" 2025-12-08T17:47:21.654012445+00:00 stderr F I1208 17:47:21.653957 1 controllermanager.go:827] "Started controller" controller="legacy-serviceaccount-token-cleaner-controller" 2025-12-08T17:47:21.654133689+00:00 stderr F I1208 17:47:21.654102 1 controllermanager.go:796] "Starting controller" controller="endpoints-controller" 2025-12-08T17:47:21.654363316+00:00 stderr F I1208 17:47:21.654111 1 legacy_serviceaccount_token_cleaner.go:103] "Starting legacy service account token cleaner controller" logger="legacy-serviceaccount-token-cleaner-controller" 2025-12-08T17:47:21.654363316+00:00 stderr F I1208 17:47:21.654350 1 shared_informer.go:350] "Waiting for caches to sync" controller="legacy-service-account-token-cleaner" 2025-12-08T17:47:21.655733680+00:00 stderr F I1208 17:47:21.655691 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.658391744+00:00 stderr F I1208 17:47:21.658341 1 controllermanager.go:827] "Started controller" controller="endpoints-controller" 2025-12-08T17:47:21.658391744+00:00 stderr F I1208 17:47:21.658373 1 controllermanager.go:796] "Starting controller" controller="namespace-controller" 2025-12-08T17:47:21.658626981+00:00 stderr F I1208 17:47:21.658565 1 endpoints_controller.go:187] "Starting endpoint controller" logger="endpoints-controller" 2025-12-08T17:47:21.658626981+00:00 stderr F I1208 17:47:21.658579 1 shared_informer.go:350] "Waiting for caches to sync" controller="endpoint" 2025-12-08T17:47:21.692225839+00:00 stderr F I1208 17:47:21.692099 1 shared_informer.go:357] "Caches are synced" controller="tokens" 2025-12-08T17:47:21.693829619+00:00 stderr F I1208 17:47:21.693718 1 controllermanager.go:827] "Started controller" controller="namespace-controller" 2025-12-08T17:47:21.693829619+00:00 stderr F I1208 17:47:21.693758 1 controllermanager.go:796] "Starting controller" controller="daemonset-controller" 2025-12-08T17:47:21.694046485+00:00 stderr F I1208 17:47:21.693972 1 namespace_controller.go:202] "Starting namespace controller" logger="namespace-controller" 2025-12-08T17:47:21.694046485+00:00 stderr F I1208 17:47:21.693994 1 shared_informer.go:350] "Waiting for 
caches to sync" controller="namespace" 2025-12-08T17:47:21.696039858+00:00 stderr F I1208 17:47:21.695946 1 controllermanager.go:827] "Started controller" controller="daemonset-controller" 2025-12-08T17:47:21.696039858+00:00 stderr F I1208 17:47:21.695976 1 controllermanager.go:796] "Starting controller" controller="job-controller" 2025-12-08T17:47:21.696232344+00:00 stderr F I1208 17:47:21.696175 1 daemon_controller.go:316] "Starting daemon sets controller" logger="daemonset-controller" 2025-12-08T17:47:21.696232344+00:00 stderr F I1208 17:47:21.696194 1 shared_informer.go:350] "Waiting for caches to sync" controller="daemon sets" 2025-12-08T17:47:21.699816987+00:00 stderr F I1208 17:47:21.699734 1 controllermanager.go:827] "Started controller" controller="job-controller" 2025-12-08T17:47:21.699816987+00:00 stderr F I1208 17:47:21.699753 1 controllermanager.go:796] "Starting controller" controller="horizontal-pod-autoscaler-controller" 2025-12-08T17:47:21.699913760+00:00 stderr F I1208 17:47:21.699848 1 job_controller.go:243] "Starting job controller" logger="job-controller" 2025-12-08T17:47:21.699942471+00:00 stderr F I1208 17:47:21.699919 1 shared_informer.go:350] "Waiting for caches to sync" controller="job" 2025-12-08T17:47:21.713552499+00:00 stderr F I1208 17:47:21.713482 1 controllermanager.go:827] "Started controller" controller="horizontal-pod-autoscaler-controller" 2025-12-08T17:47:21.713552499+00:00 stderr F I1208 17:47:21.713501 1 controllermanager.go:796] "Starting controller" controller="persistentvolume-binder-controller" 2025-12-08T17:47:21.713622471+00:00 stderr F I1208 17:47:21.713562 1 horizontal.go:204] "Starting HPA controller" logger="horizontal-pod-autoscaler-controller" 2025-12-08T17:47:21.713622471+00:00 stderr F I1208 17:47:21.713576 1 shared_informer.go:350] "Waiting for caches to sync" controller="HPA" 2025-12-08T17:47:21.716551534+00:00 stderr F I1208 17:47:21.716451 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/host-path" 2025-12-08T17:47:21.716551534+00:00 stderr F I1208 17:47:21.716470 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/nfs" 2025-12-08T17:47:21.716551534+00:00 stderr F I1208 17:47:21.716483 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" 2025-12-08T17:47:21.716551534+00:00 stderr F I1208 17:47:21.716520 1 controllermanager.go:827] "Started controller" controller="persistentvolume-binder-controller" 2025-12-08T17:47:21.716551534+00:00 stderr F I1208 17:47:21.716530 1 controllermanager.go:796] "Starting controller" controller="garbage-collector-controller" 2025-12-08T17:47:21.716859853+00:00 stderr F I1208 17:47:21.716800 1 pv_controller_base.go:308] "Starting persistent volume controller" logger="persistentvolume-binder-controller" 2025-12-08T17:47:21.716990107+00:00 stderr F I1208 17:47:21.716956 1 shared_informer.go:350] "Waiting for caches to sync" controller="persistent volume" 2025-12-08T17:47:21.723469492+00:00 stderr F I1208 17:47:21.723389 1 garbagecollector.go:144] "Starting controller" logger="garbage-collector-controller" controller="garbagecollector" 2025-12-08T17:47:21.723469492+00:00 stderr F I1208 17:47:21.723417 1 shared_informer.go:350] "Waiting for caches to sync" controller="garbage collector" 2025-12-08T17:47:21.723469492+00:00 stderr F I1208 17:47:21.723446 1 graph_builder.go:351] "Running" logger="garbage-collector-controller" component="GraphBuilder" 2025-12-08T17:47:21.723764071+00:00 stderr F I1208 17:47:21.723722 1 controllermanager.go:827] 
"Started controller" controller="garbage-collector-controller" 2025-12-08T17:47:21.723852574+00:00 stderr F I1208 17:47:21.723824 1 controllermanager.go:790] "Warning: controller is disabled" controller="ttl-controller" 2025-12-08T17:47:21.723977168+00:00 stderr F I1208 17:47:21.723947 1 controllermanager.go:796] "Starting controller" controller="selinux-warning-controller" 2025-12-08T17:47:21.731173405+00:00 stderr F W1208 17:47:21.731091 1 probe.go:272] Flexvolume plugin directory at /etc/kubernetes/kubelet-plugins/volume/exec does not exist. Recreating. 2025-12-08T17:47:21.731806614+00:00 stderr F I1208 17:47:21.731743 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/host-path" 2025-12-08T17:47:21.731806614+00:00 stderr F I1208 17:47:21.731772 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/nfs" 2025-12-08T17:47:21.731806614+00:00 stderr F I1208 17:47:21.731784 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/fc" 2025-12-08T17:47:21.731837225+00:00 stderr F I1208 17:47:21.731805 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" 2025-12-08T17:47:21.731927448+00:00 stderr F I1208 17:47:21.731852 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/csi" 2025-12-08T17:47:21.731927448+00:00 stderr F I1208 17:47:21.731913 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" 2025-12-08T17:47:21.732106194+00:00 stderr F I1208 17:47:21.732065 1 controllermanager.go:827] "Started controller" controller="selinux-warning-controller" 2025-12-08T17:47:21.732106194+00:00 stderr F I1208 17:47:21.732090 1 controllermanager.go:790] "Warning: controller is disabled" controller="bootstrap-signer-controller" 2025-12-08T17:47:21.732129194+00:00 stderr F I1208 17:47:21.732101 1 controllermanager.go:785] "Skipping a cloud provider controller" controller="cloud-node-lifecycle-controller" 2025-12-08T17:47:21.732129194+00:00 stderr F I1208 17:47:21.732115 1 controllermanager.go:796] "Starting controller" controller="persistentvolume-attach-detach-controller" 2025-12-08T17:47:21.732149025+00:00 stderr F I1208 17:47:21.732117 1 selinux_warning_controller.go:348] "Starting SELinux warning controller" logger="selinux-warning-controller" 2025-12-08T17:47:21.732206167+00:00 stderr F I1208 17:47:21.732190 1 shared_informer.go:350] "Waiting for caches to sync" controller="selinux_warning" 2025-12-08T17:47:21.735919393+00:00 stderr F I1208 17:47:21.735760 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/fc" 2025-12-08T17:47:21.735919393+00:00 stderr F I1208 17:47:21.735792 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" 2025-12-08T17:47:21.735919393+00:00 stderr F I1208 17:47:21.735810 1 plugins.go:616] "Loaded volume plugin" pluginName="kubernetes.io/csi" 2025-12-08T17:47:21.735919393+00:00 stderr F I1208 17:47:21.735907 1 controllermanager.go:827] "Started controller" controller="persistentvolume-attach-detach-controller" 2025-12-08T17:47:21.735982885+00:00 stderr F I1208 17:47:21.735919 1 controllermanager.go:796] "Starting controller" controller="persistentvolumeclaim-protection-controller" 2025-12-08T17:47:21.736139500+00:00 stderr F I1208 17:47:21.736076 1 attach_detach_controller.go:338] "Starting attach detach controller" logger="persistentvolume-attach-detach-controller" 2025-12-08T17:47:21.736139500+00:00 stderr F I1208 17:47:21.736093 1 shared_informer.go:350] "Waiting for caches to sync" controller="attach detach" 
2025-12-08T17:47:21.738962059+00:00 stderr F I1208 17:47:21.738792 1 controllermanager.go:827] "Started controller" controller="persistentvolumeclaim-protection-controller" 2025-12-08T17:47:21.738962059+00:00 stderr F I1208 17:47:21.738823 1 controllermanager.go:779] "Controller is disabled by a feature gate" controller="volumeattributesclass-protection-controller" requiredFeatureGates=["VolumeAttributesClass"] 2025-12-08T17:47:21.738962059+00:00 stderr F I1208 17:47:21.738841 1 controllermanager.go:796] "Starting controller" controller="ttl-after-finished-controller" 2025-12-08T17:47:21.739200516+00:00 stderr F I1208 17:47:21.739130 1 pvc_protection_controller.go:168] "Starting PVC protection controller" logger="persistentvolumeclaim-protection-controller" 2025-12-08T17:47:21.739200516+00:00 stderr F I1208 17:47:21.739155 1 shared_informer.go:350] "Waiting for caches to sync" controller="PVC protection" 2025-12-08T17:47:21.741257612+00:00 stderr F I1208 17:47:21.741194 1 controllermanager.go:827] "Started controller" controller="ttl-after-finished-controller" 2025-12-08T17:47:21.741257612+00:00 stderr F I1208 17:47:21.741224 1 controllermanager.go:796] "Starting controller" controller="service-ca-certificate-publisher-controller" 2025-12-08T17:47:21.741429997+00:00 stderr F I1208 17:47:21.741321 1 ttlafterfinished_controller.go:112] "Starting TTL after finished controller" logger="ttl-after-finished-controller" 2025-12-08T17:47:21.741429997+00:00 stderr F I1208 17:47:21.741380 1 shared_informer.go:350] "Waiting for caches to sync" controller="TTL after finished" 2025-12-08T17:47:21.744276007+00:00 stderr F I1208 17:47:21.744213 1 controllermanager.go:827] "Started controller" controller="service-ca-certificate-publisher-controller" 2025-12-08T17:47:21.744276007+00:00 stderr F I1208 17:47:21.744250 1 controllermanager.go:796] "Starting controller" controller="endpointslice-controller" 2025-12-08T17:47:21.744397140+00:00 stderr F I1208 17:47:21.744354 1 publisher.go:80] Starting service CA certificate configmap publisher 2025-12-08T17:47:21.744397140+00:00 stderr F I1208 17:47:21.744377 1 shared_informer.go:350] "Waiting for caches to sync" controller="crt configmap" 2025-12-08T17:47:21.747000142+00:00 stderr F I1208 17:47:21.746938 1 controllermanager.go:827] "Started controller" controller="endpointslice-controller" 2025-12-08T17:47:21.747000142+00:00 stderr F I1208 17:47:21.746978 1 controllermanager.go:796] "Starting controller" controller="disruption-controller" 2025-12-08T17:47:21.747264891+00:00 stderr F I1208 17:47:21.747212 1 endpointslice_controller.go:281] "Starting endpoint slice controller" logger="endpointslice-controller" 2025-12-08T17:47:21.747264891+00:00 stderr F I1208 17:47:21.747228 1 shared_informer.go:350] "Waiting for caches to sync" controller="endpoint_slice" 2025-12-08T17:47:21.750869025+00:00 stderr P I1208 17:47:21.750485 1 garbagecollector.go:203] "syncing garbage collector with updated resources from discovery" logger="garbage-collector-controller" diff="added: [/v1, Resource=configmaps /v1, Resource=endpoints /v1, Resource=events /v1, Resource=limitranges /v1, Resource=namespaces /v1, Resource=nodes /v1, Resource=persistentvolumeclaims /v1, Resource=persistentvolumes /v1, Resource=pods /v1, Resource=podtemplates /v1, Resource=replicationcontrollers /v1, Resource=resourcequotas /v1, Resource=secrets /v1, Resource=serviceaccounts /v1, Resource=services admissionregistration.k8s.io/v1, Resource=mutatingwebhookconfigurations admissionregistration.k8s.io/v1, 
Resource=validatingadmissionpolicies admissionregistration.k8s.io/v1, Resource=validatingadmissionpolicybindings admissionregistration.k8s.io/v1, Resource=validatingwebhookconfigurations apiextensions.k8s.io/v1, Resource=customresourcedefinitions apiregistration.k8s.io/v1, Resource=apiservices apiserver.openshift.io/v1, Resource=apirequestcounts apps.openshift.io/v1, Resource=deploymentconfigs apps/v1, Resource=controllerrevisions apps/v1, Resource=daemonsets apps/v1, Resource=deployments apps/v1, Resource=replicasets apps/v1, Resource=statefulsets authorization.openshift.io/v1, Resource=rolebindingrestrictions autoscaling.openshift.io/v1, Resource=clusterautoscalers autoscaling.openshift.io/v1beta1, Resource=machineautoscalers autoscaling/v2, Resource=horizontalpodautoscalers batch/v1, Resource=cronjobs batch/v1, Resource=jobs build.openshift.io/v1, Resource=buildconfigs build.openshift.io/v1, Resource=builds certificates.k8s.io/v1, Resource=certificatesigningrequests config.openshift.io/v1, Resource=apiservers config.openshift.io/v1, Resource=authentications config.openshift.io/v1, Resource=builds config.openshift.io/v1, Resource=clusterimagepolicies config.openshift.io/v1, Resource=clusteroperators config.openshift.io/v1, Resource=clusterversions config.openshift.io/v1, Resource=consoles config.openshift.io/v1, Resource=dnses config.openshift.io/v1, Resource=featuregates config.openshift.io/v1, Resource=imagecontentpolicies config.openshift.io/v1, Resource=imagedigestmirrorsets config.openshift.io/v1, Resource=imagepolicies config.openshift.io/v1, Resource=images config.openshift.io/v1, Resource=imagetagmirrorsets config.openshift.io/v1, Resource=infrastructures config.openshift.io/v1, Resource=ingresses config.openshift.io/v1, Resource=networks config.openshift.io/v1, Resource=nodes config.openshift.io/v1, Resource=oauths config.openshift.io/v1, Resource=operatorhubs config.openshift.io/v1, Resource=projects config.openshift.io/v1, Resource=proxies config.openshift.io/v1, Resource=schedulers console.openshift.io/v1, Resource=consoleclidownloads console.openshift.io/v1, Resource=consoleexternalloglinks console.openshift.io/v1, Resource=consolelinks console.openshift.io/v1, Resource=consolenotifications console.openshift.io/v1, Resource=consoleplugins console.openshift.io/v1, Resource=consolequickstarts console.openshift.io/v1, Resource=consolesamples console.openshift.io/v1, Resource=consoleyamlsamples controlplane.operator.openshift.io/v1alpha1, Resource=podnetworkconnectivitychecks coordination.k8s.io/v1, Resource=leases discovery.k8s.io/v1, Resource=endpointslices events.k8s.io/v1, Resource=events flowcontrol.apiserver.k8s.io/v1, Resource=flowschemas flowcontrol.apiserver.k8s.io/v1, Resource=prioritylevelconfigurations gateway.networking.k8s.io/v1, Resource=gatewayclasses gateway.networking.k8s.io/v1, Resource=gateways gateway.networking.k8s.io/v1, Resource=grpcroutes gateway.networking.k8s.io/v1, Resource=httproutes gateway.networking.k8s.io/v1beta1, Resource=referencegrants helm.openshift.io/v1beta1, Resource=helmchartrepositories helm.openshift.io/v1beta1, Resource=projecthelmchartrepositories image.openshift.io/v1, Resource=images image.openshift.io/v1, Resource=imagestreams imageregistry.operator.openshift.io/v1, Resource=configs imageregistry.operator.openshift.io/v1, Resource=imagepruners infrastructure.cluster.x-k8s.io/v1beta1, Resource=metal3remediations infrastructure.cluster.x-k8s.io/v1beta1, Resource=metal3remediationtemplates ingress.operator.openshift.io/v1, 
Resource=dnsrecords ipam.cluster.x-k8s.io/v1beta1, Resource=ipaddressclaims ipam.cluster.x-k8s.io/v1beta1, Resource=ipaddresses k8s.cni.cncf.io/v1, Resource=network-attachment-definitions k8s.cni.cncf.io/v1alpha1, Resource=ipamclaims k8s.ovn.org/v1, Resource=adminpolicybasedexternalroutes k8s.ovn.org/v1, Resource=clusteruserdefinednetworks k8s.ovn.org/v1, Resource=egressfirewalls k8s.ovn.org/v1, Resource=egressips k8s.ovn.org/v1, Resource=egressqoses k8s.ovn.org/v1, Resource=egressservices k8s.ovn.org/v1, Resource=userdefinednetworks machine.openshift.io/v1, Resource=controlplanemachinesets machine.openshift.io/v1beta1, Resource=machinehealthchecks machine.openshift.io/v1beta1, Resource=machines machine.openshift.io/v1beta1, Resource=machinesets machineconfiguration.openshift.io/v1, Resource=containerruntimeconfigs machineconfiguration.openshift.io/v1, Resource=controllerconfigs machineconfiguration.openshift.io/v1, Resource=kubeletconfigs machineconfiguration.openshift.io/v1, Resource=machineconfignodes machineconfiguration.openshift.io/v1, Resource=machineconfigpools machineconfiguration.openshift.io/v1, Resource=machineconfigs machineconfiguration.openshift.io/v1, Resource=machineosbuilds machineconfiguration.openshift.io/v1, Resource=machineosconfigs machineconfiguration.openshift.io/v1, Resource=pinnedimagesets migration.k8s.io/v1alpha1, Resource=storagestates migration.k8s.io/v1alpha1, Resource=storageversionmigrations monitoring.coreos.com/v1, Resource=alertmanagers monitoring.coreos.com/v1, Resource=podmonitors monitoring.coreos.com/v1, Resource=probes monitoring.coreos.com/v1, Resource=prometheuses monitoring.coreos.com/v1, Resource=prometheusrules monitoring.coreos.com/v1, Resource=servicemonitors monitoring.coreos.com/v1, Resource=thanosrulers monitoring.coreos.com/v1beta1, Resource=alertmanagerconfigs monitoring.openshift.io/v1, Resource=alertingrules monitoring.openshift.io/v1, Resource=alertrelabelconfigs network.operator.openshift.io/v1, Resource=egressrouters network.operator.openshift.io/v1, Resource=operatorpkis networking.k8s.io/v1, Resource=ingressclasses networking.k8s.io/v1, Resource=ingresses networking.k8s.io/v1, Resource=ipaddresses networking.k8s.io/v1, Resource=networkpolicies networking.k8s.io/v1, Resource=servicecidrs node.k8s.io/v1, Resource=runtimeclasses oauth.openshift.io/v1, Resource=oauthaccesstokens oauth.openshift.io/v1, Resource=oauthauthorizetokens oauth.openshift.io/v1, Resource=oauthclientauthorizations oauth.openshift.io/v1, Resource=oauthclients oauth.openshift.io/v1, Resource=useroauthaccesstokens operator.openshift.io/v1, Resource=authentications operator.openshift.io/v1, Resource=clustercsidrivers operator.openshift.io/v1, Resource=configs operator.openshift.io/v1, Resource=consoles operator.openshift.io/v1, Resource=csisnapshotcontrollers operator.openshift.io/v1, Resource=dnses operator.openshift.io/v1, Resource=etcds operator.openshift.io/v1, Resource=ingresscontrollers operator.openshift.io/v1, Resource=kubeapiservers operator.openshift.io/v1, Resource=kubecontrollermanagers operator.openshift.io/v1, Resource=kubeschedulers operator.openshift.io/v1, Resource=kubestorageversionmigrators operator.openshift.io/v1, Resource=machineconfigurations operator.openshift.io/v1, Resource=networks operator.openshift.io/v1, Resource=openshiftapiservers operator.openshift.io/v1, Resource=openshiftcontrollermanagers operator.openshift.io/v1, Resource=servicecas operator.openshift.io/v1, Resource=storages operator.openshift.io/v1alpha1, 
Resource=imagecontentsourcepolicies operators.coreos.com/v1, Resource=olmconfigs operators.coreos.com/v1, Resource=operatorgroups operators.coreos.com/v1, Resource=operators operators.coreos.com/v1alpha1, Resource=catalogsources operators.coreos.com/v1alpha1, Resource=clusterserviceversions operators.coreos.com/v1alpha1, Resource=installplans operators.coreos.com/v1alpha1, Resource=subscriptions operators.coreos.com/v2, Resource=operatorconditions policy.networking.k8s.io/v1alpha1, Resource=adminnetworkpolicies policy.networking.k8s.io/v1alpha1, Resource=baselineadminnetworkpolicies policy/v1, Resource=poddisruptionbudgets project.openshift.io/v1, Resource=projects quota.openshift.io/v1, Resource=clusterresourcequotas rbac.authorization.k8s.io/v1, Resource=clusterrolebindings rbac.authorization.k8s.io/v1, Resource=clusterroles rbac.authorization.k8s.io/v1, Resource=rolebindings rbac.authorization.k8s.io/v1, Resource=roles route.openshift.io/v1, Resource=routes samples.operator.openshift.io/v1, Resource=configs scheduling.k8s.io/v1, Resource=priorityclasses security.internal.openshift.io/v1, Resource=rangeallocations security.openshift.io/v1, Resource=rangeallocations security.openshift.io/v1, Resource=securitycontextconstraints storage.k8s.io/v1, Resource=csidrivers storage.k8s.io/v1, Resource=csinodes storage.k8s.io/v1, Resource=csistoragecapacities storage.k8s.io/v1, Resource=storageclasses storage.k8s.io/v1, Resource=volumeattachments template.openshift.io/v1, Resource=brokertemplateinstances template.openshift.io/v1, Resource=templateinstances template.openshift.io/v1, Resource=templates user.openshift.io/v1, Resource=groups user.openshift.io/v1, Resource=identities user.openshift.io/v1, Resource=users whereabouts.cni.cncf.io/v1alpha1, Resource=ippools whereabouts.cni.cncf.io/v1alpha1, Resource=nodeslicepools whereabouts.cni.cncf.io/v1alpha1, Resource=overlappingrangeipreservations], removed: []" 2025-12-08T17:47:21.751714021+00:00 stderr F I1208 17:47:21.751639 1 controllermanager.go:827] "Started controller" controller="disruption-controller" 2025-12-08T17:47:21.751714021+00:00 stderr F I1208 17:47:21.751664 1 disruption.go:455] "Sending events to api server." 
logger="disruption-controller" 2025-12-08T17:47:21.751714021+00:00 stderr F I1208 17:47:21.751674 1 controllermanager.go:796] "Starting controller" controller="clusterrole-aggregation-controller" 2025-12-08T17:47:21.751714021+00:00 stderr F I1208 17:47:21.751703 1 disruption.go:466] "Starting disruption controller" logger="disruption-controller" 2025-12-08T17:47:21.751754092+00:00 stderr F I1208 17:47:21.751710 1 shared_informer.go:350] "Waiting for caches to sync" controller="disruption" 2025-12-08T17:47:21.753932021+00:00 stderr F I1208 17:47:21.753818 1 controllermanager.go:827] "Started controller" controller="clusterrole-aggregation-controller" 2025-12-08T17:47:21.753932021+00:00 stderr F I1208 17:47:21.753854 1 controllermanager.go:796] "Starting controller" controller="root-ca-certificate-publisher-controller" 2025-12-08T17:47:21.753932021+00:00 stderr F I1208 17:47:21.753905 1 clusterroleaggregation_controller.go:194] "Starting ClusterRoleAggregator controller" logger="clusterrole-aggregation-controller" 2025-12-08T17:47:21.753932021+00:00 stderr F I1208 17:47:21.753917 1 shared_informer.go:350] "Waiting for caches to sync" controller="ClusterRoleAggregator" 2025-12-08T17:47:21.756659697+00:00 stderr F I1208 17:47:21.756569 1 controllermanager.go:827] "Started controller" controller="root-ca-certificate-publisher-controller" 2025-12-08T17:47:21.756659697+00:00 stderr F I1208 17:47:21.756592 1 controllermanager.go:779] "Controller is disabled by a feature gate" controller="kube-apiserver-serving-clustertrustbundle-publisher-controller" requiredFeatureGates=["ClusterTrustBundle"] 2025-12-08T17:47:21.756659697+00:00 stderr F I1208 17:47:21.756606 1 controllermanager.go:779] "Controller is disabled by a feature gate" controller="storageversion-garbage-collector-controller" requiredFeatureGates=["APIServerIdentity","StorageVersionAPI"] 2025-12-08T17:47:21.756659697+00:00 stderr F I1208 17:47:21.756612 1 controllermanager.go:779] "Controller is disabled by a feature gate" controller="resourceclaim-controller" requiredFeatureGates=["DynamicResourceAllocation"] 2025-12-08T17:47:21.756659697+00:00 stderr F I1208 17:47:21.756620 1 controllermanager.go:796] "Starting controller" controller="serviceaccount-controller" 2025-12-08T17:47:21.756721369+00:00 stderr F I1208 17:47:21.756643 1 publisher.go:107] "Starting root CA cert publisher controller" logger="root-ca-certificate-publisher-controller" 2025-12-08T17:47:21.756721369+00:00 stderr F I1208 17:47:21.756672 1 shared_informer.go:350] "Waiting for caches to sync" controller="crt configmap" 2025-12-08T17:47:21.759024331+00:00 stderr F I1208 17:47:21.758941 1 controllermanager.go:827] "Started controller" controller="serviceaccount-controller" 2025-12-08T17:47:21.759024331+00:00 stderr F I1208 17:47:21.758960 1 controllermanager.go:796] "Starting controller" controller="replicaset-controller" 2025-12-08T17:47:21.759200827+00:00 stderr F I1208 17:47:21.759140 1 serviceaccounts_controller.go:114] "Starting service account controller" logger="serviceaccount-controller" 2025-12-08T17:47:21.759200827+00:00 stderr F I1208 17:47:21.759175 1 shared_informer.go:350] "Waiting for caches to sync" controller="service account" 2025-12-08T17:47:21.761255691+00:00 stderr F I1208 17:47:21.761216 1 controllermanager.go:827] "Started controller" controller="replicaset-controller" 2025-12-08T17:47:21.761340414+00:00 stderr F I1208 17:47:21.761318 1 controllermanager.go:796] "Starting controller" controller="statefulset-controller" 
2025-12-08T17:47:21.761439347+00:00 stderr F I1208 17:47:21.761394 1 replica_set.go:219] "Starting controller" logger="replicaset-controller" name="replicaset" 2025-12-08T17:47:21.761439347+00:00 stderr F I1208 17:47:21.761412 1 shared_informer.go:350] "Waiting for caches to sync" controller="ReplicaSet" 2025-12-08T17:47:21.763634686+00:00 stderr F I1208 17:47:21.763575 1 controllermanager.go:827] "Started controller" controller="statefulset-controller" 2025-12-08T17:47:21.763634686+00:00 stderr F I1208 17:47:21.763605 1 controllermanager.go:796] "Starting controller" controller="cronjob-controller" 2025-12-08T17:47:21.763722189+00:00 stderr F I1208 17:47:21.763679 1 stateful_set.go:166] "Starting stateful set controller" logger="statefulset-controller" 2025-12-08T17:47:21.763722189+00:00 stderr F I1208 17:47:21.763694 1 shared_informer.go:350] "Waiting for caches to sync" controller="stateful set" 2025-12-08T17:47:21.765706412+00:00 stderr F I1208 17:47:21.765667 1 controllermanager.go:827] "Started controller" controller="cronjob-controller" 2025-12-08T17:47:21.765803215+00:00 stderr F I1208 17:47:21.765774 1 controllermanager.go:796] "Starting controller" controller="taint-eviction-controller" 2025-12-08T17:47:21.765993230+00:00 stderr F I1208 17:47:21.765747 1 cronjob_controllerv2.go:145] "Starting cronjob controller v2" logger="cronjob-controller" 2025-12-08T17:47:21.765993230+00:00 stderr F I1208 17:47:21.765980 1 shared_informer.go:350] "Waiting for caches to sync" controller="cronjob" 2025-12-08T17:47:21.768157259+00:00 stderr F I1208 17:47:21.768088 1 controllermanager.go:827] "Started controller" controller="taint-eviction-controller" 2025-12-08T17:47:21.768157259+00:00 stderr F I1208 17:47:21.768120 1 controllermanager.go:796] "Starting controller" controller="service-cidr-controller" 2025-12-08T17:47:21.768201940+00:00 stderr F I1208 17:47:21.768157 1 taint_eviction.go:282] "Starting" logger="taint-eviction-controller" controller="taint-eviction-controller" 2025-12-08T17:47:21.768201940+00:00 stderr F I1208 17:47:21.768180 1 taint_eviction.go:288] "Sending events to api server" logger="taint-eviction-controller" 2025-12-08T17:47:21.768201940+00:00 stderr F I1208 17:47:21.768193 1 shared_informer.go:350] "Waiting for caches to sync" controller="taint-eviction-controller" 2025-12-08T17:47:21.770649747+00:00 stderr F I1208 17:47:21.770548 1 controllermanager.go:827] "Started controller" controller="service-cidr-controller" 2025-12-08T17:47:21.770649747+00:00 stderr F I1208 17:47:21.770612 1 controllermanager.go:796] "Starting controller" controller="storage-version-migrator-controller" 2025-12-08T17:47:21.770649747+00:00 stderr F I1208 17:47:21.770628 1 controllermanager.go:805] "Warning: skipping controller" controller="storage-version-migrator-controller" 2025-12-08T17:47:21.770649747+00:00 stderr F I1208 17:47:21.770632 1 servicecidrs_controller.go:136] "Starting" logger="service-cidr-controller" controller="service-cidr-controller" 2025-12-08T17:47:21.770649747+00:00 stderr F I1208 17:47:21.770640 1 controllermanager.go:796] "Starting controller" controller="replicationcontroller-controller" 2025-12-08T17:47:21.770729870+00:00 stderr F I1208 17:47:21.770644 1 shared_informer.go:350] "Waiting for caches to sync" controller="service-cidr-controller" 2025-12-08T17:47:21.775624463+00:00 stderr F I1208 17:47:21.775524 1 controllermanager.go:827] "Started controller" controller="replicationcontroller-controller" 2025-12-08T17:47:21.775624463+00:00 stderr F I1208 17:47:21.775557 1 
controllermanager.go:796] "Starting controller" controller="pod-garbage-collector-controller" 2025-12-08T17:47:21.775826589+00:00 stderr F I1208 17:47:21.775745 1 replica_set.go:219] "Starting controller" logger="replicationcontroller-controller" name="replicationcontroller" 2025-12-08T17:47:21.775826589+00:00 stderr F I1208 17:47:21.775778 1 shared_informer.go:350] "Waiting for caches to sync" controller="ReplicationController" 2025-12-08T17:47:21.778333098+00:00 stderr F I1208 17:47:21.778292 1 controllermanager.go:827] "Started controller" controller="pod-garbage-collector-controller" 2025-12-08T17:47:21.778418451+00:00 stderr F I1208 17:47:21.778396 1 controllermanager.go:796] "Starting controller" controller="resourcequota-controller" 2025-12-08T17:47:21.778648258+00:00 stderr F I1208 17:47:21.778395 1 gc_controller.go:99] "Starting GC controller" logger="pod-garbage-collector-controller" 2025-12-08T17:47:21.778648258+00:00 stderr F I1208 17:47:21.778626 1 shared_informer.go:350] "Waiting for caches to sync" controller="GC" 2025-12-08T17:47:21.817312496+00:00 stderr F I1208 17:47:21.817230 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="replicasets.apps" 2025-12-08T17:47:21.817468070+00:00 stderr F I1208 17:47:21.817433 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="daemonsets.apps" 2025-12-08T17:47:21.817574874+00:00 stderr F I1208 17:47:21.817544 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ingresses.networking.k8s.io" 2025-12-08T17:47:21.817670817+00:00 stderr F I1208 17:47:21.817640 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="rolebindings.rbac.authorization.k8s.io" 2025-12-08T17:47:21.817830402+00:00 stderr F I1208 17:47:21.817803 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="egressfirewalls.k8s.ovn.org" 2025-12-08T17:47:21.817973697+00:00 stderr F I1208 17:47:21.817940 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="alertingrules.monitoring.openshift.io" 2025-12-08T17:47:21.818069810+00:00 stderr F I1208 17:47:21.818039 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="horizontalpodautoscalers.autoscaling" 2025-12-08T17:47:21.818142652+00:00 stderr F I1208 17:47:21.818121 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="jobs.batch" 2025-12-08T17:47:21.818231055+00:00 stderr F I1208 17:47:21.818204 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="imagestreams.image.openshift.io" 2025-12-08T17:47:21.818336508+00:00 stderr F I1208 17:47:21.818313 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="gateways.gateway.networking.k8s.io" 2025-12-08T17:47:21.818414651+00:00 stderr F I1208 17:47:21.818393 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="controlplanemachinesets.machine.openshift.io" 2025-12-08T17:47:21.818494063+00:00 stderr F I1208 17:47:21.818472 1 
resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="probes.monitoring.coreos.com" 2025-12-08T17:47:21.819351690+00:00 stderr F I1208 17:47:21.819293 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="templateinstances.template.openshift.io" 2025-12-08T17:47:21.819351690+00:00 stderr F I1208 17:47:21.819337 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ipaddressclaims.ipam.cluster.x-k8s.io" 2025-12-08T17:47:21.819384501+00:00 stderr F I1208 17:47:21.819354 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="machinehealthchecks.machine.openshift.io" 2025-12-08T17:47:21.819384501+00:00 stderr F I1208 17:47:21.819370 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="alertrelabelconfigs.monitoring.openshift.io" 2025-12-08T17:47:21.819400392+00:00 stderr F I1208 17:47:21.819381 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="controllerrevisions.apps" 2025-12-08T17:47:21.819415272+00:00 stderr F I1208 17:47:21.819401 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="network-attachment-definitions.k8s.cni.cncf.io" 2025-12-08T17:47:21.819430032+00:00 stderr F I1208 17:47:21.819420 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="thanosrulers.monitoring.coreos.com" 2025-12-08T17:47:21.819450853+00:00 stderr F I1208 17:47:21.819437 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="egressrouters.network.operator.openshift.io" 2025-12-08T17:47:21.819468864+00:00 stderr F I1208 17:47:21.819455 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="catalogsources.operators.coreos.com" 2025-12-08T17:47:21.819487104+00:00 stderr F I1208 17:47:21.819477 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="clusterserviceversions.operators.coreos.com" 2025-12-08T17:47:21.819504775+00:00 stderr F I1208 17:47:21.819492 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="podnetworkconnectivitychecks.controlplane.operator.openshift.io" 2025-12-08T17:47:21.819522935+00:00 stderr F I1208 17:47:21.819512 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="projecthelmchartrepositories.helm.openshift.io" 2025-12-08T17:47:21.819541346+00:00 stderr F I1208 17:47:21.819529 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="podmonitors.monitoring.coreos.com" 2025-12-08T17:47:21.819557406+00:00 stderr F I1208 17:47:21.819545 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ippools.whereabouts.cni.cncf.io" 2025-12-08T17:47:21.819581387+00:00 stderr F I1208 17:47:21.819569 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" 
logger="resourcequota-controller" resource="imagepolicies.config.openshift.io" 2025-12-08T17:47:21.819596128+00:00 stderr F I1208 17:47:21.819579 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpoints" 2025-12-08T17:47:21.819596128+00:00 stderr F I1208 17:47:21.819591 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="networkpolicies.networking.k8s.io" 2025-12-08T17:47:21.819645589+00:00 stderr F I1208 17:47:21.819610 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="routes.route.openshift.io" 2025-12-08T17:47:21.819645589+00:00 stderr F I1208 17:47:21.819630 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="grpcroutes.gateway.networking.k8s.io" 2025-12-08T17:47:21.819662100+00:00 stderr F I1208 17:47:21.819647 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="machines.machine.openshift.io" 2025-12-08T17:47:21.819682460+00:00 stderr F I1208 17:47:21.819673 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="alertmanagerconfigs.monitoring.coreos.com" 2025-12-08T17:47:21.819700451+00:00 stderr F I1208 17:47:21.819690 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ingresscontrollers.operator.openshift.io" 2025-12-08T17:47:21.819743222+00:00 stderr F I1208 17:47:21.819713 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymentconfigs.apps.openshift.io" 2025-12-08T17:47:21.819743222+00:00 stderr F I1208 17:47:21.819734 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="machineautoscalers.autoscaling.openshift.io" 2025-12-08T17:47:21.819763143+00:00 stderr F I1208 17:47:21.819752 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="alertmanagers.monitoring.coreos.com" 2025-12-08T17:47:21.819781063+00:00 stderr F I1208 17:47:21.819769 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="installplans.operators.coreos.com" 2025-12-08T17:47:21.819795564+00:00 stderr F I1208 17:47:21.819779 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpointslices.discovery.k8s.io" 2025-12-08T17:47:21.819809894+00:00 stderr F I1208 17:47:21.819798 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="httproutes.gateway.networking.k8s.io" 2025-12-08T17:47:21.819827795+00:00 stderr F I1208 17:47:21.819818 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="metal3remediations.infrastructure.cluster.x-k8s.io" 2025-12-08T17:47:21.819845685+00:00 stderr F I1208 17:47:21.819836 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="userdefinednetworks.k8s.ovn.org" 2025-12-08T17:47:21.819955379+00:00 stderr F I1208 17:47:21.819857 1 
resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="egressservices.k8s.ovn.org" 2025-12-08T17:47:21.819955379+00:00 stderr F I1208 17:47:21.819900 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="operatorconditions.operators.coreos.com" 2025-12-08T17:47:21.819955379+00:00 stderr F I1208 17:47:21.819916 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deployments.apps" 2025-12-08T17:47:21.819955379+00:00 stderr F I1208 17:47:21.819929 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="poddisruptionbudgets.policy" 2025-12-08T17:47:21.819987950+00:00 stderr F I1208 17:47:21.819956 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="metal3remediationtemplates.infrastructure.cluster.x-k8s.io" 2025-12-08T17:47:21.819987950+00:00 stderr F I1208 17:47:21.819974 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="servicemonitors.monitoring.coreos.com" 2025-12-08T17:47:21.820003520+00:00 stderr F I1208 17:47:21.819992 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="podtemplates" 2025-12-08T17:47:21.820046692+00:00 stderr F I1208 17:47:21.820015 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="rolebindingrestrictions.authorization.openshift.io" 2025-12-08T17:47:21.820046692+00:00 stderr F I1208 17:47:21.820037 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ipaddresses.ipam.cluster.x-k8s.io" 2025-12-08T17:47:21.820066222+00:00 stderr F I1208 17:47:21.820055 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="operatorgroups.operators.coreos.com" 2025-12-08T17:47:21.820107614+00:00 stderr F I1208 17:47:21.820078 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ipamclaims.k8s.cni.cncf.io" 2025-12-08T17:47:21.820126574+00:00 stderr F I1208 17:47:21.820118 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="prometheusrules.monitoring.coreos.com" 2025-12-08T17:47:21.820168325+00:00 stderr F I1208 17:47:21.820137 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="operatorpkis.network.operator.openshift.io" 2025-12-08T17:47:21.820210937+00:00 stderr F I1208 17:47:21.820182 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="subscriptions.operators.coreos.com" 2025-12-08T17:47:21.820226817+00:00 stderr F I1208 17:47:21.820207 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="nodeslicepools.whereabouts.cni.cncf.io" 2025-12-08T17:47:21.820241398+00:00 stderr F I1208 17:47:21.820223 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="builds.build.openshift.io" 
2025-12-08T17:47:21.820255978+00:00 stderr F I1208 17:47:21.820239 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="overlappingrangeipreservations.whereabouts.cni.cncf.io" 2025-12-08T17:47:21.820649300+00:00 stderr F I1208 17:47:21.820596 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="serviceaccounts" 2025-12-08T17:47:21.820649300+00:00 stderr F I1208 17:47:21.820617 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="roles.rbac.authorization.k8s.io" 2025-12-08T17:47:21.820649300+00:00 stderr F I1208 17:47:21.820638 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="templates.template.openshift.io" 2025-12-08T17:47:21.820678271+00:00 stderr F I1208 17:47:21.820656 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="referencegrants.gateway.networking.k8s.io" 2025-12-08T17:47:21.820678271+00:00 stderr F I1208 17:47:21.820674 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="machinesets.machine.openshift.io" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820682 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="statefulsets.apps" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820700 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="dnsrecords.ingress.operator.openshift.io" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820715 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="prometheuses.monitoring.coreos.com" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820735 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="limitranges" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820745 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cronjobs.batch" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820756 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="csistoragecapacities.storage.k8s.io" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820765 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="leases.coordination.k8s.io" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820779 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="buildconfigs.build.openshift.io" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820794 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="egressqoses.k8s.ovn.org" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820807 1 controllermanager.go:827] "Started controller" controller="resourcequota-controller" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820814 1 
controllermanager.go:796] "Starting controller" controller="deployment-controller" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820870 1 resource_quota_controller.go:300] "Starting resource quota controller" logger="resourcequota-controller" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820948 1 shared_informer.go:350] "Waiting for caches to sync" controller="resource quota" 2025-12-08T17:47:21.821159957+00:00 stderr F I1208 17:47:21.820993 1 resource_quota_monitor.go:308] "QuotaMonitor running" logger="resourcequota-controller" 2025-12-08T17:47:21.823358696+00:00 stderr F I1208 17:47:21.823311 1 controllermanager.go:827] "Started controller" controller="deployment-controller" 2025-12-08T17:47:21.823624415+00:00 stderr F I1208 17:47:21.823551 1 deployment_controller.go:173] "Starting controller" logger="deployment-controller" controller="deployment" 2025-12-08T17:47:21.823624415+00:00 stderr F I1208 17:47:21.823583 1 shared_informer.go:350] "Waiting for caches to sync" controller="deployment" 2025-12-08T17:47:21.827788076+00:00 stderr F I1208 17:47:21.827728 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.828030473+00:00 stderr F I1208 17:47:21.827984 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"crc\" does not exist" 2025-12-08T17:47:21.832774553+00:00 stderr F I1208 17:47:21.832649 1 resource_quota_controller.go:476] "syncing resource quota controller with updated resources from discovery" logger="resourcequota-controller" diff="added: [/v1, Resource=configmaps /v1, Resource=endpoints /v1, Resource=events /v1, Resource=limitranges /v1, Resource=persistentvolumeclaims /v1, Resource=pods /v1, Resource=podtemplates /v1, Resource=replicationcontrollers /v1, Resource=resourcequotas /v1, Resource=secrets /v1, Resource=serviceaccounts /v1, Resource=services apps.openshift.io/v1, Resource=deploymentconfigs apps/v1, Resource=controllerrevisions apps/v1, Resource=daemonsets apps/v1, Resource=deployments apps/v1, Resource=replicasets apps/v1, Resource=statefulsets authorization.openshift.io/v1, Resource=rolebindingrestrictions autoscaling.openshift.io/v1beta1, Resource=machineautoscalers autoscaling/v2, Resource=horizontalpodautoscalers batch/v1, Resource=cronjobs batch/v1, Resource=jobs build.openshift.io/v1, Resource=buildconfigs build.openshift.io/v1, Resource=builds config.openshift.io/v1, Resource=imagepolicies controlplane.operator.openshift.io/v1alpha1, Resource=podnetworkconnectivitychecks coordination.k8s.io/v1, Resource=leases discovery.k8s.io/v1, Resource=endpointslices events.k8s.io/v1, Resource=events gateway.networking.k8s.io/v1, Resource=gateways gateway.networking.k8s.io/v1, Resource=grpcroutes gateway.networking.k8s.io/v1, Resource=httproutes gateway.networking.k8s.io/v1beta1, Resource=referencegrants helm.openshift.io/v1beta1, Resource=projecthelmchartrepositories image.openshift.io/v1, Resource=imagestreams infrastructure.cluster.x-k8s.io/v1beta1, Resource=metal3remediations infrastructure.cluster.x-k8s.io/v1beta1, Resource=metal3remediationtemplates ingress.operator.openshift.io/v1, Resource=dnsrecords ipam.cluster.x-k8s.io/v1beta1, Resource=ipaddressclaims ipam.cluster.x-k8s.io/v1beta1, Resource=ipaddresses k8s.cni.cncf.io/v1, Resource=network-attachment-definitions k8s.cni.cncf.io/v1alpha1, 
Resource=ipamclaims k8s.ovn.org/v1, Resource=egressfirewalls k8s.ovn.org/v1, Resource=egressqoses k8s.ovn.org/v1, Resource=egressservices k8s.ovn.org/v1, Resource=userdefinednetworks machine.openshift.io/v1, Resource=controlplanemachinesets machine.openshift.io/v1beta1, Resource=machinehealthchecks machine.openshift.io/v1beta1, Resource=machines machine.openshift.io/v1beta1, Resource=machinesets monitoring.coreos.com/v1, Resource=alertmanagers monitoring.coreos.com/v1, Resource=podmonitors monitoring.coreos.com/v1, Resource=probes monitoring.coreos.com/v1, Resource=prometheuses monitoring.coreos.com/v1, Resource=prometheusrules monitoring.coreos.com/v1, Resource=servicemonitors monitoring.coreos.com/v1, Resource=thanosrulers monitoring.coreos.com/v1beta1, Resource=alertmanagerconfigs monitoring.openshift.io/v1, Resource=alertingrules monitoring.openshift.io/v1, Resource=alertrelabelconfigs network.operator.openshift.io/v1, Resource=egressrouters network.operator.openshift.io/v1, Resource=operatorpkis networking.k8s.io/v1, Resource=ingresses networking.k8s.io/v1, Resource=networkpolicies operator.openshift.io/v1, Resource=ingresscontrollers operators.coreos.com/v1, Resource=operatorgroups operators.coreos.com/v1alpha1, Resource=catalogsources operators.coreos.com/v1alpha1, Resource=clusterserviceversions operators.coreos.com/v1alpha1, Resource=installplans operators.coreos.com/v1alpha1, Resource=subscriptions operators.coreos.com/v2, Resource=operatorconditions policy/v1, Resource=poddisruptionbudgets rbac.authorization.k8s.io/v1, Resource=rolebindings rbac.authorization.k8s.io/v1, Resource=roles route.openshift.io/v1, Resource=routes storage.k8s.io/v1, Resource=csistoragecapacities template.openshift.io/v1, Resource=templateinstances template.openshift.io/v1, Resource=templates whereabouts.cni.cncf.io/v1alpha1, Resource=ippools whereabouts.cni.cncf.io/v1alpha1, Resource=nodeslicepools whereabouts.cni.cncf.io/v1alpha1, Resource=overlappingrangeipreservations], removed: []" 2025-12-08T17:47:21.834959882+00:00 stderr F I1208 17:47:21.834894 1 reflector.go:430] "Caches populated" type="*v1.CSINode" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.835004413+00:00 stderr F I1208 17:47:21.834952 1 reflector.go:430] "Caches populated" type="*v1.PodTemplate" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.835069155+00:00 stderr F I1208 17:47:21.835023 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.835281012+00:00 stderr F I1208 17:47:21.835245 1 reflector.go:430] "Caches populated" type="*v1.ServiceCIDR" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.837488900+00:00 stderr F I1208 17:47:21.836021 1 reflector.go:430] "Caches populated" type="*v1.PersistentVolumeClaim" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.837488900+00:00 stderr F I1208 17:47:21.837033 1 shared_informer.go:350] "Waiting for caches to sync" controller="resource quota" 2025-12-08T17:47:21.839346839+00:00 stderr F I1208 17:47:21.839294 1 reflector.go:430] "Caches populated" type="*v1.LimitRange" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.839544365+00:00 stderr F I1208 17:47:21.839504 1 reflector.go:430] "Caches populated" type="*v1.CSIDriver" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.839592477+00:00 stderr F I1208 17:47:21.839558 1 reflector.go:430] 
"Caches populated" type="*v1.StorageClass" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.839761862+00:00 stderr F I1208 17:47:21.839722 1 reflector.go:430] "Caches populated" type="*v1.CSIStorageCapacity" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.840010250+00:00 stderr F I1208 17:47:21.839949 1 reflector.go:430] "Caches populated" type="*v1.PersistentVolume" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.840043561+00:00 stderr F I1208 17:47:21.840028 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.840114063+00:00 stderr F I1208 17:47:21.840070 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.840169485+00:00 stderr F I1208 17:47:21.840133 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.841002442+00:00 stderr F I1208 17:47:21.840524 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.841002442+00:00 stderr F I1208 17:47:21.840735 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.846253427+00:00 stderr F I1208 17:47:21.841942 1 reflector.go:430] "Caches populated" type="*v1.DaemonSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.851762601+00:00 stderr F I1208 17:47:21.850156 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.851762601+00:00 stderr F I1208 17:47:21.850591 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.851762601+00:00 stderr F I1208 17:47:21.850739 1 shared_informer.go:357] "Caches are synced" controller="expand" 2025-12-08T17:47:21.851762601+00:00 stderr F I1208 17:47:21.850762 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.851762601+00:00 stderr F I1208 17:47:21.851333 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.851762601+00:00 stderr F I1208 17:47:21.851365 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.852316018+00:00 stderr F I1208 17:47:21.851955 1 reflector.go:430] "Caches populated" type="*v1.RoleBindingRestriction" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/authorization/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.854673252+00:00 stderr F I1208 17:47:21.854629 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.855018573+00:00 stderr F I1208 17:47:21.854970 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 
2025-12-08T17:47:21.855046194+00:00 stderr F I1208 17:47:21.855028 1 reflector.go:430] "Caches populated" type="*v2.HorizontalPodAutoscaler" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.855188978+00:00 stderr F I1208 17:47:21.855160 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.855360074+00:00 stderr F I1208 17:47:21.855326 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.855446956+00:00 stderr F I1208 17:47:21.855426 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.855551910+00:00 stderr F I1208 17:47:21.855507 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.855638832+00:00 stderr F I1208 17:47:21.855598 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.855715845+00:00 stderr F I1208 17:47:21.855688 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.855793897+00:00 stderr F I1208 17:47:21.855758 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.855843309+00:00 stderr F I1208 17:47:21.855817 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.855843309+00:00 stderr F I1208 17:47:21.855837 1 reflector.go:430] "Caches populated" type="*v1.VolumeAttachment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.855928971+00:00 stderr F I1208 17:47:21.855903 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856013434+00:00 stderr F I1208 17:47:21.855992 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.856013434+00:00 stderr F I1208 17:47:21.856004 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856044605+00:00 stderr F I1208 17:47:21.856036 1 reflector.go:430] "Caches populated" type="*v1.PodDisruptionBudget" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.856121417+00:00 stderr F I1208 17:47:21.856102 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856133128+00:00 stderr F I1208 17:47:21.856116 1 reflector.go:430] "Caches populated" type="*v1.ReplicationController" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.856235281+00:00 stderr F I1208 17:47:21.856211 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856313913+00:00 stderr F I1208 17:47:21.856293 1 reflector.go:430] 
"Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856345364+00:00 stderr F I1208 17:47:21.856324 1 reflector.go:430] "Caches populated" type="*v1.StatefulSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.856354115+00:00 stderr F I1208 17:47:21.856341 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856431597+00:00 stderr F I1208 17:47:21.856413 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856442967+00:00 stderr F I1208 17:47:21.856207 1 reflector.go:430] "Caches populated" type="*v1.ResourceQuota" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.856476868+00:00 stderr F I1208 17:47:21.856458 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856506559+00:00 stderr F I1208 17:47:21.856417 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856506559+00:00 stderr F I1208 17:47:21.855774 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856537300+00:00 stderr F I1208 17:47:21.855823 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856597632+00:00 stderr F I1208 17:47:21.856292 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856690885+00:00 stderr F I1208 17:47:21.856665 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856690885+00:00 stderr F I1208 17:47:21.856678 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856790828+00:00 stderr F I1208 17:47:21.856766 1 reflector.go:430] "Caches populated" type="*v1.BuildConfig" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/build/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.856790828+00:00 stderr F I1208 17:47:21.856783 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856802759+00:00 stderr F I1208 17:47:21.856793 1 reflector.go:430] "Caches populated" type="*v1.TemplateInstance" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/template/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.856945173+00:00 stderr F I1208 17:47:21.856866 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.856945173+00:00 stderr F I1208 17:47:21.856929 1 reflector.go:430] "Caches populated" type="*v1.DeploymentConfig" 
reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/apps/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.857037576+00:00 stderr F I1208 17:47:21.857011 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.857078457+00:00 stderr F I1208 17:47:21.857063 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.857087327+00:00 stderr F I1208 17:47:21.857074 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.857152560+00:00 stderr F I1208 17:47:21.857137 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.857209622+00:00 stderr F I1208 17:47:21.857192 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.857321226+00:00 stderr F I1208 17:47:21.857017 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.857416119+00:00 stderr F I1208 17:47:21.857386 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.857464830+00:00 stderr F I1208 17:47:21.857431 1 reflector.go:430] "Caches populated" type="*v1.CronJob" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.857492351+00:00 stderr F I1208 17:47:21.857445 1 reflector.go:430] "Caches populated" type="*v1.ValidatingAdmissionPolicy" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.859012508+00:00 stderr F I1208 17:47:21.858613 1 reflector.go:430] "Caches populated" type="*v1.Job" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.859012508+00:00 stderr F I1208 17:47:21.858773 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.859124922+00:00 stderr F I1208 17:47:21.859030 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-machine-config-operator/machine-config-nodes-crd-cleanup-29367829" delay="0s" 2025-12-08T17:47:21.859124922+00:00 stderr F I1208 17:47:21.859081 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29369370" delay="0s" 2025-12-08T17:47:21.859124922+00:00 stderr F I1208 17:47:21.859089 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420250" delay="0s" 2025-12-08T17:47:21.859124922+00:00 stderr F I1208 17:47:21.859112 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420265" delay="0s" 2025-12-08T17:47:21.859754451+00:00 stderr F I1208 17:47:21.859699 1 reflector.go:430] "Caches populated" type="*v1.Route" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/route/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.860337011+00:00 stderr F I1208 17:47:21.860291 1 
reflector.go:430] "Caches populated" type="*v1.NetworkPolicy" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.860453624+00:00 stderr F I1208 17:47:21.860417 1 reflector.go:430] "Caches populated" type="*v1.Build" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/build/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.860618639+00:00 stderr F I1208 17:47:21.860580 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.860842586+00:00 stderr F I1208 17:47:21.860805 1 reflector.go:430] "Caches populated" type="*v1.Lease" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.861728264+00:00 stderr F I1208 17:47:21.861691 1 reflector.go:430] "Caches populated" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.862803698+00:00 stderr F I1208 17:47:21.862774 1 reflector.go:430] "Caches populated" type="*v1.IPAddress" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.863117428+00:00 stderr F I1208 17:47:21.863089 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.867029091+00:00 stderr F I1208 17:47:21.866843 1 shared_informer.go:357] "Caches are synced" controller="cronjob" 2025-12-08T17:47:21.870745948+00:00 stderr F I1208 17:47:21.870689 1 shared_informer.go:357] "Caches are synced" controller="service-cidr-controller" 2025-12-08T17:47:21.879439691+00:00 stderr F I1208 17:47:21.879381 1 reflector.go:430] "Caches populated" type="*v1.ControllerRevision" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.900050571+00:00 stderr F I1208 17:47:21.899976 1 shared_informer.go:357] "Caches are synced" controller="certificate-csrsigning-legacy-unknown" 2025-12-08T17:47:21.901465015+00:00 stderr F I1208 17:47:21.901437 1 shared_informer.go:357] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client" 2025-12-08T17:47:21.903747607+00:00 stderr F I1208 17:47:21.903670 1 reflector.go:430] "Caches populated" type="*v1.EndpointSlice" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.904191720+00:00 stderr F I1208 17:47:21.904118 1 shared_informer.go:357] "Caches are synced" controller="certificate-csrsigning-kubelet-client" 2025-12-08T17:47:21.904191720+00:00 stderr F I1208 17:47:21.904059 1 shared_informer.go:357] "Caches are synced" controller="certificate-csrsigning-kubelet-serving" 2025-12-08T17:47:21.904802280+00:00 stderr F I1208 17:47:21.904731 1 reflector.go:430] "Caches populated" type="*v1.ImageStream" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/image/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.907483084+00:00 stderr F I1208 17:47:21.907443 1 shared_informer.go:357] "Caches are synced" controller="endpoint_slice_mirroring" 2025-12-08T17:47:21.907572328+00:00 stderr F I1208 17:47:21.907466 1 endpointslicemirroring_controller.go:234] "Starting worker threads" logger="endpointslice-mirroring-controller" total=5 2025-12-08T17:47:21.909435136+00:00 stderr F I1208 17:47:21.909379 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.912964436+00:00 stderr F I1208 17:47:21.912922 1 reflector.go:430] "Caches populated" 
type="*v1.Role" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.913717661+00:00 stderr F I1208 17:47:21.913688 1 reflector.go:430] "Caches populated" type="*v1.Template" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/template/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.913969249+00:00 stderr F I1208 17:47:21.913930 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.914362171+00:00 stderr F I1208 17:47:21.913382 1 graph_builder.go:728] "replacing virtual item with observed item" logger="garbage-collector-controller" virtual="[v1/Node, namespace: kube-node-lease, name: crc, uid: 23216ff3-032e-49af-af7e-1d23d5907b59]" observed="[v1/Node, namespace: , name: crc, uid: 23216ff3-032e-49af-af7e-1d23d5907b59]" 2025-12-08T17:47:21.914415323+00:00 stderr F I1208 17:47:21.914392 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.916482148+00:00 stderr F I1208 17:47:21.916439 1 shared_informer.go:357] "Caches are synced" controller="PV protection" 2025-12-08T17:47:21.917818560+00:00 stderr F I1208 17:47:21.917791 1 reflector.go:430] "Caches populated" type="*v1.ReplicaSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.918606064+00:00 stderr F I1208 17:47:21.918571 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.920235816+00:00 stderr F I1208 17:47:21.920150 1 graph_builder.go:728] "replacing virtual item with observed item" logger="garbage-collector-controller" virtual="[apps/v1/daemonset, namespace: openshift-ingress-canary, name: ingress-canary, uid: 77896bcd-d1f7-46a2-984f-9205a544fb94]" observed="[apps/v1/DaemonSet, namespace: openshift-ingress-canary, name: ingress-canary, uid: 77896bcd-d1f7-46a2-984f-9205a544fb94]" 2025-12-08T17:47:21.922784926+00:00 stderr F I1208 17:47:21.922730 1 shared_informer.go:350] "Waiting for caches to sync" controller="garbage collector" 2025-12-08T17:47:21.923036914+00:00 stderr F I1208 17:47:21.923012 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.939071399+00:00 stderr F I1208 17:47:21.939015 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420250" delay="1s" 2025-12-08T17:47:21.939071399+00:00 stderr F I1208 17:47:21.939040 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420265" delay="1s" 2025-12-08T17:47:21.939765881+00:00 stderr F I1208 17:47:21.939707 1 reflector.go:430] "Caches populated" type="*v1.RangeAllocation" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/security/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.940132632+00:00 stderr F I1208 17:47:21.940109 1 reflector.go:430] "Caches populated" type="*v1.IngressClass" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.940245615+00:00 stderr F I1208 17:47:21.940220 1 reflector.go:430] "Caches populated" type="*v1.RuntimeClass" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.940421701+00:00 stderr F I1208 17:47:21.940396 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" 
reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.940490263+00:00 stderr F I1208 17:47:21.940455 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.940519484+00:00 stderr F I1208 17:47:21.940402 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.941047960+00:00 stderr F I1208 17:47:21.941009 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.941047960+00:00 stderr F I1208 17:47:21.941039 1 reflector.go:430] "Caches populated" type="*v1.ClusterResourceQuota" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/quota/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.941421553+00:00 stderr F I1208 17:47:21.941399 1 reflector.go:430] "Caches populated" type="*v1.PriorityClass" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.941652470+00:00 stderr F I1208 17:47:21.941630 1 reflector.go:430] "Caches populated" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.941750293+00:00 stderr F I1208 17:47:21.941716 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.941831136+00:00 stderr F I1208 17:47:21.940116 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.942337542+00:00 stderr F I1208 17:47:21.942307 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.943024993+00:00 stderr F I1208 17:47:21.942939 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.943499148+00:00 stderr F I1208 17:47:21.943476 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.944076757+00:00 stderr F I1208 17:47:21.944056 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.944109868+00:00 stderr F I1208 17:47:21.944091 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.944206031+00:00 stderr F I1208 17:47:21.944184 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.944236312+00:00 stderr F I1208 17:47:21.944220 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.944262943+00:00 stderr F I1208 17:47:21.944224 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.944363726+00:00 stderr F I1208 17:47:21.944345 1 reflector.go:430] "Caches 
populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.944390907+00:00 stderr F I1208 17:47:21.944372 1 reflector.go:430] "Caches populated" type="*v1.BrokerTemplateInstance" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/template/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.944457659+00:00 stderr F I1208 17:47:21.944346 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.944576982+00:00 stderr F I1208 17:47:21.944544 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.944668955+00:00 stderr F I1208 17:47:21.944644 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.944713677+00:00 stderr F I1208 17:47:21.944696 1 shared_informer.go:357] "Caches are synced" controller="certificate-csrapproving" 2025-12-08T17:47:21.944925693+00:00 stderr F I1208 17:47:21.944652 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.945029246+00:00 stderr F I1208 17:47:21.945012 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.945234453+00:00 stderr F I1208 17:47:21.945217 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.945406118+00:00 stderr F I1208 17:47:21.945384 1 reflector.go:430] "Caches populated" type="*v1.UserOAuthAccessToken" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/oauth/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.945561953+00:00 stderr F I1208 17:47:21.945538 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.945644066+00:00 stderr F I1208 17:47:21.945390 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.945711858+00:00 stderr F I1208 17:47:21.945680 1 graph_builder.go:728] "replacing virtual item with observed item" logger="garbage-collector-controller" virtual="[operator.openshift.io/v1/Console, namespace: openshift-console, name: cluster, uid: 72c9b389-7361-48f0-8bf6-56fe26546245]" observed="[operator.openshift.io/v1/Console, namespace: , name: cluster, uid: 72c9b389-7361-48f0-8bf6-56fe26546245]" 2025-12-08T17:47:21.945804371+00:00 stderr F I1208 17:47:21.945786 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.945838232+00:00 stderr F I1208 17:47:21.945776 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.945991686+00:00 stderr F I1208 17:47:21.945961 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 
2025-12-08T17:47:21.946089549+00:00 stderr F I1208 17:47:21.946071 1 shared_informer.go:357] "Caches are synced" controller="validatingadmissionpolicy-status" 2025-12-08T17:47:21.946295696+00:00 stderr F I1208 17:47:21.946274 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.946326527+00:00 stderr F I1208 17:47:21.946299 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.946559084+00:00 stderr F I1208 17:47:21.946490 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.946598845+00:00 stderr F I1208 17:47:21.946575 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.946622886+00:00 stderr F I1208 17:47:21.946600 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.946767690+00:00 stderr F I1208 17:47:21.946745 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.946799522+00:00 stderr F I1208 17:47:21.946540 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.946943727+00:00 stderr F I1208 17:47:21.946915 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.947050750+00:00 stderr F I1208 17:47:21.947024 1 reflector.go:430] "Caches populated" type="*v1.ValidatingAdmissionPolicyBinding" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.947103862+00:00 stderr F I1208 17:47:21.946746 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.947234406+00:00 stderr F I1208 17:47:21.947210 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.947428422+00:00 stderr F I1208 17:47:21.947408 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.947566076+00:00 stderr F I1208 17:47:21.947542 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.947668750+00:00 stderr F I1208 17:47:21.947646 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.947826074+00:00 stderr F I1208 17:47:21.946746 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.948460864+00:00 stderr F I1208 17:47:21.948027 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949154876+00:00 
stderr F I1208 17:47:21.948753 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949154876+00:00 stderr F I1208 17:47:21.948838 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949154876+00:00 stderr F I1208 17:47:21.948844 1 reflector.go:430] "Caches populated" type="*v1.FlowSchema" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.949154876+00:00 stderr F I1208 17:47:21.948965 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949154876+00:00 stderr F I1208 17:47:21.949046 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949154876+00:00 stderr F I1208 17:47:21.949121 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949283360+00:00 stderr F I1208 17:47:21.949263 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949283360+00:00 stderr F I1208 17:47:21.949276 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949292450+00:00 stderr F I1208 17:47:21.949281 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949492636+00:00 stderr F I1208 17:47:21.949469 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949527417+00:00 stderr F I1208 17:47:21.949505 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949541828+00:00 stderr F I1208 17:47:21.949530 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949570739+00:00 stderr F I1208 17:47:21.949546 1 shared_informer.go:357] "Caches are synced" controller="TTL after finished" 2025-12-08T17:47:21.949618781+00:00 stderr F I1208 17:47:21.949589 1 shared_informer.go:357] "Caches are synced" controller="PVC protection" 2025-12-08T17:47:21.949686783+00:00 stderr F I1208 17:47:21.949662 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949763366+00:00 stderr F I1208 17:47:21.949739 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.949962232+00:00 stderr F I1208 17:47:21.949932 1 shared_informer.go:357] "Caches are synced" controller="endpoint_slice" 2025-12-08T17:47:21.949978482+00:00 stderr F I1208 17:47:21.949953 1 endpointslice_controller.go:288] "Starting service queue worker threads" logger="endpointslice-controller" total=5 2025-12-08T17:47:21.949986553+00:00 stderr F I1208 17:47:21.949977 
1 endpointslice_controller.go:292] "Starting topology queue worker threads" logger="endpointslice-controller" total=1 2025-12-08T17:47:21.950076985+00:00 stderr F I1208 17:47:21.950021 1 topologycache.go:253] "Insufficient node info for topology hints" logger="endpointslice-controller" totalZones=0 totalCPU="0" sufficientNodeInfo=true 2025-12-08T17:47:21.953057849+00:00 stderr F I1208 17:47:21.950793 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.953441251+00:00 stderr F I1208 17:47:21.951554 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.953567745+00:00 stderr F I1208 17:47:21.952440 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.953623897+00:00 stderr F I1208 17:47:21.952517 1 reflector.go:430] "Caches populated" type="*v1.PriorityLevelConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.953719880+00:00 stderr F I1208 17:47:21.952646 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.953768401+00:00 stderr F I1208 17:47:21.952896 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.953775612+00:00 stderr F I1208 17:47:21.953733 1 graph_builder.go:728] "replacing virtual item with observed item" logger="garbage-collector-controller" virtual="[machineconfiguration.openshift.io/v1/MachineConfigPool, namespace: openshift-machine-api, name: master, uid: 3b9df6d6-bacd-4862-b99f-10ec7fcf29ac]" observed="[machineconfiguration.openshift.io/v1/MachineConfigPool, namespace: , name: master, uid: 3b9df6d6-bacd-4862-b99f-10ec7fcf29ac]" 2025-12-08T17:47:21.954006039+00:00 stderr F I1208 17:47:21.953775 1 shared_informer.go:357] "Caches are synced" controller="disruption" 2025-12-08T17:47:21.954006039+00:00 stderr F I1208 17:47:21.953861 1 graph_builder.go:728] "replacing virtual item with observed item" logger="garbage-collector-controller" virtual="[machineconfiguration.openshift.io/v1/MachineConfigPool, namespace: openshift-machine-api, name: worker, uid: 633fcfae-03e0-4a3a-8d5c-de9a658e82f6]" observed="[machineconfiguration.openshift.io/v1/MachineConfigPool, namespace: , name: worker, uid: 633fcfae-03e0-4a3a-8d5c-de9a658e82f6]" 2025-12-08T17:47:21.954113972+00:00 stderr F I1208 17:47:21.953993 1 shared_informer.go:357] "Caches are synced" controller="ClusterRoleAggregator" 2025-12-08T17:47:21.954226246+00:00 stderr F I1208 17:47:21.954179 1 graph_builder.go:728] "replacing virtual item with observed item" logger="garbage-collector-controller" virtual="[operator.openshift.io/v1/DNS, namespace: openshift-dns, name: default, uid: 0f9755ef-acf2-4bc6-a6fc-f491e28e635f]" observed="[operator.openshift.io/v1/DNS, namespace: , name: default, uid: 0f9755ef-acf2-4bc6-a6fc-f491e28e635f]" 2025-12-08T17:47:21.956558389+00:00 stderr F I1208 17:47:21.956516 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.956770226+00:00 stderr F I1208 17:47:21.956732 1 reflector.go:430] "Caches populated" 
type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.956833938+00:00 stderr F I1208 17:47:21.956801 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.956833938+00:00 stderr F I1208 17:47:21.956826 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.957590611+00:00 stderr F I1208 17:47:21.957164 1 reflector.go:430] "Caches populated" type="*v1.SecurityContextConstraints" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/security/informers/externalversions/factory.go:125" 2025-12-08T17:47:21.957590611+00:00 stderr F I1208 17:47:21.957505 1 shared_informer.go:357] "Caches are synced" controller="legacy-service-account-token-cleaner" 2025-12-08T17:47:21.958406038+00:00 stderr F I1208 17:47:21.958372 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.958632625+00:00 stderr F I1208 17:47:21.958581 1 reflector.go:430] "Caches populated" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.958759779+00:00 stderr F I1208 17:47:21.958731 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.959044918+00:00 stderr F I1208 17:47:21.959012 1 shared_informer.go:357] "Caches are synced" controller="endpoint" 2025-12-08T17:47:21.959334777+00:00 stderr F I1208 17:47:21.959304 1 shared_informer.go:357] "Caches are synced" controller="service account" 2025-12-08T17:47:21.961611508+00:00 stderr F I1208 17:47:21.961587 1 shared_informer.go:357] "Caches are synced" controller="ReplicaSet" 2025-12-08T17:47:21.964082116+00:00 stderr F I1208 17:47:21.963189 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.964189920+00:00 stderr F I1208 17:47:21.964170 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.965143219+00:00 stderr F I1208 17:47:21.965106 1 shared_informer.go:357] "Caches are synced" controller="stateful set" 2025-12-08T17:47:21.966847073+00:00 stderr F I1208 17:47:21.966803 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.967457502+00:00 stderr F I1208 17:47:21.967430 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.968258677+00:00 stderr F I1208 17:47:21.968221 1 shared_informer.go:357] "Caches are synced" controller="taint-eviction-controller" 2025-12-08T17:47:21.969657982+00:00 stderr F I1208 17:47:21.969610 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.975527487+00:00 stderr F I1208 17:47:21.973420 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.978686086+00:00 stderr F I1208 
17:47:21.978656 1 shared_informer.go:357] "Caches are synced" controller="ReplicationController" 2025-12-08T17:47:21.978703376+00:00 stderr F I1208 17:47:21.978694 1 shared_informer.go:357] "Caches are synced" controller="GC" 2025-12-08T17:47:21.979746759+00:00 stderr F I1208 17:47:21.979584 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.990147417+00:00 stderr F I1208 17:47:21.987648 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:21.990147417+00:00 stderr F I1208 17:47:21.987911 1 endpointslice_controller.go:344] "Error syncing endpoint slices for service, retrying" logger="endpointslice-controller" key="openshift-kube-controller-manager/kube-controller-manager" err="EndpointSlice informer cache is out of date" 2025-12-08T17:47:21.994975009+00:00 stderr F I1208 17:47:21.994922 1 shared_informer.go:357] "Caches are synced" controller="namespace" 2025-12-08T17:47:21.996897439+00:00 stderr F I1208 17:47:21.996293 1 shared_informer.go:357] "Caches are synced" controller="daemon sets" 2025-12-08T17:47:21.996897439+00:00 stderr F I1208 17:47:21.996306 1 shared_informer.go:350] "Waiting for caches to sync" controller="daemon sets" 2025-12-08T17:47:21.996897439+00:00 stderr F I1208 17:47:21.996311 1 shared_informer.go:357] "Caches are synced" controller="daemon sets" 2025-12-08T17:47:22.000307787+00:00 stderr F I1208 17:47:22.000251 1 shared_informer.go:357] "Caches are synced" controller="job" 2025-12-08T17:47:22.000371179+00:00 stderr F I1208 17:47:22.000343 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:22.010854958+00:00 stderr F I1208 17:47:22.010771 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:22.014060200+00:00 stderr F I1208 17:47:22.014012 1 shared_informer.go:357] "Caches are synced" controller="taint" 2025-12-08T17:47:22.014126532+00:00 stderr F I1208 17:47:22.014099 1 node_lifecycle_controller.go:675] "Controller observed a new Node" logger="node-lifecycle-controller" node="crc" 2025-12-08T17:47:22.014148712+00:00 stderr F I1208 17:47:22.014133 1 controller_utils.go:173] "Recording event message for node" logger="node-lifecycle-controller" event="Registered Node crc in Controller" node="crc" 2025-12-08T17:47:22.014190774+00:00 stderr F I1208 17:47:22.014166 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone="" 2025-12-08T17:47:22.014288397+00:00 stderr F I1208 17:47:22.014262 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. 
Assuming now as a timestamp" logger="node-lifecycle-controller" node="crc" 2025-12-08T17:47:22.014392770+00:00 stderr F I1208 17:47:22.014335 1 node_lifecycle_controller.go:1067] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal" 2025-12-08T17:47:22.016015841+00:00 stderr F I1208 17:47:22.015967 1 shared_informer.go:357] "Caches are synced" controller="HPA" 2025-12-08T17:47:22.018283902+00:00 stderr F I1208 17:47:22.018246 1 shared_informer.go:357] "Caches are synced" controller="persistent volume" 2025-12-08T17:47:22.018403416+00:00 stderr F I1208 17:47:22.018367 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:22.021130072+00:00 stderr F I1208 17:47:22.019621 1 shared_informer.go:357] "Caches are synced" controller="ephemeral" 2025-12-08T17:47:22.021130072+00:00 stderr F I1208 17:47:22.021049 1 shared_informer.go:357] "Caches are synced" controller="resource quota" 2025-12-08T17:47:22.025750778+00:00 stderr F I1208 17:47:22.024182 1 shared_informer.go:357] "Caches are synced" controller="deployment" 2025-12-08T17:47:22.028290518+00:00 stderr F I1208 17:47:22.028243 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:22.032540681+00:00 stderr F I1208 17:47:22.032482 1 shared_informer.go:357] "Caches are synced" controller="selinux_warning" 2025-12-08T17:47:22.036257128+00:00 stderr F I1208 17:47:22.036221 1 shared_informer.go:357] "Caches are synced" controller="attach detach" 2025-12-08T17:47:22.038919821+00:00 stderr F I1208 17:47:22.038858 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:47:22.039349716+00:00 stderr F I1208 17:47:22.039330 1 shared_informer.go:357] "Caches are synced" controller="resource quota" 2025-12-08T17:47:22.039380397+00:00 stderr F I1208 17:47:22.039369 1 resource_quota_controller.go:502] "synced quota controller" logger="resourcequota-controller" 2025-12-08T17:47:22.044779346+00:00 stderr F I1208 17:47:22.044717 1 shared_informer.go:357] "Caches are synced" controller="crt configmap" 2025-12-08T17:47:22.056989891+00:00 stderr F I1208 17:47:22.056938 1 shared_informer.go:357] "Caches are synced" controller="crt configmap" 2025-12-08T17:47:22.060911254+00:00 stderr F I1208 17:47:22.058021 1 replica_set.go:626] "Too many replicas" logger="replicaset-controller" replicaSet="openshift-controller-manager/controller-manager-65b6cccf98" need=0 deleting=1 2025-12-08T17:47:22.060911254+00:00 stderr F I1208 17:47:22.058078 1 replica_set.go:253] "Found related ReplicaSets" logger="replicaset-controller" replicaSet="openshift-controller-manager/controller-manager-65b6cccf98" 
relatedReplicaSets=["openshift-controller-manager/controller-manager-7c9cdb8ff5","openshift-controller-manager/controller-manager-9fd5cc475","openshift-controller-manager/controller-manager-c84474957","openshift-controller-manager/controller-manager-6cd9c44569","openshift-controller-manager/controller-manager-58897fffb5","openshift-controller-manager/controller-manager-5d4c96c665","openshift-controller-manager/controller-manager-5f76cf6594","openshift-controller-manager/controller-manager-65b6cccf98","openshift-controller-manager/controller-manager-6ff9c7475c","openshift-controller-manager/controller-manager-86f48fd68b","openshift-controller-manager/controller-manager-c7d4b49f6","openshift-controller-manager/controller-manager-74bfd85b68"] 2025-12-08T17:47:22.060911254+00:00 stderr F I1208 17:47:22.058181 1 controller_utils.go:618] "Deleting pod" logger="replicaset-controller" controller="controller-manager-65b6cccf98" pod="openshift-controller-manager/controller-manager-65b6cccf98-6wjgz" 2025-12-08T17:47:22.063059742+00:00 stderr F I1208 17:47:22.063021 1 replica_set.go:626] "Too many replicas" logger="replicaset-controller" replicaSet="openshift-route-controller-manager/route-controller-manager-776cdc94d6" need=0 deleting=1 2025-12-08T17:47:22.063078842+00:00 stderr F I1208 17:47:22.063054 1 replica_set.go:253] "Found related ReplicaSets" logger="replicaset-controller" replicaSet="openshift-route-controller-manager/route-controller-manager-776cdc94d6" relatedReplicaSets=["openshift-route-controller-manager/route-controller-manager-66bd94d94f","openshift-route-controller-manager/route-controller-manager-684bc95d64","openshift-route-controller-manager/route-controller-manager-76558c69dc","openshift-route-controller-manager/route-controller-manager-776cdc94d6","openshift-route-controller-manager/route-controller-manager-7d86df95df","openshift-route-controller-manager/route-controller-manager-c47fcf799","openshift-route-controller-manager/route-controller-manager-6975b9f87f","openshift-route-controller-manager/route-controller-manager-58f5cf7b86","openshift-route-controller-manager/route-controller-manager-5fccdd79b9","openshift-route-controller-manager/route-controller-manager-6bc8749ddd","openshift-route-controller-manager/route-controller-manager-6d7f4ff85d","openshift-route-controller-manager/route-controller-manager-7cc45857b6"] 2025-12-08T17:47:22.063157775+00:00 stderr F I1208 17:47:22.063138 1 controller_utils.go:618] "Deleting pod" logger="replicaset-controller" controller="route-controller-manager-776cdc94d6" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-qkg2q" 2025-12-08T17:47:22.063240747+00:00 stderr F I1208 17:47:22.063213 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-controller-manager/controller-manager" err="Operation cannot be fulfilled on deployments.apps \"controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:22.066937473+00:00 stderr F I1208 17:47:22.066897 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-route-controller-manager/route-controller-manager" err="Operation cannot be fulfilled on deployments.apps \"route-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:22.077183426+00:00 stderr F I1208 17:47:22.077100 1 deployment_controller.go:512] "Error 
syncing deployment" logger="deployment-controller" deployment="openshift-controller-manager/controller-manager" err="Operation cannot be fulfilled on replicasets.apps \"controller-manager-6cd9c44569\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:22.085028323+00:00 stderr F I1208 17:47:22.084806 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-controller-manager/controller-manager-6cd9c44569" need=1 creating=1 2025-12-08T17:47:22.086957614+00:00 stderr F I1208 17:47:22.085581 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-route-controller-manager/route-controller-manager" err="Operation cannot be fulfilled on replicasets.apps \"route-controller-manager-6975b9f87f\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:22.100990056+00:00 stderr F I1208 17:47:22.099994 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-route-controller-manager/route-controller-manager-6975b9f87f" need=1 creating=1 2025-12-08T17:47:22.103435032+00:00 stderr F I1208 17:47:22.103208 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-controller-manager/controller-manager" err="Operation cannot be fulfilled on deployments.apps \"controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:22.123916948+00:00 stderr F I1208 17:47:22.123850 1 shared_informer.go:357] "Caches are synced" controller="garbage collector" 2025-12-08T17:47:22.123916948+00:00 stderr F I1208 17:47:22.123873 1 garbagecollector.go:235] "synced garbage collector" logger="garbage-collector-controller" 2025-12-08T17:47:22.124038462+00:00 stderr F I1208 17:47:22.123999 1 shared_informer.go:357] "Caches are synced" controller="garbage collector" 2025-12-08T17:47:22.124038462+00:00 stderr F I1208 17:47:22.124019 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller" 2025-12-08T17:47:22.124038462+00:00 stderr F I1208 17:47:22.124026 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller" 2025-12-08T17:47:22.124144855+00:00 stderr F I1208 17:47:22.124075 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[config.openshift.io/v1/ClusterVersion, namespace: , name: version, uid: 81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503]" virtual=false 2025-12-08T17:47:22.124156315+00:00 stderr F I1208 17:47:22.124097 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[operator.openshift.io/v1/Network, namespace: , name: cluster, uid: d56acc66-d25c-4e5c-aa52-5418dd270c94]" virtual=false 2025-12-08T17:47:22.124207287+00:00 stderr F I1208 17:47:22.124135 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[operators.coreos.com/v1alpha1/ClusterServiceVersion, namespace: openshift-operator-lifecycle-manager, name: packageserver, uid: 09b3d4b2-fc47-4ee0-a331-67a39502cf21]" virtual=false 2025-12-08T17:47:22.124215847+00:00 stderr F I1208 17:47:22.124164 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-marketplace, name: marketplace-operator-metrics, uid: 94337474-19e9-47ef-a63f-a5db85f82770]" 
virtual=false 2025-12-08T17:47:22.124353881+00:00 stderr F I1208 17:47:22.124306 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-config-operator, name: metrics, uid: d0b70160-b97a-47c1-8814-0419134941de]" virtual=false 2025-12-08T17:47:22.124353881+00:00 stderr F I1208 17:47:22.124317 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-kube-storage-version-migrator-operator, name: metrics, uid: e5bc231d-9114-40a5-a422-584788726a16]" virtual=false 2025-12-08T17:47:22.124366102+00:00 stderr F I1208 17:47:22.124340 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: cluster-autoscaler-operator, uid: a79ef72a-9de8-4bd8-ab71-7e0b71724a57]" virtual=false 2025-12-08T17:47:22.124424994+00:00 stderr F I1208 17:47:22.124390 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: machine-api-operator-webhook, uid: 3d8d8642-1cd0-4ece-918a-8ae8e150b269]" virtual=false 2025-12-08T17:47:22.124455574+00:00 stderr F I1208 17:47:22.124427 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-config-operator, name: machine-config-daemon, uid: 09b33468-17e9-49a7-b6f4-38686a6730e9]" virtual=false 2025-12-08T17:47:22.124455574+00:00 stderr F I1208 17:47:22.124431 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-monitoring, name: cluster-monitoring-operator, uid: 5ecbdbba-0aac-4ca1-9bd5-63c3dd666779]" virtual=false 2025-12-08T17:47:22.124497836+00:00 stderr F I1208 17:47:22.124465 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-apiserver, name: check-endpoints, uid: d0180f4d-9eb3-45e2-8586-df212e67c7f6]" virtual=false 2025-12-08T17:47:22.124548787+00:00 stderr F I1208 17:47:22.124510 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-network-diagnostics, name: network-check-target, uid: 2870f12f-1c16-412d-8d85-3f66a56def0d]" virtual=false 2025-12-08T17:47:22.124558398+00:00 stderr F I1208 17:47:22.124531 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-operator-lifecycle-manager, name: olm-operator-metrics, uid: 5f438b20-bbb9-4020-9829-dc86fe8ca8bd]" virtual=false 2025-12-08T17:47:22.124572348+00:00 stderr F I1208 17:47:22.124551 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-service-ca-operator, name: metrics, uid: fda33e4c-938e-4672-b83c-6f709efbd0d6]" virtual=false 2025-12-08T17:47:22.124603959+00:00 stderr F I1208 17:47:22.124570 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: machine-api-operator, uid: a990d1c8-6964-4853-af7b-2ce1eec0c42d]" virtual=false 2025-12-08T17:47:22.124659051+00:00 stderr F I1208 17:47:22.124624 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-cluster-machine-approver, name: machine-approver, uid: 1d6e0c49-5022-4407-b7e2-606925e10c95]" virtual=false 
2025-12-08T17:47:22.124671391+00:00 stderr F I1208 17:47:22.124650 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-ovn-kubernetes, name: ovn-kubernetes-control-plane, uid: 502d0892-a804-4fae-ad3f-cb342f49e7aa]" virtual=false 2025-12-08T17:47:22.124701312+00:00 stderr F I1208 17:47:22.124667 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-kube-controller-manager-operator, name: metrics, uid: 756dc7f9-c733-4561-89e6-0982cec51bd4]" virtual=false 2025-12-08T17:47:22.124701312+00:00 stderr F I1208 17:47:22.124648 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-etcd-operator, name: metrics, uid: 518336b3-7008-41e5-9cec-2c1c85f2ff09]" virtual=false 2025-12-08T17:47:22.124713083+00:00 stderr F I1208 17:47:22.124649 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-ovn-kubernetes, name: ovn-kubernetes-node, uid: c758def5-7362-4985-b540-393f26fc97a6]" virtual=false 2025-12-08T17:47:22.128246693+00:00 stderr F I1208 17:47:22.128179 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[operator.openshift.io/v1/Network, namespace: , name: cluster, uid: d56acc66-d25c-4e5c-aa52-5418dd270c94]" 2025-12-08T17:47:22.128246693+00:00 stderr F I1208 17:47:22.128216 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: machine-api-controllers, uid: 3304c944-7c53-445a-9f17-9e3e5f75226c]" virtual=false 2025-12-08T17:47:22.128720538+00:00 stderr F I1208 17:47:22.128668 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[operators.coreos.com/v1alpha1/ClusterServiceVersion, namespace: openshift-operator-lifecycle-manager, name: packageserver, uid: 09b3d4b2-fc47-4ee0-a331-67a39502cf21]" 2025-12-08T17:47:22.128720538+00:00 stderr F I1208 17:47:22.128694 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-config-operator, name: machine-config-controller, uid: c7d72a1b-faa9-4eb8-96e4-0297bb74850e]" virtual=false 2025-12-08T17:47:22.128971597+00:00 stderr F I1208 17:47:22.128931 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[config.openshift.io/v1/ClusterVersion, namespace: , name: version, uid: 81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503]" 2025-12-08T17:47:22.128971597+00:00 stderr F I1208 17:47:22.128951 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-network-diagnostics, name: network-check-source, uid: 5ad92a4d-4ca9-422b-8abe-d013f8c3121c]" virtual=false 2025-12-08T17:47:22.130257677+00:00 stderr F I1208 17:47:22.130201 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-kube-storage-version-migrator-operator, name: metrics, uid: e5bc231d-9114-40a5-a422-584788726a16]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.130257677+00:00 stderr F 
I1208 17:47:22.130230 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-cluster-samples-operator, name: metrics, uid: c016741c-0788-45bd-a328-6a4aa719a9ee]" virtual=false 2025-12-08T17:47:22.130777443+00:00 stderr F I1208 17:47:22.130716 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-kube-controller-manager-operator, name: metrics, uid: 756dc7f9-c733-4561-89e6-0982cec51bd4]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.130777443+00:00 stderr F I1208 17:47:22.130761 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-console-operator, name: metrics, uid: 899e5eff-60ef-429f-9533-9c263e2d0ddb]" virtual=false 2025-12-08T17:47:22.131276369+00:00 stderr F I1208 17:47:22.131225 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-cluster-machine-approver, name: machine-approver, uid: 1d6e0c49-5022-4407-b7e2-606925e10c95]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.131276369+00:00 stderr F I1208 17:47:22.131251 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-image-registry, name: image-registry-operator, uid: 21252561-db9f-4519-becf-9b4daef38a73]" virtual=false 2025-12-08T17:47:22.131447944+00:00 stderr F I1208 17:47:22.131405 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-config-operator, name: metrics, uid: d0b70160-b97a-47c1-8814-0419134941de]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.131447944+00:00 stderr F I1208 17:47:22.131427 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: machine-api-operator-machine-webhook, uid: a39c524d-dab9-414c-a0c5-6c6ece7558fe]" virtual=false 2025-12-08T17:47:22.131636400+00:00 stderr F I1208 17:47:22.131594 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-service-ca-operator, name: metrics, uid: fda33e4c-938e-4672-b83c-6f709efbd0d6]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.131636400+00:00 stderr F I1208 17:47:22.131615 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-multus, name: multus-admission-controller, uid: 1c31f0d5-3fc0-4cca-8214-9358f0570149]" virtual=false 2025-12-08T17:47:22.134196121+00:00 stderr F I1208 17:47:22.134138 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: 
openshift-network-diagnostics, name: network-check-target, uid: 2870f12f-1c16-412d-8d85-3f66a56def0d]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.134224912+00:00 stderr F I1208 17:47:22.134167 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: control-plane-machine-set-operator, uid: 6029f3c3-1e22-47e0-b96d-b40f71acacb2]" virtual=false 2025-12-08T17:47:22.134460099+00:00 stderr F I1208 17:47:22.134399 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-ovn-kubernetes, name: ovn-kubernetes-node, uid: c758def5-7362-4985-b540-393f26fc97a6]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.134538802+00:00 stderr F I1208 17:47:22.134421 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-authentication-operator, name: metrics, uid: 9d158722-c9d8-4574-9c8d-76aff39b1405]" virtual=false 2025-12-08T17:47:22.134793370+00:00 stderr F I1208 17:47:22.134748 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: machine-api-operator, uid: a990d1c8-6964-4853-af7b-2ce1eec0c42d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.134793370+00:00 stderr F I1208 17:47:22.134772 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-network-console, name: networking-console-plugin, uid: 99d5d1b5-3679-4a1e-a5a0-7df1601fc793]" virtual=false 2025-12-08T17:47:22.135094610+00:00 stderr F I1208 17:47:22.135042 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-marketplace, name: marketplace-operator-metrics, uid: 94337474-19e9-47ef-a63f-a5db85f82770]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.135094610+00:00 stderr F I1208 17:47:22.135070 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-operator-lifecycle-manager, name: catalog-operator-metrics, uid: c608aa3d-4067-43ca-a0d6-9d04be3b853c]" virtual=false 2025-12-08T17:47:22.135361478+00:00 stderr F I1208 17:47:22.135310 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-apiserver, name: check-endpoints, uid: d0180f4d-9eb3-45e2-8586-df212e67c7f6]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.135361478+00:00 stderr F I1208 17:47:22.135333 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" 
item="[v1/Service, namespace: openshift-network-operator, name: metrics, uid: 62887ae0-bdbb-4eca-867b-a51f8a3fa46b]" virtual=false 2025-12-08T17:47:22.135375918+00:00 stderr F I1208 17:47:22.135342 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: machine-api-operator-webhook, uid: 3d8d8642-1cd0-4ece-918a-8ae8e150b269]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.135468831+00:00 stderr F I1208 17:47:22.135391 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-monitoring, name: cluster-monitoring-operator, uid: 5ecbdbba-0aac-4ca1-9bd5-63c3dd666779]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.135481542+00:00 stderr F I1208 17:47:22.135457 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-dns-operator, name: metrics, uid: 7038e591-b4c2-4e44-9589-0decde72039e]" virtual=false 2025-12-08T17:47:22.135491802+00:00 stderr F I1208 17:47:22.135371 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-kube-apiserver-operator, name: metrics, uid: c89a6cf8-b1f5-433c-a98e-6433ab2d8604]" virtual=false 2025-12-08T17:47:22.135599995+00:00 stderr F I1208 17:47:22.135538 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-ovn-kubernetes, name: ovn-kubernetes-control-plane, uid: 502d0892-a804-4fae-ad3f-cb342f49e7aa]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.135599995+00:00 stderr F I1208 17:47:22.135580 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-ingress-operator, name: metrics, uid: 0f5c5226-4139-4bd9-a1f1-4819358f4b44]" virtual=false 2025-12-08T17:47:22.135977447+00:00 stderr F I1208 17:47:22.135932 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-config-operator, name: machine-config-daemon, uid: 09b33468-17e9-49a7-b6f4-38686a6730e9]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.136282017+00:00 stderr F I1208 17:47:22.136223 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-config-operator, name: machine-config-controller, uid: c7d72a1b-faa9-4eb8-96e4-0297bb74850e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.136282017+00:00 stderr F I1208 17:47:22.136258 1 garbagecollector.go:501] "Processing item" 
logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-operator-lifecycle-manager, name: package-server-manager-metrics, uid: 199e5ec3-0fe1-4929-8be5-eeb222b837d4]" virtual=false 2025-12-08T17:47:22.136350869+00:00 stderr F I1208 17:47:22.136080 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-operator-lifecycle-manager, name: olm-operator-metrics, uid: 5f438b20-bbb9-4020-9829-dc86fe8ca8bd]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.136379060+00:00 stderr F I1208 17:47:22.136107 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: cluster-autoscaler-operator, uid: a79ef72a-9de8-4bd8-ab71-7e0b71724a57]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.136393880+00:00 stderr F I1208 17:47:22.136375 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-cluster-version, name: cluster-version-operator, uid: 3a493813-5327-4484-bc96-a108bced6093]" virtual=false 2025-12-08T17:47:22.136429301+00:00 stderr F I1208 17:47:22.136381 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-apiserver-operator, name: metrics, uid: 67098b90-387a-4f7b-8a68-e484e6889ec7]" virtual=false 2025-12-08T17:47:22.136448132+00:00 stderr F I1208 17:47:22.136234 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-config-operator, name: machine-config-operator, uid: 67b62412-7f65-4222-ba31-74c21dcee1b1]" virtual=false 2025-12-08T17:47:22.136457002+00:00 stderr F I1208 17:47:22.136157 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-network-diagnostics, name: network-check-source, uid: 5ad92a4d-4ca9-422b-8abe-d013f8c3121c]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.136490303+00:00 stderr F I1208 17:47:22.136006 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-etcd-operator, name: metrics, uid: 518336b3-7008-41e5-9cec-2c1c85f2ff09]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.136521144+00:00 stderr F I1208 17:47:22.136465 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-ingress, name: router-default, uid: 6445a1ec-8ec2-4ec8-b191-9cc7fa235148]" virtual=false 2025-12-08T17:47:22.136610827+00:00 stderr F I1208 17:47:22.136492 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-multus, name: network-metrics-service, uid: 
221650ca-5ea9-450a-b2e3-01b15c386136]" virtual=false 2025-12-08T17:47:22.138361282+00:00 stderr F I1208 17:47:22.138294 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: machine-api-controllers, uid: 3304c944-7c53-445a-9f17-9e3e5f75226c]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.138382533+00:00 stderr F I1208 17:47:22.138351 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-controller-manager-operator, name: metrics, uid: 4a7bd676-8b0b-4244-9e34-29c3edb8bb40]" virtual=false 2025-12-08T17:47:22.138796786+00:00 stderr F I1208 17:47:22.138755 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-console-operator, name: metrics, uid: 899e5eff-60ef-429f-9533-9c263e2d0ddb]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.138796786+00:00 stderr F I1208 17:47:22.138783 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-kube-scheduler-operator, name: metrics, uid: 9c082bd0-69fc-4adf-bbca-de0862ba049d]" virtual=false 2025-12-08T17:47:22.139756286+00:00 stderr F I1208 17:47:22.139419 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-image-registry, name: image-registry-operator, uid: 21252561-db9f-4519-becf-9b4daef38a73]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.139756286+00:00 stderr F I1208 17:47:22.139451 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[policy/v1/PodDisruptionBudget, namespace: openshift-operator-lifecycle-manager, name: packageserver-pdb, uid: e1546b15-314b-4240-8e11-d5e915c472c7]" virtual=false 2025-12-08T17:47:22.140518990+00:00 stderr F I1208 17:47:22.140460 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-ingress, name: router-default, uid: 6445a1ec-8ec2-4ec8-b191-9cc7fa235148]" 2025-12-08T17:47:22.140518990+00:00 stderr F I1208 17:47:22.140495 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-service-ca-operator, name: service-ca-operator, uid: 4979bb5a-bf15-43a9-9eee-231d52574ca5]" virtual=false 2025-12-08T17:47:22.140620864+00:00 stderr F I1208 17:47:22.140573 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-multus, name: multus-admission-controller, uid: 1c31f0d5-3fc0-4cca-8214-9358f0570149]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.140620864+00:00 stderr F 
I1208 17:47:22.140602 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-controller-manager, name: openshift-controller-manager, uid: 3ead3b3c-6cb1-4941-8e45-fcfb01cfa6b6]" virtual=false 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.140901 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: machine-api-operator-machine-webhook, uid: a39c524d-dab9-414c-a0c5-6c6ece7558fe]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.140929 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-dns-operator, name: dns-operator, uid: 50a8933f-a9ac-4a80-b460-c36e9fb81474]" virtual=false 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.140940 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-cluster-samples-operator, name: metrics, uid: c016741c-0788-45bd-a328-6a4aa719a9ee]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.141004 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-console-operator, name: console-operator, uid: 67aaa41b-07c9-42c2-b24b-e21c702aaf38]" virtual=false 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.142501 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-network-console, name: networking-console-plugin, uid: 99d5d1b5-3679-4a1e-a5a0-7df1601fc793]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.142528 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-dns, name: dns-default, uid: d5a0398f-8bff-46a5-9d7c-f2f4e26b3879]" virtual=false 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.143005 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-ingress-operator, name: metrics, uid: 0f5c5226-4139-4bd9-a1f1-4819358f4b44]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.143079 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-scheduler-operator, name: kube-scheduler-operator, uid: 1e85867d-2566-494d-83b0-620b526f122f]" virtual=false 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.143203 1 
garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-kube-apiserver-operator, name: metrics, uid: c89a6cf8-b1f5-433c-a98e-6433ab2d8604]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.143231 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-config-operator, name: machine-config-controller, uid: 743c6934-a727-4576-9532-c45126dd50bd]" virtual=false 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.143243 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-api, name: control-plane-machine-set-operator, uid: 6029f3c3-1e22-47e0-b96d-b40f71acacb2]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.143380710+00:00 stderr F I1208 17:47:22.143288 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-operator-lifecycle-manager, name: catalog-operator, uid: 308b0b19-f7aa-40a2-b0b9-10a5d2b356f8]" virtual=false 2025-12-08T17:47:22.143413701+00:00 stderr F I1208 17:47:22.143391 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-network-operator, name: metrics, uid: 62887ae0-bdbb-4eca-867b-a51f8a3fa46b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.143446792+00:00 stderr F I1208 17:47:22.143415 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-operator-lifecycle-manager, name: package-server-manager-metrics, uid: 927e38ed-2fe0-4faa-8d67-18f898398255]" virtual=false 2025-12-08T17:47:22.143519235+00:00 stderr F I1208 17:47:22.143478 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-authentication-operator, name: metrics, uid: 9d158722-c9d8-4574-9c8d-76aff39b1405]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.143535425+00:00 stderr F I1208 17:47:22.143509 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-controller-manager-operator, name: openshift-controller-manager-operator, uid: b6b1ba29-2359-42ad-a1f3-c61c995c528f]" virtual=false 2025-12-08T17:47:22.145813167+00:00 stderr F I1208 17:47:22.143583 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-dns-operator, name: metrics, uid: 7038e591-b4c2-4e44-9589-0decde72039e]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.145813167+00:00 stderr F I1208 17:47:22.143607 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-image-registry, name: image-registry-operator, uid: 7693198b-e80b-4c67-a087-641d91ca8741]" virtual=false 2025-12-08T17:47:22.145813167+00:00 stderr F I1208 17:47:22.145506 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-operator-lifecycle-manager, name: catalog-operator-metrics, uid: c608aa3d-4067-43ca-a0d6-9d04be3b853c]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.145813167+00:00 stderr F I1208 17:47:22.145543 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-apiserver-operator, name: kube-apiserver-operator, uid: 005ad9fa-457c-4a70-9bb0-2a624385cac9]" virtual=false 2025-12-08T17:47:22.146054785+00:00 stderr F I1208 17:47:22.146012 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-operator-lifecycle-manager, name: package-server-manager-metrics, uid: 199e5ec3-0fe1-4929-8be5-eeb222b837d4]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.146054785+00:00 stderr F I1208 17:47:22.146039 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-multus, name: monitor-multus-admission-controller, uid: 01fa16c6-f969-42c1-b69a-43de0deff522]" virtual=false 2025-12-08T17:47:22.146099936+00:00 stderr F I1208 17:47:22.146069 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-apiserver-operator, name: metrics, uid: 67098b90-387a-4f7b-8a68-e484e6889ec7]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.146110456+00:00 stderr F I1208 17:47:22.146097 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-ovn-kubernetes, name: monitor-ovn-node, uid: 992d1159-1bbf-4ff7-adf1-580c362b690a]" virtual=false 2025-12-08T17:47:22.146182799+00:00 stderr F I1208 17:47:22.146157 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-cluster-version, name: cluster-version-operator, uid: 3a493813-5327-4484-bc96-a108bced6093]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.146200349+00:00 stderr F I1208 17:47:22.146178 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" 
item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-route-controller-manager, name: openshift-route-controller-manager, uid: 6c85dcfe-44cb-4596-b7d4-b6e79a18159e]" virtual=false 2025-12-08T17:47:22.146260321+00:00 stderr F I1208 17:47:22.146231 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-kube-scheduler-operator, name: metrics, uid: 9c082bd0-69fc-4adf-bbca-de0862ba049d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.146272162+00:00 stderr F I1208 17:47:22.146257 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-scheduler, name: kube-scheduler, uid: 0ef34820-7d38-4563-bcab-20b7d718ade2]" virtual=false 2025-12-08T17:47:22.146314963+00:00 stderr F I1208 17:47:22.146264 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-machine-config-operator, name: machine-config-operator, uid: 67b62412-7f65-4222-ba31-74c21dcee1b1]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.146346424+00:00 stderr F I1208 17:47:22.146320 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-authentication, name: oauth-openshift, uid: f570f814-28f0-43c6-a672-c5ff8b60e0a0]" virtual=false 2025-12-08T17:47:22.147109378+00:00 stderr F I1208 17:47:22.146537 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-multus, name: network-metrics-service, uid: 221650ca-5ea9-450a-b2e3-01b15c386136]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.147109378+00:00 stderr F I1208 17:47:22.146568 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-console, name: console, uid: ce6eb94a-67e7-4594-8332-d294f5f0ed28]" virtual=false 2025-12-08T17:47:22.147109378+00:00 stderr F I1208 17:47:22.146569 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[policy/v1/PodDisruptionBudget, namespace: openshift-operator-lifecycle-manager, name: packageserver-pdb, uid: e1546b15-314b-4240-8e11-d5e915c472c7]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.147109378+00:00 stderr F I1208 17:47:22.146587 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-ingress-operator, name: ingress-operator, uid: e02df054-9826-42fd-bd6b-a68c996ebec0]" virtual=false 2025-12-08T17:47:22.147195750+00:00 stderr F I1208 17:47:22.147162 1 garbagecollector.go:567] "item has at least one existing owner, will not 
garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-controller-manager-operator, name: metrics, uid: 4a7bd676-8b0b-4244-9e34-29c3edb8bb40]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.147209241+00:00 stderr F I1208 17:47:22.147195 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-apiserver, name: openshift-apiserver-operator-check-endpoints, uid: bb13a4c2-4a26-4c4c-be0a-c92c6553ee6e]" virtual=false 2025-12-08T17:47:22.150686970+00:00 stderr F I1208 17:47:22.150615 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-dns, name: dns-default, uid: d5a0398f-8bff-46a5-9d7c-f2f4e26b3879]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"DNS","name":"default","uid":"0f9755ef-acf2-4bc6-a6fc-f491e28e635f","controller":true}] 2025-12-08T17:47:22.150686970+00:00 stderr F I1208 17:47:22.150668 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-etcd-operator, name: etcd-operator, uid: aef2648c-8377-4d11-a4a1-fb24f5095a8d]" virtual=false 2025-12-08T17:47:22.150711861+00:00 stderr F I1208 17:47:22.150680 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-image-registry, name: image-registry-operator, uid: 7693198b-e80b-4c67-a087-641d91ca8741]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.150720561+00:00 stderr F I1208 17:47:22.150705 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-config-operator, name: machine-config-operator, uid: de7baa5b-d04a-4e5e-9669-bf620c8a04d1]" virtual=false 2025-12-08T17:47:22.150866995+00:00 stderr F I1208 17:47:22.150830 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-service-ca-operator, name: service-ca-operator, uid: 4979bb5a-bf15-43a9-9eee-231d52574ca5]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.150866995+00:00 stderr F I1208 17:47:22.150855 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-operator-lifecycle-manager, name: olm-operator, uid: cfa008c8-2c0c-470c-bc32-f95c0e394dd5]" virtual=false 2025-12-08T17:47:22.151530907+00:00 stderr F I1208 17:47:22.151476 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-controller-manager-operator, name: openshift-controller-manager-operator, uid: b6b1ba29-2359-42ad-a1f3-c61c995c528f]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.151530907+00:00 stderr F I1208 17:47:22.151503 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-image-registry, name: image-registry, uid: fc13ca00-4d30-4cf5-ba4e-0aeb5356211f]" virtual=false 2025-12-08T17:47:22.151911719+00:00 stderr F I1208 17:47:22.151622 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-dns-operator, name: dns-operator, uid: 50a8933f-a9ac-4a80-b460-c36e9fb81474]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.151911719+00:00 stderr F I1208 17:47:22.151642 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-apiserver, name: kube-apiserver, uid: 9de9deed-9721-49d9-9ffc-fe6fde17ec88]" virtual=false 2025-12-08T17:47:22.151911719+00:00 stderr F I1208 17:47:22.151822 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-controller-manager, name: openshift-controller-manager, uid: 3ead3b3c-6cb1-4941-8e45-fcfb01cfa6b6]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.151911719+00:00 stderr F I1208 17:47:22.151839 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-config-operator, name: machine-config-daemon, uid: b8f2912e-1679-4233-b796-af02cbe7e18b]" virtual=false 2025-12-08T17:47:22.152180677+00:00 stderr F I1208 17:47:22.152013 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-operator-lifecycle-manager, name: catalog-operator, uid: 308b0b19-f7aa-40a2-b0b9-10a5d2b356f8]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.152180677+00:00 stderr F I1208 17:47:22.152037 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-api, name: cluster-autoscaler-operator, uid: 6380b6dc-76d4-4a0c-bd2b-ad07c6b511ca]" virtual=false 2025-12-08T17:47:22.152195958+00:00 stderr F I1208 17:47:22.152173 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-config-operator, name: machine-config-controller, uid: 743c6934-a727-4576-9532-c45126dd50bd]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.152195958+00:00 stderr F I1208 17:47:22.152188 1 
garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-api, name: machine-api-controllers, uid: 8da4502b-6d27-4eb6-af73-39a8176f83fe]" virtual=false 2025-12-08T17:47:22.152498107+00:00 stderr F I1208 17:47:22.152312 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-operator-lifecycle-manager, name: package-server-manager-metrics, uid: 927e38ed-2fe0-4faa-8d67-18f898398255]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.152498107+00:00 stderr F I1208 17:47:22.152333 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-marketplace, name: marketplace-operator, uid: dca395b3-cf6a-4fc7-92bc-15c290009884]" virtual=false 2025-12-08T17:47:22.152498107+00:00 stderr F I1208 17:47:22.152436 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-console-operator, name: console-operator, uid: 67aaa41b-07c9-42c2-b24b-e21c702aaf38]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.152498107+00:00 stderr F I1208 17:47:22.152465 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-multus, name: monitor-network, uid: 9f83a4ef-cd08-40b7-a5a3-f1f39610e4bf]" virtual=false 2025-12-08T17:47:22.154639345+00:00 stderr F I1208 17:47:22.153863 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-route-controller-manager, name: openshift-route-controller-manager, uid: 6c85dcfe-44cb-4596-b7d4-b6e79a18159e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.154639345+00:00 stderr F I1208 17:47:22.153903 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-controller-manager-operator, name: kube-controller-manager-operator, uid: 767bca57-71f8-40a0-8831-50d92f59808c]" virtual=false 2025-12-08T17:47:22.154639345+00:00 stderr F I1208 17:47:22.154024 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-scheduler, name: kube-scheduler, uid: 0ef34820-7d38-4563-bcab-20b7d718ade2]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.154639345+00:00 stderr F I1208 17:47:22.154038 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-apiserver-operator, name: 
openshift-apiserver-operator, uid: 92c9f946-b2a0-4dc5-975c-25ebc3bb9c4e]" virtual=false 2025-12-08T17:47:22.154639345+00:00 stderr F I1208 17:47:22.154162 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-scheduler-operator, name: kube-scheduler-operator, uid: 1e85867d-2566-494d-83b0-620b526f122f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.154639345+00:00 stderr F I1208 17:47:22.154176 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-apiserver, name: openshift-apiserver, uid: 38fd95a7-b03c-4438-abb8-83e272fd6912]" virtual=false 2025-12-08T17:47:22.154733958+00:00 stderr F I1208 17:47:22.154699 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-ingress-operator, name: ingress-operator, uid: e02df054-9826-42fd-bd6b-a68c996ebec0]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.154733958+00:00 stderr F I1208 17:47:22.154722 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-cluster-machine-approver, name: cluster-machine-approver, uid: 5a288427-478d-4f77-8ab7-5a6a841b42ec]" virtual=false 2025-12-08T17:47:22.154810360+00:00 stderr F I1208 17:47:22.154778 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-multus, name: monitor-multus-admission-controller, uid: 01fa16c6-f969-42c1-b69a-43de0deff522]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.154810360+00:00 stderr F I1208 17:47:22.154796 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-cluster-version, name: cluster-version-operator, uid: 3cf7eed4-951b-4ed5-b5fe-6175a10b9554]" virtual=false 2025-12-08T17:47:22.155258694+00:00 stderr F I1208 17:47:22.155201 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-authentication, name: oauth-openshift, uid: f570f814-28f0-43c6-a672-c5ff8b60e0a0]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.155258694+00:00 stderr F I1208 17:47:22.155236 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-apiserver, name: openshift-apiserver-operator-check-endpoints, uid: bb13a4c2-4a26-4c4c-be0a-c92c6553ee6e]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.155258694+00:00 stderr F I1208 17:47:22.155246 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-config-operator, name: config-operator, uid: dd012f1c-40f4-428c-a842-46f94cbe2c6c]" virtual=false 2025-12-08T17:47:22.155278505+00:00 stderr F I1208 17:47:22.155254 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-api, name: machine-api-operator, uid: 81b76741-2b33-44b0-94f3-2547eb6fc915]" virtual=false 2025-12-08T17:47:22.155616495+00:00 stderr F I1208 17:47:22.155572 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-ovn-kubernetes, name: monitor-ovn-node, uid: 992d1159-1bbf-4ff7-adf1-580c362b690a]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.155616495+00:00 stderr F I1208 17:47:22.155594 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-network-diagnostics, name: network-check-source, uid: a77ddccd-3bdf-40da-b44d-39f9ccca28bb]" virtual=false 2025-12-08T17:47:22.155764530+00:00 stderr F I1208 17:47:22.155725 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-apiserver-operator, name: kube-apiserver-operator, uid: 005ad9fa-457c-4a70-9bb0-2a624385cac9]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.155764530+00:00 stderr F I1208 17:47:22.155745 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-authentication-operator, name: authentication-operator, uid: 01f7855a-a823-4705-8db6-27c45980a6cb]" virtual=false 2025-12-08T17:47:22.156900106+00:00 stderr F I1208 17:47:22.156832 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-console, name: console, uid: ce6eb94a-67e7-4594-8332-d294f5f0ed28]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.156900106+00:00 stderr F I1208 17:47:22.156860 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-cluster-samples-operator, name: cluster-samples-operator, uid: c54dfa10-f53a-4c08-8fe5-f78de034450b]" virtual=false 2025-12-08T17:47:22.157581497+00:00 stderr F I1208 17:47:22.157472 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, 
namespace: openshift-multus, name: monitor-network, uid: 9f83a4ef-cd08-40b7-a5a3-f1f39610e4bf]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.157581497+00:00 stderr F I1208 17:47:22.157506 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-controller-manager, name: kube-controller-manager, uid: 43eede0a-d0aa-4d52-9cba-d673fe0fc344]" virtual=false 2025-12-08T17:47:22.159421525+00:00 stderr F I1208 17:47:22.159357 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-api, name: cluster-autoscaler-operator, uid: 6380b6dc-76d4-4a0c-bd2b-ad07c6b511ca]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.159421525+00:00 stderr F I1208 17:47:22.159394 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-network-operator, name: network-operator, uid: 738c1721-593f-4f60-a567-4597ff37ea6a]" virtual=false 2025-12-08T17:47:22.159606310+00:00 stderr F I1208 17:47:22.159531 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-operator-lifecycle-manager, name: olm-operator, uid: cfa008c8-2c0c-470c-bc32-f95c0e394dd5]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.159648882+00:00 stderr F I1208 17:47:22.159620 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-oauth-apiserver, name: openshift-oauth-apiserver, uid: 668f8445-4e0b-4306-a0c6-9208bf76efb5]" virtual=false 2025-12-08T17:47:22.159797947+00:00 stderr F I1208 17:47:22.159759 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-config-operator, name: machine-config-daemon, uid: b8f2912e-1679-4233-b796-af02cbe7e18b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.159797947+00:00 stderr F I1208 17:47:22.159788 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-ovn-kubernetes, name: monitor-ovn-control-plane-metrics, uid: e8d49b63-d110-45e5-a2ec-20435c71bd60]" virtual=false 2025-12-08T17:47:22.159817578+00:00 stderr F I1208 17:47:22.159789 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-api, name: machine-api-controllers, uid: 8da4502b-6d27-4eb6-af73-39a8176f83fe]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.159826188+00:00 stderr F I1208 17:47:22.159811 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[batch/v1/CronJob, namespace: openshift-machine-config-operator, name: machine-config-nodes-crd-cleanup, uid: 2f1b5315-e7f1-4f76-a4c7-7be559488f49]" virtual=false 2025-12-08T17:47:22.159907421+00:00 stderr F I1208 17:47:22.159850 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-etcd-operator, name: etcd-operator, uid: aef2648c-8377-4d11-a4a1-fb24f5095a8d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.159907421+00:00 stderr F I1208 17:47:22.159868 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[batch/v1/CronJob, namespace: openshift-operator-lifecycle-manager, name: collect-profiles, uid: a64f7156-efdf-4f2f-bdb6-f498fe674093]" virtual=false 2025-12-08T17:47:22.160025175+00:00 stderr F I1208 17:47:22.159992 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-config-operator, name: machine-config-operator, uid: de7baa5b-d04a-4e5e-9669-bf620c8a04d1]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.160025175+00:00 stderr F I1208 17:47:22.160011 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[machine.openshift.io/v1beta1/MachineHealthCheck, namespace: openshift-machine-api, name: machine-api-termination-handler, uid: ac1c5de0-6d0d-41cd-814f-3cd5299bedbb]" virtual=false 2025-12-08T17:47:22.160497489+00:00 stderr F I1208 17:47:22.160453 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-marketplace, name: marketplace-operator, uid: dca395b3-cf6a-4fc7-92bc-15c290009884]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.160513870+00:00 stderr F I1208 17:47:22.160499 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-dns, name: node-resolver, uid: 7a3a6da6-fe06-4125-a5c7-e5f524871af3]" virtual=false 2025-12-08T17:47:22.160645484+00:00 stderr F I1208 17:47:22.160606 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-image-registry, name: image-registry, uid: fc13ca00-4d30-4cf5-ba4e-0aeb5356211f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.160664194+00:00 stderr F I1208 17:47:22.160627 1 garbagecollector.go:501] "Processing item" 
logger="garbage-collector-controller" item="[apps/v1/ControllerRevision, namespace: openshift-ingress-canary, name: ingress-canary-6bb949dcdd, uid: 2999b62e-85cd-47b8-9202-4ef42d700e71]" virtual=false 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.162415 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-apiserver, name: openshift-apiserver, uid: 38fd95a7-b03c-4438-abb8-83e272fd6912]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.162458 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-machine-config-operator, name: machine-config-server, uid: d1ca07c1-cb4d-45e8-b23b-37a1f3a3f651]" virtual=false 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.162688 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-controller-manager-operator, name: kube-controller-manager-operator, uid: 767bca57-71f8-40a0-8831-50d92f59808c]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.162738 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-image-registry, name: node-ca, uid: 191786f0-2ab8-4a50-a3fb-f9953399f287]" virtual=false 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.162757 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-cluster-machine-approver, name: cluster-machine-approver, uid: 5a288427-478d-4f77-8ab7-5a6a841b42ec]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.162776 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-machine-config-operator, name: machine-config-daemon, uid: 2fe756fc-41fb-44a6-ab78-d1fdba7d2669]" virtual=false 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.163180 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-apiserver-operator, name: openshift-apiserver-operator, uid: 92c9f946-b2a0-4dc5-975c-25ebc3bb9c4e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.163219 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-apiserver, name: kube-apiserver, uid: 9de9deed-9721-49d9-9ffc-fe6fde17ec88]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.163236 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-multus, name: multus-additional-cni-plugins, uid: db614dd0-5d3f-4079-bdf6-87e2d4631507]" virtual=false 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.163233 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-authentication-operator, name: authentication-operator, uid: 01f7855a-a823-4705-8db6-27c45980a6cb]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.163271 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-network-diagnostics, name: network-check-target, uid: 9914de7f-6a1d-49fb-87e3-f0d03b5893ec]" virtual=false 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.163240 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-network-node-identity, name: network-node-identity, uid: af047b3a-df8e-4f5f-bbf5-258cdd2c977b]" virtual=false 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.163489 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-network-diagnostics, name: network-check-source, uid: a77ddccd-3bdf-40da-b44d-39f9ccca28bb]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.163910176+00:00 stderr F I1208 17:47:22.163511 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-network-operator, name: iptables-alerter, uid: 04364458-be97-459c-9e4e-c10e4ed7a89c]" virtual=false 2025-12-08T17:47:22.164198535+00:00 stderr F I1208 17:47:22.164088 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-machine-config-operator, name: machine-config-server, uid: d1ca07c1-cb4d-45e8-b23b-37a1f3a3f651]" 2025-12-08T17:47:22.164198535+00:00 stderr F I1208 17:47:22.164139 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: hostpath-provisioner, name: csi-hostpathplugin, uid: 22984672-0d0d-46da-9df6-54a721b7b6bf]" virtual=false 2025-12-08T17:47:22.164198535+00:00 stderr F I1208 17:47:22.164152 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-config-operator, name: config-operator, uid: dd012f1c-40f4-428c-a842-46f94cbe2c6c]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 
2025-12-08T17:47:22.164198535+00:00 stderr F I1208 17:47:22.164181 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-multus, name: multus, uid: c6bd5b69-2014-4db0-b123-1bdb423140f1]" virtual=false 2025-12-08T17:47:22.164382781+00:00 stderr F I1208 17:47:22.164337 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-kube-controller-manager, name: kube-controller-manager, uid: 43eede0a-d0aa-4d52-9cba-d673fe0fc344]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.164415642+00:00 stderr F I1208 17:47:22.164386 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-multus, name: network-metrics-daemon, uid: 9623fe22-300e-4e9e-9241-5a539b90f3a7]" virtual=false 2025-12-08T17:47:22.164415642+00:00 stderr F I1208 17:47:22.164393 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-cluster-samples-operator, name: cluster-samples-operator, uid: c54dfa10-f53a-4c08-8fe5-f78de034450b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.164427812+00:00 stderr F I1208 17:47:22.164415 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-ovn-kubernetes, name: ovnkube-node, uid: 68675fbc-dce1-4e0f-93b3-f0fa287b1edd]" virtual=false 2025-12-08T17:47:22.166804007+00:00 stderr F I1208 17:47:22.166367 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-machine-config-operator, name: machine-config-daemon, uid: 2fe756fc-41fb-44a6-ab78-d1fdba7d2669]" 2025-12-08T17:47:22.166804007+00:00 stderr F I1208 17:47:22.166397 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[operators.coreos.com/v1/OperatorGroup, namespace: openshift-monitoring, name: openshift-cluster-monitoring, uid: 4ce70de4-7730-472f-aa41-55b320f6a48b]" virtual=false 2025-12-08T17:47:22.166804007+00:00 stderr F I1208 17:47:22.166614 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-image-registry, name: node-ca, uid: 191786f0-2ab8-4a50-a3fb-f9953399f287]" 2025-12-08T17:47:22.166804007+00:00 stderr F I1208 17:47:22.166637 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[operators.coreos.com/v1/OperatorGroup, namespace: openshift-operator-lifecycle-manager, name: olm-operators, uid: 1332ecfd-3d6a-4222-b9b5-6e6e389f06df]" virtual=false 2025-12-08T17:47:22.180463488+00:00 stderr F I1208 17:47:22.180348 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-ovn-kubernetes, name: monitor-ovn-control-plane-metrics, uid: e8d49b63-d110-45e5-a2ec-20435c71bd60]" 
owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.180463488+00:00 stderr F I1208 17:47:22.180444 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[operators.coreos.com/v1/OperatorGroup, namespace: openshift-operators, name: global-operators, uid: 83417554-904c-4254-8944-b91da7453b27]" virtual=false 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.181018 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-dns, name: node-resolver, uid: 7a3a6da6-fe06-4125-a5c7-e5f524871af3]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"DNS","name":"default","uid":"0f9755ef-acf2-4bc6-a6fc-f491e28e635f","controller":true}] 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.181098 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-config-operator, name: openshift-config-operator, uid: bde8337f-c80d-4cd7-8e1c-8853e023fdb8]" virtual=false 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.182225 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[batch/v1/CronJob, namespace: openshift-machine-config-operator, name: machine-config-nodes-crd-cleanup, uid: 2f1b5315-e7f1-4f76-a4c7-7be559488f49]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503"}] 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.182250 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: cluster-autoscaler-operator, uid: 0918fba0-c5ef-42c6-ab99-2fb4dcb34871]" virtual=false 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.182572 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-network-operator, name: network-operator, uid: 738c1721-593f-4f60-a567-4597ff37ea6a]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.182632 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-console, name: console, uid: 324553c0-d8c0-43b0-bbae-07283a98bcf1]" virtual=false 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.182593 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[batch/v1/CronJob, namespace: openshift-operator-lifecycle-manager, name: collect-profiles, uid: a64f7156-efdf-4f2f-bdb6-f498fe674093]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.182732 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-multus, name: multus, uid: 
8ed5854e-19c6-4934-895c-1ff820fbc84c]" virtual=false 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.182825 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[machine.openshift.io/v1beta1/MachineHealthCheck, namespace: openshift-machine-api, name: machine-api-termination-handler, uid: ac1c5de0-6d0d-41cd-814f-3cd5299bedbb]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.182849 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-cluster-samples-operator, name: cluster-samples-operator, uid: 8c7ce863-bde8-4efe-8b60-31d289bbf1f9]" virtual=false 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.183247 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-oauth-apiserver, name: openshift-oauth-apiserver, uid: 668f8445-4e0b-4306-a0c6-9208bf76efb5]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.183269 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-image-registry, name: node-ca, uid: 1476d897-3740-45c6-b3a2-3403be584014]" virtual=false 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.183404 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: hostpath-provisioner, name: csi-hostpathplugin, uid: 22984672-0d0d-46da-9df6-54a721b7b6bf]" 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.183420 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: machine-api-controllers, uid: 9c65bbd1-8044-4ed7-b28a-8adb920c184f]" virtual=false 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.183547 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-machine-api, name: machine-api-operator, uid: 81b76741-2b33-44b0-94f3-2547eb6fc915]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.183565 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-multus, name: multus-ac, uid: 8730bc0a-fa8f-4fcf-9d74-e54b3e4f4363]" virtual=false 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.183718 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/ServiceMonitor, namespace: openshift-cluster-version, name: cluster-version-operator, uid: 3cf7eed4-951b-4ed5-b5fe-6175a10b9554]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.183926656+00:00 stderr F I1208 17:47:22.183738 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-network-node-identity, name: network-node-identity, uid: 020e6dbc-bbf9-4bb0-9224-ebe047b05265]" virtual=false 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.190594 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-multus, name: multus-additional-cni-plugins, uid: db614dd0-5d3f-4079-bdf6-87e2d4631507]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.190642 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-authentication-operator, name: authentication-operator, uid: 063bc733-6edc-4f47-a43c-73cbfb5c3c8d]" virtual=false 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.190840 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-multus, name: multus, uid: c6bd5b69-2014-4db0-b123-1bdb423140f1]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.190913 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-network-operator, name: iptables-alerter, uid: 67f6de68-003a-46d5-b769-d3ebce26fdf7]" virtual=false 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.190996 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/ControllerRevision, namespace: openshift-ingress-canary, name: ingress-canary-6bb949dcdd, uid: 2999b62e-85cd-47b8-9202-4ef42d700e71]" owner=[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"ingress-canary","uid":"77896bcd-d1f7-46a2-984f-9205a544fb94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.191021 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-kube-scheduler-operator, name: openshift-kube-scheduler-operator, uid: 736e29a2-1ce3-47d8-ac5c-ddba4e238245]" virtual=false 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.191464 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-multus, name: network-metrics-daemon, uid: 9623fe22-300e-4e9e-9241-5a539b90f3a7]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.191491 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" 
item="[v1/ServiceAccount, namespace: openshift-ovn-kubernetes, name: ovn-kubernetes-node, uid: a1d81936-4e38-446d-819b-61c0b05df947]" virtual=false 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.191599 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-ovn-kubernetes, name: ovnkube-node, uid: 68675fbc-dce1-4e0f-93b3-f0fa287b1edd]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.191620 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: machine-api-operator, uid: 27ab1b40-7038-45bc-a0c3-f51f5fd8e027]" virtual=false 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.191726 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-network-diagnostics, name: network-check-target, uid: 9914de7f-6a1d-49fb-87e3-f0d03b5893ec]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.191747 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-config-operator, name: machine-config-operator, uid: bfac9eb9-68bb-4da9-afc6-a17734d95032]" virtual=false 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.191814 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[operators.coreos.com/v1/OperatorGroup, namespace: openshift-monitoring, name: openshift-cluster-monitoring, uid: 4ce70de4-7730-472f-aa41-55b320f6a48b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.192019201+00:00 stderr F I1208 17:47:22.191831 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-kube-controller-manager-operator, name: kube-controller-manager-operator, uid: 180b3a1a-50c2-445c-8650-162b0f3a1d99]" virtual=false 2025-12-08T17:47:22.196385569+00:00 stderr F I1208 17:47:22.192607 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-network-operator, name: iptables-alerter, uid: 04364458-be97-459c-9e4e-c10e4ed7a89c]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.196385569+00:00 stderr F I1208 17:47:22.192630 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: cluster-autoscaler, uid: 56382e05-4f3e-45e2-9065-468d4f668091]" virtual=false 2025-12-08T17:47:22.196385569+00:00 stderr F I1208 17:47:22.193219 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" 
logger="garbage-collector-controller" item="[apps/v1/DaemonSet, namespace: openshift-network-node-identity, name: network-node-identity, uid: af047b3a-df8e-4f5f-bbf5-258cdd2c977b]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.196385569+00:00 stderr F I1208 17:47:22.193255 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-multus, name: multus-ancillary-tools, uid: c21b8763-d204-40e9-8b00-8f8a8767dd88]" virtual=false 2025-12-08T17:47:22.196385569+00:00 stderr F I1208 17:47:22.195105 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[operators.coreos.com/v1/OperatorGroup, namespace: openshift-operator-lifecycle-manager, name: olm-operators, uid: 1332ecfd-3d6a-4222-b9b5-6e6e389f06df]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.196385569+00:00 stderr F I1208 17:47:22.195130 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-service-ca-operator, name: service-ca-operator, uid: c63e31c2-61a6-4920-b002-afb13dcebab4]" virtual=false 2025-12-08T17:47:22.205151965+00:00 stderr F I1208 17:47:22.202253 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[operators.coreos.com/v1/OperatorGroup, namespace: openshift-operators, name: global-operators, uid: 83417554-904c-4254-8944-b91da7453b27]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.205151965+00:00 stderr F I1208 17:47:22.202303 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: machine-api-termination-handler, uid: 1567c8f7-61db-45bd-a1b6-c1f4b610e91b]" virtual=false 2025-12-08T17:47:22.205151965+00:00 stderr F I1208 17:47:22.204224 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-image-registry, name: node-ca, uid: 1476d897-3740-45c6-b3a2-3403be584014]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.205151965+00:00 stderr F I1208 17:47:22.204254 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-image-registry, name: cluster-image-registry-operator, uid: 1281df22-53c2-4b26-aa63-b3d41b4761b8]" virtual=false 2025-12-08T17:47:22.219602290+00:00 stderr F I1208 17:47:22.219528 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-multus, name: multus, uid: 8ed5854e-19c6-4934-895c-1ff820fbc84c]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 
2025-12-08T17:47:22.219602290+00:00 stderr F I1208 17:47:22.219577 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-kube-apiserver-operator, name: kube-apiserver-operator, uid: 78bb60a8-531d-48f1-b653-8764a30ad047]" virtual=false 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.219952 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-cluster-samples-operator, name: cluster-samples-operator, uid: 8c7ce863-bde8-4efe-8b60-31d289bbf1f9]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.219973 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-network-diagnostics, name: network-diagnostics, uid: 35a6df22-e2af-4660-b4c6-16bf8a43042f]" virtual=false 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.220105 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-kube-scheduler-operator, name: openshift-kube-scheduler-operator, uid: 736e29a2-1ce3-47d8-ac5c-ddba4e238245]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.220121 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-ovn-kubernetes, name: ovn-kubernetes-control-plane, uid: e7d12ea0-5793-4ac6-b96e-798934641d22]" virtual=false 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.220234 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-console, name: console, uid: 324553c0-d8c0-43b0-bbae-07283a98bcf1]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.220247 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-dns-operator, name: dns-operator, uid: ededd462-781c-45db-afa8-736b78e4df88]" virtual=false 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.220412 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-config-operator, name: openshift-config-operator, uid: bde8337f-c80d-4cd7-8e1c-8853e023fdb8]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.220434 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-apiserver-operator, name: openshift-apiserver-operator, uid: dab4d42f-1426-4619-af00-f3b882989b05]" virtual=false 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.220851 1 
garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-network-node-identity, name: network-node-identity, uid: 020e6dbc-bbf9-4bb0-9224-ebe047b05265]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.221226 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-multus, name: multus-ac, uid: 8730bc0a-fa8f-4fcf-9d74-e54b3e4f4363]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.221392926+00:00 stderr F I1208 17:47:22.221253 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-kube-storage-version-migrator-operator, name: kube-storage-version-migrator-operator, uid: 5d04164f-45ca-4c60-8a08-c4459300ecda]" virtual=false 2025-12-08T17:47:22.222979966+00:00 stderr F I1208 17:47:22.222921 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: cluster-autoscaler-operator, uid: 0918fba0-c5ef-42c6-ab99-2fb4dcb34871]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.223002987+00:00 stderr F I1208 17:47:22.222971 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-network-operator, name: cluster-network-operator, uid: 4474bfa7-4a4c-4eef-9e88-1d3ba71df974]" virtual=false 2025-12-08T17:47:22.224905037+00:00 stderr F I1208 17:47:22.220867 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-multus, name: metrics-daemon-sa, uid: abce43d0-2a82-4a99-9c68-e5d3d0e59581]" virtual=false 2025-12-08T17:47:22.233920700+00:00 stderr F I1208 17:47:22.231244 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-multus, name: multus-ancillary-tools, uid: c21b8763-d204-40e9-8b00-8f8a8767dd88]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.233920700+00:00 stderr F I1208 17:47:22.231284 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-operator-lifecycle-manager, name: olm-operator-serviceaccount, uid: 0d23d03f-8d1c-40a1-b029-bb73930758f7]" virtual=false 2025-12-08T17:47:22.235380677+00:00 stderr F I1208 17:47:22.235026 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: machine-api-operator, uid: 27ab1b40-7038-45bc-a0c3-f51f5fd8e027]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.235380677+00:00 stderr F I1208 17:47:22.235059 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-marketplace, name: marketplace-operator, uid: c100705c-9ecd-449a-969b-a207f023c1b0]" virtual=false 2025-12-08T17:47:22.235380677+00:00 stderr F I1208 17:47:22.235190 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-network-operator, name: iptables-alerter, uid: 67f6de68-003a-46d5-b769-d3ebce26fdf7]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.235380677+00:00 stderr F I1208 17:47:22.235204 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: cli, uid: 8c11cc8a-acca-44f2-abbb-b2abc36fc5e1]" virtual=false 2025-12-08T17:47:22.238918458+00:00 stderr F I1208 17:47:22.235841 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-image-registry, name: cluster-image-registry-operator, uid: 1281df22-53c2-4b26-aa63-b3d41b4761b8]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.238918458+00:00 stderr F I1208 17:47:22.235862 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: cli-artifacts, uid: fdd6be6b-f6bc-47c7-bda0-1de138bd0a81]" virtual=false 2025-12-08T17:47:22.242963035+00:00 stderr F I1208 17:47:22.242518 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-ovn-kubernetes, name: ovn-kubernetes-node, uid: a1d81936-4e38-446d-819b-61c0b05df947]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.242963035+00:00 stderr F I1208 17:47:22.242555 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: driver-toolkit, uid: ab3fdf24-4b72-43fe-8063-bf671901e9c6]" virtual=false 2025-12-08T17:47:22.242963035+00:00 stderr F I1208 17:47:22.242788 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: machine-api-controllers, uid: 9c65bbd1-8044-4ed7-b28a-8adb920c184f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.242963035+00:00 stderr F I1208 17:47:22.242802 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: installer, uid: 
3e674476-cd07-42b1-9fe1-083d4990ac3a]" virtual=false 2025-12-08T17:47:22.242963035+00:00 stderr F I1208 17:47:22.242900 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: cluster-autoscaler, uid: 56382e05-4f3e-45e2-9065-468d4f668091]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.242963035+00:00 stderr F I1208 17:47:22.242914 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: installer-artifacts, uid: 0e18a7c9-8c75-4c32-8d92-66303c9d209f]" virtual=false 2025-12-08T17:47:22.243268624+00:00 stderr F I1208 17:47:22.243209 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-kube-controller-manager-operator, name: kube-controller-manager-operator, uid: 180b3a1a-50c2-445c-8650-162b0f3a1d99]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.243291425+00:00 stderr F I1208 17:47:22.243270 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-operator-lifecycle-manager, name: collect-profiles, uid: 749fdd6c-f7a2-4743-bbd6-4c00d16e8776]" virtual=false 2025-12-08T17:47:22.249925424+00:00 stderr F I1208 17:47:22.249092 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-ovn-kubernetes, name: ovn-kubernetes-control-plane, uid: e7d12ea0-5793-4ac6-b96e-798934641d22]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.249925424+00:00 stderr F I1208 17:47:22.249160 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: must-gather, uid: efe3cb11-0030-44ec-b454-a1c46849474f]" virtual=false 2025-12-08T17:47:22.255056246+00:00 stderr F I1208 17:47:22.254605 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-network-diagnostics, name: network-diagnostics, uid: 35a6df22-e2af-4660-b4c6-16bf8a43042f]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.255056246+00:00 stderr F I1208 17:47:22.254644 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: network-tools, uid: dc81d8c3-d5c7-455a-90ee-f6abb15e272d]" virtual=false 2025-12-08T17:47:22.255056246+00:00 stderr F I1208 17:47:22.254948 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-authentication-operator, name: 
authentication-operator, uid: 063bc733-6edc-4f47-a43c-73cbfb5c3c8d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.255056246+00:00 stderr F I1208 17:47:22.254964 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: oauth-proxy, uid: 5b02e592-b496-421b-aa42-a67b5520f0dd]" virtual=false 2025-12-08T17:47:22.255110848+00:00 stderr F I1208 17:47:22.255074 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-dns-operator, name: dns-operator, uid: ededd462-781c-45db-afa8-736b78e4df88]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.255110848+00:00 stderr F I1208 17:47:22.255087 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: tests, uid: 0e4e0fb5-de56-48a0-8d2f-34844c794213]" virtual=false 2025-12-08T17:47:22.255220281+00:00 stderr F I1208 17:47:22.255180 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-config-operator, name: machine-config-operator, uid: bfac9eb9-68bb-4da9-afc6-a17734d95032]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.255220281+00:00 stderr F I1208 17:47:22.255210 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: tools, uid: a2e57b51-274c-4040-8139-62eb4ada14e2]" virtual=false 2025-12-08T17:47:22.255415767+00:00 stderr F I1208 17:47:22.255384 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-kube-apiserver-operator, name: kube-apiserver-operator, uid: 78bb60a8-531d-48f1-b653-8764a30ad047]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.255415767+00:00 stderr F I1208 17:47:22.255403 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-console-operator, name: console-operator, uid: 49c566ed-810f-4e34-89c0-e69f4ba1f5fe]" virtual=false 2025-12-08T17:47:22.260923181+00:00 stderr F I1208 17:47:22.260398 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-service-ca-operator, name: service-ca-operator, uid: c63e31c2-61a6-4920-b002-afb13dcebab4]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.260923181+00:00 stderr F I1208 17:47:22.260426 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-monitoring, name: 
cluster-monitoring-operator, uid: 55006a20-e0ea-4a38-a3bc-8ae4f1472858]" virtual=false 2025-12-08T17:47:22.260923181+00:00 stderr F I1208 17:47:22.260652 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: machine-api-termination-handler, uid: 1567c8f7-61db-45bd-a1b6-c1f4b610e91b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.260923181+00:00 stderr F I1208 17:47:22.260669 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: control-plane-machine-set-operator, uid: bd3ae2d6-78bd-48d1-be9f-a77e56ba96c8]" virtual=false 2025-12-08T17:47:22.264693079+00:00 stderr F I1208 17:47:22.264530 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-network-operator, name: cluster-network-operator, uid: 4474bfa7-4a4c-4eef-9e88-1d3ba71df974]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.264693079+00:00 stderr F I1208 17:47:22.264557 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-controller-manager-operator, name: openshift-controller-manager-operator, uid: d89f5418-8a04-45bf-a138-83af4c524742]" virtual=false 2025-12-08T17:47:22.264734270+00:00 stderr F I1208 17:47:22.264707 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-apiserver-operator, name: openshift-apiserver-operator, uid: dab4d42f-1426-4619-af00-f3b882989b05]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.264734270+00:00 stderr F I1208 17:47:22.264723 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-cluster-machine-approver, name: machine-approver-sa, uid: 9d2e2fd5-f689-4ff8-bed1-c7547bf698d4]" virtual=false 2025-12-08T17:47:22.264920376+00:00 stderr F I1208 17:47:22.264840 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-operator-lifecycle-manager, name: olm-operator-serviceaccount, uid: 0d23d03f-8d1c-40a1-b029-bb73930758f7]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.264920376+00:00 stderr F I1208 17:47:22.264861 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-etcd-operator, name: etcd-operator, uid: 0ce3e4fb-39b1-4367-9741-b9e539e4cdc1]" virtual=false 2025-12-08T17:47:22.264992988+00:00 stderr F I1208 17:47:22.264967 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: 
openshift-kube-storage-version-migrator-operator, name: kube-storage-version-migrator-operator, uid: 5d04164f-45ca-4c60-8a08-c4459300ecda]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.264992988+00:00 stderr F I1208 17:47:22.264985 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-ingress-operator, name: ingress-operator, uid: 75f07586-843e-4cd5-a497-25f3abe799ec]" virtual=false 2025-12-08T17:47:22.270917865+00:00 stderr F I1208 17:47:22.270831 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-operator-lifecycle-manager, name: collect-profiles, uid: 749fdd6c-f7a2-4743-bbd6-4c00d16e8776]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.271020298+00:00 stderr F I1208 17:47:22.270975 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[network.operator.openshift.io/v1/OperatorPKI, namespace: openshift-network-node-identity, name: network-node-identity, uid: 19863168-4684-4a75-87e9-a586be776b3a]" virtual=false 2025-12-08T17:47:22.271248905+00:00 stderr F I1208 17:47:22.271196 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: cli-artifacts, uid: fdd6be6b-f6bc-47c7-bda0-1de138bd0a81]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.271263055+00:00 stderr F I1208 17:47:22.271245 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[network.operator.openshift.io/v1/OperatorPKI, namespace: openshift-ovn-kubernetes, name: ovn, uid: f3ff9d5c-ea26-43b0-91ee-b403a4b4d4f6]" virtual=false 2025-12-08T17:47:22.271558705+00:00 stderr F I1208 17:47:22.271530 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-multus, name: metrics-daemon-sa, uid: abce43d0-2a82-4a99-9c68-e5d3d0e59581]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.271616897+00:00 stderr F I1208 17:47:22.271597 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[network.operator.openshift.io/v1/OperatorPKI, namespace: openshift-ovn-kubernetes, name: signer, uid: 491c5375-b7a4-4e86-8e7b-e538b36d6095]" virtual=false 2025-12-08T17:47:22.271654398+00:00 stderr F I1208 17:47:22.271619 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-marketplace, name: marketplace-operator, uid: c100705c-9ecd-449a-969b-a207f023c1b0]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.271664428+00:00 stderr F I1208 
17:47:22.271647 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-apiserver-operator, name: prometheus-k8s, uid: 6ff38600-b4d6-452f-82fc-0d24fdec9101]" virtual=false 2025-12-08T17:47:22.271863335+00:00 stderr F I1208 17:47:22.271834 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: cli, uid: 8c11cc8a-acca-44f2-abbb-b2abc36fc5e1]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.271937457+00:00 stderr F I1208 17:47:22.271913 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-apiserver, name: prometheus-k8s, uid: 6dfe5fa1-4be3-47c2-aa7e-e055b2344b88]" virtual=false 2025-12-08T17:47:22.276960015+00:00 stderr F I1208 17:47:22.276890 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-controller-manager-operator, name: openshift-controller-manager-operator, uid: d89f5418-8a04-45bf-a138-83af4c524742]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.276960015+00:00 stderr F I1208 17:47:22.276931 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-authentication-operator, name: prometheus-k8s, uid: a3099dea-12c5-441e-b16c-2f2c07408c1a]" virtual=false 2025-12-08T17:47:22.277175611+00:00 stderr F I1208 17:47:22.277141 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: installer, uid: 3e674476-cd07-42b1-9fe1-083d4990ac3a]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.277175611+00:00 stderr F I1208 17:47:22.277163 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-authentication, name: prometheus-k8s, uid: 2a54be00-974b-4c50-8d1c-f7162000d609]" virtual=false 2025-12-08T17:47:22.277311286+00:00 stderr F I1208 17:47:22.277278 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-machine-api, name: control-plane-machine-set-operator, uid: bd3ae2d6-78bd-48d1-be9f-a77e56ba96c8]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.277361688+00:00 stderr F I1208 17:47:22.277298 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-machine-approver, name: machine-approver, uid: 6ba7c917-27fc-4114-85d9-07825a840abc]" virtual=false 2025-12-08T17:47:22.277506023+00:00 stderr F I1208 17:47:22.277479 1 
garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: installer-artifacts, uid: 0e18a7c9-8c75-4c32-8d92-66303c9d209f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.277506023+00:00 stderr F I1208 17:47:22.277499 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-machine-approver, name: prometheus-k8s, uid: a79b075f-5ddc-4304-a7df-de0caa322fa5]" virtual=false 2025-12-08T17:47:22.277635777+00:00 stderr F I1208 17:47:22.277607 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-console-operator, name: console-operator, uid: 49c566ed-810f-4e34-89c0-e69f4ba1f5fe]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.277635777+00:00 stderr F I1208 17:47:22.277626 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-samples-operator, name: cluster-samples-operator, uid: 77e7db24-b3cb-49de-8ddd-4832c5da528e]" virtual=false 2025-12-08T17:47:22.277785691+00:00 stderr F I1208 17:47:22.277757 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: driver-toolkit, uid: ab3fdf24-4b72-43fe-8063-bf671901e9c6]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.277785691+00:00 stderr F I1208 17:47:22.277775 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-samples-operator, name: prometheus-k8s, uid: 39466c9d-67a7-445a-b328-b0ff5f22d5e2]" virtual=false 2025-12-08T17:47:22.277920886+00:00 stderr F I1208 17:47:22.277892 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: must-gather, uid: efe3cb11-0030-44ec-b454-a1c46849474f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.277920886+00:00 stderr F I1208 17:47:22.277911 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-storage-operator, name: csi-snapshot-controller-operator-role, uid: b75388c6-a2ac-4323-97aa-74085d52c30a]" virtual=false 2025-12-08T17:47:22.281300262+00:00 stderr F I1208 17:47:22.281199 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-ingress-operator, name: ingress-operator, uid: 75f07586-843e-4cd5-a497-25f3abe799ec]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.281300262+00:00 stderr F I1208 17:47:22.281232 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-version, name: prometheus-k8s, uid: ffda4cc2-e551-454c-8bf1-be5336168d3f]" virtual=false 2025-12-08T17:47:22.281497758+00:00 stderr F I1208 17:47:22.281463 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-cluster-machine-approver, name: machine-approver-sa, uid: 9d2e2fd5-f689-4ff8-bed1-c7547bf698d4]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.281497758+00:00 stderr F I1208 17:47:22.281484 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: console-configmap-reader, uid: 5f8e3d3f-6c13-40f0-a1c4-0c2b0cee9aba]" virtual=false 2025-12-08T17:47:22.281734515+00:00 stderr F I1208 17:47:22.281698 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-monitoring, name: cluster-monitoring-operator, uid: 55006a20-e0ea-4a38-a3bc-8ae4f1472858]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.281734515+00:00 stderr F I1208 17:47:22.281720 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: console-operator, uid: 1ef3c1b1-685a-4417-9b53-23d61c410f1e]" virtual=false 2025-12-08T17:47:22.281900700+00:00 stderr F I1208 17:47:22.281841 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: network-tools, uid: dc81d8c3-d5c7-455a-90ee-f6abb15e272d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.281900700+00:00 stderr F I1208 17:47:22.281861 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: console-public, uid: a4c72545-652f-48d3-b484-7d5f7a310e7e]" virtual=false 2025-12-08T17:47:22.282154318+00:00 stderr F I1208 17:47:22.282109 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[network.operator.openshift.io/v1/OperatorPKI, namespace: openshift-ovn-kubernetes, name: signer, uid: 491c5375-b7a4-4e86-8e7b-e538b36d6095]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.282154318+00:00 stderr F I1208 17:47:22.282137 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" 
item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: machine-api-controllers, uid: 909fe707-d07c-45ed-ac20-11669b612d43]" virtual=false 2025-12-08T17:47:22.282428117+00:00 stderr F I1208 17:47:22.282363 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-apiserver-operator, name: prometheus-k8s, uid: 6ff38600-b4d6-452f-82fc-0d24fdec9101]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.282428117+00:00 stderr F I1208 17:47:22.282408 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: machine-approver, uid: b908a159-2cd6-4878-ab3a-f9388352a4d6]" virtual=false 2025-12-08T17:47:22.282614143+00:00 stderr F I1208 17:47:22.282573 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-apiserver, name: prometheus-k8s, uid: 6dfe5fa1-4be3-47c2-aa7e-e055b2344b88]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.282614143+00:00 stderr F I1208 17:47:22.282596 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: openshift-network-public-role, uid: f85f6638-bb46-4041-bf1e-8d05c6621e59]" virtual=false 2025-12-08T17:47:22.282748047+00:00 stderr F I1208 17:47:22.282713 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: tools, uid: a2e57b51-274c-4040-8139-62eb4ada14e2]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.282748047+00:00 stderr F I1208 17:47:22.282734 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-operator, name: prometheus-k8s, uid: ac0827c7-174d-4310-a132-b4ec6df8afc8]" virtual=false 2025-12-08T17:47:22.282906482+00:00 stderr F I1208 17:47:22.282842 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[network.operator.openshift.io/v1/OperatorPKI, namespace: openshift-network-node-identity, name: network-node-identity, uid: 19863168-4684-4a75-87e9-a586be776b3a]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.282906482+00:00 stderr F I1208 17:47:22.282861 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config, name: console-operator, uid: 094d60d7-e5b5-4603-ad96-cde7975bd83f]" virtual=false 2025-12-08T17:47:22.283470500+00:00 stderr F I1208 17:47:22.283432 1 garbagecollector.go:567] "item has at least one existing owner, will not 
garbage collect" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-etcd-operator, name: etcd-operator, uid: 0ce3e4fb-39b1-4367-9741-b9e539e4cdc1]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.283470500+00:00 stderr F I1208 17:47:22.283454 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config, name: coreos-pull-secret-reader, uid: 60a0a208-9961-463e-986f-3c7302769df7]" virtual=false 2025-12-08T17:47:22.283570474+00:00 stderr F I1208 17:47:22.283538 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[network.operator.openshift.io/v1/OperatorPKI, namespace: openshift-ovn-kubernetes, name: ovn, uid: f3ff9d5c-ea26-43b0-91ee-b403a4b4d4f6]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.283583074+00:00 stderr F I1208 17:47:22.283556 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config, name: ingress-operator, uid: 32745b86-26b1-4411-83c8-6769afe0ef84]" virtual=false 2025-12-08T17:47:22.283787390+00:00 stderr F I1208 17:47:22.283750 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: tests, uid: 0e4e0fb5-de56-48a0-8d2f-34844c794213]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.283787390+00:00 stderr F I1208 17:47:22.283772 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config, name: machine-api-controllers, uid: 8913b400-f591-4bff-8f47-c4026984f25f]" virtual=false 2025-12-08T17:47:22.283904734+00:00 stderr F I1208 17:47:22.283849 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[image.openshift.io/v1/ImageStream, namespace: openshift, name: oauth-proxy, uid: 5b02e592-b496-421b-aa42-a67b5520f0dd]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.283904734+00:00 stderr F I1208 17:47:22.283868 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-console-operator, name: console-operator, uid: ca4d2ee6-7668-4b43-9cca-59d0d06f542a]" virtual=false 2025-12-08T17:47:22.290573183+00:00 stderr F I1208 17:47:22.290484 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-storage-operator, name: csi-snapshot-controller-operator-role, uid: b75388c6-a2ac-4323-97aa-74085d52c30a]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.290573183+00:00 stderr F I1208 17:47:22.290505 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-authentication, name: prometheus-k8s, uid: 2a54be00-974b-4c50-8d1c-f7162000d609]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.290573183+00:00 stderr F I1208 17:47:22.290536 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-console-operator, name: prometheus-k8s, uid: 253a0556-00ed-477b-b11e-727c668659f4]" virtual=false 2025-12-08T17:47:22.290573183+00:00 stderr F I1208 17:47:22.290544 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-console-user-settings, name: console-user-settings-admin, uid: 8a7d0ef2-903e-4505-8a80-ea97c68b4a10]" virtual=false 2025-12-08T17:47:22.290615305+00:00 stderr F I1208 17:47:22.290539 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-authentication-operator, name: prometheus-k8s, uid: a3099dea-12c5-441e-b16c-2f2c07408c1a]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.290615305+00:00 stderr F I1208 17:47:22.290579 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-console, name: console-operator, uid: 770c2fcf-bc84-4b08-9f10-7f8e1853cee5]" virtual=false 2025-12-08T17:47:22.290633295+00:00 stderr F I1208 17:47:22.290616 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-machine-approver, name: prometheus-k8s, uid: a79b075f-5ddc-4304-a7df-de0caa322fa5]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.290642276+00:00 stderr F I1208 17:47:22.290629 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-console, name: prometheus-k8s, uid: 56ff8849-354a-4ede-88f2-4436b0a3bde5]" virtual=false 2025-12-08T17:47:22.290771390+00:00 stderr F I1208 17:47:22.290706 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-samples-operator, name: cluster-samples-operator, uid: 77e7db24-b3cb-49de-8ddd-4832c5da528e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.290771390+00:00 stderr F I1208 17:47:22.290755 1 garbagecollector.go:501] "Processing item" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-controller-manager-operator, name: prometheus-k8s, uid: 4d74cb46-6482-4707-bff6-6547ff546015]" virtual=false 2025-12-08T17:47:22.292090312+00:00 stderr F I1208 17:47:22.291404 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-machine-approver, name: machine-approver, uid: 6ba7c917-27fc-4114-85d9-07825a840abc]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.292090312+00:00 stderr F I1208 17:47:22.291431 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-controller-manager, name: prometheus-k8s, uid: ee712995-32ca-4cd3-addd-56c3dc98ac1b]" virtual=false 2025-12-08T17:47:22.292090312+00:00 stderr F I1208 17:47:22.291541 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-samples-operator, name: prometheus-k8s, uid: 39466c9d-67a7-445a-b328-b0ff5f22d5e2]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.292090312+00:00 stderr F I1208 17:47:22.291563 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-dns-operator, name: dns-operator, uid: 79c48787-e4ef-45fe-9cb2-8707e9cd7d61]" virtual=false 2025-12-08T17:47:22.293076852+00:00 stderr F I1208 17:47:22.293005 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: console-operator, uid: 1ef3c1b1-685a-4417-9b53-23d61c410f1e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.293098683+00:00 stderr F I1208 17:47:22.293070 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-dns-operator, name: prometheus-k8s, uid: c584e4f7-dc9a-4554-8568-6104eac0033f]" virtual=false 2025-12-08T17:47:22.293469875+00:00 stderr F I1208 17:47:22.293419 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-cluster-version, name: prometheus-k8s, uid: ffda4cc2-e551-454c-8bf1-be5336168d3f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.293469875+00:00 stderr F I1208 17:47:22.293449 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-etcd-operator, name: prometheus-k8s, uid: d0fac19c-3e45-4cfd-a8ca-f121bc469295]" virtual=false 2025-12-08T17:47:22.293550227+00:00 stderr F I1208 17:47:22.293515 1 garbagecollector.go:567] "item has at 
least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-console-operator, name: console-operator, uid: ca4d2ee6-7668-4b43-9cca-59d0d06f542a]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.293559727+00:00 stderr F I1208 17:47:22.293535 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-image-registry, name: cluster-image-registry-operator, uid: 8a2a7e9d-fa82-48c8-842c-214567ad94ec]" virtual=false 2025-12-08T17:47:22.293632360+00:00 stderr F I1208 17:47:22.293601 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: openshift-network-public-role, uid: f85f6638-bb46-4041-bf1e-8d05c6621e59]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.293632360+00:00 stderr F I1208 17:47:22.293619 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-image-registry, name: node-ca, uid: 7fd7f4d9-78fa-4f5a-9f7c-d1f6264ee09a]" virtual=false 2025-12-08T17:47:22.294107324+00:00 stderr F I1208 17:47:22.294054 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-operator, name: prometheus-k8s, uid: ac0827c7-174d-4310-a132-b4ec6df8afc8]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.294107324+00:00 stderr F I1208 17:47:22.294087 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-image-registry, name: prometheus-k8s, uid: eac9d6d9-611e-4c5c-8436-393910521b09]" virtual=false 2025-12-08T17:47:22.294494727+00:00 stderr F I1208 17:47:22.294453 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: console-public, uid: a4c72545-652f-48d3-b484-7d5f7a310e7e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.294494727+00:00 stderr F I1208 17:47:22.294476 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-ingress-operator, name: ingress-operator, uid: 9ef68a54-5a41-4cec-8a8e-0de1dd9c9610]" virtual=false 2025-12-08T17:47:22.294574120+00:00 stderr F I1208 17:47:22.294543 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config, name: machine-api-controllers, uid: 8913b400-f591-4bff-8f47-c4026984f25f]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.294574120+00:00 stderr F I1208 17:47:22.294561 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-ingress-operator, name: prometheus-k8s, uid: d6fad1f4-41ae-4595-bdb0-0d02b479944b]" virtual=false 2025-12-08T17:47:22.339352531+00:00 stderr F I1208 17:47:22.339268 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config, name: coreos-pull-secret-reader, uid: 60a0a208-9961-463e-986f-3c7302769df7]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.339352531+00:00 stderr F I1208 17:47:22.339315 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kni-infra, name: host-networking-services, uid: 9814d6e2-5d57-4f6a-a185-4f4b991702ec]" virtual=false 2025-12-08T17:47:22.347060893+00:00 stderr F I1208 17:47:22.346997 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config, name: ingress-operator, uid: 32745b86-26b1-4411-83c8-6769afe0ef84]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.347060893+00:00 stderr F I1208 17:47:22.347040 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-apiserver-operator, name: prometheus-k8s, uid: f52bc923-0764-4ed9-8390-8f23448cb6a5]" virtual=false 2025-12-08T17:47:22.347060893+00:00 stderr F I1208 17:47:22.347009 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: machine-approver, uid: b908a159-2cd6-4878-ab3a-f9388352a4d6]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.347095074+00:00 stderr F I1208 17:47:22.347067 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-apiserver, name: prometheus-k8s, uid: eae1ab86-3aac-4e4d-844b-b4aab669be67]" virtual=false 2025-12-08T17:47:22.347121335+00:00 stderr F I1208 17:47:22.347067 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: console-configmap-reader, uid: 5f8e3d3f-6c13-40f0-a1c4-0c2b0cee9aba]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.347134986+00:00 stderr F I1208 17:47:22.347123 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config-managed, name: machine-api-controllers, uid: 909fe707-d07c-45ed-ac20-11669b612d43]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.347141976+00:00 stderr F I1208 17:47:22.347125 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-controller-manager-operator, name: prometheus-k8s, uid: 902067e6-576b-4c67-b503-46ba31250666]" virtual=false 2025-12-08T17:47:22.347148916+00:00 stderr F I1208 17:47:22.347139 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-controller-manager, name: prometheus-k8s, uid: 7dff5aea-b0b2-4e11-8d0a-aaee6bc5c894]" virtual=false 2025-12-08T17:47:22.347243309+00:00 stderr F I1208 17:47:22.347220 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-config, name: console-operator, uid: 094d60d7-e5b5-4603-ad96-cde7975bd83f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.347251799+00:00 stderr F I1208 17:47:22.347238 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-scheduler-operator, name: prometheus-k8s, uid: 8906de79-0e02-4f0a-b8fe-92706ec66a89]" virtual=false 2025-12-08T17:47:22.366767743+00:00 stderr F I1208 17:47:22.366689 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-controller-manager, name: prometheus-k8s, uid: ee712995-32ca-4cd3-addd-56c3dc98ac1b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.366767743+00:00 stderr F I1208 17:47:22.366739 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-scheduler, name: prometheus-k8s, uid: 8fcc503b-4599-415b-b68a-1f79d1f6a02d]" virtual=false 2025-12-08T17:47:22.374116275+00:00 stderr F I1208 17:47:22.374050 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-console-operator, name: prometheus-k8s, uid: 253a0556-00ed-477b-b11e-727c668659f4]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.374116275+00:00 stderr F I1208 17:47:22.374101 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: cluster-autoscaler, uid: 900244c5-73a7-4efd-89a9-4482faed30bf]" virtual=false 2025-12-08T17:47:22.374328372+00:00 stderr F I1208 17:47:22.374297 1 garbagecollector.go:567] "item has at least one existing owner, will not 
garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-console-user-settings, name: console-user-settings-admin, uid: 8a7d0ef2-903e-4505-8a80-ea97c68b4a10]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.374328372+00:00 stderr F I1208 17:47:22.374318 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: cluster-autoscaler-operator, uid: cc0f2db8-631b-40d1-928b-b96bb1c102ea]" virtual=false 2025-12-08T17:47:22.377139930+00:00 stderr F I1208 17:47:22.377088 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-console, name: console-operator, uid: 770c2fcf-bc84-4b08-9f10-7f8e1853cee5]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.377139930+00:00 stderr F I1208 17:47:22.377119 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: control-plane-machine-set-operator, uid: 8517d382-106b-43dd-b4d4-88aaaf262062]" virtual=false 2025-12-08T17:47:22.381087605+00:00 stderr F I1208 17:47:22.380925 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-controller-manager-operator, name: prometheus-k8s, uid: 4d74cb46-6482-4707-bff6-6547ff546015]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.381087605+00:00 stderr F I1208 17:47:22.380951 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: machine-api-controllers, uid: 5160c796-e594-4e5c-a8fe-1ea7434a3ac9]" virtual=false 2025-12-08T17:47:22.384024347+00:00 stderr F I1208 17:47:22.383959 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-dns-operator, name: prometheus-k8s, uid: c584e4f7-dc9a-4554-8568-6104eac0033f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.384024347+00:00 stderr F I1208 17:47:22.384009 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: machine-api-operator, uid: b550c9f6-9756-4297-9337-0ffa9ce691e0]" virtual=false 2025-12-08T17:47:22.388118096+00:00 stderr F I1208 17:47:22.387834 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-etcd-operator, name: prometheus-k8s, uid: d0fac19c-3e45-4cfd-a8ca-f121bc469295]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.388118096+00:00 stderr F I1208 17:47:22.388101 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: prometheus-k8s-cluster-autoscaler-operator, uid: 4136e08c-fe31-425a-b7e9-3fafec0549bc]" virtual=false 2025-12-08T17:47:22.391596985+00:00 stderr F I1208 17:47:22.391150 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-console, name: prometheus-k8s, uid: 56ff8849-354a-4ede-88f2-4436b0a3bde5]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.391596985+00:00 stderr F I1208 17:47:22.391192 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: prometheus-k8s-machine-api-operator, uid: e8dd2e37-36fc-48cd-909e-a3b3e7472070]" virtual=false 2025-12-08T17:47:22.393637459+00:00 stderr F I1208 17:47:22.393570 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-dns-operator, name: dns-operator, uid: 79c48787-e4ef-45fe-9cb2-8707e9cd7d61]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.393654780+00:00 stderr F I1208 17:47:22.393637 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-config-operator, name: prometheus-k8s, uid: 01b524f2-7ddc-4878-9076-8f4881346e07]" virtual=false 2025-12-08T17:47:22.396960824+00:00 stderr F I1208 17:47:22.396911 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-image-registry, name: cluster-image-registry-operator, uid: 8a2a7e9d-fa82-48c8-842c-214567ad94ec]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.396977494+00:00 stderr F I1208 17:47:22.396953 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-marketplace, name: marketplace-operator, uid: dcd874ef-723a-40d7-9425-f4383187a07d]" virtual=false 2025-12-08T17:47:22.419095890+00:00 stderr F I1208 17:47:22.418972 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-image-registry, name: node-ca, uid: 7fd7f4d9-78fa-4f5a-9f7c-d1f6264ee09a]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.419095890+00:00 stderr F I1208 17:47:22.419030 1 garbagecollector.go:501] "Processing item" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-marketplace, name: openshift-marketplace-metrics, uid: 39ffab32-69c7-4cde-83a4-37704d2add4d]" virtual=false 2025-12-08T17:47:22.423313304+00:00 stderr F I1208 17:47:22.423256 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-image-registry, name: prometheus-k8s, uid: eac9d6d9-611e-4c5c-8436-393910521b09]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.423339994+00:00 stderr F I1208 17:47:22.423304 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-monitoring, name: cluster-monitoring-operator-alert-customization, uid: 7a0fca50-57b4-41d8-922f-f52b7051910d]" virtual=false 2025-12-08T17:47:22.425933366+00:00 stderr F I1208 17:47:22.425838 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-ingress-operator, name: ingress-operator, uid: 9ef68a54-5a41-4cec-8a8e-0de1dd9c9610]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.425933366+00:00 stderr F I1208 17:47:22.425868 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-monitoring, name: console-operator, uid: e74750bf-8b67-4097-800a-1f62f1d728e2]" virtual=false 2025-12-08T17:47:22.428060602+00:00 stderr F I1208 17:47:22.428017 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-ingress-operator, name: prometheus-k8s, uid: d6fad1f4-41ae-4595-bdb0-0d02b479944b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.428060602+00:00 stderr F I1208 17:47:22.428050 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-multus, name: prometheus-k8s, uid: ef46d29c-9cc9-4ab9-bcf0-8c1a79052bc9]" virtual=false 2025-12-08T17:47:22.436558910+00:00 stderr F I1208 17:47:22.436485 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kni-infra, name: host-networking-services, uid: 9814d6e2-5d57-4f6a-a185-4f4b991702ec]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.436558910+00:00 stderr F I1208 17:47:22.436536 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-multus, name: whereabouts-cni, uid: 3e189f77-ec81-4804-a69e-3406cea72d88]" virtual=false 2025-12-08T17:47:22.469961021+00:00 stderr F I1208 17:47:22.469430 1 garbagecollector.go:567] "item has at 
least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-apiserver-operator, name: prometheus-k8s, uid: f52bc923-0764-4ed9-8390-8f23448cb6a5]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.469961021+00:00 stderr F I1208 17:47:22.469488 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-network-diagnostics, name: network-diagnostics, uid: edc23d92-6948-48f7-b843-ef543c0aec83]" virtual=false 2025-12-08T17:47:22.470521938+00:00 stderr F I1208 17:47:22.470469 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-apiserver, name: prometheus-k8s, uid: eae1ab86-3aac-4e4d-844b-b4aab669be67]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.470533799+00:00 stderr F I1208 17:47:22.470519 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-network-diagnostics, name: prometheus-k8s, uid: 6b65bfe8-9060-45f8-b70c-21d30115c6f6]" virtual=false 2025-12-08T17:47:22.473846704+00:00 stderr F I1208 17:47:22.473791 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-controller-manager, name: prometheus-k8s, uid: 7dff5aea-b0b2-4e11-8d0a-aaee6bc5c894]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.473902936+00:00 stderr F I1208 17:47:22.473838 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-network-node-identity, name: network-node-identity-leases, uid: b346ce36-850b-4b91-84fd-9009ca037189]" virtual=false 2025-12-08T17:47:22.476584110+00:00 stderr F I1208 17:47:22.476524 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-controller-manager-operator, name: prometheus-k8s, uid: 902067e6-576b-4c67-b503-46ba31250666]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.476584110+00:00 stderr F I1208 17:47:22.476564 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-network-operator, name: prometheus-k8s, uid: 5dbba0e9-dd0c-4e5b-8f0d-878be7cd91a7]" virtual=false 2025-12-08T17:47:22.481510514+00:00 stderr F I1208 17:47:22.481470 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-scheduler-operator, name: prometheus-k8s, uid: 8906de79-0e02-4f0a-b8fe-92706ec66a89]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.481528795+00:00 stderr F I1208 17:47:22.481500 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-nutanix-infra, name: host-networking-services, uid: 20a538b5-51d1-4f13-90b5-d59b3dc493bb]" virtual=false 2025-12-08T17:47:22.498762908+00:00 stderr F I1208 17:47:22.497625 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-kube-scheduler, name: prometheus-k8s, uid: 8fcc503b-4599-415b-b68a-1f79d1f6a02d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.498762908+00:00 stderr F I1208 17:47:22.497674 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-oauth-apiserver, name: prometheus-k8s, uid: 2e1f82c6-2d8a-417d-af7a-bc440909ea57]" virtual=false 2025-12-08T17:47:22.503749414+00:00 stderr F I1208 17:47:22.503680 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: cluster-autoscaler, uid: 900244c5-73a7-4efd-89a9-4482faed30bf]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.503749414+00:00 stderr F I1208 17:47:22.503722 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-openstack-infra, name: host-networking-services, uid: ba93ff46-031f-415f-9480-767006d718e7]" virtual=false 2025-12-08T17:47:22.507458092+00:00 stderr F I1208 17:47:22.507415 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: cluster-autoscaler-operator, uid: cc0f2db8-631b-40d1-928b-b96bb1c102ea]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.507458092+00:00 stderr F I1208 17:47:22.507438 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-operator-lifecycle-manager, name: collect-profiles, uid: 2c65389f-32ed-4b66-b0bf-f697ede62460]" virtual=false 2025-12-08T17:47:22.511029814+00:00 stderr F I1208 17:47:22.510974 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: control-plane-machine-set-operator, uid: 8517d382-106b-43dd-b4d4-88aaaf262062]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.511050294+00:00 stderr F I1208 17:47:22.511024 1 garbagecollector.go:501] "Processing item" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-operator-lifecycle-manager, name: operator-lifecycle-manager-metrics, uid: 74c4b64c-8513-4ace-b791-ecc4897b9a04]" virtual=false 2025-12-08T17:47:22.516593049+00:00 stderr F I1208 17:47:22.516535 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: machine-api-controllers, uid: 5160c796-e594-4e5c-a8fe-1ea7434a3ac9]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.516593049+00:00 stderr F I1208 17:47:22.516571 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-ovn-kubernetes, name: openshift-ovn-kubernetes-control-plane-limited, uid: f119fd7b-c9f9-41a3-9f6b-6b849a2877ff]" virtual=false 2025-12-08T17:47:22.517427965+00:00 stderr F I1208 17:47:22.517382 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: machine-api-operator, uid: b550c9f6-9756-4297-9337-0ffa9ce691e0]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.517427965+00:00 stderr F I1208 17:47:22.517407 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-ovn-kubernetes, name: openshift-ovn-kubernetes-node-limited, uid: 2e9cfa70-01d3-48a5-af7e-ac6d0f70489f]" virtual=false 2025-12-08T17:47:22.520772460+00:00 stderr F I1208 17:47:22.520706 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: prometheus-k8s-cluster-autoscaler-operator, uid: 4136e08c-fe31-425a-b7e9-3fafec0549bc]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.520790950+00:00 stderr F I1208 17:47:22.520763 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-ingress-canary, name: ingress-canary, uid: 0888ad56-e63e-40f9-9ab5-bdfe12ee18ef]" virtual=false 2025-12-08T17:47:22.523516686+00:00 stderr F I1208 17:47:22.523470 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-api, name: prometheus-k8s-machine-api-operator, uid: e8dd2e37-36fc-48cd-909e-a3b3e7472070]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.523516686+00:00 stderr F I1208 17:47:22.523504 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[route.openshift.io/v1/Route, namespace: openshift-ingress-canary, name: canary, uid: c77f72a8-1cd5-4d3c-9619-806f41a86efa]" virtual=false 2025-12-08T17:47:22.526965846+00:00 stderr 
F I1208 17:47:22.526910 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-machine-config-operator, name: prometheus-k8s, uid: 01b524f2-7ddc-4878-9076-8f4881346e07]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.526987366+00:00 stderr F I1208 17:47:22.526958 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-ovn-kubernetes, name: prometheus-k8s, uid: 7a8de694-4980-44f0-938f-b8112d953aa4]" virtual=false 2025-12-08T17:47:22.530489216+00:00 stderr F I1208 17:47:22.530432 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-marketplace, name: marketplace-operator, uid: dcd874ef-723a-40d7-9425-f4383187a07d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.530489216+00:00 stderr F I1208 17:47:22.530462 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-route-controller-manager, name: prometheus-k8s, uid: b42e1181-7f9e-4c64-b0f6-f8615e0c9572]" virtual=false 2025-12-08T17:47:22.554703349+00:00 stderr F I1208 17:47:22.552510 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-marketplace, name: openshift-marketplace-metrics, uid: 39ffab32-69c7-4cde-83a4-37704d2add4d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.554703349+00:00 stderr F I1208 17:47:22.552559 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-service-ca-operator, name: prometheus-k8s, uid: d5f0400c-c866-4461-91ca-1674c874bd8b]" virtual=false 2025-12-08T17:47:22.559038795+00:00 stderr F I1208 17:47:22.558212 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-monitoring, name: console-operator, uid: e74750bf-8b67-4097-800a-1f62f1d728e2]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.559038795+00:00 stderr F I1208 17:47:22.558263 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-vsphere-infra, name: host-networking-services, uid: 13dc3db2-b779-4912-a196-ee77feacea00]" virtual=false 2025-12-08T17:47:22.559038795+00:00 stderr F I1208 17:47:22.558812 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-monitoring, name: cluster-monitoring-operator-alert-customization, uid: 
7a0fca50-57b4-41d8-922f-f52b7051910d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.559038795+00:00 stderr F I1208 17:47:22.558858 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift, name: copied-csv-viewer, uid: 1487dd1c-4a3b-4683-a18c-da8fd82bf4e5]" virtual=false 2025-12-08T17:47:22.567089558+00:00 stderr F I1208 17:47:22.566513 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-multus, name: whereabouts-cni, uid: 3e189f77-ec81-4804-a69e-3406cea72d88]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.567089558+00:00 stderr F I1208 17:47:22.566556 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: kube-system, name: console, uid: 6292b407-20e8-47a5-be99-77a5e1f7a896]" virtual=false 2025-12-08T17:47:22.578041583+00:00 stderr F I1208 17:47:22.577968 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-multus, name: prometheus-k8s, uid: ef46d29c-9cc9-4ab9-bcf0-8c1a79052bc9]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.578072074+00:00 stderr F I1208 17:47:22.578032 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: kube-system, name: console-operator, uid: dedf5f86-0501-4a1d-b312-0cf452e2800c]" virtual=false 2025-12-08T17:47:22.605002681+00:00 stderr F I1208 17:47:22.601312 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-network-diagnostics, name: network-diagnostics, uid: edc23d92-6948-48f7-b843-ef543c0aec83]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.605002681+00:00 stderr F I1208 17:47:22.601359 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: kube-system, name: csi-snapshot-controller-operator-authentication-reader, uid: 7fd23cab-d9ed-41e1-b2d7-92b6985e2dd1]" virtual=false 2025-12-08T17:47:22.605002681+00:00 stderr F I1208 17:47:22.603599 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-network-diagnostics, name: prometheus-k8s, uid: 6b65bfe8-9060-45f8-b70c-21d30115c6f6]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.605002681+00:00 stderr F I1208 
17:47:22.603621 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: kube-system, name: network-diagnostics, uid: b6e21cd4-4c3d-401f-92cc-7c008ff856b3]" virtual=false 2025-12-08T17:47:22.608502582+00:00 stderr F I1208 17:47:22.607071 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-network-node-identity, name: network-node-identity-leases, uid: b346ce36-850b-4b91-84fd-9009ca037189]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.608502582+00:00 stderr F I1208 17:47:22.607121 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-apiserver-operator, name: prometheus-k8s, uid: 376b7f3a-eb8d-4641-a54e-19994092fb5d]" virtual=false 2025-12-08T17:47:22.610551156+00:00 stderr F I1208 17:47:22.610501 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-network-operator, name: prometheus-k8s, uid: 5dbba0e9-dd0c-4e5b-8f0d-878be7cd91a7]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.610568896+00:00 stderr F I1208 17:47:22.610546 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-apiserver, name: prometheus-k8s, uid: 0d74114b-fee4-4b23-b205-94074f133ad6]" virtual=false 2025-12-08T17:47:22.614962465+00:00 stderr F I1208 17:47:22.614898 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-nutanix-infra, name: host-networking-services, uid: 20a538b5-51d1-4f13-90b5-d59b3dc493bb]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.614983506+00:00 stderr F I1208 17:47:22.614960 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-authentication-operator, name: prometheus-k8s, uid: fef91a3e-8916-4ea4-9554-535d6fd3b728]" virtual=false 2025-12-08T17:47:22.631288099+00:00 stderr F I1208 17:47:22.631182 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-oauth-apiserver, name: prometheus-k8s, uid: 2e1f82c6-2d8a-417d-af7a-bc440909ea57]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.631336481+00:00 stderr F I1208 17:47:22.631274 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-authentication, name: prometheus-k8s, uid: 53fed5e4-a2fc-47bb-8401-e9530ad5c771]" 
virtual=false 2025-12-08T17:47:22.635320426+00:00 stderr F I1208 17:47:22.635272 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-openstack-infra, name: host-networking-services, uid: ba93ff46-031f-415f-9480-767006d718e7]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.635338576+00:00 stderr F I1208 17:47:22.635319 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ResourceQuota, namespace: openshift-host-network, name: host-network-namespace-quotas, uid: 03fe5859-99ee-4852-9ee1-d476868fe54f]" virtual=false 2025-12-08T17:47:22.639624432+00:00 stderr F I1208 17:47:22.639551 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-operator-lifecycle-manager, name: operator-lifecycle-manager-metrics, uid: 74c4b64c-8513-4ace-b791-ecc4897b9a04]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.639624432+00:00 stderr F I1208 17:47:22.639595 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-machine-approver, name: machine-approver, uid: e384bcab-90cf-46b3-b3fc-3faa6575f457]" virtual=false 2025-12-08T17:47:22.640507149+00:00 stderr F I1208 17:47:22.640459 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-operator-lifecycle-manager, name: collect-profiles, uid: 2c65389f-32ed-4b66-b0bf-f697ede62460]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.640507149+00:00 stderr F I1208 17:47:22.640484 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-machine-approver, name: prometheus-k8s, uid: e276478b-d4fd-47f7-887b-1c90c397d03e]" virtual=false 2025-12-08T17:47:22.647604003+00:00 stderr F I1208 17:47:22.647504 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-ovn-kubernetes, name: openshift-ovn-kubernetes-control-plane-limited, uid: f119fd7b-c9f9-41a3-9f6b-6b849a2877ff]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.647604003+00:00 stderr F I1208 17:47:22.647546 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-samples-operator, name: cluster-samples-operator, uid: d9ba455d-d0ef-4279-b91b-2bb734b7a230]" virtual=false 2025-12-08T17:47:22.650417932+00:00 stderr F I1208 17:47:22.650367 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-ovn-kubernetes, name: openshift-ovn-kubernetes-node-limited, uid: 2e9cfa70-01d3-48a5-af7e-ac6d0f70489f]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.650417932+00:00 stderr F I1208 17:47:22.650403 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-samples-operator, name: prometheus-k8s, uid: b09df175-99ba-4d3c-bace-ce35d4e83009]" virtual=false 2025-12-08T17:47:22.653905391+00:00 stderr F I1208 17:47:22.653798 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-ingress-canary, name: ingress-canary, uid: 0888ad56-e63e-40f9-9ab5-bdfe12ee18ef]" owner=[{"apiVersion":"apps/v1","kind":"daemonset","name":"ingress-canary","uid":"77896bcd-d1f7-46a2-984f-9205a544fb94","controller":true}] 2025-12-08T17:47:22.653905391+00:00 stderr F I1208 17:47:22.653836 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-storage-operator, name: csi-snapshot-controller-operator-role, uid: 675d5d01-bbab-41f4-b22a-cb301a5bfd68]" virtual=false 2025-12-08T17:47:22.656387599+00:00 stderr F I1208 17:47:22.656229 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[route.openshift.io/v1/Route, namespace: openshift-ingress-canary, name: canary, uid: c77f72a8-1cd5-4d3c-9619-806f41a86efa]" owner=[{"apiVersion":"apps/v1","kind":"daemonset","name":"ingress-canary","uid":"77896bcd-d1f7-46a2-984f-9205a544fb94","controller":true}] 2025-12-08T17:47:22.656387599+00:00 stderr F I1208 17:47:22.656255 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-version, name: prometheus-k8s, uid: 8406bc8f-05b0-490b-b240-1ec3ce9cbd3f]" virtual=false 2025-12-08T17:47:22.660583841+00:00 stderr F I1208 17:47:22.660537 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-ovn-kubernetes, name: prometheus-k8s, uid: 7a8de694-4980-44f0-938f-b8112d953aa4]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.660583841+00:00 stderr F I1208 17:47:22.660572 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: console-configmap-reader, uid: e3dfcc46-dd9b-4c39-b4af-1331760e7f87]" virtual=false 2025-12-08T17:47:22.663803682+00:00 stderr F I1208 17:47:22.663761 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-route-controller-manager, name: prometheus-k8s, uid: b42e1181-7f9e-4c64-b0f6-f8615e0c9572]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.663840953+00:00 stderr F I1208 17:47:22.663797 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: console-operator, uid: ccb991ad-450a-4068-adbb-b37825b8e6da]" virtual=false 2025-12-08T17:47:22.684539405+00:00 stderr F I1208 17:47:22.684446 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-service-ca-operator, name: prometheus-k8s, uid: d5f0400c-c866-4461-91ca-1674c874bd8b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.684539405+00:00 stderr F I1208 17:47:22.684501 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: console-public, uid: 6a7460b0-4e35-480e-a71b-377d30ca36bd]" virtual=false 2025-12-08T17:47:22.688225501+00:00 stderr F I1208 17:47:22.688168 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift-vsphere-infra, name: host-networking-services, uid: 13dc3db2-b779-4912-a196-ee77feacea00]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.688257632+00:00 stderr F I1208 17:47:22.688225 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: machine-api-controllers, uid: 344d915b-b955-4603-a766-f7a1f3fbba7e]" virtual=false 2025-12-08T17:47:22.689901284+00:00 stderr F I1208 17:47:22.689852 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/Role, namespace: openshift, name: copied-csv-viewer, uid: 1487dd1c-4a3b-4683-a18c-da8fd82bf4e5]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.689917615+00:00 stderr F I1208 17:47:22.689898 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: machine-approver, uid: 3ffb919c-31ad-497c-b082-f862e1b96a30]" virtual=false 2025-12-08T17:47:22.693956732+00:00 stderr F I1208 17:47:22.693909 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: kube-system, name: console, uid: 6292b407-20e8-47a5-be99-77a5e1f7a896]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.693956732+00:00 stderr F I1208 17:47:22.693940 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" 
item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: openshift-network-public-role-binding, uid: 3f544260-b7f2-4307-aa10-76c309175c0a]" virtual=false 2025-12-08T17:47:22.711610708+00:00 stderr F I1208 17:47:22.711524 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: kube-system, name: console-operator, uid: dedf5f86-0501-4a1d-b312-0cf452e2800c]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.711610708+00:00 stderr F I1208 17:47:22.711580 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-operator, name: prometheus-k8s, uid: 398479e3-c16d-452b-8f5e-f3c97e9918d2]" virtual=false 2025-12-08T17:47:22.734204239+00:00 stderr F I1208 17:47:22.734128 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: kube-system, name: csi-snapshot-controller-operator-authentication-reader, uid: 7fd23cab-d9ed-41e1-b2d7-92b6985e2dd1]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.734204239+00:00 stderr F I1208 17:47:22.734176 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config, name: cluster-samples-operator-openshift-config-secret-reader, uid: 8a0fff18-901e-44e5-a8ee-842ba68cfac0]" virtual=false 2025-12-08T17:47:22.736766429+00:00 stderr F I1208 17:47:22.736703 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: kube-system, name: network-diagnostics, uid: b6e21cd4-4c3d-401f-92cc-7c008ff856b3]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.736766429+00:00 stderr F I1208 17:47:22.736736 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config, name: console-operator, uid: d129014f-8b06-4143-b4fa-3a0f098c6559]" virtual=false 2025-12-08T17:47:22.740562149+00:00 stderr F I1208 17:47:22.740499 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-apiserver-operator, name: prometheus-k8s, uid: 376b7f3a-eb8d-4641-a54e-19994092fb5d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.740562149+00:00 stderr F I1208 17:47:22.740533 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config, name: ingress-operator, uid: a361b360-6755-4cb9-b68b-2998a0713612]" virtual=false 2025-12-08T17:47:22.744216554+00:00 stderr 
F I1208 17:47:22.744179 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-apiserver, name: prometheus-k8s, uid: 0d74114b-fee4-4b23-b205-94074f133ad6]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.744216554+00:00 stderr F I1208 17:47:22.744204 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config, name: machine-api-controllers, uid: 25963d68-1e2f-4b0e-a08f-0ce996f7b51f]" virtual=false 2025-12-08T17:47:22.746845907+00:00 stderr F I1208 17:47:22.746809 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-authentication-operator, name: prometheus-k8s, uid: fef91a3e-8916-4ea4-9554-535d6fd3b728]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.746845907+00:00 stderr F I1208 17:47:22.746834 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-console-operator, name: console-operator, uid: 6ee80bf7-3aef-44c3-8ae6-babe6e24ccf2]" virtual=false 2025-12-08T17:47:22.763861262+00:00 stderr F I1208 17:47:22.763785 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-authentication, name: prometheus-k8s, uid: 53fed5e4-a2fc-47bb-8401-e9530ad5c771]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.763861262+00:00 stderr F I1208 17:47:22.763820 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-console-operator, name: prometheus-k8s, uid: 5702339a-1e79-4ada-a460-4e2eff82de4d]" virtual=false 2025-12-08T17:47:22.767369032+00:00 stderr F I1208 17:47:22.767331 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/ResourceQuota, namespace: openshift-host-network, name: host-network-namespace-quotas, uid: 03fe5859-99ee-4852-9ee1-d476868fe54f]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.767369032+00:00 stderr F I1208 17:47:22.767355 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-console-user-settings, name: console-user-settings-admin, uid: eb68d863-03ac-4a8d-8ebf-53c9f9785f85]" virtual=false 2025-12-08T17:47:22.771301657+00:00 stderr F I1208 17:47:22.771272 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: 
openshift-cluster-machine-approver, name: machine-approver, uid: e384bcab-90cf-46b3-b3fc-3faa6575f457]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.771318457+00:00 stderr F I1208 17:47:22.771300 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-console, name: console-operator, uid: 1d0d1e48-a9e1-4e4a-8a48-181bf893de48]" virtual=false 2025-12-08T17:47:22.774457756+00:00 stderr F I1208 17:47:22.774399 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-machine-approver, name: prometheus-k8s, uid: e276478b-d4fd-47f7-887b-1c90c397d03e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.774457756+00:00 stderr F I1208 17:47:22.774423 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-console, name: prometheus-k8s, uid: 6da3f11b-9f14-4506-a933-bb3207d8d635]" virtual=false 2025-12-08T17:47:22.780766824+00:00 stderr F I1208 17:47:22.780685 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-samples-operator, name: cluster-samples-operator, uid: d9ba455d-d0ef-4279-b91b-2bb734b7a230]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.780766824+00:00 stderr F I1208 17:47:22.780710 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-controller-manager-operator, name: prometheus-k8s, uid: 902b45ff-a672-4079-bc39-9c3db9fbed24]" virtual=false 2025-12-08T17:47:22.784351707+00:00 stderr F I1208 17:47:22.784310 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-samples-operator, name: prometheus-k8s, uid: b09df175-99ba-4d3c-bace-ce35d4e83009]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.784351707+00:00 stderr F I1208 17:47:22.784339 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apiregistration.k8s.io/v1/APIService, namespace: , name: v1.packages.operators.coreos.com, uid: 1eb523e0-0a92-4ae6-84a4-16192bab1fca]" virtual=false 2025-12-08T17:47:22.787135944+00:00 stderr F I1208 17:47:22.787029 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-storage-operator, name: csi-snapshot-controller-operator-role, uid: 675d5d01-bbab-41f4-b22a-cb301a5bfd68]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.787135944+00:00 stderr F I1208 17:47:22.787109 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: allow-ingress-kubeapi, uid: 61749708-bb52-4e70-92b1-807c81c26e6a]" virtual=false 2025-12-08T17:47:22.797479580+00:00 stderr F I1208 17:47:22.797392 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-cluster-version, name: prometheus-k8s, uid: 8406bc8f-05b0-490b-b240-1ec3ce9cbd3f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.797499651+00:00 stderr F I1208 17:47:22.797470 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: allow-ingress-metal, uid: 02a4dba1-9dec-4992-a11c-4f4c03f7e69c]" virtual=false 2025-12-08T17:47:22.797675866+00:00 stderr F I1208 17:47:22.797623 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: console-configmap-reader, uid: e3dfcc46-dd9b-4c39-b4af-1331760e7f87]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.797687707+00:00 stderr F I1208 17:47:22.797666 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: package-server-manager, uid: 6e667886-db08-4af0-9937-668c3a1a44aa]" virtual=false 2025-12-08T17:47:22.800328590+00:00 stderr F I1208 17:47:22.800257 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: console-operator, uid: ccb991ad-450a-4068-adbb-b37825b8e6da]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.800328590+00:00 stderr F I1208 17:47:22.800315 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: packageserver, uid: 91cc0f79-c51b-424a-a293-e2750a385ac6]" virtual=false 2025-12-08T17:47:22.816851210+00:00 stderr F I1208 17:47:22.816776 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: console-public, uid: 6a7460b0-4e35-480e-a71b-377d30ca36bd]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.816851210+00:00 stderr F I1208 17:47:22.816836 1 garbagecollector.go:501] "Processing 
item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-marketplace, name: marketplace-operator, uid: 9a95e898-e71f-46f4-ab23-671d4fbd8588]" virtual=false 2025-12-08T17:47:22.820053911+00:00 stderr F I1208 17:47:22.820001 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: machine-api-controllers, uid: 344d915b-b955-4603-a766-f7a1f3fbba7e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.820130453+00:00 stderr F I1208 17:47:22.820106 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: allow-all-egress, uid: 69a87104-2112-439f-b270-5e735b3ccc9f]" virtual=false 2025-12-08T17:47:22.823612123+00:00 stderr F I1208 17:47:22.823542 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: machine-approver, uid: 3ffb919c-31ad-497c-b082-f862e1b96a30]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.823681106+00:00 stderr F I1208 17:47:22.823658 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: allow-ingress-metrics, uid: 6a07247b-872b-40d1-baeb-45b0e1ec1d09]" virtual=false 2025-12-08T17:47:22.831693187+00:00 stderr F I1208 17:47:22.831647 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-managed, name: openshift-network-public-role-binding, uid: 3f544260-b7f2-4307-aa10-76c309175c0a]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:22.831754589+00:00 stderr F I1208 17:47:22.831734 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-marketplace, name: default-deny-all, uid: 26158c82-b93b-4b40-ad84-d46310003f34]" virtual=false 2025-12-08T17:47:22.844325565+00:00 stderr F I1208 17:47:22.843462 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config-operator, name: prometheus-k8s, uid: 398479e3-c16d-452b-8f5e-f3c97e9918d2]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.844325565+00:00 stderr F I1208 17:47:22.843518 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-marketplace, name: unpack-bundles, uid: d387f0e1-78f2-4f21-8b20-eb44e79f2d4e]" virtual=false 2025-12-08T17:47:22.849235950+00:00 stderr F 
I1208 17:47:22.849197 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apiregistration.k8s.io/v1/APIService, namespace: , name: v1.packages.operators.coreos.com, uid: 1eb523e0-0a92-4ae6-84a4-16192bab1fca]" 2025-12-08T17:47:22.849235950+00:00 stderr F I1208 17:47:22.849221 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: catalog-operator, uid: 9af2c58b-44d2-43f2-bfa6-abf8a256a724]" virtual=false 2025-12-08T17:47:22.863756207+00:00 stderr F I1208 17:47:22.863708 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config, name: cluster-samples-operator-openshift-config-secret-reader, uid: 8a0fff18-901e-44e5-a8ee-842ba68cfac0]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.863821999+00:00 stderr F I1208 17:47:22.863804 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operators, name: default-allow-all, uid: 232e5193-54e7-43f0-ad09-44fb764c3765]" virtual=false 2025-12-08T17:47:22.871431949+00:00 stderr F I1208 17:47:22.871362 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config, name: console-operator, uid: d129014f-8b06-4143-b4fa-3a0f098c6559]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.871431949+00:00 stderr F I1208 17:47:22.871416 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: allow-ingress-cluster, uid: bfdb4887-ce6d-45fa-ab0e-d0f8e92c6c08]" virtual=false 2025-12-08T17:47:22.874935229+00:00 stderr F I1208 17:47:22.874868 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config, name: ingress-operator, uid: a361b360-6755-4cb9-b68b-2998a0713612]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.874935229+00:00 stderr F I1208 17:47:22.874922 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: default-deny, uid: 6b29ed9b-3722-4226-8cf0-5f567d0b479b]" virtual=false 2025-12-08T17:47:22.877490100+00:00 stderr F I1208 17:47:22.877440 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-config, name: machine-api-controllers, uid: 25963d68-1e2f-4b0e-a08f-0ce996f7b51f]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.877490100+00:00 stderr F I1208 17:47:22.877468 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: collect-profiles, uid: 60dc7d80-6fce-4c91-9304-81fc23842033]" virtual=false 2025-12-08T17:47:22.882112374+00:00 stderr F I1208 17:47:22.882062 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-console-operator, name: console-operator, uid: 6ee80bf7-3aef-44c3-8ae6-babe6e24ccf2]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.882112374+00:00 stderr F I1208 17:47:22.882096 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: default-deny-all-traffic, uid: 5d7de421-b53a-4577-ad68-ea7393dc755c]" virtual=false 2025-12-08T17:47:22.895562808+00:00 stderr F I1208 17:47:22.895458 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-console-operator, name: prometheus-k8s, uid: 5702339a-1e79-4ada-a460-4e2eff82de4d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.895678162+00:00 stderr F I1208 17:47:22.895641 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: olm-operator, uid: 85980716-45a2-4c0f-be28-4217783526a0]" virtual=false 2025-12-08T17:47:22.907389110+00:00 stderr F I1208 17:47:22.907285 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-console-user-settings, name: console-user-settings-admin, uid: eb68d863-03ac-4a8d-8ebf-53c9f9785f85]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.907389110+00:00 stderr F I1208 17:47:22.907342 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-controller-manager, name: prometheus-k8s, uid: 2adafd1c-c1b3-462c-9c6f-384653b668a8]" virtual=false 2025-12-08T17:47:22.911476569+00:00 stderr F I1208 17:47:22.911390 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-console, name: console-operator, uid: 1d0d1e48-a9e1-4e4a-8a48-181bf893de48]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.911514220+00:00 stderr F I1208 17:47:22.911472 1 
garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-6, uid: 9ceb9b72-2527-47c6-ae4e-f1d92f0aee7d]" virtual=false 2025-12-08T17:47:22.911696996+00:00 stderr F I1208 17:47:22.911651 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-console, name: prometheus-k8s, uid: 6da3f11b-9f14-4506-a933-bb3207d8d635]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.911715507+00:00 stderr F I1208 17:47:22.911687 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-dns-operator, name: dns-operator, uid: 27eea001-0ad1-463e-b0b1-338bbbd06608]" virtual=false 2025-12-08T17:47:22.915173475+00:00 stderr F I1208 17:47:22.915106 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-controller-manager-operator, name: prometheus-k8s, uid: 902b45ff-a672-4079-bc39-9c3db9fbed24]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.915217997+00:00 stderr F I1208 17:47:22.915158 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-dns-operator, name: prometheus-k8s, uid: 2ee6fe08-e91c-466a-ba8b-85813c903312]" virtual=false 2025-12-08T17:47:22.926560784+00:00 stderr F I1208 17:47:22.926464 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: package-server-manager, uid: 6e667886-db08-4af0-9937-668c3a1a44aa]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.926560784+00:00 stderr F I1208 17:47:22.926522 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-10, uid: abc26192-80da-4229-9267-91d2e7c7eb5f]" virtual=false 2025-12-08T17:47:22.926656087+00:00 stderr F I1208 17:47:22.926464 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: allow-ingress-kubeapi, uid: 61749708-bb52-4e70-92b1-807c81c26e6a]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.926656087+00:00 stderr F I1208 17:47:22.926637 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-scheduler, name: revision-status-2, uid: f5063641-afc0-44e4-b98e-2062cca37149]" virtual=false 2025-12-08T17:47:22.932686866+00:00 stderr F I1208 17:47:22.932609 1 
garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: allow-ingress-metal, uid: 02a4dba1-9dec-4992-a11c-4f4c03f7e69c]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.932686866+00:00 stderr F I1208 17:47:22.932660 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-7, uid: b08accdd-34d1-43ec-b9f0-be52165b199c]" virtual=false 2025-12-08T17:47:22.937464557+00:00 stderr F I1208 17:47:22.937352 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: packageserver, uid: 91cc0f79-c51b-424a-a293-e2750a385ac6]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.937464557+00:00 stderr F I1208 17:47:22.937438 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-1, uid: 4768d56f-c284-4d80-a18a-7f2fc97d36f1]" virtual=false 2025-12-08T17:47:22.953366328+00:00 stderr F I1208 17:47:22.953260 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-marketplace, name: marketplace-operator, uid: 9a95e898-e71f-46f4-ab23-671d4fbd8588]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.953366328+00:00 stderr F I1208 17:47:22.953341 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-scheduler, name: revision-status-3, uid: ae4fdaf7-2628-4d35-8d24-e03e4228a048]" virtual=false 2025-12-08T17:47:22.954478553+00:00 stderr F I1208 17:47:22.954425 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: allow-all-egress, uid: 69a87104-2112-439f-b270-5e735b3ccc9f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.954478553+00:00 stderr F I1208 17:47:22.954457 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-etcd-operator, name: prometheus-k8s, uid: 5bdcbcd3-fe52-498d-ae10-72a5939afa87]" virtual=false 2025-12-08T17:47:22.959263343+00:00 stderr F I1208 17:47:22.959195 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: allow-ingress-metrics, uid: 6a07247b-872b-40d1-baeb-45b0e1ec1d09]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.959263343+00:00 stderr F I1208 17:47:22.959242 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-2, uid: 8901d31a-3867-4570-b0e5-ae6a39cff91d]" virtual=false 2025-12-08T17:47:22.968916997+00:00 stderr F I1208 17:47:22.968794 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-marketplace, name: default-deny-all, uid: 26158c82-b93b-4b40-ad84-d46310003f34]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.968916997+00:00 stderr F I1208 17:47:22.968854 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-scheduler, name: revision-status-5, uid: b4c3ff81-9e93-4c5b-b530-e6a169c05a01]" virtual=false 2025-12-08T17:47:22.969814636+00:00 stderr F I1208 17:47:22.969774 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-6, uid: 9ceb9b72-2527-47c6-ae4e-f1d92f0aee7d]" 2025-12-08T17:47:22.969830496+00:00 stderr F I1208 17:47:22.969806 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-9, uid: eb1cf43c-e507-46f9-97cb-9059b8b2bdd3]" virtual=false 2025-12-08T17:47:22.977603890+00:00 stderr F I1208 17:47:22.977541 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-marketplace, name: unpack-bundles, uid: d387f0e1-78f2-4f21-8b20-eb44e79f2d4e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.977643972+00:00 stderr F I1208 17:47:22.977597 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-11, uid: c121b5bf-996c-44ca-8254-1a98965ff795]" virtual=false 2025-12-08T17:47:22.984343113+00:00 stderr F I1208 17:47:22.984277 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: catalog-operator, uid: 9af2c58b-44d2-43f2-bfa6-abf8a256a724]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.984396105+00:00 stderr F I1208 17:47:22.984332 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-scheduler, name: revision-status-6, uid: 7fbbc739-803e-4e42-8e9d-30d2b33db770]" virtual=false 2025-12-08T17:47:22.986270854+00:00 stderr F I1208 17:47:22.986209 1 garbagecollector.go:548] "item doesn't have an owner, 
continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-10, uid: abc26192-80da-4229-9267-91d2e7c7eb5f]" 2025-12-08T17:47:22.986362807+00:00 stderr F I1208 17:47:22.986337 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-4, uid: 916d9208-ead5-4b5c-997f-82c9f885ea3e]" virtual=false 2025-12-08T17:47:22.990190907+00:00 stderr F I1208 17:47:22.990162 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-scheduler, name: revision-status-2, uid: f5063641-afc0-44e4-b98e-2062cca37149]" 2025-12-08T17:47:22.990276760+00:00 stderr F I1208 17:47:22.990255 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-8, uid: 389efd57-822c-4936-b5ef-48f1f85dba11]" virtual=false 2025-12-08T17:47:22.995048980+00:00 stderr F I1208 17:47:22.994985 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operators, name: default-allow-all, uid: 232e5193-54e7-43f0-ad09-44fb764c3765]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:22.995138453+00:00 stderr F I1208 17:47:22.995115 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-3, uid: f02d59da-bbe4-4f8b-a03b-6d3ee409478c]" virtual=false 2025-12-08T17:47:23.002751972+00:00 stderr F I1208 17:47:23.001405 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-1, uid: 4768d56f-c284-4d80-a18a-7f2fc97d36f1]" 2025-12-08T17:47:23.002751972+00:00 stderr F I1208 17:47:23.001445 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-etcd, name: revision-status-1, uid: f0431d0b-c8bb-453e-adcc-46f809a3bc04]" virtual=false 2025-12-08T17:47:23.002751972+00:00 stderr F I1208 17:47:23.001544 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-7, uid: b08accdd-34d1-43ec-b9f0-be52165b199c]" 2025-12-08T17:47:23.002751972+00:00 stderr F I1208 17:47:23.001557 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-5, uid: c91d53ac-6902-4aea-bc79-72ae21f68713]" virtual=false 2025-12-08T17:47:23.007137260+00:00 stderr F I1208 17:47:23.007061 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: allow-ingress-cluster, uid: bfdb4887-ce6d-45fa-ab0e-d0f8e92c6c08]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.007247154+00:00 stderr F I1208 17:47:23.007221 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-etcd, name: revision-status-2, uid: c41b0f0a-724d-401f-b8bd-abaae84f2c11]" virtual=false 2025-12-08T17:47:23.010314310+00:00 stderr F I1208 17:47:23.010279 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-machine-api, name: default-deny, uid: 6b29ed9b-3722-4226-8cf0-5f567d0b479b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.010398953+00:00 stderr F I1208 17:47:23.010377 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Secret, namespace: openshift-etcd-operator, name: etcd-client, uid: 3f64a35c-cf8e-418c-a4a0-bf68cea2beb1]" virtual=false 2025-12-08T17:47:23.010717073+00:00 stderr F I1208 17:47:23.010662 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: collect-profiles, uid: 60dc7d80-6fce-4c91-9304-81fc23842033]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.010731663+00:00 stderr F I1208 17:47:23.010713 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-12, uid: 217d88a8-9d2a-4b34-b7a0-b71225e7b3fb]" virtual=false 2025-12-08T17:47:23.014640337+00:00 stderr F I1208 17:47:23.014163 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: default-deny-all-traffic, uid: 5d7de421-b53a-4577-ad68-ea7393dc755c]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.014640337+00:00 stderr F I1208 17:47:23.014195 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-etcd-operator, name: etcd-prometheus-rules, uid: 97274c51-fc95-4eb6-ad00-7a3b4f31f2ca]" virtual=false 2025-12-08T17:47:23.021179442+00:00 stderr F I1208 17:47:23.021131 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-scheduler, name: revision-status-3, uid: ae4fdaf7-2628-4d35-8d24-e03e4228a048]" 2025-12-08T17:47:23.021202683+00:00 stderr F I1208 17:47:23.021176 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-kube-apiserver-operator, name: kube-apiserver-operator, uid: 2877fbf1-ff5a-4aa9-b775-2605d4bccd96]" virtual=false 2025-12-08T17:47:23.025225649+00:00 stderr F I1208 17:47:23.025175 
1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-2, uid: 8901d31a-3867-4570-b0e5-ae6a39cff91d]" 2025-12-08T17:47:23.025302072+00:00 stderr F I1208 17:47:23.025276 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-scheduler, name: revision-status-4, uid: 3c0c45f7-a4ba-4891-8b19-1900e7bfb64c]" virtual=false 2025-12-08T17:47:23.028520474+00:00 stderr F I1208 17:47:23.028456 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-operator-lifecycle-manager, name: olm-operator, uid: 85980716-45a2-4c0f-be28-4217783526a0]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.028543665+00:00 stderr F I1208 17:47:23.028507 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-kube-controller-manager-operator, name: kube-controller-manager-operator, uid: 0b5fb497-c46f-4ec8-b3ac-f01c6ed66367]" virtual=false 2025-12-08T17:47:23.031221869+00:00 stderr F I1208 17:47:23.031176 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-controller-manager, name: prometheus-k8s, uid: 2adafd1c-c1b3-462c-9c6f-384653b668a8]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.031242830+00:00 stderr F I1208 17:47:23.031211 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-network-operator, name: openshift-network-operator-ipsec-rules, uid: 4b9d0264-6145-4803-a9c5-e7715dde16c7]" virtual=false 2025-12-08T17:47:23.034016857+00:00 stderr F I1208 17:47:23.033973 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-scheduler, name: revision-status-5, uid: b4c3ff81-9e93-4c5b-b530-e6a169c05a01]" 2025-12-08T17:47:23.034016857+00:00 stderr F I1208 17:47:23.034003 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-image-registry, name: image-registry-rules, uid: a1a802b1-bbfe-4655-8366-9f94d997c9ee]" virtual=false 2025-12-08T17:47:23.037085583+00:00 stderr F I1208 17:47:23.037050 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-9, uid: eb1cf43c-e507-46f9-97cb-9059b8b2bdd3]" 2025-12-08T17:47:23.037106734+00:00 stderr F I1208 17:47:23.037075 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-kube-apiserver, name: kube-apiserver-performance-recording-rules, uid: f911c2bf-6b61-41f5-9e5a-f111e13fea13]" virtual=false 
2025-12-08T17:47:23.041153101+00:00 stderr F I1208 17:47:23.041103 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-dns-operator, name: dns-operator, uid: 27eea001-0ad1-463e-b0b1-338bbbd06608]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.041153101+00:00 stderr F I1208 17:47:23.041138 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-kube-scheduler-operator, name: kube-scheduler-operator, uid: b334ae77-e6a0-41f9-b470-35a7cb6618a5]" virtual=false 2025-12-08T17:47:23.043063291+00:00 stderr F I1208 17:47:23.043025 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-11, uid: c121b5bf-996c-44ca-8254-1a98965ff795]" 2025-12-08T17:47:23.043063291+00:00 stderr F I1208 17:47:23.043050 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-machine-config-operator, name: machine-config-controller, uid: be18a557-8d05-434c-9f9b-d928b26e652a]" virtual=false 2025-12-08T17:47:23.048996418+00:00 stderr F I1208 17:47:23.048920 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-dns-operator, name: prometheus-k8s, uid: 2ee6fe08-e91c-466a-ba8b-85813c903312]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.049095581+00:00 stderr F I1208 17:47:23.049070 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-cluster-version, name: cluster-version-operator, uid: f57fe110-2989-4987-a61c-24caa6fc9bb2]" virtual=false 2025-12-08T17:47:23.050390011+00:00 stderr F I1208 17:47:23.050361 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-scheduler, name: revision-status-6, uid: 7fbbc739-803e-4e42-8e9d-30d2b33db770]" 2025-12-08T17:47:23.050473514+00:00 stderr F I1208 17:47:23.050450 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-image-registry, name: cluster-image-registry-operator, uid: 28e1dbac-2835-4e00-bbcd-c559366575bb]" virtual=false 2025-12-08T17:47:23.054383028+00:00 stderr F I1208 17:47:23.054313 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-4, uid: 916d9208-ead5-4b5c-997f-82c9f885ea3e]" 2025-12-08T17:47:23.054486201+00:00 stderr F I1208 17:47:23.054460 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-image-registry, name: imagestreams-rules, uid: 
7f8f7459-95b2-46d1-a5f5-c98861a56f22]" virtual=false 2025-12-08T17:47:23.057391942+00:00 stderr F I1208 17:47:23.057076 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-8, uid: 389efd57-822c-4936-b5ef-48f1f85dba11]" 2025-12-08T17:47:23.057391942+00:00 stderr F I1208 17:47:23.057124 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-image-registry, name: node-ca, uid: 5f291cdd-c99d-4124-aa53-a0a0ab505e3f]" virtual=false 2025-12-08T17:47:23.061167431+00:00 stderr F I1208 17:47:23.061120 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-3, uid: f02d59da-bbe4-4f8b-a03b-6d3ee409478c]" 2025-12-08T17:47:23.061201082+00:00 stderr F I1208 17:47:23.061164 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-image-registry, name: prometheus-k8s, uid: 531f27d1-3010-40b8-a5d8-aaca67f7d382]" virtual=false 2025-12-08T17:47:23.063715221+00:00 stderr F I1208 17:47:23.063665 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-etcd, name: revision-status-1, uid: f0431d0b-c8bb-453e-adcc-46f809a3bc04]" 2025-12-08T17:47:23.063803704+00:00 stderr F I1208 17:47:23.063783 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-ingress-operator, name: ingress-operator, uid: 58ce10a7-6f25-4811-bff2-bc4a3cc330ee]" virtual=false 2025-12-08T17:47:23.067081967+00:00 stderr F I1208 17:47:23.067035 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-5, uid: c91d53ac-6902-4aea-bc79-72ae21f68713]" 2025-12-08T17:47:23.067099388+00:00 stderr F I1208 17:47:23.067081 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-ingress-operator, name: prometheus-k8s, uid: 7627ebcc-7f0f-44e2-a90a-25d7f7f4fe20]" virtual=false 2025-12-08T17:47:23.070773764+00:00 stderr F I1208 17:47:23.070513 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-etcd, name: revision-status-2, uid: c41b0f0a-724d-401f-b8bd-abaae84f2c11]" 2025-12-08T17:47:23.070773764+00:00 stderr F I1208 17:47:23.070547 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kni-infra, name: host-networking-system-node, uid: 67f45f81-1023-4da0-9870-d9545d0217d4]" virtual=false 2025-12-08T17:47:23.075870004+00:00 stderr F I1208 17:47:23.075688 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-apiserver, name: revision-status-12, uid: 217d88a8-9d2a-4b34-b7a0-b71225e7b3fb]" 2025-12-08T17:47:23.075870004+00:00 stderr F 
I1208 17:47:23.075724 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-apiserver-operator, name: prometheus-k8s, uid: 6be26842-4041-4228-90d5-6e63e6300edd]" virtual=false 2025-12-08T17:47:23.093994995+00:00 stderr F I1208 17:47:23.089141 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-etcd-operator, name: prometheus-k8s, uid: 5bdcbcd3-fe52-498d-ae10-72a5939afa87]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.093994995+00:00 stderr F I1208 17:47:23.089201 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-apiserver, name: prometheus-k8s, uid: 0b6bc908-facb-4580-82f5-bff11fb34309]" virtual=false 2025-12-08T17:47:23.097819585+00:00 stderr F I1208 17:47:23.097746 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-scheduler, name: revision-status-4, uid: 3c0c45f7-a4ba-4891-8b19-1900e7bfb64c]" 2025-12-08T17:47:23.097819585+00:00 stderr F I1208 17:47:23.097788 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-controller-manager-operator, name: prometheus-k8s, uid: 04f722e8-f3c8-48f9-8726-d3ed22a8f6db]" virtual=false 2025-12-08T17:47:23.141490010+00:00 stderr F I1208 17:47:23.141399 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Secret, namespace: openshift-etcd-operator, name: etcd-client, uid: 3f64a35c-cf8e-418c-a4a0-bf68cea2beb1]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503"}] 2025-12-08T17:47:23.141490010+00:00 stderr F I1208 17:47:23.141460 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-controller-manager, name: prometheus-k8s, uid: a9588721-392a-4c35-83c5-11f82c4c1cad]" virtual=false 2025-12-08T17:47:23.148519070+00:00 stderr F I1208 17:47:23.147990 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-etcd-operator, name: etcd-prometheus-rules, uid: 97274c51-fc95-4eb6-ad00-7a3b4f31f2ca]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.148519070+00:00 stderr F I1208 17:47:23.148056 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-scheduler-operator, name: prometheus-k8s, uid: 2ad12ea6-7b88-4bb1-ad15-8c6ea12eaa71]" virtual=false 2025-12-08T17:47:23.150607486+00:00 stderr F I1208 17:47:23.150535 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" 
item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-kube-apiserver-operator, name: kube-apiserver-operator, uid: 2877fbf1-ff5a-4aa9-b775-2605d4bccd96]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.150628517+00:00 stderr F I1208 17:47:23.150593 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-scheduler, name: prometheus-k8s, uid: 6bd53a77-3f4a-4220-bb5c-858e7a9b0cd9]" virtual=false 2025-12-08T17:47:23.160426936+00:00 stderr F I1208 17:47:23.160346 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-kube-controller-manager-operator, name: kube-controller-manager-operator, uid: 0b5fb497-c46f-4ec8-b3ac-f01c6ed66367]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.160426936+00:00 stderr F I1208 17:47:23.160383 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: cluster-autoscaler, uid: a90481e3-8aed-4ca6-b853-79e50236fafe]" virtual=false 2025-12-08T17:47:23.164240626+00:00 stderr F I1208 17:47:23.163915 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-image-registry, name: image-registry-rules, uid: a1a802b1-bbfe-4655-8366-9f94d997c9ee]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.164240626+00:00 stderr F I1208 17:47:23.163943 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: cluster-autoscaler-operator, uid: d68d9d3f-786d-4acb-9f43-c353ffe1644e]" virtual=false 2025-12-08T17:47:23.167014693+00:00 stderr F I1208 17:47:23.166977 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-network-operator, name: openshift-network-operator-ipsec-rules, uid: 4b9d0264-6145-4803-a9c5-e7715dde16c7]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.167034094+00:00 stderr F I1208 17:47:23.167006 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: control-plane-machine-set-operator, uid: 0c867af6-79d5-465d-b2ab-1be4f069171f]" virtual=false 2025-12-08T17:47:23.171232516+00:00 stderr F I1208 17:47:23.171161 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-kube-apiserver, name: kube-apiserver-performance-recording-rules, uid: 
f911c2bf-6b61-41f5-9e5a-f111e13fea13]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.171232516+00:00 stderr F I1208 17:47:23.171212 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: machine-api-controllers, uid: 3541bdca-28da-4529-aa34-dc375f9b4fac]" virtual=false 2025-12-08T17:47:23.174330714+00:00 stderr F I1208 17:47:23.174277 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-kube-scheduler-operator, name: kube-scheduler-operator, uid: b334ae77-e6a0-41f9-b470-35a7cb6618a5]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.174330714+00:00 stderr F I1208 17:47:23.174307 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: machine-api-operator, uid: 148fb231-346b-4ba3-a947-f077fd1f2673]" virtual=false 2025-12-08T17:47:23.176706759+00:00 stderr F I1208 17:47:23.176586 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-machine-config-operator, name: machine-config-controller, uid: be18a557-8d05-434c-9f9b-d928b26e652a]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.176706759+00:00 stderr F I1208 17:47:23.176631 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: prometheus-k8s-cluster-autoscaler-operator, uid: 195e6c38-8d7f-4de3-b20e-404ee68478e3]" virtual=false 2025-12-08T17:47:23.180177928+00:00 stderr F I1208 17:47:23.180127 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-cluster-version, name: cluster-version-operator, uid: f57fe110-2989-4987-a61c-24caa6fc9bb2]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.180177928+00:00 stderr F I1208 17:47:23.180153 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: prometheus-k8s-machine-api-operator, uid: bb572898-23e1-47ec-a10d-a96fe1ecbe9d]" virtual=false 2025-12-08T17:47:23.183728739+00:00 stderr F I1208 17:47:23.183684 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-image-registry, name: cluster-image-registry-operator, uid: 28e1dbac-2835-4e00-bbcd-c559366575bb]" 
owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.183728739+00:00 stderr F I1208 17:47:23.183718 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-config-operator, name: prometheus-k8s, uid: 052638af-4f81-4e6a-8c5e-960132e14b35]" virtual=false 2025-12-08T17:47:23.188057446+00:00 stderr F I1208 17:47:23.187741 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-image-registry, name: imagestreams-rules, uid: 7f8f7459-95b2-46d1-a5f5-c98861a56f22]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.188057446+00:00 stderr F I1208 17:47:23.187837 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-marketplace, name: marketplace-operator, uid: 8f0da541-5375-4494-a502-94aa8540020e]" virtual=false 2025-12-08T17:47:23.190409459+00:00 stderr F I1208 17:47:23.190009 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-image-registry, name: node-ca, uid: 5f291cdd-c99d-4124-aa53-a0a0ab505e3f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.190409459+00:00 stderr F I1208 17:47:23.190038 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-marketplace, name: openshift-marketplace-metrics, uid: e0e93b2f-fa87-4069-8467-6366728d0a62]" virtual=false 2025-12-08T17:47:23.193384413+00:00 stderr F I1208 17:47:23.193344 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-image-registry, name: prometheus-k8s, uid: 531f27d1-3010-40b8-a5d8-aaca67f7d382]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.193384413+00:00 stderr F I1208 17:47:23.193370 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-monitoring, name: cluster-monitoring-operator, uid: 7f4699d7-5595-4e90-9a52-ed4769b0c986]" virtual=false 2025-12-08T17:47:23.196793421+00:00 stderr F I1208 17:47:23.196742 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-ingress-operator, name: ingress-operator, uid: 58ce10a7-6f25-4811-bff2-bc4a3cc330ee]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.196793421+00:00 stderr F I1208 17:47:23.196778 1 garbagecollector.go:501] 
"Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-monitoring, name: cluster-monitoring-operator-alert-customization, uid: fba13e8c-652c-4d18-9044-6893349b700d]" virtual=false 2025-12-08T17:47:23.203361057+00:00 stderr F I1208 17:47:23.203290 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-ingress-operator, name: prometheus-k8s, uid: 7627ebcc-7f0f-44e2-a90a-25d7f7f4fe20]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.203361057+00:00 stderr F I1208 17:47:23.203346 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-monitoring, name: console-operator, uid: babad3be-1b02-48ca-8bc3-64551155108c]" virtual=false 2025-12-08T17:47:23.204543474+00:00 stderr F I1208 17:47:23.204499 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kni-infra, name: host-networking-system-node, uid: 67f45f81-1023-4da0-9870-d9545d0217d4]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.204543474+00:00 stderr F I1208 17:47:23.204520 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-multus, name: multus-whereabouts, uid: 306d9ccf-da01-46ab-b78e-02458b50939f]" virtual=false 2025-12-08T17:47:23.212015820+00:00 stderr F I1208 17:47:23.211513 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-apiserver-operator, name: prometheus-k8s, uid: 6be26842-4041-4228-90d5-6e63e6300edd]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.212015820+00:00 stderr F I1208 17:47:23.211989 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-multus, name: prometheus-k8s, uid: 5fec5a95-01ea-4349-8aeb-22c31f6322b6]" virtual=false 2025-12-08T17:47:23.223657066+00:00 stderr F I1208 17:47:23.223585 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-apiserver, name: prometheus-k8s, uid: 0b6bc908-facb-4580-82f5-bff11fb34309]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.223683317+00:00 stderr F I1208 17:47:23.223648 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-network-diagnostics, name: network-diagnostics, uid: 00a918ea-d345-4c69-8447-3b22d23e9f7b]" virtual=false 
2025-12-08T17:47:23.228695505+00:00 stderr F I1208 17:47:23.228644 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-controller-manager-operator, name: prometheus-k8s, uid: 04f722e8-f3c8-48f9-8726-d3ed22a8f6db]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.228736896+00:00 stderr F I1208 17:47:23.228683 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-network-diagnostics, name: prometheus-k8s, uid: 6695f592-c1ca-4af1-8eaf-0e0b0d8959fc]" virtual=false 2025-12-08T17:47:23.261228259+00:00 stderr F I1208 17:47:23.261161 1 replica_set.go:626] "Too many replicas" logger="replicaset-controller" replicaSet="openshift-controller-manager/controller-manager-6cd9c44569" need=0 deleting=1 2025-12-08T17:47:23.261228259+00:00 stderr F I1208 17:47:23.261210 1 replica_set.go:253] "Found related ReplicaSets" logger="replicaset-controller" replicaSet="openshift-controller-manager/controller-manager-6cd9c44569" relatedReplicaSets=["openshift-controller-manager/controller-manager-86f48fd68b","openshift-controller-manager/controller-manager-c7d4b49f6","openshift-controller-manager/controller-manager-74bfd85b68","openshift-controller-manager/controller-manager-7c9cdb8ff5","openshift-controller-manager/controller-manager-9fd5cc475","openshift-controller-manager/controller-manager-c84474957","openshift-controller-manager/controller-manager-6cd9c44569","openshift-controller-manager/controller-manager-5cb6f9d449","openshift-controller-manager/controller-manager-58897fffb5","openshift-controller-manager/controller-manager-5d4c96c665","openshift-controller-manager/controller-manager-5f76cf6594","openshift-controller-manager/controller-manager-65b6cccf98","openshift-controller-manager/controller-manager-6ff9c7475c"] 2025-12-08T17:47:23.261369684+00:00 stderr F I1208 17:47:23.261320 1 controller_utils.go:618] "Deleting pod" logger="replicaset-controller" controller="controller-manager-6cd9c44569" pod="openshift-controller-manager/controller-manager-6cd9c44569-vhg58" 2025-12-08T17:47:23.265949047+00:00 stderr F I1208 17:47:23.265868 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-controller-manager/controller-manager" err="Operation cannot be fulfilled on deployments.apps \"controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:23.274791796+00:00 stderr F I1208 17:47:23.274597 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-controller-manager/controller-manager-5cb6f9d449" need=1 creating=1 2025-12-08T17:47:23.274791796+00:00 stderr F I1208 17:47:23.274731 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-controller-manager, name: prometheus-k8s, uid: a9588721-392a-4c35-83c5-11f82c4c1cad]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.274820517+00:00 stderr F I1208 17:47:23.274784 1 
garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-network-node-identity, name: network-node-identity-leases, uid: 6546e654-a56c-40cd-b5e5-b0af53dc1922]" virtual=false 2025-12-08T17:47:23.281458206+00:00 stderr F I1208 17:47:23.281360 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-scheduler-operator, name: prometheus-k8s, uid: 2ad12ea6-7b88-4bb1-ad15-8c6ea12eaa71]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.281458206+00:00 stderr F I1208 17:47:23.281426 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-network-node-identity, name: system:openshift:scc:hostnetwork-v2, uid: 7e7e52cd-a43c-47dd-b267-7ac721bc6113]" virtual=false 2025-12-08T17:47:23.285417400+00:00 stderr F I1208 17:47:23.285291 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-kube-scheduler, name: prometheus-k8s, uid: 6bd53a77-3f4a-4220-bb5c-858e7a9b0cd9]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.285417400+00:00 stderr F I1208 17:47:23.285328 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-network-operator, name: prometheus-k8s, uid: 62b3747f-27ac-4810-bbdd-3307f382f740]" virtual=false 2025-12-08T17:47:23.290179080+00:00 stderr F I1208 17:47:23.290112 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: cluster-autoscaler, uid: a90481e3-8aed-4ca6-b853-79e50236fafe]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.290206921+00:00 stderr F I1208 17:47:23.290165 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-nutanix-infra, name: host-networking-system-node, uid: 24dfe867-53c3-48b6-bbd7-5225bd207aed]" virtual=false 2025-12-08T17:47:23.297892933+00:00 stderr F I1208 17:47:23.297805 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: cluster-autoscaler-operator, uid: d68d9d3f-786d-4acb-9f43-c353ffe1644e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.297892933+00:00 stderr F I1208 17:47:23.297852 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-oauth-apiserver, name: prometheus-k8s, uid: 
5aa0e1a1-4826-4259-98cd-75c369bad7a7]" virtual=false 2025-12-08T17:47:23.300107503+00:00 stderr F I1208 17:47:23.300040 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: control-plane-machine-set-operator, uid: 0c867af6-79d5-465d-b2ab-1be4f069171f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.300127224+00:00 stderr F I1208 17:47:23.300093 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-openstack-infra, name: host-networking-system-node, uid: 46d26db4-f098-4515-a946-0219c7756c23]" virtual=false 2025-12-08T17:47:23.303398217+00:00 stderr F I1208 17:47:23.303341 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: machine-api-controllers, uid: 3541bdca-28da-4529-aa34-dc375f9b4fac]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.303426527+00:00 stderr F I1208 17:47:23.303392 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-operator-lifecycle-manager, name: collect-profiles, uid: 68a586c5-c3d1-4cfe-9783-ca5e2027b642]" virtual=false 2025-12-08T17:47:23.307129504+00:00 stderr F I1208 17:47:23.307076 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: machine-api-operator, uid: 148fb231-346b-4ba3-a947-f077fd1f2673]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.307129504+00:00 stderr F I1208 17:47:23.307117 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-operator-lifecycle-manager, name: operator-lifecycle-manager-metrics, uid: 1d42a0b6-dd31-46cb-947c-4809e3fc9a44]" virtual=false 2025-12-08T17:47:23.311686318+00:00 stderr F I1208 17:47:23.311534 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: prometheus-k8s-cluster-autoscaler-operator, uid: 195e6c38-8d7f-4de3-b20e-404ee68478e3]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.311686318+00:00 stderr F I1208 17:47:23.311572 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-ovn-kubernetes, name: openshift-ovn-kubernetes-control-plane-limited, uid: 80be51ee-dda4-483c-b1c5-948301f3c52e]" virtual=false 2025-12-08T17:47:23.313915978+00:00 stderr F I1208 17:47:23.313839 1 
garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-api, name: prometheus-k8s-machine-api-operator, uid: bb572898-23e1-47ec-a10d-a96fe1ecbe9d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.313933588+00:00 stderr F I1208 17:47:23.313909 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-ovn-kubernetes, name: openshift-ovn-kubernetes-nodes-identity-limited, uid: e866be9e-7de1-4a23-9ef1-15d71a5333a5]" virtual=false 2025-12-08T17:47:23.318157941+00:00 stderr F I1208 17:47:23.318088 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-machine-config-operator, name: prometheus-k8s, uid: 052638af-4f81-4e6a-8c5e-960132e14b35]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.318178222+00:00 stderr F I1208 17:47:23.318145 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-ovn-kubernetes, name: prometheus-k8s, uid: 68c8d2f6-d2e7-4ba0-8c6d-3a210481e700]" virtual=false 2025-12-08T17:47:23.321956660+00:00 stderr F I1208 17:47:23.321715 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-marketplace, name: marketplace-operator, uid: 8f0da541-5375-4494-a502-94aa8540020e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.321956660+00:00 stderr F I1208 17:47:23.321765 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-route-controller-manager, name: prometheus-k8s, uid: d3fbd83b-5d77-4307-a4ba-c9c842b63b86]" virtual=false 2025-12-08T17:47:23.324468349+00:00 stderr F I1208 17:47:23.324419 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-marketplace, name: openshift-marketplace-metrics, uid: e0e93b2f-fa87-4069-8467-6366728d0a62]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.324468349+00:00 stderr F I1208 17:47:23.324452 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-service-ca-operator, name: prometheus-k8s, uid: 51617a9b-76c0-4d9f-8068-7c7a521e3991]" virtual=false 2025-12-08T17:47:23.328769845+00:00 stderr F I1208 17:47:23.328712 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: 
openshift-monitoring, name: cluster-monitoring-operator, uid: 7f4699d7-5595-4e90-9a52-ed4769b0c986]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.328788196+00:00 stderr F I1208 17:47:23.328765 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-user-workload-monitoring, name: cluster-monitoring-operator, uid: b9104e38-7421-465a-bf98-b17ca0561bc6]" virtual=false 2025-12-08T17:47:23.331796380+00:00 stderr F I1208 17:47:23.331514 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-monitoring, name: cluster-monitoring-operator-alert-customization, uid: fba13e8c-652c-4d18-9044-6893349b700d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.331796380+00:00 stderr F I1208 17:47:23.331554 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-vsphere-infra, name: host-networking-system-node, uid: 3070c3a2-b2ab-4c6e-9a4b-bdcd112336c5]" virtual=false 2025-12-08T17:47:23.333820544+00:00 stderr F I1208 17:47:23.333773 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-monitoring, name: console-operator, uid: babad3be-1b02-48ca-8bc3-64551155108c]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.333820544+00:00 stderr F I1208 17:47:23.333802 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift, name: cluster-samples-operator-openshift-edit, uid: 995e7439-f4f4-4fa5-82e0-6afcc588fd52]" virtual=false 2025-12-08T17:47:23.337786539+00:00 stderr F I1208 17:47:23.337732 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-multus, name: multus-whereabouts, uid: 306d9ccf-da01-46ab-b78e-02458b50939f]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.337809360+00:00 stderr F I1208 17:47:23.337782 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift, name: copied-csv-viewers, uid: 7426cf47-f08c-4e75-99c5-9b5462dc97c2]" virtual=false 2025-12-08T17:47:23.344181040+00:00 stderr F I1208 17:47:23.344121 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-multus, name: prometheus-k8s, uid: 5fec5a95-01ea-4349-8aeb-22c31f6322b6]" 
owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.344204691+00:00 stderr F I1208 17:47:23.344181 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-apiserver-operator, name: openshift-apiserver-operator, uid: f8199ef4-1467-44ed-9019-69c1f1737f70]" virtual=false 2025-12-08T17:47:23.353989659+00:00 stderr F I1208 17:47:23.353930 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-network-diagnostics, name: network-diagnostics, uid: 00a918ea-d345-4c69-8447-3b22d23e9f7b]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.353989659+00:00 stderr F I1208 17:47:23.353974 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-apiserver, name: apiserver, uid: f913dfec-e49a-4051-9533-8f553abc8845]" virtual=false 2025-12-08T17:47:23.362185447+00:00 stderr F I1208 17:47:23.362107 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-network-diagnostics, name: prometheus-k8s, uid: 6695f592-c1ca-4af1-8eaf-0e0b0d8959fc]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.362185447+00:00 stderr F I1208 17:47:23.362167 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-authentication-operator, name: authentication-operator, uid: 391a5d9a-ccb4-4c96-a945-870a508a19d6]" virtual=false 2025-12-08T17:47:23.419521882+00:00 stderr F I1208 17:47:23.416757 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-network-operator, name: prometheus-k8s, uid: 62b3747f-27ac-4810-bbdd-3307f382f740]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.419521882+00:00 stderr F I1208 17:47:23.416806 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-authentication, name: oauth-openshift, uid: d3695806-c64f-4466-8682-9f2395f1448f]" virtual=false 2025-12-08T17:47:23.419521882+00:00 stderr F I1208 17:47:23.416964 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-network-node-identity, name: network-node-identity-leases, uid: 6546e654-a56c-40cd-b5e5-b0af53dc1922]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.419521882+00:00 stderr F I1208 17:47:23.416981 1 
garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-cluster-machine-approver, name: machine-approver, uid: 614226dc-6dfc-4b23-a9e9-54341ad46bc9]" virtual=false 2025-12-08T17:47:23.419521882+00:00 stderr F I1208 17:47:23.418152 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-network-node-identity, name: system:openshift:scc:hostnetwork-v2, uid: 7e7e52cd-a43c-47dd-b267-7ac721bc6113]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.419521882+00:00 stderr F I1208 17:47:23.418175 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-cluster-samples-operator, name: cluster-samples-operator, uid: 83079835-b3de-4de8-ad7d-f332ab909932]" virtual=false 2025-12-08T17:47:23.420330258+00:00 stderr F I1208 17:47:23.420305 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-apiserver, name: apiserver, uid: f913dfec-e49a-4051-9533-8f553abc8845]" 2025-12-08T17:47:23.420346558+00:00 stderr F I1208 17:47:23.420324 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-cluster-version, name: cluster-version-operator, uid: d5123c8d-63b9-4bc1-a443-acddb48f1d78]" virtual=false 2025-12-08T17:47:23.423706864+00:00 stderr F I1208 17:47:23.423620 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-nutanix-infra, name: host-networking-system-node, uid: 24dfe867-53c3-48b6-bbd7-5225bd207aed]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.423706864+00:00 stderr F I1208 17:47:23.423676 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-config-operator, name: openshift-config-operator, uid: dc451fc9-e781-493f-8e7d-55e9072cc784]" virtual=false 2025-12-08T17:47:23.436955750+00:00 stderr F I1208 17:47:23.435350 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-openstack-infra, name: host-networking-system-node, uid: 46d26db4-f098-4515-a946-0219c7756c23]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.436955750+00:00 stderr F I1208 17:47:23.435412 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-console-operator, name: console-operator, uid: 4982b9f1-eaf4-44fa-a84a-bf9954aedcb1]" virtual=false 2025-12-08T17:47:23.436955750+00:00 stderr F I1208 17:47:23.435588 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" 
item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-oauth-apiserver, name: prometheus-k8s, uid: 5aa0e1a1-4826-4259-98cd-75c369bad7a7]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.436955750+00:00 stderr F I1208 17:47:23.435640 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-console, name: console, uid: 3263a8c6-5259-42d6-a8da-588894b3887d]" virtual=false 2025-12-08T17:47:23.438048225+00:00 stderr F I1208 17:47:23.437950 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-operator-lifecycle-manager, name: collect-profiles, uid: 68a586c5-c3d1-4cfe-9783-ca5e2027b642]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.438071216+00:00 stderr F I1208 17:47:23.438037 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-console, name: downloads, uid: 61eec0c1-c955-4ca2-b98d-b0e62696a08c]" virtual=false 2025-12-08T17:47:23.445517390+00:00 stderr F I1208 17:47:23.443962 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-operator-lifecycle-manager, name: operator-lifecycle-manager-metrics, uid: 1d42a0b6-dd31-46cb-947c-4809e3fc9a44]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.445517390+00:00 stderr F I1208 17:47:23.444021 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-controller-manager-operator, name: openshift-controller-manager-operator, uid: 5effb0d2-94d8-48b7-8c69-e538f7848429]" virtual=false 2025-12-08T17:47:23.445517390+00:00 stderr F I1208 17:47:23.444189 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-ovn-kubernetes, name: openshift-ovn-kubernetes-control-plane-limited, uid: 80be51ee-dda4-483c-b1c5-948301f3c52e]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.445517390+00:00 stderr F I1208 17:47:23.444210 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-controller-manager, name: controller-manager, uid: 2935ab56-0ed7-4afe-8c71-c57de10607f1]" virtual=false 2025-12-08T17:47:23.447391099+00:00 stderr F I1208 17:47:23.447343 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-ovn-kubernetes, name: openshift-ovn-kubernetes-nodes-identity-limited, uid: e866be9e-7de1-4a23-9ef1-15d71a5333a5]" 
owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.447409640+00:00 stderr F I1208 17:47:23.447388 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-dns-operator, name: dns-operator, uid: 75c9caa6-d284-4a97-95d2-2a04b51f093f]" virtual=false 2025-12-08T17:47:23.451075315+00:00 stderr F I1208 17:47:23.451001 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-ovn-kubernetes, name: prometheus-k8s, uid: 68c8d2f6-d2e7-4ba0-8c6d-3a210481e700]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.451075315+00:00 stderr F I1208 17:47:23.451035 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-etcd-operator, name: etcd-operator, uid: 7bcc9069-5a71-4f51-8970-90dddeee56b2]" virtual=false 2025-12-08T17:47:23.458974734+00:00 stderr F I1208 17:47:23.458911 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-route-controller-manager, name: prometheus-k8s, uid: d3fbd83b-5d77-4307-a4ba-c9c842b63b86]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.458974734+00:00 stderr F I1208 17:47:23.458938 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-image-registry, name: cluster-image-registry-operator, uid: a4c18a44-787c-4851-97ac-f3da87e8d0e3]" virtual=false 2025-12-08T17:47:23.461914656+00:00 stderr F I1208 17:47:23.460735 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-service-ca-operator, name: prometheus-k8s, uid: 51617a9b-76c0-4d9f-8068-7c7a521e3991]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.461914656+00:00 stderr F I1208 17:47:23.460781 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-image-registry, name: image-registry, uid: d3f5db75-c64f-496e-937b-26ce08f3d633]" virtual=false 2025-12-08T17:47:23.465486829+00:00 stderr F I1208 17:47:23.465421 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-user-workload-monitoring, name: cluster-monitoring-operator, uid: b9104e38-7421-465a-bf98-b17ca0561bc6]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.465506310+00:00 stderr F I1208 17:47:23.465489 1 garbagecollector.go:501] "Processing item" 
logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-ingress-operator, name: ingress-operator, uid: dcd260b6-d741-4056-94e9-f063ec7db58c]" virtual=false 2025-12-08T17:47:23.465532641+00:00 stderr F I1208 17:47:23.465486 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift-vsphere-infra, name: host-networking-system-node, uid: 3070c3a2-b2ab-4c6e-9a4b-bdcd112336c5]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.465560681+00:00 stderr F I1208 17:47:23.465540 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-kube-apiserver-operator, name: kube-apiserver-operator, uid: 3a8705c5-b62b-40a4-8e43-30f0569fa490]" virtual=false 2025-12-08T17:47:23.467952267+00:00 stderr F I1208 17:47:23.467914 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift, name: cluster-samples-operator-openshift-edit, uid: 995e7439-f4f4-4fa5-82e0-6afcc588fd52]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.467974438+00:00 stderr F I1208 17:47:23.467960 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-kube-controller-manager-operator, name: kube-controller-manager-operator, uid: 09857aec-2c93-4f0d-9e38-a820bd5b8362]" virtual=false 2025-12-08T17:47:23.470819057+00:00 stderr F I1208 17:47:23.470771 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/RoleBinding, namespace: openshift, name: copied-csv-viewers, uid: 7426cf47-f08c-4e75-99c5-9b5462dc97c2]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.470886329+00:00 stderr F I1208 17:47:23.470855 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-kube-scheduler-operator, name: openshift-kube-scheduler-operator, uid: c3ff943a-b570-4a98-8388-1f8a3280a85a]" virtual=false 2025-12-08T17:47:23.473597775+00:00 stderr F I1208 17:47:23.473103 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-authentication, name: oauth-openshift, uid: d3695806-c64f-4466-8682-9f2395f1448f]" 2025-12-08T17:47:23.473597775+00:00 stderr F I1208 17:47:23.473150 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-kube-storage-version-migrator-operator, name: kube-storage-version-migrator-operator, uid: af746821-921a-4842-94da-28c08769612a]" virtual=false 2025-12-08T17:47:23.480290295+00:00 stderr F I1208 17:47:23.480215 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, 
namespace: openshift-apiserver-operator, name: openshift-apiserver-operator, uid: f8199ef4-1467-44ed-9019-69c1f1737f70]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.480290295+00:00 stderr F I1208 17:47:23.480258 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-kube-storage-version-migrator, name: migrator, uid: e04da894-1c98-4971-8b8f-a1f4a381dbaf]" virtual=false 2025-12-08T17:47:23.494664377+00:00 stderr F I1208 17:47:23.494581 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-authentication-operator, name: authentication-operator, uid: 391a5d9a-ccb4-4c96-a945-870a508a19d6]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.494664377+00:00 stderr F I1208 17:47:23.494633 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-machine-api, name: control-plane-machine-set-operator, uid: 7b943ba9-3321-444f-9be4-e7b351a28efa]" virtual=false 2025-12-08T17:47:23.511566749+00:00 stderr F I1208 17:47:23.511202 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-controller-manager, name: controller-manager, uid: 2935ab56-0ed7-4afe-8c71-c57de10607f1]" 2025-12-08T17:47:23.511914750+00:00 stderr F I1208 17:47:23.511537 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-machine-api, name: machine-api-operator, uid: 6e3281a2-74ca-4530-b743-ae9a62edcc78]" virtual=false 2025-12-08T17:47:23.525291781+00:00 stderr F I1208 17:47:23.525062 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-image-registry, name: image-registry, uid: d3f5db75-c64f-496e-937b-26ce08f3d633]" 2025-12-08T17:47:23.525345103+00:00 stderr F I1208 17:47:23.525220 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-machine-config-operator, name: machine-config-controller, uid: 12093a0c-63e4-4953-9f6e-fac6da714800]" virtual=false 2025-12-08T17:47:23.547584593+00:00 stderr F I1208 17:47:23.547490 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-cluster-machine-approver, name: machine-approver, uid: 614226dc-6dfc-4b23-a9e9-54341ad46bc9]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.547584593+00:00 stderr F I1208 17:47:23.547566 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-machine-config-operator, name: machine-config-operator, uid: 7036b823-caf2-4fe7-9364-95791b080487]" virtual=false 2025-12-08T17:47:23.549702420+00:00 stderr F I1208 17:47:23.549666 1 garbagecollector.go:548] "item doesn't have an 
owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-kube-storage-version-migrator, name: migrator, uid: e04da894-1c98-4971-8b8f-a1f4a381dbaf]" 2025-12-08T17:47:23.549744091+00:00 stderr F I1208 17:47:23.549718 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-marketplace, name: marketplace-operator, uid: d268648d-aa1b-439b-844b-8e7f98ea08a3]" virtual=false 2025-12-08T17:47:23.551555838+00:00 stderr F I1208 17:47:23.551360 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-cluster-samples-operator, name: cluster-samples-operator, uid: 83079835-b3de-4de8-ad7d-f332ab909932]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.551555838+00:00 stderr F I1208 17:47:23.551403 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-multus, name: multus-admission-controller, uid: add425b8-cb71-4a29-b746-fade1ff57eee]" virtual=false 2025-12-08T17:47:23.555102570+00:00 stderr F I1208 17:47:23.555046 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-cluster-version, name: cluster-version-operator, uid: d5123c8d-63b9-4bc1-a443-acddb48f1d78]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.555102570+00:00 stderr F I1208 17:47:23.555092 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-7, uid: 36f6c219-47c9-453e-8adc-81a163318ca3]" virtual=false 2025-12-08T17:47:23.557607169+00:00 stderr F I1208 17:47:23.557557 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-config-operator, name: openshift-config-operator, uid: dc451fc9-e781-493f-8e7d-55e9072cc784]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.557607169+00:00 stderr F I1208 17:47:23.557588 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-operator-lifecycle-manager, name: olm-alert-rules, uid: d90e2096-395c-40fd-9ade-393efa2e6c5b]" virtual=false 2025-12-08T17:47:23.564729823+00:00 stderr F I1208 17:47:23.564678 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-console-operator, name: console-operator, uid: 4982b9f1-eaf4-44fa-a84a-bf9954aedcb1]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.564729823+00:00 stderr F I1208 17:47:23.564716 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" 
item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-console-operator, name: cluster-monitoring-prometheus-rules, uid: 91547e96-31b3-460d-80d1-83b02bd7d873]" virtual=false 2025-12-08T17:47:23.570268537+00:00 stderr F I1208 17:47:23.570182 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-console, name: downloads, uid: 61eec0c1-c955-4ca2-b98d-b0e62696a08c]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Console","name":"cluster","uid":"72c9b389-7361-48f0-8bf6-56fe26546245","controller":true}] 2025-12-08T17:47:23.570268537+00:00 stderr F I1208 17:47:23.570235 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-machine-config-operator, name: machine-config-daemon, uid: ace02251-92c6-4ead-9477-02801ce2fc3d]" virtual=false 2025-12-08T17:47:23.572643182+00:00 stderr F I1208 17:47:23.572590 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-console, name: console, uid: 3263a8c6-5259-42d6-a8da-588894b3887d]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Console","name":"cluster","uid":"72c9b389-7361-48f0-8bf6-56fe26546245","controller":true}] 2025-12-08T17:47:23.572661553+00:00 stderr F I1208 17:47:23.572642 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-ovn-kubernetes, name: networking-rules, uid: 31962b12-2774-4f24-9d5c-f55ad1ee66ac]" virtual=false 2025-12-08T17:47:23.573702125+00:00 stderr F I1208 17:47:23.573669 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-controller-manager-operator, name: openshift-controller-manager-operator, uid: 5effb0d2-94d8-48b7-8c69-e538f7848429]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.573716626+00:00 stderr F I1208 17:47:23.573697 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-image-registry, name: image-registry-operator-alerts, uid: c76d6124-d19c-4231-b946-23f2c04f09c7]" virtual=false 2025-12-08T17:47:23.583278937+00:00 stderr F I1208 17:47:23.583215 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-etcd-operator, name: etcd-operator, uid: 7bcc9069-5a71-4f51-8970-90dddeee56b2]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.583306228+00:00 stderr F I1208 17:47:23.583282 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-multus, name: prometheus-k8s-rules, uid: f66d3c2d-8031-4960-b90a-2518392083d6]" virtual=false 2025-12-08T17:47:23.583555926+00:00 stderr F I1208 17:47:23.583527 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" 
logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-dns-operator, name: dns-operator, uid: 75c9caa6-d284-4a97-95d2-2a04b51f093f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.583565706+00:00 stderr F I1208 17:47:23.583553 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-cluster-samples-operator, name: samples-operator-alerts, uid: b6a0a11d-7bbc-4d70-aa23-0e557edf8696]" virtual=false 2025-12-08T17:47:23.587327754+00:00 stderr F I1208 17:47:23.587270 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-image-registry, name: cluster-image-registry-operator, uid: a4c18a44-787c-4851-97ac-f3da87e8d0e3]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.587327754+00:00 stderr F I1208 17:47:23.587305 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-dns-operator, name: dns, uid: eb61aaa9-4e25-4e91-a620-88091d39e58f]" virtual=false 2025-12-08T17:47:23.590317658+00:00 stderr F I1208 17:47:23.590281 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-machine-config-operator, name: machine-config-controller, uid: 12093a0c-63e4-4953-9f6e-fac6da714800]" 2025-12-08T17:47:23.590377940+00:00 stderr F I1208 17:47:23.590331 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-network-console, name: networking-console-plugin, uid: e8047e30-a40e-4ced-ae42-eea4288c975a]" virtual=false 2025-12-08T17:47:23.593236310+00:00 stderr F I1208 17:47:23.593187 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-ingress-operator, name: ingress-operator, uid: dcd260b6-d741-4056-94e9-f063ec7db58c]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.593236310+00:00 stderr F I1208 17:47:23.593220 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-network-diagnostics, name: network-check-source, uid: e3b48335-28bd-49bf-9cf0-82069658b68a]" virtual=false 2025-12-08T17:47:23.597044611+00:00 stderr F I1208 17:47:23.597005 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-kube-apiserver-operator, name: kube-apiserver-operator, uid: 3a8705c5-b62b-40a4-8e43-30f0569fa490]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.597044611+00:00 stderr F I1208 17:47:23.597035 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, 
namespace: openshift-network-operator, name: network-operator, uid: 2c897060-d3cf-4d7f-8d38-ef464b7a697a]" virtual=false 2025-12-08T17:47:23.600437747+00:00 stderr F I1208 17:47:23.600378 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-kube-controller-manager-operator, name: kube-controller-manager-operator, uid: 09857aec-2c93-4f0d-9e38-a820bd5b8362]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.600456268+00:00 stderr F I1208 17:47:23.600431 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-operator-lifecycle-manager, name: catalog-operator, uid: bc11b984-7cfa-489a-9f9a-5f2c0648078f]" virtual=false 2025-12-08T17:47:23.603822143+00:00 stderr F I1208 17:47:23.603783 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-kube-scheduler-operator, name: openshift-kube-scheduler-operator, uid: c3ff943a-b570-4a98-8388-1f8a3280a85a]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.603822143+00:00 stderr F I1208 17:47:23.603809 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-operator-lifecycle-manager, name: olm-operator, uid: e6e8c1a2-3934-417b-9f46-0df6a0dbf8d9]" virtual=false 2025-12-08T17:47:23.608187991+00:00 stderr F I1208 17:47:23.608119 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-kube-storage-version-migrator-operator, name: kube-storage-version-migrator-operator, uid: af746821-921a-4842-94da-28c08769612a]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.608218622+00:00 stderr F I1208 17:47:23.608192 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-operator-lifecycle-manager, name: package-server-manager, uid: 8043f85f-0f9a-4179-b841-9d68d3642aae]" virtual=false 2025-12-08T17:47:23.620590621+00:00 stderr F I1208 17:47:23.620545 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-kube-controller-manager, name: revision-status-7, uid: 36f6c219-47c9-453e-8adc-81a163318ca3]" 2025-12-08T17:47:23.620626222+00:00 stderr F I1208 17:47:23.620585 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-ovn-kubernetes, name: ovnkube-control-plane, uid: 8bfd4bef-4292-4ca1-b90f-38cca09fb8f8]" virtual=false 2025-12-08T17:47:23.627749077+00:00 stderr F I1208 17:47:23.627703 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-machine-api, name: control-plane-machine-set-operator, uid: 
7b943ba9-3321-444f-9be4-e7b351a28efa]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.627779208+00:00 stderr F I1208 17:47:23.627750 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-service-ca-operator, name: service-ca-operator, uid: 1703c560-9cd5-4273-a6b7-22510bce9318]" virtual=false 2025-12-08T17:47:23.643987727+00:00 stderr F I1208 17:47:23.643895 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-machine-api, name: machine-api-operator, uid: 6e3281a2-74ca-4530-b743-ae9a62edcc78]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.643987727+00:00 stderr F I1208 17:47:23.643944 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Secret, namespace: openshift-operator-lifecycle-manager, name: pprof-cert, uid: 78c31177-72ae-4588-82df-59ba321a257b]" virtual=false 2025-12-08T17:47:23.682515921+00:00 stderr F I1208 17:47:23.682407 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-machine-config-operator, name: machine-config-operator, uid: 7036b823-caf2-4fe7-9364-95791b080487]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.682559072+00:00 stderr F I1208 17:47:23.682487 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-marketplace, name: marketplace-alert-rules, uid: dc8ef252-29f9-421b-9166-5d8a6fb2cb84]" virtual=false 2025-12-08T17:47:23.684335108+00:00 stderr F I1208 17:47:23.684058 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-marketplace, name: marketplace-operator, uid: d268648d-aa1b-439b-844b-8e7f98ea08a3]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.684335108+00:00 stderr F I1208 17:47:23.684129 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-machine-api, name: machine-api-operator-prometheus-rules, uid: ea38d425-d751-4336-a88d-4f52a8920d6e]" virtual=false 2025-12-08T17:47:23.685268817+00:00 stderr F I1208 17:47:23.685199 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-multus, name: multus-admission-controller, uid: add425b8-cb71-4a29-b746-fade1ff57eee]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.685285638+00:00 stderr F I1208 17:47:23.685263 1 garbagecollector.go:501] "Processing item" 
logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-cluster-machine-approver, name: machineapprover-rules, uid: 03750884-15c0-4b90-b038-07d324e83865]" virtual=false 2025-12-08T17:47:23.690920105+00:00 stderr F I1208 17:47:23.690849 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-operator-lifecycle-manager, name: olm-alert-rules, uid: d90e2096-395c-40fd-9ade-393efa2e6c5b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.690951096+00:00 stderr F I1208 17:47:23.690918 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-ingress-operator, name: ingress-operator, uid: 25ced67d-4442-487b-9828-7b52d14815c0]" virtual=false 2025-12-08T17:47:23.697481751+00:00 stderr F I1208 17:47:23.697411 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-console-operator, name: cluster-monitoring-prometheus-rules, uid: 91547e96-31b3-460d-80d1-83b02bd7d873]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.697500472+00:00 stderr F I1208 17:47:23.697469 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-ovn-kubernetes, name: master-rules, uid: a8f235d8-6055-4051-84b4-f75387ba159c]" virtual=false 2025-12-08T17:47:23.700217057+00:00 stderr F I1208 17:47:23.700167 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-machine-config-operator, name: machine-config-daemon, uid: ace02251-92c6-4ead-9477-02801ce2fc3d]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.700242238+00:00 stderr F I1208 17:47:23.700220 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-apiserver, name: revision-status-1, uid: 98275840-6123-4655-aacc-f5208af82455]" virtual=false 2025-12-08T17:47:23.703525192+00:00 stderr F I1208 17:47:23.703478 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-ovn-kubernetes, name: networking-rules, uid: 31962b12-2774-4f24-9d5c-f55ad1ee66ac]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.703525192+00:00 stderr F I1208 17:47:23.703507 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-oauth-apiserver, name: revision-status-1, uid: 444fbf90-75af-4a73-9695-57f0d8acfffe]" virtual=false 2025-12-08T17:47:23.707454946+00:00 stderr F 
I1208 17:47:23.707415 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-image-registry, name: image-registry-operator-alerts, uid: c76d6124-d19c-4231-b946-23f2c04f09c7]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.713472725+00:00 stderr F I1208 17:47:23.713422 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-multus, name: prometheus-k8s-rules, uid: f66d3c2d-8031-4960-b90a-2518392083d6]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.716803540+00:00 stderr F I1208 17:47:23.716766 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-cluster-samples-operator, name: samples-operator-alerts, uid: b6a0a11d-7bbc-4d70-aa23-0e557edf8696]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.720151916+00:00 stderr F I1208 17:47:23.720110 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-dns-operator, name: dns, uid: eb61aaa9-4e25-4e91-a620-88091d39e58f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.723714988+00:00 stderr F I1208 17:47:23.723657 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-network-console, name: networking-console-plugin, uid: e8047e30-a40e-4ced-ae42-eea4288c975a]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.726614809+00:00 stderr F I1208 17:47:23.726571 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-network-diagnostics, name: network-check-source, uid: e3b48335-28bd-49bf-9cf0-82069658b68a]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.729973055+00:00 stderr F I1208 17:47:23.729933 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-network-operator, name: network-operator, uid: 2c897060-d3cf-4d7f-8d38-ef464b7a697a]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.733121013+00:00 stderr F I1208 
17:47:23.733061 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-operator-lifecycle-manager, name: olm-operator, uid: e6e8c1a2-3934-417b-9f46-0df6a0dbf8d9]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.740725953+00:00 stderr F I1208 17:47:23.740436 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-operator-lifecycle-manager, name: catalog-operator, uid: bc11b984-7cfa-489a-9f9a-5f2c0648078f]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.747723273+00:00 stderr F I1208 17:47:23.746655 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-operator-lifecycle-manager, name: package-server-manager, uid: 8043f85f-0f9a-4179-b841-9d68d3642aae]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.760991411+00:00 stderr F I1208 17:47:23.758649 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-ovn-kubernetes, name: ovnkube-control-plane, uid: 8bfd4bef-4292-4ca1-b90f-38cca09fb8f8]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:47:23.761978002+00:00 stderr F I1208 17:47:23.761457 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[apps/v1/Deployment, namespace: openshift-service-ca-operator, name: service-ca-operator, uid: 1703c560-9cd5-4273-a6b7-22510bce9318]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.766519045+00:00 stderr F I1208 17:47:23.766476 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-apiserver, name: revision-status-1, uid: 98275840-6123-4655-aacc-f5208af82455]" 2025-12-08T17:47:23.773454854+00:00 stderr F I1208 17:47:23.773391 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ConfigMap, namespace: openshift-oauth-apiserver, name: revision-status-1, uid: 444fbf90-75af-4a73-9695-57f0d8acfffe]" 2025-12-08T17:47:23.776674235+00:00 stderr F I1208 17:47:23.776603 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[v1/Secret, namespace: openshift-operator-lifecycle-manager, name: pprof-cert, uid: 78c31177-72ae-4588-82df-59ba321a257b]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503"}] 2025-12-08T17:47:23.777528382+00:00 stderr F I1208 
17:47:23.777478 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-marketplace, name: marketplace-alert-rules, uid: dc8ef252-29f9-421b-9166-5d8a6fb2cb84]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.780786694+00:00 stderr F I1208 17:47:23.780702 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-machine-api, name: machine-api-operator-prometheus-rules, uid: ea38d425-d751-4336-a88d-4f52a8920d6e]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.784016136+00:00 stderr F I1208 17:47:23.783957 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-cluster-machine-approver, name: machineapprover-rules, uid: 03750884-15c0-4b90-b038-07d324e83865]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.787716353+00:00 stderr F I1208 17:47:23.787653 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-ingress-operator, name: ingress-operator, uid: 25ced67d-4442-487b-9828-7b52d14815c0]" owner=[{"apiVersion":"config.openshift.io/v1","kind":"ClusterVersion","name":"version","uid":"81a42ceb-dbf9-4bf3-9fcd-7b6d697f2503","controller":true}] 2025-12-08T17:47:23.789658333+00:00 stderr F I1208 17:47:23.789617 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[monitoring.coreos.com/v1/PrometheusRule, namespace: openshift-ovn-kubernetes, name: master-rules, uid: a8f235d8-6055-4051-84b4-f75387ba159c]" owner=[{"apiVersion":"operator.openshift.io/v1","kind":"Network","name":"cluster","uid":"d56acc66-d25c-4e5c-aa52-5418dd270c94","controller":true,"blockOwnerDeletion":true}] 2025-12-08T17:48:03.250026010+00:00 stderr F I1208 17:48:03.249482 1 replica_set.go:626] "Too many replicas" logger="replicaset-controller" replicaSet="openshift-route-controller-manager/route-controller-manager-6975b9f87f" need=0 deleting=1 2025-12-08T17:48:03.250026010+00:00 stderr F I1208 17:48:03.250002 1 replica_set.go:253] "Found related ReplicaSets" logger="replicaset-controller" replicaSet="openshift-route-controller-manager/route-controller-manager-6975b9f87f" 
relatedReplicaSets=["openshift-route-controller-manager/route-controller-manager-66bd94d94f","openshift-route-controller-manager/route-controller-manager-76558c69dc","openshift-route-controller-manager/route-controller-manager-776cdc94d6","openshift-route-controller-manager/route-controller-manager-7d86df95df","openshift-route-controller-manager/route-controller-manager-c47fcf799","openshift-route-controller-manager/route-controller-manager-6975b9f87f","openshift-route-controller-manager/route-controller-manager-58f5cf7b86","openshift-route-controller-manager/route-controller-manager-5fccdd79b9","openshift-route-controller-manager/route-controller-manager-6bc8749ddd","openshift-route-controller-manager/route-controller-manager-6d7f4ff85d","openshift-route-controller-manager/route-controller-manager-7cc45857b6","openshift-route-controller-manager/route-controller-manager-7dd6d6d8c8"] 2025-12-08T17:48:03.250421502+00:00 stderr F I1208 17:48:03.250142 1 controller_utils.go:618] "Deleting pod" logger="replicaset-controller" controller="route-controller-manager-6975b9f87f" pod="openshift-route-controller-manager/route-controller-manager-6975b9f87f-8vkdj" 2025-12-08T17:48:03.262392645+00:00 stderr F I1208 17:48:03.259013 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-route-controller-manager/route-controller-manager" err="Operation cannot be fulfilled on deployments.apps \"route-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:48:03.274049697+00:00 stderr F I1208 17:48:03.270298 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-route-controller-manager/route-controller-manager-7dd6d6d8c8" need=1 creating=1 2025-12-08T17:48:03.285500914+00:00 stderr F I1208 17:48:03.285442 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-route-controller-manager/route-controller-manager" err="Operation cannot be fulfilled on deployments.apps \"route-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:48:09.356139994+00:00 stderr F I1208 17:48:09.355280 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-marketplace/marketplace-operator-547dbd544d" need=1 creating=1 2025-12-08T17:53:19.381783415+00:00 stderr F I1208 17:53:19.381176 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openstack-operators, name: default, uid: 71d07df7-3880-40a3-98fb-954e06d37bb4]" virtual=true 2025-12-08T17:53:19.381783415+00:00 stderr F I1208 17:53:19.381534 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openstack-operators, name: builder, uid: 55d60624-7b38-4749-899b-25dd637945ba]" virtual=false 2025-12-08T17:53:19.400091772+00:00 stderr F I1208 17:53:19.399975 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openstack-operators, name: builder, uid: 55d60624-7b38-4749-899b-25dd637945ba]" 2025-12-08T17:53:19.400467462+00:00 stderr F I1208 17:53:19.400402 1 garbagecollector.go:548] "item doesn't have an owner, continue on next item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openstack-operators, 
name: default, uid: 71d07df7-3880-40a3-98fb-954e06d37bb4]" 2025-12-08T17:53:41.124213749+00:00 stderr F I1208 17:53:41.124038 1 replica_set.go:626] "Too many replicas" logger="replicaset-controller" replicaSet="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988" need=0 deleting=1 2025-12-08T17:53:41.124213749+00:00 stderr F I1208 17:53:41.124143 1 replica_set.go:253] "Found related ReplicaSets" logger="replicaset-controller" replicaSet="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988" relatedReplicaSets=["openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48","openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988"] 2025-12-08T17:53:41.124286581+00:00 stderr F I1208 17:53:41.124235 1 controller_utils.go:618] "Deleting pod" logger="replicaset-controller" controller="ovnkube-control-plane-57b78d8988" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-x68jp" 2025-12-08T17:53:41.138691472+00:00 stderr F I1208 17:53:41.138576 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-ovn-kubernetes/ovnkube-control-plane" err="Operation cannot be fulfilled on deployments.apps \"ovnkube-control-plane\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:53:41.171214607+00:00 stderr F I1208 17:53:41.171074 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48" need=1 creating=1 2025-12-08T17:53:41.330822266+00:00 stderr F I1208 17:53:41.330670 1 controller_utils.go:618] "Deleting pod" logger="daemonset-controller" controller="ovnkube-node" pod="openshift-ovn-kubernetes/ovnkube-node-wr4x4" 2025-12-08T17:54:50.014221069+00:00 stderr F I1208 17:54:50.014108 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-marketplace, name: redhat-marketplace, uid: 298d54b6-a7d4-42db-8228-ccf2494c46fb]" virtual=false 2025-12-08T17:54:50.014221069+00:00 stderr F I1208 17:54:50.014141 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-marketplace, name: redhat-marketplace, uid: 4349139a-211f-4b82-a867-d0f135c54aa4]" virtual=false 2025-12-08T17:54:50.014303261+00:00 stderr F I1208 17:54:50.014228 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-marketplace, name: redhat-marketplace-grpc-server, uid: d8e2a367-fe9f-43f2-8b9f-a3e878703116]" virtual=false 2025-12-08T17:54:50.014303261+00:00 stderr F I1208 17:54:50.014262 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-marketplace, name: redhat-marketplace-unpack-bundles, uid: 1bce222b-2fd6-49f3-b48e-0645dc5ba923]" virtual=false 2025-12-08T17:54:50.014427325+00:00 stderr F I1208 17:54:50.014070 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Pod, namespace: openshift-marketplace, name: redhat-marketplace-xp5vr, uid: c9416e49-5134-45de-9eeb-a15be7fdbf63]" virtual=false 2025-12-08T17:54:50.026015937+00:00 stderr F I1208 17:54:50.025918 1 garbagecollector.go:640] "Deleting item" logger="garbage-collector-controller" item="[v1/ServiceAccount, namespace: openshift-marketplace, name: redhat-marketplace, uid: 4349139a-211f-4b82-a867-d0f135c54aa4]" 
propagationPolicy="Background" 2025-12-08T17:54:50.026155911+00:00 stderr F I1208 17:54:50.026117 1 garbagecollector.go:640] "Deleting item" logger="garbage-collector-controller" item="[v1/Service, namespace: openshift-marketplace, name: redhat-marketplace, uid: 298d54b6-a7d4-42db-8228-ccf2494c46fb]" propagationPolicy="Background" 2025-12-08T17:54:50.026322285+00:00 stderr F I1208 17:54:50.026291 1 garbagecollector.go:640] "Deleting item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-marketplace, name: redhat-marketplace-grpc-server, uid: d8e2a367-fe9f-43f2-8b9f-a3e878703116]" propagationPolicy="Background" 2025-12-08T17:54:50.026586752+00:00 stderr F I1208 17:54:50.026563 1 garbagecollector.go:640] "Deleting item" logger="garbage-collector-controller" item="[networking.k8s.io/v1/NetworkPolicy, namespace: openshift-marketplace, name: redhat-marketplace-unpack-bundles, uid: 1bce222b-2fd6-49f3-b48e-0645dc5ba923]" propagationPolicy="Background" 2025-12-08T17:54:50.028198515+00:00 stderr F I1208 17:54:50.028162 1 garbagecollector.go:640] "Deleting item" logger="garbage-collector-controller" item="[v1/Pod, namespace: openshift-marketplace, name: redhat-marketplace-xp5vr, uid: c9416e49-5134-45de-9eeb-a15be7fdbf63]" propagationPolicy="Background" 2025-12-08T17:54:50.039746177+00:00 stderr F I1208 17:54:50.037140 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[discovery.k8s.io/v1/EndpointSlice, namespace: openshift-marketplace, name: redhat-marketplace-d7cgr, uid: 0e4e2da6-858b-498e-943c-9aabf723c124]" virtual=false 2025-12-08T17:54:50.039746177+00:00 stderr F I1208 17:54:50.037175 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[v1/Secret, namespace: openshift-marketplace, name: redhat-marketplace-dockercfg-gg4w7, uid: b6c8f41a-3f43-4cfb-853f-5068aaaac8e3]" virtual=false 2025-12-08T17:54:50.041320268+00:00 stderr F I1208 17:54:50.041219 1 garbagecollector.go:640] "Deleting item" logger="garbage-collector-controller" item="[v1/Secret, namespace: openshift-marketplace, name: redhat-marketplace-dockercfg-gg4w7, uid: b6c8f41a-3f43-4cfb-853f-5068aaaac8e3]" propagationPolicy="Background" 2025-12-08T17:54:50.041626167+00:00 stderr F I1208 17:54:50.041593 1 garbagecollector.go:640] "Deleting item" logger="garbage-collector-controller" item="[discovery.k8s.io/v1/EndpointSlice, namespace: openshift-marketplace, name: redhat-marketplace-d7cgr, uid: 0e4e2da6-858b-498e-943c-9aabf723c124]" propagationPolicy="Background" 2025-12-08T17:54:51.164924356+00:00 stderr F I1208 17:54:51.164353 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-image-registry/image-registry-5d9d95bf5b" need=1 creating=1 2025-12-08T17:54:51.185684845+00:00 stderr F I1208 17:54:51.185600 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-image-registry/image-registry" err="Operation cannot be fulfilled on deployments.apps \"image-registry\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:54:53.178092063+00:00 stderr F E1208 17:54:53.178013 1 publisher.go:146] "Unhandled Error" err="syncing \"cert-manager-operator\" failed: Operation cannot be fulfilled on configmaps \"openshift-service-ca.crt\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 
2025-12-08T17:54:56.111593717+00:00 stderr F I1208 17:54:56.111502 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="0s" 2025-12-08T17:54:56.111658899+00:00 stderr F I1208 17:54:56.111626 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" nextSyncIn="9m59.999996649s" 2025-12-08T17:54:56.131064021+00:00 stderr F I1208 17:54:56.130984 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:54:56.137393061+00:00 stderr F I1208 17:54:56.137293 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:54:56.137393061+00:00 stderr F I1208 17:54:56.137345 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:54:56.147163284+00:00 stderr F I1208 17:54:56.147124 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:54:57.131488653+00:00 stderr F I1208 17:54:57.131378 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" nextSyncIn="9m58.86863404s" 2025-12-08T17:54:57.560078897+00:00 stderr F I1208 17:54:57.559708 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:54:58.061434139+00:00 stderr F I1208 17:54:58.060662 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:54:58.909464391+00:00 stderr F I1208 17:54:58.560157 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" nextSyncIn="9m57.43985575s" 2025-12-08T17:54:59.330101641+00:00 stderr F I1208 17:54:59.329712 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:55:00.334023417+00:00 stderr F I1208 17:55:00.330961 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" nextSyncIn="9m55.669052656s" 2025-12-08T17:55:00.349847923+00:00 stderr F I1208 17:55:00.349669 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:55:01.350024099+00:00 stderr F I1208 17:55:01.349937 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" nextSyncIn="9m54.650079634s" 2025-12-08T17:55:02.368827097+00:00 stderr F I1208 17:55:02.368737 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:55:03.369006983+00:00 stderr F I1208 17:55:03.367385 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:55:03.369857395+00:00 stderr F I1208 17:55:03.369764 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" nextSyncIn="9m52.630245967s" 2025-12-08T17:55:04.112032388+00:00 stderr F I1208 17:55:04.111967 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="0s" 2025-12-08T17:55:04.112147681+00:00 stderr F I1208 17:55:04.112106 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" nextSyncIn="9m59.99999519s" 2025-12-08T17:55:04.129019295+00:00 stderr F I1208 17:55:04.128111 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:04.133445255+00:00 stderr F I1208 17:55:04.133337 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:04.135421277+00:00 stderr F I1208 17:55:04.135358 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:04.147598226+00:00 stderr F I1208 17:55:04.147520 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:04.185924646+00:00 stderr F I1208 17:55:04.185184 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:04.511604832+00:00 stderr F I1208 17:55:04.511523 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="0s" 2025-12-08T17:55:04.511651873+00:00 stderr F I1208 17:55:04.511633 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" nextSyncIn="9m59.9999964s" 2025-12-08T17:55:04.520001378+00:00 stderr F I1208 17:55:04.519927 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:04.529732969+00:00 stderr F I1208 17:55:04.529034 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:04.529732969+00:00 stderr F I1208 17:55:04.529249 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:04.542662667+00:00 stderr F I1208 17:55:04.542516 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:04.660387135+00:00 stderr F I1208 17:55:04.660276 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:04.680011683+00:00 stderr F I1208 17:55:04.679946 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:55:04.972025892+00:00 stderr F I1208 17:55:04.971933 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:05.128812641+00:00 stderr F I1208 17:55:05.128726 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" nextSyncIn="9m58.871293072s" 2025-12-08T17:55:05.187987433+00:00 stderr F I1208 17:55:05.187910 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:05.385899950+00:00 stderr F I1208 17:55:05.385748 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:05.408007164+00:00 stderr F I1208 17:55:05.407926 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:05.521306183+00:00 stderr F I1208 17:55:05.521173 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" nextSyncIn="9m58.478844741s" 2025-12-08T17:55:05.682624055+00:00 stderr F I1208 17:55:05.681070 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" nextSyncIn="9m50.318948768s" 2025-12-08T17:55:05.690727322+00:00 stderr F I1208 17:55:05.690538 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:55:05.709901398+00:00 stderr F I1208 17:55:05.709785 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5" delay="1s" 2025-12-08T17:55:06.387019251+00:00 stderr F I1208 17:55:06.386067 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" nextSyncIn="9m57.613955545s" 2025-12-08T17:55:06.405408696+00:00 stderr F I1208 17:55:06.405354 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:06.420673636+00:00 stderr F I1208 17:55:06.420596 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:07.406978169+00:00 stderr F I1208 17:55:07.406852 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" nextSyncIn="9m56.593171595s" 2025-12-08T17:55:07.421624013+00:00 stderr F I1208 17:55:07.421506 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" nextSyncIn="9m56.578522931s" 2025-12-08T17:55:07.431714235+00:00 stderr F I1208 17:55:07.431612 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:08.433958966+00:00 stderr F I1208 17:55:08.431959 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" nextSyncIn="9m55.568056228s" 2025-12-08T17:55:08.759939169+00:00 stderr F I1208 17:55:08.757436 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:09.757938395+00:00 stderr F I1208 17:55:09.757817 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" nextSyncIn="9m54.242200049s" 2025-12-08T17:55:09.774402148+00:00 stderr F I1208 17:55:09.774324 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:09.790074960+00:00 stderr F I1208 17:55:09.789900 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8" delay="1s" 2025-12-08T17:55:12.473250637+00:00 stderr F I1208 17:55:12.471449 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:13.334577765+00:00 stderr F I1208 17:55:13.334476 1 replica_set.go:626] "Too many replicas" logger="replicaset-controller" replicaSet="openshift-image-registry/image-registry-66587d64c8" need=0 deleting=1 2025-12-08T17:55:13.334577765+00:00 stderr F I1208 17:55:13.334546 1 replica_set.go:253] "Found related ReplicaSets" logger="replicaset-controller" replicaSet="openshift-image-registry/image-registry-66587d64c8" relatedReplicaSets=["openshift-image-registry/image-registry-66587d64c8","openshift-image-registry/image-registry-7994588b6","openshift-image-registry/image-registry-79b89679dd","openshift-image-registry/image-registry-868c685c8f","openshift-image-registry/image-registry-5d9d95bf5b"] 2025-12-08T17:55:13.334626726+00:00 stderr F I1208 17:55:13.334612 1 controller_utils.go:618] "Deleting pod" logger="replicaset-controller" controller="image-registry-66587d64c8" pod="openshift-image-registry/image-registry-66587d64c8-s6hn4" 2025-12-08T17:55:13.408965597+00:00 stderr F I1208 17:55:13.408272 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-image-registry/image-registry" err="Operation cannot be fulfilled on deployments.apps \"image-registry\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:13.475020194+00:00 stderr F I1208 17:55:13.472254 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" nextSyncIn="9m50.527757731s" 2025-12-08T17:55:13.491586300+00:00 stderr F I1208 17:55:13.490834 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:14.494139860+00:00 stderr F I1208 17:55:14.494063 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" nextSyncIn="9m49.505948873s" 2025-12-08T17:55:14.780539877+00:00 stderr F I1208 17:55:14.780463 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:15.587298507+00:00 stderr F I1208 17:55:15.587187 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-operators/obo-prometheus-operator-86648f486b" need=1 creating=1 2025-12-08T17:55:15.599122865+00:00 stderr F I1208 17:55:15.599040 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-operators/obo-prometheus-operator" err="Operation cannot be fulfilled on deployments.apps \"obo-prometheus-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:15.610007898+00:00 stderr F I1208 17:55:15.607939 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-operators/obo-prometheus-operator" err="Operation cannot be fulfilled on deployments.apps \"obo-prometheus-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:15.713684379+00:00 stderr F I1208 17:55:15.713135 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4" need=2 creating=2 2025-12-08T17:55:15.724401587+00:00 stderr F I1208 17:55:15.724350 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-operators/obo-prometheus-operator-admission-webhook" err="Operation cannot be fulfilled on deployments.apps \"obo-prometheus-operator-admission-webhook\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:15.736206225+00:00 stderr F I1208 17:55:15.736148 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-operators/obo-prometheus-operator-admission-webhook" err="Operation cannot be fulfilled on deployments.apps \"obo-prometheus-operator-admission-webhook\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:15.766963302+00:00 stderr F I1208 17:55:15.766774 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-operators/obo-prometheus-operator-admission-webhook" err="Operation cannot be fulfilled on deployments.apps \"obo-prometheus-operator-admission-webhook\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:15.783981900+00:00 stderr F I1208 17:55:15.781388 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" nextSyncIn="9m48.21862446s" 2025-12-08T17:55:15.801463011+00:00 stderr F I1208 17:55:15.801391 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:15.813048343+00:00 stderr F I1208 17:55:15.812979 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3" delay="1s" 2025-12-08T17:55:15.910048103+00:00 stderr F I1208 17:55:15.909169 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-operators/observability-operator-78c97476f4" need=1 creating=1 2025-12-08T17:55:15.921983345+00:00 stderr F I1208 17:55:15.921403 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-operators/observability-operator" err="Operation cannot be fulfilled on deployments.apps \"observability-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:15.995812971+00:00 stderr F I1208 17:55:15.995717 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-operators/observability-operator" err="Operation cannot be fulfilled on deployments.apps \"observability-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:16.158320725+00:00 stderr F I1208 17:55:16.158251 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="openshift-operators/perses-operator-68bdb49cbf" need=1 creating=1 2025-12-08T17:55:16.186718139+00:00 stderr F I1208 17:55:16.186648 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="openshift-operators/perses-operator" err="Operation cannot be fulfilled on deployments.apps \"perses-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:16.748925739+00:00 stderr F I1208 17:55:16.745415 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/elastic-operator-c9c86658" need=1 creating=1 2025-12-08T17:55:16.763662635+00:00 stderr F I1208 17:55:16.763593 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/elastic-operator" err="Operation cannot be fulfilled on deployments.apps \"elastic-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:17.277040420+00:00 stderr F I1208 17:55:17.276767 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/elastic-operator" err="Operation cannot be fulfilled on deployments.apps \"elastic-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:18.033651162+00:00 stderr F I1208 17:55:18.031939 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/elastic-operator" err="Operation cannot be fulfilled on deployments.apps \"elastic-operator\": the object has been modified; please apply your changes to the latest 
version and try again" 2025-12-08T17:55:18.055470289+00:00 stderr F I1208 17:55:18.051069 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/elastic-operator" err="Operation cannot be fulfilled on deployments.apps \"elastic-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:22.211805061+00:00 stderr F I1208 17:55:22.211718 1 resource_quota_controller.go:476] "syncing resource quota controller with updated resources from discovery" logger="resourcequota-controller" diff="added: [agent.k8s.elastic.co/v1alpha1, Resource=agents apm.k8s.elastic.co/v1, Resource=apmservers autoscaling.k8s.elastic.co/v1alpha1, Resource=elasticsearchautoscalers beat.k8s.elastic.co/v1beta1, Resource=beats elasticsearch.k8s.elastic.co/v1, Resource=elasticsearches enterprisesearch.k8s.elastic.co/v1, Resource=enterprisesearches kibana.k8s.elastic.co/v1, Resource=kibanas logstash.k8s.elastic.co/v1alpha1, Resource=logstashes maps.k8s.elastic.co/v1alpha1, Resource=elasticmapsservers monitoring.rhobs/v1, Resource=alertmanagers monitoring.rhobs/v1, Resource=podmonitors monitoring.rhobs/v1, Resource=probes monitoring.rhobs/v1, Resource=prometheuses monitoring.rhobs/v1, Resource=prometheusrules monitoring.rhobs/v1, Resource=servicemonitors monitoring.rhobs/v1, Resource=thanosrulers monitoring.rhobs/v1alpha1, Resource=alertmanagerconfigs monitoring.rhobs/v1alpha1, Resource=monitoringstacks monitoring.rhobs/v1alpha1, Resource=prometheusagents monitoring.rhobs/v1alpha1, Resource=scrapeconfigs monitoring.rhobs/v1alpha1, Resource=thanosqueriers observability.openshift.io/v1alpha1, Resource=observabilityinstallers perses.dev/v1alpha1, Resource=perses perses.dev/v1alpha1, Resource=persesdashboards perses.dev/v1alpha1, Resource=persesdatasources stackconfigpolicy.k8s.elastic.co/v1alpha1, Resource=stackconfigpolicies], removed: []" 2025-12-08T17:55:22.211871663+00:00 stderr F I1208 17:55:22.211846 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="persesdatasources.perses.dev" 2025-12-08T17:55:22.214699739+00:00 stderr F I1208 17:55:22.214655 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="probes.monitoring.rhobs" 2025-12-08T17:55:22.214725149+00:00 stderr F I1208 17:55:22.214715 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="alertmanagers.monitoring.rhobs" 2025-12-08T17:55:22.214775191+00:00 stderr F I1208 17:55:22.214750 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="prometheusrules.monitoring.rhobs" 2025-12-08T17:55:22.214787611+00:00 stderr F I1208 17:55:22.214779 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="alertmanagerconfigs.monitoring.rhobs" 2025-12-08T17:55:22.214831192+00:00 stderr F I1208 17:55:22.214808 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="thanosrulers.monitoring.rhobs" 2025-12-08T17:55:22.214859663+00:00 stderr F I1208 17:55:22.214838 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="observabilityinstallers.observability.openshift.io" 
2025-12-08T17:55:22.214868963+00:00 stderr F I1208 17:55:22.214860 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="kibanas.kibana.k8s.elastic.co" 2025-12-08T17:55:22.214913264+00:00 stderr F I1208 17:55:22.214892 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="elasticmapsservers.maps.k8s.elastic.co" 2025-12-08T17:55:22.214935635+00:00 stderr F I1208 17:55:22.214917 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="prometheuses.monitoring.rhobs" 2025-12-08T17:55:22.214944925+00:00 stderr F I1208 17:55:22.214934 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="thanosqueriers.monitoring.rhobs" 2025-12-08T17:55:22.214984256+00:00 stderr F I1208 17:55:22.214962 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="beats.beat.k8s.elastic.co" 2025-12-08T17:55:22.214994197+00:00 stderr F I1208 17:55:22.214983 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="elasticsearches.elasticsearch.k8s.elastic.co" 2025-12-08T17:55:22.215020137+00:00 stderr F I1208 17:55:22.215000 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="elasticsearchautoscalers.autoscaling.k8s.elastic.co" 2025-12-08T17:55:22.215062008+00:00 stderr F I1208 17:55:22.215041 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="logstashes.logstash.k8s.elastic.co" 2025-12-08T17:55:22.215074819+00:00 stderr F I1208 17:55:22.215067 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="apmservers.apm.k8s.elastic.co" 2025-12-08T17:55:22.215105700+00:00 stderr F I1208 17:55:22.215085 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="prometheusagents.monitoring.rhobs" 2025-12-08T17:55:22.215115800+00:00 stderr F I1208 17:55:22.215104 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="monitoringstacks.monitoring.rhobs" 2025-12-08T17:55:22.215143211+00:00 stderr F I1208 17:55:22.215121 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="scrapeconfigs.monitoring.rhobs" 2025-12-08T17:55:22.215158531+00:00 stderr F I1208 17:55:22.215147 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="persesdashboards.perses.dev" 2025-12-08T17:55:22.215197392+00:00 stderr F I1208 17:55:22.215172 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="servicemonitors.monitoring.rhobs" 2025-12-08T17:55:22.215197392+00:00 stderr F I1208 17:55:22.215193 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="perses.perses.dev" 2025-12-08T17:55:22.215232143+00:00 stderr F I1208 17:55:22.215208 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" 
resource="stackconfigpolicies.stackconfigpolicy.k8s.elastic.co" 2025-12-08T17:55:22.215259334+00:00 stderr F I1208 17:55:22.215238 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="agents.agent.k8s.elastic.co" 2025-12-08T17:55:22.215269344+00:00 stderr F I1208 17:55:22.215260 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="enterprisesearches.enterprisesearch.k8s.elastic.co" 2025-12-08T17:55:22.215300835+00:00 stderr F I1208 17:55:22.215277 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="podmonitors.monitoring.rhobs" 2025-12-08T17:55:22.215787018+00:00 stderr F I1208 17:55:22.215758 1 shared_informer.go:350] "Waiting for caches to sync" controller="resource quota" 2025-12-08T17:55:22.216990601+00:00 stderr F I1208 17:55:22.216953 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.217011201+00:00 stderr F I1208 17:55:22.216987 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.217174526+00:00 stderr F I1208 17:55:22.217149 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.218110330+00:00 stderr F I1208 17:55:22.218085 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.220696140+00:00 stderr F I1208 17:55:22.220643 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.220831694+00:00 stderr F I1208 17:55:22.220807 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.221049009+00:00 stderr F I1208 17:55:22.221016 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.221194743+00:00 stderr F I1208 17:55:22.221164 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.221323267+00:00 stderr F I1208 17:55:22.221300 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.221376078+00:00 stderr F I1208 17:55:22.221347 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.221499771+00:00 stderr F I1208 17:55:22.221020 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.221583624+00:00 stderr F I1208 17:55:22.221325 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.221682836+00:00 stderr F I1208 17:55:22.221646 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" 
reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.221808930+00:00 stderr F I1208 17:55:22.221790 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.221934134+00:00 stderr F I1208 17:55:22.221905 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.222103188+00:00 stderr F I1208 17:55:22.222083 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.222290923+00:00 stderr F I1208 17:55:22.222272 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.222680694+00:00 stderr F I1208 17:55:22.222366 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.222835828+00:00 stderr F I1208 17:55:22.222397 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.223016653+00:00 stderr F I1208 17:55:22.222426 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.223170477+00:00 stderr F I1208 17:55:22.222456 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.226912077+00:00 stderr F I1208 17:55:22.222483 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.227009200+00:00 stderr F I1208 17:55:22.222506 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.227070271+00:00 stderr F I1208 17:55:22.222532 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.239224609+00:00 stderr F I1208 17:55:22.239165 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.246417092+00:00 stderr F I1208 17:55:22.246356 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.316595591+00:00 stderr F I1208 17:55:22.316529 1 shared_informer.go:357] "Caches are synced" controller="resource quota" 2025-12-08T17:55:22.316595591+00:00 stderr F I1208 17:55:22.316552 1 resource_quota_controller.go:502] "synced quota controller" logger="resourcequota-controller" 2025-12-08T17:55:22.331442361+00:00 stderr F I1208 17:55:22.331317 1 garbagecollector.go:203] "syncing garbage collector with updated resources from discovery" logger="garbage-collector-controller" diff="added: [agent.k8s.elastic.co/v1alpha1, Resource=agents apm.k8s.elastic.co/v1, Resource=apmservers autoscaling.k8s.elastic.co/v1alpha1, Resource=elasticsearchautoscalers beat.k8s.elastic.co/v1beta1, Resource=beats elasticsearch.k8s.elastic.co/v1, 
Resource=elasticsearches enterprisesearch.k8s.elastic.co/v1, Resource=enterprisesearches kibana.k8s.elastic.co/v1, Resource=kibanas logstash.k8s.elastic.co/v1alpha1, Resource=logstashes maps.k8s.elastic.co/v1alpha1, Resource=elasticmapsservers monitoring.rhobs/v1, Resource=alertmanagers monitoring.rhobs/v1, Resource=podmonitors monitoring.rhobs/v1, Resource=probes monitoring.rhobs/v1, Resource=prometheuses monitoring.rhobs/v1, Resource=prometheusrules monitoring.rhobs/v1, Resource=servicemonitors monitoring.rhobs/v1, Resource=thanosrulers monitoring.rhobs/v1alpha1, Resource=alertmanagerconfigs monitoring.rhobs/v1alpha1, Resource=monitoringstacks monitoring.rhobs/v1alpha1, Resource=prometheusagents monitoring.rhobs/v1alpha1, Resource=scrapeconfigs monitoring.rhobs/v1alpha1, Resource=thanosqueriers observability.openshift.io/v1alpha1, Resource=observabilityinstallers observability.openshift.io/v1alpha1, Resource=uiplugins perses.dev/v1alpha1, Resource=perses perses.dev/v1alpha1, Resource=persesdashboards perses.dev/v1alpha1, Resource=persesdatasources stackconfigpolicy.k8s.elastic.co/v1alpha1, Resource=stackconfigpolicies], removed: []" 2025-12-08T17:55:22.342465478+00:00 stderr F I1208 17:55:22.342409 1 shared_informer.go:350] "Waiting for caches to sync" controller="garbage collector" 2025-12-08T17:55:22.356619828+00:00 stderr F I1208 17:55:22.356510 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:22.443663171+00:00 stderr F I1208 17:55:22.443610 1 shared_informer.go:357] "Caches are synced" controller="garbage collector" 2025-12-08T17:55:22.443763693+00:00 stderr F I1208 17:55:22.443748 1 garbagecollector.go:235] "synced garbage collector" logger="garbage-collector-controller" 2025-12-08T17:55:29.763294413+00:00 stderr F I1208 17:55:29.763204 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager-operator/cert-manager-operator-controller-manager-64c74584c4" need=1 creating=1 2025-12-08T17:55:29.779839636+00:00 stderr F I1208 17:55:29.779783 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="cert-manager-operator/cert-manager-operator-controller-manager" err="Operation cannot be fulfilled on deployments.apps \"cert-manager-operator-controller-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:38.318114655+00:00 stderr F I1208 17:55:38.318037 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-admin, uid: c41f9082-7c51-4533-9c9a-3714b644db6f]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certificaterequests.cert-manager.io, uid: 37e3a1c4-6031-4a55-a58d-7b4a6b9603bd]" 2025-12-08T17:55:38.318164046+00:00 stderr F I1208 17:55:38.318097 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-admin, uid: c41f9082-7c51-4533-9c9a-3714b644db6f]" virtual=false 2025-12-08T17:55:38.324068808+00:00 stderr F I1208 17:55:38.323690 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" 
item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-edit, uid: 5d6b5c49-6576-4c97-9b4b-af4655930902]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certificaterequests.cert-manager.io, uid: 37e3a1c4-6031-4a55-a58d-7b4a6b9603bd]" 2025-12-08T17:55:38.324068808+00:00 stderr F I1208 17:55:38.323743 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-edit, uid: 5d6b5c49-6576-4c97-9b4b-af4655930902]" virtual=false 2025-12-08T17:55:38.327991806+00:00 stderr F I1208 17:55:38.327922 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-admin, uid: c41f9082-7c51-4533-9c9a-3714b644db6f]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certificaterequests.cert-manager.io","uid":"37e3a1c4-6031-4a55-a58d-7b4a6b9603bd","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.338165725+00:00 stderr F I1208 17:55:38.338074 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-edit, uid: 5d6b5c49-6576-4c97-9b4b-af4655930902]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certificaterequests.cert-manager.io","uid":"37e3a1c4-6031-4a55-a58d-7b4a6b9603bd","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.350898444+00:00 stderr F I1208 17:55:38.349307 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-view, uid: 65dcae26-836c-406a-8739-4dae7ad33518]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certificaterequests.cert-manager.io, uid: 37e3a1c4-6031-4a55-a58d-7b4a6b9603bd]" 2025-12-08T17:55:38.350898444+00:00 stderr F I1208 17:55:38.349377 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-view, uid: 65dcae26-836c-406a-8739-4dae7ad33518]" virtual=false 2025-12-08T17:55:38.354023940+00:00 stderr F I1208 17:55:38.353251 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-crdview, uid: c2dcf1fc-91f0-4243-b5e9-76b90f4d1177]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certificaterequests.cert-manager.io, uid: 37e3a1c4-6031-4a55-a58d-7b4a6b9603bd]" 2025-12-08T17:55:38.354023940+00:00 stderr F I1208 17:55:38.353317 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-crdview, uid: c2dcf1fc-91f0-4243-b5e9-76b90f4d1177]" virtual=false 2025-12-08T17:55:38.365620178+00:00 stderr F I1208 17:55:38.365540 
1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-admin, uid: 2c30aa32-1c46-4140-9c1b-2d681254b7ff]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certificates.cert-manager.io, uid: 49466427-0a38-49c9-9a25-9117674612e8]" 2025-12-08T17:55:38.366146642+00:00 stderr F I1208 17:55:38.366095 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-admin, uid: 2c30aa32-1c46-4140-9c1b-2d681254b7ff]" virtual=false 2025-12-08T17:55:38.377375591+00:00 stderr F I1208 17:55:38.376604 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-crdview, uid: c2dcf1fc-91f0-4243-b5e9-76b90f4d1177]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certificaterequests.cert-manager.io","uid":"37e3a1c4-6031-4a55-a58d-7b4a6b9603bd","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.379622762+00:00 stderr F I1208 17:55:38.378938 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificaterequests.cert-manager.io-v1-view, uid: 65dcae26-836c-406a-8739-4dae7ad33518]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certificaterequests.cert-manager.io","uid":"37e3a1c4-6031-4a55-a58d-7b4a6b9603bd","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.384391274+00:00 stderr F I1208 17:55:38.384177 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-edit, uid: 42770792-4adf-4b3c-ba5c-438be737e54b]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certificates.cert-manager.io, uid: 49466427-0a38-49c9-9a25-9117674612e8]" 2025-12-08T17:55:38.384518377+00:00 stderr F I1208 17:55:38.384457 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-edit, uid: 42770792-4adf-4b3c-ba5c-438be737e54b]" virtual=false 2025-12-08T17:55:38.385068122+00:00 stderr F I1208 17:55:38.385012 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-view, uid: b28bde9e-9d98-47be-a0f1-86c2cf36d996]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certificates.cert-manager.io, uid: 49466427-0a38-49c9-9a25-9117674612e8]" 2025-12-08T17:55:38.385068122+00:00 stderr F I1208 17:55:38.385057 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: 
certificates.cert-manager.io-v1-crdview, uid: 92116ebd-5bac-4a2a-80fd-6a0b3983b8fa]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certificates.cert-manager.io, uid: 49466427-0a38-49c9-9a25-9117674612e8]" 2025-12-08T17:55:38.385106123+00:00 stderr F I1208 17:55:38.385073 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-view, uid: b28bde9e-9d98-47be-a0f1-86c2cf36d996]" virtual=false 2025-12-08T17:55:38.385199135+00:00 stderr F I1208 17:55:38.385137 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-crdview, uid: 92116ebd-5bac-4a2a-80fd-6a0b3983b8fa]" virtual=false 2025-12-08T17:55:38.393695829+00:00 stderr F I1208 17:55:38.393606 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-admin, uid: 2c30aa32-1c46-4140-9c1b-2d681254b7ff]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certificates.cert-manager.io","uid":"49466427-0a38-49c9-9a25-9117674612e8","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.395496248+00:00 stderr F I1208 17:55:38.395281 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-admin, uid: d02acfaf-e803-4ef6-b895-c62bbd2bc0c2]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certmanagers.operator.openshift.io, uid: b19cb80c-12ec-4500-aed9-8a27166017e9]" 2025-12-08T17:55:38.395496248+00:00 stderr F I1208 17:55:38.395316 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-admin, uid: d02acfaf-e803-4ef6-b895-c62bbd2bc0c2]" virtual=false 2025-12-08T17:55:38.402814170+00:00 stderr F I1208 17:55:38.402455 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-edit, uid: 322a3549-aed4-4cec-acb1-75f7c1e70f96]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certmanagers.operator.openshift.io, uid: b19cb80c-12ec-4500-aed9-8a27166017e9]" 2025-12-08T17:55:38.402814170+00:00 stderr F I1208 17:55:38.402515 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-edit, uid: 322a3549-aed4-4cec-acb1-75f7c1e70f96]" virtual=false 2025-12-08T17:55:38.405564655+00:00 stderr F I1208 17:55:38.405208 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-crdview, uid: 92116ebd-5bac-4a2a-80fd-6a0b3983b8fa]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certificates.cert-manager.io","uid":"49466427-0a38-49c9-9a25-9117674612e8","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.405564655+00:00 stderr F I1208 17:55:38.405295 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-admin, uid: d02acfaf-e803-4ef6-b895-c62bbd2bc0c2]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certmanagers.operator.openshift.io","uid":"b19cb80c-12ec-4500-aed9-8a27166017e9","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.405722059+00:00 stderr F I1208 17:55:38.405666 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-view, uid: b28bde9e-9d98-47be-a0f1-86c2cf36d996]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certificates.cert-manager.io","uid":"49466427-0a38-49c9-9a25-9117674612e8","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.409499873+00:00 stderr F I1208 17:55:38.409217 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certificates.cert-manager.io-v1-edit, uid: 42770792-4adf-4b3c-ba5c-438be737e54b]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certificates.cert-manager.io","uid":"49466427-0a38-49c9-9a25-9117674612e8","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.417423870+00:00 stderr F I1208 17:55:38.417329 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-edit, uid: 322a3549-aed4-4cec-acb1-75f7c1e70f96]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certmanagers.operator.openshift.io","uid":"b19cb80c-12ec-4500-aed9-8a27166017e9","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.418161510+00:00 stderr F I1208 17:55:38.417915 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-view, uid: 295b07a0-9382-427a-a6f2-0d9ae9ebf7d4]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certmanagers.operator.openshift.io, uid: b19cb80c-12ec-4500-aed9-8a27166017e9]" 2025-12-08T17:55:38.418161510+00:00 stderr F I1208 17:55:38.417986 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-view, uid: 295b07a0-9382-427a-a6f2-0d9ae9ebf7d4]" virtual=false 2025-12-08T17:55:38.436492934+00:00 stderr F I1208 17:55:38.435717 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-crdview, uid: cbe7cd72-a807-47c0-8cec-a0ef3323f849]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: certmanagers.operator.openshift.io, uid: b19cb80c-12ec-4500-aed9-8a27166017e9]" 2025-12-08T17:55:38.436492934+00:00 stderr F I1208 17:55:38.435720 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-view, uid: 295b07a0-9382-427a-a6f2-0d9ae9ebf7d4]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certmanagers.operator.openshift.io","uid":"b19cb80c-12ec-4500-aed9-8a27166017e9","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.436492934+00:00 stderr F I1208 17:55:38.435768 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-crdview, uid: cbe7cd72-a807-47c0-8cec-a0ef3323f849]" virtual=false 2025-12-08T17:55:38.442369235+00:00 stderr F I1208 17:55:38.442013 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-admin, uid: 3acfd789-fb64-4fb4-a412-a44fd30698c8]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: challenges.acme.cert-manager.io, uid: df6c0c19-4208-463a-a441-08e92d252761]" 2025-12-08T17:55:38.442369235+00:00 stderr F I1208 17:55:38.442068 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-admin, uid: 3acfd789-fb64-4fb4-a412-a44fd30698c8]" virtual=false 2025-12-08T17:55:38.447208917+00:00 stderr F I1208 17:55:38.446922 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: certmanagers.operator.openshift.io-v1alpha1-crdview, uid: cbe7cd72-a807-47c0-8cec-a0ef3323f849]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"certmanagers.operator.openshift.io","uid":"b19cb80c-12ec-4500-aed9-8a27166017e9","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.457537891+00:00 stderr F I1208 17:55:38.457338 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-admin, uid: 3acfd789-fb64-4fb4-a412-a44fd30698c8]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"challenges.acme.cert-manager.io","uid":"df6c0c19-4208-463a-a441-08e92d252761","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.460783269+00:00 stderr F I1208 17:55:38.459216 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: 
challenges.acme.cert-manager.io-v1-edit, uid: e1edf3f4-1610-4562-999e-dfebb8a32904]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: challenges.acme.cert-manager.io, uid: df6c0c19-4208-463a-a441-08e92d252761]" 2025-12-08T17:55:38.460783269+00:00 stderr F I1208 17:55:38.459257 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-edit, uid: e1edf3f4-1610-4562-999e-dfebb8a32904]" virtual=false 2025-12-08T17:55:38.473142739+00:00 stderr F I1208 17:55:38.473076 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-edit, uid: e1edf3f4-1610-4562-999e-dfebb8a32904]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"challenges.acme.cert-manager.io","uid":"df6c0c19-4208-463a-a441-08e92d252761","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.473226731+00:00 stderr F I1208 17:55:38.473203 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-view, uid: 9c91be72-20a5-47ea-923b-da5385b646f0]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: challenges.acme.cert-manager.io, uid: df6c0c19-4208-463a-a441-08e92d252761]" 2025-12-08T17:55:38.473270963+00:00 stderr F I1208 17:55:38.473226 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-view, uid: 9c91be72-20a5-47ea-923b-da5385b646f0]" virtual=false 2025-12-08T17:55:38.483137213+00:00 stderr F I1208 17:55:38.483080 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-crdview, uid: 20ae0984-6919-4eec-bbf0-cf42411f16b3]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: challenges.acme.cert-manager.io, uid: df6c0c19-4208-463a-a441-08e92d252761]" 2025-12-08T17:55:38.483163704+00:00 stderr F I1208 17:55:38.483127 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-crdview, uid: 20ae0984-6919-4eec-bbf0-cf42411f16b3]" virtual=false 2025-12-08T17:55:38.484721777+00:00 stderr F I1208 17:55:38.484625 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-view, uid: 9c91be72-20a5-47ea-923b-da5385b646f0]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"challenges.acme.cert-manager.io","uid":"df6c0c19-4208-463a-a441-08e92d252761","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.486296489+00:00 stderr F I1208 17:55:38.485345 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-admin, uid: de938f7d-9e48-48f5-8c7a-cd594899f9a5]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: clusterissuers.cert-manager.io, uid: 9548bb10-61fa-4286-8d8d-ed87e86d91b2]" 2025-12-08T17:55:38.486296489+00:00 stderr F I1208 17:55:38.485375 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-admin, uid: de938f7d-9e48-48f5-8c7a-cd594899f9a5]" virtual=false 2025-12-08T17:55:38.488155751+00:00 stderr F I1208 17:55:38.488099 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-edit, uid: ba1434fb-90db-416d-920a-593052fde097]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: clusterissuers.cert-manager.io, uid: 9548bb10-61fa-4286-8d8d-ed87e86d91b2]" 2025-12-08T17:55:38.488178921+00:00 stderr F I1208 17:55:38.488148 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-edit, uid: ba1434fb-90db-416d-920a-593052fde097]" virtual=false 2025-12-08T17:55:38.491607075+00:00 stderr F I1208 17:55:38.491315 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: challenges.acme.cert-manager.io-v1-crdview, uid: 20ae0984-6919-4eec-bbf0-cf42411f16b3]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"challenges.acme.cert-manager.io","uid":"df6c0c19-4208-463a-a441-08e92d252761","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.492469860+00:00 stderr F I1208 17:55:38.491807 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-view, uid: b713cd63-43c3-437d-b168-4e9a805d1b39]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: clusterissuers.cert-manager.io, uid: 9548bb10-61fa-4286-8d8d-ed87e86d91b2]" 2025-12-08T17:55:38.492469860+00:00 stderr F I1208 17:55:38.491856 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-view, uid: b713cd63-43c3-437d-b168-4e9a805d1b39]" virtual=false 2025-12-08T17:55:38.496076698+00:00 stderr F I1208 17:55:38.496026 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-admin, uid: de938f7d-9e48-48f5-8c7a-cd594899f9a5]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"clusterissuers.cert-manager.io","uid":"9548bb10-61fa-4286-8d8d-ed87e86d91b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.498665369+00:00 stderr F I1208 17:55:38.498614 1 garbagecollector.go:567] 
"item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-edit, uid: ba1434fb-90db-416d-920a-593052fde097]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"clusterissuers.cert-manager.io","uid":"9548bb10-61fa-4286-8d8d-ed87e86d91b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.499589954+00:00 stderr F I1208 17:55:38.499536 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-crdview, uid: a7a7993a-7427-45dc-8bbf-d59330b25ca2]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: clusterissuers.cert-manager.io, uid: 9548bb10-61fa-4286-8d8d-ed87e86d91b2]" 2025-12-08T17:55:38.499589954+00:00 stderr F I1208 17:55:38.499561 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-crdview, uid: a7a7993a-7427-45dc-8bbf-d59330b25ca2]" virtual=false 2025-12-08T17:55:38.504978612+00:00 stderr F I1208 17:55:38.504142 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-view, uid: e3c41bb9-d766-4be1-aea7-6db4d142c301]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: issuers.cert-manager.io, uid: 744d04f7-14d1-4962-b68a-ce803305c2fe]" 2025-12-08T17:55:38.504978612+00:00 stderr F I1208 17:55:38.504189 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-view, uid: e3c41bb9-d766-4be1-aea7-6db4d142c301]" virtual=false 2025-12-08T17:55:38.504978612+00:00 stderr F I1208 17:55:38.504726 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-view, uid: b713cd63-43c3-437d-b168-4e9a805d1b39]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"clusterissuers.cert-manager.io","uid":"9548bb10-61fa-4286-8d8d-ed87e86d91b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.515405259+00:00 stderr F I1208 17:55:38.515321 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: clusterissuers.cert-manager.io-v1-crdview, uid: a7a7993a-7427-45dc-8bbf-d59330b25ca2]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"clusterissuers.cert-manager.io","uid":"9548bb10-61fa-4286-8d8d-ed87e86d91b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.519196932+00:00 stderr F I1208 17:55:38.519131 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-view, uid: 
e3c41bb9-d766-4be1-aea7-6db4d142c301]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"issuers.cert-manager.io","uid":"744d04f7-14d1-4962-b68a-ce803305c2fe","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.520999482+00:00 stderr F I1208 17:55:38.515123 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-admin, uid: 0fda357a-1da7-4c52-92fd-7ab6cbb62d2f]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: issuers.cert-manager.io, uid: 744d04f7-14d1-4962-b68a-ce803305c2fe]" 2025-12-08T17:55:38.521030953+00:00 stderr F I1208 17:55:38.521016 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-admin, uid: 0fda357a-1da7-4c52-92fd-7ab6cbb62d2f]" virtual=false 2025-12-08T17:55:38.521689251+00:00 stderr F I1208 17:55:38.521645 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-edit, uid: ec012114-234a-40f1-adbe-d690033eecc6]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: issuers.cert-manager.io, uid: 744d04f7-14d1-4962-b68a-ce803305c2fe]" 2025-12-08T17:55:38.521689251+00:00 stderr F I1208 17:55:38.521675 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-edit, uid: ec012114-234a-40f1-adbe-d690033eecc6]" virtual=false 2025-12-08T17:55:38.521906427+00:00 stderr F I1208 17:55:38.521814 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-crdview, uid: e7e81c03-3a7e-4a11-9b44-03c052755399]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: issuers.cert-manager.io, uid: 744d04f7-14d1-4962-b68a-ce803305c2fe]" 2025-12-08T17:55:38.522022440+00:00 stderr F I1208 17:55:38.521986 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-crdview, uid: e7e81c03-3a7e-4a11-9b44-03c052755399]" virtual=false 2025-12-08T17:55:38.527860580+00:00 stderr F I1208 17:55:38.527763 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-admin, uid: 1bd82aa9-7668-45db-a25a-f30155f3adb3]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: istiocsrs.operator.openshift.io, uid: 57300c4e-5a84-457f-a94a-74229e917c5a]" 2025-12-08T17:55:38.527860580+00:00 stderr F I1208 17:55:38.527823 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-admin, uid: 1bd82aa9-7668-45db-a25a-f30155f3adb3]" virtual=false 
2025-12-08T17:55:38.536380144+00:00 stderr F I1208 17:55:38.536300 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-edit, uid: 9da1058e-9bdb-4406-a4d1-7b741f784a5b]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: istiocsrs.operator.openshift.io, uid: 57300c4e-5a84-457f-a94a-74229e917c5a]" 2025-12-08T17:55:38.536380144+00:00 stderr F I1208 17:55:38.536352 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-edit, uid: 9da1058e-9bdb-4406-a4d1-7b741f784a5b]" virtual=false 2025-12-08T17:55:38.536646681+00:00 stderr F I1208 17:55:38.536621 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-view, uid: 238e45ae-5696-486b-9ef8-75c2a649c39a]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: istiocsrs.operator.openshift.io, uid: 57300c4e-5a84-457f-a94a-74229e917c5a]" 2025-12-08T17:55:38.536685232+00:00 stderr F I1208 17:55:38.536644 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-view, uid: 238e45ae-5696-486b-9ef8-75c2a649c39a]" virtual=false 2025-12-08T17:55:38.541346350+00:00 stderr F I1208 17:55:38.541278 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-admin, uid: 1bd82aa9-7668-45db-a25a-f30155f3adb3]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"istiocsrs.operator.openshift.io","uid":"57300c4e-5a84-457f-a94a-74229e917c5a","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.541382421+00:00 stderr F I1208 17:55:38.541361 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-admin, uid: 0fda357a-1da7-4c52-92fd-7ab6cbb62d2f]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"issuers.cert-manager.io","uid":"744d04f7-14d1-4962-b68a-ce803305c2fe","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.541579667+00:00 stderr F I1208 17:55:38.541533 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-crdview, uid: e7e81c03-3a7e-4a11-9b44-03c052755399]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"issuers.cert-manager.io","uid":"744d04f7-14d1-4962-b68a-ce803305c2fe","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.541615868+00:00 stderr F I1208 17:55:38.541596 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: issuers.cert-manager.io-v1-edit, uid: ec012114-234a-40f1-adbe-d690033eecc6]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"issuers.cert-manager.io","uid":"744d04f7-14d1-4962-b68a-ce803305c2fe","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.545260577+00:00 stderr F I1208 17:55:38.545221 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-crdview, uid: dfd8bf91-1515-400b-972f-907ea6d3e8b8]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: istiocsrs.operator.openshift.io, uid: 57300c4e-5a84-457f-a94a-74229e917c5a]" 2025-12-08T17:55:38.545281998+00:00 stderr F I1208 17:55:38.545261 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-crdview, uid: dfd8bf91-1515-400b-972f-907ea6d3e8b8]" virtual=false 2025-12-08T17:55:38.550216364+00:00 stderr F I1208 17:55:38.550176 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-admin, uid: f961a0be-f22e-4f5a-a13c-29777008a527]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: orders.acme.cert-manager.io, uid: 37ecc510-be80-47d3-9b53-81f73f3ff35f]" 2025-12-08T17:55:38.550249424+00:00 stderr F I1208 17:55:38.550213 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-admin, uid: f961a0be-f22e-4f5a-a13c-29777008a527]" virtual=false 2025-12-08T17:55:38.552007973+00:00 stderr F I1208 17:55:38.550449 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-view, uid: 238e45ae-5696-486b-9ef8-75c2a649c39a]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"istiocsrs.operator.openshift.io","uid":"57300c4e-5a84-457f-a94a-74229e917c5a","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.552007973+00:00 stderr F I1208 17:55:38.550452 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-edit, uid: 9da1058e-9bdb-4406-a4d1-7b741f784a5b]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"istiocsrs.operator.openshift.io","uid":"57300c4e-5a84-457f-a94a-74229e917c5a","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.555015316+00:00 stderr F I1208 17:55:38.554958 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-edit, uid: 
deb9dda9-4ead-40ab-9562-04c3f5864f4d]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: orders.acme.cert-manager.io, uid: 37ecc510-be80-47d3-9b53-81f73f3ff35f]" 2025-12-08T17:55:38.555033566+00:00 stderr F I1208 17:55:38.555013 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-edit, uid: deb9dda9-4ead-40ab-9562-04c3f5864f4d]" virtual=false 2025-12-08T17:55:38.569088112+00:00 stderr F I1208 17:55:38.569001 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: istiocsrs.operator.openshift.io-v1alpha1-crdview, uid: dfd8bf91-1515-400b-972f-907ea6d3e8b8]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"istiocsrs.operator.openshift.io","uid":"57300c4e-5a84-457f-a94a-74229e917c5a","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.570261003+00:00 stderr F I1208 17:55:38.570154 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-admin, uid: f961a0be-f22e-4f5a-a13c-29777008a527]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"orders.acme.cert-manager.io","uid":"37ecc510-be80-47d3-9b53-81f73f3ff35f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.575799975+00:00 stderr F I1208 17:55:38.575621 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-edit, uid: deb9dda9-4ead-40ab-9562-04c3f5864f4d]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"orders.acme.cert-manager.io","uid":"37ecc510-be80-47d3-9b53-81f73f3ff35f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.586020876+00:00 stderr F I1208 17:55:38.585935 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-view, uid: 7b764da7-8c5f-4d27-bf40-3cf03aa49137]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: orders.acme.cert-manager.io, uid: 37ecc510-be80-47d3-9b53-81f73f3ff35f]" 2025-12-08T17:55:38.586069658+00:00 stderr F I1208 17:55:38.586032 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-crdview, uid: f7d625c2-6c9f-42f3-985f-c05f11a39a9a]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: orders.acme.cert-manager.io, uid: 37ecc510-be80-47d3-9b53-81f73f3ff35f]" 2025-12-08T17:55:38.586139419+00:00 stderr F I1208 17:55:38.586091 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-view, uid: 7b764da7-8c5f-4d27-bf40-3cf03aa49137]" virtual=false 
2025-12-08T17:55:38.588387031+00:00 stderr F I1208 17:55:38.587980 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-crdview, uid: f7d625c2-6c9f-42f3-985f-c05f11a39a9a]" virtual=false 2025-12-08T17:55:38.596891065+00:00 stderr F I1208 17:55:38.596789 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-crdview, uid: f7d625c2-6c9f-42f3-985f-c05f11a39a9a]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"orders.acme.cert-manager.io","uid":"37ecc510-be80-47d3-9b53-81f73f3ff35f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:38.596916725+00:00 stderr F I1208 17:55:38.596835 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: orders.acme.cert-manager.io-v1-view, uid: 7b764da7-8c5f-4d27-bf40-3cf03aa49137]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"orders.acme.cert-manager.io","uid":"37ecc510-be80-47d3-9b53-81f73f3ff35f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:39.430051186+00:00 stderr F I1208 17:55:39.429657 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:39.437087180+00:00 stderr F I1208 17:55:39.437030 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:39.441757417+00:00 stderr F I1208 17:55:39.440699 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="cert-manager/cert-manager" err="Operation cannot be fulfilled on deployments.apps \"cert-manager\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:39.441757417+00:00 stderr F E1208 17:55:39.441433 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:39.443940267+00:00 stderr F I1208 17:55:39.443800 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:39.448231925+00:00 stderr F I1208 17:55:39.447041 1 replica_set.go:615] "Slow-start failure. 
Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:39.448231925+00:00 stderr F E1208 17:55:39.447163 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:39.450599400+00:00 stderr F I1208 17:55:39.450548 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:39.454240950+00:00 stderr F I1208 17:55:39.454193 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:39.454292182+00:00 stderr F E1208 17:55:39.454263 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:39.460613615+00:00 stderr F I1208 17:55:39.460557 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:39.464814440+00:00 stderr F I1208 17:55:39.464774 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:39.464830851+00:00 stderr F E1208 17:55:39.464814 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:39.506422081+00:00 stderr F I1208 17:55:39.506282 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:39.510524344+00:00 stderr F I1208 17:55:39.510427 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:39.510617507+00:00 stderr F E1208 17:55:39.510562 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:39.592170805+00:00 stderr F I1208 17:55:39.592068 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:39.602023705+00:00 stderr F I1208 17:55:39.601951 1 replica_set.go:615] "Slow-start failure. 
Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:39.602112048+00:00 stderr F E1208 17:55:39.602073 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:39.764825803+00:00 stderr F I1208 17:55:39.764727 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:39.769993695+00:00 stderr F I1208 17:55:39.769916 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:39.770046696+00:00 stderr F E1208 17:55:39.770021 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:40.093265595+00:00 stderr F I1208 17:55:40.093180 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:40.096719069+00:00 stderr F I1208 17:55:40.096655 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:40.096751660+00:00 stderr F E1208 17:55:40.096733 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:40.451524235+00:00 stderr F I1208 17:55:40.451234 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" need=1 creating=1 2025-12-08T17:55:40.462163487+00:00 stderr F I1208 17:55:40.462093 1 replica_set.go:615] "Slow-start failure. 
Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" 2025-12-08T17:55:40.475547854+00:00 stderr F I1208 17:55:40.475315 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="cert-manager/cert-manager-webhook" err="Operation cannot be fulfilled on deployments.apps \"cert-manager-webhook\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:40.475896014+00:00 stderr F E1208 17:55:40.475686 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-webhook-7894b5b9b4\" failed with pods \"cert-manager-webhook-7894b5b9b4-\" is forbidden: error looking up service account cert-manager/cert-manager-webhook: serviceaccount \"cert-manager-webhook\" not found" logger="UnhandledError" 2025-12-08T17:55:40.480066208+00:00 stderr F I1208 17:55:40.477661 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" need=1 creating=1 2025-12-08T17:55:40.489427125+00:00 stderr F I1208 17:55:40.489291 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" 2025-12-08T17:55:40.489427125+00:00 stderr F E1208 17:55:40.489365 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-webhook-7894b5b9b4\" failed with pods \"cert-manager-webhook-7894b5b9b4-\" is forbidden: error looking up service account cert-manager/cert-manager-webhook: serviceaccount \"cert-manager-webhook\" not found" logger="UnhandledError" 2025-12-08T17:55:40.490789283+00:00 stderr F I1208 17:55:40.490706 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" need=1 creating=1 2025-12-08T17:55:40.497378513+00:00 stderr F I1208 17:55:40.497198 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" 2025-12-08T17:55:40.497378513+00:00 stderr F E1208 17:55:40.497274 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-webhook-7894b5b9b4\" failed with pods \"cert-manager-webhook-7894b5b9b4-\" is forbidden: error looking up service account cert-manager/cert-manager-webhook: serviceaccount \"cert-manager-webhook\" not found" logger="UnhandledError" 2025-12-08T17:55:40.503132351+00:00 stderr F I1208 17:55:40.503041 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" need=1 creating=1 2025-12-08T17:55:40.510894074+00:00 stderr F I1208 17:55:40.510735 1 replica_set.go:615] "Slow-start failure. 
Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" 2025-12-08T17:55:40.510894074+00:00 stderr F E1208 17:55:40.510802 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-webhook-7894b5b9b4\" failed with pods \"cert-manager-webhook-7894b5b9b4-\" is forbidden: error looking up service account cert-manager/cert-manager-webhook: serviceaccount \"cert-manager-webhook\" not found" logger="UnhandledError" 2025-12-08T17:55:40.553407371+00:00 stderr F I1208 17:55:40.553245 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" need=1 creating=1 2025-12-08T17:55:40.563123838+00:00 stderr F I1208 17:55:40.562863 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" 2025-12-08T17:55:40.564054403+00:00 stderr F E1208 17:55:40.564011 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-webhook-7894b5b9b4\" failed with pods \"cert-manager-webhook-7894b5b9b4-\" is forbidden: error looking up service account cert-manager/cert-manager-webhook: serviceaccount \"cert-manager-webhook\" not found" logger="UnhandledError" 2025-12-08T17:55:40.650100134+00:00 stderr F I1208 17:55:40.650006 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-webhook-7894b5b9b4" need=1 creating=1 2025-12-08T17:55:40.739001653+00:00 stderr F I1208 17:55:40.738934 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:40.744037371+00:00 stderr F I1208 17:55:40.742277 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:40.744037371+00:00 stderr F E1208 17:55:40.742349 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:41.837180227+00:00 stderr F I1208 17:55:41.836964 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" need=1 creating=1 2025-12-08T17:55:41.843409328+00:00 stderr F I1208 17:55:41.843346 1 replica_set.go:615] "Slow-start failure. 
Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" 2025-12-08T17:55:41.848919550+00:00 stderr F I1208 17:55:41.848355 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="cert-manager/cert-manager-cainjector" err="Operation cannot be fulfilled on deployments.apps \"cert-manager-cainjector\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:41.852094417+00:00 stderr F E1208 17:55:41.852050 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-cainjector-7dbf76d5c8\" failed with pods \"cert-manager-cainjector-7dbf76d5c8-\" is forbidden: error looking up service account cert-manager/cert-manager-cainjector: serviceaccount \"cert-manager-cainjector\" not found" logger="UnhandledError" 2025-12-08T17:55:41.853229707+00:00 stderr F I1208 17:55:41.853140 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" need=1 creating=1 2025-12-08T17:55:41.860945909+00:00 stderr F I1208 17:55:41.859298 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" 2025-12-08T17:55:41.860945909+00:00 stderr F E1208 17:55:41.859370 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-cainjector-7dbf76d5c8\" failed with pods \"cert-manager-cainjector-7dbf76d5c8-\" is forbidden: error looking up service account cert-manager/cert-manager-cainjector: serviceaccount \"cert-manager-cainjector\" not found" logger="UnhandledError" 2025-12-08T17:55:41.860945909+00:00 stderr F I1208 17:55:41.860675 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" need=1 creating=1 2025-12-08T17:55:41.866898002+00:00 stderr F I1208 17:55:41.865170 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="cert-manager/cert-manager-cainjector" err="Operation cannot be fulfilled on deployments.apps \"cert-manager-cainjector\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:41.866898002+00:00 stderr F I1208 17:55:41.866339 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" 2025-12-08T17:55:41.866898002+00:00 stderr F E1208 17:55:41.866384 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-cainjector-7dbf76d5c8\" failed with pods \"cert-manager-cainjector-7dbf76d5c8-\" is forbidden: error looking up service account cert-manager/cert-manager-cainjector: serviceaccount \"cert-manager-cainjector\" not found" logger="UnhandledError" 2025-12-08T17:55:41.873279717+00:00 stderr F I1208 17:55:41.873069 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" need=1 creating=1 2025-12-08T17:55:41.876631060+00:00 stderr F I1208 17:55:41.876579 1 replica_set.go:615] "Slow-start failure. 
Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" 2025-12-08T17:55:41.876631060+00:00 stderr F E1208 17:55:41.876619 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-cainjector-7dbf76d5c8\" failed with pods \"cert-manager-cainjector-7dbf76d5c8-\" is forbidden: error looking up service account cert-manager/cert-manager-cainjector: serviceaccount \"cert-manager-cainjector\" not found" logger="UnhandledError" 2025-12-08T17:55:41.918573691+00:00 stderr F I1208 17:55:41.918487 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" need=1 creating=1 2025-12-08T17:55:41.921722287+00:00 stderr F I1208 17:55:41.921662 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" 2025-12-08T17:55:41.921748488+00:00 stderr F E1208 17:55:41.921732 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-cainjector-7dbf76d5c8\" failed with pods \"cert-manager-cainjector-7dbf76d5c8-\" is forbidden: error looking up service account cert-manager/cert-manager-cainjector: serviceaccount \"cert-manager-cainjector\" not found" logger="UnhandledError" 2025-12-08T17:55:42.003369318+00:00 stderr F I1208 17:55:42.003293 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" need=1 creating=1 2025-12-08T17:55:42.009371372+00:00 stderr F I1208 17:55:42.009264 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" 2025-12-08T17:55:42.009371372+00:00 stderr F E1208 17:55:42.009343 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-cainjector-7dbf76d5c8\" failed with pods \"cert-manager-cainjector-7dbf76d5c8-\" is forbidden: error looking up service account cert-manager/cert-manager-cainjector: serviceaccount \"cert-manager-cainjector\" not found" logger="UnhandledError" 2025-12-08T17:55:42.023958422+00:00 stderr F I1208 17:55:42.023892 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:42.027421707+00:00 stderr F I1208 17:55:42.027379 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:42.027468368+00:00 stderr F E1208 17:55:42.027444 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:42.171497321+00:00 stderr F I1208 17:55:42.171421 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" need=1 creating=1 2025-12-08T17:55:42.175139931+00:00 stderr F I1208 17:55:42.175090 1 replica_set.go:615] "Slow-start failure. 
Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" 2025-12-08T17:55:42.175169071+00:00 stderr F E1208 17:55:42.175137 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-cainjector-7dbf76d5c8\" failed with pods \"cert-manager-cainjector-7dbf76d5c8-\" is forbidden: error looking up service account cert-manager/cert-manager-cainjector: serviceaccount \"cert-manager-cainjector\" not found" logger="UnhandledError" 2025-12-08T17:55:42.513961508+00:00 stderr F I1208 17:55:42.513192 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" need=1 creating=1 2025-12-08T17:55:42.517428983+00:00 stderr F I1208 17:55:42.517371 1 replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" 2025-12-08T17:55:42.517458854+00:00 stderr F E1208 17:55:42.517438 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-cainjector-7dbf76d5c8\" failed with pods \"cert-manager-cainjector-7dbf76d5c8-\" is forbidden: error looking up service account cert-manager/cert-manager-cainjector: serviceaccount \"cert-manager-cainjector\" not found" logger="UnhandledError" 2025-12-08T17:55:42.547845178+00:00 stderr F I1208 17:55:42.547776 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-admin, uid: 6906164c-7554-48e4-b1ac-482ed1a79536]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: agents.agent.k8s.elastic.co, uid: a8b3a9a2-c51e-4e3f-95d6-ebcf8bc9b294]" 2025-12-08T17:55:42.547898149+00:00 stderr F I1208 17:55:42.547828 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-admin, uid: 6906164c-7554-48e4-b1ac-482ed1a79536]" virtual=false 2025-12-08T17:55:42.553231535+00:00 stderr F I1208 17:55:42.552330 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-edit, uid: df6a072a-c255-49fc-a50f-c4bb8147a281]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: agents.agent.k8s.elastic.co, uid: a8b3a9a2-c51e-4e3f-95d6-ebcf8bc9b294]" 2025-12-08T17:55:42.553231535+00:00 stderr F I1208 17:55:42.552370 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-edit, uid: df6a072a-c255-49fc-a50f-c4bb8147a281]" virtual=false 2025-12-08T17:55:42.557147172+00:00 stderr F I1208 17:55:42.555982 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-admin, uid: 6906164c-7554-48e4-b1ac-482ed1a79536]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"agents.agent.k8s.elastic.co","uid":"a8b3a9a2-c51e-4e3f-95d6-ebcf8bc9b294","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.557147172+00:00 stderr F I1208 17:55:42.556268 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-view, uid: 2af5de61-005a-44db-9f15-a99fd7fd944a]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: agents.agent.k8s.elastic.co, uid: a8b3a9a2-c51e-4e3f-95d6-ebcf8bc9b294]" 2025-12-08T17:55:42.557147172+00:00 stderr F I1208 17:55:42.556292 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-view, uid: 2af5de61-005a-44db-9f15-a99fd7fd944a]" virtual=false 2025-12-08T17:55:42.563347513+00:00 stderr F I1208 17:55:42.560304 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-crdview, uid: aa8d0acd-f277-406c-84d3-d43b77e28c4c]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: agents.agent.k8s.elastic.co, uid: a8b3a9a2-c51e-4e3f-95d6-ebcf8bc9b294]" 2025-12-08T17:55:42.563347513+00:00 stderr F I1208 17:55:42.560397 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-crdview, uid: aa8d0acd-f277-406c-84d3-d43b77e28c4c]" virtual=false 2025-12-08T17:55:42.563347513+00:00 stderr F I1208 17:55:42.562150 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-admin, uid: d5cc6408-2a77-4ac4-97fa-6f60ea02b6a8]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: apmservers.apm.k8s.elastic.co, uid: c9b21ae1-47c1-4e51-adac-423008784834]" 2025-12-08T17:55:42.563347513+00:00 stderr F I1208 17:55:42.562201 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-admin, uid: d5cc6408-2a77-4ac4-97fa-6f60ea02b6a8]" virtual=false 2025-12-08T17:55:42.567533108+00:00 stderr F I1208 17:55:42.567440 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-edit, uid: c2e01dbe-85b0-4d2d-9470-58988f112683]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: apmservers.apm.k8s.elastic.co, uid: c9b21ae1-47c1-4e51-adac-423008784834]" 2025-12-08T17:55:42.567569049+00:00 stderr F I1208 17:55:42.567520 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-edit, uid: c2e01dbe-85b0-4d2d-9470-58988f112683]" virtual=false 
2025-12-08T17:55:42.570279293+00:00 stderr F I1208 17:55:42.570204 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-view, uid: 5a0a3d4c-890c-4f4c-ab71-23c2a8ecaee9]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: apmservers.apm.k8s.elastic.co, uid: c9b21ae1-47c1-4e51-adac-423008784834]" 2025-12-08T17:55:42.570312894+00:00 stderr F I1208 17:55:42.570264 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-view, uid: 5a0a3d4c-890c-4f4c-ab71-23c2a8ecaee9]" virtual=false 2025-12-08T17:55:42.575832425+00:00 stderr F I1208 17:55:42.573695 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-edit, uid: df6a072a-c255-49fc-a50f-c4bb8147a281]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"agents.agent.k8s.elastic.co","uid":"a8b3a9a2-c51e-4e3f-95d6-ebcf8bc9b294","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.581343417+00:00 stderr F I1208 17:55:42.581270 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-crdview, uid: e3c5bb34-edd5-4ed8-9605-2c02ef41c9ba]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: apmservers.apm.k8s.elastic.co, uid: c9b21ae1-47c1-4e51-adac-423008784834]" 2025-12-08T17:55:42.581450450+00:00 stderr F I1208 17:55:42.581409 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-crdview, uid: e3c5bb34-edd5-4ed8-9605-2c02ef41c9ba]" virtual=false 2025-12-08T17:55:42.581996844+00:00 stderr F I1208 17:55:42.581965 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-edit, uid: c2e01dbe-85b0-4d2d-9470-58988f112683]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"apmservers.apm.k8s.elastic.co","uid":"c9b21ae1-47c1-4e51-adac-423008784834","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.582077867+00:00 stderr F I1208 17:55:42.582059 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-admin, uid: d5cc6408-2a77-4ac4-97fa-6f60ea02b6a8]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"apmservers.apm.k8s.elastic.co","uid":"c9b21ae1-47c1-4e51-adac-423008784834","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.582160819+00:00 stderr F I1208 17:55:42.582142 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" 
item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-view, uid: 2af5de61-005a-44db-9f15-a99fd7fd944a]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"agents.agent.k8s.elastic.co","uid":"a8b3a9a2-c51e-4e3f-95d6-ebcf8bc9b294","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.588794862+00:00 stderr F I1208 17:55:42.588691 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-admin, uid: 948e2995-0434-48d1-963e-43d799b9ed5c]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: beats.beat.k8s.elastic.co, uid: edde13c5-dca6-4a16-a681-247019403ab4]" 2025-12-08T17:55:42.588794862+00:00 stderr F I1208 17:55:42.588748 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-admin, uid: 948e2995-0434-48d1-963e-43d799b9ed5c]" virtual=false 2025-12-08T17:55:42.590218630+00:00 stderr F I1208 17:55:42.590142 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-edit, uid: d591a775-6cd4-4c61-aa20-e2daaabc97f1]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: beats.beat.k8s.elastic.co, uid: edde13c5-dca6-4a16-a681-247019403ab4]" 2025-12-08T17:55:42.590370074+00:00 stderr F I1208 17:55:42.590221 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-edit, uid: d591a775-6cd4-4c61-aa20-e2daaabc97f1]" virtual=false 2025-12-08T17:55:42.591933958+00:00 stderr F I1208 17:55:42.591845 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-view, uid: 5a0a3d4c-890c-4f4c-ab71-23c2a8ecaee9]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"apmservers.apm.k8s.elastic.co","uid":"c9b21ae1-47c1-4e51-adac-423008784834","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.605224313+00:00 stderr F I1208 17:55:42.605142 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: agents.agent.k8s.elastic.co-v1alpha1-crdview, uid: aa8d0acd-f277-406c-84d3-d43b77e28c4c]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"agents.agent.k8s.elastic.co","uid":"a8b3a9a2-c51e-4e3f-95d6-ebcf8bc9b294","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.605890731+00:00 stderr F I1208 17:55:42.605849 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: apmservers.apm.k8s.elastic.co-v1-crdview, uid: e3c5bb34-edd5-4ed8-9605-2c02ef41c9ba]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"apmservers.apm.k8s.elastic.co","uid":"c9b21ae1-47c1-4e51-adac-423008784834","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.616935414+00:00 stderr F I1208 17:55:42.616817 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-view, uid: 5070eb56-e553-4d44-9dbc-d323e3b57401]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: beats.beat.k8s.elastic.co, uid: edde13c5-dca6-4a16-a681-247019403ab4]" 2025-12-08T17:55:42.617334064+00:00 stderr F I1208 17:55:42.617308 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-crdview, uid: 75b892c0-9b1e-42a8-b3d6-5ff27f13a053]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: beats.beat.k8s.elastic.co, uid: edde13c5-dca6-4a16-a681-247019403ab4]" 2025-12-08T17:55:42.617400286+00:00 stderr F I1208 17:55:42.617366 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-view, uid: 5070eb56-e553-4d44-9dbc-d323e3b57401]" virtual=false 2025-12-08T17:55:42.617590421+00:00 stderr F I1208 17:55:42.616841 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-edit, uid: d591a775-6cd4-4c61-aa20-e2daaabc97f1]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"beats.beat.k8s.elastic.co","uid":"edde13c5-dca6-4a16-a681-247019403ab4","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.617629992+00:00 stderr F I1208 17:55:42.617613 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-crdview, uid: 75b892c0-9b1e-42a8-b3d6-5ff27f13a053]" virtual=false 2025-12-08T17:55:42.619978167+00:00 stderr F I1208 17:55:42.619952 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-admin, uid: 6c694db0-1b93-49d4-ab5e-3041183c2dea]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticmapsservers.maps.k8s.elastic.co, uid: 9fadd4e4-0330-4f8b-8fd4-a8e57a65575b]" 2025-12-08T17:55:42.620249294+00:00 stderr F I1208 17:55:42.620219 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-admin, uid: 6c694db0-1b93-49d4-ab5e-3041183c2dea]" virtual=false 2025-12-08T17:55:42.620544082+00:00 stderr F I1208 17:55:42.620517 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, 
namespace: , name: beats.beat.k8s.elastic.co-v1beta1-admin, uid: 948e2995-0434-48d1-963e-43d799b9ed5c]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"beats.beat.k8s.elastic.co","uid":"edde13c5-dca6-4a16-a681-247019403ab4","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.629725694+00:00 stderr F I1208 17:55:42.629432 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-view, uid: 5070eb56-e553-4d44-9dbc-d323e3b57401]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"beats.beat.k8s.elastic.co","uid":"edde13c5-dca6-4a16-a681-247019403ab4","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.629725694+00:00 stderr F I1208 17:55:42.629676 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-edit, uid: 5a82397c-5301-4824-8e23-2a16bc010698]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticmapsservers.maps.k8s.elastic.co, uid: 9fadd4e4-0330-4f8b-8fd4-a8e57a65575b]" 2025-12-08T17:55:42.629769835+00:00 stderr F I1208 17:55:42.629698 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-edit, uid: 5a82397c-5301-4824-8e23-2a16bc010698]" virtual=false 2025-12-08T17:55:42.637329373+00:00 stderr F I1208 17:55:42.637244 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-admin, uid: 6c694db0-1b93-49d4-ab5e-3041183c2dea]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticmapsservers.maps.k8s.elastic.co","uid":"9fadd4e4-0330-4f8b-8fd4-a8e57a65575b","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.637487907+00:00 stderr F I1208 17:55:42.637440 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: beats.beat.k8s.elastic.co-v1beta1-crdview, uid: 75b892c0-9b1e-42a8-b3d6-5ff27f13a053]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"beats.beat.k8s.elastic.co","uid":"edde13c5-dca6-4a16-a681-247019403ab4","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.638018972+00:00 stderr F I1208 17:55:42.637599 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-view, uid: 997bb0ba-63e9-42fb-b71d-32c3f093fad4]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticmapsservers.maps.k8s.elastic.co, uid: 9fadd4e4-0330-4f8b-8fd4-a8e57a65575b]" 2025-12-08T17:55:42.638018972+00:00 stderr F I1208 17:55:42.637652 1 garbagecollector.go:501] "Processing item" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-view, uid: 997bb0ba-63e9-42fb-b71d-32c3f093fad4]" virtual=false 2025-12-08T17:55:42.645728574+00:00 stderr F I1208 17:55:42.645572 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-view, uid: 997bb0ba-63e9-42fb-b71d-32c3f093fad4]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticmapsservers.maps.k8s.elastic.co","uid":"9fadd4e4-0330-4f8b-8fd4-a8e57a65575b","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.646809993+00:00 stderr F I1208 17:55:42.646764 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-crdview, uid: 376252ff-b8a7-48b6-8276-722b16891f7c]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticmapsservers.maps.k8s.elastic.co, uid: 9fadd4e4-0330-4f8b-8fd4-a8e57a65575b]" 2025-12-08T17:55:42.646852815+00:00 stderr F I1208 17:55:42.646804 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-crdview, uid: 376252ff-b8a7-48b6-8276-722b16891f7c]" virtual=false 2025-12-08T17:55:42.651542993+00:00 stderr F I1208 17:55:42.651482 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-edit, uid: 5a82397c-5301-4824-8e23-2a16bc010698]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticmapsservers.maps.k8s.elastic.co","uid":"9fadd4e4-0330-4f8b-8fd4-a8e57a65575b","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.666484864+00:00 stderr F I1208 17:55:42.666414 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticmapsservers.maps.k8s.elastic.co-v1alpha1-crdview, uid: 376252ff-b8a7-48b6-8276-722b16891f7c]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticmapsservers.maps.k8s.elastic.co","uid":"9fadd4e4-0330-4f8b-8fd4-a8e57a65575b","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.670359200+00:00 stderr F I1208 17:55:42.670268 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-admin, uid: 1c03c0fe-ea17-4afd-a7de-5b274a60f657]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co, uid: 662f3c0a-48e1-43e1-947c-2be73f053c72]" 2025-12-08T17:55:42.670359200+00:00 stderr F I1208 17:55:42.670310 1 garbagecollector.go:501] "Processing item" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-admin, uid: 1c03c0fe-ea17-4afd-a7de-5b274a60f657]" virtual=false 2025-12-08T17:55:42.685009081+00:00 stderr F I1208 17:55:42.680316 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-edit, uid: fa1988ed-bd9f-47de-aa7d-017605879272]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co, uid: 662f3c0a-48e1-43e1-947c-2be73f053c72]" 2025-12-08T17:55:42.685009081+00:00 stderr F I1208 17:55:42.680366 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-edit, uid: fa1988ed-bd9f-47de-aa7d-017605879272]" virtual=false 2025-12-08T17:55:42.685009081+00:00 stderr F I1208 17:55:42.680764 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-admin, uid: 1c03c0fe-ea17-4afd-a7de-5b274a60f657]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticsearchautoscalers.autoscaling.k8s.elastic.co","uid":"662f3c0a-48e1-43e1-947c-2be73f053c72","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.695560071+00:00 stderr F I1208 17:55:42.695501 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-edit, uid: fa1988ed-bd9f-47de-aa7d-017605879272]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticsearchautoscalers.autoscaling.k8s.elastic.co","uid":"662f3c0a-48e1-43e1-947c-2be73f053c72","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.707983992+00:00 stderr F I1208 17:55:42.705451 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-view, uid: 7b4d5087-d902-4b74-bafb-f6f0cd18a5b4]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co, uid: 662f3c0a-48e1-43e1-947c-2be73f053c72]" 2025-12-08T17:55:42.707983992+00:00 stderr F I1208 17:55:42.705512 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-view, uid: 7b4d5087-d902-4b74-bafb-f6f0cd18a5b4]" virtual=false 2025-12-08T17:55:42.707983992+00:00 stderr F I1208 17:55:42.705837 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: 
elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-crdview, uid: df67c388-54a9-42a1-aed6-f5e3c09a72c1]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co, uid: 662f3c0a-48e1-43e1-947c-2be73f053c72]" 2025-12-08T17:55:42.707983992+00:00 stderr F I1208 17:55:42.705892 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-admin, uid: 85532b11-6a56-4434-b562-c9762a739805]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co, uid: 12543b08-c732-485b-92cf-6805dc228669]" 2025-12-08T17:55:42.707983992+00:00 stderr F I1208 17:55:42.705910 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-crdview, uid: df67c388-54a9-42a1-aed6-f5e3c09a72c1]" virtual=false 2025-12-08T17:55:42.707983992+00:00 stderr F I1208 17:55:42.705942 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-admin, uid: 85532b11-6a56-4434-b562-c9762a739805]" virtual=false 2025-12-08T17:55:42.714406058+00:00 stderr F I1208 17:55:42.714341 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-edit, uid: 866afee5-95d5-42fc-b03b-e73b9c970895]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co, uid: 12543b08-c732-485b-92cf-6805dc228669]" 2025-12-08T17:55:42.714440869+00:00 stderr F I1208 17:55:42.714394 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-edit, uid: 866afee5-95d5-42fc-b03b-e73b9c970895]" virtual=false 2025-12-08T17:55:42.714770438+00:00 stderr F I1208 17:55:42.714726 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-view, uid: 0d6dce76-56ab-4d27-9dcb-40e828f23a97]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co, uid: 12543b08-c732-485b-92cf-6805dc228669]" 2025-12-08T17:55:42.714770438+00:00 stderr F I1208 17:55:42.714746 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-view, uid: 0d6dce76-56ab-4d27-9dcb-40e828f23a97]" virtual=false 2025-12-08T17:55:42.724586057+00:00 stderr F I1208 17:55:42.721609 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: 
elasticsearches.elasticsearch.k8s.elastic.co-v1-crdview, uid: c1a281e7-12dd-40fe-8b19-a59726976e0c]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co, uid: 12543b08-c732-485b-92cf-6805dc228669]" 2025-12-08T17:55:42.724586057+00:00 stderr F I1208 17:55:42.721654 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-crdview, uid: c1a281e7-12dd-40fe-8b19-a59726976e0c]" virtual=false 2025-12-08T17:55:42.724586057+00:00 stderr F I1208 17:55:42.721866 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-crdview, uid: df67c388-54a9-42a1-aed6-f5e3c09a72c1]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticsearchautoscalers.autoscaling.k8s.elastic.co","uid":"662f3c0a-48e1-43e1-947c-2be73f053c72","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.724586057+00:00 stderr F I1208 17:55:42.721947 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearchautoscalers.autoscaling.k8s.elastic.co-v1alpha1-view, uid: 7b4d5087-d902-4b74-bafb-f6f0cd18a5b4]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticsearchautoscalers.autoscaling.k8s.elastic.co","uid":"662f3c0a-48e1-43e1-947c-2be73f053c72","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.725449422+00:00 stderr F I1208 17:55:42.725350 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-admin, uid: 85532b11-6a56-4434-b562-c9762a739805]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticsearches.elasticsearch.k8s.elastic.co","uid":"12543b08-c732-485b-92cf-6805dc228669","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.734921281+00:00 stderr F I1208 17:55:42.732016 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-view, uid: 0d6dce76-56ab-4d27-9dcb-40e828f23a97]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticsearches.elasticsearch.k8s.elastic.co","uid":"12543b08-c732-485b-92cf-6805dc228669","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.734921281+00:00 stderr F I1208 17:55:42.734554 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-edit, uid: 866afee5-95d5-42fc-b03b-e73b9c970895]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticsearches.elasticsearch.k8s.elastic.co","uid":"12543b08-c732-485b-92cf-6805dc228669","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.745090290+00:00 stderr F I1208 17:55:42.742800 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-admin, uid: e4614f61-57bc-4461-9cea-b84b6a9c38e4]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co, uid: b76ec113-3328-4470-804b-0413e13abb96]" 2025-12-08T17:55:42.745090290+00:00 stderr F I1208 17:55:42.742846 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-admin, uid: e4614f61-57bc-4461-9cea-b84b6a9c38e4]" virtual=false 2025-12-08T17:55:42.747549747+00:00 stderr F I1208 17:55:42.747486 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: elasticsearches.elasticsearch.k8s.elastic.co-v1-crdview, uid: c1a281e7-12dd-40fe-8b19-a59726976e0c]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"elasticsearches.elasticsearch.k8s.elastic.co","uid":"12543b08-c732-485b-92cf-6805dc228669","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.750644343+00:00 stderr F I1208 17:55:42.750517 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-edit, uid: 991c571f-afef-4500-b65a-a23ddec15d56]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co, uid: b76ec113-3328-4470-804b-0413e13abb96]" 2025-12-08T17:55:42.750644343+00:00 stderr F I1208 17:55:42.750566 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-edit, uid: 991c571f-afef-4500-b65a-a23ddec15d56]" virtual=false 2025-12-08T17:55:42.759078714+00:00 stderr F I1208 17:55:42.757148 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-view, uid: 95886b0f-054f-4abf-8ee4-06c9d18132d2]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co, uid: b76ec113-3328-4470-804b-0413e13abb96]" 2025-12-08T17:55:42.759078714+00:00 stderr F I1208 17:55:42.757194 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-view, uid: 95886b0f-054f-4abf-8ee4-06c9d18132d2]" virtual=false 2025-12-08T17:55:42.762153919+00:00 stderr F 
I1208 17:55:42.761222 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-crdview, uid: 32681355-2125-4a58-88ec-1d7867d826a8]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co, uid: b76ec113-3328-4470-804b-0413e13abb96]" 2025-12-08T17:55:42.762153919+00:00 stderr F I1208 17:55:42.761281 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-crdview, uid: 32681355-2125-4a58-88ec-1d7867d826a8]" virtual=false 2025-12-08T17:55:42.767916047+00:00 stderr F I1208 17:55:42.764297 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-admin, uid: e4614f61-57bc-4461-9cea-b84b6a9c38e4]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"enterprisesearches.enterprisesearch.k8s.elastic.co","uid":"b76ec113-3328-4470-804b-0413e13abb96","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.767916047+00:00 stderr F I1208 17:55:42.764397 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-edit, uid: e2aab95f-026f-4746-935d-0b52563e7c04]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: kibanas.kibana.k8s.elastic.co, uid: 4dcc05ec-a4ba-40b2-9133-63cd9e18cfe0]" 2025-12-08T17:55:42.767916047+00:00 stderr F I1208 17:55:42.764420 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-edit, uid: e2aab95f-026f-4746-935d-0b52563e7c04]" virtual=false 2025-12-08T17:55:42.773069588+00:00 stderr F I1208 17:55:42.772997 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-view, uid: d2bdec85-86b4-4f93-8e80-59e1194d31d9]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: kibanas.kibana.k8s.elastic.co, uid: 4dcc05ec-a4ba-40b2-9133-63cd9e18cfe0]" 2025-12-08T17:55:42.773069588+00:00 stderr F I1208 17:55:42.773047 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-view, uid: d2bdec85-86b4-4f93-8e80-59e1194d31d9]" virtual=false 2025-12-08T17:55:42.773147430+00:00 stderr F I1208 17:55:42.773084 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-edit, uid: 991c571f-afef-4500-b65a-a23ddec15d56]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"enterprisesearches.enterprisesearch.k8s.elastic.co","uid":"b76ec113-3328-4470-804b-0413e13abb96","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.778190978+00:00 stderr F I1208 17:55:42.777939 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-edit, uid: e2aab95f-026f-4746-935d-0b52563e7c04]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"kibanas.kibana.k8s.elastic.co","uid":"4dcc05ec-a4ba-40b2-9133-63cd9e18cfe0","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.778190978+00:00 stderr F I1208 17:55:42.778012 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-view, uid: 95886b0f-054f-4abf-8ee4-06c9d18132d2]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"enterprisesearches.enterprisesearch.k8s.elastic.co","uid":"b76ec113-3328-4470-804b-0413e13abb96","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.778190978+00:00 stderr F I1208 17:55:42.778058 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: enterprisesearches.enterprisesearch.k8s.elastic.co-v1-crdview, uid: 32681355-2125-4a58-88ec-1d7867d826a8]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"enterprisesearches.enterprisesearch.k8s.elastic.co","uid":"b76ec113-3328-4470-804b-0413e13abb96","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.787136864+00:00 stderr F I1208 17:55:42.781627 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-admin, uid: c511da03-b723-4d91-b477-ea144be36e7e]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: kibanas.kibana.k8s.elastic.co, uid: 4dcc05ec-a4ba-40b2-9133-63cd9e18cfe0]" 2025-12-08T17:55:42.787136864+00:00 stderr F I1208 17:55:42.781676 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-admin, uid: c511da03-b723-4d91-b477-ea144be36e7e]" virtual=false 2025-12-08T17:55:42.823644236+00:00 stderr F I1208 17:55:42.822257 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-view, uid: d2bdec85-86b4-4f93-8e80-59e1194d31d9]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"kibanas.kibana.k8s.elastic.co","uid":"4dcc05ec-a4ba-40b2-9133-63cd9e18cfe0","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.823644236+00:00 stderr F I1208 17:55:42.822975 1 graph_builder.go:456] "item references an owner with coordinates that do not match the 
observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-crdview, uid: 03c5d88d-745d-44dd-88ff-343de00134dd]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: kibanas.kibana.k8s.elastic.co, uid: 4dcc05ec-a4ba-40b2-9133-63cd9e18cfe0]" 2025-12-08T17:55:42.823644236+00:00 stderr F I1208 17:55:42.822997 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-admin, uid: b7f76bfb-79e0-4de0-9645-f96ba3d06876]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: logstashes.logstash.k8s.elastic.co, uid: ef9d4a0a-55e1-49f4-8863-a8564cd779c1]" 2025-12-08T17:55:42.823644236+00:00 stderr F I1208 17:55:42.823015 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-crdview, uid: 03c5d88d-745d-44dd-88ff-343de00134dd]" virtual=false 2025-12-08T17:55:42.823644236+00:00 stderr F I1208 17:55:42.823180 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-edit, uid: 11ef3b51-b761-4c6b-b367-06d3a60e9a5f]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: logstashes.logstash.k8s.elastic.co, uid: ef9d4a0a-55e1-49f4-8863-a8564cd779c1]" 2025-12-08T17:55:42.823644236+00:00 stderr F I1208 17:55:42.823195 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-admin, uid: b7f76bfb-79e0-4de0-9645-f96ba3d06876]" virtual=false 2025-12-08T17:55:42.823644236+00:00 stderr F I1208 17:55:42.823266 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-edit, uid: 11ef3b51-b761-4c6b-b367-06d3a60e9a5f]" virtual=false 2025-12-08T17:55:42.832652083+00:00 stderr F I1208 17:55:42.832576 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-admin, uid: c511da03-b723-4d91-b477-ea144be36e7e]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"kibanas.kibana.k8s.elastic.co","uid":"4dcc05ec-a4ba-40b2-9133-63cd9e18cfe0","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.841860405+00:00 stderr F I1208 17:55:42.824531 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-view, uid: 0ad728ac-4c9a-43fd-b0fc-fe5e3ec6e2b9]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: logstashes.logstash.k8s.elastic.co, uid: ef9d4a0a-55e1-49f4-8863-a8564cd779c1]" 2025-12-08T17:55:42.841860405+00:00 stderr F I1208 
17:55:42.841427 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-crdview, uid: 758932cb-0e72-4e05-8d70-6591b137e5cc]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: logstashes.logstash.k8s.elastic.co, uid: ef9d4a0a-55e1-49f4-8863-a8564cd779c1]" 2025-12-08T17:55:42.841860405+00:00 stderr F I1208 17:55:42.841455 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-edit, uid: a543c5c6-1602-40fe-8461-d9b04f983d49]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co, uid: b08a97b7-58d6-4bb2-aba9-233f595bcf91]" 2025-12-08T17:55:42.841860405+00:00 stderr F I1208 17:55:42.841473 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-view, uid: 0ad728ac-4c9a-43fd-b0fc-fe5e3ec6e2b9]" virtual=false 2025-12-08T17:55:42.841860405+00:00 stderr F I1208 17:55:42.841650 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-crdview, uid: 758932cb-0e72-4e05-8d70-6591b137e5cc]" virtual=false 2025-12-08T17:55:42.841860405+00:00 stderr F I1208 17:55:42.841693 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-edit, uid: a543c5c6-1602-40fe-8461-d9b04f983d49]" virtual=false 2025-12-08T17:55:42.844145248+00:00 stderr F I1208 17:55:42.844062 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-edit, uid: 11ef3b51-b761-4c6b-b367-06d3a60e9a5f]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"logstashes.logstash.k8s.elastic.co","uid":"ef9d4a0a-55e1-49f4-8863-a8564cd779c1","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.844182959+00:00 stderr F I1208 17:55:42.844139 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: kibanas.kibana.k8s.elastic.co-v1-crdview, uid: 03c5d88d-745d-44dd-88ff-343de00134dd]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"kibanas.kibana.k8s.elastic.co","uid":"4dcc05ec-a4ba-40b2-9133-63cd9e18cfe0","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.844554549+00:00 stderr F I1208 17:55:42.844375 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-admin, uid: b7f76bfb-79e0-4de0-9645-f96ba3d06876]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"logstashes.logstash.k8s.elastic.co","uid":"ef9d4a0a-55e1-49f4-8863-a8564cd779c1","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.854832802+00:00 stderr F I1208 17:55:42.854762 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-edit, uid: a543c5c6-1602-40fe-8461-d9b04f983d49]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"stackconfigpolicies.stackconfigpolicy.k8s.elastic.co","uid":"b08a97b7-58d6-4bb2-aba9-233f595bcf91","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.855145230+00:00 stderr F I1208 17:55:42.855027 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-view, uid: bb33c343-b61c-4ae8-90a9-e65e7ed5aa74]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co, uid: b08a97b7-58d6-4bb2-aba9-233f595bcf91]" 2025-12-08T17:55:42.855145230+00:00 stderr F I1208 17:55:42.855059 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-view, uid: bb33c343-b61c-4ae8-90a9-e65e7ed5aa74]" virtual=false 2025-12-08T17:55:42.855466979+00:00 stderr F I1208 17:55:42.855433 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-crdview, uid: 758932cb-0e72-4e05-8d70-6591b137e5cc]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"logstashes.logstash.k8s.elastic.co","uid":"ef9d4a0a-55e1-49f4-8863-a8564cd779c1","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.855507730+00:00 stderr F I1208 17:55:42.855479 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-admin, uid: 9389249b-e3be-47e3-acbf-dfb67bb5de2c]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co, uid: b08a97b7-58d6-4bb2-aba9-233f595bcf91]" 2025-12-08T17:55:42.855530730+00:00 stderr F I1208 17:55:42.855507 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-crdview, uid: 3bd6dca0-1d1f-431d-98db-806593b7ea3d]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co, uid: b08a97b7-58d6-4bb2-aba9-233f595bcf91]" 2025-12-08T17:55:42.855556781+00:00 stderr F I1208 17:55:42.855536 1 
garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-admin, uid: 9389249b-e3be-47e3-acbf-dfb67bb5de2c]" virtual=false 2025-12-08T17:55:42.855609163+00:00 stderr F I1208 17:55:42.855586 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-crdview, uid: 3bd6dca0-1d1f-431d-98db-806593b7ea3d]" virtual=false 2025-12-08T17:55:42.858001618+00:00 stderr F I1208 17:55:42.856205 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: logstashes.logstash.k8s.elastic.co-v1alpha1-view, uid: 0ad728ac-4c9a-43fd-b0fc-fe5e3ec6e2b9]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"logstashes.logstash.k8s.elastic.co","uid":"ef9d4a0a-55e1-49f4-8863-a8564cd779c1","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.862192503+00:00 stderr F I1208 17:55:42.862132 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-view, uid: bb33c343-b61c-4ae8-90a9-e65e7ed5aa74]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"stackconfigpolicies.stackconfigpolicy.k8s.elastic.co","uid":"b08a97b7-58d6-4bb2-aba9-233f595bcf91","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.869709950+00:00 stderr F I1208 17:55:42.869577 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-crdview, uid: 3bd6dca0-1d1f-431d-98db-806593b7ea3d]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"stackconfigpolicies.stackconfigpolicy.k8s.elastic.co","uid":"b08a97b7-58d6-4bb2-aba9-233f595bcf91","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:42.869709950+00:00 stderr F I1208 17:55:42.869607 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co-v1alpha1-admin, uid: 9389249b-e3be-47e3-acbf-dfb67bb5de2c]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"stackconfigpolicies.stackconfigpolicy.k8s.elastic.co","uid":"b08a97b7-58d6-4bb2-aba9-233f595bcf91","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:43.160210151+00:00 stderr F I1208 17:55:43.160123 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-cainjector-7dbf76d5c8" need=1 creating=1 2025-12-08T17:55:44.589251313+00:00 stderr F I1208 17:55:44.589155 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:44.594571620+00:00 stderr F I1208 17:55:44.594495 1 
replica_set.go:615] "Slow-start failure. Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:44.594608531+00:00 stderr F E1208 17:55:44.594572 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:47.500611370+00:00 stderr F I1208 17:55:47.500530 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-admin, uid: fae93f5a-c030-4acc-8c14-05debede27f7]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: alertmanagerconfigs.monitoring.rhobs, uid: 58b7610f-5085-47a6-b380-dcc6e336c42a]" 2025-12-08T17:55:47.500611370+00:00 stderr F I1208 17:55:47.500580 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-edit, uid: 4cfa504c-3063-4fd4-a31f-7ed101388fef]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: alertmanagerconfigs.monitoring.rhobs, uid: 58b7610f-5085-47a6-b380-dcc6e336c42a]" 2025-12-08T17:55:47.500662992+00:00 stderr F I1208 17:55:47.500601 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-admin, uid: fae93f5a-c030-4acc-8c14-05debede27f7]" virtual=false 2025-12-08T17:55:47.501164805+00:00 stderr F I1208 17:55:47.500953 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-edit, uid: 4cfa504c-3063-4fd4-a31f-7ed101388fef]" virtual=false 2025-12-08T17:55:47.503506190+00:00 stderr F I1208 17:55:47.503440 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-view, uid: a5e707a6-49f6-4d54-a1ba-e8e4f6623a84]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: alertmanagerconfigs.monitoring.rhobs, uid: 58b7610f-5085-47a6-b380-dcc6e336c42a]" 2025-12-08T17:55:47.503569281+00:00 stderr F I1208 17:55:47.503500 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-view, uid: a5e707a6-49f6-4d54-a1ba-e8e4f6623a84]" virtual=false 2025-12-08T17:55:47.512812505+00:00 stderr F I1208 17:55:47.512728 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-crdview, uid: ce2e073b-75a6-47c4-a39d-948844cde967]" 
owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: alertmanagerconfigs.monitoring.rhobs, uid: 58b7610f-5085-47a6-b380-dcc6e336c42a]" 2025-12-08T17:55:47.512887698+00:00 stderr F I1208 17:55:47.512786 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-crdview, uid: ce2e073b-75a6-47c4-a39d-948844cde967]" virtual=false 2025-12-08T17:55:47.513487984+00:00 stderr F I1208 17:55:47.513420 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-admin, uid: f7f1e010-90d6-4e40-9f24-dc976474028c]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: alertmanagers.monitoring.rhobs, uid: f8fa72de-5d22-47b0-86bd-542cf32b4703]" 2025-12-08T17:55:47.513503685+00:00 stderr F I1208 17:55:47.513469 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-admin, uid: f7f1e010-90d6-4e40-9f24-dc976474028c]" virtual=false 2025-12-08T17:55:47.530620934+00:00 stderr F I1208 17:55:47.530544 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-admin, uid: fae93f5a-c030-4acc-8c14-05debede27f7]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"alertmanagerconfigs.monitoring.rhobs","uid":"58b7610f-5085-47a6-b380-dcc6e336c42a","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.537795090+00:00 stderr F I1208 17:55:47.537724 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-view, uid: a5e707a6-49f6-4d54-a1ba-e8e4f6623a84]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"alertmanagerconfigs.monitoring.rhobs","uid":"58b7610f-5085-47a6-b380-dcc6e336c42a","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.540216397+00:00 stderr F I1208 17:55:47.539925 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-edit, uid: 520593fa-b03e-4304-87a0-165427f7bfe3]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: alertmanagers.monitoring.rhobs, uid: f8fa72de-5d22-47b0-86bd-542cf32b4703]" 2025-12-08T17:55:47.540216397+00:00 stderr F I1208 17:55:47.539954 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-edit, uid: 520593fa-b03e-4304-87a0-165427f7bfe3]" virtual=false 2025-12-08T17:55:47.545938514+00:00 stderr F I1208 17:55:47.545854 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" 
item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-crdview, uid: ce2e073b-75a6-47c4-a39d-948844cde967]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"alertmanagerconfigs.monitoring.rhobs","uid":"58b7610f-5085-47a6-b380-dcc6e336c42a","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.555673002+00:00 stderr F I1208 17:55:47.555579 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagerconfigs.monitoring.rhobs-v1alpha1-edit, uid: 4cfa504c-3063-4fd4-a31f-7ed101388fef]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"alertmanagerconfigs.monitoring.rhobs","uid":"58b7610f-5085-47a6-b380-dcc6e336c42a","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.560013050+00:00 stderr F I1208 17:55:47.559976 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-admin, uid: f7f1e010-90d6-4e40-9f24-dc976474028c]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"alertmanagers.monitoring.rhobs","uid":"f8fa72de-5d22-47b0-86bd-542cf32b4703","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.564334269+00:00 stderr F I1208 17:55:47.564237 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-edit, uid: 520593fa-b03e-4304-87a0-165427f7bfe3]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"alertmanagers.monitoring.rhobs","uid":"f8fa72de-5d22-47b0-86bd-542cf32b4703","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.572221016+00:00 stderr F I1208 17:55:47.572168 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-view, uid: 0b1d6159-e920-4220-9e5c-e807d530bba8]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: alertmanagers.monitoring.rhobs, uid: f8fa72de-5d22-47b0-86bd-542cf32b4703]" 2025-12-08T17:55:47.572279957+00:00 stderr F I1208 17:55:47.572203 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-view, uid: 0b1d6159-e920-4220-9e5c-e807d530bba8]" virtual=false 2025-12-08T17:55:47.583916906+00:00 stderr F I1208 17:55:47.583837 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-crdview, uid: 306c4221-e8f8-4b1f-901b-f66f8e1ec5d6]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: alertmanagers.monitoring.rhobs, uid: f8fa72de-5d22-47b0-86bd-542cf32b4703]" 2025-12-08T17:55:47.583963978+00:00 stderr F I1208 17:55:47.583891 1 garbagecollector.go:501] 
"Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-crdview, uid: 306c4221-e8f8-4b1f-901b-f66f8e1ec5d6]" virtual=false 2025-12-08T17:55:47.587503815+00:00 stderr F I1208 17:55:47.587436 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-admin, uid: 7b2bf09c-4f05-46fd-8de6-9cf0b399e9e5]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: monitoringstacks.monitoring.rhobs, uid: 7d952a8a-3ee1-4850-ac83-bc96957a7ebd]" 2025-12-08T17:55:47.587566276+00:00 stderr F I1208 17:55:47.587480 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-admin, uid: 7b2bf09c-4f05-46fd-8de6-9cf0b399e9e5]" virtual=false 2025-12-08T17:55:47.600105881+00:00 stderr F I1208 17:55:47.600048 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-crdview, uid: 306c4221-e8f8-4b1f-901b-f66f8e1ec5d6]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"alertmanagers.monitoring.rhobs","uid":"f8fa72de-5d22-47b0-86bd-542cf32b4703","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.614394883+00:00 stderr F I1208 17:55:47.614314 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: alertmanagers.monitoring.rhobs-v1-view, uid: 0b1d6159-e920-4220-9e5c-e807d530bba8]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"alertmanagers.monitoring.rhobs","uid":"f8fa72de-5d22-47b0-86bd-542cf32b4703","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.620540631+00:00 stderr F I1208 17:55:47.620458 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-admin, uid: 7b2bf09c-4f05-46fd-8de6-9cf0b399e9e5]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"monitoringstacks.monitoring.rhobs","uid":"7d952a8a-3ee1-4850-ac83-bc96957a7ebd","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.629711553+00:00 stderr F I1208 17:55:47.629595 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-edit, uid: fafbe949-9973-48f8-90c8-83652dab4dd0]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: monitoringstacks.monitoring.rhobs, uid: 7d952a8a-3ee1-4850-ac83-bc96957a7ebd]" 2025-12-08T17:55:47.629775674+00:00 stderr F I1208 17:55:47.629709 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: 
monitoringstacks.monitoring.rhobs-v1alpha1-edit, uid: fafbe949-9973-48f8-90c8-83652dab4dd0]" virtual=false 2025-12-08T17:55:47.630155825+00:00 stderr F I1208 17:55:47.630094 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-view, uid: 028ff5bc-8a4a-41de-93b1-384464bf3b09]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: monitoringstacks.monitoring.rhobs, uid: 7d952a8a-3ee1-4850-ac83-bc96957a7ebd]" 2025-12-08T17:55:47.631428390+00:00 stderr F I1208 17:55:47.630157 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-view, uid: 028ff5bc-8a4a-41de-93b1-384464bf3b09]" virtual=false 2025-12-08T17:55:47.631428390+00:00 stderr F I1208 17:55:47.630661 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-crdview, uid: 92910e40-3c8f-49d9-bd99-1f519c9d8096]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: monitoringstacks.monitoring.rhobs, uid: 7d952a8a-3ee1-4850-ac83-bc96957a7ebd]" 2025-12-08T17:55:47.631428390+00:00 stderr F I1208 17:55:47.630698 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-crdview, uid: 92910e40-3c8f-49d9-bd99-1f519c9d8096]" virtual=false 2025-12-08T17:55:47.632926841+00:00 stderr F I1208 17:55:47.632831 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-admin, uid: e1b88a85-6534-4a9e-9b3d-a49c7464518f]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: observabilityinstallers.observability.openshift.io, uid: 45204ad9-2eb8-417c-96e2-ee73008e60b2]" 2025-12-08T17:55:47.633243219+00:00 stderr F I1208 17:55:47.632930 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-admin, uid: e1b88a85-6534-4a9e-9b3d-a49c7464518f]" virtual=false 2025-12-08T17:55:47.634208807+00:00 stderr F I1208 17:55:47.634102 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-edit, uid: 93e6f08a-bb6d-44f1-bd6d-990a3ede146b]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: observabilityinstallers.observability.openshift.io, uid: 45204ad9-2eb8-417c-96e2-ee73008e60b2]" 2025-12-08T17:55:47.634208807+00:00 stderr F I1208 17:55:47.634172 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: 
observabilityinstallers.observability.openshift.io-v1alpha1-edit, uid: 93e6f08a-bb6d-44f1-bd6d-990a3ede146b]" virtual=false 2025-12-08T17:55:47.642178405+00:00 stderr F I1208 17:55:47.641937 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-admin, uid: e1b88a85-6534-4a9e-9b3d-a49c7464518f]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"observabilityinstallers.observability.openshift.io","uid":"45204ad9-2eb8-417c-96e2-ee73008e60b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.643600044+00:00 stderr F I1208 17:55:47.643545 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-crdview, uid: 92910e40-3c8f-49d9-bd99-1f519c9d8096]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"monitoringstacks.monitoring.rhobs","uid":"7d952a8a-3ee1-4850-ac83-bc96957a7ebd","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.643970774+00:00 stderr F I1208 17:55:47.643868 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-edit, uid: fafbe949-9973-48f8-90c8-83652dab4dd0]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"monitoringstacks.monitoring.rhobs","uid":"7d952a8a-3ee1-4850-ac83-bc96957a7ebd","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.645807005+00:00 stderr F I1208 17:55:47.645608 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-edit, uid: 93e6f08a-bb6d-44f1-bd6d-990a3ede146b]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"observabilityinstallers.observability.openshift.io","uid":"45204ad9-2eb8-417c-96e2-ee73008e60b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.645807005+00:00 stderr F I1208 17:55:47.645646 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-view, uid: fe245859-9761-41d4-a314-9ceb40b20cee]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: observabilityinstallers.observability.openshift.io, uid: 45204ad9-2eb8-417c-96e2-ee73008e60b2]" 2025-12-08T17:55:47.645807005+00:00 stderr F I1208 17:55:47.645692 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-view, uid: fe245859-9761-41d4-a314-9ceb40b20cee]" virtual=false 2025-12-08T17:55:47.645894847+00:00 stderr F I1208 17:55:47.645664 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage 
collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: monitoringstacks.monitoring.rhobs-v1alpha1-view, uid: 028ff5bc-8a4a-41de-93b1-384464bf3b09]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"monitoringstacks.monitoring.rhobs","uid":"7d952a8a-3ee1-4850-ac83-bc96957a7ebd","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.666900813+00:00 stderr F I1208 17:55:47.666793 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-view, uid: fe245859-9761-41d4-a314-9ceb40b20cee]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"observabilityinstallers.observability.openshift.io","uid":"45204ad9-2eb8-417c-96e2-ee73008e60b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.670462591+00:00 stderr F I1208 17:55:47.669842 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-crdview, uid: b49d1ab1-094c-4ac8-a6f9-34895451339b]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: observabilityinstallers.observability.openshift.io, uid: 45204ad9-2eb8-417c-96e2-ee73008e60b2]" 2025-12-08T17:55:47.670500042+00:00 stderr F I1208 17:55:47.670452 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-crdview, uid: b49d1ab1-094c-4ac8-a6f9-34895451339b]" virtual=false 2025-12-08T17:55:47.670662477+00:00 stderr F I1208 17:55:47.670629 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-admin, uid: 6b63cd6c-f26d-41ab-94bb-924f6a2dc5f7]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: perses.perses.dev, uid: fb5b3acd-6ecf-4801-8f05-bf6ab211e786]" 2025-12-08T17:55:47.670662477+00:00 stderr F I1208 17:55:47.670651 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-edit, uid: b83cb0a1-add7-444b-bf78-f708462e9ea9]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: perses.perses.dev, uid: fb5b3acd-6ecf-4801-8f05-bf6ab211e786]" 2025-12-08T17:55:47.670684927+00:00 stderr F I1208 17:55:47.670665 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-admin, uid: 6b63cd6c-f26d-41ab-94bb-924f6a2dc5f7]" virtual=false 2025-12-08T17:55:47.670790640+00:00 stderr F I1208 17:55:47.670742 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-edit, uid: b83cb0a1-add7-444b-bf78-f708462e9ea9]" 
virtual=false 2025-12-08T17:55:47.673180426+00:00 stderr F I1208 17:55:47.673134 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-view, uid: 098725fa-ecae-4b17-9c20-ecc4076ff235]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: perses.perses.dev, uid: fb5b3acd-6ecf-4801-8f05-bf6ab211e786]" 2025-12-08T17:55:47.673198256+00:00 stderr F I1208 17:55:47.673179 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-view, uid: 098725fa-ecae-4b17-9c20-ecc4076ff235]" virtual=false 2025-12-08T17:55:47.680422854+00:00 stderr F I1208 17:55:47.679753 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-crdview, uid: 59442b1e-06d7-4980-b884-ab79cc476b52]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: perses.perses.dev, uid: fb5b3acd-6ecf-4801-8f05-bf6ab211e786]" 2025-12-08T17:55:47.680422854+00:00 stderr F I1208 17:55:47.679800 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-crdview, uid: 59442b1e-06d7-4980-b884-ab79cc476b52]" virtual=false 2025-12-08T17:55:47.681799772+00:00 stderr F I1208 17:55:47.681736 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: observabilityinstallers.observability.openshift.io-v1alpha1-crdview, uid: b49d1ab1-094c-4ac8-a6f9-34895451339b]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"observabilityinstallers.observability.openshift.io","uid":"45204ad9-2eb8-417c-96e2-ee73008e60b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.687834728+00:00 stderr F I1208 17:55:47.687743 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-edit, uid: d1bb90a3-28c9-4aaa-bc3e-27ea5e2a73ec]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: persesdashboards.perses.dev, uid: 36834367-68cc-4a03-83e3-62c6c10cb36f]" 2025-12-08T17:55:47.687834728+00:00 stderr F I1208 17:55:47.687802 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-edit, uid: d1bb90a3-28c9-4aaa-bc3e-27ea5e2a73ec]" virtual=false 2025-12-08T17:55:47.688077934+00:00 stderr F I1208 17:55:47.688015 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-view, uid: 098725fa-ecae-4b17-9c20-ecc4076ff235]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"perses.perses.dev","uid":"fb5b3acd-6ecf-4801-8f05-bf6ab211e786","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.689075972+00:00 stderr F I1208 17:55:47.689021 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-admin, uid: 6b63cd6c-f26d-41ab-94bb-924f6a2dc5f7]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"perses.perses.dev","uid":"fb5b3acd-6ecf-4801-8f05-bf6ab211e786","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.690534382+00:00 stderr F I1208 17:55:47.690454 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-edit, uid: b83cb0a1-add7-444b-bf78-f708462e9ea9]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"perses.perses.dev","uid":"fb5b3acd-6ecf-4801-8f05-bf6ab211e786","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.697342488+00:00 stderr F I1208 17:55:47.697283 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-edit, uid: d1bb90a3-28c9-4aaa-bc3e-27ea5e2a73ec]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"persesdashboards.perses.dev","uid":"36834367-68cc-4a03-83e3-62c6c10cb36f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.697374429+00:00 stderr F I1208 17:55:47.697348 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: perses.perses.dev-v1alpha1-crdview, uid: 59442b1e-06d7-4980-b884-ab79cc476b52]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"perses.perses.dev","uid":"fb5b3acd-6ecf-4801-8f05-bf6ab211e786","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.717560154+00:00 stderr F I1208 17:55:47.717485 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-view, uid: dc558b72-4c3a-425d-a7d0-1ce64f4d5a09]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: persesdashboards.perses.dev, uid: 36834367-68cc-4a03-83e3-62c6c10cb36f]" 2025-12-08T17:55:47.717560154+00:00 stderr F I1208 17:55:47.717540 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-view, uid: dc558b72-4c3a-425d-a7d0-1ce64f4d5a09]" virtual=false 2025-12-08T17:55:47.724652728+00:00 stderr F I1208 17:55:47.724383 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: 
persesdashboards.perses.dev-v1alpha1-admin, uid: 6cdca4d6-df09-4dd3-976c-58cc2c8b1c6f]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: persesdashboards.perses.dev, uid: 36834367-68cc-4a03-83e3-62c6c10cb36f]" 2025-12-08T17:55:47.724652728+00:00 stderr F I1208 17:55:47.724435 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-admin, uid: 6cdca4d6-df09-4dd3-976c-58cc2c8b1c6f]" virtual=false 2025-12-08T17:55:47.728852094+00:00 stderr F I1208 17:55:47.728721 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-view, uid: dc558b72-4c3a-425d-a7d0-1ce64f4d5a09]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"persesdashboards.perses.dev","uid":"36834367-68cc-4a03-83e3-62c6c10cb36f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.731998890+00:00 stderr F I1208 17:55:47.731869 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-admin, uid: 6cdca4d6-df09-4dd3-976c-58cc2c8b1c6f]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"persesdashboards.perses.dev","uid":"36834367-68cc-4a03-83e3-62c6c10cb36f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.732270047+00:00 stderr F I1208 17:55:47.732128 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-crdview, uid: 41915fe1-d038-466c-958f-328a5bc83731]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: persesdashboards.perses.dev, uid: 36834367-68cc-4a03-83e3-62c6c10cb36f]" 2025-12-08T17:55:47.732270047+00:00 stderr F I1208 17:55:47.732173 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-admin, uid: 9d2508c4-0bc9-4018-9a1b-7b1a0e71503d]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: persesdatasources.perses.dev, uid: 815ac180-fc4e-4a71-8f8d-0a1a92d2fc7d]" 2025-12-08T17:55:47.732270047+00:00 stderr F I1208 17:55:47.732191 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-crdview, uid: 41915fe1-d038-466c-958f-328a5bc83731]" virtual=false 2025-12-08T17:55:47.732399161+00:00 stderr F I1208 17:55:47.732374 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-admin, uid: 9d2508c4-0bc9-4018-9a1b-7b1a0e71503d]" virtual=false 2025-12-08T17:55:47.740038221+00:00 stderr F I1208 17:55:47.739987 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-edit, uid: 7fe503b3-456e-4383-b62a-46b1929d2c15]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: persesdatasources.perses.dev, uid: 815ac180-fc4e-4a71-8f8d-0a1a92d2fc7d]" 2025-12-08T17:55:47.740096972+00:00 stderr F I1208 17:55:47.740046 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-edit, uid: 7fe503b3-456e-4383-b62a-46b1929d2c15]" virtual=false 2025-12-08T17:55:47.744016619+00:00 stderr F I1208 17:55:47.743959 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-view, uid: 276eea46-f689-4f57-b277-d55ad29ac2e3]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: persesdatasources.perses.dev, uid: 815ac180-fc4e-4a71-8f8d-0a1a92d2fc7d]" 2025-12-08T17:55:47.744035850+00:00 stderr F I1208 17:55:47.744018 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-view, uid: 276eea46-f689-4f57-b277-d55ad29ac2e3]" virtual=false 2025-12-08T17:55:47.745146000+00:00 stderr F I1208 17:55:47.745101 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdashboards.perses.dev-v1alpha1-crdview, uid: 41915fe1-d038-466c-958f-328a5bc83731]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"persesdashboards.perses.dev","uid":"36834367-68cc-4a03-83e3-62c6c10cb36f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.745381287+00:00 stderr F I1208 17:55:47.745351 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-admin, uid: 9d2508c4-0bc9-4018-9a1b-7b1a0e71503d]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"persesdatasources.perses.dev","uid":"815ac180-fc4e-4a71-8f8d-0a1a92d2fc7d","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.752220965+00:00 stderr F I1208 17:55:47.752146 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-crdview, uid: aa7763dd-e7ee-4fc9-9fdc-3ed6863bc5e3]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: persesdatasources.perses.dev, uid: 815ac180-fc4e-4a71-8f8d-0a1a92d2fc7d]" 2025-12-08T17:55:47.752220965+00:00 stderr F I1208 17:55:47.752190 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-crdview, uid: aa7763dd-e7ee-4fc9-9fdc-3ed6863bc5e3]" virtual=false 2025-12-08T17:55:47.752435030+00:00 stderr F I1208 17:55:47.752269 1 
garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-edit, uid: 7fe503b3-456e-4383-b62a-46b1929d2c15]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"persesdatasources.perses.dev","uid":"815ac180-fc4e-4a71-8f8d-0a1a92d2fc7d","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.757916201+00:00 stderr F I1208 17:55:47.756910 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-admin, uid: cb0e95df-8ebe-4184-9932-467574415bfe]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: podmonitors.monitoring.rhobs, uid: 2362eb08-282d-45b8-b34f-47a5a743bcee]" 2025-12-08T17:55:47.757916201+00:00 stderr F I1208 17:55:47.756949 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-admin, uid: cb0e95df-8ebe-4184-9932-467574415bfe]" virtual=false 2025-12-08T17:55:47.757916201+00:00 stderr F I1208 17:55:47.757225 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-view, uid: 276eea46-f689-4f57-b277-d55ad29ac2e3]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"persesdatasources.perses.dev","uid":"815ac180-fc4e-4a71-8f8d-0a1a92d2fc7d","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.762224980+00:00 stderr F I1208 17:55:47.762160 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-edit, uid: 173f3d54-274b-485d-a1b1-6771b23c7fc3]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: podmonitors.monitoring.rhobs, uid: 2362eb08-282d-45b8-b34f-47a5a743bcee]" 2025-12-08T17:55:47.762265651+00:00 stderr F I1208 17:55:47.762215 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-edit, uid: 173f3d54-274b-485d-a1b1-6771b23c7fc3]" virtual=false 2025-12-08T17:55:47.762274331+00:00 stderr F I1208 17:55:47.762226 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: persesdatasources.perses.dev-v1alpha1-crdview, uid: aa7763dd-e7ee-4fc9-9fdc-3ed6863bc5e3]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"persesdatasources.perses.dev","uid":"815ac180-fc4e-4a71-8f8d-0a1a92d2fc7d","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.763275298+00:00 stderr F I1208 17:55:47.762518 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: 
podmonitors.monitoring.rhobs-v1-admin, uid: cb0e95df-8ebe-4184-9932-467574415bfe]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"podmonitors.monitoring.rhobs","uid":"2362eb08-282d-45b8-b34f-47a5a743bcee","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.770412593+00:00 stderr F I1208 17:55:47.769237 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-view, uid: 7a92613d-24bf-495a-a72c-3fbfd20650e2]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: podmonitors.monitoring.rhobs, uid: 2362eb08-282d-45b8-b34f-47a5a743bcee]" 2025-12-08T17:55:47.770412593+00:00 stderr F I1208 17:55:47.769312 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-view, uid: 7a92613d-24bf-495a-a72c-3fbfd20650e2]" virtual=false 2025-12-08T17:55:47.772293325+00:00 stderr F I1208 17:55:47.772242 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-crdview, uid: 43dace6a-bf9d-458f-b14d-5c726d1f7155]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: podmonitors.monitoring.rhobs, uid: 2362eb08-282d-45b8-b34f-47a5a743bcee]" 2025-12-08T17:55:47.772317066+00:00 stderr F I1208 17:55:47.772285 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-crdview, uid: 43dace6a-bf9d-458f-b14d-5c726d1f7155]" virtual=false 2025-12-08T17:55:47.785920819+00:00 stderr F I1208 17:55:47.783050 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-view, uid: 3ae6af69-da6c-44d1-b663-e68bc6538eb9]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: probes.monitoring.rhobs, uid: e962cc29-35b7-4261-a290-746a9d9e2340]" 2025-12-08T17:55:47.785920819+00:00 stderr F I1208 17:55:47.783136 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-view, uid: 3ae6af69-da6c-44d1-b663-e68bc6538eb9]" virtual=false 2025-12-08T17:55:47.785920819+00:00 stderr F I1208 17:55:47.783606 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-view, uid: 7a92613d-24bf-495a-a72c-3fbfd20650e2]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"podmonitors.monitoring.rhobs","uid":"2362eb08-282d-45b8-b34f-47a5a743bcee","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.785920819+00:00 stderr F I1208 17:55:47.783706 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" 
item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-edit, uid: 173f3d54-274b-485d-a1b1-6771b23c7fc3]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"podmonitors.monitoring.rhobs","uid":"2362eb08-282d-45b8-b34f-47a5a743bcee","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.792498069+00:00 stderr F I1208 17:55:47.791000 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-admin, uid: 6fd01844-1a80-4073-9300-282c9ec80a8e]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: probes.monitoring.rhobs, uid: e962cc29-35b7-4261-a290-746a9d9e2340]" 2025-12-08T17:55:47.792498069+00:00 stderr F I1208 17:55:47.791052 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-admin, uid: 6fd01844-1a80-4073-9300-282c9ec80a8e]" virtual=false 2025-12-08T17:55:47.793939889+00:00 stderr F I1208 17:55:47.793648 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-edit, uid: 0304016a-2c41-4392-a8b9-e9c968f48ced]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: probes.monitoring.rhobs, uid: e962cc29-35b7-4261-a290-746a9d9e2340]" 2025-12-08T17:55:47.793939889+00:00 stderr F I1208 17:55:47.793678 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-edit, uid: 0304016a-2c41-4392-a8b9-e9c968f48ced]" virtual=false 2025-12-08T17:55:47.797322212+00:00 stderr F I1208 17:55:47.796714 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-crdview, uid: e9d1664c-9a87-4009-9a94-06ef19877aae]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: probes.monitoring.rhobs, uid: e962cc29-35b7-4261-a290-746a9d9e2340]" 2025-12-08T17:55:47.797322212+00:00 stderr F I1208 17:55:47.796740 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-crdview, uid: e9d1664c-9a87-4009-9a94-06ef19877aae]" virtual=false 2025-12-08T17:55:47.797322212+00:00 stderr F I1208 17:55:47.797202 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: podmonitors.monitoring.rhobs-v1-crdview, uid: 43dace6a-bf9d-458f-b14d-5c726d1f7155]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"podmonitors.monitoring.rhobs","uid":"2362eb08-282d-45b8-b34f-47a5a743bcee","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.797322212+00:00 stderr F I1208 17:55:47.797278 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-view, uid: 3ae6af69-da6c-44d1-b663-e68bc6538eb9]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"probes.monitoring.rhobs","uid":"e962cc29-35b7-4261-a290-746a9d9e2340","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.797369533+00:00 stderr F I1208 17:55:47.797342 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-admin, uid: 6fd01844-1a80-4073-9300-282c9ec80a8e]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"probes.monitoring.rhobs","uid":"e962cc29-35b7-4261-a290-746a9d9e2340","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.798968798+00:00 stderr F I1208 17:55:47.798921 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-admin, uid: b297051b-23e6-49cc-ba9e-7d04730fbf53]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheusagents.monitoring.rhobs, uid: 6834d8a2-d2e3-443e-a82e-85f0494ef495]" 2025-12-08T17:55:47.799029979+00:00 stderr F I1208 17:55:47.799009 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-admin, uid: b297051b-23e6-49cc-ba9e-7d04730fbf53]" virtual=false 2025-12-08T17:55:47.805240049+00:00 stderr F I1208 17:55:47.805164 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-edit, uid: 0304016a-2c41-4392-a8b9-e9c968f48ced]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"probes.monitoring.rhobs","uid":"e962cc29-35b7-4261-a290-746a9d9e2340","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.810995747+00:00 stderr F I1208 17:55:47.809943 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-edit, uid: 9d524298-9877-479b-8e6d-7a2c73d29a80]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheusagents.monitoring.rhobs, uid: 6834d8a2-d2e3-443e-a82e-85f0494ef495]" 2025-12-08T17:55:47.810995747+00:00 stderr F I1208 17:55:47.810071 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-edit, uid: 9d524298-9877-479b-8e6d-7a2c73d29a80]" virtual=false 2025-12-08T17:55:47.811037448+00:00 stderr F I1208 17:55:47.810990 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: probes.monitoring.rhobs-v1-crdview, uid: e9d1664c-9a87-4009-9a94-06ef19877aae]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"probes.monitoring.rhobs","uid":"e962cc29-35b7-4261-a290-746a9d9e2340","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.824718144+00:00 stderr F I1208 17:55:47.824648 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-view, uid: 212a098c-9e3b-4f2b-a6fd-e7897c6d9258]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheusagents.monitoring.rhobs, uid: 6834d8a2-d2e3-443e-a82e-85f0494ef495]" 2025-12-08T17:55:47.824718144+00:00 stderr F I1208 17:55:47.824694 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-view, uid: 212a098c-9e3b-4f2b-a6fd-e7897c6d9258]" virtual=false 2025-12-08T17:55:47.825191827+00:00 stderr F I1208 17:55:47.825146 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-crdview, uid: dcc43d11-0c05-4c73-8de0-397cf0249e59]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheusagents.monitoring.rhobs, uid: 6834d8a2-d2e3-443e-a82e-85f0494ef495]" 2025-12-08T17:55:47.825213967+00:00 stderr F I1208 17:55:47.825178 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-crdview, uid: dcc43d11-0c05-4c73-8de0-397cf0249e59]" virtual=false 2025-12-08T17:55:47.826868503+00:00 stderr F I1208 17:55:47.826824 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-admin, uid: aca6f2d2-53bf-4e19-9675-c252952b7f9a]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheuses.monitoring.rhobs, uid: 88a7ba7b-4c8b-413c-9d0d-1ede7a5b58b2]" 2025-12-08T17:55:47.826959236+00:00 stderr F I1208 17:55:47.826928 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-admin, uid: aca6f2d2-53bf-4e19-9675-c252952b7f9a]" virtual=false 2025-12-08T17:55:47.840474146+00:00 stderr F I1208 17:55:47.840063 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-admin, uid: b297051b-23e6-49cc-ba9e-7d04730fbf53]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheusagents.monitoring.rhobs","uid":"6834d8a2-d2e3-443e-a82e-85f0494ef495","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.845702929+00:00 stderr F I1208 17:55:47.845641 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" 
item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-edit, uid: 9d524298-9877-479b-8e6d-7a2c73d29a80]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheusagents.monitoring.rhobs","uid":"6834d8a2-d2e3-443e-a82e-85f0494ef495","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.845789032+00:00 stderr F I1208 17:55:47.845732 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-edit, uid: 6e8ee66a-d077-4e9a-94fa-f5c170008027]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheuses.monitoring.rhobs, uid: 88a7ba7b-4c8b-413c-9d0d-1ede7a5b58b2]" 2025-12-08T17:55:47.845845223+00:00 stderr F I1208 17:55:47.845794 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-edit, uid: 6e8ee66a-d077-4e9a-94fa-f5c170008027]" virtual=false 2025-12-08T17:55:47.854062059+00:00 stderr F I1208 17:55:47.853980 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-view, uid: 00d787a5-b28b-47ae-8ac2-3cd426cb873e]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheuses.monitoring.rhobs, uid: 88a7ba7b-4c8b-413c-9d0d-1ede7a5b58b2]" 2025-12-08T17:55:47.854092209+00:00 stderr F I1208 17:55:47.854035 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-view, uid: 00d787a5-b28b-47ae-8ac2-3cd426cb873e]" virtual=false 2025-12-08T17:55:47.860079554+00:00 stderr F I1208 17:55:47.859987 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-view, uid: 212a098c-9e3b-4f2b-a6fd-e7897c6d9258]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheusagents.monitoring.rhobs","uid":"6834d8a2-d2e3-443e-a82e-85f0494ef495","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.871329172+00:00 stderr F I1208 17:55:47.871246 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-crdview, uid: 7dcc191d-eae9-4ea8-9539-7424ef968563]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheuses.monitoring.rhobs, uid: 88a7ba7b-4c8b-413c-9d0d-1ede7a5b58b2]" 2025-12-08T17:55:47.871365683+00:00 stderr F I1208 17:55:47.871315 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-crdview, uid: 7dcc191d-eae9-4ea8-9539-7424ef968563]" virtual=false 2025-12-08T17:55:47.877637206+00:00 stderr F I1208 17:55:47.877565 1 garbagecollector.go:567] "item 
has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-edit, uid: 6e8ee66a-d077-4e9a-94fa-f5c170008027]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheuses.monitoring.rhobs","uid":"88a7ba7b-4c8b-413c-9d0d-1ede7a5b58b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.881938024+00:00 stderr F I1208 17:55:47.881607 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-admin, uid: aca6f2d2-53bf-4e19-9675-c252952b7f9a]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheuses.monitoring.rhobs","uid":"88a7ba7b-4c8b-413c-9d0d-1ede7a5b58b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.887647970+00:00 stderr F I1208 17:55:47.887575 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-view, uid: 00d787a5-b28b-47ae-8ac2-3cd426cb873e]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheuses.monitoring.rhobs","uid":"88a7ba7b-4c8b-413c-9d0d-1ede7a5b58b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.888440833+00:00 stderr F I1208 17:55:47.888387 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusagents.monitoring.rhobs-v1alpha1-crdview, uid: dcc43d11-0c05-4c73-8de0-397cf0249e59]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheusagents.monitoring.rhobs","uid":"6834d8a2-d2e3-443e-a82e-85f0494ef495","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.898918420+00:00 stderr F I1208 17:55:47.898819 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-admin, uid: 2073dea8-66eb-4f04-b982-351bbedb38ae]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheusrules.monitoring.rhobs, uid: 2169c03d-febf-40e6-b8bd-5f7e207ac47f]" 2025-12-08T17:55:47.898918420+00:00 stderr F I1208 17:55:47.898900 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-admin, uid: 2073dea8-66eb-4f04-b982-351bbedb38ae]" virtual=false 2025-12-08T17:55:47.903059204+00:00 stderr F I1208 17:55:47.902317 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheuses.monitoring.rhobs-v1-crdview, uid: 7dcc191d-eae9-4ea8-9539-7424ef968563]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheuses.monitoring.rhobs","uid":"88a7ba7b-4c8b-413c-9d0d-1ede7a5b58b2","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.904981516+00:00 stderr F I1208 17:55:47.904920 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-admin, uid: 2073dea8-66eb-4f04-b982-351bbedb38ae]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheusrules.monitoring.rhobs","uid":"2169c03d-febf-40e6-b8bd-5f7e207ac47f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.921012526+00:00 stderr F I1208 17:55:47.920928 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-edit, uid: dc117ceb-e207-4d46-bc2b-e423967d4d6a]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheusrules.monitoring.rhobs, uid: 2169c03d-febf-40e6-b8bd-5f7e207ac47f]" 2025-12-08T17:55:47.921012526+00:00 stderr F I1208 17:55:47.920975 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-edit, uid: dc117ceb-e207-4d46-bc2b-e423967d4d6a]" virtual=false 2025-12-08T17:55:47.928215744+00:00 stderr F I1208 17:55:47.926360 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-edit, uid: dc117ceb-e207-4d46-bc2b-e423967d4d6a]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheusrules.monitoring.rhobs","uid":"2169c03d-febf-40e6-b8bd-5f7e207ac47f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.928946094+00:00 stderr F I1208 17:55:47.928842 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-view, uid: 57ecfa98-7cc1-4c00-b741-b727402acc37]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheusrules.monitoring.rhobs, uid: 2169c03d-febf-40e6-b8bd-5f7e207ac47f]" 2025-12-08T17:55:47.929012726+00:00 stderr F I1208 17:55:47.928954 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-view, uid: 57ecfa98-7cc1-4c00-b741-b727402acc37]" virtual=false 2025-12-08T17:55:47.945699263+00:00 stderr F I1208 17:55:47.943480 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-crdview, uid: 2784bd36-37d9-49f6-a861-83f59d1fdd31]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: prometheusrules.monitoring.rhobs, uid: 2169c03d-febf-40e6-b8bd-5f7e207ac47f]" 
2025-12-08T17:55:47.945699263+00:00 stderr F I1208 17:55:47.943528 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-crdview, uid: 2784bd36-37d9-49f6-a861-83f59d1fdd31]" virtual=false 2025-12-08T17:55:47.946548796+00:00 stderr F I1208 17:55:47.946485 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-admin, uid: ffeb1318-cd5e-42eb-bb0e-1a0782563111]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: scrapeconfigs.monitoring.rhobs, uid: 622a968c-0d2a-4eff-864d-7ed4cfc32bfe]" 2025-12-08T17:55:47.946854485+00:00 stderr F I1208 17:55:47.946799 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-admin, uid: ffeb1318-cd5e-42eb-bb0e-1a0782563111]" virtual=false 2025-12-08T17:55:47.953060386+00:00 stderr F I1208 17:55:47.952185 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-view, uid: 57ecfa98-7cc1-4c00-b741-b727402acc37]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheusrules.monitoring.rhobs","uid":"2169c03d-febf-40e6-b8bd-5f7e207ac47f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.959074581+00:00 stderr F I1208 17:55:47.958984 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: prometheusrules.monitoring.rhobs-v1-crdview, uid: 2784bd36-37d9-49f6-a861-83f59d1fdd31]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"prometheusrules.monitoring.rhobs","uid":"2169c03d-febf-40e6-b8bd-5f7e207ac47f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.960403177+00:00 stderr F I1208 17:55:47.960338 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-edit, uid: 0dba8d47-56d9-44e6-8e34-b633d04e6cf2]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: scrapeconfigs.monitoring.rhobs, uid: 622a968c-0d2a-4eff-864d-7ed4cfc32bfe]" 2025-12-08T17:55:47.960426017+00:00 stderr F I1208 17:55:47.960390 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-edit, uid: 0dba8d47-56d9-44e6-8e34-b633d04e6cf2]" virtual=false 2025-12-08T17:55:47.966169685+00:00 stderr F I1208 17:55:47.965661 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-view, uid: 80eec6a4-9ef3-45ff-8f38-fce8f13d8a7f]" 
owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: scrapeconfigs.monitoring.rhobs, uid: 622a968c-0d2a-4eff-864d-7ed4cfc32bfe]" 2025-12-08T17:55:47.966169685+00:00 stderr F I1208 17:55:47.965720 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-view, uid: 80eec6a4-9ef3-45ff-8f38-fce8f13d8a7f]" virtual=false 2025-12-08T17:55:47.969944549+00:00 stderr F I1208 17:55:47.969893 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-crdview, uid: 60fd96c9-b23b-4faa-860a-832069bcbe39]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: scrapeconfigs.monitoring.rhobs, uid: 622a968c-0d2a-4eff-864d-7ed4cfc32bfe]" 2025-12-08T17:55:47.969944549+00:00 stderr F I1208 17:55:47.969927 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-crdview, uid: 60fd96c9-b23b-4faa-860a-832069bcbe39]" virtual=false 2025-12-08T17:55:47.979705326+00:00 stderr F I1208 17:55:47.979348 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-admin, uid: ffeb1318-cd5e-42eb-bb0e-1a0782563111]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"scrapeconfigs.monitoring.rhobs","uid":"622a968c-0d2a-4eff-864d-7ed4cfc32bfe","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:47.981406784+00:00 stderr F I1208 17:55:47.981365 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-admin, uid: 1ddef24a-e14d-4043-967c-4e0f92cec4ad]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: servicemonitors.monitoring.rhobs, uid: 235396a2-44b0-4c09-a3ea-25c067ee9cb0]" 2025-12-08T17:55:47.981406784+00:00 stderr F I1208 17:55:47.981391 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-admin, uid: 1ddef24a-e14d-4043-967c-4e0f92cec4ad]" virtual=false 2025-12-08T17:55:47.990475622+00:00 stderr F I1208 17:55:47.990404 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-edit, uid: 8ada5cae-6c88-4f65-a361-955102c84722]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: servicemonitors.monitoring.rhobs, uid: 235396a2-44b0-4c09-a3ea-25c067ee9cb0]" 2025-12-08T17:55:47.990475622+00:00 stderr F I1208 17:55:47.990429 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-edit, uid: 
8ada5cae-6c88-4f65-a361-955102c84722]" virtual=false 2025-12-08T17:55:47.997993229+00:00 stderr F I1208 17:55:47.997554 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-view, uid: 80eec6a4-9ef3-45ff-8f38-fce8f13d8a7f]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"scrapeconfigs.monitoring.rhobs","uid":"622a968c-0d2a-4eff-864d-7ed4cfc32bfe","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.001937987+00:00 stderr F I1208 17:55:48.001505 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-view, uid: 5dd96797-871e-4ef8-ac74-9fcb0bca8fe3]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: servicemonitors.monitoring.rhobs, uid: 235396a2-44b0-4c09-a3ea-25c067ee9cb0]" 2025-12-08T17:55:48.001937987+00:00 stderr F I1208 17:55:48.001550 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-view, uid: 5dd96797-871e-4ef8-ac74-9fcb0bca8fe3]" virtual=false 2025-12-08T17:55:48.001937987+00:00 stderr F I1208 17:55:48.001812 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-edit, uid: 8ada5cae-6c88-4f65-a361-955102c84722]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"servicemonitors.monitoring.rhobs","uid":"235396a2-44b0-4c09-a3ea-25c067ee9cb0","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.001937987+00:00 stderr F I1208 17:55:48.001843 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-admin, uid: 1ddef24a-e14d-4043-967c-4e0f92cec4ad]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"servicemonitors.monitoring.rhobs","uid":"235396a2-44b0-4c09-a3ea-25c067ee9cb0","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.005659898+00:00 stderr F I1208 17:55:48.005601 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-crdview, uid: 6754764b-8360-4a30-9af2-da48536ada5b]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: servicemonitors.monitoring.rhobs, uid: 235396a2-44b0-4c09-a3ea-25c067ee9cb0]" 2025-12-08T17:55:48.005857324+00:00 stderr F I1208 17:55:48.005832 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-crdview, uid: 6754764b-8360-4a30-9af2-da48536ada5b]" virtual=false 2025-12-08T17:55:48.007347605+00:00 stderr F I1208 17:55:48.007065 1 garbagecollector.go:567] "item has at 
least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-edit, uid: 0dba8d47-56d9-44e6-8e34-b633d04e6cf2]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"scrapeconfigs.monitoring.rhobs","uid":"622a968c-0d2a-4eff-864d-7ed4cfc32bfe","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.011386255+00:00 stderr F I1208 17:55:48.011328 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-view, uid: a67354e0-0e18-4c68-a8e5-5b9670255914]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: thanosqueriers.monitoring.rhobs, uid: b92e85ae-31c7-496d-9cd8-ce86675e9c85]" 2025-12-08T17:55:48.011386255+00:00 stderr F I1208 17:55:48.011363 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-view, uid: a67354e0-0e18-4c68-a8e5-5b9670255914]" virtual=false 2025-12-08T17:55:48.011970702+00:00 stderr F I1208 17:55:48.011714 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: scrapeconfigs.monitoring.rhobs-v1alpha1-crdview, uid: 60fd96c9-b23b-4faa-860a-832069bcbe39]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"scrapeconfigs.monitoring.rhobs","uid":"622a968c-0d2a-4eff-864d-7ed4cfc32bfe","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.018094750+00:00 stderr F I1208 17:55:48.016977 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-crdview, uid: 6754764b-8360-4a30-9af2-da48536ada5b]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"servicemonitors.monitoring.rhobs","uid":"235396a2-44b0-4c09-a3ea-25c067ee9cb0","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.018094750+00:00 stderr F I1208 17:55:48.017031 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicemonitors.monitoring.rhobs-v1-view, uid: 5dd96797-871e-4ef8-ac74-9fcb0bca8fe3]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"servicemonitors.monitoring.rhobs","uid":"235396a2-44b0-4c09-a3ea-25c067ee9cb0","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.023934190+00:00 stderr F I1208 17:55:48.023853 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-view, uid: a67354e0-0e18-4c68-a8e5-5b9670255914]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"thanosqueriers.monitoring.rhobs","uid":"b92e85ae-31c7-496d-9cd8-ce86675e9c85","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.026407338+00:00 stderr F I1208 17:55:48.026349 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-admin, uid: 8c7cfbc1-7c22-4e74-b127-e86a3ad5d17d]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: thanosqueriers.monitoring.rhobs, uid: b92e85ae-31c7-496d-9cd8-ce86675e9c85]" 2025-12-08T17:55:48.026496821+00:00 stderr F I1208 17:55:48.026471 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-admin, uid: 8c7cfbc1-7c22-4e74-b127-e86a3ad5d17d]" virtual=false 2025-12-08T17:55:48.034369217+00:00 stderr F I1208 17:55:48.033240 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-edit, uid: be0e64dc-e632-4f5b-9eae-5bc438bc3508]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: thanosqueriers.monitoring.rhobs, uid: b92e85ae-31c7-496d-9cd8-ce86675e9c85]" 2025-12-08T17:55:48.034369217+00:00 stderr F I1208 17:55:48.033276 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-edit, uid: be0e64dc-e632-4f5b-9eae-5bc438bc3508]" virtual=false 2025-12-08T17:55:48.041760539+00:00 stderr F I1208 17:55:48.041670 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-crdview, uid: 1f5d4f8b-a8a7-4771-8485-ad293c80cd2b]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: thanosqueriers.monitoring.rhobs, uid: b92e85ae-31c7-496d-9cd8-ce86675e9c85]" 2025-12-08T17:55:48.041760539+00:00 stderr F I1208 17:55:48.041728 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-crdview, uid: 1f5d4f8b-a8a7-4771-8485-ad293c80cd2b]" virtual=false 2025-12-08T17:55:48.041760539+00:00 stderr F I1208 17:55:48.041708 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-admin, uid: 8c7cfbc1-7c22-4e74-b127-e86a3ad5d17d]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"thanosqueriers.monitoring.rhobs","uid":"b92e85ae-31c7-496d-9cd8-ce86675e9c85","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.047486026+00:00 stderr F I1208 17:55:48.047424 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" 
item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-edit, uid: be0e64dc-e632-4f5b-9eae-5bc438bc3508]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"thanosqueriers.monitoring.rhobs","uid":"b92e85ae-31c7-496d-9cd8-ce86675e9c85","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.047543408+00:00 stderr F I1208 17:55:48.047446 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-admin, uid: e2290bd3-91a1-462b-923b-f75910193398]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: thanosrulers.monitoring.rhobs, uid: 76ea93db-01e3-4432-be54-86639deceb67]" 2025-12-08T17:55:48.047586389+00:00 stderr F I1208 17:55:48.047568 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-admin, uid: e2290bd3-91a1-462b-923b-f75910193398]" virtual=false 2025-12-08T17:55:48.050002265+00:00 stderr F I1208 17:55:48.049940 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosqueriers.monitoring.rhobs-v1alpha1-crdview, uid: 1f5d4f8b-a8a7-4771-8485-ad293c80cd2b]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"thanosqueriers.monitoring.rhobs","uid":"b92e85ae-31c7-496d-9cd8-ce86675e9c85","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.055667511+00:00 stderr F I1208 17:55:48.055515 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-edit, uid: 9fe6cd0b-fe6a-45ee-8f48-44cc253e6ef4]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: thanosrulers.monitoring.rhobs, uid: 76ea93db-01e3-4432-be54-86639deceb67]" 2025-12-08T17:55:48.055667511+00:00 stderr F I1208 17:55:48.055561 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-edit, uid: 9fe6cd0b-fe6a-45ee-8f48-44cc253e6ef4]" virtual=false 2025-12-08T17:55:48.061620774+00:00 stderr F I1208 17:55:48.061552 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-view, uid: 6f8dd432-a49b-46d7-9fab-5753e60fd2d8]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: thanosrulers.monitoring.rhobs, uid: 76ea93db-01e3-4432-be54-86639deceb67]" 2025-12-08T17:55:48.061743097+00:00 stderr F I1208 17:55:48.061678 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-view, uid: 6f8dd432-a49b-46d7-9fab-5753e60fd2d8]" virtual=false 2025-12-08T17:55:48.076531374+00:00 stderr F I1208 17:55:48.076465 1 graph_builder.go:456] "item references an 
owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-crdview, uid: dd50ed71-e998-482c-862d-0e57de9e2d17]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: thanosrulers.monitoring.rhobs, uid: 76ea93db-01e3-4432-be54-86639deceb67]" 2025-12-08T17:55:48.076624946+00:00 stderr F I1208 17:55:48.076584 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-crdview, uid: dd50ed71-e998-482c-862d-0e57de9e2d17]" virtual=false 2025-12-08T17:55:48.093856609+00:00 stderr F I1208 17:55:48.093766 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-admin, uid: d2fbdde7-a997-4bae-b298-7303d4215e70]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: uiplugins.observability.openshift.io, uid: fcd93eea-249e-4288-a596-2132c90b6fc5]" 2025-12-08T17:55:48.093923051+00:00 stderr F I1208 17:55:48.093836 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-admin, uid: d2fbdde7-a997-4bae-b298-7303d4215e70]" virtual=false 2025-12-08T17:55:48.105722994+00:00 stderr F I1208 17:55:48.105612 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-edit, uid: c51e93e9-1ec0-4cc8-b0c5-aef490dcc62f]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: uiplugins.observability.openshift.io, uid: fcd93eea-249e-4288-a596-2132c90b6fc5]" 2025-12-08T17:55:48.105722994+00:00 stderr F I1208 17:55:48.105664 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-edit, uid: c51e93e9-1ec0-4cc8-b0c5-aef490dcc62f]" virtual=false 2025-12-08T17:55:48.123479721+00:00 stderr F I1208 17:55:48.122479 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-edit, uid: 9fe6cd0b-fe6a-45ee-8f48-44cc253e6ef4]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"thanosrulers.monitoring.rhobs","uid":"76ea93db-01e3-4432-be54-86639deceb67","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.123479721+00:00 stderr F I1208 17:55:48.122519 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-view, uid: 6f8dd432-a49b-46d7-9fab-5753e60fd2d8]" 
owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"thanosrulers.monitoring.rhobs","uid":"76ea93db-01e3-4432-be54-86639deceb67","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.123479721+00:00 stderr F I1208 17:55:48.122557 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-admin, uid: d2fbdde7-a997-4bae-b298-7303d4215e70]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"uiplugins.observability.openshift.io","uid":"fcd93eea-249e-4288-a596-2132c90b6fc5","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.123685238+00:00 stderr F I1208 17:55:48.123624 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-view, uid: 97268596-4178-475b-896f-9f78d765b276]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: uiplugins.observability.openshift.io, uid: fcd93eea-249e-4288-a596-2132c90b6fc5]" 2025-12-08T17:55:48.123774670+00:00 stderr F I1208 17:55:48.123748 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-view, uid: 97268596-4178-475b-896f-9f78d765b276]" virtual=false 2025-12-08T17:55:48.126559327+00:00 stderr F I1208 17:55:48.126505 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-admin, uid: e2290bd3-91a1-462b-923b-f75910193398]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"thanosrulers.monitoring.rhobs","uid":"76ea93db-01e3-4432-be54-86639deceb67","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.127054840+00:00 stderr F I1208 17:55:48.126998 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-edit, uid: c51e93e9-1ec0-4cc8-b0c5-aef490dcc62f]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"uiplugins.observability.openshift.io","uid":"fcd93eea-249e-4288-a596-2132c90b6fc5","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.134281828+00:00 stderr F I1208 17:55:48.129833 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-view, uid: 97268596-4178-475b-896f-9f78d765b276]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"uiplugins.observability.openshift.io","uid":"fcd93eea-249e-4288-a596-2132c90b6fc5","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.134281828+00:00 stderr F I1208 17:55:48.130008 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" 
logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: thanosrulers.monitoring.rhobs-v1-crdview, uid: dd50ed71-e998-482c-862d-0e57de9e2d17]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"thanosrulers.monitoring.rhobs","uid":"76ea93db-01e3-4432-be54-86639deceb67","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:48.146782981+00:00 stderr F I1208 17:55:48.143938 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-crdview, uid: ca4ea4fe-3d76-41e8-a849-da85ca516534]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: uiplugins.observability.openshift.io, uid: fcd93eea-249e-4288-a596-2132c90b6fc5]" 2025-12-08T17:55:48.146782981+00:00 stderr F I1208 17:55:48.144007 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-crdview, uid: ca4ea4fe-3d76-41e8-a849-da85ca516534]" virtual=false 2025-12-08T17:55:48.167237462+00:00 stderr F I1208 17:55:48.167169 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: uiplugins.observability.openshift.io-v1alpha1-crdview, uid: ca4ea4fe-3d76-41e8-a849-da85ca516534]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"uiplugins.observability.openshift.io","uid":"fcd93eea-249e-4288-a596-2132c90b6fc5","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:55:49.716770111+00:00 stderr F I1208 17:55:49.716685 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:55:49.720584896+00:00 stderr F I1208 17:55:49.720531 1 replica_set.go:615] "Slow-start failure. 
Skipping creation of pods, decrementing expectations" logger="replicaset-controller" podsSkipped=1 kind="ReplicaSet" replicaSet="cert-manager/cert-manager-858d87f86b" 2025-12-08T17:55:49.720611147+00:00 stderr F E1208 17:55:49.720579 1 replica_set.go:562] "Unhandled Error" err="sync \"cert-manager/cert-manager-858d87f86b\" failed with pods \"cert-manager-858d87f86b-\" is forbidden: error looking up service account cert-manager/cert-manager: serviceaccount \"cert-manager\" not found" logger="UnhandledError" 2025-12-08T17:55:52.328075755+00:00 stderr F I1208 17:55:52.327996 1 resource_quota_controller.go:476] "syncing resource quota controller with updated resources from discovery" logger="resourcequota-controller" diff="added: [acme.cert-manager.io/v1, Resource=challenges acme.cert-manager.io/v1, Resource=orders cert-manager.io/v1, Resource=certificaterequests cert-manager.io/v1, Resource=certificates cert-manager.io/v1, Resource=issuers operator.openshift.io/v1alpha1, Resource=istiocsrs], removed: []" 2025-12-08T17:55:52.328210568+00:00 stderr F I1208 17:55:52.328152 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="orders.acme.cert-manager.io" 2025-12-08T17:55:52.328210568+00:00 stderr F I1208 17:55:52.328200 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="challenges.acme.cert-manager.io" 2025-12-08T17:55:52.328298211+00:00 stderr F I1208 17:55:52.328267 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificates.cert-manager.io" 2025-12-08T17:55:52.328349092+00:00 stderr F I1208 17:55:52.328315 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificaterequests.cert-manager.io" 2025-12-08T17:55:52.328349092+00:00 stderr F I1208 17:55:52.328338 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="issuers.cert-manager.io" 2025-12-08T17:55:52.328407924+00:00 stderr F I1208 17:55:52.328361 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="istiocsrs.operator.openshift.io" 2025-12-08T17:55:52.328923258+00:00 stderr F I1208 17:55:52.328536 1 shared_informer.go:350] "Waiting for caches to sync" controller="resource quota" 2025-12-08T17:55:52.330976574+00:00 stderr F I1208 17:55:52.330869 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:52.331280183+00:00 stderr F I1208 17:55:52.331221 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:52.349407310+00:00 stderr F I1208 17:55:52.349321 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:52.364919236+00:00 stderr F I1208 17:55:52.364841 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:52.392526283+00:00 stderr F I1208 17:55:52.392413 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 
2025-12-08T17:55:52.411487814+00:00 stderr F I1208 17:55:52.411415 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:52.429678063+00:00 stderr F I1208 17:55:52.429594 1 shared_informer.go:357] "Caches are synced" controller="resource quota" 2025-12-08T17:55:52.429678063+00:00 stderr F I1208 17:55:52.429631 1 resource_quota_controller.go:502] "synced quota controller" logger="resourcequota-controller" 2025-12-08T17:55:52.455188813+00:00 stderr F I1208 17:55:52.455032 1 garbagecollector.go:203] "syncing garbage collector with updated resources from discovery" logger="garbage-collector-controller" diff="added: [acme.cert-manager.io/v1, Resource=challenges acme.cert-manager.io/v1, Resource=orders cert-manager.io/v1, Resource=certificaterequests cert-manager.io/v1, Resource=certificates cert-manager.io/v1, Resource=clusterissuers cert-manager.io/v1, Resource=issuers operator.openshift.io/v1alpha1, Resource=certmanagers operator.openshift.io/v1alpha1, Resource=istiocsrs], removed: []" 2025-12-08T17:55:52.467923842+00:00 stderr F I1208 17:55:52.466936 1 shared_informer.go:350] "Waiting for caches to sync" controller="garbage collector" 2025-12-08T17:55:52.468930620+00:00 stderr F I1208 17:55:52.468859 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:52.493836084+00:00 stderr F I1208 17:55:52.493761 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:55:52.567366741+00:00 stderr F I1208 17:55:52.567302 1 shared_informer.go:357] "Caches are synced" controller="garbage collector" 2025-12-08T17:55:52.567366741+00:00 stderr F I1208 17:55:52.567325 1 garbagecollector.go:235] "synced garbage collector" logger="garbage-collector-controller" 2025-12-08T17:55:59.964133737+00:00 stderr F I1208 17:55:59.963954 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="cert-manager/cert-manager-858d87f86b" need=1 creating=1 2025-12-08T17:56:27.924931676+00:00 stderr F I1208 17:56:27.924819 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="0s" 2025-12-08T17:56:27.925118391+00:00 stderr F I1208 17:56:27.925060 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" nextSyncIn="9m59.99999551s" 2025-12-08T17:56:27.940107883+00:00 stderr F I1208 17:56:27.940032 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:27.946860218+00:00 stderr F I1208 17:56:27.946184 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:27.947996079+00:00 stderr F I1208 17:56:27.947872 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:27.960213455+00:00 stderr F I1208 17:56:27.960027 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:28.497729391+00:00 stderr F I1208 17:56:28.495627 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:28.713253522+00:00 stderr F I1208 17:56:28.713181 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="0s" 2025-12-08T17:56:28.713350705+00:00 stderr F I1208 17:56:28.713312 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" nextSyncIn="9m59.99999523s" 2025-12-08T17:56:28.723201391+00:00 stderr F I1208 17:56:28.723121 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:28.726007444+00:00 stderr F I1208 17:56:28.725700 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:28.732075662+00:00 stderr F I1208 17:56:28.731993 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:28.738597943+00:00 stderr F I1208 17:56:28.738010 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:28.948193090+00:00 stderr F I1208 17:56:28.940917 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" nextSyncIn="9m58.059099261s" 2025-12-08T17:56:29.212726429+00:00 stderr F I1208 17:56:29.212634 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:29.293804594+00:00 stderr F I1208 17:56:29.293632 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:29.507561879+00:00 stderr F I1208 17:56:29.507512 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="0s" 2025-12-08T17:56:29.507661881+00:00 stderr F I1208 17:56:29.507635 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" nextSyncIn="9m59.99999595s" 2025-12-08T17:56:29.520830795+00:00 stderr F I1208 17:56:29.520765 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:29.523488684+00:00 stderr F I1208 17:56:29.523351 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:29.527003896+00:00 stderr F I1208 17:56:29.526969 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:29.531726589+00:00 stderr F I1208 17:56:29.531676 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:29.726392086+00:00 stderr F I1208 17:56:29.723967 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" nextSyncIn="9m58.276045608s" 2025-12-08T17:56:29.782864639+00:00 stderr F I1208 17:56:29.782790 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:30.178526349+00:00 stderr F I1208 17:56:30.178068 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:30.213350417+00:00 stderr F I1208 17:56:30.213271 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" nextSyncIn="9m56.786747525s" 2025-12-08T17:56:30.521729490+00:00 stderr F I1208 17:56:30.521651 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" nextSyncIn="9m58.478361172s" 2025-12-08T17:56:30.709408795+00:00 stderr F I1208 17:56:30.709329 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:30.783176600+00:00 stderr F I1208 17:56:30.783110 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" nextSyncIn="9m57.216901102s" 2025-12-08T17:56:31.228617098+00:00 stderr F I1208 17:56:31.228513 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:31.248508777+00:00 stderr F I1208 17:56:31.248413 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:31.270430528+00:00 stderr F I1208 17:56:31.270329 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:31.710531327+00:00 stderr F I1208 17:56:31.710448 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" nextSyncIn="9m57.289571736s" 2025-12-08T17:56:32.229655946+00:00 stderr F I1208 17:56:32.229570 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" nextSyncIn="9m55.770455786s" 2025-12-08T17:56:32.276162179+00:00 stderr F I1208 17:56:32.271556 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" nextSyncIn="9m54.728460741s" 2025-12-08T17:56:33.271226173+00:00 stderr F I1208 17:56:33.271058 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:33.293352230+00:00 stderr F I1208 17:56:33.293295 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:33.318187117+00:00 stderr F I1208 17:56:33.318114 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:34.272325713+00:00 stderr F I1208 17:56:34.272247 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" nextSyncIn="9m53.727763379s" 2025-12-08T17:56:34.294419859+00:00 stderr F I1208 17:56:34.294243 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" nextSyncIn="9m52.705778546s" 2025-12-08T17:56:34.295701752+00:00 stderr F I1208 17:56:34.295623 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:34.316445224+00:00 stderr F I1208 17:56:34.315553 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:34.319407611+00:00 stderr F I1208 17:56:34.319370 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" nextSyncIn="9m54.68064025s" 2025-12-08T17:56:34.333289453+00:00 stderr F I1208 17:56:34.332857 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:35.316996170+00:00 stderr F I1208 17:56:35.316845 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" nextSyncIn="9m52.683165534s" 2025-12-08T17:56:35.335745359+00:00 stderr F I1208 17:56:35.334405 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" nextSyncIn="9m51.665621247s" 2025-12-08T17:56:35.536646518+00:00 stderr F I1208 17:56:35.536531 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:35.623345510+00:00 stderr F I1208 17:56:35.623288 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:35.669976487+00:00 stderr F I1208 17:56:35.669398 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:36.537043611+00:00 stderr F I1208 17:56:36.536923 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" nextSyncIn="9m52.463094223s" 2025-12-08T17:56:36.545113171+00:00 stderr F I1208 17:56:36.545065 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:36.560887503+00:00 stderr F I1208 17:56:36.560791 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf" delay="1s" 2025-12-08T17:56:36.624375249+00:00 stderr F I1208 17:56:36.624290 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. 
Will sync this job again" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" nextSyncIn="9m50.375733324s" 2025-12-08T17:56:36.629126812+00:00 stderr F I1208 17:56:36.629062 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:36.637851250+00:00 stderr F I1208 17:56:36.637763 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113bee73" delay="1s" 2025-12-08T17:56:36.669919937+00:00 stderr F I1208 17:56:36.669838 1 job_controller.go:945] "Job has activeDeadlineSeconds configuration. Will sync this job again" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" nextSyncIn="9m51.330176576s" 2025-12-08T17:56:36.677014321+00:00 stderr F I1208 17:56:36.676948 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:36.691412678+00:00 stderr F I1208 17:56:36.690768 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682cd199" delay="1s" 2025-12-08T17:56:39.573258981+00:00 stderr F I1208 17:56:39.573165 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/interconnect-operator-78b9bd8798" need=1 creating=1 2025-12-08T17:56:39.586860116+00:00 stderr F I1208 17:56:39.585931 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/interconnect-operator" err="Operation cannot be fulfilled on deployments.apps \"interconnect-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:56:39.680516249+00:00 stderr F I1208 17:56:39.680432 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/interconnect-operator" err="Operation cannot be fulfilled on deployments.apps \"interconnect-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:56:41.023641380+00:00 stderr F I1208 17:56:41.023568 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/service-telemetry-operator-79647f8775" need=1 creating=1 2025-12-08T17:56:41.041758943+00:00 stderr F I1208 17:56:41.041670 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/service-telemetry-operator" err="Operation cannot be fulfilled on deployments.apps \"service-telemetry-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:56:42.301473278+00:00 stderr F I1208 17:56:42.300679 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/smart-gateway-operator-5cd794ff55" need=1 creating=1 2025-12-08T17:56:42.324479538+00:00 stderr F I1208 17:56:42.324301 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/smart-gateway-operator" err="Operation cannot be fulfilled on deployments.apps \"smart-gateway-operator\": the object has been 
modified; please apply your changes to the latest version and try again" 2025-12-08T17:56:52.449797537+00:00 stderr F I1208 17:56:52.449736 1 resource_quota_controller.go:476] "syncing resource quota controller with updated resources from discovery" logger="resourcequota-controller" diff="added: [infra.watch/v1beta1, Resource=servicetelemetrys interconnectedcloud.github.io/v1alpha1, Resource=interconnects smartgateway.infra.watch/v2, Resource=smartgateways], removed: []" 2025-12-08T17:56:52.449967991+00:00 stderr F I1208 17:56:52.449937 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="interconnects.interconnectedcloud.github.io" 2025-12-08T17:56:52.450006062+00:00 stderr F I1208 17:56:52.449992 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="servicetelemetrys.infra.watch" 2025-12-08T17:56:52.450063684+00:00 stderr F I1208 17:56:52.450040 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="smartgateways.smartgateway.infra.watch" 2025-12-08T17:56:52.450189517+00:00 stderr F I1208 17:56:52.450161 1 shared_informer.go:350] "Waiting for caches to sync" controller="resource quota" 2025-12-08T17:56:52.451857371+00:00 stderr F I1208 17:56:52.451639 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:56:52.451857371+00:00 stderr F I1208 17:56:52.451699 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:56:52.452680562+00:00 stderr F I1208 17:56:52.452656 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="k8s.io/client-go/metadata/metadatainformer/informer.go:138" 2025-12-08T17:56:52.551255163+00:00 stderr F I1208 17:56:52.551176 1 shared_informer.go:357] "Caches are synced" controller="resource quota" 2025-12-08T17:56:52.551255163+00:00 stderr F I1208 17:56:52.551220 1 resource_quota_controller.go:502] "synced quota controller" logger="resourcequota-controller" 2025-12-08T17:56:52.597501930+00:00 stderr F I1208 17:56:52.597426 1 garbagecollector.go:203] "syncing garbage collector with updated resources from discovery" logger="garbage-collector-controller" diff="added: [infra.watch/v1beta1, Resource=servicetelemetrys interconnectedcloud.github.io/v1alpha1, Resource=interconnects smartgateway.infra.watch/v2, Resource=smartgateways], removed: []" 2025-12-08T17:56:52.618081946+00:00 stderr F I1208 17:56:52.618022 1 shared_informer.go:350] "Waiting for caches to sync" controller="garbage collector" 2025-12-08T17:56:52.618168968+00:00 stderr F I1208 17:56:52.618147 1 shared_informer.go:357] "Caches are synced" controller="garbage collector" 2025-12-08T17:56:52.618168968+00:00 stderr F I1208 17:56:52.618158 1 garbagecollector.go:235] "synced garbage collector" logger="garbage-collector-controller" 2025-12-08T17:57:06.965030042+00:00 stderr F I1208 17:57:06.961585 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-admin, uid: 3a19ace4-c74b-4883-9115-e32b1010b096]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: 
servicetelemetrys.infra.watch, uid: 5f3f70c7-59e5-4f4d-b56d-3bf33d270fd8]" 2025-12-08T17:57:06.965030042+00:00 stderr F I1208 17:57:06.962429 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-admin, uid: c6d0c0e9-461f-4344-8a37-eebd56d60f0c]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: interconnects.interconnectedcloud.github.io, uid: a76186de-2693-47e1-81be-94ec4872fb1f]" 2025-12-08T17:57:06.965030042+00:00 stderr F I1208 17:57:06.962468 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-admin, uid: 3a19ace4-c74b-4883-9115-e32b1010b096]" virtual=false 2025-12-08T17:57:06.965030042+00:00 stderr F I1208 17:57:06.962824 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-admin, uid: c6d0c0e9-461f-4344-8a37-eebd56d60f0c]" virtual=false 2025-12-08T17:57:06.965030042+00:00 stderr F I1208 17:57:06.963711 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-edit, uid: 686850fb-a013-4dcf-994b-bde77b7bde05]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: interconnects.interconnectedcloud.github.io, uid: a76186de-2693-47e1-81be-94ec4872fb1f]" 2025-12-08T17:57:06.965030042+00:00 stderr F I1208 17:57:06.963770 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-edit, uid: 686850fb-a013-4dcf-994b-bde77b7bde05]" virtual=false 2025-12-08T17:57:06.965030042+00:00 stderr F I1208 17:57:06.964154 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-edit, uid: f38a1ed4-3610-415c-9ce7-c2b7f3b1f28d]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: servicetelemetrys.infra.watch, uid: 5f3f70c7-59e5-4f4d-b56d-3bf33d270fd8]" 2025-12-08T17:57:06.965030042+00:00 stderr F I1208 17:57:06.964189 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-edit, uid: f38a1ed4-3610-415c-9ce7-c2b7f3b1f28d]" virtual=false 2025-12-08T17:57:06.966750278+00:00 stderr F I1208 17:57:06.966540 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-view, uid: 7821ab7d-2564-4924-ad71-db7e02b97ffc]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: interconnects.interconnectedcloud.github.io, uid: 
a76186de-2693-47e1-81be-94ec4872fb1f]" 2025-12-08T17:57:06.966750278+00:00 stderr F I1208 17:57:06.966613 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-view, uid: 7821ab7d-2564-4924-ad71-db7e02b97ffc]" virtual=false 2025-12-08T17:57:06.971903542+00:00 stderr F I1208 17:57:06.971793 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-edit, uid: 686850fb-a013-4dcf-994b-bde77b7bde05]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"interconnects.interconnectedcloud.github.io","uid":"a76186de-2693-47e1-81be-94ec4872fb1f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:06.971944883+00:00 stderr F I1208 17:57:06.971919 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-admin, uid: 3a19ace4-c74b-4883-9115-e32b1010b096]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"servicetelemetrys.infra.watch","uid":"5f3f70c7-59e5-4f4d-b56d-3bf33d270fd8","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:06.972241021+00:00 stderr F I1208 17:57:06.972170 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-edit, uid: f38a1ed4-3610-415c-9ce7-c2b7f3b1f28d]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"servicetelemetrys.infra.watch","uid":"5f3f70c7-59e5-4f4d-b56d-3bf33d270fd8","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:06.975375543+00:00 stderr F I1208 17:57:06.974845 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-admin, uid: c6d0c0e9-461f-4344-8a37-eebd56d60f0c]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"interconnects.interconnectedcloud.github.io","uid":"a76186de-2693-47e1-81be-94ec4872fb1f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:06.977263952+00:00 stderr F I1208 17:57:06.977205 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-view, uid: 7821ab7d-2564-4924-ad71-db7e02b97ffc]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"interconnects.interconnectedcloud.github.io","uid":"a76186de-2693-47e1-81be-94ec4872fb1f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:06.982934300+00:00 stderr F I1208 17:57:06.979394 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, 
namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-crdview, uid: 46e24f1f-79b0-41f3-8c4c-b2444160084d]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: interconnects.interconnectedcloud.github.io, uid: a76186de-2693-47e1-81be-94ec4872fb1f]" 2025-12-08T17:57:06.982934300+00:00 stderr F I1208 17:57:06.979499 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-crdview, uid: 46e24f1f-79b0-41f3-8c4c-b2444160084d]" virtual=false 2025-12-08T17:57:06.982934300+00:00 stderr F I1208 17:57:06.980568 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-view, uid: 9d76e298-2307-45db-9ad4-d5ee47a27c7c]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: servicetelemetrys.infra.watch, uid: 5f3f70c7-59e5-4f4d-b56d-3bf33d270fd8]" 2025-12-08T17:57:06.982934300+00:00 stderr F I1208 17:57:06.980631 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-view, uid: 9d76e298-2307-45db-9ad4-d5ee47a27c7c]" virtual=false 2025-12-08T17:57:06.983257588+00:00 stderr F I1208 17:57:06.983194 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-crdview, uid: 76c31987-2f34-4909-8e4f-4bffb0ce7f50]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: servicetelemetrys.infra.watch, uid: 5f3f70c7-59e5-4f4d-b56d-3bf33d270fd8]" 2025-12-08T17:57:06.983356731+00:00 stderr F I1208 17:57:06.983263 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-crdview, uid: 76c31987-2f34-4909-8e4f-4bffb0ce7f50]" virtual=false 2025-12-08T17:57:07.004302687+00:00 stderr F I1208 17:57:07.003656 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: interconnects.interconnectedcloud.github.io-v1alpha1-crdview, uid: 46e24f1f-79b0-41f3-8c4c-b2444160084d]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"interconnects.interconnectedcloud.github.io","uid":"a76186de-2693-47e1-81be-94ec4872fb1f","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:07.004302687+00:00 stderr F I1208 17:57:07.003824 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-view, uid: 9d76e298-2307-45db-9ad4-d5ee47a27c7c]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"servicetelemetrys.infra.watch","uid":"5f3f70c7-59e5-4f4d-b56d-3bf33d270fd8","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:07.005258392+00:00 stderr F I1208 17:57:07.005198 1 
garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: servicetelemetrys.infra.watch-v1beta1-crdview, uid: 76c31987-2f34-4909-8e4f-4bffb0ce7f50]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"servicetelemetrys.infra.watch","uid":"5f3f70c7-59e5-4f4d-b56d-3bf33d270fd8","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:07.774634689+00:00 stderr F I1208 17:57:07.774539 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-admin, uid: 7ab4ef7c-e8c9-4174-b5e8-f5f317d8dbfc]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: smartgateways.smartgateway.infra.watch, uid: af6db45c-3bcb-43bd-99d5-a42a7214b357]" 2025-12-08T17:57:07.774692860+00:00 stderr F I1208 17:57:07.774616 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-admin, uid: 7ab4ef7c-e8c9-4174-b5e8-f5f317d8dbfc]" virtual=false 2025-12-08T17:57:07.778393727+00:00 stderr F I1208 17:57:07.778328 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-edit, uid: f3135527-7b78-47fe-8629-0a8dc4365d70]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: smartgateways.smartgateway.infra.watch, uid: af6db45c-3bcb-43bd-99d5-a42a7214b357]" 2025-12-08T17:57:07.778426148+00:00 stderr F I1208 17:57:07.778383 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-edit, uid: f3135527-7b78-47fe-8629-0a8dc4365d70]" virtual=false 2025-12-08T17:57:07.782619027+00:00 stderr F I1208 17:57:07.782550 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-view, uid: 5f6d1d19-c8a3-4367-9e95-960dbb5e50cb]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: smartgateways.smartgateway.infra.watch, uid: af6db45c-3bcb-43bd-99d5-a42a7214b357]" 2025-12-08T17:57:07.782619027+00:00 stderr F I1208 17:57:07.782567 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-admin, uid: 7ab4ef7c-e8c9-4174-b5e8-f5f317d8dbfc]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"smartgateways.smartgateway.infra.watch","uid":"af6db45c-3bcb-43bd-99d5-a42a7214b357","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:07.782658688+00:00 stderr F I1208 17:57:07.782606 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: 
smartgateways.smartgateway.infra.watch-v2-view, uid: 5f6d1d19-c8a3-4367-9e95-960dbb5e50cb]" virtual=false 2025-12-08T17:57:07.788656974+00:00 stderr F I1208 17:57:07.788574 1 graph_builder.go:456] "item references an owner with coordinates that do not match the observed identity" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-crdview, uid: 14c24e1a-ef23-4971-8951-fc58e864d269]" owner="[apiextensions.k8s.io/v1/CustomResourceDefinition, namespace: , name: smartgateways.smartgateway.infra.watch, uid: af6db45c-3bcb-43bd-99d5-a42a7214b357]" 2025-12-08T17:57:07.788691965+00:00 stderr F I1208 17:57:07.788637 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-crdview, uid: 14c24e1a-ef23-4971-8951-fc58e864d269]" virtual=false 2025-12-08T17:57:07.788806168+00:00 stderr F I1208 17:57:07.788735 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-edit, uid: f3135527-7b78-47fe-8629-0a8dc4365d70]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"smartgateways.smartgateway.infra.watch","uid":"af6db45c-3bcb-43bd-99d5-a42a7214b357","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:07.789031914+00:00 stderr F I1208 17:57:07.788975 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-view, uid: 5f6d1d19-c8a3-4367-9e95-960dbb5e50cb]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"smartgateways.smartgateway.infra.watch","uid":"af6db45c-3bcb-43bd-99d5-a42a7214b357","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:07.801818048+00:00 stderr F I1208 17:57:07.800824 1 garbagecollector.go:567] "item has at least one existing owner, will not garbage collect" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRole, namespace: , name: smartgateways.smartgateway.infra.watch-v2-crdview, uid: 14c24e1a-ef23-4971-8951-fc58e864d269]" owner=[{"apiVersion":"apiextensions.k8s.io/v1","kind":"customresourcedefinition","name":"smartgateways.smartgateway.infra.watch","uid":"af6db45c-3bcb-43bd-99d5-a42a7214b357","controller":false,"blockOwnerDeletion":false}] 2025-12-08T17:57:28.527462464+00:00 stderr F I1208 17:57:28.526836 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/default-interconnect-55bf8d5cb" need=1 creating=1 2025-12-08T17:57:28.543106322+00:00 stderr F I1208 17:57:28.540659 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/default-interconnect" err="Operation cannot be fulfilled on deployments.apps \"default-interconnect\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:37.564459445+00:00 stderr F E1208 17:57:37.563493 1 selinux_warning_controller.go:383] "Unhandled Error" err="service-telemetry/prometheus-default-0 failed with: error processing PVC 
\"service-telemetry\"/\"prometheus-default-db-prometheus-default-0\": PVC service-telemetry/prometheus-default-db-prometheus-default-0 has non-bound phase (\"Pending\") or empty pvc.Spec.VolumeName (\"\")" logger="UnhandledError" 2025-12-08T17:57:37.571629561+00:00 stderr F E1208 17:57:37.571463 1 selinux_warning_controller.go:383] "Unhandled Error" err="service-telemetry/prometheus-default-0 failed with: error processing PVC \"service-telemetry\"/\"prometheus-default-db-prometheus-default-0\": PVC service-telemetry/prometheus-default-db-prometheus-default-0 has non-bound phase (\"Pending\") or empty pvc.Spec.VolumeName (\"\")" logger="UnhandledError" 2025-12-08T17:57:37.583401565+00:00 stderr F E1208 17:57:37.583344 1 selinux_warning_controller.go:383] "Unhandled Error" err="service-telemetry/prometheus-default-0 failed with: error processing PVC \"service-telemetry\"/\"prometheus-default-db-prometheus-default-0\": PVC service-telemetry/prometheus-default-db-prometheus-default-0 has non-bound phase (\"Pending\") or empty pvc.Spec.VolumeName (\"\")" logger="UnhandledError" 2025-12-08T17:57:37.587862440+00:00 stderr F E1208 17:57:37.587775 1 selinux_warning_controller.go:383] "Unhandled Error" err="service-telemetry/prometheus-default-0 failed with: error processing PVC \"service-telemetry\"/\"prometheus-default-db-prometheus-default-0\": PVC service-telemetry/prometheus-default-db-prometheus-default-0 has non-bound phase (\"Pending\") or empty pvc.Spec.VolumeName (\"\")" logger="UnhandledError" 2025-12-08T17:57:37.591948475+00:00 stderr F I1208 17:57:37.591848 1 pv_controller.go:935] "Volume entered phase" logger="persistentvolume-binder-controller" volumeName="pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6" phase="Bound" 2025-12-08T17:57:37.592034138+00:00 stderr F I1208 17:57:37.592011 1 pv_controller.go:1071] "Volume bound to claim" logger="persistentvolume-binder-controller" PVC="service-telemetry/prometheus-default-db-prometheus-default-0" volumeName="pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6" 2025-12-08T17:57:37.596632847+00:00 stderr F E1208 17:57:37.596585 1 selinux_warning_controller.go:383] "Unhandled Error" err="service-telemetry/prometheus-default-0 failed with: error processing PVC \"service-telemetry\"/\"prometheus-default-db-prometheus-default-0\": PVC service-telemetry/prometheus-default-db-prometheus-default-0 has non-bound phase (\"Pending\") or empty pvc.Spec.VolumeName (\"pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6\")" logger="UnhandledError" 2025-12-08T17:57:37.602735684+00:00 stderr F I1208 17:57:37.602231 1 pv_controller.go:876] "Claim entered phase" logger="persistentvolume-binder-controller" PVC="service-telemetry/prometheus-default-db-prometheus-default-0" phase="Bound" 2025-12-08T17:57:46.469044580+00:00 stderr F E1208 17:57:46.468986 1 stateful_set.go:438] "Unhandled Error" err="error syncing StatefulSet service-telemetry/alertmanager-default, requeuing: pods \"alertmanager-default-0\" is forbidden: error looking up service account service-telemetry/alertmanager-stf: serviceaccount \"alertmanager-stf\" not found" logger="UnhandledError" 2025-12-08T17:57:46.479274464+00:00 stderr F E1208 17:57:46.479204 1 stateful_set.go:438] "Unhandled Error" err="error syncing StatefulSet service-telemetry/alertmanager-default, requeuing: pods \"alertmanager-default-0\" is forbidden: error looking up service account service-telemetry/alertmanager-stf: serviceaccount \"alertmanager-stf\" not found" logger="UnhandledError" 2025-12-08T17:57:46.488940364+00:00 stderr F E1208 17:57:46.488888 
1 stateful_set.go:438] "Unhandled Error" err="error syncing StatefulSet service-telemetry/alertmanager-default, requeuing: pods \"alertmanager-default-0\" is forbidden: error looking up service account service-telemetry/alertmanager-stf: serviceaccount \"alertmanager-stf\" not found" logger="UnhandledError" 2025-12-08T17:57:46.499872076+00:00 stderr F E1208 17:57:46.499824 1 stateful_set.go:438] "Unhandled Error" err="error syncing StatefulSet service-telemetry/alertmanager-default, requeuing: pods \"alertmanager-default-0\" is forbidden: error looking up service account service-telemetry/alertmanager-stf: serviceaccount \"alertmanager-stf\" not found" logger="UnhandledError" 2025-12-08T17:57:46.548817191+00:00 stderr F E1208 17:57:46.548760 1 stateful_set.go:438] "Unhandled Error" err="error syncing StatefulSet service-telemetry/alertmanager-default, requeuing: pods \"alertmanager-default-0\" is forbidden: error looking up service account service-telemetry/alertmanager-stf: serviceaccount \"alertmanager-stf\" not found" logger="UnhandledError" 2025-12-08T17:57:46.641805875+00:00 stderr F E1208 17:57:46.641664 1 stateful_set.go:438] "Unhandled Error" err="error syncing StatefulSet service-telemetry/alertmanager-default, requeuing: pods \"alertmanager-default-0\" is forbidden: error looking up service account service-telemetry/alertmanager-stf: serviceaccount \"alertmanager-stf\" not found" logger="UnhandledError" 2025-12-08T17:57:46.653298122+00:00 stderr F E1208 17:57:46.653224 1 stateful_set.go:438] "Unhandled Error" err="error syncing StatefulSet service-telemetry/alertmanager-default, requeuing: pods \"alertmanager-default-0\" is forbidden: error looking up service account service-telemetry/alertmanager-stf: serviceaccount \"alertmanager-stf\" not found" logger="UnhandledError" 2025-12-08T17:57:46.811860849+00:00 stderr F E1208 17:57:46.811765 1 stateful_set.go:438] "Unhandled Error" err="error syncing StatefulSet service-telemetry/alertmanager-default, requeuing: pods \"alertmanager-default-0\" is forbidden: error looking up service account service-telemetry/alertmanager-stf: serviceaccount \"alertmanager-stf\" not found" logger="UnhandledError" 2025-12-08T17:57:47.464562629+00:00 stderr F E1208 17:57:47.464483 1 stateful_set.go:438] "Unhandled Error" err="error syncing StatefulSet service-telemetry/alertmanager-default, requeuing: pods \"alertmanager-default-0\" is forbidden: error looking up service account service-telemetry/alertmanager-stf: serviceaccount \"alertmanager-stf\" not found" logger="UnhandledError" 2025-12-08T17:57:48.422424946+00:00 stderr F I1208 17:57:48.422359 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/default-snmp-webhook-6774d8dfbc" need=1 creating=1 2025-12-08T17:57:48.437594748+00:00 stderr F I1208 17:57:48.437510 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/default-snmp-webhook" err="Operation cannot be fulfilled on deployments.apps \"default-snmp-webhook\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:57:48.757372943+00:00 stderr F E1208 17:57:48.757288 1 stateful_set.go:438] "Unhandled Error" err="error syncing StatefulSet service-telemetry/alertmanager-default, requeuing: pods \"alertmanager-default-0\" is forbidden: error looking up service account service-telemetry/alertmanager-stf: serviceaccount \"alertmanager-stf\" not found" logger="UnhandledError" 
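The repeated StatefulSet sync failures above appear transient: the error says the alertmanager-stf ServiceAccount could not be found when the alertmanager-default StatefulSet first reconciled, which typically means the operator had not yet created it, and the PVC warnings that follow clear once the volume reaches the Bound phase. A minimal sketch of checks that could confirm this against the same namespace (these commands are illustrative and are not part of the captured CI job output; the object names are taken from the log entries above):

    # Illustrative checks only -- not part of the CI job log above.
    # Confirm the ServiceAccount the StatefulSet complained about now exists.
    oc -n service-telemetry get serviceaccount alertmanager-stf
    # Confirm the alertmanager and prometheus PVCs reached the Bound phase.
    oc -n service-telemetry get pvc
    # Confirm the StatefulSet eventually rolled out once its dependencies existed.
    oc -n service-telemetry rollout status statefulset/alertmanager-default

If the ServiceAccount never appears, the errors persist and the rollout check above would hang; in this run the errors stop shortly after 17:57:48, consistent with the dependency being created a few seconds later.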
2025-12-08T17:57:51.335888056+00:00 stderr F E1208 17:57:51.335792 1 selinux_warning_controller.go:383] "Unhandled Error" err="service-telemetry/alertmanager-default-0 failed with: error processing PVC \"service-telemetry\"/\"alertmanager-default-db-alertmanager-default-0\": PVC service-telemetry/alertmanager-default-db-alertmanager-default-0 has non-bound phase (\"Pending\") or empty pvc.Spec.VolumeName (\"\")" logger="UnhandledError" 2025-12-08T17:57:51.342570809+00:00 stderr F E1208 17:57:51.342508 1 selinux_warning_controller.go:383] "Unhandled Error" err="service-telemetry/alertmanager-default-0 failed with: error processing PVC \"service-telemetry\"/\"alertmanager-default-db-alertmanager-default-0\": PVC service-telemetry/alertmanager-default-db-alertmanager-default-0 has non-bound phase (\"Pending\") or empty pvc.Spec.VolumeName (\"\")" logger="UnhandledError" 2025-12-08T17:57:51.354599690+00:00 stderr F E1208 17:57:51.354538 1 selinux_warning_controller.go:383] "Unhandled Error" err="service-telemetry/alertmanager-default-0 failed with: error processing PVC \"service-telemetry\"/\"alertmanager-default-db-alertmanager-default-0\": PVC service-telemetry/alertmanager-default-db-alertmanager-default-0 has non-bound phase (\"Pending\") or empty pvc.Spec.VolumeName (\"\")" logger="UnhandledError" 2025-12-08T17:57:51.361659322+00:00 stderr F E1208 17:57:51.361440 1 selinux_warning_controller.go:383] "Unhandled Error" err="service-telemetry/alertmanager-default-0 failed with: error processing PVC \"service-telemetry\"/\"alertmanager-default-db-alertmanager-default-0\": PVC service-telemetry/alertmanager-default-db-alertmanager-default-0 has non-bound phase (\"Pending\") or empty pvc.Spec.VolumeName (\"\")" logger="UnhandledError" 2025-12-08T17:57:51.365266026+00:00 stderr F I1208 17:57:51.365219 1 pv_controller.go:935] "Volume entered phase" logger="persistentvolume-binder-controller" volumeName="pvc-1b15df9e-01ca-4097-a731-1c1b05c63480" phase="Bound" 2025-12-08T17:57:51.365266026+00:00 stderr F I1208 17:57:51.365253 1 pv_controller.go:1071] "Volume bound to claim" logger="persistentvolume-binder-controller" PVC="service-telemetry/alertmanager-default-db-alertmanager-default-0" volumeName="pvc-1b15df9e-01ca-4097-a731-1c1b05c63480" 2025-12-08T17:57:51.369316910+00:00 stderr F E1208 17:57:51.369269 1 selinux_warning_controller.go:383] "Unhandled Error" err="service-telemetry/alertmanager-default-0 failed with: error processing PVC \"service-telemetry\"/\"alertmanager-default-db-alertmanager-default-0\": PVC service-telemetry/alertmanager-default-db-alertmanager-default-0 has non-bound phase (\"Pending\") or empty pvc.Spec.VolumeName (\"pvc-1b15df9e-01ca-4097-a731-1c1b05c63480\")" logger="UnhandledError" 2025-12-08T17:57:51.372789300+00:00 stderr F I1208 17:57:51.372748 1 pv_controller.go:876] "Claim entered phase" logger="persistentvolume-binder-controller" PVC="service-telemetry/alertmanager-default-db-alertmanager-default-0" phase="Bound" 2025-12-08T17:58:06.104194823+00:00 stderr F I1208 17:58:06.104118 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/default-cloud1-coll-meter-smartgateway-787645d794" need=1 creating=1 2025-12-08T17:58:06.115099075+00:00 stderr F I1208 17:58:06.114250 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/default-cloud1-coll-meter-smartgateway" err="Operation cannot be fulfilled on deployments.apps 
\"default-cloud1-coll-meter-smartgateway\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:58:08.889916612+00:00 stderr F I1208 17:58:08.886641 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/default-cloud1-ceil-meter-smartgateway-545b564d9f" need=1 creating=1 2025-12-08T17:58:08.901647346+00:00 stderr F I1208 17:58:08.900737 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/default-cloud1-ceil-meter-smartgateway" err="Operation cannot be fulfilled on deployments.apps \"default-cloud1-ceil-meter-smartgateway\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:58:12.981851591+00:00 stderr F I1208 17:58:12.981768 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/default-cloud1-sens-meter-smartgateway-66d5b7c5fc" need=1 creating=1 2025-12-08T17:58:12.993711407+00:00 stderr F I1208 17:58:12.993344 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/default-cloud1-sens-meter-smartgateway" err="Operation cannot be fulfilled on deployments.apps \"default-cloud1-sens-meter-smartgateway\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:58:20.082751368+00:00 stderr F I1208 17:58:20.082671 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/default-cloud1-coll-event-smartgateway-d956b4648" need=1 creating=1 2025-12-08T17:58:20.095014874+00:00 stderr F I1208 17:58:20.094758 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/default-cloud1-coll-event-smartgateway" err="Operation cannot be fulfilled on deployments.apps \"default-cloud1-coll-event-smartgateway\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:58:20.107425606+00:00 stderr F I1208 17:58:20.107362 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/default-cloud1-coll-event-smartgateway" err="Operation cannot be fulfilled on deployments.apps \"default-cloud1-coll-event-smartgateway\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:58:21.402189349+00:00 stderr F I1208 17:58:21.402124 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/default-cloud1-ceil-event-smartgateway-65cf5f4bb8" need=1 creating=1 2025-12-08T17:58:21.410491654+00:00 stderr F I1208 17:58:21.410440 1 deployment_controller.go:512] "Error syncing deployment" logger="deployment-controller" deployment="service-telemetry/default-cloud1-ceil-event-smartgateway" err="Operation cannot be fulfilled on deployments.apps \"default-cloud1-ceil-event-smartgateway\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:58:34.371746506+00:00 stderr F I1208 17:58:34.371671 1 replica_set.go:590] "Too few replicas" logger="replicaset-controller" replicaSet="service-telemetry/default-interconnect-55bf8d5cb" need=1 creating=1 2025-12-08T17:59:14.132633191+00:00 stderr F I1208 17:59:14.132172 1 job_controller.go:604] "enqueueing job" logger="job-controller" 
key="service-telemetry/stf-smoketest-smoke1" delay="0s" 2025-12-08T17:59:14.145719236+00:00 stderr F I1208 17:59:14.145638 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T17:59:14.154664592+00:00 stderr F I1208 17:59:14.154502 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T17:59:14.166718819+00:00 stderr F I1208 17:59:14.166617 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T17:59:14.172106511+00:00 stderr F I1208 17:59:14.172036 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T17:59:14.212774173+00:00 stderr F I1208 17:59:14.212002 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T17:59:14.757329615+00:00 stderr F I1208 17:59:14.757266 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T17:59:23.451542766+00:00 stderr F I1208 17:59:23.451460 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T17:59:30.977955729+00:00 stderr F I1208 17:59:30.977848 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T17:59:31.986742226+00:00 stderr F I1208 17:59:31.986661 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T17:59:59.215031068+00:00 stderr F I1208 17:59:59.214955 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T18:00:00.147100879+00:00 stderr F I1208 18:00:00.147021 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="0s" 2025-12-08T18:00:00.157429730+00:00 stderr F I1208 18:00:00.157336 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="1s" 2025-12-08T18:00:00.166286383+00:00 stderr F I1208 18:00:00.166219 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="1s" 2025-12-08T18:00:00.169265271+00:00 stderr F I1208 18:00:00.169212 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="1s" 2025-12-08T18:00:00.182463399+00:00 stderr F I1208 18:00:00.182418 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="1s" 2025-12-08T18:00:00.221169527+00:00 stderr F I1208 18:00:00.221101 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T18:00:00.224561196+00:00 stderr F I1208 18:00:00.224514 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="1s" 2025-12-08T18:00:00.840926705+00:00 stderr F I1208 18:00:00.839927 1 job_controller.go:604] "enqueueing job" logger="job-controller" 
key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="1s" 2025-12-08T18:00:01.225546619+00:00 stderr F I1208 18:00:01.225442 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="1s" 2025-12-08T18:00:02.247270549+00:00 stderr F I1208 18:00:02.240653 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T18:00:02.573748395+00:00 stderr F I1208 18:00:02.573193 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="1s" 2025-12-08T18:00:03.538807933+00:00 stderr F I1208 18:00:03.538752 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T18:00:03.579530715+00:00 stderr F I1208 18:00:03.579489 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="1s" 2025-12-08T18:00:03.586454116+00:00 stderr F I1208 18:00:03.586408 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29420280" delay="1s" 2025-12-08T18:00:03.594955760+00:00 stderr F I1208 18:00:03.594863 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="openshift-operator-lifecycle-manager/collect-profiles-29369370" delay="0s" 2025-12-08T18:00:04.546599776+00:00 stderr F I1208 18:00:04.546495 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T18:00:04.557328069+00:00 stderr F I1208 18:00:04.557275 1 job_controller.go:604] "enqueueing job" logger="job-controller" key="service-telemetry/stf-smoketest-smoke1" delay="1s" 2025-12-08T18:04:56.904031026+00:00 stderr F I1208 18:04:56.902356 1 garbagecollector.go:501] "Processing item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRoleBinding, namespace: , name: must-gather-vk99s, uid: 52812d6d-6e7d-46e7-b797-32f1ec6c1b26]" virtual=false 2025-12-08T18:04:56.917587116+00:00 stderr F I1208 18:04:56.917498 1 garbagecollector.go:640] "Deleting item" logger="garbage-collector-controller" item="[rbac.authorization.k8s.io/v1/ClusterRoleBinding, namespace: , name: must-gather-vk99s, uid: 52812d6d-6e7d-46e7-b797-32f1ec6c1b26]" propagationPolicy="Background" ././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-control0000644000175000017500000023054015115611514033062 0ustar zuulzuul2025-12-08T17:46:05.772918964+00:00 stderr F + timeout 3m /bin/bash -exuo pipefail -c 'while [ -n "$(ss -Htanop \( sport = 10257 \))" ]; do sleep 1; done' 2025-12-08T17:46:05.777651697+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:05.813347769+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,39sec,0)' ']' 2025-12-08T17:46:05.813347769+00:00 stderr F + sleep 1 2025-12-08T17:46:06.816984963+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:06.830028524+00:00 stderr F + '[' -n 
'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,38sec,0)' ']' 2025-12-08T17:46:06.830028524+00:00 stderr F + sleep 1 2025-12-08T17:46:07.835963469+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:07.860456563+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,37sec,0)' ']' 2025-12-08T17:46:07.860456563+00:00 stderr F + sleep 1 2025-12-08T17:46:08.864947334+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:08.875797600+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,36sec,0)' ']' 2025-12-08T17:46:08.875797600+00:00 stderr F + sleep 1 2025-12-08T17:46:09.880856186+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:09.906651252+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,35sec,0)' ']' 2025-12-08T17:46:09.906651252+00:00 stderr F + sleep 1 2025-12-08T17:46:10.911931176+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:10.928203944+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,34sec,0)' ']' 2025-12-08T17:46:10.928203944+00:00 stderr F + sleep 1 2025-12-08T17:46:11.932422907+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:11.942421206+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,33sec,0)' ']' 2025-12-08T17:46:11.942421206+00:00 stderr F + sleep 1 2025-12-08T17:46:12.946532466+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:12.975229307+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,32sec,0)' ']' 2025-12-08T17:46:12.975229307+00:00 stderr F + sleep 1 2025-12-08T17:46:13.979985876+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:13.993316335+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,31sec,0)' ']' 2025-12-08T17:46:13.993316335+00:00 stderr F + sleep 1 2025-12-08T17:46:14.997809286+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:15.009382003+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,30sec,0)' ']' 2025-12-08T17:46:15.009382003+00:00 stderr F + sleep 1 2025-12-08T17:46:16.012638976+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:16.021794971+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,29sec,0)' ']' 2025-12-08T17:46:16.021945586+00:00 stderr F + sleep 1 2025-12-08T17:46:17.024735335+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:17.044552040+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,28sec,0)' ']' 2025-12-08T17:46:17.044552040+00:00 stderr F + sleep 1 2025-12-08T17:46:18.048410040+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:18.058032359+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,27sec,0)' ']' 2025-12-08T17:46:18.058032359+00:00 stderr F + sleep 1 2025-12-08T17:46:19.062374625+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:19.082964643+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 
[::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,26sec,0)' ']' 2025-12-08T17:46:19.082964643+00:00 stderr F + sleep 1 2025-12-08T17:46:20.087550206+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:20.098780183+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,25sec,0)' ']' 2025-12-08T17:46:20.098870026+00:00 stderr F + sleep 1 2025-12-08T17:46:21.102288474+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:21.113425078+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,24sec,0)' ']' 2025-12-08T17:46:21.113425078+00:00 stderr F + sleep 1 2025-12-08T17:46:22.119167666+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:22.141022162+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,23sec,0)' ']' 2025-12-08T17:46:22.141022162+00:00 stderr F + sleep 1 2025-12-08T17:46:23.143991627+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:23.153802431+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,22sec,0)' ']' 2025-12-08T17:46:23.153901345+00:00 stderr F + sleep 1 2025-12-08T17:46:24.157172559+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:24.168492929+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,21sec,0)' ']' 2025-12-08T17:46:24.168577391+00:00 stderr F + sleep 1 2025-12-08T17:46:25.174365170+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:25.194066822+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,20sec,0)' ']' 2025-12-08T17:46:25.194066822+00:00 stderr F + sleep 1 2025-12-08T17:46:26.198389267+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:26.210220463+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,19sec,0)' ']' 2025-12-08T17:46:26.210220463+00:00 stderr F + sleep 1 2025-12-08T17:46:27.214408974+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:27.226369953+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,18sec,0)' ']' 2025-12-08T17:46:27.226456305+00:00 stderr F + sleep 1 2025-12-08T17:46:28.229855733+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:28.249969707+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,17sec,0)' ']' 2025-12-08T17:46:28.250090021+00:00 stderr F + sleep 1 2025-12-08T17:46:29.254973692+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:29.265522489+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,16sec,0)' ']' 2025-12-08T17:46:29.265602662+00:00 stderr F + sleep 1 2025-12-08T17:46:30.270232207+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:30.288348150+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,15sec,0)' ']' 2025-12-08T17:46:30.288419732+00:00 stderr F + sleep 1 2025-12-08T17:46:31.291962675+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:31.317338226+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 
[::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,14sec,0)' ']' 2025-12-08T17:46:31.317461980+00:00 stderr F + sleep 1 2025-12-08T17:46:32.321232869+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:32.330705472+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,13sec,0)' ']' 2025-12-08T17:46:32.330780045+00:00 stderr F + sleep 1 2025-12-08T17:46:33.333850202+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:33.345504593+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,12sec,0)' ']' 2025-12-08T17:46:33.345592776+00:00 stderr F + sleep 1 2025-12-08T17:46:34.349841939+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:34.372216730+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,11sec,0)' ']' 2025-12-08T17:46:34.372304463+00:00 stderr F + sleep 1 2025-12-08T17:46:35.375949787+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:35.395966038+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,10sec,0)' ']' 2025-12-08T17:46:35.396096883+00:00 stderr F + sleep 1 2025-12-08T17:46:36.401089968+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:36.420964304+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,9.008ms,0)' ']' 2025-12-08T17:46:36.421011086+00:00 stderr F + sleep 1 2025-12-08T17:46:37.425814756+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:37.442108415+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,7.987ms,0)' ']' 2025-12-08T17:46:37.442231289+00:00 stderr F + sleep 1 2025-12-08T17:46:38.446318217+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:38.457936116+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,6.971ms,0)' ']' 2025-12-08T17:46:38.458028769+00:00 stderr F + sleep 1 2025-12-08T17:46:39.461370145+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:39.472705865+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,5.956ms,0)' ']' 2025-12-08T17:46:39.472776157+00:00 stderr F + sleep 1 2025-12-08T17:46:40.476136444+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:40.497448994+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,4.932ms,0)' ']' 2025-12-08T17:46:40.497517496+00:00 stderr F + sleep 1 2025-12-08T17:46:41.502962594+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:41.517207152+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,3.912ms,0)' ']' 2025-12-08T17:46:41.517314505+00:00 stderr F + sleep 1 2025-12-08T17:46:42.521085294+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:42.543598790+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,2.886ms,0)' ']' 2025-12-08T17:46:42.543674202+00:00 stderr F + sleep 1 2025-12-08T17:46:43.547135731+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:43.557166153+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 
[::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,1.872ms,0)' ']' 2025-12-08T17:46:43.557240015+00:00 stderr F + sleep 1 2025-12-08T17:46:44.560442517+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:44.582399846+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,848ms,0)' ']' 2025-12-08T17:46:44.582570991+00:00 stderr F + sleep 1 2025-12-08T17:46:45.586169205+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:45.606356690+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,,0)' ']' 2025-12-08T17:46:45.606479724+00:00 stderr F + sleep 1 2025-12-08T17:46:46.609979905+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:46.620151900+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,,0)' ']' 2025-12-08T17:46:46.620199212+00:00 stderr F + sleep 1 2025-12-08T17:46:47.623776423+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:47.640080246+00:00 stderr F + '[' -n 'TIME-WAIT 0 0 [::ffff:192.168.126.11]:10257 [::ffff:192.168.126.11]:60562 timer:(timewait,,0)' ']' 2025-12-08T17:46:47.640168049+00:00 stderr F + sleep 1 2025-12-08T17:46:48.643982099+00:00 stderr F ++ ss -Htanop '(' sport = 10257 ')' 2025-12-08T17:46:48.655636956+00:00 stderr F + '[' -n '' ']' 2025-12-08T17:46:48.656679428+00:00 stderr F + '[' -f /etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt ']' 2025-12-08T17:46:48.656710399+00:00 stderr F + echo 'Copying system trust bundle' 2025-12-08T17:46:48.656720629+00:00 stdout F Copying system trust bundle 2025-12-08T17:46:48.656727870+00:00 stderr F + cp -f /etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 2025-12-08T17:46:48.662311385+00:00 stderr F + '[' -f /etc/kubernetes/static-pod-resources/configmaps/cloud-config/ca-bundle.pem ']' 2025-12-08T17:46:48.662702058+00:00 stderr P + exec hyperkube kube-controller-manager --openshift-config=/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --authentication-kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --authorization-kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --client-ca-file=/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt --requestheader-client-ca-file=/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt -v=2 --tls-cert-file=/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt --tls-private-key-file=/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key --allocate-node-cidrs=false --cert-dir=/var/run/kubernetes --cloud-provider=external --cluster-cidr=10.217.0.0/22 --cluster-name=crc-rzkkk --cluster-signing-cert-file=/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt --cluster-signing-duration=720h --cluster-signing-key-file=/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key '--controllers=*' --controllers=-bootstrapsigner --controllers=-tokencleaner --controllers=-ttl --controllers=selinux-warning-controller --enable-dynamic-provisioning=true --feature-gates=AWSClusterHostedDNS=false --feature-gates=AWSClusterHostedDNSInstall=false 
--feature-gates=AWSDedicatedHosts=false --feature-gates=AWSServiceLBNetworkSecurityGroup=false --feature-gates=AdditionalRoutingCapabilities=true --feature-gates=AdminNetworkPolicy=true --feature-gates=AlibabaPlatform=true --feature-gates=AutomatedEtcdBackup=false --feature-gates=AzureClusterHostedDNSInstall=false --feature-gates=AzureDedicatedHosts=false --feature-gates=AzureMultiDisk=false --feature-gates=AzureWorkloadIdentity=true --feature-gates=BootImageSkewEnforcement=false --feature-gates=BootcNodeManagement=false --feature-gates=BuildCSIVolumes=true --feature-gates=CPMSMachineNamePrefix=true --feature-gates=ClusterAPIInstall=false --feature-gates=ClusterAPIInstallIBMCloud=false --feature-gates=ClusterMonitoringConfig=false --feature-gates=ClusterVersionOperatorConfiguration=false --feature-gates=ConsolePluginContentSecurityPolicy=true --feature-gates=DNSNameResolver=false --feature-gates=DualReplica=false --feature-gates=DyanmicServiceEndpointIBMCloud=false --feature-gates=DynamicResourceAllocation=false --feature-gates=EtcdBackendQuota=false --feature-gates=EventedPLEG=false --feature-gates=Example2=false --feature-gates=Example=false --feature-gates=ExternalOIDC=false --feature-gates=ExternalOIDCWithUIDAndExtraClaimMappings=false --feature-gates=ExternalSnapshotMetadata=false --feature-gates=GCPClusterHostedDNS=false --feature-gates=GCPClusterHostedDNSInstall=false --feature-gates=GCPCustomAPIEndpoints=false --feature-gates=GCPCustomAPIEndpointsInstall=false --feature-gates=GatewayAPI=true --feature-gates=GatewayAPIController=true --feature-gates=HighlyAvailableArbiter=true --feature-gates=ImageModeStatusReporting=false --feature-gates=ImageStreamImportMode=false --feature-gates=ImageVolume=true --feature-gates=IngressControllerDynamicConfigurationManager=false --feature-gates=IngressControllerLBSubnetsAWS=true --feature-gates=InsightsConfig=false --feature-gates=InsightsConfigAPI=false --feature-gates=InsightsOnDemandDataGather=false --feature-gates=IrreconcilableMachineConfig=false --feature-gates=KMSEncryptionProvider=false --feature-gates=KMSv1=true --feature-gates=MachineAPIMigration=false --feature-gates=MachineAPIOperatorDisableMachineHealthCheckController=false --feature-gates=MachineConfigNodes=true --feature-gates=ManagedBootImages=true --feature-gates=ManagedBootImagesAWS=true --feature-gates=ManagedBootImagesAzure=false --feature-gates=ManagedBootImagesvSphere=false --feature-gates=MaxUnavailableStatefulSet=false --feature-gates=MetricsCollectionProfiles=true --feature-gates=MinimumKubeletVersion=false --feature-gates=MixedCPUsAllocation=false --feature-gates=MultiArchInstallAzure=fals 2025-12-08T17:46:48.662734269+00:00 stderr F e --feature-gates=MultiDiskSetup=false --feature-gates=MutatingAdmissionPolicy=false --feature-gates=NetworkDiagnosticsConfig=true --feature-gates=NetworkLiveMigration=true --feature-gates=NetworkSegmentation=true --feature-gates=NewOLM=true --feature-gates=NewOLMCatalogdAPIV1Metas=false --feature-gates=NewOLMOwnSingleNamespace=false --feature-gates=NewOLMPreflightPermissionChecks=false --feature-gates=NewOLMWebhookProviderOpenshiftServiceCA=false --feature-gates=NoRegistryClusterOperations=false --feature-gates=NodeSwap=false --feature-gates=NutanixMultiSubnets=false --feature-gates=OVNObservability=false --feature-gates=OpenShiftPodSecurityAdmission=false --feature-gates=PinnedImages=true --feature-gates=PreconfiguredUDNAddresses=false --feature-gates=ProcMountType=true --feature-gates=RouteAdvertisements=true 
--feature-gates=RouteExternalCertificate=true --feature-gates=SELinuxMount=false --feature-gates=ServiceAccountTokenNodeBinding=true --feature-gates=SetEIPForNLBIngressController=true --feature-gates=ShortCertRotation=false --feature-gates=SignatureStores=false --feature-gates=SigstoreImageVerification=true --feature-gates=SigstoreImageVerificationPKI=false --feature-gates=StoragePerformantSecurityPolicy=true --feature-gates=TranslateStreamCloseWebsocketRequests=false --feature-gates=UpgradeStatus=true --feature-gates=UserNamespacesPodSecurityStandards=true --feature-gates=UserNamespacesSupport=true --feature-gates=VSphereConfigurableMaxAllowedBlockVolumesPerNode=false --feature-gates=VSphereHostVMGroupZonal=false --feature-gates=VSphereMixedNodeEnv=false --feature-gates=VSphereMultiDisk=true --feature-gates=VSphereMultiNetworks=true --feature-gates=VolumeAttributesClass=false --feature-gates=VolumeGroupSnapshot=false --flex-volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec --kube-api-burst=300 --kube-api-qps=150 --leader-elect-renew-deadline=12s --leader-elect-resource-lock=leases --leader-elect-retry-period=3s --leader-elect=true --pv-recycler-pod-template-filepath-hostpath=/etc/kubernetes/static-pod-resources/configmaps/recycler-config/recycler-pod.yaml --pv-recycler-pod-template-filepath-nfs=/etc/kubernetes/static-pod-resources/configmaps/recycler-config/recycler-pod.yaml --root-ca-file=/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt --secure-port=10257 --service-account-private-key-file=/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key --service-cluster-ip-range=10.217.4.0/23 --use-service-account-credentials=true --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 --tls-min-version=VersionTLS12 2025-12-08T17:46:48.788022853+00:00 stderr F W1208 17:46:48.787889 1 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider 2025-12-08T17:46:48.788022853+00:00 stderr F W1208 17:46:48.787989 1 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts 2025-12-08T17:46:48.788022853+00:00 stderr F W1208 17:46:48.787993 1 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement 2025-12-08T17:46:48.788022853+00:00 stderr F W1208 17:46:48.787996 1 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager 2025-12-08T17:46:48.788022853+00:00 stderr F W1208 17:46:48.788000 1 feature_gate.go:328] unrecognized feature gate: ShortCertRotation 2025-12-08T17:46:48.788022853+00:00 stderr F W1208 17:46:48.788003 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure 2025-12-08T17:46:48.788022853+00:00 stderr F W1208 17:46:48.788006 1 feature_gate.go:328] unrecognized feature gate: Example 2025-12-08T17:46:48.788022853+00:00 stderr F W1208 17:46:48.788009 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings 2025-12-08T17:46:48.788022853+00:00 stderr F W1208 17:46:48.788013 1 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy 2025-12-08T17:46:48.788022853+00:00 stderr F W1208 17:46:48.788016 1 feature_gate.go:328] unrecognized feature gate: DualReplica 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 
17:46:48.788019 1 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 17:46:48.788023 1 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 17:46:48.788027 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImages 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 17:46:48.788030 1 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 17:46:48.788033 1 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 17:46:48.788037 1 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 17:46:48.788040 1 feature_gate.go:328] unrecognized feature gate: GatewayAPI 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 17:46:48.788044 1 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 17:46:48.788046 1 feature_gate.go:328] unrecognized feature gate: SignatureStores 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 17:46:48.788049 1 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS 2025-12-08T17:46:48.788059264+00:00 stderr F W1208 17:46:48.788053 1 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity 2025-12-08T17:46:48.788071884+00:00 stderr F W1208 17:46:48.788058 1 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig 2025-12-08T17:46:48.788071884+00:00 stderr F W1208 17:46:48.788061 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall 2025-12-08T17:46:48.788071884+00:00 stderr F W1208 17:46:48.788064 1 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration 2025-12-08T17:46:48.788071884+00:00 stderr F W1208 17:46:48.788067 1 feature_gate.go:328] unrecognized feature gate: NewOLM 2025-12-08T17:46:48.788081655+00:00 stderr F W1208 17:46:48.788070 1 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses 2025-12-08T17:46:48.788081655+00:00 stderr F W1208 17:46:48.788073 1 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup 2025-12-08T17:46:48.788081655+00:00 stderr F W1208 17:46:48.788076 1 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration 2025-12-08T17:46:48.788081655+00:00 stderr F W1208 17:46:48.788079 1 feature_gate.go:328] unrecognized feature gate: UpgradeStatus 2025-12-08T17:46:48.788101635+00:00 stderr F W1208 17:46:48.788082 1 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController 2025-12-08T17:46:48.788101635+00:00 stderr F W1208 17:46:48.788085 1 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController 2025-12-08T17:46:48.788101635+00:00 stderr F W1208 17:46:48.788089 1 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission 2025-12-08T17:46:48.788101635+00:00 stderr F W1208 17:46:48.788091 1 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks 2025-12-08T17:46:48.788101635+00:00 stderr F W1208 17:46:48.788094 1 feature_gate.go:328] unrecognized feature gate: OVNObservability 2025-12-08T17:46:48.788101635+00:00 stderr F W1208 17:46:48.788097 1 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS 2025-12-08T17:46:48.788111986+00:00 stderr F W1208 17:46:48.788100 1 feature_gate.go:328] 
unrecognized feature gate: GatewayAPIController 2025-12-08T17:46:48.788111986+00:00 stderr F W1208 17:46:48.788104 1 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal 2025-12-08T17:46:48.788111986+00:00 stderr F W1208 17:46:48.788108 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDC 2025-12-08T17:46:48.788120906+00:00 stderr F W1208 17:46:48.788111 1 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota 2025-12-08T17:46:48.788120906+00:00 stderr F W1208 17:46:48.788114 1 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix 2025-12-08T17:46:48.788120906+00:00 stderr F W1208 17:46:48.788117 1 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup 2025-12-08T17:46:48.788129176+00:00 stderr F W1208 17:46:48.788120 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall 2025-12-08T17:46:48.788129176+00:00 stderr F W1208 17:46:48.788123 1 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata 2025-12-08T17:46:48.788129176+00:00 stderr F W1208 17:46:48.788126 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification 2025-12-08T17:46:48.788137596+00:00 stderr F W1208 17:46:48.788128 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI 2025-12-08T17:46:48.788137596+00:00 stderr F W1208 17:46:48.788132 1 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig 2025-12-08T17:46:48.788137596+00:00 stderr F W1208 17:46:48.788134 1 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation 2025-12-08T17:46:48.788145957+00:00 stderr F W1208 17:46:48.788137 1 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig 2025-12-08T17:46:48.788145957+00:00 stderr F W1208 17:46:48.788140 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS 2025-12-08T17:46:48.788145957+00:00 stderr F W1208 17:46:48.788143 1 feature_gate.go:328] unrecognized feature gate: PinnedImages 2025-12-08T17:46:48.788154717+00:00 stderr F W1208 17:46:48.788146 1 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas 2025-12-08T17:46:48.788154717+00:00 stderr F W1208 17:46:48.788150 1 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes 2025-12-08T17:46:48.788163167+00:00 stderr F W1208 17:46:48.788154 1 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
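The shell trace at the top of this container log shows the static pod wrapper's startup pattern: poll ss once per second until no socket on the secure port 10257 is still in TIME-WAIT, copy the trusted CA bundle into the system trust store if it is present, then exec the real kube-controller-manager binary. A minimal sketch of that pattern, assuming bash and treating the port and file paths as placeholders rather than the exact wrapper script:

#!/bin/bash
# Sketch only: mirrors the wait-then-exec pattern visible in the trace above.
PORT=10257  # secure port a previous controller-manager instance may still hold

# Block until ss reports no socket (including TIME-WAIT) bound to the port.
while [ -n "$(ss -Htanop '(' sport = "${PORT}" ')')" ]; do
  sleep 1
done

# Copy the trusted CA bundle into the system trust store, as the trace does.
ca_src=/etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt
if [ -f "${ca_src}" ]; then
  echo 'Copying system trust bundle'
  cp -f "${ca_src}" /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
fi

# Replace this shell with the controller manager; the real argument list is elided.
exec hyperkube kube-controller-manager "$@"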
2025-12-08T17:46:48.788163167+00:00 stderr F W1208 17:46:48.788158 1 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud 2025-12-08T17:46:48.788176078+00:00 stderr F W1208 17:46:48.788163 1 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts 2025-12-08T17:46:48.788176078+00:00 stderr F W1208 17:46:48.788166 1 feature_gate.go:328] unrecognized feature gate: Example2 2025-12-08T17:46:48.788176078+00:00 stderr F W1208 17:46:48.788169 1 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall 2025-12-08T17:46:48.788176078+00:00 stderr F W1208 17:46:48.788172 1 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration 2025-12-08T17:46:48.788184628+00:00 stderr F W1208 17:46:48.788175 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS 2025-12-08T17:46:48.788184628+00:00 stderr F W1208 17:46:48.788178 1 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement 2025-12-08T17:46:48.788184628+00:00 stderr F W1208 17:46:48.788180 1 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation 2025-12-08T17:46:48.788192568+00:00 stderr F W1208 17:46:48.788183 1 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles 2025-12-08T17:46:48.788192568+00:00 stderr F W1208 17:46:48.788188 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud 2025-12-08T17:46:48.788200378+00:00 stderr F W1208 17:46:48.788191 1 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall 2025-12-08T17:46:48.788200378+00:00 stderr F W1208 17:46:48.788194 1 feature_gate.go:328] unrecognized feature gate: DNSNameResolver 2025-12-08T17:46:48.788200378+00:00 stderr F W1208 17:46:48.788196 1 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode 2025-12-08T17:46:48.788207509+00:00 stderr F W1208 17:46:48.788199 1 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather 2025-12-08T17:46:48.788207509+00:00 stderr F W1208 17:46:48.788202 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall 2025-12-08T17:46:48.788214229+00:00 stderr F W1208 17:46:48.788207 1 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot 2025-12-08T17:46:48.788214229+00:00 stderr F W1208 17:46:48.788210 1 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk 2025-12-08T17:46:48.788220969+00:00 stderr F W1208 17:46:48.788213 1 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities 2025-12-08T17:46:48.788220969+00:00 stderr F W1208 17:46:48.788216 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints 2025-12-08T17:46:48.788220969+00:00 stderr F W1208 17:46:48.788219 1 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations 2025-12-08T17:46:48.788228039+00:00 stderr F W1208 17:46:48.788221 1 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup 2025-12-08T17:46:48.788228039+00:00 stderr F W1208 17:46:48.788225 1 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy 2025-12-08T17:46:48.788234699+00:00 stderr F W1208 17:46:48.788228 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks 2025-12-08T17:46:48.788234699+00:00 stderr F W1208 17:46:48.788231 1 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv 2025-12-08T17:46:48.788241430+00:00 stderr F W1208 17:46:48.788233 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere 
2025-12-08T17:46:48.788241430+00:00 stderr F W1208 17:46:48.788236 1 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements 2025-12-08T17:46:48.788248190+00:00 stderr F W1208 17:46:48.788241 1 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. 2025-12-08T17:46:48.788248190+00:00 stderr F W1208 17:46:48.788245 1 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode 2025-12-08T17:46:48.788258480+00:00 stderr F W1208 17:46:48.788248 1 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI 2025-12-08T17:46:48.788258480+00:00 stderr F W1208 17:46:48.788251 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk 2025-12-08T17:46:48.788258480+00:00 stderr F W1208 17:46:48.788255 1 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets 2025-12-08T17:46:48.788265590+00:00 stderr F W1208 17:46:48.788258 1 feature_gate.go:328] unrecognized feature gate: InsightsConfig 2025-12-08T17:46:48.788265590+00:00 stderr F W1208 17:46:48.788261 1 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform 2025-12-08T17:46:48.788391314+00:00 stderr F I1208 17:46:48.788361 1 flags.go:64] FLAG: --allocate-node-cidrs="false" 2025-12-08T17:46:48.788391314+00:00 stderr F I1208 17:46:48.788373 1 flags.go:64] FLAG: --allow-metric-labels="[]" 2025-12-08T17:46:48.788391314+00:00 stderr F I1208 17:46:48.788382 1 flags.go:64] FLAG: --allow-metric-labels-manifest="" 2025-12-08T17:46:48.788391314+00:00 stderr F I1208 17:46:48.788387 1 flags.go:64] FLAG: --allow-untagged-cloud="false" 2025-12-08T17:46:48.788399835+00:00 stderr F I1208 17:46:48.788391 1 flags.go:64] FLAG: --attach-detach-reconcile-sync-period="1m0s" 2025-12-08T17:46:48.788399835+00:00 stderr F I1208 17:46:48.788396 1 flags.go:64] FLAG: --authentication-kubeconfig="/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig" 2025-12-08T17:46:48.788406755+00:00 stderr F I1208 17:46:48.788400 1 flags.go:64] FLAG: --authentication-skip-lookup="false" 2025-12-08T17:46:48.788406755+00:00 stderr F I1208 17:46:48.788404 1 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="10s" 2025-12-08T17:46:48.788413455+00:00 stderr F I1208 17:46:48.788407 1 flags.go:64] FLAG: --authentication-tolerate-lookup-failure="false" 2025-12-08T17:46:48.788419945+00:00 stderr F I1208 17:46:48.788411 1 flags.go:64] FLAG: --authorization-always-allow-paths="[/healthz,/readyz,/livez]" 2025-12-08T17:46:48.788426475+00:00 stderr F I1208 17:46:48.788418 1 flags.go:64] FLAG: --authorization-kubeconfig="/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig" 2025-12-08T17:46:48.788426475+00:00 stderr F I1208 17:46:48.788422 1 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="10s" 2025-12-08T17:46:48.788435456+00:00 stderr F I1208 17:46:48.788426 1 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="10s" 2025-12-08T17:46:48.788435456+00:00 stderr F I1208 17:46:48.788429 1 flags.go:64] FLAG: --bind-address="0.0.0.0" 2025-12-08T17:46:48.788450086+00:00 stderr F I1208 17:46:48.788436 1 flags.go:64] FLAG: --cert-dir="/var/run/kubernetes" 2025-12-08T17:46:48.788450086+00:00 stderr F I1208 17:46:48.788440 1 flags.go:64] FLAG: --cidr-allocator-type="RangeAllocator" 2025-12-08T17:46:48.788450086+00:00 stderr F I1208 17:46:48.788444 1 flags.go:64] FLAG: --client-ca-file="/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 
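Most of the warnings above are OpenShift cluster-level feature gates that the upstream kube-controller-manager binary does not recognize, which suggests the full cluster gate list is passed through wholesale and feature_gate.go logs each unknown name. One way to get a deduplicated list of the unrecognized gate names out of a saved copy of this log (the file name below is only an assumption):

# Sketch: extract and dedupe the gate names logged as unrecognized.
grep -o 'unrecognized feature gate: [A-Za-z0-9]*' kube-controller-manager.log \
  | awk '{print $NF}' | sort -u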
2025-12-08T17:46:48.788459576+00:00 stderr F I1208 17:46:48.788449 1 flags.go:64] FLAG: --cloud-config="" 2025-12-08T17:46:48.788459576+00:00 stderr F I1208 17:46:48.788452 1 flags.go:64] FLAG: --cloud-provider="external" 2025-12-08T17:46:48.788459576+00:00 stderr F I1208 17:46:48.788456 1 flags.go:64] FLAG: --cluster-cidr="10.217.0.0/22" 2025-12-08T17:46:48.788468567+00:00 stderr F I1208 17:46:48.788459 1 flags.go:64] FLAG: --cluster-name="crc-rzkkk" 2025-12-08T17:46:48.788468567+00:00 stderr F I1208 17:46:48.788463 1 flags.go:64] FLAG: --cluster-signing-cert-file="/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.crt" 2025-12-08T17:46:48.788476437+00:00 stderr F I1208 17:46:48.788467 1 flags.go:64] FLAG: --cluster-signing-duration="720h0m0s" 2025-12-08T17:46:48.788476437+00:00 stderr F I1208 17:46:48.788470 1 flags.go:64] FLAG: --cluster-signing-key-file="/etc/kubernetes/static-pod-certs/secrets/csr-signer/tls.key" 2025-12-08T17:46:48.788476437+00:00 stderr F I1208 17:46:48.788474 1 flags.go:64] FLAG: --cluster-signing-kube-apiserver-client-cert-file="" 2025-12-08T17:46:48.788489647+00:00 stderr F I1208 17:46:48.788477 1 flags.go:64] FLAG: --cluster-signing-kube-apiserver-client-key-file="" 2025-12-08T17:46:48.788489647+00:00 stderr F I1208 17:46:48.788480 1 flags.go:64] FLAG: --cluster-signing-kubelet-client-cert-file="" 2025-12-08T17:46:48.788489647+00:00 stderr F I1208 17:46:48.788483 1 flags.go:64] FLAG: --cluster-signing-kubelet-client-key-file="" 2025-12-08T17:46:48.788489647+00:00 stderr F I1208 17:46:48.788486 1 flags.go:64] FLAG: --cluster-signing-kubelet-serving-cert-file="" 2025-12-08T17:46:48.788497208+00:00 stderr F I1208 17:46:48.788489 1 flags.go:64] FLAG: --cluster-signing-kubelet-serving-key-file="" 2025-12-08T17:46:48.788497208+00:00 stderr F I1208 17:46:48.788493 1 flags.go:64] FLAG: --cluster-signing-legacy-unknown-cert-file="" 2025-12-08T17:46:48.788503938+00:00 stderr F I1208 17:46:48.788496 1 flags.go:64] FLAG: --cluster-signing-legacy-unknown-key-file="" 2025-12-08T17:46:48.788503938+00:00 stderr F I1208 17:46:48.788499 1 flags.go:64] FLAG: --concurrent-cron-job-syncs="5" 2025-12-08T17:46:48.788510638+00:00 stderr F I1208 17:46:48.788503 1 flags.go:64] FLAG: --concurrent-daemonset-syncs="2" 2025-12-08T17:46:48.788510638+00:00 stderr F I1208 17:46:48.788507 1 flags.go:64] FLAG: --concurrent-deployment-syncs="5" 2025-12-08T17:46:48.788517338+00:00 stderr F I1208 17:46:48.788510 1 flags.go:64] FLAG: --concurrent-endpoint-syncs="5" 2025-12-08T17:46:48.788517338+00:00 stderr F I1208 17:46:48.788514 1 flags.go:64] FLAG: --concurrent-ephemeralvolume-syncs="5" 2025-12-08T17:46:48.788523978+00:00 stderr F I1208 17:46:48.788517 1 flags.go:64] FLAG: --concurrent-gc-syncs="20" 2025-12-08T17:46:48.788523978+00:00 stderr F I1208 17:46:48.788520 1 flags.go:64] FLAG: --concurrent-horizontal-pod-autoscaler-syncs="5" 2025-12-08T17:46:48.788530679+00:00 stderr F I1208 17:46:48.788523 1 flags.go:64] FLAG: --concurrent-job-syncs="5" 2025-12-08T17:46:48.788530679+00:00 stderr F I1208 17:46:48.788526 1 flags.go:64] FLAG: --concurrent-namespace-syncs="10" 2025-12-08T17:46:48.788537349+00:00 stderr F I1208 17:46:48.788530 1 flags.go:64] FLAG: --concurrent-rc-syncs="5" 2025-12-08T17:46:48.788537349+00:00 stderr F I1208 17:46:48.788533 1 flags.go:64] FLAG: --concurrent-replicaset-syncs="5" 2025-12-08T17:46:48.788544019+00:00 stderr F I1208 17:46:48.788536 1 flags.go:64] FLAG: --concurrent-resource-quota-syncs="5" 2025-12-08T17:46:48.788544019+00:00 stderr F I1208 17:46:48.788540 1 
flags.go:64] FLAG: --concurrent-service-endpoint-syncs="5" 2025-12-08T17:46:48.788550659+00:00 stderr F I1208 17:46:48.788543 1 flags.go:64] FLAG: --concurrent-service-syncs="1" 2025-12-08T17:46:48.788550659+00:00 stderr F I1208 17:46:48.788546 1 flags.go:64] FLAG: --concurrent-serviceaccount-token-syncs="5" 2025-12-08T17:46:48.788557340+00:00 stderr F I1208 17:46:48.788549 1 flags.go:64] FLAG: --concurrent-statefulset-syncs="5" 2025-12-08T17:46:48.788557340+00:00 stderr F I1208 17:46:48.788554 1 flags.go:64] FLAG: --concurrent-ttl-after-finished-syncs="5" 2025-12-08T17:46:48.788564000+00:00 stderr F I1208 17:46:48.788558 1 flags.go:64] FLAG: --concurrent-validating-admission-policy-status-syncs="5" 2025-12-08T17:46:48.788564000+00:00 stderr F I1208 17:46:48.788561 1 flags.go:64] FLAG: --configure-cloud-routes="true" 2025-12-08T17:46:48.788570690+00:00 stderr F I1208 17:46:48.788564 1 flags.go:64] FLAG: --contention-profiling="false" 2025-12-08T17:46:48.788570690+00:00 stderr F I1208 17:46:48.788568 1 flags.go:64] FLAG: --controller-start-interval="0s" 2025-12-08T17:46:48.788578980+00:00 stderr F I1208 17:46:48.788571 1 flags.go:64] FLAG: --controllers="[*,-bootstrapsigner,-tokencleaner,-ttl,selinux-warning-controller]" 2025-12-08T17:46:48.788589401+00:00 stderr F I1208 17:46:48.788578 1 flags.go:64] FLAG: --disable-attach-detach-reconcile-sync="false" 2025-12-08T17:46:48.788589401+00:00 stderr F I1208 17:46:48.788582 1 flags.go:64] FLAG: --disable-force-detach-on-timeout="false" 2025-12-08T17:46:48.788589401+00:00 stderr F I1208 17:46:48.788585 1 flags.go:64] FLAG: --disable-http2-serving="false" 2025-12-08T17:46:48.788596501+00:00 stderr F I1208 17:46:48.788589 1 flags.go:64] FLAG: --disabled-metrics="[]" 2025-12-08T17:46:48.788603771+00:00 stderr F I1208 17:46:48.788593 1 flags.go:64] FLAG: --emulated-version="[]" 2025-12-08T17:46:48.788603771+00:00 stderr F I1208 17:46:48.788598 1 flags.go:64] FLAG: --enable-dynamic-provisioning="true" 2025-12-08T17:46:48.788612191+00:00 stderr F I1208 17:46:48.788602 1 flags.go:64] FLAG: --enable-garbage-collector="true" 2025-12-08T17:46:48.788612191+00:00 stderr F I1208 17:46:48.788605 1 flags.go:64] FLAG: --enable-hostpath-provisioner="false" 2025-12-08T17:46:48.788612191+00:00 stderr F I1208 17:46:48.788608 1 flags.go:64] FLAG: --enable-leader-migration="false" 2025-12-08T17:46:48.788621132+00:00 stderr F I1208 17:46:48.788612 1 flags.go:64] FLAG: --endpoint-updates-batch-period="0s" 2025-12-08T17:46:48.788621132+00:00 stderr F I1208 17:46:48.788615 1 flags.go:64] FLAG: --endpointslice-updates-batch-period="0s" 2025-12-08T17:46:48.788621132+00:00 stderr F I1208 17:46:48.788618 1 flags.go:64] FLAG: --external-cloud-volume-plugin="" 2025-12-08T17:46:48.788720505+00:00 stderr F I1208 17:46:48.788622 1 flags.go:64] FLAG: 
--feature-gates=":AWSClusterHostedDNS=false,:AWSClusterHostedDNSInstall=false,:AWSDedicatedHosts=false,:AWSServiceLBNetworkSecurityGroup=false,:AdditionalRoutingCapabilities=true,:AdminNetworkPolicy=true,:AlibabaPlatform=true,:AutomatedEtcdBackup=false,:AzureClusterHostedDNSInstall=false,:AzureDedicatedHosts=false,:AzureMultiDisk=false,:AzureWorkloadIdentity=true,:BootImageSkewEnforcement=false,:BootcNodeManagement=false,:BuildCSIVolumes=true,:CPMSMachineNamePrefix=true,:ClusterAPIInstall=false,:ClusterAPIInstallIBMCloud=false,:ClusterMonitoringConfig=false,:ClusterVersionOperatorConfiguration=false,:ConsolePluginContentSecurityPolicy=true,:DNSNameResolver=false,:DualReplica=false,:DyanmicServiceEndpointIBMCloud=false,:DynamicResourceAllocation=false,:EtcdBackendQuota=false,:EventedPLEG=false,:Example2=false,:Example=false,:ExternalOIDC=false,:ExternalOIDCWithUIDAndExtraClaimMappings=false,:ExternalSnapshotMetadata=false,:GCPClusterHostedDNS=false,:GCPClusterHostedDNSInstall=false,:GCPCustomAPIEndpoints=false,:GCPCustomAPIEndpointsInstall=false,:GatewayAPI=true,:GatewayAPIController=true,:HighlyAvailableArbiter=true,:ImageModeStatusReporting=false,:ImageStreamImportMode=false,:ImageVolume=true,:IngressControllerDynamicConfigurationManager=false,:IngressControllerLBSubnetsAWS=true,:InsightsConfig=false,:InsightsConfigAPI=false,:InsightsOnDemandDataGather=false,:IrreconcilableMachineConfig=false,:KMSEncryptionProvider=false,:KMSv1=true,:MachineAPIMigration=false,:MachineAPIOperatorDisableMachineHealthCheckController=false,:MachineConfigNodes=true,:ManagedBootImages=true,:ManagedBootImagesAWS=true,:ManagedBootImagesAzure=false,:ManagedBootImagesvSphere=false,:MaxUnavailableStatefulSet=false,:MetricsCollectionProfiles=true,:MinimumKubeletVersion=false,:MixedCPUsAllocation=false,:MultiArchInstallAzure=false,:MultiDiskSetup=false,:MutatingAdmissionPolicy=false,:NetworkDiagnosticsConfig=true,:NetworkLiveMigration=true,:NetworkSegmentation=true,:NewOLM=true,:NewOLMCatalogdAPIV1Metas=false,:NewOLMOwnSingleNamespace=false,:NewOLMPreflightPermissionChecks=false,:NewOLMWebhookProviderOpenshiftServiceCA=false,:NoRegistryClusterOperations=false,:NodeSwap=false,:NutanixMultiSubnets=false,:OVNObservability=false,:OpenShiftPodSecurityAdmission=false,:PinnedImages=true,:PreconfiguredUDNAddresses=false,:ProcMountType=true,:RouteAdvertisements=true,:RouteExternalCertificate=true,:SELinuxMount=false,:ServiceAccountTokenNodeBinding=true,:SetEIPForNLBIngressController=true,:ShortCertRotation=false,:SignatureStores=false,:SigstoreImageVerification=true,:SigstoreImageVerificationPKI=false,:StoragePerformantSecurityPolicy=true,:TranslateStreamCloseWebsocketRequests=false,:UpgradeStatus=true,:UserNamespacesPodSecurityStandards=true,:UserNamespacesSupport=true,:VSphereConfigurableMaxAllowedBlockVolumesPerNode=false,:VSphereHostVMGroupZonal=false,:VSphereMixedNodeEnv=false,:VSphereMultiDisk=true,:VSphereMultiNetworks=true,:VolumeAttributesClass=false,:VolumeGroupSnapshot=false" 2025-12-08T17:46:48.788720505+00:00 stderr F I1208 17:46:48.788702 1 flags.go:64] FLAG: --flex-volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" 2025-12-08T17:46:48.788720505+00:00 stderr F I1208 17:46:48.788707 1 flags.go:64] FLAG: --help="false" 2025-12-08T17:46:48.788720505+00:00 stderr F I1208 17:46:48.788710 1 flags.go:64] FLAG: --horizontal-pod-autoscaler-cpu-initialization-period="5m0s" 2025-12-08T17:46:48.788720505+00:00 stderr F I1208 17:46:48.788713 1 flags.go:64] FLAG: 
--horizontal-pod-autoscaler-downscale-stabilization="5m0s" 2025-12-08T17:46:48.788720505+00:00 stderr F I1208 17:46:48.788717 1 flags.go:64] FLAG: --horizontal-pod-autoscaler-initial-readiness-delay="30s" 2025-12-08T17:46:48.788735645+00:00 stderr F I1208 17:46:48.788721 1 flags.go:64] FLAG: --horizontal-pod-autoscaler-sync-period="15s" 2025-12-08T17:46:48.788742325+00:00 stderr F I1208 17:46:48.788724 1 flags.go:64] FLAG: --horizontal-pod-autoscaler-tolerance="0.1" 2025-12-08T17:46:48.788742325+00:00 stderr F I1208 17:46:48.788737 1 flags.go:64] FLAG: --http2-max-streams-per-connection="0" 2025-12-08T17:46:48.788749065+00:00 stderr F I1208 17:46:48.788741 1 flags.go:64] FLAG: --kube-api-burst="300" 2025-12-08T17:46:48.788749065+00:00 stderr F I1208 17:46:48.788745 1 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" 2025-12-08T17:46:48.788755786+00:00 stderr F I1208 17:46:48.788749 1 flags.go:64] FLAG: --kube-api-qps="150" 2025-12-08T17:46:48.788762286+00:00 stderr F I1208 17:46:48.788754 1 flags.go:64] FLAG: --kubeconfig="/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig" 2025-12-08T17:46:48.788762286+00:00 stderr F I1208 17:46:48.788758 1 flags.go:64] FLAG: --large-cluster-size-threshold="50" 2025-12-08T17:46:48.788769056+00:00 stderr F I1208 17:46:48.788762 1 flags.go:64] FLAG: --leader-elect="true" 2025-12-08T17:46:48.788775466+00:00 stderr F I1208 17:46:48.788766 1 flags.go:64] FLAG: --leader-elect-lease-duration="15s" 2025-12-08T17:46:48.788775466+00:00 stderr F I1208 17:46:48.788770 1 flags.go:64] FLAG: --leader-elect-renew-deadline="12s" 2025-12-08T17:46:48.788782157+00:00 stderr F I1208 17:46:48.788774 1 flags.go:64] FLAG: --leader-elect-resource-lock="leases" 2025-12-08T17:46:48.788782157+00:00 stderr F I1208 17:46:48.788777 1 flags.go:64] FLAG: --leader-elect-resource-name="kube-controller-manager" 2025-12-08T17:46:48.788788837+00:00 stderr F I1208 17:46:48.788780 1 flags.go:64] FLAG: --leader-elect-resource-namespace="kube-system" 2025-12-08T17:46:48.788788837+00:00 stderr F I1208 17:46:48.788784 1 flags.go:64] FLAG: --leader-elect-retry-period="3s" 2025-12-08T17:46:48.788795487+00:00 stderr F I1208 17:46:48.788787 1 flags.go:64] FLAG: --leader-migration-config="" 2025-12-08T17:46:48.788795487+00:00 stderr F I1208 17:46:48.788790 1 flags.go:64] FLAG: --legacy-service-account-token-clean-up-period="8760h0m0s" 2025-12-08T17:46:48.788802197+00:00 stderr F I1208 17:46:48.788794 1 flags.go:64] FLAG: --log-flush-frequency="5s" 2025-12-08T17:46:48.788808637+00:00 stderr F I1208 17:46:48.788797 1 flags.go:64] FLAG: --log-json-info-buffer-size="0" 2025-12-08T17:46:48.788808637+00:00 stderr F I1208 17:46:48.788804 1 flags.go:64] FLAG: --log-json-split-stream="false" 2025-12-08T17:46:48.788815278+00:00 stderr F I1208 17:46:48.788807 1 flags.go:64] FLAG: --log-text-info-buffer-size="0" 2025-12-08T17:46:48.788815278+00:00 stderr F I1208 17:46:48.788811 1 flags.go:64] FLAG: --log-text-split-stream="false" 2025-12-08T17:46:48.788825168+00:00 stderr F I1208 17:46:48.788814 1 flags.go:64] FLAG: --logging-format="text" 2025-12-08T17:46:48.788825168+00:00 stderr F I1208 17:46:48.788817 1 flags.go:64] FLAG: --master="" 2025-12-08T17:46:48.788825168+00:00 stderr F I1208 17:46:48.788820 1 flags.go:64] FLAG: --max-endpoints-per-slice="100" 2025-12-08T17:46:48.788832348+00:00 stderr F I1208 17:46:48.788824 1 flags.go:64] FLAG: --min-resync-period="12h0m0s" 2025-12-08T17:46:48.788832348+00:00 stderr F I1208 17:46:48.788827 1 
flags.go:64] FLAG: --mirroring-concurrent-service-endpoint-syncs="5" 2025-12-08T17:46:48.788839108+00:00 stderr F I1208 17:46:48.788831 1 flags.go:64] FLAG: --mirroring-endpointslice-updates-batch-period="0s" 2025-12-08T17:46:48.788839108+00:00 stderr F I1208 17:46:48.788835 1 flags.go:64] FLAG: --mirroring-max-endpoints-per-subset="1000" 2025-12-08T17:46:48.788845818+00:00 stderr F I1208 17:46:48.788838 1 flags.go:64] FLAG: --namespace-sync-period="5m0s" 2025-12-08T17:46:48.788845818+00:00 stderr F I1208 17:46:48.788842 1 flags.go:64] FLAG: --node-cidr-mask-size="0" 2025-12-08T17:46:48.788852459+00:00 stderr F I1208 17:46:48.788845 1 flags.go:64] FLAG: --node-cidr-mask-size-ipv4="0" 2025-12-08T17:46:48.788852459+00:00 stderr F I1208 17:46:48.788848 1 flags.go:64] FLAG: --node-cidr-mask-size-ipv6="0" 2025-12-08T17:46:48.788859109+00:00 stderr F I1208 17:46:48.788851 1 flags.go:64] FLAG: --node-eviction-rate="0.1" 2025-12-08T17:46:48.788859109+00:00 stderr F I1208 17:46:48.788855 1 flags.go:64] FLAG: --node-monitor-grace-period="50s" 2025-12-08T17:46:48.788865769+00:00 stderr F I1208 17:46:48.788858 1 flags.go:64] FLAG: --node-monitor-period="5s" 2025-12-08T17:46:48.788865769+00:00 stderr F I1208 17:46:48.788862 1 flags.go:64] FLAG: --node-startup-grace-period="1m0s" 2025-12-08T17:46:48.788872999+00:00 stderr F I1208 17:46:48.788865 1 flags.go:64] FLAG: --node-sync-period="0s" 2025-12-08T17:46:48.788872999+00:00 stderr F I1208 17:46:48.788868 1 flags.go:64] FLAG: --openshift-config="/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml" 2025-12-08T17:46:48.788902480+00:00 stderr F I1208 17:46:48.788872 1 flags.go:64] FLAG: --permit-address-sharing="false" 2025-12-08T17:46:48.788902480+00:00 stderr F I1208 17:46:48.788896 1 flags.go:64] FLAG: --permit-port-sharing="false" 2025-12-08T17:46:48.788911481+00:00 stderr F I1208 17:46:48.788900 1 flags.go:64] FLAG: --profiling="true" 2025-12-08T17:46:48.788911481+00:00 stderr F I1208 17:46:48.788905 1 flags.go:64] FLAG: --pv-recycler-increment-timeout-nfs="30" 2025-12-08T17:46:48.788911481+00:00 stderr F I1208 17:46:48.788908 1 flags.go:64] FLAG: --pv-recycler-minimum-timeout-hostpath="60" 2025-12-08T17:46:48.788919921+00:00 stderr F I1208 17:46:48.788911 1 flags.go:64] FLAG: --pv-recycler-minimum-timeout-nfs="300" 2025-12-08T17:46:48.788919921+00:00 stderr F I1208 17:46:48.788915 1 flags.go:64] FLAG: --pv-recycler-pod-template-filepath-hostpath="/etc/kubernetes/static-pod-resources/configmaps/recycler-config/recycler-pod.yaml" 2025-12-08T17:46:48.788927921+00:00 stderr F I1208 17:46:48.788920 1 flags.go:64] FLAG: --pv-recycler-pod-template-filepath-nfs="/etc/kubernetes/static-pod-resources/configmaps/recycler-config/recycler-pod.yaml" 2025-12-08T17:46:48.788927921+00:00 stderr F I1208 17:46:48.788924 1 flags.go:64] FLAG: --pv-recycler-timeout-increment-hostpath="30" 2025-12-08T17:46:48.788936691+00:00 stderr F I1208 17:46:48.788928 1 flags.go:64] FLAG: --pvclaimbinder-sync-period="15s" 2025-12-08T17:46:48.788936691+00:00 stderr F I1208 17:46:48.788931 1 flags.go:64] FLAG: --requestheader-allowed-names="[]" 2025-12-08T17:46:48.788944682+00:00 stderr F I1208 17:46:48.788937 1 flags.go:64] FLAG: --requestheader-client-ca-file="/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:46:48.788955522+00:00 stderr F I1208 17:46:48.788941 1 flags.go:64] FLAG: --requestheader-extra-headers-prefix="[x-remote-extra-]" 2025-12-08T17:46:48.788955522+00:00 stderr F I1208 17:46:48.788947 1 flags.go:64] FLAG: 
--requestheader-group-headers="[x-remote-group]" 2025-12-08T17:46:48.788955522+00:00 stderr F I1208 17:46:48.788952 1 flags.go:64] FLAG: --requestheader-uid-headers="[]" 2025-12-08T17:46:48.788962792+00:00 stderr F I1208 17:46:48.788956 1 flags.go:64] FLAG: --requestheader-username-headers="[x-remote-user]" 2025-12-08T17:46:48.788969322+00:00 stderr F I1208 17:46:48.788961 1 flags.go:64] FLAG: --resource-quota-sync-period="5m0s" 2025-12-08T17:46:48.788969322+00:00 stderr F I1208 17:46:48.788965 1 flags.go:64] FLAG: --root-ca-file="/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt" 2025-12-08T17:46:48.788976083+00:00 stderr F I1208 17:46:48.788969 1 flags.go:64] FLAG: --route-reconciliation-period="10s" 2025-12-08T17:46:48.788976083+00:00 stderr F I1208 17:46:48.788972 1 flags.go:64] FLAG: --secondary-node-eviction-rate="0.01" 2025-12-08T17:46:48.788982743+00:00 stderr F I1208 17:46:48.788976 1 flags.go:64] FLAG: --secure-port="10257" 2025-12-08T17:46:48.788989213+00:00 stderr F I1208 17:46:48.788980 1 flags.go:64] FLAG: --service-account-private-key-file="/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key" 2025-12-08T17:46:48.788989213+00:00 stderr F I1208 17:46:48.788984 1 flags.go:64] FLAG: --service-cluster-ip-range="10.217.4.0/23" 2025-12-08T17:46:48.788995933+00:00 stderr F I1208 17:46:48.788987 1 flags.go:64] FLAG: --show-hidden-metrics-for-version="" 2025-12-08T17:46:48.788995933+00:00 stderr F I1208 17:46:48.788991 1 flags.go:64] FLAG: --terminated-pod-gc-threshold="12500" 2025-12-08T17:46:48.789002573+00:00 stderr F I1208 17:46:48.788994 1 flags.go:64] FLAG: --tls-cert-file="/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt" 2025-12-08T17:46:48.789010674+00:00 stderr F I1208 17:46:48.788998 1 flags.go:64] FLAG: --tls-cipher-suites="[TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256]" 2025-12-08T17:46:48.789017234+00:00 stderr F I1208 17:46:48.789008 1 flags.go:64] FLAG: --tls-min-version="VersionTLS12" 2025-12-08T17:46:48.789017234+00:00 stderr F I1208 17:46:48.789013 1 flags.go:64] FLAG: --tls-private-key-file="/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:46:48.789033294+00:00 stderr F I1208 17:46:48.789017 1 flags.go:64] FLAG: --tls-sni-cert-key="[]" 2025-12-08T17:46:48.789033294+00:00 stderr F I1208 17:46:48.789029 1 flags.go:64] FLAG: --unhealthy-zone-threshold="0.55" 2025-12-08T17:46:48.789040175+00:00 stderr F I1208 17:46:48.789033 1 flags.go:64] FLAG: --unsupported-kube-api-over-localhost="false" 2025-12-08T17:46:48.789040175+00:00 stderr F I1208 17:46:48.789037 1 flags.go:64] FLAG: --use-service-account-credentials="true" 2025-12-08T17:46:48.789046865+00:00 stderr F I1208 17:46:48.789040 1 flags.go:64] FLAG: --v="2" 2025-12-08T17:46:48.789054895+00:00 stderr F I1208 17:46:48.789045 1 flags.go:64] FLAG: --version="false" 2025-12-08T17:46:48.789061345+00:00 stderr F I1208 17:46:48.789053 1 flags.go:64] FLAG: --vmodule="" 2025-12-08T17:46:48.789227040+00:00 stderr F W1208 17:46:48.789197 1 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup 2025-12-08T17:46:48.789227040+00:00 stderr F W1208 17:46:48.789208 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall 
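The flags.go:64 dump that ends just above (at --vmodule) records every effective flag the process is actually running with, which is usually a better source of truth than the static pod manifest. A hedged example of pulling a few settings of interest, such as the TLS and leader-election configuration, out of a saved log file (name assumed):

# Sketch: list the effective TLS and leader-election flags from the startup dump.
grep -o 'FLAG: --[a-z0-9-]*="[^"]*"' kube-controller-manager.log \
  | grep -E -- '--(tls-|leader-elect)' | sort -u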
2025-12-08T17:46:48.789227040+00:00 stderr F W1208 17:46:48.789211 1 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata 2025-12-08T17:46:48.789227040+00:00 stderr F W1208 17:46:48.789214 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification 2025-12-08T17:46:48.789227040+00:00 stderr F W1208 17:46:48.789217 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI 2025-12-08T17:46:48.789227040+00:00 stderr F W1208 17:46:48.789220 1 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig 2025-12-08T17:46:48.789254741+00:00 stderr F W1208 17:46:48.789224 1 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation 2025-12-08T17:46:48.789254741+00:00 stderr F W1208 17:46:48.789228 1 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig 2025-12-08T17:46:48.789254741+00:00 stderr F W1208 17:46:48.789231 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS 2025-12-08T17:46:48.789254741+00:00 stderr F W1208 17:46:48.789234 1 feature_gate.go:328] unrecognized feature gate: PinnedImages 2025-12-08T17:46:48.789254741+00:00 stderr F W1208 17:46:48.789237 1 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas 2025-12-08T17:46:48.789254741+00:00 stderr F W1208 17:46:48.789240 1 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes 2025-12-08T17:46:48.789254741+00:00 stderr F W1208 17:46:48.789244 1 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 2025-12-08T17:46:48.789254741+00:00 stderr F W1208 17:46:48.789248 1 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud 2025-12-08T17:46:48.789254741+00:00 stderr F W1208 17:46:48.789252 1 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts 2025-12-08T17:46:48.789265762+00:00 stderr F W1208 17:46:48.789255 1 feature_gate.go:328] unrecognized feature gate: Example2 2025-12-08T17:46:48.789265762+00:00 stderr F W1208 17:46:48.789258 1 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall 2025-12-08T17:46:48.789265762+00:00 stderr F W1208 17:46:48.789261 1 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration 2025-12-08T17:46:48.789274692+00:00 stderr F W1208 17:46:48.789264 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS 2025-12-08T17:46:48.789274692+00:00 stderr F W1208 17:46:48.789267 1 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement 2025-12-08T17:46:48.789274692+00:00 stderr F W1208 17:46:48.789270 1 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation 2025-12-08T17:46:48.789283142+00:00 stderr F W1208 17:46:48.789273 1 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles 2025-12-08T17:46:48.789283142+00:00 stderr F W1208 17:46:48.789276 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud 2025-12-08T17:46:48.789283142+00:00 stderr F W1208 17:46:48.789279 1 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall 2025-12-08T17:46:48.789291432+00:00 stderr F W1208 17:46:48.789282 1 feature_gate.go:328] unrecognized feature gate: DNSNameResolver 2025-12-08T17:46:48.789291432+00:00 stderr F W1208 17:46:48.789285 1 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode 2025-12-08T17:46:48.789291432+00:00 stderr F W1208 17:46:48.789288 1 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather 
2025-12-08T17:46:48.789299553+00:00 stderr F W1208 17:46:48.789291 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall 2025-12-08T17:46:48.789299553+00:00 stderr F W1208 17:46:48.789294 1 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot 2025-12-08T17:46:48.789299553+00:00 stderr F W1208 17:46:48.789297 1 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk 2025-12-08T17:46:48.789307723+00:00 stderr F W1208 17:46:48.789300 1 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities 2025-12-08T17:46:48.789307723+00:00 stderr F W1208 17:46:48.789303 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints 2025-12-08T17:46:48.789307723+00:00 stderr F W1208 17:46:48.789305 1 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations 2025-12-08T17:46:48.789320903+00:00 stderr F W1208 17:46:48.789309 1 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup 2025-12-08T17:46:48.789320903+00:00 stderr F W1208 17:46:48.789313 1 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy 2025-12-08T17:46:48.789320903+00:00 stderr F W1208 17:46:48.789315 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks 2025-12-08T17:46:48.789320903+00:00 stderr F W1208 17:46:48.789318 1 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv 2025-12-08T17:46:48.789329154+00:00 stderr F W1208 17:46:48.789321 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere 2025-12-08T17:46:48.789329154+00:00 stderr F W1208 17:46:48.789323 1 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements 2025-12-08T17:46:48.789370385+00:00 stderr F W1208 17:46:48.789328 1 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. 
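Besides the unrecognized names, two gates were accepted but specially flagged in both warning waves: KMSv1 is logged as a deprecated gate and ServiceAccountTokenNodeBinding as one that is already GA, each with a note that the setting will be removed in a future release. Those notes can be surfaced the same way (log file name again assumed):

# Sketch: show the accepted-but-flagged gates (deprecated or already GA).
grep -oE 'Setting (deprecated|GA) feature gate [A-Za-z0-9]+=[a-z]+' kube-controller-manager.log \
  | sort -u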
2025-12-08T17:46:48.789370385+00:00 stderr F W1208 17:46:48.789331 1 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode 2025-12-08T17:46:48.789370385+00:00 stderr F W1208 17:46:48.789358 1 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI 2025-12-08T17:46:48.789370385+00:00 stderr F W1208 17:46:48.789361 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk 2025-12-08T17:46:48.789370385+00:00 stderr F W1208 17:46:48.789364 1 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets 2025-12-08T17:46:48.789370385+00:00 stderr F W1208 17:46:48.789367 1 feature_gate.go:328] unrecognized feature gate: InsightsConfig 2025-12-08T17:46:48.789380375+00:00 stderr F W1208 17:46:48.789370 1 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform 2025-12-08T17:46:48.789380375+00:00 stderr F W1208 17:46:48.789373 1 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider 2025-12-08T17:46:48.789380375+00:00 stderr F W1208 17:46:48.789376 1 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts 2025-12-08T17:46:48.789388505+00:00 stderr F W1208 17:46:48.789379 1 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement 2025-12-08T17:46:48.789388505+00:00 stderr F W1208 17:46:48.789382 1 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager 2025-12-08T17:46:48.789388505+00:00 stderr F W1208 17:46:48.789386 1 feature_gate.go:328] unrecognized feature gate: ShortCertRotation 2025-12-08T17:46:48.789396846+00:00 stderr F W1208 17:46:48.789389 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure 2025-12-08T17:46:48.789396846+00:00 stderr F W1208 17:46:48.789392 1 feature_gate.go:328] unrecognized feature gate: Example 2025-12-08T17:46:48.789396846+00:00 stderr F W1208 17:46:48.789394 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings 2025-12-08T17:46:48.789404946+00:00 stderr F W1208 17:46:48.789398 1 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy 2025-12-08T17:46:48.789404946+00:00 stderr F W1208 17:46:48.789401 1 feature_gate.go:328] unrecognized feature gate: DualReplica 2025-12-08T17:46:48.789413366+00:00 stderr F W1208 17:46:48.789404 1 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter 2025-12-08T17:46:48.789413366+00:00 stderr F W1208 17:46:48.789406 1 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA 2025-12-08T17:46:48.789413366+00:00 stderr F W1208 17:46:48.789409 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImages 2025-12-08T17:46:48.789429357+00:00 stderr F W1208 17:46:48.789412 1 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace 2025-12-08T17:46:48.789429357+00:00 stderr F W1208 17:46:48.789415 1 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting 2025-12-08T17:46:48.789429357+00:00 stderr F W1208 17:46:48.789420 1 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure 2025-12-08T17:46:48.789429357+00:00 stderr F W1208 17:46:48.789423 1 feature_gate.go:328] unrecognized feature gate: GatewayAPI 2025-12-08T17:46:48.789429357+00:00 stderr F W1208 17:46:48.789425 1 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes 2025-12-08T17:46:48.789438237+00:00 stderr F W1208 17:46:48.789428 1 feature_gate.go:328] unrecognized feature gate: SignatureStores 2025-12-08T17:46:48.789438237+00:00 stderr F W1208 17:46:48.789431 1 
feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS 2025-12-08T17:46:48.789438237+00:00 stderr F W1208 17:46:48.789435 1 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity 2025-12-08T17:46:48.789445958+00:00 stderr F W1208 17:46:48.789438 1 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig 2025-12-08T17:46:48.789445958+00:00 stderr F W1208 17:46:48.789441 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall 2025-12-08T17:46:48.789453338+00:00 stderr F W1208 17:46:48.789444 1 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration 2025-12-08T17:46:48.789453338+00:00 stderr F W1208 17:46:48.789447 1 feature_gate.go:328] unrecognized feature gate: NewOLM 2025-12-08T17:46:48.789453338+00:00 stderr F W1208 17:46:48.789450 1 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses 2025-12-08T17:46:48.789461069+00:00 stderr F W1208 17:46:48.789453 1 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup 2025-12-08T17:46:48.789461069+00:00 stderr F W1208 17:46:48.789456 1 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration 2025-12-08T17:46:48.789468619+00:00 stderr F W1208 17:46:48.789459 1 feature_gate.go:328] unrecognized feature gate: UpgradeStatus 2025-12-08T17:46:48.789468619+00:00 stderr F W1208 17:46:48.789462 1 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController 2025-12-08T17:46:48.789468619+00:00 stderr F W1208 17:46:48.789465 1 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController 2025-12-08T17:46:48.789476799+00:00 stderr F W1208 17:46:48.789468 1 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission 2025-12-08T17:46:48.789476799+00:00 stderr F W1208 17:46:48.789471 1 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks 2025-12-08T17:46:48.789476799+00:00 stderr F W1208 17:46:48.789474 1 feature_gate.go:328] unrecognized feature gate: OVNObservability 2025-12-08T17:46:48.789484929+00:00 stderr F W1208 17:46:48.789477 1 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS 2025-12-08T17:46:48.789484929+00:00 stderr F W1208 17:46:48.789480 1 feature_gate.go:328] unrecognized feature gate: GatewayAPIController 2025-12-08T17:46:48.789493310+00:00 stderr F W1208 17:46:48.789483 1 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal 2025-12-08T17:46:48.789493310+00:00 stderr F W1208 17:46:48.789488 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDC 2025-12-08T17:46:48.789493310+00:00 stderr F W1208 17:46:48.789491 1 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota 2025-12-08T17:46:48.789501460+00:00 stderr F W1208 17:46:48.789494 1 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix 2025-12-08T17:46:48.791895914+00:00 stderr F I1208 17:46:48.791817 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:46:49.696431288+00:00 stderr F I1208 17:46:49.696374 1 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 2025-12-08T17:46:49.696465210+00:00 stderr F I1208 17:46:49.696447 1 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" 
name="request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:46:49.700471866+00:00 stderr F I1208 17:46:49.700428 1 controllermanager.go:203] "Starting" version="v1.33.5" 2025-12-08T17:46:49.700516968+00:00 stderr F I1208 17:46:49.700504 1 controllermanager.go:205] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" 2025-12-08T17:46:49.702221381+00:00 stderr F I1208 17:46:49.702164 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:46:49.702299123+00:00 stderr F I1208 17:46:49.702212 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 2025-12-08T17:46:49.702684395+00:00 stderr F I1208 17:46:49.702639 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:46:49.702801360+00:00 stderr F I1208 17:46:49.702771 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:46:49.702752228 +0000 UTC))" 2025-12-08T17:46:49.702857411+00:00 stderr F I1208 17:46:49.702837 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:46:49.70282688 +0000 UTC))" 2025-12-08T17:46:49.702931534+00:00 stderr F I1208 17:46:49.702912 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:46:49.702900923 +0000 UTC))" 2025-12-08T17:46:49.702979555+00:00 stderr F I1208 17:46:49.702963 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:46:49.702954544 +0000 UTC))" 2025-12-08T17:46:49.703020256+00:00 stderr F I1208 17:46:49.703003 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 
17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:46:49.702995076 +0000 UTC))" 2025-12-08T17:46:49.703070398+00:00 stderr F I1208 17:46:49.703054 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:46:49.703047027 +0000 UTC))" 2025-12-08T17:46:49.703112269+00:00 stderr F I1208 17:46:49.703094 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:46:49.703086849 +0000 UTC))" 2025-12-08T17:46:49.703150131+00:00 stderr F I1208 17:46:49.703134 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:46:49.70312829 +0000 UTC))" 2025-12-08T17:46:49.703190662+00:00 stderr F I1208 17:46:49.703173 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:46:49.703166021 +0000 UTC))" 2025-12-08T17:46:49.703237323+00:00 stderr F I1208 17:46:49.703220 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:46:49.703212782 +0000 UTC))" 2025-12-08T17:46:49.703450160+00:00 stderr F I1208 17:46:49.703425 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"kube-controller-manager.openshift-kube-controller-manager.svc\" [serving] validServingFor=[kube-controller-manager.openshift-kube-controller-manager.svc,kube-controller-manager.openshift-kube-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:46:49.703411769 +0000 UTC))" 2025-12-08T17:46:49.703651476+00:00 stderr F I1208 17:46:49.703631 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765216009\" [serving] validServingFor=[apiserver-loopback-client] 
issuer=\"apiserver-loopback-client-ca@1765216009\" (2025-12-08 16:46:48 +0000 UTC to 2028-12-08 16:46:48 +0000 UTC (now=2025-12-08 17:46:49.703619895 +0000 UTC))" 2025-12-08T17:46:49.703707698+00:00 stderr F I1208 17:46:49.703693 1 secure_serving.go:211] Serving securely on [::]:10257 2025-12-08T17:46:49.703834082+00:00 stderr F I1208 17:46:49.703796 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:46:49.704210653+00:00 stderr F I1208 17:46:49.704192 1 leaderelection.go:257] attempting to acquire leader lease kube-system/kube-controller-manager... ././@LongLink0000644000000000000000000000032200000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-cert-syncer/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-control0000755000175000017500000000000015115611521033052 5ustar zuulzuul././@LongLink0000644000000000000000000000032700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-cert-syncer/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-control0000644000175000017500000003015615115611521033061 0ustar zuulzuul2025-12-08T17:42:25.406557568+00:00 stderr F I1208 17:42:25.406148 1 base_controller.go:76] Waiting for caches to sync for CertSyncController 2025-12-08T17:42:25.406557568+00:00 stderr F I1208 17:42:25.406152 1 observer_polling.go:159] Starting file observer 2025-12-08T17:42:25.408960801+00:00 stderr F E1208 17:42:25.408918 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ConfigMap: Get \"https://localhost:6443/api/v1/namespaces/openshift-kube-controller-manager/configmaps?limit=500&resourceVersion=0\": dial tcp [::1]:6443: connect: connection refused" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ConfigMap" 2025-12-08T17:42:25.408979882+00:00 stderr F E1208 17:42:25.408955 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Secret: Ge********** \"https://localhost:6443/api/v1/namespaces/openshift-kube-controller-manager/secrets?limit=500&resourceVersion=0\": dial tcp [::1]:6443: connect: connection refused" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.Secret" 2025-12-08T17:42:36.563856486+00:00 stderr F E1208 17:42:36.563777 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ConfigMap: Get \"https://localhost:6443/api/v1/namespaces/openshift-kube-controller-manager/configmaps?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ConfigMap" 2025-12-08T17:42:36.992953961+00:00 stderr F E1208 17:42:36.992007 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Secret: Ge********** \"https://localhost:6443/api/v1/namespaces/openshift-kube-controller-manager/secrets?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.Secret" 2025-12-08T17:42:43.506701135+00:00 stderr F I1208 17:42:43.506611 1 base_controller.go:82] Caches are synced for CertSyncController 2025-12-08T17:42:43.506701135+00:00 stderr F I1208 17:42:43.506655 1 base_controller.go:119] Starting 
#1 worker of CertSyncController controller ... 2025-12-08T17:42:43.506753717+00:00 stderr F I1208 17:42:43.506724 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:42:43.507630610+00:00 stderr F I1208 17:42:43.507579 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:44:28.627646960+00:00 stderr F I1208 17:44:28.624354 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:44:28.627646960+00:00 stderr F I1208 17:44:28.624946 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:44:28.637624742+00:00 stderr F I1208 17:44:28.637567 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:44:28.638895817+00:00 stderr F I1208 17:44:28.637812 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:44:28.647072759+00:00 stderr F I1208 17:44:28.647020 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:44:28.647418540+00:00 stderr F I1208 17:44:28.647290 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:44:28.653962068+00:00 stderr F I1208 17:44:28.653910 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:44:28.654134752+00:00 stderr F I1208 17:44:28.654114 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:44:28.714500699+00:00 stderr F I1208 17:44:28.712572 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:44:28.714500699+00:00 stderr F I1208 17:44:28.712856 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:44:28.723894455+00:00 stderr F I1208 17:44:28.723811 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:44:28.724055580+00:00 stderr F I1208 17:44:28.724034 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:44:35.630561428+00:00 stderr F I1208 17:44:35.630202 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:44:35.630561428+00:00 stderr F I1208 17:44:35.630478 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:44:35.636382717+00:00 stderr F I1208 17:44:35.635773 1 certsync_controller.go:260] Creating directory "/etc/kubernetes/static-pod-certs/secrets/kube-controller-manager-client-cert-key" ... 2025-12-08T17:44:35.636382717+00:00 stderr F I1208 17:44:35.635834 1 certsync_controller.go:274] Writing secret manifest "/etc/kubernetes/static-pod-certs/secrets/kube-controller-manager-client-cert-key/tls.crt" ... 
2025-12-08T17:44:35.636681036+00:00 stderr F I1208 17:44:35.636267 1 certsync_controller.go:274] Writing secret manifest "/etc/kubernetes/static-pod-certs/secrets/kube-controller-manager-client-cert-key/tls.key" ... 2025-12-08T17:44:35.637248111+00:00 stderr F I1208 17:44:35.637143 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CertificateUpdated' Wrote updated secret: op**********ey 2025-12-08T17:44:43.358673124+00:00 stderr F I1208 17:44:43.358612 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:44:43.360918506+00:00 stderr F I1208 17:44:43.360888 1 certsync_controller.go:155] Creating directory "/etc/kubernetes/static-pod-certs/configmaps/client-ca" ... 2025-12-08T17:44:43.360954027+00:00 stderr F I1208 17:44:43.360937 1 certsync_controller.go:168] Writing configmap manifest "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" ... 2025-12-08T17:44:43.361795581+00:00 stderr F I1208 17:44:43.361765 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:44:43.363079507+00:00 stderr F I1208 17:44:43.362040 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CertificateUpdated' Wrote updated configmap: openshift-kube-controller-manager/client-ca 2025-12-08T17:45:19.463274963+00:00 stderr F I1208 17:45:19.463191 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:45:19.465743832+00:00 stderr F I1208 17:45:19.465714 1 certsync_controller.go:155] Creating directory "/etc/kubernetes/static-pod-certs/configmaps/client-ca" ... 2025-12-08T17:45:19.465800294+00:00 stderr F I1208 17:45:19.465762 1 certsync_controller.go:168] Writing configmap manifest "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" ... 
2025-12-08T17:45:19.466851223+00:00 stderr F I1208 17:45:19.466795 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:45:19.467759258+00:00 stderr F I1208 17:45:19.467729 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-controller-manager", Name:"kube-controller-manager-crc", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CertificateUpdated' Wrote updated configmap: openshift-kube-controller-manager/client-ca 2025-12-08T17:46:25.350340683+00:00 stderr F I1208 17:46:25.350217 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:46:25.350541669+00:00 stderr F I1208 17:46:25.350521 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:46:25.350659882+00:00 stderr F I1208 17:46:25.350618 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:46:25.350785476+00:00 stderr F I1208 17:46:25.350767 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:46:29.665608558+00:00 stderr F I1208 17:46:29.665518 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:46:29.665832525+00:00 stderr F I1208 17:46:29.665802 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:46:29.665972969+00:00 stderr F I1208 17:46:29.665940 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:46:29.666392562+00:00 stderr F I1208 17:46:29.666353 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:46:29.666465354+00:00 stderr F I1208 17:46:29.666439 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:46:29.666605548+00:00 stderr F I1208 17:46:29.666579 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:56:25.351352767+00:00 stderr F I1208 17:56:25.351241 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:56:25.352243951+00:00 stderr F I1208 17:56:25.352188 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:56:29.669432521+00:00 stderr F I1208 17:56:29.669075 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:56:29.669755519+00:00 stderr F I1208 17:56:29.669685 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:56:29.669901763+00:00 stderr F I1208 17:56:29.669848 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:56:29.671075544+00:00 stderr F I1208 17:56:29.670073 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 
2025-12-08T17:56:29.671075544+00:00 stderr F I1208 17:56:29.670165 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:56:29.671075544+00:00 stderr F I1208 17:56:29.670336 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] 2025-12-08T17:56:29.671075544+00:00 stderr F I1208 17:56:29.670461 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true}] 2025-12-08T17:56:29.671075544+00:00 stderr F I1208 17:56:29.670661 1 certsync_controller.go:178] Syncing secrets: [{kube-controller-manager-client-cert-key false} {csr-signer false}] ././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611513033052 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b/collect-profiles/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611520033050 5ustar zuulzuul././@LongLink0000644000000000000000000000031700000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b/collect-profiles/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000644000175000017500000000134015115611513033052 0ustar zuulzuul2025-12-08T18:00:01.128975820+00:00 stderr F time="2025-12-08T18:00:01Z" level=info msg="Successfully created configMap openshift-operator-lifecycle-manager/olm-operator-heap-qvvpc" 2025-12-08T18:00:01.182423796+00:00 stderr F time="2025-12-08T18:00:01Z" level=info msg="Successfully created configMap openshift-operator-lifecycle-manager/catalog-operator-heap-624cb" 2025-12-08T18:00:01.186507974+00:00 stderr F time="2025-12-08T18:00:01Z" level=info msg="Successfully deleted configMap openshift-operator-lifecycle-manager/catalog-operator-heap-jb2b2" 2025-12-08T18:00:01.190719874+00:00 stderr F time="2025-12-08T18:00:01Z" level=info msg="Successfully deleted configMap openshift-operator-lifecycle-manager/olm-operator-heap-qvfbx" ././@LongLink0000644000000000000000000000025500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_inte0000755000175000017500000000000015115611513033141 5ustar zuulzuul././@LongLink0000644000000000000000000000030300000000000011577 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60/interconnect-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_inte0000755000175000017500000000000015115611521033140 5ustar zuulzuul././@LongLink0000644000000000000000000000031000000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60/interconnect-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_inte0000644000175000017500000002470515115611513033153 0ustar zuulzuul2025-12-08T17:57:06.384474381+00:00 stderr F {"level":"info","ts":1765216626.3840914,"logger":"cmd","msg":"Go Version: go1.10.3"} 2025-12-08T17:57:06.384698546+00:00 stderr F {"level":"info","ts":1765216626.3844738,"logger":"cmd","msg":"Go OS/Arch: linux/amd64"} 2025-12-08T17:57:06.384698546+00:00 stderr F {"level":"info","ts":1765216626.384526,"logger":"cmd","msg":"Version of operator-sdk: v0.4.0+git"} 2025-12-08T17:57:06.386340219+00:00 stderr F {"level":"info","ts":1765216626.3862891,"logger":"leader","msg":"Trying to become the leader."} 2025-12-08T17:57:06.575944024+00:00 stderr F {"level":"info","ts":1765216626.575725,"logger":"leader","msg":"No pre-existing lock was found."} 2025-12-08T17:57:06.588209875+00:00 stderr F {"level":"info","ts":1765216626.5879674,"logger":"leader","msg":"Became the leader."} 2025-12-08T17:57:06.715131565+00:00 stderr F {"level":"info","ts":1765216626.7150354,"logger":"cmd","msg":"Registering Components."} 2025-12-08T17:57:06.716763038+00:00 stderr F {"level":"info","ts":1765216626.7167091,"logger":"certificates","msg":"Issuer crd for cert-manager not present, qdr-operator will be unable to request certificate generation"} 2025-12-08T17:57:06.716763038+00:00 stderr F {"level":"info","ts":1765216626.716745,"logger":"env","msg":"attempting detection of OpenShift platform..."} 2025-12-08T17:57:06.719400847+00:00 stderr F {"level":"info","ts":1765216626.7193594,"logger":"env","msg":"OpenShift route detected in api groups, returning true"} 2025-12-08T17:57:06.719718595+00:00 stderr F {"level":"info","ts":1765216626.7196798,"logger":"kubebuilder.controller","msg":"Starting EventSource","controller":"interconnect-controller","source":"kind source: /, Kind="} 2025-12-08T17:57:06.720012162+00:00 stderr F {"level":"info","ts":1765216626.719995,"logger":"kubebuilder.controller","msg":"Starting EventSource","controller":"interconnect-controller","source":"kind source: /, Kind="} 2025-12-08T17:57:06.720166606+00:00 stderr F {"level":"info","ts":1765216626.7201467,"logger":"kubebuilder.controller","msg":"Starting EventSource","controller":"interconnect-controller","source":"kind source: /, Kind="} 2025-12-08T17:57:06.720299660+00:00 stderr F {"level":"info","ts":1765216626.7202845,"logger":"kubebuilder.controller","msg":"Starting EventSource","controller":"interconnect-controller","source":"kind source: /, Kind="} 2025-12-08T17:57:06.720412833+00:00 stderr F {"level":"info","ts":1765216626.7203991,"logger":"kubebuilder.controller","msg":"Starting EventSource","controller":"interconnect-controller","source":"kind source: /, Kind="} 2025-12-08T17:57:06.720533486+00:00 stderr F {"level":"info","ts":1765216626.7205179,"logger":"kubebuilder.controller","msg":"Starting 
EventSource","controller":"interconnect-controller","source":"kind source: /, Kind="} 2025-12-08T17:57:06.720650189+00:00 stderr F {"level":"info","ts":1765216626.7206361,"logger":"kubebuilder.controller","msg":"Starting EventSource","controller":"interconnect-controller","source":"kind source: /, Kind="} 2025-12-08T17:57:06.720761581+00:00 stderr F {"level":"info","ts":1765216626.7207475,"logger":"kubebuilder.controller","msg":"Starting EventSource","controller":"interconnect-controller","source":"kind source: /, Kind="} 2025-12-08T17:57:06.720963547+00:00 stderr F {"level":"info","ts":1765216626.7209456,"logger":"kubebuilder.controller","msg":"Starting EventSource","controller":"interconnect-controller","source":"kind source: /, Kind="} 2025-12-08T17:57:06.721094340+00:00 stderr F {"level":"info","ts":1765216626.721082,"logger":"cmd","msg":"Starting the Cmd."} 2025-12-08T17:57:06.821675383+00:00 stderr F {"level":"info","ts":1765216626.8213754,"logger":"kubebuilder.controller","msg":"Starting Controller","controller":"interconnect-controller"} 2025-12-08T17:57:06.922017711+00:00 stderr F {"level":"info","ts":1765216626.9218204,"logger":"kubebuilder.controller","msg":"Starting workers","controller":"interconnect-controller","worker count":1} 2025-12-08T17:57:28.266169429+00:00 stderr F {"level":"info","ts":1765216648.2660139,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:28.372585275+00:00 stderr F {"level":"info","ts":1765216648.3724809,"logger":"controller_interconnect","msg":"Creating a new Role","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect","role":{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"Role","namespace":"service-telemetry","name":"default-interconnect"}} 2025-12-08T17:57:28.398054229+00:00 stderr F {"level":"info","ts":1765216648.397976,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:28.401869678+00:00 stderr F {"level":"info","ts":1765216648.4017916,"logger":"controller_interconnect","msg":"Creating a new RoleBinding","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect","RoleBinding":{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"RoleBinding","namespace":"service-telemetry","name":"default-interconnect"}} 2025-12-08T17:57:28.498731885+00:00 stderr F {"level":"info","ts":1765216648.4986644,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:28.501642521+00:00 stderr F {"level":"info","ts":1765216648.5015912,"logger":"controller_interconnect","msg":"Creating a new ServiceAccount","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect","ServiceAccount":{"apiVersion":"v1","kind":"ServiceAccount","namespace":"service-telemetry","name":"default-interconnect"}} 2025-12-08T17:57:28.505378518+00:00 stderr F {"level":"info","ts":1765216648.505346,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:28.509511126+00:00 stderr F {"level":"info","ts":1765216648.5091553,"logger":"controller_interconnect","msg":"Creating a new 
Deployment","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect","Deployment":{"apiVersion":"apps/v1","kind":"Deployment","namespace":"service-telemetry","name":"default-interconnect"}} 2025-12-08T17:57:28.523524072+00:00 stderr F {"level":"info","ts":1765216648.523453,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:28.528587484+00:00 stderr F {"level":"info","ts":1765216648.5285077,"logger":"controller_interconnect","msg":"Creating service for interconnect deployment","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect","Service":{"apiVersion":"v1","kind":"Service","namespace":"service-telemetry","name":"default-interconnect"}} 2025-12-08T17:57:28.547623611+00:00 stderr F {"level":"info","ts":1765216648.5459297,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:28.555577257+00:00 stderr F {"level":"info","ts":1765216648.5550866,"logger":"controller_interconnect","msg":"Creating route for interconnect deployment","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect","listener":{"port":5671,"sslProfile":"openstack","saslMechanisms":"PLAIN","authenticatePeer":true,"expose":true}} 2025-12-08T17:57:28.599993646+00:00 stderr F {"level":"info","ts":1765216648.5997403,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:28.613956770+00:00 stderr F {"level":"info","ts":1765216648.6129873,"logger":"controller_interconnect","msg":"Creating user secret for interconnect deployment","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect","Secret":{"apiVersion":"v1","kind":"Secret","namespace":"service-telemetry","name":"default-interconnect-users"}} 2025-12-08T17:57:28.620720567+00:00 stderr F {"level":"info","ts":1765216648.6206028,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:28.632116584+00:00 stderr F {"level":"info","ts":1765216648.6314397,"logger":"controller_interconnect","msg":"Pod names updated","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:28.632116584+00:00 stderr F {"level":"info","ts":1765216648.6314783,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:28.760021240+00:00 stderr F {"level":"info","ts":1765216648.7599387,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:57:34.829925249+00:00 stderr F {"level":"info","ts":1765216654.8291454,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:58:32.580027418+00:00 stderr F {"level":"info","ts":1765216712.5798151,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:58:34.418957346+00:00 stderr F {"level":"info","ts":1765216714.4188974,"logger":"controller_interconnect","msg":"Reconciling 
Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:58:34.432298901+00:00 stderr F {"level":"info","ts":1765216714.431741,"logger":"controller_interconnect","msg":"Pod names updated","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:58:34.435477143+00:00 stderr F {"level":"info","ts":1765216714.4332428,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:58:34.441717685+00:00 stderr F {"level":"info","ts":1765216714.4416592,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} 2025-12-08T17:58:36.566787488+00:00 stderr F {"level":"info","ts":1765216716.5653431,"logger":"controller_interconnect","msg":"Reconciling Interconnect","Request.Namespace":"service-telemetry","Request.Name":"default-interconnect"} ././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611514033102 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/oauth-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611521033100 5ustar zuulzuul././@LongLink0000644000000000000000000000031700000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/oauth-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000271115115611514033105 0ustar zuulzuul2025-12-08T17:58:11.762536617+00:00 stderr F 2025/12/08 17:58:11 provider.go:129: Defaulting client-id to system:serviceaccount:service-telemetry:smart-gateway 2025-12-08T17:58:11.762721132+00:00 stderr F 2025/12/08 17:58:11 provider.go:134: Defaulting client-secret to service account token /var/run/secrets/kubernetes.io/serviceaccount/token 2025-12-08T17:58:11.764006224+00:00 stderr F 2025/12/08 17:58:11 provider.go:358: Delegation of authentication and authorization to OpenShift is enabled for bearer tokens and client certificates. 
2025-12-08T17:58:11.790783747+00:00 stderr F 2025/12/08 17:58:11 oauthproxy.go:210: mapping path "/" => upstream "http://localhost:8081/" 2025-12-08T17:58:11.790783747+00:00 stderr F 2025/12/08 17:58:11 oauthproxy.go:237: OAuthProxy configured for Client ID: system:serviceaccount:service-telemetry:smart-gateway 2025-12-08T17:58:11.790783747+00:00 stderr F 2025/12/08 17:58:11 oauthproxy.go:247: Cookie settings: name:_oauth_proxy secure(https):true httponly:true expiry:168h0m0s domain: samesite: refresh:disabled 2025-12-08T17:58:11.794168614+00:00 stderr F 2025/12/08 17:58:11 http.go:64: HTTP: listening on 127.0.0.1:4180 2025-12-08T17:58:11.794670617+00:00 stderr F 2025/12/08 17:58:11 http.go:110: HTTPS: listening on [::]:8083 2025-12-08T17:58:11.794742649+00:00 stderr F I1208 17:58:11.794727 1 dynamic_serving_content.go:135] "Starting controller" name="serving::/etc/tls/private/tls.crt::/etc/tls/private/tls.key" ././@LongLink0000644000000000000000000000030500000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/bridge/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611521033100 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/bridge/1.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000112415115611514033102 0ustar zuulzuul2025-12-08T17:58:36.337873451+00:00 stdout F bridge-368 ==> (/tmp/smartgateway) 2025-12-08T17:58:36.344300358+00:00 stderr F PN_TRANSPORT_CLOSED: proton:io: Connection refused - disconnected default-interconnect.service-telemetry.svc.cluster.local:5673 2025-12-08T17:58:36.344300358+00:00 stderr F Exit AMQP RCV thread... 2025-12-08T17:58:37.339833498+00:00 stdout F Joining amqp_rcv_th... 2025-12-08T17:58:37.339833498+00:00 stdout F Cancel socket_snd_th... 2025-12-08T17:58:37.339833498+00:00 stdout F Joining socket_snd_th... 2025-12-08T17:58:37.339865289+00:00 stderr F Exit SOCKET thread... 
././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/bridge/2.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000206515115611514033107 0ustar zuulzuul2025-12-08T17:58:50.035379674+00:00 stdout F bridge-3c8 ==> (/tmp/smartgateway) 2025-12-08T17:58:50.060372483+00:00 stdout F bridge-3c8 ==> (amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/collectd/cloud1-telemetry) 2025-12-08T17:59:50.054995318+00:00 stdout F in: 2608(104), amqp_overrun: 0(0), out: 2608(104), sock_overrun: 0(0), link_credit_average: 14972.240234 2025-12-08T18:00:49.071207842+00:00 stdout F in: 3440(0), amqp_overrun: 0(0), out: 3440(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:01:48.089045681+00:00 stdout F in: 3440(0), amqp_overrun: 0(0), out: 3440(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:02:47.105472801+00:00 stdout F in: 3440(0), amqp_overrun: 0(0), out: 3440(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:03:46.121102811+00:00 stdout F in: 3440(0), amqp_overrun: 0(0), out: 3440(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:04:45.138741139+00:00 stdout F in: 3440(0), amqp_overrun: 0(0), out: 3440(0), sock_overrun: 0(0), link_credit_average: -nan ././@LongLink0000644000000000000000000000030600000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/sg-core/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611521033100 5ustar zuulzuul././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/sg-core/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000247415115611514033113 0ustar zuulzuul2025-12-08T17:58:34.360417944+00:00 stdout F 2025-12-08 17:58:34 [INFO] initialized handler [transport pair: socket0, handler: collectd-metrics] 2025-12-08T17:58:34.360417944+00:00 stdout F 2025-12-08 17:58:34 [INFO] loaded transport [transport: socket0] 2025-12-08T17:58:34.416463312+00:00 stdout F 2025-12-08 17:58:34 [INFO] loaded application plugin [application: prometheus] 2025-12-08T17:58:34.417657893+00:00 stdout F 2025-12-08 17:58:34 [INFO] socket listening on /tmp/smartgateway [plugin: socket] 2025-12-08T17:58:34.417657893+00:00 stdout F 2025-12-08 17:58:34 [INFO] metric server at : 127.0.0.1:8081 [plugin: Prometheus] 2025-12-08T17:58:35.418179631+00:00 stdout F 2025-12-08 17:58:35 [INFO] registered collector tracking metrics with 1 label [plugin: Prometheus] 2025-12-08T17:58:35.421037625+00:00 stdout F 2025-12-08 17:58:35 [INFO] registered expiry process for metrics with interval 0s [plugin: Prometheus] 2025-12-08T17:59:24.613620343+00:00 stdout F 2025-12-08 17:59:24 [INFO] registered collector tracking metrics with 3 labels [plugin: Prometheus] 2025-12-08T17:59:24.613620343+00:00 stdout F 2025-12-08 17:59:24 [INFO] 
registered expiry process for metrics with interval 1s [plugin: Prometheus] 2025-12-08T18:00:02.425925127+00:00 stdout F 2025-12-08 18:00:02 [WARN] prometheus collector expired [plugin: Prometheus] ././@LongLink0000644000000000000000000000024400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611513033076 5ustar zuulzuul././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-utilities/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611520033074 5ustar zuulzuul././@LongLink0000644000000000000000000000027300000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-utilities/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000000015115611513033066 0ustar zuulzuul././@LongLink0000644000000000000000000000026400000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/registry-server/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611546033104 5ustar zuulzuul././@LongLink0000644000000000000000000000027400000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/registry-server/0.log.gzhome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000015407115115611513033110 0ustar zuulzuul
°H Öwç.QL¦ìþÕYKêÐN!¯0é68¥ª ½öõÑëš:ú˜òŒèoCV{ C¢Câ·¸eD„‘B–U³…”/…i°zŸR^£ë˜rJéŠÒCÐU/Y§œFñŒ„‚‚ÞÌs‘]Ѿðx_IA ÆÖ7j¯1í˜RL»ö£2—˜‚N€5 &¦î/Tí/èE‚ }!+…Ly7 }¡O)Û®u‘‚Œ=ÿäGef¸:´/\£´ÌèBÎÚÒƒúqÊGŸSšŽèVôEò…”ˆ1ÿðÞÇñä#*‹mß'wßñýI Bï|áÉýäÐç”O~YÈ· ñ ýá%Æ 5#it ·/\a´§QÏZ¢³~òÒ×`a†÷AûBÖ[M˜ƒö¡ÄóSÑçU·/@J¨L[è|á¾À £nŽ!î=¿PÔ„2‚XºŽ¸DHXÆãÎóGM½ó…ª‹z4%Ô#Ú®1Ñ…tqGíw «æ )Aƒ¨}á¾ñ› ïE=eÏ)æ݉fp÷뇄~ \í D@‘Ã~*Þæ ßÿ0ðÓ?þôFÚû÷éãŸþü>hóÿ þÚËß|úøÝ÷?}úðWøðû—¿:@ïå7å—Ó'¿d‡¸ùãßWÉ}ûù¿ãÛùÓþøñ_޲ f|Ë=y¿æQq^€,“æ³Õ@=þýn¤ù…gþ&/¢I8ìð€q+õŸó` d¶¼¹°#B~Ö•ï£T>&áùï4eD¬¢I:ôðÈ/ßU©kÙâ |¶êæÀÚJ=Y©¼CÆei¶R_c Éleº°R Ÿ©ÚA¶r¥Êi4å‹JÝù¦:˜–¥y¥®e (ùl5˜•µ•z¶Rm¦E4ƒÜ[©K•Jélù;;îÂ|¡?üç?÷ÿüuT|þùßþøÓüî/£C>ÿÖ¼§ýÿ¿Àÿ¿´ôõ‰Sù†ˆÔßÙþ ‚ &’ñ° ø·?ýûwúóü½zŸ3¬b/9ì?ø÷Eöw8 ¬ `ž ¡óײvR.Àxöí‹]3¾¡a‡ßþlÈqÈ@oƒÂÚ*ÂÎ&‹!MÛje–Â^ÃÚ*gš È€ÁdCÚJ¹]…}»ÀbK‰ 1í®@±\ =ÒÀ1Ú ›§D¢3k´ ”2KIY¨] pW@4#”‘û˜°–  ä°W3€v‘à‘ Ƴ8> Ô.ãvöí«›ƒ) €óÏ6ZAJ1h6fAÈm… ‚‘R:ÅàvZ©@ûm¨ü=H˜` ÜÓÅÎ 9‡½ÍO@ç• ‚”cñÞsƒÅ\@RØ«8`·JO§ä ïl· œï‘¡ k ¤äÆC†õä`±û„GàY¡ïŽ˜ 6ÿEÒ¹ÀÇÀ}&ѹÀãǶ%ˆÜ£ƒµØkÊ 8©sÇß%š)!±@»@±\ RØ!`ç¥ï¥¸€)lÛ!Ûjõ8‡½ñ;ï”· ¬îfÔ†4¶½Ð'…¬Î`ß.°:ˆ”H1¢ÎŠÝ+æ‹°oX ¤|@$4:(6/@)ì•GO Uv‘qf<´ K§D8ö³DÕ\€rسiÔv”\€dØè5äÕúšÃ~»WÜ.Pº"H‰†í‘ª(ÖHAô¾ôm«g†<%ÌÁ¤›ƒÅ ‚”æ » hï+}P˜Rˆ Ç>(|âîÑ3Ø· Üa5+!šQ»@-ö˜ÂžIÔûˆ ôòŒR¶m×PTkÒEØ· ¬~½>ÇH½+‚bAJ.`̨ûß7Æ.nä=4Tì.QJ.àêÎ} r$pÊÓÕ/”Ñ}b^„}»Àê7I2ÔH·ŠžÂÞTzÅHé'I2 AG·èգϛ Qï·jïÎ܃ˆÜ©À_'&á.=>œRί8÷øp±‚ å æÓÚJϧd…N!ÑÁß(ÜÜG·j¯É„PAÕ¾DðD3ô÷=OÙ&°6fªž :˜‘´ ¸mL‡°IôøpåT e\)À;xàæÑÉÞÕ¥‡J_(̨”f0@ï~âÈÐd ÐÍÁÇoQ™•!k?ð P…¡¯UŽ÷ŒñaUòÞ}b׊S¾ÊÛC¥Ý¬|­8eµº£ëƒÂb×È<…}0±õè`»@(»wwð‰×Š5 ¨ß"(¾\ c ¹Íß{å~‘ä‰.`¶Õ£}RXÚ"%\{ÝX5HAÚ‹ÀP€´ç‡«™@ä°gÜ[È;ÀæíO|–h²gRì[ß?l„á³¼=QHn£Ç‡ËF} ŒÁÁ NìÒ—й¥°ŸxŸ–^= ‘ &ØA±‚@SسŒÐ>(,}•(å{àF‚ýjù#Ûnb!]´ ÄöZå襃ÏØØv.Pú.QB À`è‚ Ú¶1Èa/ñÎw(ÚVKF$@D0v*ð¼Ó!À!fÜ#C•M eXPE;xb*€ê@ý,QíK]â™ ¸½ó™ºvkvp }NX6ôC0¥D‚"÷U¢ç-!ŸìCU¼{ƒ¯E·kí¥öJd}󨢪xŸ;#€‹°oX=?œqRˆFÆ37l¨u£0å¤Ð˜ÌzÅHéŠ@Rr'¶,æ‘Ã>Œ°g+»gÔ†DÈhÚ;Fž·…|coƒ½] l$@Òž "ë3‚rÝÁHaϤÀݬÜÌh #¹¸µ ÔJ<…=9P¿Y^7ðCF{h[78¸/!#o->¨53šó@Ó>%¬ ’Q0›ªõ òjÃÞÂÞçŸ m•¯fŒ‘³ÄÐ!½v´Ø–!ÎaBÚ©@é+e44¬&{`kp²g$lxüì°¼xtAðÀÛ„2,€¥ ‚ÒAFR(³6dˆ>!¨Å>…¼"³õ¦±Ò{f2zÄ¢¡„ÞçÅ2Œz@llÏÐôäpå¯AFXõ¼ÐŸ%ÓAB½n°tS ãs 0lŒNб—ô@ˆÒ™@éûÄ‘ ³ðÞ-R­àönt0v— sšƒc[7Ø.P«"H©Ù»/Pyp0eù¬47ê§HŠÍ ÙEØ· ¬¾Wš19è0¸v.ðÀçÊ'{Tí¥] å{¤AÐ.P‹=丹Œh(]æä.îÔóÃźƒ’Â>ÀTûŒàé[GƒY¥'x•è ömwtì=.P, „ö0„¨ˆK¿J–Ò0D}­ø‘¹À ìÛVç)m0ÿhxàÛ„g°o¸Ã5Ÿ¡@(=.Pë‚”–‹@Ï ¶ŒT  êæ`±‚€.¾]`q$XF[ hV†2zZà/”žÁ¾]`qA0r"!B•ÚŠÍ eÜ 8{fèñ‡EÁ6FôKÅOœœì· ôí¥ç‡3Aͬ‹™]„}›Àê‚ åsA3è‚ XHaß.°úœ0¡ Ø^¤˜¿ûÑKk])7Mao¬}‰ òAaƵb!.ñèHøÃäô¯ÿó_ÿñýÇ Ÿþ·“äÇïþò[†¢~ _;B"ß ø0ãaþ(ê±ÄíyÛ§áûæ~úáÇŸÞÈ{âï>}üÓŸß«ä¿Äöò7Ÿ>~÷ýOŸ>ü>üþå¯^å÷óoÊ/S¼_y;nä&»oh›àzÝ·ŸÿC¾?íŸ]ëL>8%þ˜{ Í£ú¼Z4Ìgk0 Z¨' Ux—¦–¥ÙJ}-¼¢ÔóÙÂtañ˜ÿêƒlá>J…ÓhÂ?©öUš 4¸,Í;(u-[øuGl=[ šéûh¥ž¬Tâ=š ei¶R_U*H2[œÿÓ€?z¬½ý#·Q*žFáКã¿ÓSeiÞA©kÙèoÀ6Æôabn¥ž¬Tô]š^–f+õU¶.Éli«lDÃõ [ºRé4šôÅoê×›:Á4V?xà+0ï Ôµh÷’ßelÕÃÑ\[¨§ uŸ¦—¥ÙJ}•mx2[Þ\X-l :È–ï£T>&¿5ùe„º4ï Ôµlaç¨|[u7vn¥ž¬T’=šÈei¶R_e”ÌV6 ‡ËT¹Rå4šòE¥îÐ4Y0ÆrÍ;(u1ÛÐ|¶³²1'<ø¾y+õ JÝ;¤‘àE4[©+ÙB*Y}U „cÙ’Þg”PO7Ó¯ŒîL›áea^_¦«Ñ‚X>[ rp¿c õ¸PyW©P—f+õU¥‚&³}™eqƒwmõ>“„zÚ´™~e’pg.tÛsS–攺˜íÎÍ2¶Ó"[©g+UwijYš­Ôר`2[œ.lÈ‚GÙâ}”ЧÑÄ7f¿l ¾Š¦š¿‡æ”º–-ÐoÀÖ†’¸·RS•º CúL“Q0F+u­R-™-½Ü¼$=Ü-¤û(•N£ùÅq3ÙÉ WÑ Q×÷мƒRײ…½:u[›d§ ÜåÑJ=®Ô½Ž’!kYš­ÔWÙ†$³ÝÆD QÑîòÐûŒêiãfúåq3þúḎк4ï Ôµl÷:JËØj#P´RÏV*íÁôQf õU´;Éï¶ÛìD ?Úú•ûUN£)oª-¨d~†©LFïy¡®E ùl À#K 5S¨ìea¶P_EëÊÖ^ÆDEÍEÇ1¶vŸQB;mÜÌÞ<ó+çO›óúB]v®],c«>Ëáè[ -Ô7U÷hž~yñï4 @¢•ºT©ÍöeJÙp0d ÷Q*œFó­“„[‚T—攺–-¼RØ,a»]L5†àVêÙJå]šZ–f+õu¥F2ÛmJTe€;Œƒlñ>JÅÓhâÏRÅbŒU4cƾ‡æ”º–-켋°Œ­¸˜°·ROVêÞ¥IËÒl¥¾ÊÖ#™í¶V-)޲¥û(•N£ùæõ¡ÂÁeiÞA©kÙîe¿ËتëVÒVêÉJÝíäËXFSD®än¥þ³l’ÙnS¢!Œ4ì([¾Rù4šo]Ê3?eiÞA©kÙî (-C«aFÚB=[¨»4uM3çv l¡®d‹Ã“Ùn›a‡ ÊQ¶r¥Êi4¿8of;»ó]b¬¢AJï¡y¥®e»÷lê2¶†C”gXµROVê^{pÒÄE4™Z©K•šÌÖ?/‡E`0<ÆÖï3Lè§ œù—‡ w–ŸÞxú ÷y4¯¯ÔÕlÁ8Ÿ­Šü¦¶R+•iæé/öý¦Ê¬¢F+u©RÙ“Ùnƒ¢ 1ư£. 
÷Q*œFÞxå ÂqLr;¸cÒï:K¸-ì=ß·Š­Æ`c?¸ë¬…z\¨"{4cM ­Ô¥JeOfû2'ŠcVÀ‡ ¼Rñ4šo%dîeiÞA©kÙÂÀ|¶6p¸„·RS•ºÆw7š< Õh¥.U*$£¥—À#ähòK÷*óË£„;k›ç§OÑÄaãà«©~×QÂÕl Ÿ­†9óÑ ¥Vêq¥îM*qY˜-ÔW… –Ì–_z…æ1ôhîË÷*ŸF“ß|ãÍ—Ñt8|DÃwUêZ¶À˜Ïv{ºšZ©g+•viêš1¦ñ‚b+u%[ Mfû2& ȳ>ªT¹Rå4šòÖñ| \Es{7õðiªÜU©kÙòÎmÆel§FEHb´RÏV*ïÑ$\C“†´RW²%õT¶ñy;ìpàcçoqŸQÂ8mÜ,ÞºêWh–«`Îüèà'5î:I¸-ühÕ]ç7õàÆÉÖéqòÎ>, ÀE4MÇ0m¡®d»·Æn Ú—Õ°[Sàèî#T8 æ qG¨Aækhú‹ÃB…» u-[œÏVc~P‡·ROV*ïÒ<ýžñßh:ÅÑGS[©ÿ´R%™í6$êۢߣu ÞG©xM|ë3Ƨ/XÿMV†€÷мƒRײåk4ËØš ­Ô“•º·”ÐX–ÑÜöü¶R—²…d²/‹a ÈÁënqŸ9Â8mØ,þ—½³Û‘ó¸ÑðùCÉ*ø$ ìžì°7°l%âȆ¥ìÁ^ý~-!H·4£È;Óœ*×c†áŸ±Ü|úåK‹õÈÄïWÆ­Ör§hª‹ØxJ4ßÓûÆVÚÈ­±¦çø¦>ó7Uâ«ÑìwŠf‹Rø¦Þù›ú•ën÷‰íÇ!ÑËVXþ±µßÏ7Õž-šöÇûIÚeŒ;ES¤ª•§Dó÷ðM½olå+ƒ„w‹mQ‹ê7÷ ù¦~ë7õ+Ãùg4cÞhòMýW±­Õ’cÛÊÑý¬S»Öo=kå·}Sxó뇷zûæÇï~þåͯ¯?üüëûãü³wïÿòöO¾ûÛë_ÿúæÃ/?½þáÍñþ~øáÿîõ?¼ùåÃwoÞ¼}÷ç˧ð¿où¦ïú¾}ÿá÷ãOoÞIŧþ¿ßøõÍë¿} ãÅ>}47t¼þåíñ_oþ|þ§þÙW¡øç¸6ÑŸ3qJz½`Qûúùqýë3öÉÀ­§e«ñ¾öý~Q¾U!Ö Bär1vd@Ñ-jTßP^½þá¯ç‡5¹2TÍ `TéCëöÒ°õpKQ†ð6Lu3eø7ÿ€à >òŽz`„~ÛÐû"±_>LN™¬!>’@>Ø6ˆû]c_ƒ @øxª ðéd¡“¶õø@| ±Ç’ýwõˆÀ¶ @ˆý¶ €CÀ§K¨Ž l«¡ŸTàåŠÁW¿xìã^ü!º¹]cŽf<òÑ ÓÐÝ!.š€$@ÂÃéM U媇 ˆÀüUeƒ(R,†*0Wìë"±GîK‚dœ1¢“v‹ý¶6°‘H`Àؼ!„ÀzBþo©ÿ…~$|*èq6Dì‰=`×KGt„˜9GvST€¶@vè_múÇ>‘*^â‹}Þ¤‹‡?ÒÅLmƒ ð𻩓’€qXuª`Àؽ•H쩈ýfkkÈþ"CÂBƒ%J>Ø6ÐB.$Œe*Høl¶'ÐDfv…ᨛíQU Õ_îŒà*°ÆˆÑ£Au-å‹ýBOåÁŸ‰dÌT>䌦=$cÙv±ß·t$L´ ðiÄÈÈäT€-¦¨ÀÝTà÷ãOoþýç_ÿí/¯ß½{óÓÞ“G@03䄼œ°È²Pª ÈÂÍÎëž2žªµö"Bå8—Ø"±§r¼ïñ³ÈÈ!Áº7%1,4Å*¨ŒÑyi²¼ûÑ»ö6È 3ïÂNq‡Ãkø(0so9rH7Ã+NMBJ'éôZºâ 6l ¾Ðbà ¦%aâ–CBCØp©IH -xiIhÇÈY¢Ú/°ãz›PæÌMK‚ÒR¦ ж W8Ù-””зî£c&˹Ž]¥9V`²Œ9*àÕ:ÃE;ŸE”óû_€©boÝ ZJ‰Q0ñ‰a µ†4RÁΩà„ÀÆY æŠ}JèÅšÔ`’l…›#G tHS%%L* ž•³BÐR hÝ´rÿd‰û'5'9ô.µtaçzáR0T+è ×R& ªhÓˆ†.LÚVl9D¨±wÃݸUš•ØÏ5Z”{×Ökã8aâãM!AUJé*n8fü±Gîz=½¥X ]»Wª]ÁY Œp£K0=gø}ŒE˜À¬¹¾¦jÓÞ­£Óë%¥_¤ÑXh4±c,Çè¾ $†UÃ¥q(´¦.='#D‹Aá¸ñk9ÕJó'ÞLÀ(f ƒ´žÃÄ:J?íÜa6iÕGÃ$Îu²d9±÷P.ªìÝR6‰ó7à¹Rð–cÕ‡+•œzÐrr‚Ym•iƒ/¨X+RƒÞñÌ© ¥¬29QhÞñ†[ͪõÒ<2X¢Wdµæ0¡f6Á–¢«÷`ãñ̶@¼‚©á†‰Ñrp•¨ÃÌêP’EÄQÀ$¬Z4„ªëÏf–k92=¼2¼á«Ûgì/o§ûÉ®+§´[©—¿Aq0q §%l9L¨´Áê̶ §ìÉ?QˆªÂFÌÉÌK9Njgä‹ÒBœzÈ$)#<d`Í·k“RKÎæv9·–Ÿd`ÑfMzìµÚò0¹%À O§˜‘„»€òð‡dÌtya,B5æó‹@EP ضÓ4PTàâ:^€“RÐ`ºÑ‰†.l;°ÈîàTœ=ù¨ÀΧѨÀì*Ð+^€A¨¨nï7WA8G@¶vCØV^μBž@£Ÿ´m_ê"±ÇBfÝ Ly}~_9àèæÎ FN`ÛûïEŽ–3ãJ.àÍ8 `Ia$¤2 &`⊉#gg+D¬s" A5ñ½- >Ö•fJIÁ %=Æm{Œ…\@.8ýa9œã&RAFsÉ´Z±_œ÷ûo‡%öÛÿ[Ë ´ ž›,öºHìéfÝz‹Žl;)±—.u üÀ¤ää—‡aiL[ÔCzލˆj`æ?R,‡%õ L{˜àæJ Ø3f9tï¦øƒ™ýåèAkEƒKðó«ƒÖÃ"§|è¢58SÚù±'Sob•r˜ècMÃ-›†^M黑“Fí­Ð4ܲi8´¹20± ô¤Šq4‹FÓp2ð”ØGŒ† L,ž4gôtè,^œnPÊàyGKø±'Ü5ŒCRΑ÷•^傹sxNì½ÕR€9W› XÆYQ¥µ¦¬<\á™”¢  qU1´£¦¬F&Èk¯FGÖycÍOyHi_]˜ºˆ ä€~âÅ´‚;ÀŒ†`nžåÍYl‚0pЀ0,ÕO)Ïj “¯Å¤Ñˆ0\ C?O)0´ÇP¥UááYmcJíî¥!;¬0± ºèº€.d!± ÈÂ=#AÊž}rÃJž1§¡°­.L=¡`Øo;Òˆý¶ß{¼à®±¯)+¨æ®ºRPÜöˆè£ t à B êt Ñ…Ô…W³èÂc¿èQâ‹7eQ‘«ÛóåpϱƒÌ2cí™òæ3H,´dà Hp{öý÷æI¯ºÂJ¥H÷Eàk͇þƒÙIˆ#†¡Ûnè b¿kì¡ß6ùz®Åóýßöû¯ @Bƒ˜®àÚ+…ƒ! Ãõab?z1$^íÝ2~ì£ _ì—šŠ›æ‡æpóð‡ÁQÃ4›ZÏx«ƒ(|¯õð”c§Þº—Nÿi¿ªó9b ÜÕÈ1z€uÆÍlsÊcq 1ýÐâåñÈIà²L ýò‚°ò‚è¬ÇÕ)åBuTÁ',ðîËÑJF_©» +¥  SB5¥”tW¯BrØy–é„ »r§zZ›˜ÒIðQÄE`ãk-Ý#Dm`Wh'iÊùcÅÜ>qÖA·ŒÅl=üLClâ™øäñ,H7›3r΢Ÿ 2Äê‡Ò@Àc¡@09H@)kþ`r% :‚AuBu•tÒh"ÓD¾¹k a@jà v À'¾\ÛàÕ$<úk¾ü¢íyßyù]èF2~üèÆ$G±Ô—Y7]D@Ä~ÔÈ8žôV½Yí¤ŠISAÊõhoR»·“›0šZ عõxB൴‚[@Àìb¸æ•]j_{Œ£§¬ï~ЃÕz°Ì{z˜ÀL\/Þ´¤‹/0±ÐùÓHy0ÿ€@V¨'T("®Fà#éÕ0rl@0û4Ý& À-bn: =éÝ/˜XH&j#W+t  èÂͦÖâ†0`"a@®Ž"Æ¡/‡Ä7ߪ|쇆Æ(_ÌKPªãL*BŽ5š2ãDu&0÷3M„áêP«áG†¤!9„aaȹx ˘H;ª6˜€‰+&úQ-ðt1a@n’E7<%LÜ2Ñ* ˜¸=íð0q«®ø ˜øŒ ÇOÀÄ-ÑŒâ“â“N%ÂpݼÇЗKŸÏÐ<òïŽê%¢}ÎÍÑ×,%¥,y„TeÆÄ}t˜€‰«?~˜ø"L`AïhA€0\%‹~Ô0Á2‘ï[?@øBÒVPX©¨òòpÖãŽ< 7•EW‰ëŒÑŽŒ|1lôÞŒÇÙ©5%…‰°ª01ÿ˜^2“£•*ú´—Ó`"kñµôhÕ4Gåkl›p… ˜¸Ò =J©)Lˆu¯xÌ%øhç'Á;Í“BPFR­>aâƒíœÅs' ÝOM…‰G L#…1ªWRÄøÑ{N±›¨Ó_\â¼òŒy ^jk+œWÖšã<Êû $‹;.'Íi2^`ÖtR²Ãùƒǰ’28"æð-rTBŠºÑ|žÕ/h£…³hÒC:.°ä†ÞÛ;¿¯)Lô*Ñxjm&ŽÞ- /R@b…û¸&ILôR;•æLhν‰>dDgîqÖTÊ¡UCv[•GJêâ¢syf‰d!}ä0Ñ¢8¦r &’d¢jÅS.rŸªç !] 
{#§µ”9h?ua  óÏAµ¥Tnõi£²èÂu!¥Uí]Šw޶W@"§¦p/â$ŠùˆÃrÚQÃ,ņ;ó5\éP‡›ŽƒGƒ ˜ø'ã-I&B†2±F³ºLÀÄMgRS^U—•pKt¥$§Øèv nínÝ­Ž2$ž¶îA΋¨¥y(Lºß'¥’ÓÓ42^¿€CÐcÔ”uœa\ã 2ᑲ)2¼6gj‘†ƒÀLÜ>h##ÇN¸‡(Õå¤76svÄ}¼½m³ÞÝÎi1„õ`×Ë´ÕeFÿ9J/í±;/ü‰ËçÞ]¡Å`GKÙ¥v•¶ÓL”c¤ÿHàî8…, ×k<ºà@â:SôC‡*H ôR¯ÝdX!;,p¿î¨ÃA$®KKo q³OÖîŽD=J)!æúÍ;§+H¼à$œÞî]ziOaGù<ÔåýßÒ½@Pkk¢æ„ ‡)=¾Ý/À@.÷_ùLà’Ð:rhã7ܾ‰—T‰ˆ$F³§Õ¤ŠûAÐ<USièÂÛR0+úíO¡ ɲå@ÐF‚Y!¸ÿöà fµÉ·?¤NrxA$jŽi|$Ð…;&’CH×Htˆ€ˆÏÚŽJ¦À.  Ñ`0×H w‰›SW‰$–i8D OÄbHp4A‰A…*|¶øQdÜ9cñBðê7&‡Gö0‰/æ¥H%)5MJ%™dîa,3‰  ñ¹J„+H€ÄµJX–ýÄ=Ì|G?…Þº 7s—èOµœ³­á¢Y˜µ5‘c¢©:«;f]Ý@ÐSzL­h-8„EîÜe QE›µÂý5fäVA‚TqÇT@@å@n 7Üä†Ö‘N)º€.Üî{St]pt]¸ñ –b!›ô⣂ÄôHèÑS®à=¤ŠÅ×ÉÁäí…Nr 9\¿?ãfègÕŠ.  7»¾4@$nH¹¤_#ÜEé6,±#RVA÷p'ú##UH)&Ѹ¦??~ԔƴXHttaëÚ@0yi¹ŠÒö¿)º@rèèºp£ ŽiD´¡ èÂ5–ò6 H,õI*:ÜwÑg}ƒ"R qÚŽKl_()º ¡è´•¥/Áÿ±wvKZɾ߀&*ÿ3÷ÐaûÀ߀³³ !)ï¯ÞÕ3h–¡§G_wªº+Š@ˆAâ{Þ7++++«âBV\ਸP—%¢âBÅ…¯*N)ObW\¨wÑ+.œ*.¤¼oXqað¸ *.|=¨'¥¾ áR—%¦Ž %‚züvÔâUd:M\(Id]H™çEŽLXCZæ>§t“ΰT¤4Eï!‰Š g¯;–J%‚Á{^+C¨ áŸîÈ`I¢$ñ•$œk©¨|Á+.T\ø*.PÎRáLZ“zª¯éï’`h€ õ ÝÌGT{ˆ âBÚph«¸P)dT\¨¸ðõÍ{J‘P°V¿ãÜqRÅ…3H"eâ'[?­Þ°zŒ*©Vô9N'râq3አcÆNA4¼mþB‰àä/±± Em&OÑ܆’" q4«û3Ç0î9B‰`â›R‚@Š\'“ãK‚ï„r$h Ud:…$R.Éi«9n'I!=El­êŽ3Ÿ¥/ª¾0ì³µ)ðÖ0ªaaÔë9 ‚³Bõ¼žBÒ8EÁaPgR§ ÎÇ”BD­Î¤F=“¢Àr\]eÇ™¯S ár.YùÂ)ò…œ’ñ•$Î𪌥T¢‰–ïÕó:êݹ”cJR³huBuŠÚsäHÂÈ¡¶–'ÜqÊ8h‘ר¥bÔ1nèë9WYSÜö”D……oGp‰ Ö†Zjmøê4‚J¥ˆ¯²MieiÕÎ2ìB‘²R7µšޏà)½Ž!&RG–ÃY¦ì"¢«Ù*.œ@–²T(¡‹Qõ³Œzƒ*EÔ\D*,œbôBŽ"Œk1jT °…A58câ«¥HBЩâÂÄÜ”ú6ërí9êЖ±±ìQÁT½ª £žQ¦h€TØ«£éÅ…”¨ Ëì.¯§Dfž¯ÖCNqa)’På†\Ž3g jà|[ݹÂBVq!e ƒºDk^GS§ ‚.U\¨&·_Íœ¸Î"fž¯ÒÐjð9Ò…Œ•°)3HÅ…‰ÓCPu¯¸pŽ×ç2$áh F•/Œ2Ú]PÁ¹zN1œE3öa‚¦Xå…Q'wehÀ‘¬yUNQ]HhffBuF9qT ÁèËCE…á¡=*P†$„‘£Š 3†ï"ˆÆ!U\8Å& 0Z=85óäW â¾:´*.œ".¤ä lKdà*.Œ$EÆA\ûˆSŒÿÍ» `jT/ËLܼÐE Â\Tž#_ȸCavãcCN]vD 'S¯èŒ€Ö»–qeJ–+S‡Å…w?÷Ÿ|óóÇO/ĽÎðoî_ÿtµÇ? þ3µ‡xsÿêݧ7wƒ»ÿxøÑo6ô7\Ðux ÀàgÙýøøòcÿeÿsÿ/y4×~έþ<[ ÈgkÍQT]Ê©{;•Vh´ai–SŸŸ]dyl{.ÐY.QØIض<ÁóùK®àÔÇ?Ê4ÿñ¡|3Ë4¾K“] ¢áF·Ð<¹SØ‚S>[ S0.§îìÔo¯uIÓý(š†ZN=Ô©éla‰Â&n¨°‘-\Ç©°MØ6øRÏRuXšWpêÁlƒòÙjxÏ–Ú–)ŽåÔÑäU§†C3[lya¶œúÛÙBh2[ìQØQºQyëίãTÜ&>¹OõïÒ”%ðDSÙ”äšWpê±lÁ¶Ô<Ê©»;×hÂa4¸œz¨SWNiŽaK= rãM/oþ’Ë8•v£IO9U}¥’ßó§£h3Ñ-4¯àÔcÙb“|¶cÅ£œº³SE×NLJ¥YN}Ž-8&³å% ƒoºëðùK.ãTÞ&?Yû]écY¾ai^Á©Ç²…Fùl58еœº³SÅViú°4Ë©Ï:5íÒ(ªèŽ[‹…r£Ên0Ÿl&Ô•°ët@‚ôH3´±ê-4¯`ÔcÙi>[uÖÝ«åÔºNÛ°4Ë©Ï:5•,<¶‰‚·ØVø…ë´Ânífðt+!}¿ðÛÍ„:,Íóûôp¶‘VC´…lÜÕ”Q·Ãd[£I>,Í2êsl“Ñ.M¢ÆËè£há:F…Ý`>ÙH(ßߣ ‚EÓÅZè-4¯`ÔcÙ‚þl—–_SjXNÝÙ©Šk4í šÑxcm°œúÙ"{2Û¥ITš*mu*^Ç©¸Í' iÅ©†¤ÃÒ¼‚Se ùd{ Æpe+ŸîìS“5šŒÑ$!ö(ŸÉ’Ù.-¢ÂÄhÞ6²¥ë8•v£I/lNb mÑ´o¢y§ËÖ]òÙj(@Ðæ5µœºÙ©¸FÓð(šÚ÷P^N=”mH2[~Ðɪl­üòuœÊ»Ñ|ºðû0Ép\˜W0ê±hA-Ÿ­52·¶9—Q·•cæn4]¨mÞÊ”S›S“Ù>´ˆ.÷£¶:U®ãTÙæ‹çü*ÙQ4ÉûÏ·[h^Á©Ç²³|¶KƧ²”S·;uerG§éxMUVÓrê‘l •->4‰ªƒé¶ ^§“wë6Ãït~¿5_Àâ š³°Po¡y~§ÍÛïÀVÃÞ>rêÎN%[¡é»OÃú;Ípܘ!•S#[Xki9„íÃdXnꤱ‘-\Ç©°Mxaé·‰óA0ny£‹òÙj(¡ Y5Ó¨áGÁì,%ʨG¢Šd¶øð~ImŠ×i%ÄÝÚÍðéVB]iPR; ìîEó N=–-¬Ì°;Œ­µ¶¼èÞ¸œº³S×n/Z´aa–QŸ5*J2Û‡.QˆÐˆ­¹/]Ǩ´Mzér&Âai^Á©Ç²ÉG»\£1ÖeÔ½ê«4}XšeÔg ˜Ì–/^¡úF¶|§òn4ù…'4ä¶ûõÅýh^Á©Ç²ˆ|¶ê.`¨­œšëT–f9õY§&“]zD—¿¾5Ëu|*»±|ºãwí\¼µ£hzl>—«úôX¶šÏVC:[½m9õeå¤NÛ°4Ë©Ï;•SÙÒc( »é¶Ó7ºN!íÖkFßé#\É0`\šçwêÑl¡Y>[ a)§îíÔ5˜»_3ÞfõY´k¤aûÐ#Šd,`ÙÂuŒ »Ñ„—Ñí> v?šWpê±laåÍ¨ÃØj41²o"”S·;UÖfìt? 
K³œú¼S=™->Î$TÙº±Áë8w£ùô”_^¹fÜÿ–æœz,[X½B~[kÒ8±œº·Si¦·ai–SŸe‘Ì–Þ/‰†Àº‘-]Ç©´Mzá°:d/ó™¦96¿…æœz,[`ÈgkM£a*§îîT^£i8,Írê³NÕHf»´‰²÷|É76tÓuZ i·v3zº•W.;2D›µ[h^Á©Ç²¥µA¿G±U·ž,Y9uw§®ìS;M9Œ¦„3”Sd Éd—6QCІ¸u_#×ñ©ìÆòéV•·…­D³ûT´ÝBó >=–-­\M=Œ­†5Ú|R^NÝêÔµ±u&K³œú[H%ËM¢ì.(‡üòu y·n3þN#áJÔ¶ûpõýhžß§‡³ Íg«!ý3ßÚÎ]N}Sy¦óQ4Å´I9õH¶°ÒÏ} Û‡.Q lÆ[£0\Ç©°ͧ; Wâ.ÓîÓ°>ÓÔj·Ð¼‚Se‹+×hckݧ°ñ2c9õN]YS™›K³œú[@Hf‹š6Ú…ñ:NÅÝh¾px¨ v÷‚y£‹ÖÞ¢9Š­9—QŠ‚¹¼(eÔcº6–å¶=¢Ò€À¶V è:F¥Ýh>ÙG(+½ùÝPxMWD»…æœz,[ðÈg«¡¦JZNÝÛ©k½ùÁm\šåÔgº6nò(¶F®àÆ'æ_îÔ×÷>½ýóÛû?ýðó/÷^}úùÃÇ»þ£÷ß¼ýó§þúêÃO÷Ÿ~y÷êõýÝÇ¿½~!þ^½~}ÿ˧îßw¼}ÿ—åSø¿·¿lòú¾ýøé_ÿ÷ýŸÞÝüV¿þÇŸ>Ü¿úëFuÄLUÇ«_ÞÞý÷ý_úêW‘½ÜðßôN,oë.²ýƒöëyU<« dpß\ý±¿WlçP„wþ)¢ ¾áF— ýzýSÿ° ß6É¡vÙ87üÊ¡ášx¡ õf-fKþýþWÜ–G(‰½Q4œ: ®„'^!8D !ËýûŠCEÔ öÖšÉt¹áð+ä°'Õ¦Å~(öÄ^‘Öê?rHYý  /^Q`,öRì§](‡½‘pÔ 0°8'¹íਔp¬Prփ姥êƒU0‡½PTX æ9J0ÆŠce…Rì§eŸ³¨o¾ä]+Àï²$)°—FÞæœ ´:œ2 ÐfRQ`ä(`)J`vÖªOY'f_^³*öcÕ„r*ÃÂÞ—ÿZVBpŠ´)[Õ‹9èÍ5j38òñPäl–C´6ƒ3¾x«~±‘ƒæó&T]£c±OŠÞw‚Ru€)÷‚·³¯àX%pR]Ø•V‚DN]8¤C­3v Üξ¢ÀÁëW˜7 $±7©(0°z➣„hˆ5O`ä˜à)™7ê#y²ånÂ9ƒÇñcÿöó‡?¾yõþýý»™÷ ŠaZ"SÍK R4Àͽ҂s("§¡ÀY¥ ;Oâöͪ­hX%ÀXޤ‰˜Tr0ó@*!ÀÊGÝ&ÄIDP)âµÏ]L­$qIhNEIƒ½–Š©— #E†ÁÌ%3:r<‡"rN¡Kç‘æ¥3ÈH(¼û±ÉÉaou4ã=‘ØWÖôpãœÕߨ´:ƒFVB´œ=€7¤º52ôêàIJ@U®sá ?ao±|+öcuˆ§ôƒtÛ;×ËêSVœ©äÕ:ð9'EŽE fŒ¢K9¸¢Àô§!¢!u*0ãÍñhV•áÑìýCê‰ìAgÉ!戠§P­ƒÿœ…ßÐBª!lè¦ <‰* œr9‡Üx;¨‚ÀÁ–äè ¬¯lpH ˜¤ ‰0Á¨íà`Eø$ìk!¸Â@ÙþÙ+kÔë$gXB$E€¡*;t§@)"@Ð.ƒŠ gxý6åü Kb9H¬y£ƒ…Ía¦T\Žz~¢Äê%9hJ'qW‚«rõMx§X7’š3wŠu!gØ„´þ Z(Mx± “§j.Ÿ2÷ð:Mº»,Eª!Xlj£ns’í{¬ÎâÁØçT;ýV{‚3‹SN˜¥-­Fuíü’`ÍÉ]¼ÎßM| EZ0‚H‰`æ¢4SãÊ›L”² ,C +18Eý8åRª¶¾…¨Ëis¯ Ô—¬jâÀwÔSž1`â) I爔“%(22U–pIt³¦HÂH‚êy£‘/¯å¤‹¦VÇMï&i$àêH5àvèç\J¨TáËDDJCC›ñÔ“rÒ„PÄw<åul€HÕ•8£ïw`_™á±Já% êŒQ 6Š_ŠÀ„kæÑ¸á@ïœK%„E)O# PH]d,;ÌY@]¹.1ŒS:—MzÍ?›pþÙò*²‹bEQ•€w‘suÙA[åSVЬg‚FU‚ÜaËY¬ñ”Fuy±³fVgƒ±Ï9' $𱕍&—5læ5Äj—p„ºík½ìfzÊzO=í—b?–ïSò|¢ÆµÞϘçÓ2¨¬ÖûÑšRNühyû®Õz?%{‰¾ä×z?c(©c­÷ƒ±ÏÉõœX¤^;q!…Æû“Æ|l}Í/ööu06 ­˜?{§ö! uŽ7cW'“³såù3Öõ˜Å ú8G‹ù)y> aÕtcŸ2'Œµ!aÕtg¬é²ªV®7cm‡culÕ»‘SÚñ&¡57zʲ^°UigÆÒŽ´e$l-÷3¶é °cµlMyAKµ.h<ò•5G î­^œ‰=ß1§ê ±{zF;ÐM)î ƒ(Wæ7ãÁŽðCÐ/ö6ï +…Ö°®‘K~9óû„£Ö³Ã*!î,åÎfÏûfÃJÃ>Õ©[Nf°,^/€Œ:Ø5G !Uœñu0‘0Æâ8¬ìN#§B¬õøËÐÙ¡7*%Ô3@_NxEÈY&–é~V-c¥9o?ûŸÿrÈÙihM¹50A.öSv‹y¸R±Ÿ±U¨o3¬nNyûSœ­¦= VÎÉõ<£Žžá‘³úö PcE¤ÁРzÕiè2°ä FO¢”0r—@Ó”R€6()Œ{J9gCÚ”^8§¯T¡1Q=ü6òòà9«02P)aÜÕArº Dûö¡”0p7QRƈm™3PJU t 9J@oT%åï)šEÝ8ú‰pJ))+Aß0Ô=ƒw3Ÿ0)7U®ã‘‹šrάҀ«Åø-Æž“'Úò’dM¦­ÕPrØ«@µŒ\7pMÉ<ªõ`ÆùtÆÚœêÀyàÝbxŽLƒë‚Á‚@ z¡Z[Äc€7ÍQHÍ),äDþ;y•†ŽYJ€å}ŠªÏœX³)/¥œC9{CsoPçGï¦N <œ¬•b™rˆhZÁÌ'É ]¢U*¹ùXSªVl±äQ”¢P ¨5aT ¤¬ ˆŒ>ãÜŠsˆÀs"èÿ·w.»–É~?ÁRÜ/žzâa¿A˶ŒÔ·üüβ]hú svÙ믚@vAå·ÿ¸GP ±è$d¤¬b÷[8»‡Ào7ZTx)ŒÃ Ù¤–$’„5Ä H̸ ¹ü’,XŠçVEª 6e`&±ØiFÈ'ìêLL¥„rºn!ì„_’=‚„2q"Š8A%fv!-$ZJÛú #ƒZ¨ ”ܺ}¹fœ†°NœYßMFÞÞœ ën8ÆüÜ·?¤5­g °õ÷Ö7–9†˜‚3(­·»+ oÿH7ànPÇÛoö½™·o-Æþ’½Þ>G²€ä˜QÛ´`$3â$(`³&ç/Ô» oÿ¼k¹ëí­¸Ñ6´kîg†Êì­Úìé o·ëÛ'%&LvÕþš€›ûÊ6{ûßÊ*xûg~ﳩÐìñÀû6xû'7údk8vNb)äŽ\Œùsk…0øÖÕõyÅÌÚrO1 °ñÙcŸsåjÃìIè)TŠAÂÆ$äL4¡EÜH%íšJRƒV5L•<¹ÉÄýÞE‹c¸;ߺš™-õ C¼°3 }eæ *"Ø2´³(XÌ”›îù# ;'”Üy…–Ä ÒœG¶K{¦‘fU8•º³≮ éE¶.<èŒËÍ™¨T`'ò*›©K–GâBÞ³—×z%‰0„á€s'5^6S&ÒÛº rï¶Q‡ C•ú·!®˜éx6n„—[ç¤;k÷@ÿë ò —ù ¥Ù uØV–¡èD<—h…v›Aby“ÆP‡Õ!‡ …vG:ÔwØÿ—cÜQ:ƒŸ 0¬–L`·þ^™i›ñÖxfAN(Y 1aÝŠ„ÃþLèU3ó÷LŒUÜÏ®bF¤xaêú$r‰tÎ@ëüÎÍOC3U‘â¸Ò¼ëLÕ %…•}?™:>!«Ïö¥bäNhQ™ÙJ‚ÉÚ†¾Rt†%ª†…xtÀ uïvO.3¤Y«"“øì€áóÀWkgžñ<*«ž÷-6ÕÅ3Ǥséƒ2öünj"FÆ’BZ¿w½%:¡ÉìÞè@8 »t$͘œ-ŸL3BÞ8ÿ4Â@…¶|ů ÆŒ´¦&² {ª\ Z´„(&aŸ—N¼+ÏžØ÷¾ñB¾ËfFaÖ¨ ;_†ÑAAì>+ 6FA+gP‰ {£3(tE"™tÂEò¨‘ÈaÙ£ÆùéÍ}†‘L³8«âXÐ!·ée )',ØÚ+­ :óöÑÑXä½ñáé’™X22˜1½ó1úžq 6õëPvˆ û’À6â#*E¬ÃÎ$ 圕‰köv&!FšÛ Fiðv&a„ärJÛç”ä*)G©t“¡{qÓîÅ™~%ÕÆ¨«~ùÈ^µTóNÇ Ü3#£ò©¾~`­Ò¶²#½ JŒ ÓΛ3¢g,D41NÊžpêÅ®!§¡Hª±Gc×í#‘¤k6æcAscÁÖ Æ®˜™ˆ1ÏV0±?zEÊ ÝV8zB&zùv#H¯g@&ú‡'·9ZÃVœ€DŒœ LK7B#ü î_<ÓkMùÉXØŠÓcMçä(8 »ne©K¸iI£8µ)12+çáæŠ™ê}ûâò™êTt1.Œïzæ¡g XŠPh‚߸X]­3$D;Æ!6¶ ëiÆ2,ñÁŠ÷Q°ûÕGPP²õ©@aã-3ªàUBX×·3 C£“±ÜFe °ñ˜TÏ,ñ°(CÉá„R‡´!ËÐôzB*/·˜ab±Žúãç""9™0:·¯ÿØ—Ñ ªÅè}=bcó wPA@b·áip+'&m8 O¾6ú@¦ºK  ×…4肉;¹­@(üx}¸ 
(…{;O6œ<ülÿÆHÁ°¹0 z€0¼"Ñ@H¼"1rtHœµ¾ E ¸C èR#ŒT?Û…d7%¬ß8‰Ö™œ“Ü7¦p3àŒÎÇšI7ˆw·ÃV<:©Æl$ÎØ?C„ßóT bSQWØLºÁ˜–§&P‰L@âeöRu¦ÇÁ² Ù‡GÏÒ”»Q%²Gøf@H|;@Ñ2w†Ðâ¦bÓbÅ Ŭ‚ÁªöÇóÈ:Ø ²hèÂ¦Ý 3(…àÖÈÖ5²¶Â, K¾¶n›gš¨a¶TJ&N“=9»Ðä6‚<ÄŽS‚ C/ ]€.ü?våHšHƒ„Ýš¥Àé1èÂö¹%‘+B¡ÐFÎâ·H”2ß^$SCH $¾õ!ý’@≑~x qöþâ¥\ÄĪ”˜Ä>€ ¾Hc„ qnÃæ–MÏ!ê¾wŒ!‰3æfÞ_¦ø‰ krC†zÛ õz½¿9ÜC 'ô»¤Å!LÀo8yÛ ØßyÜuøÃ<†¸C”3 x¯<¬ÃaÛûýÓt?1ÑŸÛÉa8^DE_:ÁmÔ«G°&3ů]½F0¶!ðÿ8ã—T@Ð'ÏÃ7Â`W·ƒ 0ñÒ½ )`L¼8Cí`â(&ú§LÌØ¹‚ë@ ñ5ȯ§òÉÂð $òÊ6èÂãuA! …oe¡ ²YÄ–Ð…]h….@atáUêCâO ‰¿ù!jÝ¿h«„Š «È%ùÕdžˆlG„]9UËø4…÷É €‡00ßn˜fM $¾U V˜ ˜Š‘ÚErD!queÉ Åâ…][FÜ…”¶b›Ú†‘¦§4 Ó‚q8ÁgôÁ<²H€„ÌLè¦wóÇ'÷a*†MÅH,™Q®áÐ…í‘èKt&ŽH“ò€.ìé/ÌôÍgU’tá]Ð1+¬ìþøè¬Óh#Nc¹Û2€`Ó$ÓÈà~E)jRGèÂÌ.‡Ê6l;á¶YxUf#ö”…œÙóÓÖ‚rÄ£»š¾؆$ø’é›ïõËÜ@∌SŽT¨ZÙÊYnj*bÆT¨‰W‚Mý› ”¹çzÛe(V¡íˆvU‚‘\£Ë½„Çà'n„ñ¥#ÕÉÅÄò°«Ÿ8b\ÍIT8CjÆXØr• › Ãj„ÐáÉ]âB–€`S!˜ µ‹l ÁL¬P*¤È)ïjFªÐÞÖªH"ÐĶ^rÆEªðdžaa kÙŒ¥;ƒ],9ÃÄr­Œ0!WÙˆJš…¢ôžÆBfÚSBË+W ß2aJ‘H4=:Û–†RÔÂ3åÉpJI$w†˜ ‚ õÈG'#lýÙ#Çp€uè‹zÆmL‹"ÌÆ=[î“£Š•\gÄ“+¹$‰kÑ$öoj[‘„ÈÄ=XÏ`â™hÓ&Ê;Lì¯zÙ÷è”âNÌLϤR01Vx=øTÀ‚€-qFf{aàºF¶º-$l…èŠ|ô|mª“|üâ ž¯R‚¤.q8áÆØÌή…D1taÏpò"Ïzᆎ·G/nK£X‘* Ø‚™ðÑÔ»xž½m!ÍÖ?»Œ3>'£kaGâb‚ 9°Öùé>A‚g¯fËûO»½w… fò‡Î”XÒøì˜t£Æº]Ç#sH ,GO ÎÌÔHÐs€éÍT+29ÓpYèÐ+GîÞ/&rýºÂ§Ü4°˜‰.‹…>åS¹tµÏÔ¬Z3pÐúc‘—ÐŒÑÕp*˜Ð”±E&å„~ÉMWHŽ‹b]Ÿ…>¨&±úJ×&JôsWêÀÄŒ±È«gâ^Ÿ…@ã€~I¾hdUœ•TI£×þ€^{¾ÔG*µB ÃÆ‡3˜à˜ñ'B3°:î Ûá#Éë®02¬ Ûµ¯z"ötâ3Ð…Б<åBÂT?wHÌä®íÊ‘¾)'á¦@Wí.åÈ §$·D˜q„K)#M¶‹‰.J4DlêSÆÄgÒ$\´8¡!¢.ʧ’Ù"1ëBGe\Îõ ·(OANêÐ+HG˜¨›*ÁÄ »ƒªF˜p M…Nœ°dù$–Hú+X”3yKg+ì­=¥=¢FBQ—åQ4ÂŽ˜ +g¼ MçÀñƒMST>’»vc¯Ï¹”€àƒ[# 8•2¶‚lÊÏ8ÞôÉ^9@pø©]÷H-Rx‰ Ñ#KGÝs¹ÈMDÌê-K¡…Ôô*A#†c}P0:íÈLÛÈY½Ð^‘e¡|rWõW@]˜ZNÄ™Y€,¼l±é•„.lžˆètáõ–ÁÙ¡8ļ«,Ô!@Æ¢ƒ. fiÐèÂü 9èÂæºY€,¼¸ ð  Y€,¼Ê‚0tº]€.¼"QþB²$U£+þÁ]ñ)Mn†.·3’Žy…Ã{  'õ.Œ„и ``óP²a`^|Æd $^÷JÃTÀTŒìüKmbÆŠ3ˆ‘•_táSK €‡Ððà!¼z$€ÄË2ŽÉ/X“…ceÏHÌ”#\ðØq×2åH~Á} P`”òˆ¼£Í Ѥ†;—ÏÎ;~Ð…±nÇ!zaEÐÛê‚ta ‰èt¡ Ð…—––™þ…¬®úÜ CèÂKTmÐE¡Ú]Ø ½œGˆè66ƒ,lÚ¾0ÑÃR¬–(Gœ€„¬‹/v£N¤7­Ii@®‚ó2ÛÖ¤|‚,®†Ïx•ê‘0‚›Å¢€Ä*á1b*,»‚vÜÕTŒÄV!Ž Ûæ˜f h7WÔªÈ=׈ éÄ¥ŒÙ¹Mu!féÄ-ó36pŒä˜œC¹çgË‚D¯O…,œJŽŒP•«„RÏ®T»zs Š8£~&ŠðŒJ$¢,Gæèª;É MM»vÂOšj¢Êp¤žwõræHBêy×úÃ+–Ô@šq×ë>.TÁƒ£ÇfâPAôxÆÊaù–„×ÈñX–±Ht¶qø m+œGt¶¤ØZa8NP‰È÷2ÉÍ 1Æ“3NËy!˜Šý‘ˆËGÖw–h(POîíZ¿OcÝçn1‚D‹§ü…'G,Ò³ÑÖôäñÝF¦ØÂòèñ_<„)e ÕÂŽCÈ$L yÇC¨N0sç½”Þæ˚&XŸ˜ÖØÉ²).#™Áƒd½ ðŽV”#ޏ(£é_>d‹¡ úÁÇDØ5ªÌ  ' SÖEŽKSOÞÌ7ê‰^ÇdA'nr:iQÃ]Øõ˜ù„m(Yn‰&6ÄïÚì˜#ˆJ–yÑì8Ò_Í÷át/à/Ø5ˆ–e’½a)ž;ñ% @¦z \Q’¶dÔ"6U…‰m] ‚ É‚³pÄæO™ðB™„Þƒ½…èõ3 G¬OØßþn$Œ/ψNùpÕÚø÷ÉÂY¿øý_þ믿óµû ÿåûïþã??÷hëE~þh?þÍ÷ßýÛýþúo¾þõÇ¿úÀëÿZŸêÏOî—[o÷ÏêÖôwŸîÏ?ý‡üyýkÿþÝ?}ô1ÓZ?ó˜¿õýþÄk~ôÛyÀÓþÊá÷¿mrˆK‰â‹úÅ_Tý­×t²m_ßÔ¿ûMýe%÷ÍoëK…Ù*ý£ ¢þóMõ/{MçµñýßkZHŶ¯ùðM}ïÛÊ/[·ßÿ¶w0¤Ä…oê—Sû·^³ßöšÁ-…oê;ß–[…?ÿ¶ÿâÇ垤L'././@LongLink0000644000000000000000000000026400000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-content/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-content/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000000015115611513033066 0ustar zuulzuul././@LongLink0000644000000000000000000000027000000000000011602 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611513033052 5ustar zuulzuul././@LongLink0000644000000000000000000000030600000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c/packageserver/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611520033050 5ustar zuulzuul././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c/packageserver/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000644000175000017500000017260115115611513033063 0ustar zuulzuul2025-12-08T17:44:22.302044538+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="Using in-cluster kube client config" 2025-12-08T17:44:22.346938222+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="Defaulting Interval to '12h0m0s'" 2025-12-08T17:44:22.399755642+00:00 stderr F I1208 17:44:22.398568 1 handler.go:288] Adding GroupVersion packages.operators.coreos.com v1 to ResourceManager 2025-12-08T17:44:22.409803967+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="connection established. cluster-version: v1.33.5" 2025-12-08T17:44:22.409867958+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="operator ready" 2025-12-08T17:44:22.409915870+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="starting informers..." 2025-12-08T17:44:22.416774186+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="informers started" 2025-12-08T17:44:22.416843979+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="waiting for caches to sync..." 2025-12-08T17:44:22.523006104+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="starting workers..." 
2025-12-08T17:44:22.523308303+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="connecting to source" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:44:22.523615552+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="connecting to source" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:44:22.525120492+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="connecting to source" action="sync catalogsource" address="redhat-marketplace.openshift-marketplace.svc:50051" name=redhat-marketplace namespace=openshift-marketplace 2025-12-08T17:44:22.525120492+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="connecting to source" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:44:22.598484123+00:00 stderr F I1208 17:44:22.597584 1 secure_serving.go:211] Serving securely on [::]:5443 2025-12-08T17:44:22.598484123+00:00 stderr F I1208 17:44:22.597644 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::apiserver.local.config/certificates/apiserver.crt::apiserver.local.config/certificates/apiserver.key" 2025-12-08T17:44:22.598484123+00:00 stderr F I1208 17:44:22.597734 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599433 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599506 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599573 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599582 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599607 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599617 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599803 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599842 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599932 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599942 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599955 1 configmap_cafile_content.go:205] "Starting 
controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:22.600844188+00:00 stderr F I1208 17:44:22.599961 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:22.622658223+00:00 stderr F W1208 17:44:22.621167 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup certified-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:57639->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:22.672755339+00:00 stderr F W1208 17:44:22.672699 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup redhat-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:59531->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:22.686434143+00:00 stderr F W1208 17:44:22.685191 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup community-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:35661->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:22.697000651+00:00 stderr F W1208 17:44:22.696894 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup redhat-marketplace.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:35286->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:22.705599906+00:00 stderr F I1208 17:44:22.705533 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:22.705648627+00:00 stderr F I1208 17:44:22.705631 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:22.705968595+00:00 stderr F I1208 17:44:22.705785 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:22.709696247+00:00 stderr F I1208 17:44:22.706818 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:22.709696247+00:00 stderr F I1208 17:44:22.706889 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:22.709696247+00:00 stderr F I1208 17:44:22.706984 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:22.867181342+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:44:22.867350247+00:00 stderr F time="2025-12-08T17:44:22Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup community-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:35661->10.217.4.10:53: read: connection refused\"" source="{community-operators openshift-marketplace}" 2025-12-08T17:44:23.257615593+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:44:23.258431355+00:00 stderr F time="2025-12-08T17:44:23Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup certified-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:57639->10.217.4.10:53: read: connection refused\"" source="{certified-operators openshift-marketplace}" 2025-12-08T17:44:23.883750801+00:00 stderr F W1208 17:44:23.883705 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup community-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:47061->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:23.883802072+00:00 stderr F W1208 17:44:23.883782 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup certified-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:45697->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:23.884232914+00:00 stderr F W1208 17:44:23.883830 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup redhat-marketplace.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:39132->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:23.895105791+00:00 stderr F W1208 17:44:23.895055 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup redhat-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:34008->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:25.057506317+00:00 stderr F time="2025-12-08T17:44:25Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-marketplace openshift-marketplace}" action="sync catalogsource" address="redhat-marketplace.openshift-marketplace.svc:50051" name=redhat-marketplace namespace=openshift-marketplace 2025-12-08T17:44:25.058054272+00:00 stderr F time="2025-12-08T17:44:25Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup redhat-marketplace.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:39132->10.217.4.10:53: read: connection refused\"" source="{redhat-marketplace openshift-marketplace}" 2025-12-08T17:44:25.281648291+00:00 stderr F W1208 17:44:25.281286 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup redhat-marketplace.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:34406->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:25.577732228+00:00 stderr F W1208 17:44:25.577678 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup redhat-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:35856->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:25.660752542+00:00 stderr F W1208 17:44:25.660557 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup certified-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:38688->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:25.751157829+00:00 stderr F W1208 17:44:25.751050 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup community-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:34295->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:26.058053429+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:44:26.058053429+00:00 stderr F time="2025-12-08T17:44:26Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup redhat-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:35856->10.217.4.10:53: read: connection refused\"" source="{redhat-operators openshift-marketplace}" 2025-12-08T17:44:26.858554735+00:00 stderr F time="2025-12-08T17:44:26Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:44:26.858554735+00:00 stderr F time="2025-12-08T17:44:26Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup community-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:34295->10.217.4.10:53: read: connection refused\"" source="{community-operators openshift-marketplace}" 2025-12-08T17:44:27.805129924+00:00 stderr F W1208 17:44:27.805063 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup certified-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:43885->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:28.109095285+00:00 stderr F W1208 17:44:28.109054 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup redhat-marketplace.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:56001->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:28.256617949+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:44:28.256617949+00:00 stderr F time="2025-12-08T17:44:28Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup certified-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:43885->10.217.4.10:53: read: connection refused\"" source="{certified-operators openshift-marketplace}" 2025-12-08T17:44:28.438595213+00:00 stderr F W1208 17:44:28.437648 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup redhat-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:55434->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:28.801042869+00:00 stderr F W1208 17:44:28.800283 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup community-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:57052->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:28.855536516+00:00 stderr F time="2025-12-08T17:44:28Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-marketplace openshift-marketplace}" action="sync catalogsource" address="redhat-marketplace.openshift-marketplace.svc:50051" name=redhat-marketplace namespace=openshift-marketplace 2025-12-08T17:44:28.855536516+00:00 stderr F time="2025-12-08T17:44:28Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup redhat-marketplace.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:56001->10.217.4.10:53: read: connection refused\"" source="{redhat-marketplace openshift-marketplace}" 2025-12-08T17:44:30.256392327+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:44:30.256392327+00:00 stderr F time="2025-12-08T17:44:30Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup redhat-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:55434->10.217.4.10:53: read: connection refused\"" source="{redhat-operators openshift-marketplace}" 2025-12-08T17:44:30.859023845+00:00 stderr F time="2025-12-08T17:44:30Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:44:30.859232630+00:00 stderr F time="2025-12-08T17:44:30Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup community-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:57052->10.217.4.10:53: read: connection refused\"" source="{community-operators openshift-marketplace}" 2025-12-08T17:44:32.114772598+00:00 stderr F W1208 17:44:32.114727 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup redhat-marketplace.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:59692->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:32.256105193+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:44:32.256105193+00:00 stderr F time="2025-12-08T17:44:32Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup certified-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:43885->10.217.4.10:53: read: connection refused\"" source="{certified-operators openshift-marketplace}" 2025-12-08T17:44:32.270143546+00:00 stderr F W1208 17:44:32.270094 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup redhat-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:33812->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:32.682462773+00:00 stderr F W1208 17:44:32.682403 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup certified-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:48203->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:32.854099704+00:00 stderr F time="2025-12-08T17:44:32Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-marketplace openshift-marketplace}" action="sync catalogsource" address="redhat-marketplace.openshift-marketplace.svc:50051" name=redhat-marketplace namespace=openshift-marketplace 2025-12-08T17:44:32.854099704+00:00 stderr F time="2025-12-08T17:44:32Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup redhat-marketplace.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:59692->10.217.4.10:53: read: connection refused\"" source="{redhat-marketplace openshift-marketplace}" 2025-12-08T17:44:33.168279574+00:00 stderr F W1208 17:44:33.168098 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp: lookup community-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:56230->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:34.253552448+00:00 stderr F time="2025-12-08T17:44:34Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:44:34.253603769+00:00 stderr F time="2025-12-08T17:44:34Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp: lookup redhat-operators.openshift-marketplace.svc on 10.217.4.10:53: read udp 10.217.0.16:33812->10.217.4.10:53: read: connection refused\"" source="{redhat-operators openshift-marketplace}" 2025-12-08T17:44:37.916697481+00:00 stderr F W1208 17:44:37.916620 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.50:50051: connect: connection refused" 2025-12-08T17:44:39.371338257+00:00 stderr F W1208 17:44:39.371281 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.115:50051: connect: connection refused" 2025-12-08T17:44:39.524163268+00:00 stderr F W1208 17:44:39.524108 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused" 2025-12-08T17:44:39.745929469+00:00 stderr F W1208 17:44:39.745403 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.43:50051: connect: connection refused" 2025-12-08T17:44:46.342259282+00:00 stderr F W1208 17:44:46.341896 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.50:50051: connect: connection refused" 2025-12-08T17:44:48.516123960+00:00 stderr F W1208 17:44:48.516079 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.115:50051: connect: connection refused" 2025-12-08T17:44:49.764155777+00:00 stderr F W1208 17:44:49.764088 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused" 2025-12-08T17:44:50.754855883+00:00 stderr F W1208 17:44:50.753209 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.43:50051: connect: connection refused" 2025-12-08T17:44:59.570384954+00:00 stderr F time="2025-12-08T17:44:59Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:44:59.570384954+00:00 stderr F time="2025-12-08T17:44:59Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused\"" source="{community-operators openshift-marketplace}" 2025-12-08T17:45:00.175458250+00:00 stderr F time="2025-12-08T17:45:00Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:45:00.175507002+00:00 stderr F time="2025-12-08T17:45:00Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.50:50051: connect: connection refused\"" source="{redhat-operators openshift-marketplace}" 2025-12-08T17:45:01.571922067+00:00 stderr F time="2025-12-08T17:45:01Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-marketplace openshift-marketplace}" action="sync catalogsource" address="redhat-marketplace.openshift-marketplace.svc:50051" name=redhat-marketplace namespace=openshift-marketplace 2025-12-08T17:45:01.571964249+00:00 stderr F time="2025-12-08T17:45:01Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.43:50051: connect: connection refused\"" source="{redhat-marketplace openshift-marketplace}" 2025-12-08T17:45:02.175485591+00:00 stderr F time="2025-12-08T17:45:02Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:45:02.175485591+00:00 stderr F time="2025-12-08T17:45:02Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = 
connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.115:50051: connect: connection refused\"" source="{certified-operators openshift-marketplace}" 2025-12-08T17:45:03.572096182+00:00 stderr F time="2025-12-08T17:45:03Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:45:03.572132413+00:00 stderr F time="2025-12-08T17:45:03Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused\"" source="{community-operators openshift-marketplace}" 2025-12-08T17:45:06.973050014+00:00 stderr F time="2025-12-08T17:45:06Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:45:06.973050014+00:00 stderr F time="2025-12-08T17:45:06Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused\"" source="{community-operators openshift-marketplace}" 2025-12-08T17:45:07.971112194+00:00 stderr F time="2025-12-08T17:45:07Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:45:09.968753729+00:00 stderr F time="2025-12-08T17:45:09Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:45:10.969304719+00:00 stderr F time="2025-12-08T17:45:10Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:46:05.374465345+00:00 stderr F E1208 17:46:05.373950 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.374988780+00:00 stderr F E1208 17:46:05.374972 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.291450698+00:00 stderr F E1208 17:46:06.291396 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.291585662+00:00 stderr F E1208 17:46:06.291566 1 errors.go:83] "Unhandled Error" err="Post 
\"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.361342816+00:00 stderr F E1208 17:46:06.361256 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.361342816+00:00 stderr F E1208 17:46:06.361331 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:58.894493602+00:00 stderr F time="2025-12-08T17:47:58Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:47:58.894559124+00:00 stderr F time="2025-12-08T17:47:58Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:47:59.517423999+00:00 stderr F time="2025-12-08T17:47:59Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-marketplace openshift-marketplace}" action="sync catalogsource" address="redhat-marketplace.openshift-marketplace.svc:50051" name=redhat-marketplace namespace=openshift-marketplace 2025-12-08T17:47:59.703021764+00:00 stderr F time="2025-12-08T17:47:59Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:48:09.364584640+00:00 stderr F W1208 17:48:09.364521 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.115:50051: connect: connection refused" 2025-12-08T17:48:09.387654938+00:00 stderr F W1208 17:48:09.387288 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused" 2025-12-08T17:48:09.405360864+00:00 stderr F W1208 17:48:09.405299 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.43:50051: connect: connection refused" 2025-12-08T17:48:09.433153534+00:00 stderr F W1208 17:48:09.433098 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.50:50051: connect: connection refused" 2025-12-08T17:48:09.747747783+00:00 stderr F time="2025-12-08T17:48:09Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:48:09.747847686+00:00 stderr F time="2025-12-08T17:48:09Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused\"" source="{community-operators openshift-marketplace}" 2025-12-08T17:48:10.368804733+00:00 stderr F W1208 17:48:10.368369 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.115:50051: connect: connection refused" 2025-12-08T17:48:10.389969163+00:00 stderr F W1208 17:48:10.389917 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused" 2025-12-08T17:48:10.408506575+00:00 stderr F W1208 17:48:10.408471 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.43:50051: connect: connection refused" 2025-12-08T17:48:10.437533553+00:00 stderr F W1208 17:48:10.437488 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.50:50051: connect: connection refused" 2025-12-08T17:48:11.158802725+00:00 stderr F time="2025-12-08T17:48:11Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:48:11.158802725+00:00 stderr F time="2025-12-08T17:48:11Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.115:50051: connect: connection refused\"" source="{certified-operators openshift-marketplace}" 2025-12-08T17:48:11.792974782+00:00 stderr F W1208 17:48:11.792929 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.43:50051: connect: connection refused" 2025-12-08T17:48:12.046327627+00:00 stderr F W1208 17:48:12.046003 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused" 2025-12-08T17:48:12.151958754+00:00 stderr F time="2025-12-08T17:48:12Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-marketplace openshift-marketplace}" action="sync catalogsource" address="redhat-marketplace.openshift-marketplace.svc:50051" name=redhat-marketplace namespace=openshift-marketplace 2025-12-08T17:48:12.152020325+00:00 stderr F time="2025-12-08T17:48:12Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.43:50051: connect: connection refused\"" source="{redhat-marketplace openshift-marketplace}" 2025-12-08T17:48:12.180338912+00:00 stderr F W1208 17:48:12.180283 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.50:50051: connect: connection refused" 2025-12-08T17:48:12.234340116+00:00 stderr F W1208 17:48:12.234286 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.115:50051: connect: connection refused" 2025-12-08T17:48:13.551925450+00:00 stderr F time="2025-12-08T17:48:13Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:48:13.551925450+00:00 stderr F time="2025-12-08T17:48:13Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.50:50051: connect: connection refused\"" source="{redhat-operators openshift-marketplace}" 2025-12-08T17:48:14.457470048+00:00 stderr F W1208 17:48:14.457410 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.50:50051: connect: connection refused" 2025-12-08T17:48:14.514736670+00:00 stderr F W1208 17:48:14.514675 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.115:50051: connect: connection refused" 2025-12-08T17:48:14.548034928+00:00 stderr F W1208 17:48:14.547986 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.43:50051: connect: connection refused" 2025-12-08T17:48:14.550899664+00:00 stderr F time="2025-12-08T17:48:14Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:48:14.551012268+00:00 stderr F time="2025-12-08T17:48:14Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused\"" source="{community-operators openshift-marketplace}" 2025-12-08T17:48:14.679178866+00:00 stderr F W1208 17:48:14.679138 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused" 2025-12-08T17:48:17.546082865+00:00 stderr F time="2025-12-08T17:48:17Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:48:17.546227410+00:00 stderr F time="2025-12-08T17:48:17Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.50:50051: connect: connection refused\"" source="{redhat-operators openshift-marketplace}" 2025-12-08T17:48:18.454193611+00:00 stderr F W1208 17:48:18.454120 1 logging.go:55] [core] [Channel #1 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "certified-operators.openshift-marketplace.svc:50051", ServerName: "certified-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.115:50051: connect: connection refused" 2025-12-08T17:48:18.548564476+00:00 stderr F time="2025-12-08T17:48:18Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:48:18.548564476+00:00 stderr F time="2025-12-08T17:48:18Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused\"" source="{community-operators openshift-marketplace}" 2025-12-08T17:48:19.062599969+00:00 stderr F W1208 17:48:19.062557 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-marketplace.openshift-marketplace.svc:50051", ServerName: "redhat-marketplace.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.43:50051: connect: connection refused" 2025-12-08T17:48:19.172540315+00:00 stderr F W1208 17:48:19.172067 1 logging.go:55] [core] [Channel #6 SubChannel #7]grpc: addrConn.createTransport failed to connect to {Addr: "redhat-operators.openshift-marketplace.svc:50051", ServerName: "redhat-operators.openshift-marketplace.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.50:50051: connect: connection refused" 2025-12-08T17:48:19.523724500+00:00 stderr F W1208 17:48:19.523421 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "community-operators.openshift-marketplace.svc:50051", ServerName: "community-operators.openshift-marketplace.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.5.29:50051: connect: connection refused" 2025-12-08T17:48:24.549118556+00:00 stderr F time="2025-12-08T17:48:24Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-marketplace openshift-marketplace}" action="sync catalogsource" address="redhat-marketplace.openshift-marketplace.svc:50051" name=redhat-marketplace namespace=openshift-marketplace 2025-12-08T17:48:24.549207379+00:00 stderr F time="2025-12-08T17:48:24Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.5.43:50051: connect: connection refused\"" source="{redhat-marketplace openshift-marketplace}" 2025-12-08T17:48:25.545591225+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:48:26.547040135+00:00 stderr F time="2025-12-08T17:48:26Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:48:27.547583487+00:00 stderr F time="2025-12-08T17:48:27Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-marketplace openshift-marketplace}" action="sync catalogsource" address="redhat-marketplace.openshift-marketplace.svc:50051" name=redhat-marketplace namespace=openshift-marketplace 2025-12-08T17:48:28.546821879+00:00 stderr F time="2025-12-08T17:48:28Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:48:29.547147524+00:00 stderr F time="2025-12-08T17:48:29Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:48:36.602234681+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:54:14.417072773+00:00 stderr F time="2025-12-08T17:54:14Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T17:54:15.631788212+00:00 stderr F time="2025-12-08T17:54:15Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-marketplace openshift-marketplace}" action="sync catalogsource" address="redhat-marketplace.openshift-marketplace.svc:50051" name=redhat-marketplace namespace=openshift-marketplace 2025-12-08T17:54:28.431524738+00:00 stderr F 
time="2025-12-08T17:54:28Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:54:41.499366095+00:00 stderr F time="2025-12-08T17:54:41Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:56:07.891530353+00:00 stderr F time="2025-12-08T17:56:07Z" level=info msg="connecting to source" action="sync catalogsource" address="infrawatch-operators.service-telemetry.svc:50051" name=infrawatch-operators namespace=service-telemetry 2025-12-08T17:56:08.942731227+00:00 stderr F W1208 17:56:08.942662 1 logging.go:55] [core] [Channel #17 SubChannel #18]grpc: addrConn.createTransport failed to connect to {Addr: "infrawatch-operators.service-telemetry.svc:50051", ServerName: "infrawatch-operators.service-telemetry.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:09.955261551+00:00 stderr F W1208 17:56:09.955222 1 logging.go:55] [core] [Channel #17 SubChannel #18]grpc: addrConn.createTransport failed to connect to {Addr: "infrawatch-operators.service-telemetry.svc:50051", ServerName: "infrawatch-operators.service-telemetry.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:11.455813265+00:00 stderr F W1208 17:56:11.455749 1 logging.go:55] [core] [Channel #17 SubChannel #18]grpc: addrConn.createTransport failed to connect to {Addr: "infrawatch-operators.service-telemetry.svc:50051", ServerName: "infrawatch-operators.service-telemetry.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:12.988791831+00:00 stderr F time="2025-12-08T17:56:12Z" level=info msg="updating PackageManifest based on CatalogSource changes: {infrawatch-operators service-telemetry}" action="sync catalogsource" address="infrawatch-operators.service-telemetry.svc:50051" name=infrawatch-operators namespace=service-telemetry 2025-12-08T17:56:12.989009087+00:00 stderr F time="2025-12-08T17:56:12Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" source="{infrawatch-operators service-telemetry}" 2025-12-08T17:56:13.860669695+00:00 stderr F W1208 17:56:13.860613 1 logging.go:55] [core] [Channel #17 SubChannel #18]grpc: addrConn.createTransport failed to connect to {Addr: "infrawatch-operators.service-telemetry.svc:50051", ServerName: "infrawatch-operators.service-telemetry.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:16.878771290+00:00 stderr F time="2025-12-08T17:56:16Z" level=info msg="updating PackageManifest based on CatalogSource changes: {infrawatch-operators service-telemetry}" action="sync catalogsource" address="infrawatch-operators.service-telemetry.svc:50051" name=infrawatch-operators namespace=service-telemetry 2025-12-08T17:56:16.878814722+00:00 stderr F time="2025-12-08T17:56:16Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" source="{infrawatch-operators service-telemetry}" 2025-12-08T17:56:17.627555557+00:00 stderr F W1208 17:56:17.625099 1 logging.go:55] [core] [Channel #17 SubChannel #18]grpc: addrConn.createTransport failed to connect to {Addr: "infrawatch-operators.service-telemetry.svc:50051", ServerName: "infrawatch-operators.service-telemetry.svc:50051", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:20.279759823+00:00 stderr F time="2025-12-08T17:56:20Z" level=info msg="updating PackageManifest based on CatalogSource changes: {infrawatch-operators service-telemetry}" action="sync catalogsource" address="infrawatch-operators.service-telemetry.svc:50051" name=infrawatch-operators namespace=service-telemetry 2025-12-08T17:56:20.279811654+00:00 stderr F time="2025-12-08T17:56:20Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" source="{infrawatch-operators service-telemetry}" 2025-12-08T17:56:24.022957395+00:00 stderr F W1208 17:56:24.022778 1 logging.go:55] [core] [Channel #17 SubChannel #18]grpc: addrConn.createTransport failed to connect to {Addr: "infrawatch-operators.service-telemetry.svc:50051", ServerName: "infrawatch-operators.service-telemetry.svc:50051", }. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused" 2025-12-08T17:56:26.478470645+00:00 stderr F time="2025-12-08T17:56:26Z" level=info msg="updating PackageManifest based on CatalogSource changes: {infrawatch-operators service-telemetry}" action="sync catalogsource" address="infrawatch-operators.service-telemetry.svc:50051" name=infrawatch-operators namespace=service-telemetry 2025-12-08T17:56:26.478470645+00:00 stderr F time="2025-12-08T17:56:26Z" level=warning msg="error getting bundle stream" action="refresh cache" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.217.4.230:50051: connect: connection refused\"" source="{infrawatch-operators service-telemetry}" 2025-12-08T17:56:33.492087603+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace 2025-12-08T17:56:33.877591118+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="updating PackageManifest based on CatalogSource changes: {infrawatch-operators service-telemetry}" action="sync catalogsource" address="infrawatch-operators.service-telemetry.svc:50051" name=infrawatch-operators namespace=service-telemetry 2025-12-08T17:59:50.672031705+00:00 stderr F time="2025-12-08T17:59:50Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T17:59:51.688928037+00:00 stderr F time="2025-12-08T17:59:51Z" level=info msg="updating PackageManifest based on CatalogSource changes: {community-operators openshift-marketplace}" action="sync catalogsource" address="community-operators.openshift-marketplace.svc:50051" name=community-operators namespace=openshift-marketplace 2025-12-08T18:01:18.236803541+00:00 stderr F time="2025-12-08T18:01:18Z" level=info msg="updating PackageManifest based on CatalogSource changes: {infrawatch-operators service-telemetry}" action="sync catalogsource" address="infrawatch-operators.service-telemetry.svc:50051" name=infrawatch-operators namespace=service-telemetry 2025-12-08T18:04:31.733610922+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="updating PackageManifest based on CatalogSource changes: {certified-operators openshift-marketplace}" action="sync catalogsource" address="certified-operators.openshift-marketplace.svc:50051" name=certified-operators namespace=openshift-marketplace 2025-12-08T18:04:43.038669594+00:00 stderr F time="2025-12-08T18:04:43Z" level=info msg="updating PackageManifest based on CatalogSource changes: {redhat-operators openshift-marketplace}" action="sync catalogsource" address="redhat-operators.openshift-marketplace.svc:50051" name=redhat-operators namespace=openshift-marketplace ././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_0000755000175000017500000000000015115611513032761 
5ustar zuulzuul././@LongLink0000644000000000000000000000034100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896/control-plane-machine-set-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_0000755000175000017500000000000015115611520032757 5ustar zuulzuul././@LongLink0000644000000000000000000000034600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896/control-plane-machine-set-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_0000644000175000017500000004377415115611513033002 0ustar zuulzuul2025-12-08T17:44:19.832961228+00:00 stderr F I1208 17:44:19.831994 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:19.847119014+00:00 stderr F I1208 17:44:19.847067 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:19.865786704+00:00 stderr F I1208 17:44:19.864460 1 recorder_logging.go:49] &Event{ObjectMeta:{dummy.187f4e78d71445b1.d3b4e2d2 dummy 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:dummy,Name:dummy,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:FeatureGatesInitialized,Message:FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", 
"InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}},Source:EventSource{Component:,Host:,},FirstTimestamp:2025-12-08 17:44:19.863995825 +0000 UTC m=+0.398070419,LastTimestamp:2025-12-08 17:44:19.863995825 +0000 UTC m=+0.398070419,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:,ReportingInstance:,} 2025-12-08T17:44:19.865786704+00:00 stderr F I1208 17:44:19.864511 1 main.go:176] FeatureGates initialized: [AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests 
VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:19.865786704+00:00 stderr F I1208 17:44:19.864829 1 webhook.go:257] "Registering a validating webhook" logger="controller-runtime.builder" GVK="machine.openshift.io/v1, Kind=ControlPlaneMachineSet" path="/validate-machine-openshift-io-v1-controlplanemachineset" 2025-12-08T17:44:19.865786704+00:00 stderr F I1208 17:44:19.864956 1 server.go:183] "Registering webhook" logger="controller-runtime.webhook" path="/validate-machine-openshift-io-v1-controlplanemachineset" 2025-12-08T17:44:19.865786704+00:00 stderr F I1208 17:44:19.864985 1 main.go:232] "starting manager" logger="setup" 2025-12-08T17:44:19.865786704+00:00 stderr F I1208 17:44:19.865076 1 server.go:208] "Starting metrics server" logger="controller-runtime.metrics" 2025-12-08T17:44:19.865786704+00:00 stderr F I1208 17:44:19.865251 1 server.go:247] "Serving metrics server" logger="controller-runtime.metrics" bindAddress=":8080" secure=false 2025-12-08T17:44:19.865786704+00:00 stderr F I1208 17:44:19.865308 1 server.go:83] "starting server" name="health probe" addr="[::]:8081" 2025-12-08T17:44:19.865786704+00:00 stderr F I1208 17:44:19.865330 1 server.go:191] "Starting webhook server" logger="controller-runtime.webhook" 2025-12-08T17:44:19.870476352+00:00 stderr F I1208 17:44:19.870023 1 leaderelection.go:257] attempting to acquire leader lease openshift-machine-api/control-plane-machine-set-leader... 2025-12-08T17:44:19.870933264+00:00 stderr F I1208 17:44:19.870732 1 certwatcher.go:211] "Updated current TLS certificate" logger="controller-runtime.certwatcher" 2025-12-08T17:44:19.871039017+00:00 stderr F I1208 17:44:19.870945 1 server.go:242] "Serving webhook server" logger="controller-runtime.webhook" host="" port=9443 2025-12-08T17:44:19.871105559+00:00 stderr F I1208 17:44:19.871054 1 certwatcher.go:133] "Starting certificate poll+watcher" logger="controller-runtime.certwatcher" interval="10s" 2025-12-08T17:44:19.918410789+00:00 stderr F I1208 17:44:19.915410 1 leaderelection.go:271] successfully acquired lease openshift-machine-api/control-plane-machine-set-leader 2025-12-08T17:44:19.921910895+00:00 stderr F I1208 17:44:19.915804 1 recorder.go:104] "control-plane-machine-set-operator-75ffdb6fcd-dhfht_8fe9a4fb-748b-45f3-bec6-41aad8e7a3ea became leader" logger="events" type="Normal" object={"kind":"Lease","namespace":"openshift-machine-api","name":"control-plane-machine-set-leader","uid":"c6932179-7585-41da-8f3c-8cd7912cdd90","apiVersion":"coordination.k8s.io/v1","resourceVersion":"36976"} reason="LeaderElection" 2025-12-08T17:44:19.921910895+00:00 stderr F I1208 17:44:19.919120 1 controller.go:246] "Starting EventSource" controller="controlplanemachineset" source="kind source: *v1.Infrastructure" 2025-12-08T17:44:19.925824131+00:00 stderr F I1208 17:44:19.923423 1 controller.go:246] "Starting EventSource" controller="controlplanemachinesetgenerator" source="kind source: *v1beta1.Machine" 2025-12-08T17:44:19.925824131+00:00 stderr F I1208 17:44:19.923516 1 controller.go:246] "Starting EventSource" controller="controlplanemachinesetgenerator" source="kind source: *v1.ControlPlaneMachineSet" 2025-12-08T17:44:19.925824131+00:00 stderr F I1208 17:44:19.923538 1 controller.go:246] "Starting EventSource" controller="controlplanemachineset" source="kind source: *v1beta1.Machine" 2025-12-08T17:44:19.925824131+00:00 stderr F I1208 17:44:19.923568 1 controller.go:246] "Starting EventSource" 
controller="controlplanemachineset" source="kind source: *v1.ControlPlaneMachineSet" 2025-12-08T17:44:19.925824131+00:00 stderr F I1208 17:44:19.923585 1 controller.go:246] "Starting EventSource" controller="controlplanemachineset" source="kind source: *v1.Node" 2025-12-08T17:44:19.925824131+00:00 stderr F I1208 17:44:19.923621 1 controller.go:246] "Starting EventSource" controller="controlplanemachineset" source="kind source: *v1.ClusterOperator" 2025-12-08T17:44:19.986037794+00:00 stderr F I1208 17:44:19.985276 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.Infrastructure" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:44:19.986037794+00:00 stderr F I1208 17:44:19.985854 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1beta1.Machine" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:44:19.986794744+00:00 stderr F I1208 17:44:19.986196 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.Node" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:44:19.987117503+00:00 stderr F I1208 17:44:19.986816 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.ControlPlaneMachineSet" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:44:20.003035287+00:00 stderr F I1208 17:44:20.002852 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.ClusterOperator" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:44:20.074920799+00:00 stderr F I1208 17:44:20.074619 1 controller.go:186] "Starting Controller" controller="controlplanemachinesetgenerator" 2025-12-08T17:44:20.074920799+00:00 stderr F I1208 17:44:20.074651 1 controller.go:195] "Starting workers" controller="controlplanemachinesetgenerator" worker count=1 2025-12-08T17:44:20.074920799+00:00 stderr F I1208 17:44:20.074709 1 controller.go:186] "Starting Controller" controller="controlplanemachineset" 2025-12-08T17:44:20.074920799+00:00 stderr F I1208 17:44:20.074714 1 controller.go:195] "Starting workers" controller="controlplanemachineset" worker count=1 2025-12-08T17:44:20.074920799+00:00 stderr F I1208 17:44:20.074776 1 controller.go:175] "Reconciling control plane machine set" controller="controlplanemachineset" reconcileID="2c46bfab-3c4e-4d7d-b25b-8b00ef7f54e9" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:44:20.074920799+00:00 stderr F I1208 17:44:20.074900 1 controller.go:183] "No control plane machine set found, setting operator status available" controller="controlplanemachineset" reconcileID="2c46bfab-3c4e-4d7d-b25b-8b00ef7f54e9" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:44:20.075901475+00:00 stderr F I1208 17:44:20.075065 1 controller.go:189] "Finished reconciling control plane machine set" controller="controlplanemachineset" reconcileID="2c46bfab-3c4e-4d7d-b25b-8b00ef7f54e9" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:44:20.075901475+00:00 stderr F I1208 17:44:20.075115 1 controller.go:175] "Reconciling control plane machine set" controller="controlplanemachineset" reconcileID="bb49be19-439b-466d-8747-b39a918b0820" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:44:20.075901475+00:00 stderr F I1208 17:44:20.075145 1 controller.go:183] "No control plane machine set found, setting operator status 
available" controller="controlplanemachineset" reconcileID="bb49be19-439b-466d-8747-b39a918b0820" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:44:20.075901475+00:00 stderr F I1208 17:44:20.075198 1 controller.go:189] "Finished reconciling control plane machine set" controller="controlplanemachineset" reconcileID="bb49be19-439b-466d-8747-b39a918b0820" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:44:20.075901475+00:00 stderr F I1208 17:44:20.075228 1 controller.go:175] "Reconciling control plane machine set" controller="controlplanemachineset" reconcileID="9c6ffcd0-42bd-4fd3-84a9-3d6a234310cb" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:44:20.075901475+00:00 stderr F I1208 17:44:20.075255 1 controller.go:183] "No control plane machine set found, setting operator status available" controller="controlplanemachineset" reconcileID="9c6ffcd0-42bd-4fd3-84a9-3d6a234310cb" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:44:20.075901475+00:00 stderr F I1208 17:44:20.075287 1 controller.go:189] "Finished reconciling control plane machine set" controller="controlplanemachineset" reconcileID="9c6ffcd0-42bd-4fd3-84a9-3d6a234310cb" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:46:03.952034419+00:00 stderr F E1208 17:46:03.951342 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-machine-api/leases/control-plane-machine-set-leader?timeout=53.5s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:03.952948536+00:00 stderr F E1208 17:46:03.952912 1 leaderelection.go:436] error retrieving resource lock openshift-machine-api/control-plane-machine-set-leader: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-machine-api/leases/control-plane-machine-set-leader?timeout=53.5s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:47:05.135787536+00:00 stderr F I1208 17:47:05.133866 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1beta1.Machine" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:47:10.417688055+00:00 stderr F I1208 17:47:10.417560 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.Infrastructure" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:47:10.417961114+00:00 stderr F I1208 17:47:10.417805 1 controller.go:175] "Reconciling control plane machine set" controller="controlplanemachineset" reconcileID="4f15c4ac-af9f-4c8a-9f8a-b04524ac0081" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:47:10.417961114+00:00 stderr F I1208 17:47:10.417922 1 controller.go:183] "No control plane machine set found, setting operator status available" controller="controlplanemachineset" reconcileID="4f15c4ac-af9f-4c8a-9f8a-b04524ac0081" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:47:10.418055807+00:00 stderr F I1208 17:47:10.417976 1 controller.go:189] "Finished reconciling control plane machine set" controller="controlplanemachineset" reconcileID="4f15c4ac-af9f-4c8a-9f8a-b04524ac0081" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:47:15.258840641+00:00 stderr F I1208 17:47:15.258755 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.ControlPlaneMachineSet" 
reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:47:21.088060440+00:00 stderr F I1208 17:47:21.086717 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.Node" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:47:22.301043683+00:00 stderr F I1208 17:47:22.301002 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.ClusterOperator" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:47:22.301518378+00:00 stderr F I1208 17:47:22.301497 1 controller.go:175] "Reconciling control plane machine set" controller="controlplanemachineset" reconcileID="ac11365e-bb29-4e5f-a998-eae5dba91ae6" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:47:22.301591480+00:00 stderr F I1208 17:47:22.301579 1 controller.go:183] "No control plane machine set found, setting operator status available" controller="controlplanemachineset" reconcileID="ac11365e-bb29-4e5f-a998-eae5dba91ae6" namespace="openshift-machine-api" name="cluster" 2025-12-08T17:47:22.301652272+00:00 stderr F I1208 17:47:22.301639 1 controller.go:189] "Finished reconciling control plane machine set" controller="controlplanemachineset" reconcileID="ac11365e-bb29-4e5f-a998-eae5dba91ae6" namespace="openshift-machine-api" name="cluster" ././@LongLink0000644000000000000000000000025300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000755000175000017500000000000015115611513032776 5ustar zuulzuul././@LongLink0000644000000000000000000000030000000000000011574 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/kube-rbac-proxy-crio/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000755000175000017500000000000015115611520032774 5ustar zuulzuul././@LongLink0000644000000000000000000000030500000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/kube-rbac-proxy-crio/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000644000175000017500000000254015115611513033001 0ustar zuulzuul2025-12-08T17:42:24.827420149+00:00 stderr F W1208 17:42:24.827310 1 deprecated.go:66] 2025-12-08T17:42:24.827420149+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:42:24.827420149+00:00 stderr F 2025-12-08T17:42:24.827420149+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 
2025-12-08T17:42:24.827420149+00:00 stderr F 2025-12-08T17:42:24.827420149+00:00 stderr F =============================================== 2025-12-08T17:42:24.827420149+00:00 stderr F 2025-12-08T17:42:24.827420149+00:00 stderr F I1208 17:42:24.827391 1 kube-rbac-proxy.go:532] Reading config file: /etc/kubernetes/crio-metrics-proxy.cfg 2025-12-08T17:42:24.833046530+00:00 stderr F I1208 17:42:24.833002 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:42:24.833459628+00:00 stderr F I1208 17:42:24.833420 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca::/etc/kubernetes/kubelet-ca.crt" 2025-12-08T17:42:24.835207144+00:00 stderr F I1208 17:42:24.835186 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:42:24.835892643+00:00 stderr F I1208 17:42:24.835842 1 kube-rbac-proxy.go:397] Starting TCP socket on :9637 2025-12-08T17:42:24.836564471+00:00 stderr F I1208 17:42:24.836536 1 kube-rbac-proxy.go:404] Listening securely on :9637 ././@LongLink0000644000000000000000000000026100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/setup/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000755000175000017500000000000015115611520032774 5ustar zuulzuul././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/setup/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000644000175000017500000000014515115611513033000 0ustar zuulzuul2025-12-08T17:42:24.369799514+00:00 stdout P Waiting for kubelet key and certificate to be available ././@LongLink0000644000000000000000000000030600000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f3080000755000175000017500000000000015115611513032662 5ustar zuulzuul././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/util/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f3080000755000175000017500000000000015115611520032660 5ustar zuulzuul././@LongLink0000644000000000000000000000032000000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/util/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f3080000644000175000017500000000010715115611513032662 0ustar zuulzuul2025-12-08T17:56:30.897557142+00:00 stdout F '/bin/cpb' -> '/util/cpb' ././@LongLink0000644000000000000000000000031300000000000011600 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/pull/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f3080000755000175000017500000000000015115611520032660 5ustar zuulzuul././@LongLink0000644000000000000000000000032000000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/pull/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f3080000644000175000017500000000364315115611513032672 0ustar zuulzuul2025-12-08T17:56:32.788843682+00:00 stdout F skipping a dir without errors: / 2025-12-08T17:56:32.788843682+00:00 stdout F skipping a dir without errors: /bundle 2025-12-08T17:56:32.788843682+00:00 stdout F skipping all files in the dir: /dev 2025-12-08T17:56:32.789047377+00:00 stdout F skipping a dir without errors: /etc 2025-12-08T17:56:32.789047377+00:00 stdout F skipping a dir without errors: /manifests 2025-12-08T17:56:32.789047377+00:00 stdout F skipping a dir without errors: /metadata 2025-12-08T17:56:32.789130569+00:00 stdout F skipping all files in the dir: /proc 2025-12-08T17:56:32.789140049+00:00 stdout F skipping a dir without errors: /run 2025-12-08T17:56:32.789171360+00:00 stdout F skipping a dir without errors: /run/secrets 2025-12-08T17:56:32.789196421+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm 2025-12-08T17:56:32.789218051+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/ca 2025-12-08T17:56:32.789248732+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/syspurpose 2025-12-08T17:56:32.789291443+00:00 stdout F skipping all files in the dir: /sys 2025-12-08T17:56:32.789299123+00:00 stdout F skipping a dir without errors: /util 2025-12-08T17:56:32.789326004+00:00 stdout F skipping a dir without errors: /var 2025-12-08T17:56:32.789343174+00:00 stdout F skipping a dir without errors: /var/run 2025-12-08T17:56:32.789372515+00:00 stdout F skipping a dir without errors: /var/run/secrets 2025-12-08T17:56:32.789397186+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io 2025-12-08T17:56:32.789426207+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io/serviceaccount 2025-12-08T17:56:32.789453927+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io/serviceaccount/..2025_12_08_17_56_29.748663655 2025-12-08T17:56:32.789532899+00:00 stdout F &{metadata/annotations.yaml manifests/} ././@LongLink0000644000000000000000000000031600000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/extract/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f3080000755000175000017500000000000015115611520032660 5ustar zuulzuul././@LongLink0000644000000000000000000000032300000000000011601 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/extract/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f3080000644000175000017500000000114115115611513032661 0ustar zuulzuul2025-12-08T17:56:33.494242939+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Using in-cluster kube client config" 2025-12-08T17:56:33.503774088+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Reading file" file=/bundle/manifests/infra.watch_servicetelemetrys.yaml 2025-12-08T17:56:33.510535264+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Reading file" file=/bundle/manifests/service-telemetry-operator.clusterserviceversion.yaml 2025-12-08T17:56:33.511276223+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Reading file" file=/bundle/metadata/annotations.yaml ././@LongLink0000644000000000000000000000024600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress_rout0000755000175000017500000000000015115611513033172 5ustar zuulzuul././@LongLink0000644000000000000000000000025500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011/router/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress_rout0000755000175000017500000000000015115611520033170 5ustar zuulzuul././@LongLink0000644000000000000000000000026200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011/router/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress_rout0000644000175000017500000007564015115611513033210 0ustar zuulzuul2025-12-08T17:44:19.500256973+00:00 stderr F I1208 17:44:19.499194 1 template.go:560] "msg"="starting router" "logger"="router" "version"="majorFromGit: \nminorFromGit: \ncommitFromGit: 96bfd2164c7885df9019ce9eeb79d506bd7e871b\nversionFromGit: 4.0.0-581-g96bfd216\ngitTreeState: clean\nbuildDate: 2025-10-21T12:30:19Z\n" 2025-12-08T17:44:19.502524475+00:00 stderr F I1208 17:44:19.502504 1 metrics.go:156] "msg"="router health and metrics port listening on HTTP and HTTPS" "address"="0.0.0.0:1936" "logger"="metrics" 2025-12-08T17:44:19.520787053+00:00 stderr F I1208 17:44:19.520434 1 router.go:214] "msg"="creating a new template router" "logger"="template" "writeDir"="/var/lib/haproxy" 2025-12-08T17:44:19.520787053+00:00 stderr F I1208 17:44:19.520515 1 router.go:298] "msg"="router will coalesce reloads within an interval of each other" "interval"="5s" "logger"="template" 2025-12-08T17:44:19.521335208+00:00 stderr F I1208 17:44:19.521312 1 router.go:368] "msg"="watching for changes" "logger"="template" "path"="/etc/pki/tls/private" 2025-12-08T17:44:19.521477672+00:00 stderr F I1208 17:44:19.521370 1 router.go:283] "msg"="router is including routes in all namespaces" "logger"="router" 2025-12-08T17:44:19.576641927+00:00 stderr F W1208 17:44:19.576507 1 reflector.go:547] 
github.com/openshift/router/pkg/router/controller/factory/factory.go:124: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:19.576641927+00:00 stderr F E1208 17:44:19.576577 1 reflector.go:150] github.com/openshift/router/pkg/router/controller/factory/factory.go:124: Failed to watch *v1.Route: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:19.577354226+00:00 stderr F I1208 17:44:19.577327 1 reflector.go:359] Caches populated for *v1.EndpointSlice from github.com/openshift/router/pkg/router/controller/factory/factory.go:124 2025-12-08T17:44:19.579985448+00:00 stderr F I1208 17:44:19.579938 1 reflector.go:359] Caches populated for *v1.Service from github.com/openshift/router/pkg/router/template/service_lookup.go:33 2025-12-08T17:44:20.372764343+00:00 stderr F I1208 17:44:20.372166 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:20.372764343+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:20.372764343+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:20.873110930+00:00 stderr F W1208 17:44:20.867772 1 reflector.go:547] github.com/openshift/router/pkg/router/controller/factory/factory.go:124: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:20.873110930+00:00 stderr F E1208 17:44:20.867810 1 reflector.go:150] github.com/openshift/router/pkg/router/controller/factory/factory.go:124: Failed to watch *v1.Route: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:21.358835380+00:00 stderr F I1208 17:44:21.358287 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:21.358835380+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:21.358835380+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:22.360960405+00:00 stderr F I1208 17:44:22.359667 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:22.360960405+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:22.360960405+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:22.588849841+00:00 stderr F W1208 17:44:22.588743 1 reflector.go:547] github.com/openshift/router/pkg/router/controller/factory/factory.go:124: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:22.588849841+00:00 stderr F E1208 17:44:22.588778 1 reflector.go:150] github.com/openshift/router/pkg/router/controller/factory/factory.go:124: Failed to watch *v1.Route: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:23.359792349+00:00 stderr F I1208 17:44:23.357817 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:23.359792349+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:23.359792349+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:24.363990431+00:00 stderr F I1208 17:44:24.360235 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:24.363990431+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:24.363990431+00:00 stderr F [-]has-synced 
failed: Router not synced 2025-12-08T17:44:25.361301565+00:00 stderr F I1208 17:44:25.361261 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:25.361301565+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:25.361301565+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:26.357037965+00:00 stderr F I1208 17:44:26.356987 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:26.357037965+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:26.357037965+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:27.357911475+00:00 stderr F I1208 17:44:27.354639 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:27.357911475+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:27.357911475+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:27.626596274+00:00 stderr F W1208 17:44:27.626542 1 reflector.go:547] github.com/openshift/router/pkg/router/controller/factory/factory.go:124: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:27.626596274+00:00 stderr F E1208 17:44:27.626573 1 reflector.go:150] github.com/openshift/router/pkg/router/controller/factory/factory.go:124: Failed to watch *v1.Route: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:28.353551244+00:00 stderr F I1208 17:44:28.353082 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:28.353551244+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:28.353551244+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:29.352692426+00:00 stderr F I1208 17:44:29.352261 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:29.352692426+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:29.352692426+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:30.351580994+00:00 stderr F I1208 17:44:30.351521 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:30.351580994+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:30.351580994+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:31.355523138+00:00 stderr F I1208 17:44:31.355474 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:31.355523138+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:31.355523138+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:32.352242545+00:00 stderr F I1208 17:44:32.352197 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:32.352242545+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:32.352242545+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:33.352265203+00:00 stderr F I1208 17:44:33.352214 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:33.352265203+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:33.352265203+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:34.351377556+00:00 stderr F I1208 17:44:34.351321 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:34.351377556+00:00 
stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:34.351377556+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:35.352206606+00:00 stderr F I1208 17:44:35.352143 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:35.352206606+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:35.352206606+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:36.352112630+00:00 stderr F I1208 17:44:36.352032 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:36.352112630+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:36.352112630+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:37.351264088+00:00 stderr F I1208 17:44:37.351188 1 healthz.go:255] backend-http,has-synced check failed: healthz 2025-12-08T17:44:37.351264088+00:00 stderr F [-]backend-http failed: backend reported failure 2025-12-08T17:44:37.351264088+00:00 stderr F [-]has-synced failed: Router not synced 2025-12-08T17:44:37.647040208+00:00 stderr F I1208 17:44:37.646989 1 reflector.go:359] Caches populated for *v1.Route from github.com/openshift/router/pkg/router/controller/factory/factory.go:124 2025-12-08T17:44:37.729070680+00:00 stderr F I1208 17:44:37.729016 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:44:37.729184203+00:00 stderr F I1208 17:44:37.729157 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:44:37.729474302+00:00 stderr F I1208 17:44:37.729445 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:44:37.729682228+00:00 stderr F I1208 17:44:37.729655 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:44:37.729798211+00:00 stderr F I1208 17:44:37.729772 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:44:37.730691465+00:00 stderr F E1208 17:44:37.730657 1 haproxy.go:418] can't scrape HAProxy: dial unix /var/lib/haproxy/run/haproxy.sock: connect: no such file or directory 2025-12-08T17:44:37.788135744+00:00 stderr F I1208 17:44:37.788086 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:44:42.826392223+00:00 stderr F I1208 17:44:42.826207 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:44:42.826392223+00:00 stderr F I1208 17:44:42.826377 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:44:42.826943378+00:00 stderr F I1208 17:44:42.826731 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:44:42.827383130+00:00 stderr F I1208 17:44:42.827335 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:44:42.827719240+00:00 stderr F I1208 17:44:42.827665 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:44:42.975265405+00:00 stderr F I1208 17:44:42.974695 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:45:05.465558407+00:00 stderr F I1208 17:45:05.465467 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 
2025-12-08T17:45:05.465628299+00:00 stderr F I1208 17:45:05.465601 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:05.465937518+00:00 stderr F I1208 17:45:05.465913 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:05.466117933+00:00 stderr F I1208 17:45:05.466097 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:05.466298058+00:00 stderr F I1208 17:45:05.466277 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:05.526436671+00:00 stderr F I1208 17:45:05.526362 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:45:22.101187283+00:00 stderr F I1208 17:45:22.100755 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:22.101187283+00:00 stderr F I1208 17:45:22.100860 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:22.101244875+00:00 stderr F I1208 17:45:22.101219 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:22.104959308+00:00 stderr F I1208 17:45:22.101443 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:22.104959308+00:00 stderr F I1208 17:45:22.101682 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:22.177191768+00:00 stderr F I1208 17:45:22.176954 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:45:49.357627228+00:00 stderr F I1208 17:45:49.357529 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:49.357765382+00:00 stderr F I1208 17:45:49.357731 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:49.359696760+00:00 stderr F I1208 17:45:49.359630 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:49.359989959+00:00 stderr F I1208 17:45:49.359944 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:49.360273247+00:00 stderr F I1208 17:45:49.360206 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:45:49.450182046+00:00 stderr F I1208 17:45:49.450072 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:46:31.377687267+00:00 stderr F I1208 17:46:31.377614 1 reflector.go:359] Caches populated for *v1.Service from github.com/openshift/router/pkg/router/template/service_lookup.go:33 2025-12-08T17:46:31.922355096+00:00 stderr F I1208 17:46:31.922284 1 reflector.go:359] Caches populated for *v1.EndpointSlice from github.com/openshift/router/pkg/router/controller/factory/factory.go:124 2025-12-08T17:55:13.313680463+00:00 stderr F I1208 17:55:13.313509 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:55:13.313724384+00:00 stderr F I1208 17:55:13.313712 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:55:13.316008935+00:00 stderr F I1208 17:55:13.314094 1 template_helper.go:370] 
"msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:55:13.316008935+00:00 stderr F I1208 17:55:13.314312 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:55:13.316008935+00:00 stderr F I1208 17:55:13.314637 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:55:13.438473691+00:00 stderr F I1208 17:55:13.438402 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:55:18.311901360+00:00 stderr F I1208 17:55:18.311838 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:55:18.312174588+00:00 stderr F I1208 17:55:18.312149 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:55:18.313926144+00:00 stderr F I1208 17:55:18.313870 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:55:18.314108320+00:00 stderr F I1208 17:55:18.314090 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:55:18.314336596+00:00 stderr F I1208 17:55:18.314308 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:55:18.375088980+00:00 stderr F I1208 17:55:18.375045 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:56:07.919804249+00:00 stderr F E1208 17:56:07.919763 1 plugin.go:288] unable to find service service-telemetry/infrawatch-operators: Service "infrawatch-operators" not found 2025-12-08T17:57:28.598708713+00:00 stderr F I1208 17:57:28.598309 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:28.598708713+00:00 stderr F I1208 17:57:28.598462 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:28.598790625+00:00 stderr F I1208 17:57:28.598758 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:28.598995240+00:00 stderr F I1208 17:57:28.598963 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:28.599173195+00:00 stderr F I1208 17:57:28.599144 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:28.599268167+00:00 stderr F I1208 17:57:28.599239 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:28.652185787+00:00 stderr F I1208 17:57:28.652144 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:57:34.801381431+00:00 stderr F I1208 17:57:34.801297 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:34.801690029+00:00 stderr F I1208 17:57:34.801641 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:34.802137541+00:00 stderr F I1208 17:57:34.802085 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:34.802427918+00:00 stderr F I1208 17:57:34.802380 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:34.802735046+00:00 stderr F I1208 17:57:34.802687 1 template_helper.go:370] 
"msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:34.802896340+00:00 stderr F I1208 17:57:34.802851 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:34.872110309+00:00 stderr F I1208 17:57:34.872062 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:57:40.468172704+00:00 stderr F I1208 17:57:40.468105 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:40.468318737+00:00 stderr F I1208 17:57:40.468281 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:40.468708877+00:00 stderr F I1208 17:57:40.468681 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:40.469004025+00:00 stderr F I1208 17:57:40.468978 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:40.469250671+00:00 stderr F I1208 17:57:40.469227 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:40.469395145+00:00 stderr F I1208 17:57:40.469359 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:40.469543479+00:00 stderr F I1208 17:57:40.469505 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:40.531203352+00:00 stderr F I1208 17:57:40.531163 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:57:56.695349815+00:00 stderr F I1208 17:57:56.695304 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:56.695463438+00:00 stderr F I1208 17:57:56.695445 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:56.695718664+00:00 stderr F I1208 17:57:56.695699 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:56.695900150+00:00 stderr F I1208 17:57:56.695888 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:56.696103715+00:00 stderr F I1208 17:57:56.696081 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:56.696206668+00:00 stderr F I1208 17:57:56.696184 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:56.696325131+00:00 stderr F I1208 17:57:56.696304 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:56.696422363+00:00 stderr F I1208 17:57:56.696405 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:57:56.755084249+00:00 stderr F I1208 17:57:56.755000 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:58:14.187371958+00:00 stderr F I1208 17:58:14.187111 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:14.187577573+00:00 stderr F I1208 17:58:14.187564 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:14.187912373+00:00 stderr F I1208 17:58:14.187897 1 template_helper.go:370] "msg"="parseIPList empty list found" 
"logger"="template" 2025-12-08T17:58:14.188145618+00:00 stderr F I1208 17:58:14.188132 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:14.188398435+00:00 stderr F I1208 17:58:14.188385 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:14.188533258+00:00 stderr F I1208 17:58:14.188521 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:14.188772104+00:00 stderr F I1208 17:58:14.188758 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:14.188919298+00:00 stderr F I1208 17:58:14.188907 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:14.261059063+00:00 stderr F I1208 17:58:14.260987 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:58:19.188492676+00:00 stderr F I1208 17:58:19.188048 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:19.188492676+00:00 stderr F I1208 17:58:19.188199 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:19.188524816+00:00 stderr F I1208 17:58:19.188490 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:19.189245255+00:00 stderr F I1208 17:58:19.188676 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:19.189245255+00:00 stderr F I1208 17:58:19.188900 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:19.189245255+00:00 stderr F I1208 17:58:19.189013 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:19.189245255+00:00 stderr F I1208 17:58:19.189215 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:19.189331657+00:00 stderr F I1208 17:58:19.189312 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:19.254791289+00:00 stderr F I1208 17:58:19.254745 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:58:26.432571943+00:00 stderr F I1208 17:58:26.432510 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:26.432733357+00:00 stderr F I1208 17:58:26.432715 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:26.433185909+00:00 stderr F I1208 17:58:26.433157 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:26.433435705+00:00 stderr F I1208 17:58:26.433410 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:26.433698562+00:00 stderr F I1208 17:58:26.433670 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:26.433820785+00:00 stderr F I1208 17:58:26.433802 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:26.434125423+00:00 stderr F I1208 17:58:26.434110 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:26.434257016+00:00 stderr F I1208 17:58:26.434238 1 template_helper.go:370] 
"msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:26.503801224+00:00 stderr F I1208 17:58:26.503754 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:58:31.391382877+00:00 stderr F I1208 17:58:31.391309 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:31.391483719+00:00 stderr F I1208 17:58:31.391460 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:31.391738456+00:00 stderr F I1208 17:58:31.391717 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:31.391957971+00:00 stderr F I1208 17:58:31.391916 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:31.392116835+00:00 stderr F I1208 17:58:31.392094 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:31.392217048+00:00 stderr F I1208 17:58:31.392195 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:31.392429443+00:00 stderr F I1208 17:58:31.392406 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:31.392573667+00:00 stderr F I1208 17:58:31.392528 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:31.501091392+00:00 stderr F I1208 17:58:31.501030 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:58:36.392443212+00:00 stderr F I1208 17:58:36.392367 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:36.392672728+00:00 stderr F I1208 17:58:36.392656 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:36.393133000+00:00 stderr F I1208 17:58:36.393112 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:36.393445838+00:00 stderr F I1208 17:58:36.393429 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:36.393742825+00:00 stderr F I1208 17:58:36.393726 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:36.393945941+00:00 stderr F I1208 17:58:36.393929 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:36.394259680+00:00 stderr F I1208 17:58:36.394242 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:36.394386903+00:00 stderr F I1208 17:58:36.394372 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:36.485062536+00:00 stderr F I1208 17:58:36.484837 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" 2025-12-08T17:58:41.392433585+00:00 stderr F I1208 17:58:41.392106 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:41.392522207+00:00 stderr F I1208 17:58:41.392498 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:41.392811575+00:00 stderr F I1208 17:58:41.392771 1 template_helper.go:370] "msg"="parseIPList empty list found" 
"logger"="template" 2025-12-08T17:58:41.393001020+00:00 stderr F I1208 17:58:41.392968 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:41.393178364+00:00 stderr F I1208 17:58:41.393149 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:41.393292137+00:00 stderr F I1208 17:58:41.393250 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:41.393472582+00:00 stderr F I1208 17:58:41.393439 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:41.393561534+00:00 stderr F I1208 17:58:41.393540 1 template_helper.go:370] "msg"="parseIPList empty list found" "logger"="template" 2025-12-08T17:58:41.442656459+00:00 stderr F I1208 17:58:41.442541 1 router.go:665] "msg"="router reloaded" "logger"="template" "output"=" - Checking http://localhost:80 ...\n - Health check ok : 0 retry attempt(s).\n" ././@LongLink0000644000000000000000000000026100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-oper0000755000175000017500000000000015115611514033065 5ustar zuulzuul././@LongLink0000644000000000000000000000030100000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-oper0000755000175000017500000000000015115611521033063 5ustar zuulzuul././@LongLink0000644000000000000000000000030600000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-oper0000644000175000017500000000202015115611514033061 0ustar zuulzuul2025-12-08T17:44:23.065369048+00:00 stderr F W1208 17:44:23.053395 1 deprecated.go:66] 2025-12-08T17:44:23.065369048+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:23.065369048+00:00 stderr F 2025-12-08T17:44:23.065369048+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 
2025-12-08T17:44:23.065369048+00:00 stderr F 2025-12-08T17:44:23.065369048+00:00 stderr F =============================================== 2025-12-08T17:44:23.065369048+00:00 stderr F 2025-12-08T17:44:23.065369048+00:00 stderr F I1208 17:44:23.061263 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:23.065369048+00:00 stderr F I1208 17:44:23.063394 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:23.066239632+00:00 stderr F I1208 17:44:23.066208 1 kube-rbac-proxy.go:397] Starting TCP socket on :9393 2025-12-08T17:44:23.066969882+00:00 stderr F I1208 17:44:23.066947 1 kube-rbac-proxy.go:404] Listening securely on :9393
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/ingress-operator/0.log
2025-12-08T17:44:23.185199467+00:00 stderr F 2025-12-08T17:44:23.177Z INFO operator.main ingress-operator/start.go:76 using operator namespace {"namespace": "openshift-ingress-operator"} 2025-12-08T17:44:23.236987029+00:00 stderr F 2025-12-08T17:44:23.234Z INFO operator.init.KubeAPIWarningLogger rest/warnings.go:144 v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice 2025-12-08T17:44:23.249739958+00:00 stderr F 2025-12-08T17:44:23.249Z INFO operator.main ingress-operator/start.go:76 registering Prometheus metrics for canary_controller 2025-12-08T17:44:23.249789279+00:00 stderr F 2025-12-08T17:44:23.249Z INFO operator.main ingress-operator/start.go:76 registering Prometheus metrics for ingress_controller 2025-12-08T17:44:23.250026295+00:00 stderr F 2025-12-08T17:44:23.250Z INFO operator.main ingress-operator/start.go:76 registering Prometheus metrics for route_metrics_controller 2025-12-08T17:44:23.250755875+00:00 stderr F 2025-12-08T17:44:23.250Z INFO operator.init runtime/asm_amd64.s:1700 starting metrics listener {"addr": "127.0.0.1:60000"} 2025-12-08T17:44:23.250755875+00:00 stderr F 2025-12-08T17:44:23.250Z INFO operator.main ingress-operator/start.go:76 watching file {"filename": "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem"} 2025-12-08T17:44:23.288000811+00:00 stderr F I1208 17:44:23.287900 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:23.290254002+00:00 stderr F 2025-12-08T17:44:23.289Z INFO operator.init ingress-operator/start.go:218 FeatureGates initialized {"knownFeatures": 
["AWSClusterHostedDNS","AWSClusterHostedDNSInstall","AWSDedicatedHosts","AWSServiceLBNetworkSecurityGroup","AdditionalRoutingCapabilities","AdminNetworkPolicy","AlibabaPlatform","AutomatedEtcdBackup","AzureClusterHostedDNSInstall","AzureDedicatedHosts","AzureMultiDisk","AzureWorkloadIdentity","BootImageSkewEnforcement","BootcNodeManagement","BuildCSIVolumes","CPMSMachineNamePrefix","ClusterAPIInstall","ClusterAPIInstallIBMCloud","ClusterMonitoringConfig","ClusterVersionOperatorConfiguration","ConsolePluginContentSecurityPolicy","DNSNameResolver","DualReplica","DyanmicServiceEndpointIBMCloud","DynamicResourceAllocation","EtcdBackendQuota","EventedPLEG","Example","Example2","ExternalOIDC","ExternalOIDCWithUIDAndExtraClaimMappings","ExternalSnapshotMetadata","GCPClusterHostedDNS","GCPClusterHostedDNSInstall","GCPCustomAPIEndpoints","GCPCustomAPIEndpointsInstall","GatewayAPI","GatewayAPIController","HighlyAvailableArbiter","ImageModeStatusReporting","ImageStreamImportMode","ImageVolume","IngressControllerDynamicConfigurationManager","IngressControllerLBSubnetsAWS","InsightsConfig","InsightsConfigAPI","InsightsOnDemandDataGather","IrreconcilableMachineConfig","KMSEncryptionProvider","KMSv1","MachineAPIMigration","MachineAPIOperatorDisableMachineHealthCheckController","MachineConfigNodes","ManagedBootImages","ManagedBootImagesAWS","ManagedBootImagesAzure","ManagedBootImagesvSphere","MaxUnavailableStatefulSet","MetricsCollectionProfiles","MinimumKubeletVersion","MixedCPUsAllocation","MultiArchInstallAzure","MultiDiskSetup","MutatingAdmissionPolicy","NetworkDiagnosticsConfig","NetworkLiveMigration","NetworkSegmentation","NewOLM","NewOLMCatalogdAPIV1Metas","NewOLMOwnSingleNamespace","NewOLMPreflightPermissionChecks","NewOLMWebhookProviderOpenshiftServiceCA","NoRegistryClusterOperations","NodeSwap","NutanixMultiSubnets","OVNObservability","OpenShiftPodSecurityAdmission","PinnedImages","PreconfiguredUDNAddresses","ProcMountType","RouteAdvertisements","RouteExternalCertificate","SELinuxMount","ServiceAccountTokenNodeBinding","SetEIPForNLBIngressController","ShortCertRotation","SignatureStores","SigstoreImageVerification","SigstoreImageVerificationPKI","StoragePerformantSecurityPolicy","TranslateStreamCloseWebsocketRequests","UpgradeStatus","UserNamespacesPodSecurityStandards","UserNamespacesSupport","VSphereConfigurableMaxAllowedBlockVolumesPerNode","VSphereHostVMGroupZonal","VSphereMixedNodeEnv","VSphereMultiDisk","VSphereMultiNetworks","VolumeAttributesClass","VolumeGroupSnapshot"]} 2025-12-08T17:44:23.291356453+00:00 stderr F I1208 17:44:23.289702 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-ingress-operator", Name:"ingress-operator", UID:"", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", 
"SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:23.327819717+00:00 stderr F I1208 17:44:23.327534 1 base_controller.go:67] Waiting for caches to sync for spread-default-router-pods 2025-12-08T17:44:23.422756057+00:00 stderr F 2025-12-08T17:44:23.421Z INFO operator.init.controller-runtime.metrics manager/runnable_group.go:226 Starting metrics server 2025-12-08T17:44:23.422756057+00:00 stderr F 2025-12-08T17:44:23.422Z INFO operator.init.controller-runtime.metrics manager/runnable_group.go:226 Serving metrics server {"bindAddress": ":8080", "secure": false} 2025-12-08T17:44:23.436053269+00:00 stderr F I1208 17:44:23.428438 1 base_controller.go:73] Caches are synced for spread-default-router-pods 2025-12-08T17:44:23.436053269+00:00 stderr F I1208 17:44:23.428456 1 base_controller.go:110] Starting #1 worker of spread-default-router-pods controller ... 
2025-12-08T17:44:24.437925807+00:00 stderr F 2025-12-08T17:44:24.427Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingress_controller", "source": "kind source: *v1.Infrastructure"} 2025-12-08T17:44:24.437925807+00:00 stderr F 2025-12-08T17:44:24.428Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "status_controller", "source": "kind source: *v1alpha1.Subscription"} 2025-12-08T17:44:24.437925807+00:00 stderr F 2025-12-08T17:44:24.428Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "status_controller", "source": "kind source: *v1.IngressController"} 2025-12-08T17:44:24.437925807+00:00 stderr F 2025-12-08T17:44:24.428Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "status_controller", "source": "kind source: *v1.ClusterOperator"} 2025-12-08T17:44:24.437925807+00:00 stderr F 2025-12-08T17:44:24.428Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "status_controller", "source": "kind source: *v1alpha1.Subscription"} 2025-12-08T17:44:24.437994599+00:00 stderr F 2025-12-08T17:44:24.428Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "status_controller", "source": "kind source: *v1.CustomResourceDefinition"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.428Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "configurable_route_controller", "source": "kind source: *v1.RoleBinding"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingress_controller", "source": "kind source: *v1.Pod"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingress_controller", "source": "kind source: *v1.IngressController"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "error_page_configmap_controller", "source": "kind source: *v1.ConfigMap"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingress_controller", "source": "kind source: *v1.Deployment"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "configurable_route_controller", "source": "kind source: *v1.Ingress"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "configurable_route_controller", "source": "kind source: *v1.Role"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingress_controller", "source": "kind source: *v1.Service"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "certificate_controller", "source": "kind source: *v1.IngressController"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingress_controller", "source": "kind source: *v1.DNSRecord"} 2025-12-08T17:44:24.439179211+00:00 stderr F 
2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingress_controller", "source": "kind source: *v1.DNS"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingress_controller", "source": "kind source: *v1.Ingress"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "clientca_configmap_controller", "source": "kind source: *v1.ConfigMap"} 2025-12-08T17:44:24.439179211+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "error_page_configmap_controller", "source": "kind source: *v1.IngressController"} 2025-12-08T17:44:24.439207542+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "error_page_configmap_controller", "source": "kind source: *v1.ConfigMap"} 2025-12-08T17:44:24.439285004+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingress_controller", "source": "kind source: *v1.Proxy"} 2025-12-08T17:44:24.439285004+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "clientca_configmap_controller", "source": "kind source: *v1.IngressController"} 2025-12-08T17:44:24.439295414+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "clientca_configmap_controller", "source": "kind source: *v1.ConfigMap"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "dns_controller", "source": "kind source: *v1.Secret"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "certificate_publisher_controller", "source": "kind source: *v1.IngressController"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "crl", "source": "kind source: *v1.IngressController"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "certificate_publisher_controller", "source": "informer source: 0xc00159e410"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "dns_controller", "source": "kind source: *v1.DNSRecord"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "dns_controller", "source": "kind source: *v1.DNS"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "canary_controller", "source": "kind source: *v1.Service"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "crl", "source": "informer source: 0xc00159e738"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource 
{"controller": "crl", "source": "informer source: 0xc00159e738"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingressclass_controller", "source": "kind source: *v1.IngressClass"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "monitoring_dashboard_controller", "source": "kind source: *v1.Infrastructure"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "canary_controller", "source": "kind source: *v1.IngressController"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "canary_controller", "source": "kind source: *v1.Route"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "dns_controller", "source": "kind source: *v1.Infrastructure"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "canary_controller", "source": "kind source: *v1.DaemonSet"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "monitoring_dashboard_controller", "source": "kind source: *v1.ConfigMap"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "gatewayapi_controller", "source": "kind source: *v1.CustomResourceDefinition"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "gatewayapi_controller", "source": "kind source: *v1.FeatureGate"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "ingressclass_controller", "source": "kind source: *v1.IngressController"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "route_metrics_controller", "source": "kind source: *v1.Route"} 2025-12-08T17:44:24.442567704+00:00 stderr F 2025-12-08T17:44:24.429Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "route_metrics_controller", "source": "kind source: *v1.IngressController"} 2025-12-08T17:44:24.451344594+00:00 stderr F 2025-12-08T17:44:24.450Z ERROR operator.init.controller-runtime.source.EventHandler wait/loop.go:53 failed to get informer from cache {"error": "failed to get restmapping: failed to get API group resources: unable to retrieve the complete list of server APIs: route.openshift.io/v1: the server is currently unable to handle the request"} 2025-12-08T17:44:24.457264475+00:00 stderr F 2025-12-08T17:44:24.456Z INFO operator.certificate_publisher_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default"} 2025-12-08T17:44:24.537921165+00:00 stderr F 2025-12-08T17:44:24.534Z ERROR operator.init.controller-runtime.source.EventHandler wait/loop.go:53 failed to get informer from cache {"error": "unable to retrieve the complete list of server APIs: route.openshift.io/v1: no 
matches for route.openshift.io/v1, Resource="} 2025-12-08T17:44:24.541111462+00:00 stderr F 2025-12-08T17:44:24.540Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:44:24.549310216+00:00 stderr F 2025-12-08T17:44:24.548Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:44:24.549310216+00:00 stderr F 2025-12-08T17:44:24.549Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:44:24.549310216+00:00 stderr F 2025-12-08T17:44:24.549Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:44:24.588615958+00:00 stderr F 2025-12-08T17:44:24.588Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "crl"} 2025-12-08T17:44:24.588615958+00:00 stderr F 2025-12-08T17:44:24.588Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "crl", "worker count": 1} 2025-12-08T17:44:24.769927303+00:00 stderr F 2025-12-08T17:44:24.769Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "certificate_publisher_controller"} 2025-12-08T17:44:24.769927303+00:00 stderr F 2025-12-08T17:44:24.769Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "certificate_publisher_controller", "worker count": 1} 2025-12-08T17:44:24.769927303+00:00 stderr F 2025-12-08T17:44:24.769Z INFO operator.certificate_publisher_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:24.773905982+00:00 stderr F 2025-12-08T17:44:24.770Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingress {"name": "default", "related": ""} 2025-12-08T17:44:24.789084766+00:00 stderr F 2025-12-08T17:44:24.788Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "clientca_configmap_controller"} 2025-12-08T17:44:24.789084766+00:00 stderr F 2025-12-08T17:44:24.789Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "clientca_configmap_controller", "worker count": 1} 2025-12-08T17:44:24.852377312+00:00 stderr F 2025-12-08T17:44:24.851Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "monitoring_dashboard_controller"} 2025-12-08T17:44:24.852377312+00:00 stderr F 2025-12-08T17:44:24.852Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "monitoring_dashboard_controller", "worker count": 1} 2025-12-08T17:44:24.855616921+00:00 stderr F 2025-12-08T17:44:24.853Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "ingressclass_controller"} 2025-12-08T17:44:24.855616921+00:00 stderr F 2025-12-08T17:44:24.853Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "ingressclass_controller", "worker count": 1} 2025-12-08T17:44:24.855616921+00:00 stderr F 2025-12-08T17:44:24.853Z INFO operator.ingressclass_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:24.870905278+00:00 stderr F 2025-12-08T17:44:24.870Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "certificate_controller"} 
2025-12-08T17:44:24.870905278+00:00 stderr F 2025-12-08T17:44:24.870Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "certificate_controller", "worker count": 1} 2025-12-08T17:44:24.870905278+00:00 stderr F 2025-12-08T17:44:24.870Z INFO operator.certificate_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:24.890953665+00:00 stderr F 2025-12-08T17:44:24.890Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "dns_controller"} 2025-12-08T17:44:24.890953665+00:00 stderr F 2025-12-08T17:44:24.890Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "dns_controller", "worker count": 1} 2025-12-08T17:44:24.890953665+00:00 stderr F 2025-12-08T17:44:24.890Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "gatewayapi_controller"} 2025-12-08T17:44:24.890953665+00:00 stderr F 2025-12-08T17:44:24.890Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "gatewayapi_controller", "worker count": 1} 2025-12-08T17:44:24.890953665+00:00 stderr F 2025-12-08T17:44:24.890Z INFO operator.gatewayapi_controller controller/controller.go:119 reconciling {"request": {"name":"cluster"}} 2025-12-08T17:44:24.948350530+00:00 stderr F 2025-12-08T17:44:24.947Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "error_page_configmap_controller"} 2025-12-08T17:44:24.948350530+00:00 stderr F 2025-12-08T17:44:24.947Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "error_page_configmap_controller", "worker count": 1} 2025-12-08T17:44:25.057946759+00:00 stderr F 2025-12-08T17:44:25.054Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "status_controller"} 2025-12-08T17:44:25.057946759+00:00 stderr F 2025-12-08T17:44:25.054Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "status_controller", "worker count": 1} 2025-12-08T17:44:25.057946759+00:00 stderr F 2025-12-08T17:44:25.054Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.057946759+00:00 stderr F 2025-12-08T17:44:25.054Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingress {"name": "default", "related": ""} 2025-12-08T17:44:25.057946759+00:00 stderr F 2025-12-08T17:44:25.055Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "ingress_controller"} 2025-12-08T17:44:25.057946759+00:00 stderr F 2025-12-08T17:44:25.055Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "ingress_controller", "worker count": 1} 2025-12-08T17:44:25.057946759+00:00 stderr F 2025-12-08T17:44:25.055Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.084787052+00:00 stderr F 2025-12-08T17:44:25.083Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "configurable_route_controller"} 2025-12-08T17:44:25.084787052+00:00 stderr F 2025-12-08T17:44:25.083Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "configurable_route_controller", "worker count": 1} 2025-12-08T17:44:25.084787052+00:00 stderr F 2025-12-08T17:44:25.083Z INFO 
operator.configurable_route_controller controller/controller.go:119 reconciling {"request": {"name":"cluster"}} 2025-12-08T17:44:25.246961236+00:00 stderr F 2025-12-08T17:44:25.245Z ERROR operator.init controller/controller.go:294 Reconciler error {"controller": "status_controller", "object": {"name":"default","namespace":"openshift-ingress-operator"}, "namespace": "openshift-ingress-operator", "name": "default", "reconcileID": "c1aa7bc2-4ddb-4a18-b13d-5221355e06e9", "error": "failed to get operator state: failed to list gateway classes: Index with name field:gatewayclassController does not exist"} 2025-12-08T17:44:25.254045348+00:00 stderr F 2025-12-08T17:44:25.252Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.278974968+00:00 stderr F 2025-12-08T17:44:25.277Z ERROR operator.init controller/controller.go:294 Reconciler error {"controller": "status_controller", "object": {"name":"default","namespace":"openshift-ingress-operator"}, "namespace": "openshift-ingress-operator", "name": "default", "reconcileID": "6663dbd6-e7cc-445c-84e9-cbf7af076329", "error": "failed to get operator state: failed to list gateway classes: Index with name field:gatewayclassController does not exist"} 2025-12-08T17:44:25.289047894+00:00 stderr F 2025-12-08T17:44:25.288Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.307332872+00:00 stderr F 2025-12-08T17:44:25.306Z ERROR operator.init controller/controller.go:294 Reconciler error {"controller": "status_controller", "object": {"name":"default","namespace":"openshift-ingress-operator"}, "namespace": "openshift-ingress-operator", "name": "default", "reconcileID": "026f045e-0d65-460b-a0a3-fa4c1a5a038d", "error": "failed to get operator state: failed to list gateway classes: Index with name field:gatewayclassController does not exist"} 2025-12-08T17:44:25.329377583+00:00 stderr F 2025-12-08T17:44:25.327Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.385794202+00:00 stderr F 2025-12-08T17:44:25.383Z ERROR operator.init controller/controller.go:294 Reconciler error {"controller": "status_controller", "object": {"name":"default","namespace":"openshift-ingress-operator"}, "namespace": "openshift-ingress-operator", "name": "default", "reconcileID": "e27a98ca-67d7-4eb7-b42c-79d11832544e", "error": "failed to get operator state: failed to list gateway classes: Index with name field:gatewayclassController does not exist"} 2025-12-08T17:44:25.423753968+00:00 stderr F 2025-12-08T17:44:25.423Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.440891995+00:00 stderr F 2025-12-08T17:44:25.440Z INFO operator.ingressclass_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.444805572+00:00 stderr F 2025-12-08T17:44:25.444Z ERROR operator.ingress_controller controller/controller.go:119 got retryable error; requeueing {"after": "1m0s", "error": "IngressController is degraded: DeploymentReplicasAllAvailable=False (DeploymentReplicasNotAvailable: 0/1 of replicas are available)"} 2025-12-08T17:44:25.444868863+00:00 stderr F 
2025-12-08T17:44:25.444Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.445389368+00:00 stderr F 2025-12-08T17:44:25.445Z ERROR operator.init controller/controller.go:294 Reconciler error {"controller": "status_controller", "object": {"name":"default","namespace":"openshift-ingress-operator"}, "namespace": "openshift-ingress-operator", "name": "default", "reconcileID": "9f919e65-5c65-49cd-9c7f-a6d4ac5a3778", "error": "failed to get operator state: failed to list gateway classes: Index with name field:gatewayclassController does not exist"} 2025-12-08T17:44:25.445416989+00:00 stderr F 2025-12-08T17:44:25.445Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.487896218+00:00 stderr F 2025-12-08T17:44:25.487Z ERROR operator.init controller/controller.go:294 Reconciler error {"controller": "status_controller", "object": {"name":"default","namespace":"openshift-ingress-operator"}, "namespace": "openshift-ingress-operator", "name": "default", "reconcileID": "4f8acb3c-5f0f-4d37-b2ae-e736c83b7db0", "error": "failed to get operator state: failed to list gateway classes: Index with name field:gatewayclassController does not exist"} 2025-12-08T17:44:25.529102231+00:00 stderr F 2025-12-08T17:44:25.525Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.551050720+00:00 stderr F 2025-12-08T17:44:25.549Z ERROR operator.init controller/controller.go:294 Reconciler error {"controller": "status_controller", "object": {"name":"default","namespace":"openshift-ingress-operator"}, "namespace": "openshift-ingress-operator", "name": "default", "reconcileID": "04b75202-7ecf-4f21-a175-f590f4db87f1", "error": "failed to get operator state: failed to list gateway classes: Index with name field:gatewayclassController does not exist"} 2025-12-08T17:44:25.601453905+00:00 stderr F 2025-12-08T17:44:25.596Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "gateway_labeler_controller", "source": "kind source: *v1.Gateway"} 2025-12-08T17:44:25.601453905+00:00 stderr F 2025-12-08T17:44:25.596Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "gatewayclass_controller", "source": "kind source: *v1.CustomResourceDefinition"} 2025-12-08T17:44:25.601453905+00:00 stderr F 2025-12-08T17:44:25.597Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "service_dns_controller", "source": "kind source: *v1.DNSRecord"} 2025-12-08T17:44:25.601453905+00:00 stderr F 2025-12-08T17:44:25.597Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "gateway_labeler_controller", "source": "kind source: *v1.GatewayClass"} 2025-12-08T17:44:25.601453905+00:00 stderr F 2025-12-08T17:44:25.597Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "gatewayclass_controller", "source": "kind source: *v1.GatewayClass"} 2025-12-08T17:44:25.601453905+00:00 stderr F 2025-12-08T17:44:25.597Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "gatewayclass_controller", "source": "kind source: *v1alpha1.Subscription"} 2025-12-08T17:44:25.601453905+00:00 stderr F 2025-12-08T17:44:25.597Z INFO operator.init runtime/asm_amd64.s:1700 Starting 
EventSource {"controller": "gatewayclass_controller", "source": "kind source: *v1alpha1.InstallPlan"} 2025-12-08T17:44:25.601453905+00:00 stderr F 2025-12-08T17:44:25.597Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "service_dns_controller", "source": "kind source: *v1.Gateway"} 2025-12-08T17:44:25.601453905+00:00 stderr F 2025-12-08T17:44:25.597Z INFO operator.init runtime/asm_amd64.s:1700 Starting EventSource {"controller": "service_dns_controller", "source": "kind source: *v1.Service"} 2025-12-08T17:44:25.662403027+00:00 stderr F 2025-12-08T17:44:25.661Z INFO operator.certificate_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.669284746+00:00 stderr F 2025-12-08T17:44:25.668Z ERROR operator.ingress_controller controller/controller.go:119 got retryable error; requeueing {"after": "1m0s", "error": "IngressController is degraded: DeploymentReplicasAllAvailable=False (DeploymentReplicasNotAvailable: 0/1 of replicas are available)"} 2025-12-08T17:44:25.719339200+00:00 stderr F 2025-12-08T17:44:25.717Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "gateway_labeler_controller"} 2025-12-08T17:44:25.719339200+00:00 stderr F 2025-12-08T17:44:25.717Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "gateway_labeler_controller", "worker count": 1} 2025-12-08T17:44:25.871682586+00:00 stderr F 2025-12-08T17:44:25.870Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:25.905383745+00:00 stderr F 2025-12-08T17:44:25.905Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:26.210971361+00:00 stderr F 2025-12-08T17:44:26.209Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "service_dns_controller"} 2025-12-08T17:44:26.210971361+00:00 stderr F 2025-12-08T17:44:26.210Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "service_dns_controller", "worker count": 1} 2025-12-08T17:44:26.210971361+00:00 stderr F 2025-12-08T17:44:26.210Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "gatewayclass_controller"} 2025-12-08T17:44:26.210971361+00:00 stderr F 2025-12-08T17:44:26.210Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "gatewayclass_controller", "worker count": 1} 2025-12-08T17:44:34.548581325+00:00 stderr F 2025-12-08T17:44:34.547Z INFO operator.route_metrics_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default"} 2025-12-08T17:44:34.548581325+00:00 stderr F 2025-12-08T17:44:34.548Z INFO operator.route_metrics_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default"} 2025-12-08T17:44:34.548581325+00:00 stderr F 2025-12-08T17:44:34.548Z INFO operator.route_metrics_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default"} 2025-12-08T17:44:34.548624056+00:00 stderr F 2025-12-08T17:44:34.548Z INFO operator.route_metrics_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default"} 2025-12-08T17:44:34.548624056+00:00 stderr F 2025-12-08T17:44:34.548Z INFO operator.route_metrics_controller handler/enqueue_mapped.go:140 queueing ingresscontroller 
{"name": "default"} 2025-12-08T17:44:34.548624056+00:00 stderr F 2025-12-08T17:44:34.548Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "route_metrics_controller"} 2025-12-08T17:44:34.548624056+00:00 stderr F 2025-12-08T17:44:34.548Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "route_metrics_controller", "worker count": 1} 2025-12-08T17:44:34.548717378+00:00 stderr F 2025-12-08T17:44:34.548Z INFO operator.route_metrics_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:35.053580919+00:00 stderr F 2025-12-08T17:44:35.053Z INFO operator.init controller/controller.go:262 Starting Controller {"controller": "canary_controller"} 2025-12-08T17:44:35.053580919+00:00 stderr F 2025-12-08T17:44:35.053Z INFO operator.init controller/controller.go:262 Starting workers {"controller": "canary_controller", "worker count": 1} 2025-12-08T17:44:35.088318308+00:00 stderr F 2025-12-08T17:44:35.088Z ERROR operator.canary_controller wait/backoff.go:233 error performing canary route check {"error": "error sending canary HTTP request: DNS error: Get \"https://canary-openshift-ingress-canary.apps-crc.testing\": dial tcp: lookup canary-openshift-ingress-canary.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.39:34537->10.217.4.10:53: read: connection refused"} 2025-12-08T17:44:38.407980901+00:00 stderr F 2025-12-08T17:44:38.407Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingress {"name": "default", "related": ""} 2025-12-08T17:44:38.407980901+00:00 stderr F 2025-12-08T17:44:38.407Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingress {"name": "default", "related": ""} 2025-12-08T17:44:38.408025522+00:00 stderr F 2025-12-08T17:44:38.407Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:44:38.474773420+00:00 stderr F 2025-12-08T17:44:38.474Z ERROR operator.ingress_controller controller/controller.go:119 got retryable error; requeueing {"after": "1m0s", "error": "IngressController is degraded: DeploymentReplicasAllAvailable=False (DeploymentReplicasNotAvailable: 0/1 of replicas are available)"} 2025-12-08T17:45:08.424915061+00:00 stderr F 2025-12-08T17:45:08.423Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingress {"name": "default", "related": ""} 2025-12-08T17:45:08.424915061+00:00 stderr F 2025-12-08T17:45:08.423Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingress {"name": "default", "related": ""} 2025-12-08T17:45:08.424915061+00:00 stderr F 2025-12-08T17:45:08.423Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:45:08.527131425+00:00 stderr F 2025-12-08T17:45:08.526Z INFO operator.route_metrics_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:45:08.527541406+00:00 stderr F 2025-12-08T17:45:08.527Z INFO operator.ingressclass_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:45:08.527740273+00:00 stderr F 2025-12-08T17:45:08.527Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": 
{"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:45:08.528140014+00:00 stderr F 2025-12-08T17:45:08.528Z INFO operator.certificate_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:45:08.528685979+00:00 stderr F 2025-12-08T17:45:08.528Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:45:08.552000107+00:00 stderr F 2025-12-08T17:45:08.551Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:45:25.449225342+00:00 stderr F 2025-12-08T17:45:25.448Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:46:24.514116802+00:00 stderr F 2025-12-08T17:46:24.513Z ERROR operator.init wait/backoff.go:233 failed to fetch ingress config {"error": "Get \"https://10.217.4.1:443/apis/config.openshift.io/v1/ingresses/cluster\": dial tcp 10.217.4.1:443: connect: connection refused"} 2025-12-08T17:47:10.420262436+00:00 stderr F 2025-12-08T17:47:10.419Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:47:10.420262436+00:00 stderr F 2025-12-08T17:47:10.419Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:47:10.420262436+00:00 stderr F 2025-12-08T17:47:10.420Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:11.090462894+00:00 stderr F 2025-12-08T17:47:11.090Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:47:11.090532096+00:00 stderr F 2025-12-08T17:47:11.090Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:47:11.090554087+00:00 stderr F 2025-12-08T17:47:11.090Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:14.873106388+00:00 stderr F 2025-12-08T17:47:14.872Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingress {"name": "default", "related": ""} 2025-12-08T17:47:14.873106388+00:00 stderr F 2025-12-08T17:47:14.873Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingress {"name": "default", "related": ""} 2025-12-08T17:47:14.873164879+00:00 stderr F 2025-12-08T17:47:14.873Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:16.516743198+00:00 stderr F 2025-12-08T17:47:16.516Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:47:16.516782239+00:00 stderr F 2025-12-08T17:47:16.516Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:47:16.516815630+00:00 stderr F 2025-12-08T17:47:16.516Z INFO operator.ingress_controller controller/controller.go:119 reconciling 
{"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:17.947869279+00:00 stderr F 2025-12-08T17:47:17.947Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:26.721439693+00:00 stderr F 2025-12-08T17:47:26.720Z INFO operator.gatewayapi_controller controller/controller.go:119 reconciling {"request": {"name":"cluster"}} 2025-12-08T17:47:26.992776025+00:00 stderr F 2025-12-08T17:47:26.992Z INFO operator.gatewayapi_controller controller/controller.go:119 reconciling {"request": {"name":"cluster"}} 2025-12-08T17:47:32.800661542+00:00 stderr F 2025-12-08T17:47:32.799Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingress {"name": "default", "related": ""} 2025-12-08T17:47:32.800661542+00:00 stderr F 2025-12-08T17:47:32.800Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingress {"name": "default", "related": ""} 2025-12-08T17:47:32.800741044+00:00 stderr F 2025-12-08T17:47:32.800Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:44.438532989+00:00 stderr F 2025-12-08T17:47:44.437Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:47:44.438532989+00:00 stderr F 2025-12-08T17:47:44.438Z INFO operator.ingress_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default", "related": ""} 2025-12-08T17:47:44.438612792+00:00 stderr F 2025-12-08T17:47:44.437Z INFO operator.configurable_route_controller controller/controller.go:119 reconciling {"request": {"name":"cluster"}} 2025-12-08T17:47:44.438612792+00:00 stderr F 2025-12-08T17:47:44.438Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:45.864671172+00:00 stderr F 2025-12-08T17:47:45.864Z INFO operator.certificate_publisher_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default"} 2025-12-08T17:47:45.864671172+00:00 stderr F 2025-12-08T17:47:45.864Z INFO operator.certificate_publisher_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default"} 2025-12-08T17:47:45.864671172+00:00 stderr F 2025-12-08T17:47:45.864Z INFO operator.certificate_publisher_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:47.220957887+00:00 stderr F 2025-12-08T17:47:47.220Z INFO operator.ingressclass_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:53.448504411+00:00 stderr F 2025-12-08T17:47:53.447Z INFO operator.ingressclass_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:53.448679866+00:00 stderr F 2025-12-08T17:47:53.448Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 2025-12-08T17:47:53.448899143+00:00 stderr F 2025-12-08T17:47:53.448Z INFO operator.certificate_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}} 
2025-12-08T17:47:53.449072538+00:00 stderr F 2025-12-08T17:47:53.448Z INFO operator.ingress_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}}
2025-12-08T17:47:53.449206112+00:00 stderr F 2025-12-08T17:47:53.448Z INFO operator.route_metrics_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}}
2025-12-08T17:47:57.573572896+00:00 stderr F 2025-12-08T17:47:57.573Z INFO operator.gatewayapi_controller controller/controller.go:119 reconciling {"request": {"name":"cluster"}}
2025-12-08T17:54:52.086601669+00:00 stderr F 2025-12-08T17:54:52.085Z INFO operator.status_controller controller/controller.go:119 Reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}}
2025-12-08T17:57:28.612165174+00:00 stderr F 2025-12-08T17:57:28.611Z INFO operator.route_metrics_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default"}
2025-12-08T17:57:28.612261626+00:00 stderr F 2025-12-08T17:57:28.612Z INFO operator.route_metrics_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}}
2025-12-08T17:57:40.477854363+00:00 stderr F 2025-12-08T17:57:40.475Z INFO operator.route_metrics_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default"}
2025-12-08T17:57:40.477854363+00:00 stderr F 2025-12-08T17:57:40.476Z INFO operator.route_metrics_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}}
2025-12-08T17:57:56.703461155+00:00 stderr F 2025-12-08T17:57:56.702Z INFO operator.route_metrics_controller handler/enqueue_mapped.go:140 queueing ingresscontroller {"name": "default"}
2025-12-08T17:57:56.703505656+00:00 stderr F 2025-12-08T17:57:56.703Z INFO operator.route_metrics_controller controller/controller.go:119 reconciling {"request": {"name":"default","namespace":"openshift-ingress-operator"}}
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_infrawatch-operators-tv99j_020b4835-c362-478d-b714-bb42757ae9e2/registry-server/0.log
2025-12-08T17:56:14.396124737+00:00 stdout F time="2025-12-08T17:56:14Z" level=info msg="Keeping server open for infinite seconds" database=/database/index.db port=50051
2025-12-08T17:56:14.396124737+00:00 stdout F time="2025-12-08T17:56:14Z" level=info msg="serving registry" database=/database/index.db port=50051
2025-12-08T17:56:14.396294252+00:00 stderr F time="2025-12-08T17:56:14Z" level=warning msg="\x1b[1;33mDEPRECATION NOTICE:\nSqlite-based catalogs and their related subcommands are deprecated. Support for\nthem will be removed in a future release. Please migrate your catalog workflows\nto the new file-based catalog format.\x1b[0m"
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console-operator_console-operator-67c89758df-79mps_2e8b3e0b-d963-4522-9a08-71aee0979479/console-operator/0.log
2025-12-08T17:44:23.881722535+00:00 stderr F I1208 17:44:23.874640 1 cmd.go:253] Using service-serving-cert provided certificates
2025-12-08T17:44:23.881722535+00:00 stderr F I1208 17:44:23.877868 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.
2025-12-08T17:44:23.883957516+00:00 stderr F I1208 17:44:23.883916 1 observer_polling.go:159] Starting file observer
2025-12-08T17:44:24.014087837+00:00 stderr F I1208 17:44:24.010367 1 builder.go:304] console-operator version -
2025-12-08T17:44:25.297989197+00:00 stderr F I1208 17:44:25.297456 1 secure_serving.go:57] Forcing use of http/1.1 only
2025-12-08T17:44:25.297989197+00:00 stderr F W1208 17:44:25.297973 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.
2025-12-08T17:44:25.297989197+00:00 stderr F W1208 17:44:25.297978 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.
2025-12-08T17:44:25.297989197+00:00 stderr F W1208 17:44:25.297982 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.
2025-12-08T17:44:25.297989197+00:00 stderr F W1208 17:44:25.297985 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.
2025-12-08T17:44:25.298018598+00:00 stderr F W1208 17:44:25.297988 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.
2025-12-08T17:44:25.298018598+00:00 stderr F W1208 17:44:25.297990 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.
2025-12-08T17:44:25.302109889+00:00 stderr F I1208 17:44:25.302043 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:25.302715987+00:00 stderr F I1208 17:44:25.302684 1 leaderelection.go:257] attempting to acquire leader lease openshift-console-operator/console-operator-lock... 2025-12-08T17:44:25.302782288+00:00 stderr F I1208 17:44:25.302758 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:25.303064996+00:00 stderr F I1208 17:44:25.303026 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:25.303135608+00:00 stderr F I1208 17:44:25.303113 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:25.303135608+00:00 stderr F I1208 17:44:25.303119 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:25.303187199+00:00 stderr F I1208 17:44:25.303170 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:25.303187199+00:00 stderr F I1208 17:44:25.303182 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:25.303298162+00:00 stderr F I1208 17:44:25.303282 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:25.303358364+00:00 stderr F I1208 17:44:25.303345 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:25.303492347+00:00 stderr F I1208 17:44:25.303477 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:25.330750501+00:00 stderr F I1208 17:44:25.330642 1 leaderelection.go:271] successfully acquired lease openshift-console-operator/console-operator-lock 2025-12-08T17:44:25.334913654+00:00 stderr F I1208 17:44:25.333980 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-console-operator", Name:"console-operator-lock", UID:"3a868ea0-4081-4a86-9897-df432a3a5d1a", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37579", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' console-operator-67c89758df-79mps_0236bd99-8e70-4539-b34e-6298af745dd0 became leader 2025-12-08T17:44:25.349648456+00:00 stderr F I1208 17:44:25.349610 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:25.370212868+00:00 stderr F I1208 17:44:25.369692 1 starter.go:212] FeatureGates initialized: knownFeatureGates=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus 
UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:25.370350871+00:00 stderr F I1208 17:44:25.370305 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", 
"ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:25.405313684+00:00 stderr F I1208 17:44:25.405256 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:25.405440758+00:00 stderr F I1208 17:44:25.405422 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:25.406116267+00:00 stderr F I1208 17:44:25.406074 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:25.478088600+00:00 stderr F I1208 17:44:25.476387 1 base_controller.go:76] Waiting for caches to sync for RemoveStaleConditionsController-RemoveStaleConditions 2025-12-08T17:44:25.478088600+00:00 stderr F I1208 17:44:25.477848 1 base_controller.go:76] Waiting for caches to sync for InformerWithSwitchController 2025-12-08T17:44:25.478088600+00:00 stderr F I1208 17:44:25.477855 1 base_controller.go:82] Caches are synced for InformerWithSwitchController 2025-12-08T17:44:25.478088600+00:00 stderr F I1208 17:44:25.477860 1 base_controller.go:119] Starting #1 worker of InformerWithSwitchController controller ... 
2025-12-08T17:44:25.478088600+00:00 stderr F I1208 17:44:25.477966 1 base_controller.go:76] Waiting for caches to sync for PodDisruptionBudgetController 2025-12-08T17:44:25.478088600+00:00 stderr F I1208 17:44:25.478028 1 base_controller.go:76] Waiting for caches to sync for console 2025-12-08T17:44:25.478120640+00:00 stderr F I1208 17:44:25.478100 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:25.478133341+00:00 stderr F I1208 17:44:25.478119 1 base_controller.go:76] Waiting for caches to sync for console-ManagementState 2025-12-08T17:44:25.478196353+00:00 stderr F I1208 17:44:25.478143 1 base_controller.go:76] Waiting for caches to sync for UnsupportedConfigOverridesController-UnsupportedConfigOverrides 2025-12-08T17:44:25.504467480+00:00 stderr F I1208 17:44:25.504424 1 base_controller.go:76] Waiting for caches to sync for ConsoleServiceController 2025-12-08T17:44:25.504536332+00:00 stderr F I1208 17:44:25.504526 1 base_controller.go:76] Waiting for caches to sync for ConsoleRouteController 2025-12-08T17:44:25.504561342+00:00 stderr F I1208 17:44:25.504551 1 base_controller.go:76] Waiting for caches to sync for ConsoleServiceController 2025-12-08T17:44:25.504591133+00:00 stderr F I1208 17:44:25.504580 1 base_controller.go:76] Waiting for caches to sync for DownloadsRouteController 2025-12-08T17:44:25.504619904+00:00 stderr F I1208 17:44:25.504610 1 base_controller.go:76] Waiting for caches to sync for ConsoleOperator 2025-12-08T17:44:25.504646955+00:00 stderr F I1208 17:44:25.504638 1 base_controller.go:76] Waiting for caches to sync for ConsoleCLIDownloadsController 2025-12-08T17:44:25.504669775+00:00 stderr F I1208 17:44:25.504661 1 base_controller.go:76] Waiting for caches to sync for ConsoleDownloadsDeploymentSyncController 2025-12-08T17:44:25.504692286+00:00 stderr F I1208 17:44:25.504684 1 base_controller.go:76] Waiting for caches to sync for HealthCheckController 2025-12-08T17:44:25.504834740+00:00 stderr F I1208 17:44:25.504824 1 base_controller.go:76] Waiting for caches to sync for PodDisruptionBudgetController 2025-12-08T17:44:25.504863700+00:00 stderr F I1208 17:44:25.504855 1 base_controller.go:76] Waiting for caches to sync for OAuthClientsController 2025-12-08T17:44:25.504907082+00:00 stderr F I1208 17:44:25.504897 1 base_controller.go:76] Waiting for caches to sync for OAuthClientSecretController 2025-12-08T17:44:25.504934662+00:00 stderr F I1208 17:44:25.504926 1 base_controller.go:76] Waiting for caches to sync for OIDCSetupController 2025-12-08T17:44:25.504957143+00:00 stderr F I1208 17:44:25.504949 1 base_controller.go:76] Waiting for caches to sync for CLIOIDCClientStatusController 2025-12-08T17:44:25.504979173+00:00 stderr F I1208 17:44:25.504971 1 base_controller.go:76] Waiting for caches to sync for ClusterUpgradeNotificationController 2025-12-08T17:44:25.505000314+00:00 stderr F I1208 17:44:25.504992 1 base_controller.go:82] Caches are synced for ClusterUpgradeNotificationController 2025-12-08T17:44:25.505023595+00:00 stderr F I1208 17:44:25.505014 1 base_controller.go:119] Starting #1 worker of ClusterUpgradeNotificationController controller ... 
2025-12-08T17:44:25.509518317+00:00 stderr F E1208 17:44:25.509474 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: console.operator.openshift.io \"cluster\" not found" 2025-12-08T17:44:25.525413320+00:00 stderr F I1208 17:44:25.525374 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_console 2025-12-08T17:44:25.552051508+00:00 stderr F E1208 17:44:25.551996 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: console.operator.openshift.io \"cluster\" not found" 2025-12-08T17:44:25.564953229+00:00 stderr F E1208 17:44:25.563408 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" reflector="github.com/openshift/client-go/route/informers/externalversions/factory.go:125" type="*v1.Route" 2025-12-08T17:44:25.564953229+00:00 stderr F E1208 17:44:25.563451 1 reflector.go:200] "Failed to watch" err="failed to list *v1.OAuthClient: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io)" reflector="github.com/openshift/console-operator/pkg/console/controllers/util/informers.go:106" type="*v1.OAuthClient" 2025-12-08T17:44:25.564953229+00:00 stderr F E1208 17:44:25.563987 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: console.operator.openshift.io \"cluster\" not found" 2025-12-08T17:44:25.584747409+00:00 stderr F I1208 17:44:25.584374 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:25.584747409+00:00 stderr F I1208 17:44:25.584397 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 2025-12-08T17:44:25.584747409+00:00 stderr F I1208 17:44:25.584429 1 base_controller.go:82] Caches are synced for RemoveStaleConditionsController-RemoveStaleConditions 2025-12-08T17:44:25.584747409+00:00 stderr F I1208 17:44:25.584454 1 base_controller.go:119] Starting #1 worker of RemoveStaleConditionsController-RemoveStaleConditions controller ... 2025-12-08T17:44:25.584747409+00:00 stderr F I1208 17:44:25.584725 1 base_controller.go:82] Caches are synced for console-ManagementState 2025-12-08T17:44:25.584747409+00:00 stderr F I1208 17:44:25.584731 1 base_controller.go:119] Starting #1 worker of console-ManagementState controller ... 
2025-12-08T17:44:25.588284466+00:00 stderr F I1208 17:44:25.585910 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.588284466+00:00 stderr F I1208 17:44:25.585993 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.588284466+00:00 stderr F I1208 17:44:25.587588 1 reflector.go:430] "Caches populated" type="*v1.Console" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125" 2025-12-08T17:44:25.588284466+00:00 stderr F I1208 17:44:25.587919 1 reflector.go:430] "Caches populated" type="*v1.Console" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:25.588284466+00:00 stderr F I1208 17:44:25.588112 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.590305260+00:00 stderr F I1208 17:44:25.589062 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:25.594562897+00:00 stderr F I1208 17:44:25.592604 1 base_controller.go:82] Caches are synced for UnsupportedConfigOverridesController-UnsupportedConfigOverrides 2025-12-08T17:44:25.594562897+00:00 stderr F I1208 17:44:25.592620 1 base_controller.go:119] Starting #1 worker of UnsupportedConfigOverridesController-UnsupportedConfigOverrides controller ... 2025-12-08T17:44:25.622170840+00:00 stderr F I1208 17:44:25.622126 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.622532980+00:00 stderr F I1208 17:44:25.622513 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.623443354+00:00 stderr F I1208 17:44:25.623414 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.651547941+00:00 stderr F I1208 17:44:25.637119 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.651632133+00:00 stderr F I1208 17:44:25.651608 1 base_controller.go:82] Caches are synced for PodDisruptionBudgetController 2025-12-08T17:44:25.651661904+00:00 stderr F I1208 17:44:25.651651 1 base_controller.go:119] Starting #1 worker of PodDisruptionBudgetController controller ... 2025-12-08T17:44:25.653189076+00:00 stderr F I1208 17:44:25.653172 1 base_controller.go:82] Caches are synced for CLIOIDCClientStatusController 2025-12-08T17:44:25.653223907+00:00 stderr F I1208 17:44:25.653214 1 base_controller.go:119] Starting #1 worker of CLIOIDCClientStatusController controller ... 2025-12-08T17:44:25.653283099+00:00 stderr F I1208 17:44:25.653270 1 base_controller.go:82] Caches are synced for ConsoleServiceController 2025-12-08T17:44:25.653310960+00:00 stderr F I1208 17:44:25.653301 1 base_controller.go:119] Starting #1 worker of ConsoleServiceController controller ... 2025-12-08T17:44:25.653693290+00:00 stderr F I1208 17:44:25.653649 1 base_controller.go:82] Caches are synced for ConsoleServiceController 2025-12-08T17:44:25.653704690+00:00 stderr F I1208 17:44:25.653693 1 base_controller.go:119] Starting #1 worker of ConsoleServiceController controller ... 
2025-12-08T17:44:25.653784852+00:00 stderr F I1208 17:44:25.653760 1 base_controller.go:82] Caches are synced for ConsoleDownloadsDeploymentSyncController 2025-12-08T17:44:25.653784852+00:00 stderr F I1208 17:44:25.653775 1 base_controller.go:119] Starting #1 worker of ConsoleDownloadsDeploymentSyncController controller ... 2025-12-08T17:44:25.659072657+00:00 stderr F I1208 17:44:25.658620 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:25.660015632+00:00 stderr F I1208 17:44:25.659997 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.669033908+00:00 stderr F I1208 17:44:25.666913 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.669033908+00:00 stderr F I1208 17:44:25.667600 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.669033908+00:00 stderr F I1208 17:44:25.668408 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.678358033+00:00 stderr F I1208 17:44:25.678299 1 base_controller.go:82] Caches are synced for PodDisruptionBudgetController 2025-12-08T17:44:25.678419094+00:00 stderr F I1208 17:44:25.678409 1 base_controller.go:119] Starting #1 worker of PodDisruptionBudgetController controller ... 2025-12-08T17:44:25.725035385+00:00 stderr F I1208 17:44:25.722731 1 base_controller.go:82] Caches are synced for OIDCSetupController 2025-12-08T17:44:25.725035385+00:00 stderr F I1208 17:44:25.722755 1 base_controller.go:119] Starting #1 worker of OIDCSetupController controller ... 2025-12-08T17:44:25.725772436+00:00 stderr F I1208 17:44:25.725739 1 base_controller.go:82] Caches are synced for StatusSyncer_console 2025-12-08T17:44:25.725792707+00:00 stderr F I1208 17:44:25.725768 1 base_controller.go:119] Starting #1 worker of StatusSyncer_console controller ... 
2025-12-08T17:44:25.732946801+00:00 stderr F I1208 17:44:25.731433 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:25.732946801+00:00 stderr F I1208 17:44:25.731709 1 status_controller.go:230] clusteroperator/console diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"ConsoleCustomRouteSyncDegraded: the server is currently unable to handle the request (delete routes.route.openshift.io console-custom)\nDownloadsCustomRouteSyncDegraded: the server is currently unable to handle the request (delete routes.route.openshift.io downloads-custom)\nOAuthClientSyncDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nOAuthClientsControllerDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nRouteHealthDegraded: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465-\u003e10.217.4.10:53: read: connection refused","reason":"ConsoleCustomRouteSync_FailedDeleteCustomRoutes::DownloadsCustomRouteSync_FailedDeleteCustomRoutes::OAuthClientSync_FailedRegister::OAuthClientsController_SyncError::RouteHealth_FailedGet","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"SyncLoopRefreshProgressing: working toward version 4.20.1, 0 replicas available","reason":"SyncLoopRefresh_InProgress","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"DeploymentAvailable: 0 replicas available for console deployment\nRouteHealthAvailable: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465-\u003e10.217.4.10:53: read: connection refused","reason":"Deployment_InsufficientReplicas::RouteHealth_FailedGet","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-03T09:40:49Z","message":"ConsoleCustomRouteSyncUpgradeable: the server is currently unable to handle the request (delete routes.route.openshift.io console-custom)\nDownloadsCustomRouteSyncUpgradeable: the server is currently unable to handle the request (delete routes.route.openshift.io downloads-custom)","reason":"ConsoleCustomRouteSync_FailedDeleteCustomRoutes::DownloadsCustomRouteSync_FailedDeleteCustomRoutes","status":"False","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:16Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:25.764820741+00:00 stderr F I1208 17:44:25.764101 1 base_controller.go:82] Caches are synced for OAuthClientSecretController 2025-12-08T17:44:25.764820741+00:00 stderr F I1208 17:44:25.764426 1 base_controller.go:119] Starting #1 worker of OAuthClientSecretController controller ... 
2025-12-08T17:44:25.766592949+00:00 stderr F I1208 17:44:25.765240 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/console changed: Degraded changed from False to True ("ConsoleCustomRouteSyncDegraded: the server is currently unable to handle the request (delete routes.route.openshift.io console-custom)\nDownloadsCustomRouteSyncDegraded: the server is currently unable to handle the request (delete routes.route.openshift.io downloads-custom)\nOAuthClientSyncDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nOAuthClientsControllerDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nRouteHealthDegraded: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465->10.217.4.10:53: read: connection refused") 2025-12-08T17:44:25.780483328+00:00 stderr F I1208 17:44:25.780023 1 base_controller.go:82] Caches are synced for console 2025-12-08T17:44:25.780483328+00:00 stderr F I1208 17:44:25.780113 1 base_controller.go:119] Starting #1 worker of console controller ... 2025-12-08T17:44:26.751049873+00:00 stderr F E1208 17:44:26.750357 1 reflector.go:200] "Failed to watch" err="failed to list *v1.OAuthClient: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io)" reflector="github.com/openshift/console-operator/pkg/console/controllers/util/informers.go:106" type="*v1.OAuthClient" 2025-12-08T17:44:27.113366245+00:00 stderr F E1208 17:44:27.111147 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" reflector="github.com/openshift/client-go/route/informers/externalversions/factory.go:125" type="*v1.Route" 2025-12-08T17:44:28.562761319+00:00 stderr F E1208 17:44:28.562196 1 reflector.go:200] "Failed to watch" err="failed to list *v1.OAuthClient: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io)" reflector="github.com/openshift/console-operator/pkg/console/controllers/util/informers.go:106" type="*v1.OAuthClient" 2025-12-08T17:44:29.031415684+00:00 stderr F E1208 17:44:29.031053 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" reflector="github.com/openshift/client-go/route/informers/externalversions/factory.go:125" type="*v1.Route" 2025-12-08T17:44:30.609736485+00:00 stderr F I1208 17:44:30.609573 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.609475688 +0000 UTC))" 2025-12-08T17:44:30.609736485+00:00 stderr F I1208 17:44:30.609718 1 tlsconfig.go:181] "Loaded client CA" index=1 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.609703114 +0000 UTC))" 2025-12-08T17:44:30.609766326+00:00 stderr F I1208 17:44:30.609734 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.609725435 +0000 UTC))" 2025-12-08T17:44:30.609766326+00:00 stderr F I1208 17:44:30.609749 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.609740975 +0000 UTC))" 2025-12-08T17:44:30.609804897+00:00 stderr F I1208 17:44:30.609776 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.609763546 +0000 UTC))" 2025-12-08T17:44:30.609812837+00:00 stderr F I1208 17:44:30.609803 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.609791167 +0000 UTC))" 2025-12-08T17:44:30.609841088+00:00 stderr F I1208 17:44:30.609821 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.609809867 +0000 UTC))" 2025-12-08T17:44:30.609850588+00:00 stderr F I1208 17:44:30.609842 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.609830088 +0000 UTC))" 2025-12-08T17:44:30.609872819+00:00 stderr F I1208 17:44:30.609859 1 tlsconfig.go:181] "Loaded client CA" index=8 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.609848378 +0000 UTC))" 2025-12-08T17:44:30.609945101+00:00 stderr F I1208 17:44:30.609911 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.609867709 +0000 UTC))" 2025-12-08T17:44:30.610236699+00:00 stderr F I1208 17:44:30.610194 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-console-operator.svc\" [serving] validServingFor=[metrics.openshift-console-operator.svc,metrics.openshift-console-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:44:30.610172647 +0000 UTC))" 2025-12-08T17:44:30.610452834+00:00 stderr F I1208 17:44:30.610413 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215865\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215864\" (2025-12-08 16:44:24 +0000 UTC to 2028-12-08 16:44:24 +0000 UTC (now=2025-12-08 17:44:30.610392983 +0000 UTC))" 2025-12-08T17:44:32.187111611+00:00 stderr F I1208 17:44:32.186796 1 reflector.go:430] "Caches populated" type="*v1.OAuthClient" reflector="github.com/openshift/console-operator/pkg/console/controllers/util/informers.go:106" 2025-12-08T17:44:34.582897791+00:00 stderr F I1208 17:44:34.582232 1 reflector.go:430] "Caches populated" type="*v1.Route" reflector="github.com/openshift/client-go/route/informers/externalversions/factory.go:125" 2025-12-08T17:44:34.610523164+00:00 stderr F I1208 17:44:34.610456 1 base_controller.go:82] Caches are synced for ConsoleRouteController 2025-12-08T17:44:34.610523164+00:00 stderr F I1208 17:44:34.610483 1 base_controller.go:119] Starting #1 worker of ConsoleRouteController controller ... 2025-12-08T17:44:34.622052679+00:00 stderr F I1208 17:44:34.621993 1 base_controller.go:82] Caches are synced for ConsoleOperator 2025-12-08T17:44:34.622052679+00:00 stderr F I1208 17:44:34.622009 1 base_controller.go:82] Caches are synced for DownloadsRouteController 2025-12-08T17:44:34.622052679+00:00 stderr F I1208 17:44:34.622017 1 base_controller.go:119] Starting #1 worker of ConsoleOperator controller ... 2025-12-08T17:44:34.622052679+00:00 stderr F I1208 17:44:34.622021 1 base_controller.go:119] Starting #1 worker of DownloadsRouteController controller ... 
2025-12-08T17:44:34.636043980+00:00 stderr F E1208 17:44:34.636000 1 status.go:130] SyncLoopRefreshProgressing InProgress working toward version 4.20.1, 0 replicas available 2025-12-08T17:44:34.636094392+00:00 stderr F E1208 17:44:34.636085 1 status.go:130] DeploymentAvailable InsufficientReplicas 0 replicas available for console deployment 2025-12-08T17:44:34.647952166+00:00 stderr F I1208 17:44:34.647785 1 status_controller.go:230] clusteroperator/console diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"DownloadsCustomRouteSyncDegraded: the server is currently unable to handle the request (delete routes.route.openshift.io downloads-custom)\nOAuthClientSyncDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nOAuthClientsControllerDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nRouteHealthDegraded: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465-\u003e10.217.4.10:53: read: connection refused","reason":"DownloadsCustomRouteSync_FailedDeleteCustomRoutes::OAuthClientSync_FailedRegister::OAuthClientsController_SyncError::RouteHealth_FailedGet","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"SyncLoopRefreshProgressing: working toward version 4.20.1, 0 replicas available","reason":"SyncLoopRefresh_InProgress","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"DeploymentAvailable: 0 replicas available for console deployment\nRouteHealthAvailable: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465-\u003e10.217.4.10:53: read: connection refused","reason":"Deployment_InsufficientReplicas::RouteHealth_FailedGet","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-03T09:40:49Z","message":"DownloadsCustomRouteSyncUpgradeable: the server is currently unable to handle the request (delete routes.route.openshift.io downloads-custom)","reason":"DownloadsCustomRouteSync_FailedDeleteCustomRoutes","status":"False","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:16Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:34.650135475+00:00 stderr F I1208 17:44:34.650064 1 base_controller.go:82] Caches are synced for ConsoleCLIDownloadsController 2025-12-08T17:44:34.650204026+00:00 stderr F I1208 17:44:34.650190 1 base_controller.go:119] Starting #1 worker of ConsoleCLIDownloadsController controller ... 2025-12-08T17:44:34.650250878+00:00 stderr F I1208 17:44:34.650238 1 base_controller.go:82] Caches are synced for OAuthClientsController 2025-12-08T17:44:34.650282099+00:00 stderr F I1208 17:44:34.650270 1 base_controller.go:119] Starting #1 worker of OAuthClientsController controller ... 2025-12-08T17:44:34.650951898+00:00 stderr F I1208 17:44:34.650832 1 base_controller.go:82] Caches are synced for HealthCheckController 2025-12-08T17:44:34.650988009+00:00 stderr F I1208 17:44:34.650977 1 base_controller.go:119] Starting #1 worker of HealthCheckController controller ... 
2025-12-08T17:44:34.651622586+00:00 stderr F I1208 17:44:34.651582 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'FastControllerResync' Controller "HealthCheckController" resync interval is set to 30s which might lead to client request throttling 2025-12-08T17:44:34.660996841+00:00 stderr F E1208 17:44:34.660540 1 status.go:130] SyncLoopRefreshProgressing InProgress working toward version 4.20.1, 0 replicas available 2025-12-08T17:44:34.660996841+00:00 stderr F E1208 17:44:34.660570 1 status.go:130] DeploymentAvailable InsufficientReplicas 0 replicas available for console deployment 2025-12-08T17:44:34.663909580+00:00 stderr F I1208 17:44:34.663811 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/console changed: Degraded message changed from "ConsoleCustomRouteSyncDegraded: the server is currently unable to handle the request (delete routes.route.openshift.io console-custom)\nDownloadsCustomRouteSyncDegraded: the server is currently unable to handle the request (delete routes.route.openshift.io downloads-custom)\nOAuthClientSyncDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nOAuthClientsControllerDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nRouteHealthDegraded: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465->10.217.4.10:53: read: connection refused" to "DownloadsCustomRouteSyncDegraded: the server is currently unable to handle the request (delete routes.route.openshift.io downloads-custom)\nOAuthClientSyncDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nOAuthClientsControllerDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nRouteHealthDegraded: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465->10.217.4.10:53: read: connection refused",Upgradeable message changed from "ConsoleCustomRouteSyncUpgradeable: the server is currently unable to handle the request (delete routes.route.openshift.io console-custom)\nDownloadsCustomRouteSyncUpgradeable: the server is currently unable to handle the request (delete routes.route.openshift.io downloads-custom)" to "DownloadsCustomRouteSyncUpgradeable: the server is currently unable to handle the request (delete routes.route.openshift.io downloads-custom)" 2025-12-08T17:44:34.750269316+00:00 stderr F I1208 17:44:34.750193 1 status_controller.go:230] clusteroperator/console diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"RouteHealthDegraded: failed to GET route (https://console-openshift-console.apps-crc.testing): Get 
\"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465-\u003e10.217.4.10:53: read: connection refused","reason":"RouteHealth_FailedGet","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"SyncLoopRefreshProgressing: working toward version 4.20.1, 0 replicas available","reason":"SyncLoopRefresh_InProgress","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"DeploymentAvailable: 0 replicas available for console deployment\nRouteHealthAvailable: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465-\u003e10.217.4.10:53: read: connection refused","reason":"Deployment_InsufficientReplicas::RouteHealth_FailedGet","status":"False","type":"Available"},{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:16Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:34.761896143+00:00 stderr F I1208 17:44:34.759275 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/console changed: Degraded message changed from "DownloadsCustomRouteSyncDegraded: the server is currently unable to handle the request (delete routes.route.openshift.io downloads-custom)\nOAuthClientSyncDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nOAuthClientsControllerDegraded: the server is currently unable to handle the request (get oauthclients.oauth.openshift.io console)\nRouteHealthDegraded: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465->10.217.4.10:53: read: connection refused" to "RouteHealthDegraded: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465->10.217.4.10:53: read: connection refused",Upgradeable changed from False to True ("All is well") 2025-12-08T17:44:34.834400951+00:00 stderr F E1208 17:44:34.834324 1 status.go:130] SyncLoopRefreshProgressing InProgress working toward version 4.20.1, 0 replicas available 2025-12-08T17:44:34.834400951+00:00 stderr F E1208 17:44:34.834358 1 status.go:130] DeploymentAvailable InsufficientReplicas 0 replicas available for console deployment 2025-12-08T17:44:36.034418485+00:00 stderr F E1208 17:44:36.034347 1 status.go:130] SyncLoopRefreshProgressing InProgress working toward version 4.20.1, 0 replicas available 2025-12-08T17:44:36.034418485+00:00 stderr F E1208 17:44:36.034389 1 status.go:130] DeploymentAvailable InsufficientReplicas 0 replicas available for console deployment 2025-12-08T17:44:39.271343054+00:00 stderr F I1208 17:44:39.270409 1 status_controller.go:230] 
clusteroperator/console diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"RouteHealthDegraded: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465-\u003e10.217.4.10:53: read: connection refused","reason":"RouteHealth_FailedGet","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"RouteHealthAvailable: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465-\u003e10.217.4.10:53: read: connection refused","reason":"RouteHealth_FailedGet","status":"False","type":"Available"},{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:16Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:39.281483516+00:00 stderr F I1208 17:44:39.281124 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/console changed: Progressing changed from True to False ("All is well"),Available message changed from "DeploymentAvailable: 0 replicas available for console deployment\nRouteHealthAvailable: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465->10.217.4.10:53: read: connection refused" to "RouteHealthAvailable: failed to GET route (https://console-openshift-console.apps-crc.testing): Get \"https://console-openshift-console.apps-crc.testing\": dial tcp: lookup console-openshift-console.apps-crc.testing on 10.217.4.10:53: read udp 10.217.0.23:58465->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:43.230739114+00:00 stderr F I1208 17:44:43.230210 1 status_controller.go:230] clusteroperator/console diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:43Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:43Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:16Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:43.238134010+00:00 stderr F I1208 17:44:43.237447 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' 
reason: 'OperatorStatusChanged' Status for clusteroperator/console changed: Degraded changed from True to False ("All is well"),Available changed from False to True ("All is well") 2025-12-08T17:45:16.051945963+00:00 stderr F I1208 17:45:16.040767 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.040718761 +0000 UTC))" 2025-12-08T17:45:16.052038505+00:00 stderr F I1208 17:45:16.052010 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.051951563 +0000 UTC))" 2025-12-08T17:45:16.052068756+00:00 stderr F I1208 17:45:16.052055 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.052032965 +0000 UTC))" 2025-12-08T17:45:16.052103457+00:00 stderr F I1208 17:45:16.052085 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.052066616 +0000 UTC))" 2025-12-08T17:45:16.052137308+00:00 stderr F I1208 17:45:16.052117 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.052099427 +0000 UTC))" 2025-12-08T17:45:16.052189761+00:00 stderr F I1208 17:45:16.052149 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.052131958 +0000 UTC))" 2025-12-08T17:45:16.052189761+00:00 stderr F I1208 17:45:16.052181 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.052163669 
+0000 UTC))" 2025-12-08T17:45:16.052229462+00:00 stderr F I1208 17:45:16.052214 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.052191151 +0000 UTC))" 2025-12-08T17:45:16.052287343+00:00 stderr F I1208 17:45:16.052269 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.052224251 +0000 UTC))" 2025-12-08T17:45:16.052321834+00:00 stderr F I1208 17:45:16.052304 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.052288913 +0000 UTC))" 2025-12-08T17:45:16.052356985+00:00 stderr F I1208 17:45:16.052335 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.052318844 +0000 UTC))" 2025-12-08T17:45:16.052760176+00:00 stderr F I1208 17:45:16.052739 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-console-operator.svc\" [serving] validServingFor=[metrics.openshift-console-operator.svc,metrics.openshift-console-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:45:16.052713065 +0000 UTC))" 2025-12-08T17:45:16.053109006+00:00 stderr F I1208 17:45:16.053057 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215865\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215864\" (2025-12-08 16:44:24 +0000 UTC to 2028-12-08 16:44:24 +0000 UTC (now=2025-12-08 17:45:16.053034794 +0000 UTC))" 2025-12-08T17:46:25.357070844+00:00 stderr F E1208 17:46:25.356325 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-console-operator/leases/console-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:25.357983321+00:00 stderr F E1208 17:46:25.357940 1 leaderelection.go:436] error retrieving resource lock openshift-console-operator/console-operator-lock: Get 
"https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-console-operator/leases/console-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.507013074+00:00 stderr F E1208 17:46:25.506928 1 status.go:130] ConsoleNotificationSyncDegraded FailedDelete Delete "https://10.217.4.1:443/apis/console.openshift.io/v1/consolenotifications/cluster-upgrade": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.510265113+00:00 stderr F E1208 17:46:25.510206 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.517998474+00:00 stderr F E1208 17:46:25.517922 1 status.go:130] ConsoleNotificationSyncDegraded FailedDelete Delete "https://10.217.4.1:443/apis/console.openshift.io/v1/consolenotifications/cluster-upgrade": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.523553571+00:00 stderr F E1208 17:46:25.523490 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.536056927+00:00 stderr F E1208 17:46:25.535971 1 status.go:130] ConsoleNotificationSyncDegraded FailedDelete Delete "https://10.217.4.1:443/apis/console.openshift.io/v1/consolenotifications/cluster-upgrade": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.538284684+00:00 stderr F E1208 17:46:25.538226 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.560588353+00:00 stderr F E1208 17:46:25.560458 1 status.go:130] ConsoleNotificationSyncDegraded FailedDelete Delete "https://10.217.4.1:443/apis/console.openshift.io/v1/consolenotifications/cluster-upgrade": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.562191431+00:00 stderr F E1208 17:46:25.562146 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.606009687+00:00 stderr F E1208 17:46:25.605801 1 status.go:130] ConsoleNotificationSyncDegraded FailedDelete Delete "https://10.217.4.1:443/apis/console.openshift.io/v1/consolenotifications/cluster-upgrade": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.611594724+00:00 stderr F E1208 17:46:25.610983 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.655584105+00:00 stderr F E1208 17:46:25.655441 1 status.go:130] PDBSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.656097350+00:00 stderr F E1208 17:46:25.656071 1 status.go:130] ServiceSyncDegraded 
FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/console": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.657535533+00:00 stderr F E1208 17:46:25.657277 1 status.go:130] ServiceSyncDegraded FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.657535533+00:00 stderr F E1208 17:46:25.657339 1 status.go:130] DownloadsDeploymentSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.657535533+00:00 stderr F E1208 17:46:25.657343 1 base_controller.go:279] "Unhandled Error" err="PodDisruptionBudgetController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.657673107+00:00 stderr F E1208 17:46:25.657631 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.658826412+00:00 stderr F E1208 17:46:25.658780 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.659254225+00:00 stderr F E1208 17:46:25.659217 1 base_controller.go:279] "Unhandled Error" err="ConsoleDownloadsDeploymentSyncController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.665773830+00:00 stderr F E1208 17:46:25.665675 1 status.go:130] ServiceSyncDegraded FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/console": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.669896624+00:00 stderr F E1208 17:46:25.669834 1 status.go:130] PDBSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.670548644+00:00 stderr F E1208 17:46:25.670522 1 status.go:130] ServiceSyncDegraded FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.670956126+00:00 stderr F E1208 17:46:25.670919 1 status.go:130] DownloadsDeploymentSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.673381379+00:00 stderr F E1208 17:46:25.673356 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.681081090+00:00 stderr F E1208 17:46:25.681030 1 status.go:130] PDBSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console": dial tcp 10.217.4.1:443: connect: connection refused 
2025-12-08T17:46:25.686604256+00:00 stderr F E1208 17:46:25.686567 1 status.go:130] ServiceSyncDegraded FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/console": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.694954077+00:00 stderr F E1208 17:46:25.694861 1 status.go:130] ConsoleNotificationSyncDegraded FailedDelete Delete "https://10.217.4.1:443/apis/console.openshift.io/v1/consolenotifications/cluster-upgrade": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.711756041+00:00 stderr F E1208 17:46:25.711687 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.725265485+00:00 stderr F E1208 17:46:25.725178 1 status.go:130] ServiceSyncDegraded FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.911005981+00:00 stderr F E1208 17:46:25.910838 1 base_controller.go:279] "Unhandled Error" err="ConsoleDownloadsDeploymentSyncController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.925163506+00:00 stderr F E1208 17:46:25.925104 1 status.go:130] DownloadsDeploymentSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.110340174+00:00 stderr F E1208 17:46:26.110264 1 base_controller.go:279] "Unhandled Error" err="PodDisruptionBudgetController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.123036485+00:00 stderr F E1208 17:46:26.122998 1 status.go:130] PDBSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.310728819+00:00 stderr F E1208 17:46:26.310617 1 base_controller.go:279] "Unhandled Error" err="PodDisruptionBudgetController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.319551584+00:00 stderr F E1208 17:46:26.319495 1 status.go:130] PDBSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.510934878+00:00 stderr F E1208 17:46:26.510856 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.533454104+00:00 stderr F E1208 17:46:26.533388 1 status.go:130] ServiceSyncDegraded FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/console": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.710141538+00:00 stderr F I1208 17:46:26.710040 1 request.go:752] "Waited before sending request" delay="1.013892023s" 
reason="client-side throttling, not priority and fairness" verb="PUT" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status" 2025-12-08T17:46:26.711233070+00:00 stderr F E1208 17:46:26.711185 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.874357587+00:00 stderr F E1208 17:46:26.874308 1 status.go:130] ConsoleNotificationSyncDegraded FailedDelete Delete "https://10.217.4.1:443/apis/console.openshift.io/v1/consolenotifications/cluster-upgrade": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.910983697+00:00 stderr F E1208 17:46:26.910904 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.934260505+00:00 stderr F E1208 17:46:26.934198 1 status.go:130] ServiceSyncDegraded FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:27.110826855+00:00 stderr F E1208 17:46:27.110769 1 base_controller.go:279] "Unhandled Error" err="ConsoleDownloadsDeploymentSyncController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.134992370+00:00 stderr F E1208 17:46:27.134938 1 status.go:130] DownloadsDeploymentSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:27.312919110+00:00 stderr F E1208 17:46:27.311439 1 base_controller.go:279] "Unhandled Error" err="PodDisruptionBudgetController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.337959122+00:00 stderr F E1208 17:46:27.337733 1 status.go:130] PDBSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:27.513214623+00:00 stderr F E1208 17:46:27.513143 1 base_controller.go:279] "Unhandled Error" err="PodDisruptionBudgetController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.525966335+00:00 stderr F E1208 17:46:27.525919 1 status.go:130] PDBSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:27.710828944+00:00 stderr F E1208 17:46:27.710776 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.754976239+00:00 stderr F E1208 17:46:27.754757 1 status.go:130] ServiceSyncDegraded FailedApply Get 
"https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/console": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:27.909466956+00:00 stderr F I1208 17:46:27.909353 1 request.go:752] "Waited before sending request" delay="1.033684937s" reason="client-side throttling, not priority and fairness" verb="PUT" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status" 2025-12-08T17:46:27.910726615+00:00 stderr F E1208 17:46:27.910648 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.982520359+00:00 stderr F E1208 17:46:27.982452 1 status.go:130] OAuthClientSyncDegraded FailedRegister Get "https://10.217.4.1:443/apis/oauth.openshift.io/v1/oauthclients/console": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:28.110503351+00:00 stderr F E1208 17:46:28.110430 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.153155191+00:00 stderr F E1208 17:46:28.153100 1 status.go:130] ServiceSyncDegraded FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:28.233129222+00:00 stderr F E1208 17:46:28.233068 1 status.go:130] ConsoleNotificationSyncDegraded FailedDelete Delete "https://10.217.4.1:443/apis/console.openshift.io/v1/consolenotifications/cluster-upgrade": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:28.314069091+00:00 stderr F E1208 17:46:28.314017 1 base_controller.go:279] "Unhandled Error" err="ConsoleDownloadsDeploymentSyncController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.358217386+00:00 stderr F E1208 17:46:28.358161 1 status.go:130] DownloadsDeploymentSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:28.512245229+00:00 stderr F E1208 17:46:28.511814 1 base_controller.go:279] "Unhandled Error" err="PodDisruptionBudgetController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.557636741+00:00 stderr F E1208 17:46:28.557572 1 status.go:130] PDBSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:28.711116878+00:00 stderr F E1208 17:46:28.711066 1 base_controller.go:279] "Unhandled Error" err="PodDisruptionBudgetController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.758029057+00:00 stderr F E1208 17:46:28.736433 1 status.go:130] PDBSyncDegraded FailedApply Get "https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console": dial tcp 10.217.4.1:443: 
connect: connection refused 2025-12-08T17:46:28.910070410+00:00 stderr F I1208 17:46:28.909981 1 request.go:752] "Waited before sending request" delay="1.153915286s" reason="client-side throttling, not priority and fairness" verb="PUT" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status" 2025-12-08T17:46:28.911000899+00:00 stderr F E1208 17:46:28.910958 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.994349260+00:00 stderr F E1208 17:46:28.993993 1 status.go:130] ServiceSyncDegraded FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/console": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:29.310862570+00:00 stderr F E1208 17:46:29.310744 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:29.394467080+00:00 stderr F E1208 17:46:29.394392 1 status.go:130] ServiceSyncDegraded FailedApply Get "https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/downloads": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:29.511133662+00:00 stderr F E1208 17:46:29.511064 1 base_controller.go:279] "Unhandled Error" err="ClusterUpgradeNotificationController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:29.749221098+00:00 stderr F E1208 17:46:29.749160 1 base_controller.go:279] "Unhandled Error" err="ConsoleDownloadsDeploymentSyncController reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:30.109816692+00:00 stderr F I1208 17:46:30.109724 1 request.go:752] "Waited before sending request" delay="1.371539817s" reason="client-side throttling, not priority and fairness" verb="PUT" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status" 2025-12-08T17:46:30.531727385+00:00 stderr F E1208 17:46:30.531679 1 base_controller.go:279] "Unhandled Error" err="OAuthClientsController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:30.915668960+00:00 stderr F I1208 17:46:30.915624 1 helpers.go:188] lister was stale at resourceVersion=38391, live get showed resourceVersion=38924 2025-12-08T17:46:31.116367044+00:00 stderr F I1208 17:46:31.116267 1 helpers.go:188] lister was stale at resourceVersion=38391, live get showed resourceVersion=38924 2025-12-08T17:46:31.319781219+00:00 stderr F I1208 17:46:31.319718 1 helpers.go:188] lister was stale at resourceVersion=38391, live get showed resourceVersion=38924 2025-12-08T17:46:31.519002659+00:00 stderr F I1208 17:46:31.518929 1 helpers.go:188] lister was stale at resourceVersion=38391, live get showed resourceVersion=38924 2025-12-08T17:46:31.742919080+00:00 stderr F E1208 17:46:31.737520 1 base_controller.go:279] "Unhandled Error" err="PodDisruptionBudgetController reconciliation failed: Get 
\"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/downloads\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:33.116917591+00:00 stderr F I1208 17:46:33.116650 1 helpers.go:188] lister was stale at resourceVersion=38391, live get showed resourceVersion=38939 2025-12-08T17:46:33.320921945+00:00 stderr F I1208 17:46:33.320271 1 helpers.go:188] lister was stale at resourceVersion=38391, live get showed resourceVersion=38939 2025-12-08T17:46:33.529920748+00:00 stderr F E1208 17:46:33.529437 1 base_controller.go:279] "Unhandled Error" err="PodDisruptionBudgetController reconciliation failed: Get \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:33.716914711+00:00 stderr F I1208 17:46:33.715991 1 helpers.go:188] lister was stale at resourceVersion=38391, live get showed resourceVersion=38950 2025-12-08T17:46:33.919942055+00:00 stderr F E1208 17:46:33.919744 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Operation cannot be fulfilled on consoles.operator.openshift.io \"cluster\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:46:34.131946728+00:00 stderr F E1208 17:46:34.131338 1 base_controller.go:279] "Unhandled Error" err="ConsoleServiceController reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/downloads\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:57.875078175+00:00 stderr F I1208 17:46:57.873189 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:58.566139018+00:00 stderr F I1208 17:46:58.566066 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:46:58.697466453+00:00 stderr F I1208 17:46:58.697396 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:46:59.696130310+00:00 stderr F I1208 17:46:59.695743 1 reflector.go:430] "Caches populated" type="*v1.ConsoleCLIDownload" reflector="github.com/openshift/client-go/console/informers/externalversions/factory.go:125" 2025-12-08T17:47:01.904005302+00:00 stderr F I1208 17:47:01.903584 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:02.910057981+00:00 stderr F I1208 17:47:02.908715 1 reflector.go:430] "Caches populated" type="operators.coreos.com/v1, Resource=olmconfigs" reflector="k8s.io/client-go/dynamic/dynamicinformer/informer.go:108" 2025-12-08T17:47:07.619577993+00:00 stderr F I1208 17:47:07.619126 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:08.599374396+00:00 stderr F I1208 17:47:08.598778 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:09.674587613+00:00 stderr F I1208 17:47:09.674499 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=consoles" reflector="k8s.io/client-go/dynamic/dynamicinformer/informer.go:108" 2025-12-08T17:47:09.678324971+00:00 
stderr F I1208 17:47:09.678263 1 status_controller.go:230] clusteroperator/console diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:43Z","message":"DownloadsDeploymentSyncDegraded: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads\": dial tcp 10.217.4.1:443: connect: connection refused\nPDBSyncDegraded: Get \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console\": dial tcp 10.217.4.1:443: connect: connection refused\nServiceSyncDegraded: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/downloads\": dial tcp 10.217.4.1:443: connect: connection refused\nOAuthClientsControllerDegraded: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:43Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:16Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:09.689277276+00:00 stderr F I1208 17:47:09.689144 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/console changed: Degraded message changed from "All is well" to "DownloadsDeploymentSyncDegraded: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads\": dial tcp 10.217.4.1:443: connect: connection refused\nPDBSyncDegraded: Get \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console\": dial tcp 10.217.4.1:443: connect: connection refused\nServiceSyncDegraded: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/downloads\": dial tcp 10.217.4.1:443: connect: connection refused\nOAuthClientsControllerDegraded: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:12.000656996+00:00 stderr F I1208 17:47:12.000276 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go:183" 2025-12-08T17:47:12.196921354+00:00 stderr F I1208 17:47:12.196841 1 reflector.go:430] "Caches populated" type="*v1.Console" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125" 2025-12-08T17:47:12.246150174+00:00 stderr F I1208 17:47:12.241701 1 status_controller.go:230] clusteroperator/console diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:43Z","message":"DownloadsDeploymentSyncDegraded: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads\": dial tcp 10.217.4.1:443: connect: connection refused\nPDBSyncDegraded: Get \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console\": dial tcp 10.217.4.1:443: connect: 
connection refused\nOAuthClientsControllerDegraded: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:43Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:16Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.255910921+00:00 stderr F I1208 17:47:12.255825 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/console changed: Degraded message changed from "DownloadsDeploymentSyncDegraded: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads\": dial tcp 10.217.4.1:443: connect: connection refused\nPDBSyncDegraded: Get \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console\": dial tcp 10.217.4.1:443: connect: connection refused\nServiceSyncDegraded: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-console/services/downloads\": dial tcp 10.217.4.1:443: connect: connection refused\nOAuthClientsControllerDegraded: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" to "DownloadsDeploymentSyncDegraded: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads\": dial tcp 10.217.4.1:443: connect: connection refused\nPDBSyncDegraded: Get \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console\": dial tcp 10.217.4.1:443: connect: connection refused\nOAuthClientsControllerDegraded: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:12.344041576+00:00 stderr F I1208 17:47:12.343987 1 status_controller.go:230] clusteroperator/console diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:43Z","message":"DownloadsDeploymentSyncDegraded: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:43Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:16Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.354529745+00:00 stderr F I1208 17:47:12.351687 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", 
UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/console changed: Degraded message changed from "DownloadsDeploymentSyncDegraded: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads\": dial tcp 10.217.4.1:443: connect: connection refused\nPDBSyncDegraded: Get \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-console/poddisruptionbudgets/console\": dial tcp 10.217.4.1:443: connect: connection refused\nOAuthClientsControllerDegraded: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/consoles/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" to "DownloadsDeploymentSyncDegraded: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:12.445894592+00:00 stderr F I1208 17:47:12.444429 1 status_controller.go:230] clusteroperator/console diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:43Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:44:39Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:43Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:16Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.451700494+00:00 stderr F I1208 17:47:12.451602 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-console-operator", Name:"console-operator", UID:"4982b9f1-eaf4-44fa-a84a-bf9954aedcb1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/console changed: Degraded message changed from "DownloadsDeploymentSyncDegraded: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-console/deployments/downloads\": dial tcp 10.217.4.1:443: connect: connection refused" to "All is well" 2025-12-08T17:47:12.816119656+00:00 stderr F I1208 17:47:12.815287 1 reflector.go:430] "Caches populated" type="*v1.PodDisruptionBudget" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:13.401271376+00:00 stderr F I1208 17:47:13.401195 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:13.997943349+00:00 stderr F I1208 17:47:13.997851 1 reflector.go:430] "Caches populated" type="*v1.OAuth" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:15.023889254+00:00 stderr F I1208 17:47:15.023808 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:209" 2025-12-08T17:47:15.636509359+00:00 stderr F I1208 17:47:15.635890 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:209" 2025-12-08T17:47:17.282091230+00:00 stderr F I1208 17:47:17.281755 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 
2025-12-08T17:47:17.905081902+00:00 stderr F I1208 17:47:17.905008 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:19.958509062+00:00 stderr F I1208 17:47:19.958042 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:20.839790614+00:00 stderr F I1208 17:47:20.839696 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:21.113602494+00:00 stderr F I1208 17:47:21.113169 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:32.954250017+00:00 stderr F I1208 17:47:32.953648 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:36.968991506+00:00 stderr F I1208 17:47:36.968333 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:37.845743636+00:00 stderr F I1208 17:47:37.845626 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:40.328986145+00:00 stderr F I1208 17:47:40.328284 1 reflector.go:430] "Caches populated" type="*v1.Console" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:42.740211828+00:00 stderr F I1208 17:47:42.740170 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:47.502752108+00:00 stderr F I1208 17:47:47.502178 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:48.388943594+00:00 stderr F I1208 17:47:48.388690 1 reflector.go:430] "Caches populated" type="*v1.ConsolePlugin" reflector="github.com/openshift/client-go/console/informers/externalversions/factory.go:125" 2025-12-08T17:47:51.317349968+00:00 stderr F I1208 17:47:51.316806 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:57.041714484+00:00 stderr F I1208 17:47:57.041641 1 reflector.go:430] "Caches populated" type="*v1.Authentication" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:48:03.052772862+00:00 stderr F I1208 17:48:03.052153 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:48:11.813218945+00:00 stderr F I1208 17:48:11.812650 1 reflector.go:430] "Caches populated" type="*v1.IngressController" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125"
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver-operator_openshift-apiserver-operator-846cbfc458-q6lj7_837f85a8-fff5-46a0-b1d5-2d51271f415a/openshift-apiserver-operator/0.log
2025-12-08T17:44:19.986353372+00:00 stderr F I1208 17:44:19.980952 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:44:19.990637789+00:00 stderr F I1208 17:44:19.987164 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:19.993967770+00:00 stderr F I1208 17:44:19.991825 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:20.055604202+00:00 stderr F I1208 17:44:20.055518 1 builder.go:304] openshift-apiserver-operator version 4.20.0-202510211040.p2.g9cfa567.assembly.stream.el9-9cfa567-9cfa5679a8ac1e5a68eea32179d9e069da85dfcf 2025-12-08T17:44:21.174202663+00:00 stderr F I1208 17:44:21.168541 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:21.174202663+00:00 stderr F W1208 17:44:21.169300 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:21.174202663+00:00 stderr F W1208 17:44:21.169312 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:21.174202663+00:00 stderr F W1208 17:44:21.169320 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:21.174202663+00:00 stderr F W1208 17:44:21.169324 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:21.174202663+00:00 stderr F W1208 17:44:21.169328 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:21.174202663+00:00 stderr F W1208 17:44:21.169332 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:44:21.174202663+00:00 stderr F I1208 17:44:21.172154 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:21.174488561+00:00 stderr F I1208 17:44:21.174304 1 leaderelection.go:257] attempting to acquire leader lease openshift-apiserver-operator/openshift-apiserver-operator-lock...
2025-12-08T17:44:21.181507493+00:00 stderr F I1208 17:44:21.177314 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:21.181507493+00:00 stderr F I1208 17:44:21.178181 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:21.181507493+00:00 stderr F I1208 17:44:21.178845 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:21.189212132+00:00 stderr F I1208 17:44:21.186225 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:21.189212132+00:00 stderr F I1208 17:44:21.186853 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:21.189212132+00:00 stderr F I1208 17:44:21.186899 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:21.189212132+00:00 stderr F I1208 17:44:21.186904 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:21.189212132+00:00 stderr F I1208 17:44:21.187247 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:21.189212132+00:00 stderr F I1208 17:44:21.187266 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:21.192926854+00:00 stderr F I1208 17:44:21.192634 1 leaderelection.go:271] successfully acquired lease openshift-apiserver-operator/openshift-apiserver-operator-lock 2025-12-08T17:44:21.198015862+00:00 stderr F I1208 17:44:21.197850 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-apiserver-operator", Name:"openshift-apiserver-operator-lock", UID:"7b2e46a3-072f-412d-bcc7-d5ceda9ba725", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37088", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' openshift-apiserver-operator-846cbfc458-q6lj7_d24ea9e3-5ac7-4433-a72a-2ec52538983b became leader 2025-12-08T17:44:21.208862018+00:00 stderr F I1208 17:44:21.208807 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:21.214914893+00:00 stderr F I1208 17:44:21.214191 1 starter.go:144] FeatureGates initialized: knownFeatureGates=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall 
ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:21.214914893+00:00 stderr F I1208 17:44:21.214531 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-apiserver-operator", Name:"openshift-apiserver-operator", UID:"f8199ef4-1467-44ed-9019-69c1f1737f70", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", 
"IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:21.255098310+00:00 stderr F I1208 17:44:21.251693 1 base_controller.go:76] Waiting for caches to sync for ConnectivityCheckController 2025-12-08T17:44:21.260040055+00:00 stderr F I1208 17:44:21.257964 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:21.264831195+00:00 stderr F I1208 17:44:21.264200 1 base_controller.go:76] Waiting for caches to sync for openshift-apiserver-EncryptionPrune 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.266187 1 base_controller.go:76] Waiting for caches to sync for RevisionController 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.266222 1 base_controller.go:76] Waiting for caches to sync for ConfigObserver 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.266239 1 base_controller.go:76] Waiting for caches to sync for openshift-apiserver 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.266250 1 base_controller.go:76] Waiting for caches to sync for openshift-apiserver-APIService 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.266273 1 base_controller.go:76] Waiting for caches to sync for openshift-apiserver-EncryptionState 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.266289 1 base_controller.go:76] Waiting for caches to sync for openshift-apiserver-EncryptionMigration 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.266308 1 base_controller.go:76] Waiting for caches to sync for auditPolicyController 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.266427 1 base_controller.go:76] Waiting for caches to sync for openshift-apiserver-RemoveStaleConditions 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.266521 1 base_controller.go:76] Waiting for caches to sync for SecretRevisionPruneController 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.267276 1 base_controller.go:76] Waiting for caches to sync for openshift-apiserver-EncryptionKey 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.267309 1 base_controller.go:76] Waiting for caches to sync for OpenShiftAPIServer-WorkloadWorkloadController 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.267330 1 base_controller.go:76] Waiting for caches to sync for NamespaceFinalizerController_openshift-apiserver 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.267347 1 base_controller.go:76] Waiting for caches to sync for openshift-apiserver-UnsupportedConfigOverrides 2025-12-08T17:44:21.268013172+00:00 stderr F I1208 17:44:21.267927 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 
2025-12-08T17:44:21.272176596+00:00 stderr F I1208 17:44:21.268658 1 base_controller.go:76] Waiting for caches to sync for openshift-apiserver-EncryptionCondition 2025-12-08T17:44:21.272176596+00:00 stderr F I1208 17:44:21.270634 1 base_controller.go:76] Waiting for caches to sync for APIServerStaticResources-StaticResources 2025-12-08T17:44:21.272176596+00:00 stderr F I1208 17:44:21.270721 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_openshift-apiserver 2025-12-08T17:44:21.285023826+00:00 stderr F I1208 17:44:21.283243 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:21.328101791+00:00 stderr F I1208 17:44:21.328027 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:21.340678034+00:00 stderr F I1208 17:44:21.337389 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:21.340678034+00:00 stderr F I1208 17:44:21.337603 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:21.467997057+00:00 stderr F I1208 17:44:21.466771 1 base_controller.go:82] Caches are synced for openshift-apiserver-RemoveStaleConditions 2025-12-08T17:44:21.467997057+00:00 stderr F I1208 17:44:21.466817 1 base_controller.go:119] Starting #1 worker of openshift-apiserver-RemoveStaleConditions controller ... 2025-12-08T17:44:21.467997057+00:00 stderr F I1208 17:44:21.467762 1 base_controller.go:82] Caches are synced for openshift-apiserver-UnsupportedConfigOverrides 2025-12-08T17:44:21.467997057+00:00 stderr F I1208 17:44:21.467829 1 base_controller.go:119] Starting #1 worker of openshift-apiserver-UnsupportedConfigOverrides controller ... 2025-12-08T17:44:21.468369947+00:00 stderr F I1208 17:44:21.468032 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:21.468369947+00:00 stderr F I1208 17:44:21.468087 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 2025-12-08T17:44:21.471940934+00:00 stderr F I1208 17:44:21.470820 1 base_controller.go:82] Caches are synced for APIServerStaticResources-StaticResources 2025-12-08T17:44:21.471940934+00:00 stderr F I1208 17:44:21.470835 1 base_controller.go:119] Starting #1 worker of APIServerStaticResources-StaticResources controller ... 2025-12-08T17:44:21.471940934+00:00 stderr F I1208 17:44:21.470843 1 base_controller.go:82] Caches are synced for StatusSyncer_openshift-apiserver 2025-12-08T17:44:21.471940934+00:00 stderr F I1208 17:44:21.470857 1 base_controller.go:119] Starting #1 worker of StatusSyncer_openshift-apiserver controller ... 
2025-12-08T17:44:21.626119730+00:00 stderr F I1208 17:44:21.620556 1 status_controller.go:229] clusteroperator/openshift-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:21Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-apiserver ()","reason":"APIServerDeployment_UnavailablePod","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:14Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-apiserver pods available on any node.\nAPIServicesAvailable: PreconditionNotReady","reason":"APIServerDeployment_NoPod::APIServices_PreconditionNotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:48Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:48Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:21.626119730+00:00 stderr F I1208 17:44:21.621435 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-apiserver-operator", Name:"openshift-apiserver-operator", UID:"f8199ef4-1467-44ed-9019-69c1f1737f70", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-apiserver changed: Degraded changed from False to True ("APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-apiserver ()") 2025-12-08T17:44:21.659466120+00:00 stderr F I1208 17:44:21.655765 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:21.673991566+00:00 stderr F I1208 17:44:21.669888 1 reflector.go:430] "Caches populated" type="*v1.CustomResourceDefinition" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:21.817082169+00:00 stderr F E1208 17:44:21.816432 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_openshift-apiserver reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"openshift-apiserver\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:44:21.893743960+00:00 stderr F I1208 17:44:21.893035 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:22.060144079+00:00 stderr F I1208 17:44:22.059733 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:22.314886398+00:00 stderr F I1208 17:44:22.314037 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:22.458910537+00:00 stderr F I1208 17:44:22.457722 1 request.go:752] "Waited before sending request" delay="1.198491502s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver-operator/secrets?limit=500&resourceVersion=0" 2025-12-08T17:44:22.466040241+00:00 stderr F I1208 17:44:22.464759 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:22.658602863+00:00 stderr F I1208 
17:44:22.658539 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:22.670912649+00:00 stderr F I1208 17:44:22.667937 1 base_controller.go:82] Caches are synced for NamespaceFinalizerController_openshift-apiserver 2025-12-08T17:44:22.670912649+00:00 stderr F I1208 17:44:22.667957 1 base_controller.go:119] Starting #1 worker of NamespaceFinalizerController_openshift-apiserver controller ... 2025-12-08T17:44:22.752346860+00:00 stderr F I1208 17:44:22.752225 1 base_controller.go:82] Caches are synced for ConnectivityCheckController 2025-12-08T17:44:22.752346860+00:00 stderr F I1208 17:44:22.752253 1 base_controller.go:119] Starting #1 worker of ConnectivityCheckController controller ... 2025-12-08T17:44:22.856689897+00:00 stderr F I1208 17:44:22.856231 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.063149078+00:00 stderr F I1208 17:44:23.063000 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.070332174+00:00 stderr F I1208 17:44:23.066714 1 base_controller.go:82] Caches are synced for auditPolicyController 2025-12-08T17:44:23.070332174+00:00 stderr F I1208 17:44:23.066745 1 base_controller.go:119] Starting #1 worker of auditPolicyController controller ... 2025-12-08T17:44:23.257737946+00:00 stderr F I1208 17:44:23.255706 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:23.257737946+00:00 stderr F I1208 17:44:23.256548 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.261737325+00:00 stderr F I1208 17:44:23.259593 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:23.458615844+00:00 stderr F I1208 17:44:23.457539 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.468927386+00:00 stderr F I1208 17:44:23.467246 1 base_controller.go:82] Caches are synced for openshift-apiserver-APIService 2025-12-08T17:44:23.468927386+00:00 stderr F I1208 17:44:23.467280 1 base_controller.go:119] Starting #1 worker of openshift-apiserver-APIService controller ... 
2025-12-08T17:44:23.653908042+00:00 stderr F I1208 17:44:23.653068 1 request.go:752] "Waited before sending request" delay="2.384185773s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/endpoints?limit=500&resourceVersion=0" 2025-12-08T17:44:23.660332447+00:00 stderr F I1208 17:44:23.660257 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:23.660524212+00:00 stderr F I1208 17:44:23.660414 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.665776155+00:00 stderr F I1208 17:44:23.664358 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:23.879590698+00:00 stderr F I1208 17:44:23.878929 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:24.058236500+00:00 stderr F I1208 17:44:24.057184 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:24.067230126+00:00 stderr F I1208 17:44:24.065423 1 base_controller.go:82] Caches are synced for openshift-apiserver-EncryptionPrune 2025-12-08T17:44:24.067230126+00:00 stderr F I1208 17:44:24.065464 1 base_controller.go:119] Starting #1 worker of openshift-apiserver-EncryptionPrune controller ... 2025-12-08T17:44:24.067230126+00:00 stderr F I1208 17:44:24.067055 1 base_controller.go:82] Caches are synced for SecretRevisionPruneController 2025-12-08T17:44:24.067230126+00:00 stderr F I1208 17:44:24.067084 1 base_controller.go:119] Starting #1 worker of SecretRevisionPruneController controller ... 2025-12-08T17:44:24.068691695+00:00 stderr F I1208 17:44:24.068122 1 base_controller.go:82] Caches are synced for OpenShiftAPIServer-WorkloadWorkloadController 2025-12-08T17:44:24.068691695+00:00 stderr F I1208 17:44:24.068149 1 base_controller.go:119] Starting #1 worker of OpenShiftAPIServer-WorkloadWorkloadController controller ... 2025-12-08T17:44:24.068691695+00:00 stderr F I1208 17:44:24.068491 1 base_controller.go:82] Caches are synced for openshift-apiserver-EncryptionKey 2025-12-08T17:44:24.068691695+00:00 stderr F I1208 17:44:24.068497 1 base_controller.go:119] Starting #1 worker of openshift-apiserver-EncryptionKey controller ... 2025-12-08T17:44:24.081034843+00:00 stderr F I1208 17:44:24.073771 1 base_controller.go:82] Caches are synced for openshift-apiserver-EncryptionMigration 2025-12-08T17:44:24.081034843+00:00 stderr F I1208 17:44:24.073796 1 base_controller.go:119] Starting #1 worker of openshift-apiserver-EncryptionMigration controller ... 2025-12-08T17:44:24.081034843+00:00 stderr F I1208 17:44:24.073822 1 base_controller.go:82] Caches are synced for RevisionController 2025-12-08T17:44:24.081034843+00:00 stderr F I1208 17:44:24.073828 1 base_controller.go:119] Starting #1 worker of RevisionController controller ... 2025-12-08T17:44:24.081034843+00:00 stderr F I1208 17:44:24.073896 1 base_controller.go:82] Caches are synced for openshift-apiserver-EncryptionState 2025-12-08T17:44:24.081034843+00:00 stderr F I1208 17:44:24.073902 1 base_controller.go:119] Starting #1 worker of openshift-apiserver-EncryptionState controller ... 
2025-12-08T17:44:24.081034843+00:00 stderr F I1208 17:44:24.075935 1 base_controller.go:82] Caches are synced for openshift-apiserver-EncryptionCondition 2025-12-08T17:44:24.081034843+00:00 stderr F I1208 17:44:24.075953 1 base_controller.go:119] Starting #1 worker of openshift-apiserver-EncryptionCondition controller ... 2025-12-08T17:44:24.264008433+00:00 stderr F I1208 17:44:24.263227 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:24.269132213+00:00 stderr F I1208 17:44:24.268046 1 base_controller.go:82] Caches are synced for openshift-apiserver 2025-12-08T17:44:24.269132213+00:00 stderr F I1208 17:44:24.268064 1 base_controller.go:82] Caches are synced for ConfigObserver 2025-12-08T17:44:24.269132213+00:00 stderr F I1208 17:44:24.268072 1 base_controller.go:119] Starting #1 worker of openshift-apiserver controller ... 2025-12-08T17:44:24.269132213+00:00 stderr F I1208 17:44:24.268074 1 base_controller.go:119] Starting #1 worker of ConfigObserver controller ... 2025-12-08T17:44:24.853722329+00:00 stderr F I1208 17:44:24.852055 1 request.go:752] "Waited before sending request" delay="1.784621169s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/configmaps/audit" 2025-12-08T17:44:27.517417076+00:00 stderr F I1208 17:44:27.516458 1 status_controller.go:229] clusteroperator/openshift-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:21Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-apiserver (container is not ready in apiserver-9ddfb9f55-8h8fl pod)","reason":"APIServerDeployment_UnavailablePod","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:14Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-apiserver pods available on any node.\nAPIServicesAvailable: PreconditionNotReady","reason":"APIServerDeployment_NoPod::APIServices_PreconditionNotReady","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:48Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:48Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:27.553480630+00:00 stderr F I1208 17:44:27.551340 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-apiserver-operator", Name:"openshift-apiserver-operator", UID:"f8199ef4-1467-44ed-9019-69c1f1737f70", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-apiserver changed: Degraded message changed from "APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-apiserver ()" to "APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-apiserver (container is not ready in apiserver-9ddfb9f55-8h8fl pod)" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607066 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 
certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.607009811 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607472 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.607456593 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607487 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.607478273 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607528 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.607491834 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607543 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.607533695 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607557 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.607547065 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607571 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.607562036 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607585 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 
UTC (now=2025-12-08 17:44:30.607576126 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607598 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.607589826 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607614 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.607604927 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607796 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-apiserver-operator.svc\" [serving] validServingFor=[metrics.openshift-apiserver-operator.svc,metrics.openshift-apiserver-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:09 +0000 UTC to 2027-11-02 07:52:10 +0000 UTC (now=2025-12-08 17:44:30.607783481 +0000 UTC))" 2025-12-08T17:44:30.610463565+00:00 stderr F I1208 17:44:30.607946 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215861\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215860\" (2025-12-08 16:44:20 +0000 UTC to 2028-12-08 16:44:20 +0000 UTC (now=2025-12-08 17:44:30.607934786 +0000 UTC))" 2025-12-08T17:44:33.614559848+00:00 stderr F E1208 17:44:33.614071 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: apiservices.apiregistration.k8s.io/v1.apps.openshift.io: not available: endpoints for service/api in \"openshift-apiserver\" have no addresses with port name \"https\"" 2025-12-08T17:44:33.614619100+00:00 stderr F I1208 17:44:33.614208 1 status_controller.go:229] clusteroperator/openshift-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:21Z","message":"APIServerDeploymentDegraded: 1 of 1 requested instances are unavailable for apiserver.openshift-apiserver (container is not ready in apiserver-9ddfb9f55-8h8fl pod)","reason":"APIServerDeployment_UnavailablePod","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:14Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"APIServerDeploymentAvailable: no apiserver.openshift-apiserver pods available on any node.\nAPIServicesAvailable: apiservices.apiregistration.k8s.io/v1.apps.openshift.io: not available: endpoints for service/api in \"openshift-apiserver\" have no addresses with port name \"https\"","reason":"APIServerDeployment_NoPod::APIServices_Error","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:48Z","message":"All is 
well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:48Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:33.623212294+00:00 stderr F I1208 17:44:33.622926 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-apiserver-operator", Name:"openshift-apiserver-operator", UID:"f8199ef4-1467-44ed-9019-69c1f1737f70", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-apiserver changed: Available message changed from "APIServerDeploymentAvailable: no apiserver.openshift-apiserver pods available on any node.\nAPIServicesAvailable: PreconditionNotReady" to "APIServerDeploymentAvailable: no apiserver.openshift-apiserver pods available on any node.\nAPIServicesAvailable: apiservices.apiregistration.k8s.io/v1.apps.openshift.io: not available: endpoints for service/api in \"openshift-apiserver\" have no addresses with port name \"https\"" 2025-12-08T17:44:34.682936210+00:00 stderr F I1208 17:44:34.682860 1 status_controller.go:229] clusteroperator/openshift-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:14Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-03T09:40:48Z","message":"APIServicesAvailable: apiservices.apiregistration.k8s.io/v1.apps.openshift.io: not available: endpoints for service/api in \"openshift-apiserver\" have no addresses with port name \"https\"","reason":"APIServices_Error","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:48Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:48Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:34.693640752+00:00 stderr F I1208 17:44:34.693594 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-apiserver-operator", Name:"openshift-apiserver-operator", UID:"f8199ef4-1467-44ed-9019-69c1f1737f70", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-apiserver changed: Degraded changed from True to False ("All is well"),Available message changed from "APIServerDeploymentAvailable: no apiserver.openshift-apiserver pods available on any node.\nAPIServicesAvailable: apiservices.apiregistration.k8s.io/v1.apps.openshift.io: not available: endpoints for service/api in \"openshift-apiserver\" have no addresses with port name \"https\"" to "APIServicesAvailable: apiservices.apiregistration.k8s.io/v1.apps.openshift.io: not available: endpoints for service/api in \"openshift-apiserver\" have no addresses with port name \"https\"" 2025-12-08T17:44:35.208955418+00:00 stderr F I1208 17:44:35.208845 1 status_controller.go:229] clusteroperator/openshift-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:14Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:35Z","message":"All is 
well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:48Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:48Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:35.219792124+00:00 stderr F I1208 17:44:35.218398 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-apiserver-operator", Name:"openshift-apiserver-operator", UID:"f8199ef4-1467-44ed-9019-69c1f1737f70", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-apiserver changed: Available changed from False to True ("All is well") 2025-12-08T17:45:16.043464127+00:00 stderr F I1208 17:45:16.043100 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.043067586 +0000 UTC))" 2025-12-08T17:45:16.043464127+00:00 stderr F I1208 17:45:16.043258 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.043249261 +0000 UTC))" 2025-12-08T17:45:16.043464127+00:00 stderr F I1208 17:45:16.043272 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.043264621 +0000 UTC))" 2025-12-08T17:45:16.043464127+00:00 stderr F I1208 17:45:16.043283 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.043276162 +0000 UTC))" 2025-12-08T17:45:16.043464127+00:00 stderr F I1208 17:45:16.043295 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.043286812 +0000 UTC))" 2025-12-08T17:45:16.043464127+00:00 stderr F I1208 17:45:16.043307 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC 
to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.043299352 +0000 UTC))" 2025-12-08T17:45:16.043943691+00:00 stderr F I1208 17:45:16.043796 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.043311173 +0000 UTC))" 2025-12-08T17:45:16.043943691+00:00 stderr F I1208 17:45:16.043821 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.043810837 +0000 UTC))" 2025-12-08T17:45:16.043943691+00:00 stderr F I1208 17:45:16.043862 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.043838688 +0000 UTC))" 2025-12-08T17:45:16.043943691+00:00 stderr F I1208 17:45:16.043896 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.04388851 +0000 UTC))" 2025-12-08T17:45:16.043943691+00:00 stderr F I1208 17:45:16.043913 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.04390128 +0000 UTC))" 2025-12-08T17:45:16.044109246+00:00 stderr F I1208 17:45:16.044095 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-apiserver-operator.svc\" [serving] validServingFor=[metrics.openshift-apiserver-operator.svc,metrics.openshift-apiserver-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:09 +0000 UTC to 2027-11-02 07:52:10 +0000 UTC (now=2025-12-08 17:45:16.044084615 +0000 UTC))" 2025-12-08T17:45:16.044258910+00:00 stderr F I1208 17:45:16.044224 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215861\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215860\" (2025-12-08 16:44:20 +0000 UTC to 2028-12-08 16:44:20 +0000 UTC (now=2025-12-08 
17:45:16.044213698 +0000 UTC))" 2025-12-08T17:46:21.214197113+00:00 stderr F E1208 17:46:21.213501 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-apiserver-operator/leases/openshift-apiserver-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:21.215252044+00:00 stderr F E1208 17:46:21.215195 1 leaderelection.go:436] error retrieving resource lock openshift-apiserver-operator/openshift-apiserver-operator-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-apiserver-operator/leases/openshift-apiserver-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:21.480029262+00:00 stderr F E1208 17:46:21.479931 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"v3.11.0/openshift-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-apiserver/poddisruptionbudgets/openshift-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:21.498091664+00:00 stderr F E1208 17:46:21.497845 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"v3.11.0/openshift-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-apiserver/poddisruptionbudgets/openshift-apiserver-pdb\": dial tcp 
10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:21.518820637+00:00 stderr F E1208 17:46:21.518760 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"v3.11.0/openshift-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-apiserver/poddisruptionbudgets/openshift-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:21.881074470+00:00 stderr F E1208 17:46:21.880767 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"v3.11.0/openshift-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-apiserver/poddisruptionbudgets/openshift-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:22.478031798+00:00 stderr F E1208 17:46:22.477956 1 
base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"v3.11.0/openshift-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-apiserver/poddisruptionbudgets/openshift-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:22.875217220+00:00 stderr F E1208 17:46:22.875132 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.275105293+00:00 stderr F E1208 17:46:23.275000 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.479031504+00:00 stderr F E1208 17:46:23.478648 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.490435996+00:00 stderr F E1208 17:46:23.490369 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.678871412+00:00 stderr F E1208 17:46:23.678767 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"v3.11.0/openshift-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-apiserver/poddisruptionbudgets/openshift-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:23.874817034+00:00 stderr F E1208 17:46:23.874421 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.078610061+00:00 stderr F E1208 17:46:24.077798 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.475207824+00:00 stderr F E1208 17:46:24.475097 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.679810676+00:00 stderr F E1208 17:46:24.679683 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.074686208+00:00 stderr F E1208 17:46:25.074549 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.074804012+00:00 stderr F E1208 17:46:25.074736 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.278092634+00:00 stderr F E1208 17:46:25.277524 1 base_controller.go:279] "Unhandled 
Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.478447807+00:00 stderr F E1208 17:46:25.478377 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"v3.11.0/openshift-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-apiserver/poddisruptionbudgets/openshift-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:25.673787251+00:00 stderr F E1208 17:46:25.673701 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.876108293+00:00 stderr F E1208 17:46:25.876035 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.273688987+00:00 stderr F E1208 17:46:26.273603 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.478043991+00:00 stderr F E1208 17:46:26.477927 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.874157821+00:00 stderr F E1208 17:46:26.873958 1 base_controller.go:279] 
"Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.874240374+00:00 stderr F E1208 17:46:26.874212 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.079441093+00:00 stderr F E1208 17:46:27.079348 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.280976612+00:00 stderr F E1208 17:46:27.280031 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"v3.11.0/openshift-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-apiserver/poddisruptionbudgets/openshift-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:27.479159350+00:00 stderr F E1208 17:46:27.479105 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.675637148+00:00 stderr F E1208 17:46:27.675590 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.278279927+00:00 stderr F E1208 17:46:28.278207 1 
base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"v3.11.0/openshift-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-apiserver/poddisruptionbudgets/openshift-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:28.478506206+00:00 stderr F E1208 17:46:28.478124 1 base_controller.go:279] "Unhandled Error" err="auditPolicyController reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-AuditPolicy\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-AuditPolicy&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.676858200+00:00 stderr F E1208 17:46:28.676119 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: unable to ApplyStatus for operator using fieldManager \"openshift-apiserver-APIService\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=openshift-apiserver-APIService&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.958347599+00:00 stderr F E1208 17:46:28.958293 1 base_controller.go:279] "Unhandled Error" err="NamespaceFinalizerController_openshift-apiserver reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:29.567181374+00:00 stderr F E1208 17:46:29.567076 1 base_controller.go:279] "Unhandled Error" err="APIServerStaticResources-StaticResources reconciliation failed: [\"v3.11.0/openshift-apiserver/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/apiserver-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-apiserver\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/services/api\": dial tcp 10.217.4.1:443: connect: connection refused, 
\"v3.11.0/openshift-apiserver/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"v3.11.0/openshift-apiserver/pdb.yaml\" (string): Delete \"https://10.217.4.1:443/apis/policy/v1/namespaces/openshift-apiserver/poddisruptionbudgets/openshift-apiserver-pdb\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"APIServerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftapiservers/cluster/status?fieldManager=APIServerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:30.488994903+00:00 stderr F E1208 17:46:30.488924 1 base_controller.go:279] "Unhandled Error" err="openshift-apiserver-APIService reconciliation failed: [Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.apps.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.build.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.image.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:32.768211364+00:00 stderr F I1208 17:46:32.768137 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:56.326987672+00:00 stderr F I1208 17:46:56.326518 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:58.005454109+00:00 stderr F I1208 17:46:58.005362 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:59.328207558+00:00 stderr F I1208 17:46:59.328128 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:06.368157189+00:00 stderr F I1208 17:47:06.366785 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:07.395989044+00:00 stderr F I1208 17:47:07.353078 1 reflector.go:430] "Caches populated" type="*v1.APIServer" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:10.420343329+00:00 stderr F I1208 17:47:10.419139 1 reflector.go:430] "Caches populated" type="*v1alpha1.StorageVersionMigration" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:11.092596781+00:00 stderr F I1208 17:47:11.092518 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=openshiftapiservers" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:11.095104120+00:00 stderr F I1208 17:47:11.095047 1 status_controller.go:229] clusteroperator/openshift-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:14Z","message":"All is 
well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:47:11Z","message":"APIServicesAvailable: [Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.apps.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.build.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.image.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused]","reason":"APIServices_Error","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:48Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:48Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:11.103844625+00:00 stderr F I1208 17:47:11.103461 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-apiserver-operator", Name:"openshift-apiserver-operator", UID:"f8199ef4-1467-44ed-9019-69c1f1737f70", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-apiserver changed: Available changed from True to False ("APIServicesAvailable: [Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.apps.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.build.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.image.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused]") 2025-12-08T17:47:12.018269000+00:00 stderr F I1208 17:47:12.018210 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:12.150267075+00:00 stderr F I1208 17:47:12.150206 1 reflector.go:430] "Caches populated" type="*v1.DaemonSet" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:12.212076472+00:00 stderr F I1208 17:47:12.211790 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:12.319000828+00:00 stderr F I1208 17:47:12.318057 1 status_controller.go:229] clusteroperator/openshift-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:14Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:47:12Z","message":"APIServicesAvailable: [Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.apps.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, 
Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.build.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.image.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused]","reason":"APIServices_Error","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:48Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:48Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.323358754+00:00 stderr F E1208 17:47:12.323283 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_openshift-apiserver reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"openshift-apiserver\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:12.331831331+00:00 stderr F I1208 17:47:12.331781 1 status_controller.go:229] clusteroperator/openshift-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:14Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:47:12Z","message":"APIServicesAvailable: [Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.apps.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.authorization.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.build.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.image.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused]","reason":"APIServices_Error","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:48Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:48Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.339758061+00:00 stderr F E1208 17:47:12.339703 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_openshift-apiserver reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"openshift-apiserver\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:12.352668717+00:00 stderr F I1208 17:47:12.352607 1 status_controller.go:229] clusteroperator/openshift-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:14Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:47:12Z","message":"APIServicesAvailable: [Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.apps.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.authorization.openshift.io\": dial tcp 10.217.4.1:443: 
connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.build.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused, Get \"https://10.217.4.1:443/apis/apiregistration.k8s.io/v1/apiservices/v1.image.openshift.io\": dial tcp 10.217.4.1:443: connect: connection refused]","reason":"APIServices_Error","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:48Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:48Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.357683856+00:00 stderr F E1208 17:47:12.357622 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_openshift-apiserver reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"openshift-apiserver\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:16.270938170+00:00 stderr F I1208 17:47:16.270240 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:16.535987114+00:00 stderr F I1208 17:47:16.535905 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:16.970317936+00:00 stderr F I1208 17:47:16.970259 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:16.970360578+00:00 stderr F I1208 17:47:16.970338 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:16.971547176+00:00 stderr F I1208 17:47:16.971524 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:18.615261768+00:00 stderr F I1208 17:47:18.615182 1 reflector.go:430] "Caches populated" type="*v1.APIService" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:19.335031436+00:00 stderr F I1208 17:47:19.334602 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:20.473233525+00:00 stderr F I1208 17:47:20.472848 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:20.797924716+00:00 stderr F I1208 17:47:20.797801 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:22.690779622+00:00 stderr F I1208 17:47:22.690438 1 reflector.go:430] "Caches populated" type="*v1.Image" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:30.410929405+00:00 stderr F I1208 17:47:30.410201 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:31.702515973+00:00 stderr F I1208 17:47:31.702464 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:31.702705949+00:00 stderr F I1208 17:47:31.702687 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:31.704933489+00:00 stderr F I1208 17:47:31.704822 1 warnings.go:110] "Warning: v1 
Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:32.102800614+00:00 stderr F I1208 17:47:32.102644 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:34.104290888+00:00 stderr F I1208 17:47:34.103555 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:35.359476300+00:00 stderr F I1208 17:47:35.358945 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:36.102266843+00:00 stderr F I1208 17:47:36.102194 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:37.986270889+00:00 stderr F I1208 17:47:37.986186 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:38.819318322+00:00 stderr F I1208 17:47:38.818592 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:40.281240042+00:00 stderr F I1208 17:47:40.281138 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:41.281128547+00:00 stderr F I1208 17:47:41.281070 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:45.388827223+00:00 stderr F I1208 17:47:45.388078 1 reflector.go:430] "Caches populated" type="*v1.Project" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:49.825415153+00:00 stderr F I1208 17:47:49.824845 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:52.230130948+00:00 stderr F I1208 17:47:52.229695 1 reflector.go:430] "Caches populated" type="*v1.OpenShiftAPIServer" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:52.913530195+00:00 stderr F I1208 17:47:52.913404 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:52.914381220+00:00 stderr F I1208 17:47:52.914258 1 status_controller.go:229] clusteroperator/openshift-apiserver diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:34Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-03T08:58:14Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:47:52Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:48Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:48Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:52.929046204+00:00 stderr F I1208 17:47:52.928968 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-apiserver-operator", Name:"openshift-apiserver-operator", UID:"f8199ef4-1467-44ed-9019-69c1f1737f70", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for 
clusteroperator/openshift-apiserver changed: Available changed from False to True ("All is well") 2025-12-08T17:47:54.617689475+00:00 stderr F I1208 17:47:54.616966 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:54.672205114+00:00 stderr F I1208 17:47:54.672093 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:55.050758167+00:00 stderr F I1208 17:47:55.050680 1 reflector.go:430] "Caches populated" type="*v1.CustomResourceDefinition" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:56.241995459+00:00 stderr F I1208 17:47:56.241905 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:57.384561518+00:00 stderr F I1208 17:47:57.384513 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:57.788243732+00:00 stderr F I1208 17:47:57.788168 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:59.591331644+00:00 stderr F I1208 17:47:59.591055 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:00.087062723+00:00 stderr F I1208 17:48:00.086985 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:02.085095444+00:00 stderr F I1208 17:48:02.084726 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:02.477799486+00:00 stderr F I1208 17:48:02.477734 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:04.271718552+00:00 stderr F I1208 17:48:04.271294 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:48:04.271826895+00:00 stderr F I1208 17:48:04.271783 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:04.273414184+00:00 stderr F I1208 17:48:04.273347 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:48:08.309519678+00:00 stderr F I1208 17:48:08.308297 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:09.321253319+00:00 stderr F I1208 17:48:09.320919 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:17.113540269+00:00 stderr F I1208 17:48:17.112975 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:54:10.709667648+00:00 stderr F I1208 17:54:10.708921 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:54:10.974930381+00:00 stderr F I1208 17:54:10.974773 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:54:45.277517529+00:00 stderr F I1208 
17:54:45.277161 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:00:16.714646349+00:00 stderr F I1208 18:00:16.713700 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:01:46.978205797+00:00 stderr F I1208 18:01:46.977450 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:04:20.281648283+00:00 stderr F I1208 18:04:20.281039 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice"

home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-console_networking-console-plugin-5ff7774fd9-nljh6_6a9ae5f6-97bd-46ac-bafa-ca1b4452a141/networking-console-plugin/0.log

2025-12-08T17:44:42.846710278+00:00 stdout F 2025/12/08 17:44:42 [notice] 1#1: using the "epoll" event method 2025-12-08T17:44:42.846710278+00:00 stdout F 2025/12/08 17:44:42 [notice] 1#1: nginx/1.24.0 2025-12-08T17:44:42.846710278+00:00 stdout F 2025/12/08 17:44:42 [notice] 1#1: built by gcc 11.4.1 20231218 (Red Hat 11.4.1-4) (GCC) 2025-12-08T17:44:42.846710278+00:00 stdout F 2025/12/08 17:44:42 [notice] 1#1: OS: Linux 5.14.0-570.57.1.el9_6.x86_64 2025-12-08T17:44:42.846710278+00:00 stdout F 2025/12/08 17:44:42 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576 2025-12-08T17:44:42.846953905+00:00 stdout F 2025/12/08 17:44:42 [notice] 1#1: start worker processes 2025-12-08T17:44:42.847141760+00:00 stdout F 2025/12/08 17:44:42 [notice] 1#1: start worker process 5

home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/oauth-apiserver/0.log

2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.260895 1 feature_gate.go:385] feature gates: {map[]} 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262102 1 flags.go:64] FLAG: --accesstoken-inactivity-timeout="0s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262113 1 flags.go:64] FLAG: --admission-control-config-file="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262118 1 flags.go:64] FLAG: --advertise-address="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262122 1 flags.go:64] FLAG: --api-audiences="[https://kubernetes.default.svc]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262128 1 flags.go:64] FLAG: --audit-log-batch-buffer-size="10000" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262132 1 flags.go:64] FLAG: --audit-log-batch-max-size="1" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262135 1 flags.go:64] FLAG: --audit-log-batch-max-wait="0s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262137 1 flags.go:64] FLAG: --audit-log-batch-throttle-burst="0" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262140 1 flags.go:64] FLAG: --audit-log-batch-throttle-enable="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262147 1 flags.go:64] FLAG: --audit-log-batch-throttle-qps="0" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262151 1 flags.go:64] FLAG: --audit-log-compress="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262153 1 flags.go:64] FLAG: --audit-log-format="json" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262156 1 flags.go:64] FLAG: --audit-log-maxage="0" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262159 1 flags.go:64] FLAG: --audit-log-maxbackup="10" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262161 1 flags.go:64] FLAG: --audit-log-maxsize="100" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262164 1 flags.go:64] FLAG: --audit-log-mode="blocking" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262166 1 flags.go:64] FLAG: --audit-log-path="/var/log/oauth-apiserver/audit.log" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262169 1 flags.go:64] FLAG: --audit-log-truncate-enabled="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262171 1 flags.go:64] FLAG: --audit-log-truncate-max-batch-size="10485760" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262175 1 flags.go:64] FLAG: --audit-log-truncate-max-event-size="102400" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262178 1 flags.go:64] FLAG: --audit-log-version="audit.k8s.io/v1" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208
17:44:22.262180 1 flags.go:64] FLAG: --audit-policy-file="/var/run/configmaps/audit/policy.yaml" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262183 1 flags.go:64] FLAG: --audit-webhook-batch-buffer-size="10000" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262186 1 flags.go:64] FLAG: --audit-webhook-batch-initial-backoff="10s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262188 1 flags.go:64] FLAG: --audit-webhook-batch-max-size="400" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262191 1 flags.go:64] FLAG: --audit-webhook-batch-max-wait="30s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262195 1 flags.go:64] FLAG: --audit-webhook-batch-throttle-burst="15" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262198 1 flags.go:64] FLAG: --audit-webhook-batch-throttle-enable="true" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262200 1 flags.go:64] FLAG: --audit-webhook-batch-throttle-qps="10" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262203 1 flags.go:64] FLAG: --audit-webhook-config-file="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262206 1 flags.go:64] FLAG: --audit-webhook-initial-backoff="10s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262208 1 flags.go:64] FLAG: --audit-webhook-mode="batch" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262211 1 flags.go:64] FLAG: --audit-webhook-truncate-enabled="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262213 1 flags.go:64] FLAG: --audit-webhook-truncate-max-batch-size="10485760" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262216 1 flags.go:64] FLAG: --audit-webhook-truncate-max-event-size="102400" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262219 1 flags.go:64] FLAG: --audit-webhook-version="audit.k8s.io/v1" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262221 1 flags.go:64] FLAG: --authentication-kubeconfig="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262224 1 flags.go:64] FLAG: --authentication-skip-lookup="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262226 1 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="10s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262229 1 flags.go:64] FLAG: --authentication-tolerate-lookup-failure="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262232 1 flags.go:64] FLAG: --authorization-always-allow-paths="[/healthz,/readyz,/livez]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262236 1 flags.go:64] FLAG: --authorization-kubeconfig="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262239 1 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="10s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262241 1 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="10s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262244 1 flags.go:64] FLAG: --bind-address="0.0.0.0" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262247 1 flags.go:64] FLAG: --cert-dir="apiserver.local.config/certificates" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262251 1 flags.go:64] FLAG: --client-ca-file="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262253 1 flags.go:64] FLAG: --contention-profiling="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262256 1 flags.go:64] FLAG: 
--cors-allowed-origins="[//127\\.0\\.0\\.1(:|$),//localhost(:|$)]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262259 1 flags.go:64] FLAG: --debug-socket-path="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262262 1 flags.go:64] FLAG: --default-watch-cache-size="100" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262264 1 flags.go:64] FLAG: --delete-collection-workers="1" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262267 1 flags.go:64] FLAG: --disable-admission-plugins="[]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262270 1 flags.go:64] FLAG: --disable-http2-serving="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262273 1 flags.go:64] FLAG: --egress-selector-config-file="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262275 1 flags.go:64] FLAG: --emulated-version="[]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262278 1 flags.go:64] FLAG: --emulation-forward-compatible="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262281 1 flags.go:64] FLAG: --enable-admission-plugins="[]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262284 1 flags.go:64] FLAG: --enable-garbage-collector="true" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262287 1 flags.go:64] FLAG: --enable-priority-and-fairness="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262289 1 flags.go:64] FLAG: --encryption-provider-config="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262292 1 flags.go:64] FLAG: --encryption-provider-config-automatic-reload="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262294 1 flags.go:64] FLAG: --etcd-cafile="/var/run/configmaps/etcd-serving-ca/ca-bundle.crt" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262297 1 flags.go:64] FLAG: --etcd-certfile="/var/run/secrets/etcd-client/tls.crt" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262300 1 flags.go:64] FLAG: --etcd-compaction-interval="5m0s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262303 1 flags.go:64] FLAG: --etcd-count-metric-poll-period="1m0s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262305 1 flags.go:64] FLAG: --etcd-db-metric-poll-interval="30s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262308 1 flags.go:64] FLAG: --etcd-healthcheck-timeout="9s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262310 1 flags.go:64] FLAG: --etcd-keyfile="/var/run/secrets/etcd-client/tls.key" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262313 1 flags.go:64] FLAG: --etcd-prefix="openshift.io" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262316 1 flags.go:64] FLAG: --etcd-readycheck-timeout="9s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262318 1 flags.go:64] FLAG: --etcd-servers="[https://192.168.126.11:2379]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262322 1 flags.go:64] FLAG: --etcd-servers-overrides="[]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262325 1 flags.go:64] FLAG: --external-hostname="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262328 1 flags.go:64] FLAG: --feature-gates="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262336 1 flags.go:64] FLAG: --goaway-chance="0" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262340 1 flags.go:64] FLAG: --help="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 
17:44:22.262342 1 flags.go:64] FLAG: --http2-max-streams-per-connection="1000" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262345 1 flags.go:64] FLAG: --kubeconfig="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262347 1 flags.go:64] FLAG: --lease-reuse-duration-seconds="60" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262350 1 flags.go:64] FLAG: --livez-grace-period="0s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262352 1 flags.go:64] FLAG: --log-flush-frequency="5s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262356 1 flags.go:64] FLAG: --max-mutating-requests-inflight="200" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262359 1 flags.go:64] FLAG: --max-requests-inflight="400" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262361 1 flags.go:64] FLAG: --min-request-timeout="1800" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262364 1 flags.go:64] FLAG: --permit-address-sharing="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262385 1 flags.go:64] FLAG: --permit-port-sharing="false" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262388 1 flags.go:64] FLAG: --profiling="true" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262391 1 flags.go:64] FLAG: --request-timeout="1m0s" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262393 1 flags.go:64] FLAG: --requestheader-allowed-names="[]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262397 1 flags.go:64] FLAG: --requestheader-client-ca-file="" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262399 1 flags.go:64] FLAG: --requestheader-extra-headers-prefix="[x-remote-extra-]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262403 1 flags.go:64] FLAG: --requestheader-group-headers="[x-remote-group]" 2025-12-08T17:44:22.264936955+00:00 stderr F I1208 17:44:22.262406 1 flags.go:64] FLAG: --requestheader-uid-headers="[]" 2025-12-08T17:44:22.264936955+00:00 stderr P I1208 17:44:22.262410 1 flags.go:64] FLAG: --re 2025-12-08T17:44:22.265112290+00:00 stderr F questheader-username-headers="[x-remote-user]" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262415 1 flags.go:64] FLAG: --runtime-config-emulation-forward-compatible="false" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262417 1 flags.go:64] FLAG: --secure-port="8443" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262420 1 flags.go:64] FLAG: --shutdown-delay-duration="50s" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262422 1 flags.go:64] FLAG: --shutdown-send-retry-after="true" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262425 1 flags.go:64] FLAG: --shutdown-watch-termination-grace-period="0s" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262428 1 flags.go:64] FLAG: --storage-backend="" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262430 1 flags.go:64] FLAG: --storage-initialization-timeout="1m0s" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262433 1 flags.go:64] FLAG: --storage-media-type="application/json" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262435 1 flags.go:64] FLAG: --strict-transport-security-directives="[]" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262439 1 flags.go:64] FLAG: --tls-cert-file="/var/run/secrets/serving-cert/tls.crt" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262442 1 flags.go:64] FLAG: 
--tls-cipher-suites="[TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256]" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262449 1 flags.go:64] FLAG: --tls-min-version="VersionTLS12" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262452 1 flags.go:64] FLAG: --tls-private-key-file="/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262455 1 flags.go:64] FLAG: --tls-sni-cert-key="[]" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262467 1 flags.go:64] FLAG: --tracing-config-file="" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262470 1 flags.go:64] FLAG: --v="2" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262473 1 flags.go:64] FLAG: --vmodule="" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262476 1 flags.go:64] FLAG: --watch-cache="true" 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262478 1 flags.go:64] FLAG: --watch-cache-sizes="[]" 2025-12-08T17:44:22.265112290+00:00 stderr F W1208 17:44:22.262673 1 registry.go:321] setting componentGlobalsRegistry in SetFallback. We recommend calling componentGlobalsRegistry.Set() right after parsing flags to avoid using feature gates before their final values are set by the flags. 2025-12-08T17:44:22.265112290+00:00 stderr F I1208 17:44:22.262686 1 registry.go:355] setting kube:feature gate emulation version to 1.33 2025-12-08T17:44:22.339108959+00:00 stderr F I1208 17:44:22.338893 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:23.207842165+00:00 stderr F I1208 17:44:23.204902 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:44:23.210832236+00:00 stderr F I1208 17:44:23.210800 1 audit.go:340] Using audit backend: ignoreErrors 2025-12-08T17:44:23.250113628+00:00 stderr F I1208 17:44:23.249687 1 shared_informer.go:350] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" 2025-12-08T17:44:23.253946782+00:00 stderr F I1208 17:44:23.253898 1 plugins.go:157] Loaded 3 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,MutatingAdmissionPolicy,MutatingAdmissionWebhook. 2025-12-08T17:44:23.253946782+00:00 stderr F I1208 17:44:23.253923 1 plugins.go:160] Loaded 2 validating admission controller(s) successfully in the following order: ValidatingAdmissionPolicy,ValidatingAdmissionWebhook. 
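
Note: the --tls-min-version="VersionTLS12" and --tls-cipher-suites flags logged above pin the oauth-apiserver's serving endpoint (--secure-port="8443") to TLS 1.2+ with ECDHE/AEAD suites. The Go sketch below only illustrates a comparable server-side TLS policy; it is an assumption-level example, not code from this component, and the certificate paths are placeholders rather than the secrets mounted in the pod. In Go's crypto/tls the TLS 1.3 suites (TLS_AES_128_GCM_SHA256 and friends) are always enabled and are not selected through CipherSuites.

// tls_policy_sketch.go -- illustrative only; not part of the CI job above.
package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	cfg := &tls.Config{
		MinVersion: tls.VersionTLS12, // mirrors --tls-min-version="VersionTLS12"
		// TLS 1.2 ECDHE+AEAD suites comparable to the --tls-cipher-suites list above.
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
			tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
		},
	}
	// Listen on :8443 as in --secure-port; cert/key paths are placeholders.
	srv := &http.Server{Addr: ":8443", Handler: http.NotFoundHandler(), TLSConfig: cfg}
	log.Fatal(srv.ListenAndServeTLS("tls.crt", "tls.key"))
}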
2025-12-08T17:44:23.256526332+00:00 stderr F I1208 17:44:23.254700 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:44:23.256526332+00:00 stderr F I1208 17:44:23.254717 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:44:23.256526332+00:00 stderr F I1208 17:44:23.254729 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400 2025-12-08T17:44:23.256526332+00:00 stderr F I1208 17:44:23.254734 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200 2025-12-08T17:44:23.284912826+00:00 stderr F I1208 17:44:23.280483 1 store.go:1663] "Monitoring resource count at path" resource="oauthclients.oauth.openshift.io" path="//oauth/clients" 2025-12-08T17:44:23.289216444+00:00 stderr F I1208 17:44:23.289186 1 cacher.go:469] cacher (oauthclients.oauth.openshift.io): initialized 2025-12-08T17:44:23.289234705+00:00 stderr F I1208 17:44:23.289219 1 reflector.go:430] "Caches populated" type="*oauth.OAuthClient" reflector="storage/cacher.go:/oauth/clients" 2025-12-08T17:44:23.294309063+00:00 stderr F I1208 17:44:23.294279 1 store.go:1663] "Monitoring resource count at path" resource="oauthauthorizetokens.oauth.openshift.io" path="//oauth/authorizetokens" 2025-12-08T17:44:23.295542457+00:00 stderr F I1208 17:44:23.295513 1 cacher.go:469] cacher (oauthauthorizetokens.oauth.openshift.io): initialized 2025-12-08T17:44:23.295558627+00:00 stderr F I1208 17:44:23.295543 1 reflector.go:430] "Caches populated" type="*oauth.OAuthAuthorizeToken" reflector="storage/cacher.go:/oauth/authorizetokens" 2025-12-08T17:44:23.308828329+00:00 stderr F I1208 17:44:23.308784 1 store.go:1663] "Monitoring resource count at path" resource="oauthaccesstokens.oauth.openshift.io" path="//oauth/accesstokens" 2025-12-08T17:44:23.313569899+00:00 stderr F I1208 17:44:23.311190 1 cacher.go:469] cacher (oauthaccesstokens.oauth.openshift.io): initialized 2025-12-08T17:44:23.313569899+00:00 stderr F I1208 17:44:23.311213 1 reflector.go:430] "Caches populated" type="*oauth.OAuthAccessToken" reflector="storage/cacher.go:/oauth/accesstokens" 2025-12-08T17:44:23.320645251+00:00 stderr F I1208 17:44:23.320425 1 store.go:1663] "Monitoring resource count at path" resource="oauthclientauthorizations.oauth.openshift.io" path="//oauth/clientauthorizations" 2025-12-08T17:44:23.323858179+00:00 stderr F I1208 17:44:23.323821 1 handler.go:288] Adding GroupVersion oauth.openshift.io v1 to ResourceManager 2025-12-08T17:44:23.328391093+00:00 stderr F I1208 17:44:23.328225 1 cacher.go:469] cacher (oauthclientauthorizations.oauth.openshift.io): initialized 2025-12-08T17:44:23.328391093+00:00 stderr F I1208 17:44:23.328252 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:44:23.328391093+00:00 stderr F I1208 17:44:23.328265 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:44:23.328391093+00:00 stderr F I1208 17:44:23.328271 1 reflector.go:430] "Caches populated" type="*oauth.OAuthClientAuthorization" reflector="storage/cacher.go:/oauth/clientauthorizations" 2025-12-08T17:44:23.336363781+00:00 stderr F I1208 17:44:23.336284 1 store.go:1663] "Monitoring resource count at path" resource="users.user.openshift.io" path="//users" 2025-12-08T17:44:23.339388583+00:00 stderr F I1208 17:44:23.339361 1 cacher.go:469] cacher (users.user.openshift.io): initialized 2025-12-08T17:44:23.339412444+00:00 stderr F I1208 17:44:23.339390 1 reflector.go:430] "Caches populated" type="*user.User" reflector="storage/cacher.go:/users" 
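
Note: the maxinflight.go lines above ("Initialized nonMutatingChan" len=400, "Initialized mutatingChan" len=200) reflect the --max-requests-inflight="400" and --max-mutating-requests-inflight="200" flags from the dump earlier. As a rough, simplified illustration of that pattern (a bounded channel used as an in-flight limiter), here is a small Go sketch; it is not the k8s.io/apiserver filter itself, and the handler and port are made up for the example.

// inflight_sketch.go -- simplified in-flight request limiter, for illustration only.
package main

import (
	"log"
	"net/http"
)

// withMaxInFlight rejects requests with 429 once `limit` requests are already in flight.
func withMaxInFlight(limit int, next http.Handler) http.Handler {
	slots := make(chan struct{}, limit) // e.g. 400 for read-only, 200 for mutating
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case slots <- struct{}{}: // acquire a slot if one is free
			defer func() { <-slots }() // release it when the request finishes
			next.ServeHTTP(w, r)
		default:
			http.Error(w, "too many requests in flight", http.StatusTooManyRequests)
		}
	})
}

func main() {
	handler := withMaxInFlight(400, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	log.Fatal(http.ListenAndServe(":8080", handler))
}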
2025-12-08T17:44:23.360260012+00:00 stderr F I1208 17:44:23.357976 1 store.go:1663] "Monitoring resource count at path" resource="identities.user.openshift.io" path="//useridentities" 2025-12-08T17:44:23.360828327+00:00 stderr F I1208 17:44:23.360805 1 cacher.go:469] cacher (identities.user.openshift.io): initialized 2025-12-08T17:44:23.360897460+00:00 stderr F I1208 17:44:23.360868 1 reflector.go:430] "Caches populated" type="*user.Identity" reflector="storage/cacher.go:/useridentities" 2025-12-08T17:44:23.371560670+00:00 stderr F I1208 17:44:23.369899 1 store.go:1663] "Monitoring resource count at path" resource="groups.user.openshift.io" path="//groups" 2025-12-08T17:44:23.374255853+00:00 stderr F I1208 17:44:23.373411 1 handler.go:288] Adding GroupVersion user.openshift.io v1 to ResourceManager 2025-12-08T17:44:23.374255853+00:00 stderr F I1208 17:44:23.373498 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:44:23.374255853+00:00 stderr F I1208 17:44:23.373509 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:44:23.374846129+00:00 stderr F I1208 17:44:23.374478 1 cacher.go:469] cacher (groups.user.openshift.io): initialized 2025-12-08T17:44:23.374846129+00:00 stderr F I1208 17:44:23.374534 1 reflector.go:430] "Caches populated" type="*user.Group" reflector="storage/cacher.go:/groups" 2025-12-08T17:44:23.579451181+00:00 stderr F I1208 17:44:23.579402 1 genericapiserver.go:583] "[graceful-termination] using HTTP Server shutdown timeout" shutdownTimeout="2s" 2025-12-08T17:44:23.579739179+00:00 stderr F I1208 17:44:23.579725 1 genericapiserver.go:551] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:44:23.587303595+00:00 stderr F I1208 17:44:23.587176 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"api.openshift-oauth-apiserver.svc\" [serving] validServingFor=[api.openshift-oauth-apiserver.svc,api.openshift-oauth-apiserver.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:44:23.58714188 +0000 UTC))" 2025-12-08T17:44:23.587374897+00:00 stderr F I1208 17:44:23.587356 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215862\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:44:23.587327755 +0000 UTC))" 2025-12-08T17:44:23.587383397+00:00 stderr F I1208 17:44:23.587375 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:23.587444618+00:00 stderr F I1208 17:44:23.587409 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:23.587476559+00:00 stderr F I1208 17:44:23.587460 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:23.587512240+00:00 stderr F I1208 17:44:23.587417 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:23.587512240+00:00 stderr F I1208 17:44:23.587503 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 
2025-12-08T17:44:23.587520350+00:00 stderr F I1208 17:44:23.587507 1 genericapiserver.go:706] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:44:23.587558982+00:00 stderr F I1208 17:44:23.587493 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:23.587600863+00:00 stderr F I1208 17:44:23.587590 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:23.587631963+00:00 stderr F I1208 17:44:23.587534 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:23.588167859+00:00 stderr F I1208 17:44:23.587601 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:23.590789881+00:00 stderr F I1208 17:44:23.590130 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.618003212+00:00 stderr F I1208 17:44:23.616279 1 reflector.go:430] "Caches populated" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.618003212+00:00 stderr F I1208 17:44:23.616782 1 reflector.go:430] "Caches populated" type="*v1.Group" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.625427515+00:00 stderr F I1208 17:44:23.622018 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.625427515+00:00 stderr F I1208 17:44:23.622323 1 reflector.go:430] "Caches populated" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.625427515+00:00 stderr F I1208 17:44:23.622746 1 reflector.go:430] "Caches populated" type="*v1.ValidatingAdmissionPolicyBinding" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.625427515+00:00 stderr F I1208 17:44:23.622987 1 reflector.go:430] "Caches populated" type="*v1.ValidatingAdmissionPolicy" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.625427515+00:00 stderr F I1208 17:44:23.623216 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.628697044+00:00 stderr F I1208 17:44:23.627927 1 reflector.go:430] "Caches populated" type="*v1.OAuthClient" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.634662297+00:00 stderr F I1208 17:44:23.631855 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:23.650502559+00:00 stderr F I1208 17:44:23.650453 1 shared_informer.go:357] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" 2025-12-08T17:44:23.650552090+00:00 stderr F I1208 17:44:23.650542 1 policy_source.go:240] refreshing policies 2025-12-08T17:44:23.691248770+00:00 stderr F I1208 17:44:23.691184 1 shared_informer.go:357] "Caches are synced" 
controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:23.691327132+00:00 stderr F I1208 17:44:23.691307 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:23.691405334+00:00 stderr F I1208 17:44:23.691384 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.691756 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:23.691730874 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.691788 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:23.691772255 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.691806 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:23.691794555 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.691822 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:23.691811926 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.691838 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:23.691826736 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.691859 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:23.691843847 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.691895 1 tlsconfig.go:181] "Loaded client CA" index=6 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:23.691864707 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692111 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"api.openshift-oauth-apiserver.svc\" [serving] validServingFor=[api.openshift-oauth-apiserver.svc,api.openshift-oauth-apiserver.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:44:23.692096914 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692299 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215862\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:44:23.692285079 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692431 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:23.692418362 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692453 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:23.692441833 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692470 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:23.692458983 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692486 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:23.692475284 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692510 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 
certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:23.692492044 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692532 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:23.692519625 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692551 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:23.692537395 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692567 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:23.692555816 +0000 UTC))" 2025-12-08T17:44:23.692821203+00:00 stderr F I1208 17:44:23.692767 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"api.openshift-oauth-apiserver.svc\" [serving] validServingFor=[api.openshift-oauth-apiserver.svc,api.openshift-oauth-apiserver.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:44:23.692751531 +0000 UTC))" 2025-12-08T17:44:23.693535772+00:00 stderr F I1208 17:44:23.693016 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215862\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:44:23.693000348 +0000 UTC))" 2025-12-08T17:44:23.695019003+00:00 stderr F I1208 17:44:23.694981 1 policy_source.go:435] informer started for config.openshift.io/v1, Kind=Infrastructure 2025-12-08T17:44:23.701644814+00:00 stderr F I1208 17:44:23.701424 1 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Resource=infrastructures" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:24.745269551+00:00 stderr F I1208 17:44:24.745211 1 policy_source.go:240] refreshing policies 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.604791 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" 
(2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.604761319 +0000 UTC))" 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.604829 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.6048128 +0000 UTC))" 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.604847 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.604837041 +0000 UTC))" 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.604863 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.604852642 +0000 UTC))" 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.604904 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.604872832 +0000 UTC))" 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.604925 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.604913533 +0000 UTC))" 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.604949 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.604936244 +0000 UTC))" 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.604962 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.604953424 +0000 UTC))" 
2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.604976 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.604967735 +0000 UTC))" 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.604989 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.604981415 +0000 UTC))" 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.605191 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"api.openshift-oauth-apiserver.svc\" [serving] validServingFor=[api.openshift-oauth-apiserver.svc,api.openshift-oauth-apiserver.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:44:30.6051752 +0000 UTC))" 2025-12-08T17:44:30.610625269+00:00 stderr F I1208 17:44:30.605425 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215862\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:44:30.605408608 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041106 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.041066341 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041145 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.041131883 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041164 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.041151783 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041179 1 tlsconfig.go:181] "Loaded client CA" index=3 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.041168754 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041195 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.041183674 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041213 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.041199985 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041230 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.041218045 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041249 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.041236596 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041267 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.041255526 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041283 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.041273957 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041307 1 tlsconfig.go:181] "Loaded client CA" index=10 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.041295557 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041544 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"api.openshift-oauth-apiserver.svc\" [serving] validServingFor=[api.openshift-oauth-apiserver.svc,api.openshift-oauth-apiserver.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:45:16.041527554 +0000 UTC))" 2025-12-08T17:45:16.042281734+00:00 stderr F I1208 17:45:16.041717 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215863\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215862\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:45:16.041699368 +0000 UTC))" 2025-12-08T17:46:05.359002820+00:00 stderr F E1208 17:46:05.358935 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.359055072+00:00 stderr F E1208 17:46:05.359008 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.403057663+00:00 stderr F E1208 17:46:05.403016 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.403078363+00:00 stderr F E1208 17:46:05.403065 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.344218182+00:00 stderr F E1208 17:46:06.344149 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.344218182+00:00 stderr F E1208 17:46:06.344206 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.363522282+00:00 stderr F E1208 17:46:06.363429 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.363557323+00:00 stderr F E1208 17:46:06.363517 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.406503732+00:00 stderr F E1208 17:46:06.406406 1 webhook.go:269] 
Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.406503732+00:00 stderr F E1208 17:46:06.406471 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.422346228+00:00 stderr F E1208 17:46:06.422283 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.422371418+00:00 stderr F E1208 17:46:06.422360 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:10.326579687+00:00 stderr F I1208 17:47:10.326432 1 reflector.go:430] "Caches populated" type="*v1.ValidatingAdmissionPolicyBinding" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:10.883626402+00:00 stderr F I1208 17:47:10.883545 1 policy_source.go:240] refreshing policies 2025-12-08T17:47:11.872737369+00:00 stderr F I1208 17:47:11.872625 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:12.941534684+00:00 stderr F I1208 17:47:12.941440 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:17.492422211+00:00 stderr F I1208 17:47:17.492324 1 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Resource=infrastructures" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:26.159906656+00:00 stderr F I1208 17:47:26.159757 1 reflector.go:430] "Caches populated" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:29.287356476+00:00 stderr F I1208 17:47:29.287290 1 reflector.go:430] "Caches populated" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:41.788667104+00:00 stderr F I1208 17:47:41.788579 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:01.518381468+00:00 stderr F I1208 17:48:01.518289 1 reflector.go:430] "Caches populated" type="*v1.ValidatingAdmissionPolicy" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:01.922315229+00:00 stderr F I1208 17:48:01.922243 1 policy_source.go:240] refreshing policies 2025-12-08T17:48:09.761015135+00:00 stderr F I1208 17:48:09.760958 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:57:11.273660710+00:00 stderr F I1208 17:57:11.273552 1 policy_source.go:240] refreshing policies 2025-12-08T17:58:02.304273022+00:00 stderr F I1208 17:58:02.304212 1 policy_source.go:240] refreshing policies ././@LongLink0000644000000000000000000000027700000000000011611 Lustar 
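The repeated webhook.go:269 errors above show the oauth-apiserver failing to POST SubjectAccessReview objects to the kube-apiserver at 10.217.4.1:443 (connection refused). As a rough check that the authorization endpoint is reachable again, a minimal client-go sketch (illustrative only; the subject, verb, and resource below are placeholders) could issue the same kind of request:

// Minimal sketch, assuming in-cluster credentials; not part of the job output.
package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	sar := &authv1.SubjectAccessReview{
		Spec: authv1.SubjectAccessReviewSpec{
			// Placeholder subject and attributes, purely for illustration.
			User: "system:serviceaccount:service-telemetry:default",
			ResourceAttributes: &authv1.ResourceAttributes{
				Verb:     "get",
				Resource: "pods",
			},
		},
	}

	// Equivalent to POST /apis/authorization.k8s.io/v1/subjectaccessreviews,
	// the call that was being refused in the log above.
	resp, err := client.AuthorizationV1().SubjectAccessReviews().Create(
		context.TODO(), sar, metav1.CreateOptions{})
	if err != nil {
		fmt.Println("authorization endpoint unreachable:", err)
		return
	}
	fmt.Printf("allowed=%v reason=%q\n", resp.Status.Allowed, resp.Status.Reason)
}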
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/fix-audit-permissions/0.log (empty file)
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_console-64d44f6ddf-dhfvx_a272b1fd-864b-4107-a4fd-6f6ab82a1d34/console/0.log
2025-12-08T17:44:22.993936761+00:00 stderr F I1208 17:44:22.991511 1 main.go:259] Console plugins are enabled in following order:
2025-12-08T17:44:22.993936761+00:00 stderr F I1208 17:44:22.993504 1 main.go:261] - networking-console-plugin
2025-12-08T17:44:22.993936761+00:00 stderr F I1208 17:44:22.993575 1 main.go:302] Console telemetry options:
2025-12-08T17:44:22.993936761+00:00 stderr F I1208 17:44:22.993600 1 main.go:304] - CLUSTER_ID
2025-12-08T17:44:22.993936761+00:00 stderr F I1208 17:44:22.993614 1 main.go:304] - SEGMENT_API_HOST console.redhat.com/connections/api/v1
2025-12-08T17:44:22.993936761+00:00 stderr F I1208 17:44:22.993618 1 main.go:304] - SEGMENT_JS_HOST console.redhat.com/connections/cdn
2025-12-08T17:44:22.993936761+00:00 stderr F I1208 17:44:22.993621 1 main.go:304] - SEGMENT_PUBLIC_API_KEY BnuS1RP39EmLQjP21ko67oDjhbl9zpNU
2025-12-08T17:44:22.993936761+00:00 stderr F I1208 17:44:22.993624 1 main.go:304] - TELEMETER_CLIENT_DISABLED true
2025-12-08T17:44:22.993936761+00:00 stderr F W1208 17:44:22.993638 1 authoptions.go:112] Flag inactivity-timeout is set to less then 300 seconds and will be ignored!
2025-12-08T17:44:23.030919729+00:00 stderr F I1208 17:44:23.030042 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
2025-12-08T17:44:23.030919729+00:00 stderr F I1208 17:44:23.030088 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
2025-12-08T17:44:23.030919729+00:00 stderr F I1208 17:44:23.030104 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
2025-12-08T17:44:23.030919729+00:00 stderr F I1208 17:44:23.030108 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
2025-12-08T17:44:23.030919729+00:00 stderr F I1208 17:44:23.030112 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
2025-12-08T17:44:38.228582609+00:00 stderr F I1208 17:44:38.227861 1 main.go:718] Binding to [::]:8443...
2025-12-08T17:44:38.228582609+00:00 stderr F I1208 17:44:38.228555 1 main.go:723] Using TLS
2025-12-08T17:44:41.230433615+00:00 stderr F I1208 17:44:41.230007 1 metrics.go:133] serverconfig.Metrics: Update ConsolePlugin metrics...
2025-12-08T17:44:41.247935253+00:00 stderr F I1208 17:44:41.247860 1 metrics.go:143] serverconfig.Metrics: Update ConsolePlugin metrics: &map[networking:map[enabled:1]] (took 17.450076ms)
2025-12-08T17:44:43.229249403+00:00 stderr F I1208 17:44:43.228639 1 metrics.go:80] usage.Metrics: Count console users...
2025-12-08T17:44:43.662188159+00:00 stderr F I1208 17:44:43.662078 1 metrics.go:156] usage.Metrics: Update console users metrics: 0 kubeadmin, 0 cluster-admins, 0 developers, 0 unknown/errors (took 432.850314ms)
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elastic-operator-c9c86658-4qchz_1899106f-2682-474e-ad41-4dd00dbc7d4b/manager/0.log
2025-12-08T17:55:41.012165489+00:00 stderr F 2025/12/08 17:55:41 INFO GOMEMLIMIT is updated package=github.com/KimMachineGun/automemlimit/memlimit GOMEMLIMIT=966367641 previous=9223372036854775807
2025-12-08T17:55:41.042918233+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.042Z","log.logger":"manager","message":"maxprocs: Updating GOMAXPROCS=1: determined from CPU quota","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0"}
2025-12-08T17:55:41.042958414+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.042Z","log.logger":"manager","message":"Setting default container
registry","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","container_registry":"docker.elastic.co"} 2025-12-08T17:55:41.044594429+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.043Z","log.logger":"manager","message":"Setting up scheme","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0"} 2025-12-08T17:55:41.044785564+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.044Z","log.logger":"manager","message":"Operator configured to manage a single namespace","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","namespace":"service-telemetry","operator_namespace":"service-telemetry"} 2025-12-08T17:55:41.059071156+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.059Z","log.logger":"manager","message":"Setting up controllers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.068Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-agent-k8s-elastic-co-v1alpha1-agent"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.068Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-apm-k8s-elastic-co-v1-apmserver"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.068Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-apm-k8s-elastic-co-v1beta1-apmserver"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.069Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-beat-k8s-elastic-co-v1beta1-beat"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.069Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-enterprisesearch-k8s-elastic-co-v1-enterprisesearch"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.069Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-enterprisesearch-k8s-elastic-co-v1beta1-enterprisesearch"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.069Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-elasticsearch-k8s-elastic-co-v1beta1-elasticsearch"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.069Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-kibana-k8s-elastic-co-v1-kibana"} 2025-12-08T17:55:41.070919721+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:41.069Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-kibana-k8s-elastic-co-v1beta1-kibana"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.069Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-ems-k8s-elastic-co-v1alpha1-mapsservers"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.069Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-scp-k8s-elastic-co-v1alpha1-stackconfigpolicies"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.069Z","log.logger":"es-validation","message":"Registering Elasticsearch validating webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-elasticsearch-k8s-elastic-co-v1-elasticsearch"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.069Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-elasticsearch-k8s-elastic-co-v1-elasticsearch"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.070Z","log.logger":"esa-validation","message":"Registering ElasticsearchAutoscaler validating webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-autoscaling-k8s-elastic-co-v1alpha1-elasticsearchautoscaler"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.070Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-autoscaling-k8s-elastic-co-v1alpha1-elasticsearchautoscaler"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.070Z","log.logger":"ls-validation","message":"Registering Logstash validating webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-logstash-k8s-elastic-co-v1alpha1-logstash"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.070Z","log.logger":"controller-runtime.webhook","message":"Registering webhook","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/validate-logstash-k8s-elastic-co-v1alpha1-logstash"} 2025-12-08T17:55:41.070919721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.070Z","log.logger":"manager","message":"Polling for the webhook certificate to be available","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","path":"/tmp/k8s-webhook-server/serving-certs/tls.crt"} 2025-12-08T17:55:41.074626503+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.073Z","log.logger":"manager","message":"Starting the 
manager","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","uuid":"b8fa4046-2154-4c1d-9aab-c9d62dea8ab1","namespace":"service-telemetry","version":"3.2.0","build_hash":"3ed7be5a","build_date":"2025-10-30T08:32:16Z","build_snapshot":"false"} 2025-12-08T17:55:41.074965952+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.074Z","log.logger":"controller-runtime.metrics","message":"Starting metrics server","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0"} 2025-12-08T17:55:41.075326353+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.075Z","log.logger":"controller-runtime.metrics","message":"Serving metrics server","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","bindAddress":"0.0.0.0:0","secure":false} 2025-12-08T17:55:41.075384104+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.075Z","log.logger":"controller-runtime.webhook","message":"Starting webhook server","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0"} 2025-12-08T17:55:41.075603560+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.075Z","log.logger":"controller-runtime.certwatcher","message":"Updated current TLS certificate","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","cert":"/tmp/k8s-webhook-server/serving-certs/tls.crt","key":"/tmp/k8s-webhook-server/serving-certs/tls.key"} 2025-12-08T17:55:41.075698483+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.075Z","log.logger":"controller-runtime.webhook","message":"Serving webhook server","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","host":"","port":9443} 2025-12-08T17:55:41.078243953+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.078Z","log.logger":"controller-runtime.certwatcher","message":"Starting certificate poll+watcher","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","cert":"/tmp/k8s-webhook-server/serving-certs/tls.crt","key":"/tmp/k8s-webhook-server/serving-certs/tls.key","interval":10} 2025-12-08T17:55:41.078677784+00:00 stderr F I1208 17:55:41.078647 1 leaderelection.go:257] attempting to acquire leader lease service-telemetry/elastic-operator-leader... 
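The leaderelection.go:257/:271 lines here come from client-go's leader election helper acquiring the service-telemetry/elastic-operator-leader lease. A minimal sketch of that pattern (not the operator's own code), assuming in-cluster credentials and placeholder callbacks, looks like this:

// Illustrative sketch of client-go Lease-based leader election; the lease
// name and namespace are taken from the log, everything else is a placeholder.
package main

import (
	"context"
	"log"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	hostname, _ := os.Hostname()

	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      "elastic-operator-leader",
			Namespace: "service-telemetry",
		},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: hostname},
	}

	// Emits the "attempting to acquire leader lease ..." and
	// "successfully acquired lease" messages seen in the log.
	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:            lock,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		ReleaseOnCancel: true,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				log.Println("lease acquired; starting controllers")
			},
			OnStoppedLeading: func() {
				log.Println("lease lost; shutting down")
			},
		},
	})
}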
2025-12-08T17:55:41.088461592+00:00 stderr F I1208 17:55:41.088421 1 leaderelection.go:271] successfully acquired lease service-telemetry/elastic-operator-leader 2025-12-08T17:55:41.088799141+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.088Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apmserver-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.089566113+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.089Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-autoscaler","source":"kind source: *v1.Elasticsearch"} 2025-12-08T17:55:41.089696717+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.089Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apmserver-controller","source":"kind source: *v1.ApmServer"} 2025-12-08T17:55:41.089799069+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.089Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apmserver-controller","source":"kind source: *v1.Deployment"} 2025-12-08T17:55:41.089932043+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.089Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apmserver-controller","source":"kind source: *v1.Pod"} 2025-12-08T17:55:41.090044356+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.090Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apmserver-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.090180990+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.090Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apmserver-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.090199390+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.090Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apmserver-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.090308183+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.090Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller","source":"channel source: 0xc00047abd0"} 2025-12-08T17:55:41.090662493+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.090Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"enterprisesearch-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.091631209+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:41.090Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-autoscaler","source":"kind source: *v1alpha1.ElasticsearchAutoscaler"} 2025-12-08T17:55:41.093965473+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.093Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller","source":"kind source: *v1.Elasticsearch"} 2025-12-08T17:55:41.094079996+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.094Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller","source":"kind source: *v1.StatefulSet"} 2025-12-08T17:55:41.094490738+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.094Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller","source":"kind source: *v1.Pod"} 2025-12-08T17:55:41.094704683+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.094Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.095312251+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.095Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller","source":"kind source: *v1.ConfigMap"} 2025-12-08T17:55:41.095442544+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.095Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller","source":"kind source: *v1.PodDisruptionBudget"} 2025-12-08T17:55:41.095787814+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.095Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.096065141+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.096Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.096629527+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.096Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"license-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.096854293+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.096Z","log.logger":"manager.eck-operator","message":"Starting 
EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"enterprisesearch-controller","source":"kind source: *v1.EnterpriseSearch"} 2025-12-08T17:55:41.096974066+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.096Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"enterprisesearch-controller","source":"kind source: *v1.Deployment"} 2025-12-08T17:55:41.097027327+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.097Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"enterprisesearch-controller","source":"kind source: *v1.Pod"} 2025-12-08T17:55:41.097070198+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.097Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"enterprisesearch-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.097123500+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.097Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"enterprisesearch-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.097170301+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.097Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"enterprisesearch-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.097912002+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.097Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kibana-controller","source":"kind source: *v1.Kibana"} 2025-12-08T17:55:41.097993394+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.097Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kibana-controller","source":"kind source: *v1.Deployment"} 2025-12-08T17:55:41.098042366+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.098Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kibana-controller","source":"kind source: *v1.Pod"} 2025-12-08T17:55:41.098114348+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.098Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kibana-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.098160029+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.098Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kibana-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.098206920+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:41.098Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kibana-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.098247131+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.098Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kibana-controller","source":"kind source: *v1.ConfigMap"} 2025-12-08T17:55:41.098322293+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.098Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kibana-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.098461387+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.098Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.098581940+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.098Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.098642182+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.098Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"license-controller","source":"kind source: *v1.Elasticsearch"} 2025-12-08T17:55:41.104775780+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.104Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-controller","source":"kind source: *v1beta1.Beat"} 2025-12-08T17:55:41.105012847+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.104Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-controller","source":"kind source: *v1.DaemonSet"} 2025-12-08T17:55:41.105264193+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.105Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-controller","source":"kind source: *v1.Deployment"} 2025-12-08T17:55:41.105314295+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.105Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-controller","source":"kind source: *v1.Pod"} 2025-12-08T17:55:41.105415027+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.105Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.105471899+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:41.105Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.105644824+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.105Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"trial-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.105809468+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.105Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"stackconfigpolicy-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.105890780+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.105Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-controller","source":"kind source: *v1alpha1.Agent"} 2025-12-08T17:55:41.106096547+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.106Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-controller","source":"kind source: *v1.DaemonSet"} 2025-12-08T17:55:41.106169219+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.106Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-controller","source":"kind source: *v1.Deployment"} 2025-12-08T17:55:41.106232911+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.106Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-controller","source":"kind source: *v1.StatefulSet"} 2025-12-08T17:55:41.106288102+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.106Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-controller","source":"kind source: *v1.Pod"} 2025-12-08T17:55:41.106480837+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.106Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.106480837+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.106Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.107377212+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.107Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"maps-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.107394232+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:41.107Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"remotecluster-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.107542856+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.107Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"stackconfigpolicy-controller","source":"kind source: *v1alpha1.StackConfigPolicy"} 2025-12-08T17:55:41.108576884+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.107Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"stackconfigpolicy-controller","source":"kind source: *v1.Elasticsearch"} 2025-12-08T17:55:41.108576884+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.107Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"stackconfigpolicy-controller","source":"kind source: *v1.Kibana"} 2025-12-08T17:55:41.108576884+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.107Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"stackconfigpolicy-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.108576884+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.108Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"maps-controller","source":"kind source: *v1alpha1.ElasticMapsServer"} 2025-12-08T17:55:41.108604395+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.108Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"maps-controller","source":"kind source: *v1.Deployment"} 2025-12-08T17:55:41.108678327+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.108Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"maps-controller","source":"kind source: *v1.Pod"} 2025-12-08T17:55:41.108753249+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.108Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"maps-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.108836001+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.108Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"maps-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.108901614+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.108Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"maps-controller","source":"kind source: 
*v1.Secret"} 2025-12-08T17:55:41.109079269+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.109Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.109223483+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.109Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-kibana-association-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.109318105+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.109Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"remotecluster-controller","source":"kind source: *v1.Elasticsearch"} 2025-12-08T17:55:41.109382027+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.109Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"remotecluster-controller","source":"kind source: *v1.Elasticsearch"} 2025-12-08T17:55:41.110181339+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-controller","source":"kind source: *v1alpha1.Logstash"} 2025-12-08T17:55:41.110270521+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-controller","source":"kind source: *v1.StatefulSet"} 2025-12-08T17:55:41.110311672+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-controller","source":"kind source: *v1.Pod"} 2025-12-08T17:55:41.110382994+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.110421965+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.110468676+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.110624931+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting 
EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-es-association-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.110685772+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-ent-association-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.110742174+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-kibana-association-controller","source":"kind source: *v1.ApmServer"} 2025-12-08T17:55:41.110796395+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-kibana-association-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.110846106+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-kibana-association-controller","source":"kind source: *v1.Kibana"} 2025-12-08T17:55:41.110957590+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.110Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-kibana-association-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.111340510+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.111Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"remotecluster-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.123110134+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.123Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-es-association-controller","source":"kind source: *v1.ApmServer"} 2025-12-08T17:55:41.123253888+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.123Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-es-association-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.123366711+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.123Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-es-association-controller","source":"kind source: *v1.Elasticsearch"} 2025-12-08T17:55:41.123459713+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.123Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-es-association-controller","source":"kind source: *v1.Secret"} 
2025-12-08T17:55:41.123969597+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.123Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-es-association-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.124471601+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.124Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-es-association-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.124904202+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.124Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-ent-association-controller","source":"kind source: *v1.Kibana"} 2025-12-08T17:55:41.125255182+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.125Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-ent-association-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.125492778+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.125Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-ent-association-controller","source":"kind source: *v1.EnterpriseSearch"} 2025-12-08T17:55:41.126495126+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.126Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-es-association-controller","source":"kind source: *v1.Kibana"} 2025-12-08T17:55:41.126547118+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.126Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-es-association-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.126618770+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.126Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-es-association-controller","source":"kind source: *v1.Elasticsearch"} 2025-12-08T17:55:41.126683992+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.126Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-es-association-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.126820595+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.126Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"ent-es-association-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.126962269+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.126Z","log.logger":"manager.eck-operator","message":"Starting 
EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-es-association-controller","source":"kind source: *v1.Service"} 2025-12-08T17:55:41.127046071+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.127Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-es-association-controller","source":"kind source: *v1beta1.Beat"} 2025-12-08T17:55:41.127111253+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.127Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-es-association-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.127164365+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.127Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-es-association-controller","source":"kind source: *v1.Elasticsearch"} 2025-12-08T17:55:41.127217236+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.127Z","log.logger":"manager.eck-operator","message":"Starting EventSource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-es-association-controller","source":"kind source: *v1.Secret"} 2025-12-08T17:55:41.129730245+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.129Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-kibana-association-controller"} 2025-12-08T17:55:41.129730245+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.129Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-kibana-association-controller","worker count":3} 2025-12-08T17:55:41.130158887+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.130Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-fleetserver-association-controller"} 2025-12-08T17:55:41.130158887+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.130Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-fleetserver-association-controller","worker count":3} 2025-12-08T17:55:41.130428224+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.130Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-kibana-association-controller"} 2025-12-08T17:55:41.130428224+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.130Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-kibana-association-controller","worker count":3} 2025-12-08T17:55:41.130845105+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.130Z","log.logger":"manager.eck-operator","message":"Starting 
Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-es-association-controller"} 2025-12-08T17:55:41.130845105+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.130Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-es-association-controller","worker count":3} 2025-12-08T17:55:41.131172184+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.131Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"ems-es-association-controller"} 2025-12-08T17:55:41.131172184+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.131Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"ems-es-association-controller","worker count":3} 2025-12-08T17:55:41.131620947+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.131Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-monitoring-association-controller"} 2025-12-08T17:55:41.131620947+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.131Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-monitoring-association-controller","worker count":3} 2025-12-08T17:55:41.131914895+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.131Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"es-monitoring-association-controller"} 2025-12-08T17:55:41.131934646+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.131Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"es-monitoring-association-controller","worker count":3} 2025-12-08T17:55:41.132348667+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.132Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"ls-monitoring-association-controller"} 2025-12-08T17:55:41.132348667+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.132Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"ls-monitoring-association-controller","worker count":3} 2025-12-08T17:55:41.132549842+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.132Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.132615704+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.132Z","log.logger":"es-monitoring","message":"Ending reconciliation 
run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000135494} 2025-12-08T17:55:41.132937033+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.132Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-monitoring-association-controller"} 2025-12-08T17:55:41.132937033+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.132Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-monitoring-association-controller","worker count":3} 2025-12-08T17:55:41.189238028+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.189Z","log.logger":"manager","message":"Operator license key validated","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","license_type":"basic"} 2025-12-08T17:55:41.191211922+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.191Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apmserver-controller"} 2025-12-08T17:55:41.191249623+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.191Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apmserver-controller","worker count":3} 2025-12-08T17:55:41.191361666+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.191Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-autoscaler"} 2025-12-08T17:55:41.191386097+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.191Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-autoscaler","worker count":3} 2025-12-08T17:55:41.196932059+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.196Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller"} 2025-12-08T17:55:41.197001721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.196Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"elasticsearch-controller","worker count":3} 2025-12-08T17:55:41.197327380+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.197Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.198073030+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.198Z","log.logger":"elasticsearch-controller","message":"Creating 
resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"ConfigMap","namespace":"service-telemetry","name":"elasticsearch-es-scripts"} 2025-12-08T17:55:41.198987566+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.198Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"enterprisesearch-controller"} 2025-12-08T17:55:41.199033827+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.199Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"enterprisesearch-controller","worker count":3} 2025-12-08T17:55:41.199130719+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.199Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"license-controller"} 2025-12-08T17:55:41.199159660+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.199Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"license-controller","worker count":3} 2025-12-08T17:55:41.199279414+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.199Z","log.logger":"license-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.199652494+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.199Z","log.logger":"license-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","took":0.00036388} 2025-12-08T17:55:41.199803278+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.199Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kibana-controller"} 2025-12-08T17:55:41.199837129+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.199Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kibana-controller","worker count":3} 2025-12-08T17:55:41.205788562+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.205Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"trial-controller"} 2025-12-08T17:55:41.206025748+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.206Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"trial-controller","worker count":3} 2025-12-08T17:55:41.206335377+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.206Z","log.logger":"manager.eck-operator","message":"Starting 
Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-controller"} 2025-12-08T17:55:41.206370608+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.206Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-controller","worker count":3} 2025-12-08T17:55:41.206565793+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.206Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-controller"} 2025-12-08T17:55:41.206604024+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.206Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-controller","worker count":3} 2025-12-08T17:55:41.206902552+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.206Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"ConfigMap","namespace":"service-telemetry","name":"elasticsearch-es-scripts","resourceVersion":"42635"} 2025-12-08T17:55:41.207089978+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.207Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Service","namespace":"service-telemetry","name":"elasticsearch-es-transport"} 2025-12-08T17:55:41.208725802+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.208Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"stackconfigpolicy-controller"} 2025-12-08T17:55:41.208795164+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.208Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"stackconfigpolicy-controller","worker count":3} 2025-12-08T17:55:41.210035279+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.210Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"maps-controller"} 2025-12-08T17:55:41.210077200+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.210Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"maps-controller","worker count":3} 2025-12-08T17:55:41.211429977+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.211Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-controller"} 2025-12-08T17:55:41.211474158+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.211Z","log.logger":"manager.eck-operator","message":"Starting 
workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"logstash-controller","worker count":3} 2025-12-08T17:55:41.211583301+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.211Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-kibana-association-controller"} 2025-12-08T17:55:41.211613242+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.211Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-kibana-association-controller","worker count":3} 2025-12-08T17:55:41.211768836+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.211Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"remotecluster-controller"} 2025-12-08T17:55:41.211799077+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.211Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"remotecluster-controller","worker count":3} 2025-12-08T17:55:41.211968821+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.211Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.212390103+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.212Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000429622} 2025-12-08T17:55:41.212540338+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.212Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Service","namespace":"service-telemetry","name":"elasticsearch-es-transport","resourceVersion":"42636"} 2025-12-08T17:55:41.212643410+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.212Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Service","namespace":"service-telemetry","name":"elasticsearch-es-http"} 2025-12-08T17:55:41.221827942+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.221Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Service","namespace":"service-telemetry","name":"elasticsearch-es-http","resourceVersion":"42642"} 2025-12-08T17:55:41.222053838+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.222Z","log.logger":"elasticsearch-controller","message":"Creating 
resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Service","namespace":"service-telemetry","name":"elasticsearch-es-internal-http"} 2025-12-08T17:55:41.223713953+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.223Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-es-association-controller"} 2025-12-08T17:55:41.223755695+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.223Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"apm-es-association-controller","worker count":3} 2025-12-08T17:55:41.227767605+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.227Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-ent-association-controller"} 2025-12-08T17:55:41.227849107+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.227Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-ent-association-controller","worker count":3} 2025-12-08T17:55:41.228017592+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.227Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-es-association-controller"} 2025-12-08T17:55:41.228049313+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.228Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"kb-es-association-controller","worker count":3} 2025-12-08T17:55:41.228090174+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.228Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"ent-es-association-controller"} 2025-12-08T17:55:41.228123345+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.228Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"ent-es-association-controller","worker count":3} 2025-12-08T17:55:41.228162846+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.228Z","log.logger":"manager.eck-operator","message":"Starting Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-es-association-controller"} 2025-12-08T17:55:41.228186546+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.228Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"agent-es-association-controller","worker count":3} 2025-12-08T17:55:41.228270389+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.228Z","log.logger":"manager.eck-operator","message":"Starting 
Controller","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-es-association-controller"} 2025-12-08T17:55:41.228294499+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.228Z","log.logger":"manager.eck-operator","message":"Starting workers","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","controller":"beat-es-association-controller","worker count":3} 2025-12-08T17:55:41.233225675+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.233Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Service","namespace":"service-telemetry","name":"elasticsearch-es-internal-http","resourceVersion":"42648"} 2025-12-08T17:55:41.307045670+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.306Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-elastic-user"} 2025-12-08T17:55:41.312967432+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.312Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-elastic-user","resourceVersion":"42652"} 2025-12-08T17:55:41.636939893+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.636Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-internal-users"} 2025-12-08T17:55:41.642630669+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.642Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-internal-users","resourceVersion":"42660"} 2025-12-08T17:55:41.650427483+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.650Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-xpack-file-realm"} 2025-12-08T17:55:41.654957907+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.654Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"2","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.655441961+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:41.655Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"2","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000504255} 2025-12-08T17:55:41.657957280+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.657Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"2","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.658136625+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.658Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"2","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000191046} 2025-12-08T17:55:41.658628338+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.658Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-xpack-file-realm","resourceVersion":"42661"} 2025-12-08T17:55:41.658750101+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.658Z","log.logger":"elasticsearch-controller","message":"No internal CA certificate Secret found, creating a new one","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","owner_namespace":"service-telemetry","owner_name":"elasticsearch","ca_type":"http"} 2025-12-08T17:55:41.698199374+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.698Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"3","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.698450170+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.698Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"3","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000287438} 2025-12-08T17:55:41.718180092+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.718Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"3","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.718400128+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.718Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"3","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000240406} 2025-12-08T17:55:41.808641254+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.808Z","log.logger":"elasticsearch-controller","message":"Creating 
resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-http-ca-internal"} 2025-12-08T17:55:41.813897268+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.813Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"4","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.814147755+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.814Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"4","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000269437} 2025-12-08T17:55:41.814422902+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.814Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"4","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.814533345+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.814Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"4","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000110183} 2025-12-08T17:55:41.827796440+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.827Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-http-ca-internal","resourceVersion":"42668"} 2025-12-08T17:55:41.898590882+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.898Z","log.logger":"elasticsearch-controller","message":"Issuing new HTTP certificate","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","secret_name":"elasticsearch-es-http-certs-internal","owner_namespace":"service-telemetry","owner_name":"elasticsearch"} 2025-12-08T17:55:41.901464111+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.901Z","log.logger":"elasticsearch-controller","message":"Creating HTTP internal certificate secret","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","secret_name":"elasticsearch-es-http-certs-internal"} 2025-12-08T17:55:41.905527043+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.905Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-http-certs-public"} 2025-12-08T17:55:41.906712455+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:41.906Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"5","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.906961222+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.906Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"5","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000247677} 2025-12-08T17:55:41.907165147+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.907Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"5","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.907429025+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.907Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"5","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000260858} 2025-12-08T17:55:41.908707040+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.908Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-http-certs-public","resourceVersion":"42687"} 2025-12-08T17:55:41.908921456+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.908Z","log.logger":"elasticsearch-observer","message":"Creating observer for cluster","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.909008958+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.908Z","log.logger":"elasticsearch-controller","message":"No internal CA certificate Secret found, creating a new one","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","owner_namespace":"service-telemetry","owner_name":"elasticsearch","ca_type":"transport"} 2025-12-08T17:55:41.934337673+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.934Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-transport-ca-internal"} 2025-12-08T17:55:41.938861817+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.938Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-transport-ca-internal","resourceVersion":"42689"} 2025-12-08T17:55:41.938989671+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:41.938Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-transport-certs-public"} 2025-12-08T17:55:41.939380061+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.939Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"6","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.939532446+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.939Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"6","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000150854} 2025-12-08T17:55:41.939665529+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.939Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"6","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.939905526+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.939Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"6","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000213276} 2025-12-08T17:55:41.942000254+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.941Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-transport-certs-public","resourceVersion":"42690"} 2025-12-08T17:55:41.942135917+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.942Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-default-es-transport-certs"} 2025-12-08T17:55:41.945910890+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.945Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"7","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.946033414+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.946Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"7","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000143654} 2025-12-08T17:55:41.946147107+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.946Z","log.logger":"remotecluster-controller","message":"Starting reconciliation 
run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"7","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.946334112+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.946Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"7","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000185025} 2025-12-08T17:55:41.946535398+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.946Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-default-es-transport-certs","resourceVersion":"42691"} 2025-12-08T17:55:41.950657601+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.950Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-remote-ca"} 2025-12-08T17:55:41.951000870+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.950Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.951115713+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.951Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000116183} 2025-12-08T17:55:41.951247417+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.951Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.951459513+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.951Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000204425} 2025-12-08T17:55:41.954508606+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.954Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"9","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.954715782+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.954Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"9","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000206185} 2025-12-08T17:55:41.954924658+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:41.954Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"9","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.955027181+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.955Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"9","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000102493} 2025-12-08T17:55:41.955183696+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.955Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-remote-ca","resourceVersion":"42693"} 2025-12-08T17:55:41.955281098+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.955Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"ConfigMap","namespace":"service-telemetry","name":"elasticsearch-es-unicast-hosts"} 2025-12-08T17:55:41.955312139+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.955Z","log.logger":"elasticsearch-controller","message":"Creating seed hosts","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch","hosts":[]} 2025-12-08T17:55:41.959158744+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.959Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"ConfigMap","namespace":"service-telemetry","name":"elasticsearch-es-unicast-hosts","resourceVersion":"42694"} 2025-12-08T17:55:41.962269789+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.962Z","log.logger":"elasticsearch-controller","message":"Creating master node","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","statefulset_name":"elasticsearch-es-default","actualReplicas":0,"targetReplicas":1} 2025-12-08T17:55:41.962362982+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.962Z","log.logger":"elasticsearch-controller","message":"Setting `cluster.initial_master_nodes`","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch","cluster.initial_master_nodes":"elasticsearch-es-default-0"} 2025-12-08T17:55:41.973436226+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.973Z","log.logger":"license-controller","message":"Starting reconciliation 
run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"2","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.973722574+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.973Z","log.logger":"license-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"2","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000293648} 2025-12-08T17:55:41.973807606+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.973Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"10","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.974103784+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.974Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"10","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000291077} 2025-12-08T17:55:41.974188066+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.974Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"10","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.974323000+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.974Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"10","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000132533} 2025-12-08T17:55:41.974982579+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.974Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-default-es-config"} 2025-12-08T17:55:41.977987151+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.977Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-default-es-config","resourceVersion":"42696"} 2025-12-08T17:55:41.978228368+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.978Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Service","namespace":"service-telemetry","name":"elasticsearch-es-default"} 2025-12-08T17:55:41.978556867+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.978Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"11","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:41.978698930+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:41.978Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"11","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000137204} 2025-12-08T17:55:41.978850465+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.978Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"11","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:41.979034609+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.979Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"11","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000182285} 2025-12-08T17:55:41.981769295+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.981Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Service","namespace":"service-telemetry","name":"elasticsearch-es-default","resourceVersion":"42697"} 2025-12-08T17:55:41.981870237+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:41.981Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"StatefulSet","namespace":"service-telemetry","name":"elasticsearch-es-default"} 2025-12-08T17:55:42.000414507+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.000Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"StatefulSet","namespace":"service-telemetry","name":"elasticsearch-es-default","resourceVersion":"42701"} 2025-12-08T17:55:42.000762216+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.000Z","log.logger":"elasticsearch-controller","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"PodDisruptionBudget","namespace":"service-telemetry","name":"elasticsearch-es-default"} 2025-12-08T17:55:42.004290503+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.004Z","log.logger":"elasticsearch-controller","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","kind":"PodDisruptionBudget","namespace":"service-telemetry","name":"elasticsearch-es-default","resourceVersion":"42702"} 2025-12-08T17:55:42.105423217+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.105Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, 
re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.112456951+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.112Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"1","namespace":"service-telemetry","es_name":"elasticsearch","took":0.91509029} 2025-12-08T17:55:42.112516083+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.112Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"2","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.116811080+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.116Z","log.logger":"elasticsearch-controller","message":"Skipping pod because it has no IP yet","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"2","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","pod_name":"elasticsearch-es-default-0"} 2025-12-08T17:55:42.119682769+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.119Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"2","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.126961789+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.126Z","log.logger":"license-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"3","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.127175275+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.127Z","log.logger":"license-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"3","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000244927} 2025-12-08T17:55:42.127175275+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.127Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"12","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.127447302+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.127Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"12","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000200516} 2025-12-08T17:55:42.127447302+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.127Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"12","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:42.127535804+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:42.127Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"12","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000072012} 2025-12-08T17:55:42.132424899+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.132Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"2","namespace":"service-telemetry","es_name":"elasticsearch","took":0.019873865} 2025-12-08T17:55:42.132424899+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.132Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"3","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.137561470+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.137Z","log.logger":"elasticsearch-controller","message":"Skipping pod because it has no IP yet","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"3","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","pod_name":"elasticsearch-es-default-0"} 2025-12-08T17:55:42.145335872+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.144Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"3","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.155467051+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.155Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"3","namespace":"service-telemetry","es_name":"elasticsearch","took":0.023000611} 2025-12-08T17:55:42.155557103+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.155Z","log.logger":"license-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"4","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.155810850+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.155Z","log.logger":"license-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"4","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000263287} 2025-12-08T17:55:42.155847981+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.155Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"13","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.156040766+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.156Z","log.logger":"remotecluster-controller","message":"Ending reconciliation 
run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"13","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000194006} 2025-12-08T17:55:42.156094448+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.156Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"13","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:42.156168880+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.156Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"13","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000075732} 2025-12-08T17:55:42.156220941+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.156Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"4","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.159589714+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.159Z","log.logger":"elasticsearch-controller","message":"Skipping pod because it has no IP yet","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"4","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","pod_name":"elasticsearch-es-default-0"} 2025-12-08T17:55:42.161951778+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.161Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"4","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.161974059+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.161Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"4","namespace":"service-telemetry","es_name":"elasticsearch","took":0.005761099} 2025-12-08T17:55:42.904162505+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.903Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"5","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.907767054+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.907Z","log.logger":"elasticsearch-controller","message":"Skipping pod because it has no IP yet","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"5","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","pod_name":"elasticsearch-es-default-0"} 2025-12-08T17:55:42.913563493+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.913Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, 
re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"5","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:42.913608444+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:42.913Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"5","namespace":"service-telemetry","es_name":"elasticsearch","took":0.009826191} 2025-12-08T17:55:43.463436701+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:43.463Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"6","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:43.471356589+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:43.471Z","log.logger":"elasticsearch-controller","message":"Skipping pod because it has no IP yet","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"6","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","pod_name":"elasticsearch-es-default-0"} 2025-12-08T17:55:43.474558896+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:43.474Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"6","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:43.474586407+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:43.474Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"6","namespace":"service-telemetry","es_name":"elasticsearch","took":0.011414133} 2025-12-08T17:55:51.136918240+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:51.133Z","log.logger":"resource-reporter","message":"Creating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","kind":"ConfigMap","namespace":"service-telemetry","name":"elastic-licensing"} 2025-12-08T17:55:51.139948263+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:51.138Z","log.logger":"resource-reporter","message":"Created resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","kind":"ConfigMap","namespace":"service-telemetry","name":"elastic-licensing","resourceVersion":"43295"} 2025-12-08T17:55:51.154647737+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:51.154Z","log.logger":"manager","message":"Orphan secrets garbage collection complete","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0"} 2025-12-08T17:55:52.112846730+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:52.112Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"7","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:52.119467831+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:55:52.119Z","log.logger":"elasticsearch-controller","message":"Skipping pod because it has no IP yet","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"7","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","pod_name":"elasticsearch-es-default-0"} 2025-12-08T17:55:52.124241502+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:52.124Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"7","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:52.124274173+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:52.124Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"7","namespace":"service-telemetry","es_name":"elasticsearch","took":0.011471815} 2025-12-08T17:55:57.964615250+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:57.964Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:58.140829555+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.140Z","log.logger":"elasticsearch-controller","message":"No tls certificate found in secret","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","pod_name":"elasticsearch-es-default-0"} 2025-12-08T17:55:58.140829555+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.140Z","log.logger":"elasticsearch-controller","message":"Issuing new certificate","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch","pod_name":"elasticsearch-es-default-0"} 2025-12-08T17:55:58.160420573+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.157Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"14","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:58.160420573+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.158Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"14","namespace":"service-telemetry","es_name":"elasticsearch","took":0.00033844} 2025-12-08T17:55:58.160420573+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.158Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"14","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:55:58.160420573+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.158Z","log.logger":"es-monitoring","message":"Ending reconciliation 
run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"14","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000101412} 2025-12-08T17:55:58.200971356+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.200Z","log.logger":"elasticsearch-controller","message":"Updating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch","kind":"ConfigMap","namespace":"service-telemetry","name":"elasticsearch-es-unicast-hosts"} 2025-12-08T17:55:58.209582612+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.209Z","log.logger":"elasticsearch-controller","message":"Seed hosts updated","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch","hosts":["10.217.0.53:9300"]} 2025-12-08T17:55:58.260867139+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.260Z","log.logger":"elasticsearch-controller","message":"Updated resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch","kind":"ConfigMap","namespace":"service-telemetry","name":"elasticsearch-es-unicast-hosts","resourceVersion":"43356"} 2025-12-08T17:55:58.264491418+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.263Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:58.264491418+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.263Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch","took":0.299401145} 2025-12-08T17:55:58.264491418+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.264Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"9","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:58.278039460+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.276Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"9","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:55:58.278039460+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:58.276Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"9","namespace":"service-telemetry","es_name":"elasticsearch","took":0.0127299} 2025-12-08T17:55:59.980392203+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:55:59.979Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation 
run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"10","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:00.017655555+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:00.017Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"10","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:00.017655555+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:00.017Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"10","namespace":"service-telemetry","es_name":"elasticsearch","took":0.037269902} 2025-12-08T17:56:00.976540358+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:00.975Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"11","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:00.984475814+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:00.984Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"11","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:00.985071442+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:00.985Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"11","namespace":"service-telemetry","es_name":"elasticsearch","took":0.009108001} 2025-12-08T17:56:02.011024473+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.010Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"12","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:02.021784029+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.021Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"12","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:02.036184244+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.035Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"12","namespace":"service-telemetry","es_name":"elasticsearch","took":0.024968305} 2025-12-08T17:56:02.036184244+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.035Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"13","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:02.041298324+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:56:02.041Z","log.logger":"license-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"5","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:02.041377956+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.041Z","log.logger":"license-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"5","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000266877} 2025-12-08T17:56:02.041377956+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.041Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"15","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:02.041612083+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.041Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"15","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000201265} 2025-12-08T17:56:02.041642164+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.041Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"15","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:56:02.041776317+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.041Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"15","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000084052} 2025-12-08T17:56:02.053975511+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.048Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"13","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:02.053975511+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.048Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"13","namespace":"service-telemetry","es_name":"elasticsearch","took":0.01277401} 2025-12-08T17:56:02.125470454+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.125Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"14","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:02.140819314+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.140Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"14","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 
2025-12-08T17:56:02.140855645+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:02.140Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"14","namespace":"service-telemetry","es_name":"elasticsearch","took":0.015448293} 2025-12-08T17:56:12.144834982+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:12.141Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"15","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:12.153480679+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:12.153Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"15","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:12.153511670+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:12.153Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"15","namespace":"service-telemetry","es_name":"elasticsearch","took":0.011555757} 2025-12-08T17:56:18.545643199+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.545Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"16","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:18.734349218+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.734Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"16","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:18.742600363+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.742Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"16","namespace":"service-telemetry","es_name":"elasticsearch","took":0.197290583} 2025-12-08T17:56:18.742600363+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.742Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"17","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:18.748286929+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.748Z","log.logger":"license-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"6","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:18.748591588+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.748Z","log.logger":"license-controller","message":"Ending reconciliation 
run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"6","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000513085} 2025-12-08T17:56:18.748652850+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.748Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"16","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:18.749089812+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.748Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"16","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000236456} 2025-12-08T17:56:18.749089812+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.748Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"16","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:56:18.749106922+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.749Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"16","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000142274} 2025-12-08T17:56:18.774365295+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.774Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"17","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:18.780776701+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.780Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"17","namespace":"service-telemetry","es_name":"elasticsearch","took":0.038135457} 2025-12-08T17:56:18.780848993+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.780Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"18","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:18.831910554+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.831Z","log.logger":"elasticsearch-controller","message":"Elasticsearch cannot be reached yet, re-queuing","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"18","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:18.832120390+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:18.832Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"18","namespace":"service-telemetry","es_name":"elasticsearch","took":0.051262317} 2025-12-08T17:56:22.020063806+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:56:22.018Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"19","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:22.214268845+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.214Z","log.logger":"elasticsearch-controller","message":"Annotating bootstrapped cluster with its UUID","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"19","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch","uuid":"BHO8BtUVT8i1ANtvfdqJYw"} 2025-12-08T17:56:22.230752888+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.230Z","log.logger":"license-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"7","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:22.231070736+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.231Z","log.logger":"license-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"7","namespace":"service-telemetry","es_name":"elasticsearch","took":0.00036021} 2025-12-08T17:56:22.231139418+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.231Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"17","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:22.231519468+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.231Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"17","namespace":"service-telemetry","es_name":"elasticsearch","took":0.00038183} 2025-12-08T17:56:22.231609961+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.231Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"17","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:56:22.231768775+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.231Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"17","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000154204} 2025-12-08T17:56:22.249495881+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.249Z","log.logger":"elasticsearch-controller","message":"Zen 2 bootstrap is complete","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"19","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:22.263631879+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.263Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"18","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 
2025-12-08T17:56:22.263976018+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.263Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"18","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000327608} 2025-12-08T17:56:22.264791452+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.264Z","log.logger":"elasticsearch-controller","message":"Ensuring no voting exclusions are set","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"19","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:22.265573523+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.265Z","log.logger":"license-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:22.265573523+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.265Z","log.logger":"license-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"8","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000286877} 2025-12-08T17:56:22.265573523+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.265Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"18","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:22.265866721+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:22.265Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"18","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000307968} 2025-12-08T17:56:23.136181082+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.136Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"19","namespace":"service-telemetry","es_name":"elasticsearch","took":1.117676469} 2025-12-08T17:56:23.136254964+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.136Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"20","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.178578636+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.178Z","log.logger":"elasticsearch-controller","message":"Updating resource","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"20","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-default-es-config"} 2025-12-08T17:56:23.182861282+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.182Z","log.logger":"es-monitoring","message":"Starting reconciliation 
run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"19","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:56:23.183786729+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.183Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"19","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000275748} 2025-12-08T17:56:23.183786729+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.183Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"19","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.183786729+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.183Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"19","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000340659} 2025-12-08T17:56:23.184267782+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.184Z","log.logger":"elasticsearch-controller","message":"Updated resource successfully","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"20","namespace":"service-telemetry","es_name":"elasticsearch","kind":"Secret","namespace":"service-telemetry","name":"elasticsearch-es-default-es-config","resourceVersion":"43552"} 2025-12-08T17:56:23.185598538+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.185Z","log.logger":"elasticsearch-controller","message":"Ensuring no voting exclusions are set","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"20","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.246138649+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.246Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"20","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:56:23.246380716+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.246Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"20","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000264627} 2025-12-08T17:56:23.246615042+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.246Z","log.logger":"license-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"9","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.246841378+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.246Z","log.logger":"license-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"9","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000240597} 2025-12-08T17:56:23.246935791+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:56:23.246Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"20","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.247151057+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.247Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"20","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000214436} 2025-12-08T17:56:23.253433010+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.253Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"20","namespace":"service-telemetry","es_name":"elasticsearch","took":0.117153415} 2025-12-08T17:56:23.253550803+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.253Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"21","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.312291295+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.312Z","log.logger":"elasticsearch-controller","message":"Ensuring no voting exclusions are set","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"21","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.352957280+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.352Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"21","namespace":"service-telemetry","es_name":"elasticsearch","took":0.099326074} 2025-12-08T17:56:23.353112164+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.353Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"22","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.358022799+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.357Z","log.logger":"license-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"10","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.358262666+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.358Z","log.logger":"license-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"10","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000271087} 2025-12-08T17:56:23.358334608+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.358Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"21","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.358523693+00:00 stderr F 
{"log.level":"info","@timestamp":"2025-12-08T17:56:23.358Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"21","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000192145} 2025-12-08T17:56:23.358603985+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.358Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"21","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:56:23.358702648+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.358Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"21","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000096822} 2025-12-08T17:56:23.410287514+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.410Z","log.logger":"elasticsearch-controller","message":"Ensuring no voting exclusions are set","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"22","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:23.451544585+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:23.451Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"22","namespace":"service-telemetry","es_name":"elasticsearch","took":0.09838664} 2025-12-08T17:56:33.137114965+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:33.136Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"23","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:33.190745614+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:33.190Z","log.logger":"elasticsearch-controller","message":"Ensuring no voting exclusions are set","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"23","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:56:33.229456743+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:56:33.229Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"23","namespace":"service-telemetry","es_name":"elasticsearch","took":0.093025866} 2025-12-08T17:59:31.923943211+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:31.923Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"24","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:59:31.991289506+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:31.991Z","log.logger":"elasticsearch-controller","message":"Ensuring no voting exclusions are 
set","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"24","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:59:32.039822765+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:32.039Z","log.logger":"license-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"11","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:59:32.040094632+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:32.040Z","log.logger":"license-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"11","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000340789} 2025-12-08T17:59:32.040155764+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:32.040Z","log.logger":"remotecluster-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"22","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:59:32.040371029+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:32.040Z","log.logger":"remotecluster-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"22","namespace":"service-telemetry","es_name":"elasticsearch","took":0.000197685} 2025-12-08T17:59:32.040410910+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:32.040Z","log.logger":"es-monitoring","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"22","namespace":"service-telemetry","es-mon_name":"elasticsearch"} 2025-12-08T17:59:32.040517723+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:32.040Z","log.logger":"es-monitoring","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"22","namespace":"service-telemetry","es-mon_name":"elasticsearch","took":0.000092452} 2025-12-08T17:59:32.040858972+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:32.040Z","log.logger":"elasticsearch-controller","message":"Ending reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"24","namespace":"service-telemetry","es_name":"elasticsearch","took":0.117020863} 2025-12-08T17:59:32.040895964+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:32.040Z","log.logger":"elasticsearch-controller","message":"Starting reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"25","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:59:32.070488203+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:32.070Z","log.logger":"elasticsearch-controller","message":"Ensuring no voting exclusions are set","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"25","namespace":"service-telemetry","es_name":"elasticsearch","namespace":"service-telemetry","es_name":"elasticsearch"} 2025-12-08T17:59:32.110934350+00:00 stderr F {"log.level":"info","@timestamp":"2025-12-08T17:59:32.110Z","log.logger":"elasticsearch-controller","message":"Ending 
reconciliation run","service.version":"3.2.0+3ed7be5a","service.type":"eck","ecs.version":"1.4.0","iteration":"25","namespace":"service-telemetry","es_name":"elasticsearch","took":0.069969504} ././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611513033101 5ustar zuulzuul././@LongLink0000644000000000000000000000030500000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/bridge/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611520033077 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/bridge/1.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000112415115611513033101 0ustar zuulzuul2025-12-08T17:58:37.194928523+00:00 stdout F bridge-15b ==> (/tmp/smartgateway) 2025-12-08T17:58:37.203427942+00:00 stderr F PN_TRANSPORT_CLOSED: proton:io: Connection refused - disconnected default-interconnect.service-telemetry.svc.cluster.local:5673 2025-12-08T17:58:37.203427942+00:00 stderr F Exit AMQP RCV thread... 2025-12-08T17:58:38.194927592+00:00 stdout F Joining amqp_rcv_th... 2025-12-08T17:58:38.194927592+00:00 stdout F Cancel socket_snd_th... 2025-12-08T17:58:38.194927592+00:00 stdout F Joining socket_snd_th... 2025-12-08T17:58:38.194970753+00:00 stderr F Exit SOCKET thread... 

==> home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/bridge/2.log <==
2025-12-08T17:58:55.221821146+00:00 stdout F bridge-3cd ==> (/tmp/smartgateway)
2025-12-08T17:58:55.229335794+00:00 stdout F bridge-3cd ==> (amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/anycast/ceilometer/cloud1-event.sample)
2025-12-08T17:59:55.269377295+00:00 stdout F in: 7(0), amqp_overrun: 0(0), out: 7(0), sock_overrun: 0(0), link_credit_average: -nan
2025-12-08T18:00:54.277309161+00:00 stdout F in: 7(0), amqp_overrun: 0(0), out: 7(0), sock_overrun: 0(0), link_credit_average: -nan
2025-12-08T18:01:53.296242890+00:00 stdout F in: 7(0), amqp_overrun: 0(0), out: 7(0), sock_overrun: 0(0), link_credit_average: -nan
2025-12-08T18:02:52.305317161+00:00 stdout F in: 7(0), amqp_overrun: 0(0), out: 7(0), sock_overrun: 0(0), link_credit_average: -nan
2025-12-08T18:03:51.325781004+00:00 stdout F in: 7(0), amqp_overrun: 0(0), out: 7(0), sock_overrun: 0(0), link_credit_average: -nan
2025-12-08T18:04:50.340329286+00:00 stdout F in: 7(0), amqp_overrun: 0(0), out: 7(0), sock_overrun: 0(0), link_credit_average: -nan

==> home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/sg-core/0.log <==
2025-12-08T17:58:35.612673009+00:00 stdout F 2025-12-08 17:58:35 [INFO] initialized handler [transport pair: socket0, handler: events]
2025-12-08T17:58:35.612673009+00:00 stdout F 2025-12-08 17:58:35 [INFO] loaded transport [transport: socket0]
2025-12-08T17:58:35.645150388+00:00 stdout F 2025-12-08 17:58:35 [INFO] loaded application plugin [application: elasticsearch]
2025-12-08T17:58:35.646263847+00:00 stdout F 2025-12-08 17:58:35 [INFO] storing events and(or) logs to Elasticsearch. [plugin: elasticsearch, url: https://elasticsearch-es-http:9200]
2025-12-08T17:58:35.647426917+00:00 stdout F 2025-12-08 17:58:35 [INFO] socket listening on /tmp/smartgateway [plugin: socket]

==> home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-server-psb45_d549986a-81c9-4cd0-86b0-61e4b6700ddf/machine-config-server/0.log <==
2025-12-08T17:44:22.413938129+00:00 stderr F I1208 17:44:22.412115 1 start.go:40] Version: 89b561f0 (f587a1bfbaba518cc1d49ad6300e29eeb9c38cec)
2025-12-08T17:44:22.646955186+00:00 stderr F I1208 17:44:22.646859 1 start.go:51] Launching server with tls min version: VersionTLS12 & cipher suites [TLS_AES_128_GCM_SHA256 TLS_AES_256_GCM_SHA384 TLS_CHACHA20_POLY1305_SHA256 TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256]
2025-12-08T17:44:22.647984554+00:00 stderr F I1208 17:44:22.647012 1 api.go:68] Launching server on :22624
2025-12-08T17:44:22.647984554+00:00 stderr F I1208 17:44:22.647657 1 api.go:68] Launching server on :22623
2025-12-08T17:44:22.649578947+00:00 stderr F I1208 17:44:22.649115 1 certwatcher.go:133] "Starting certificate poll+watcher" logger="controller-runtime.certwatcher" interval="10s"

==> home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/liveness-probe/0.log <==
2025-12-08T17:44:41.808972464+00:00 stderr F I1208 17:44:41.792232 1 main.go:149] calling CSI driver to discover driver name
2025-12-08T17:44:41.808972464+00:00 stderr F I1208 17:44:41.795461 1 main.go:155] CSI driver name: "kubevirt.io.hostpath-provisioner"
2025-12-08T17:44:41.808972464+00:00 stderr F I1208 17:44:41.795479 1 main.go:183] ServeMux listening at "0.0.0.0:9898"

==> home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/node-driver-registrar/0.log <==
2025-12-08T17:44:30.296342236+00:00 stderr F I1208 17:44:30.294814 1 main.go:135] Version: 902b844d6eef9c046509e5b4ce435e3240c54389
2025-12-08T17:44:30.296342236+00:00 stderr F I1208 17:44:30.294970 1 main.go:136] Running node-driver-registrar in mode=
2025-12-08T17:44:30.296342236+00:00 stderr F I1208 17:44:30.294982 1 main.go:157] Attempting to open a gRPC connection with: "/csi/csi.sock"
2025-12-08T17:44:30.300484710+00:00 stderr F I1208 17:44:30.300329 1 main.go:164] Calling CSI driver to discover driver name
2025-12-08T17:44:30.308549460+00:00 stderr F I1208 17:44:30.308462 1 main.go:173] CSI driver name: "kubevirt.io.hostpath-provisioner"
2025-12-08T17:44:30.308549460+00:00 stderr F I1208 17:44:30.308520 1 node_register.go:55] Starting Registration Server at: /registration/kubevirt.io.hostpath-provisioner-reg.sock
2025-12-08T17:44:30.309981949+00:00 stderr F I1208 17:44:30.309653 1 node_register.go:64] Registration Server started at: /registration/kubevirt.io.hostpath-provisioner-reg.sock
2025-12-08T17:44:30.309981949+00:00 stderr F I1208 17:44:30.309924 1 node_register.go:88] Skipping HTTP server because endpoint is set to: ""
2025-12-08T17:44:30.403458918+00:00 stderr F I1208 17:44:30.403404 1 main.go:90] Received GetInfo call: &InfoRequest{}
2025-12-08T17:44:30.421092280+00:00 stderr F I1208 17:44:30.421027 1 main.go:101] Received NotifyRegistrationStatus call: &RegistrationStatus{PluginRegistered:true,Error:,}

==> home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/hostpath-provisioner/0.log <==
2025-12-08T17:44:27.737987463+00:00 stderr F I1208 17:44:27.737684 1 plugin.go:44] Starting Prometheus metrics endpoint server
2025-12-08T17:44:27.737987463+00:00 stderr F I1208 17:44:27.737911 1 plugin.go:47] Starting new HostPathDriver, config: {kubevirt.io.hostpath-provisioner unix:///csi/csi.sock crc map[] latest }
2025-12-08T17:44:27.825595262+00:00 stderr F I1208 17:44:27.825534 1 mount_linux.go:174] Cannot run systemd-run, assuming non-systemd OS
2025-12-08T17:44:27.825630063+00:00 stderr F I1208 17:44:27.825606 1 hostpath.go:88] name: local, dataDir: /csi-data-dir
2025-12-08T17:44:27.825703165+00:00 stderr F I1208 17:44:27.825682 1 hostpath.go:107] Driver: kubevirt.io.hostpath-provisioner, version: latest
2025-12-08T17:44:27.826281000+00:00 stderr F I1208 17:44:27.826253 1 server.go:194] Starting domain socket: unix///csi/csi.sock
2025-12-08T17:44:27.827543965+00:00 stderr F I1208 17:44:27.826422 1 server.go:89] Listening for connections on address: &net.UnixAddr{Name:"//csi/csi.sock", Net:"unix"}
2025-12-08T17:44:27.843411039+00:00 stderr F I1208 17:44:27.838600 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure
2025-12-08T17:44:30.304539320+00:00 stderr F I1208 17:44:30.304483 1 server.go:104] GRPC call: /csi.v1.Identity/GetPluginInfo
2025-12-08T17:44:30.405636788+00:00 stderr F I1208 17:44:30.404912 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetInfo
2025-12-08T17:44:30.502685465+00:00 stderr F I1208 17:44:30.502473 1 server.go:104] GRPC call: /csi.v1.Node/NodeUnpublishVolume
2025-12-08T17:44:30.502774498+00:00 stderr F I1208 17:44:30.502751 1 nodeserver.go:199] Node Unpublish Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 TargetPath:/var/lib/kubelet/pods/9e9b5059-1b3e-4067-a63d-2952cbe863af/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0}
2025-12-08T17:44:30.502797938+00:00 stderr F I1208 17:44:30.502788 1 nodeserver.go:206] Unmounting path: /var/lib/kubelet/pods/9e9b5059-1b3e-4067-a63d-2952cbe863af/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount
2025-12-08T17:44:30.625116745+00:00 stderr F I1208 17:44:30.621635 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities
2025-12-08T17:44:30.625765343+00:00 stderr F I1208 17:44:30.625648 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities
2025-12-08T17:44:30.627358605+00:00 stderr F I1208 17:44:30.626860 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities
2025-12-08T17:44:30.628929049+00:00 stderr F I1208 17:44:30.628903 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities
2025-12-08T17:44:30.633152743+00:00 stderr F I1208 17:44:30.633052 1 server.go:104] GRPC call: /csi.v1.Node/NodePublishVolume
2025-12-08T17:44:30.633152743+00:00 stderr F I1208 17:44:30.633073 1 nodeserver.go:82] Node Publish Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 PublishContext:map[] StagingTargetPath: TargetPath:/var/lib/kubelet/pods/1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount VolumeCapability:mount:<> access_mode:
Readonly:false Secrets:map[] VolumeContext:map[csi.storage.k8s.io/ephemeral:false csi.storage.k8s.io/pod.name:image-registry-66587d64c8-s6hn4 csi.storage.k8s.io/pod.namespace:openshift-image-registry csi.storage.k8s.io/pod.uid:1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc csi.storage.k8s.io/pv/name:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 csi.storage.k8s.io/pvc/name:crc-image-registry-storage csi.storage.k8s.io/pvc/namespace:openshift-image-registry csi.storage.k8s.io/serviceAccount.name:registry storage.kubernetes.io/csiProvisionerIdentity:1762159825768-6575-kubevirt.io.hostpath-provisioner-crc storagePool:local] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:44:41.794999955+00:00 stderr F I1208 17:44:41.793660 1 server.go:104] GRPC call: /csi.v1.Identity/GetPluginInfo 2025-12-08T17:44:43.530582117+00:00 stderr F I1208 17:44:43.530518 1 server.go:104] GRPC call: /csi.v1.Identity/GetPluginInfo 2025-12-08T17:44:43.531299477+00:00 stderr F I1208 17:44:43.531267 1 server.go:104] GRPC call: /csi.v1.Identity/GetPluginCapabilities 2025-12-08T17:44:43.531917724+00:00 stderr F I1208 17:44:43.531889 1 server.go:104] GRPC call: /csi.v1.Controller/ControllerGetCapabilities 2025-12-08T17:44:43.537288344+00:00 stderr F I1208 17:44:43.537243 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetInfo 2025-12-08T17:44:43.722163118+00:00 stderr F I1208 17:44:43.722109 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:44:43.722163118+00:00 stderr F I1208 17:44:43.722125 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:45:27.856671142+00:00 stderr F I1208 17:45:27.856537 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:45:43.722700983+00:00 stderr F I1208 17:45:43.722594 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:45:43.722700983+00:00 stderr F I1208 17:45:43.722622 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:46:08.481275788+00:00 stderr F I1208 17:46:08.480444 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:46:08.481976499+00:00 stderr F I1208 17:46:08.481954 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:46:08.482005520+00:00 stderr F I1208 17:46:08.481970 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:46:08.488980939+00:00 stderr F I1208 17:46:08.488940 1 healthcheck.go:84] fs available: 59840675840, total capacity: 85292941312, percentage available: 70.16, number of free inodes: 41544757 2025-12-08T17:46:08.488980939+00:00 stderr F I1208 17:46:08.488966 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T17:46:08.488996350+00:00 stderr F I1208 17:46:08.488981 1 nodeserver.go:330] Capacity: 85292941312 Used: 25452265472 Available: 59840675840 Inodes: 41679680 Free inodes: 41544757 Used inodes: 134923 2025-12-08T17:46:27.874153156+00:00 stderr F I1208 17:46:27.874050 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:46:43.723090463+00:00 stderr F I1208 17:46:43.723036 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 
2025-12-08T17:46:43.723090463+00:00 stderr F I1208 17:46:43.723059 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:47:27.887107737+00:00 stderr F I1208 17:47:27.886344 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:47:36.618079480+00:00 stderr F I1208 17:47:36.617996 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:47:36.619898097+00:00 stderr F I1208 17:47:36.619836 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:47:36.619921458+00:00 stderr F I1208 17:47:36.619859 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:47:36.626104222+00:00 stderr F I1208 17:47:36.626039 1 healthcheck.go:84] fs available: 59820150784, total capacity: 85292941312, percentage available: 70.13, number of free inodes: 41544791 2025-12-08T17:47:36.626104222+00:00 stderr F I1208 17:47:36.626074 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T17:47:36.626104222+00:00 stderr F I1208 17:47:36.626097 1 nodeserver.go:330] Capacity: 85292941312 Used: 25472790528 Available: 59820150784 Inodes: 41679680 Free inodes: 41544791 Used inodes: 134889 2025-12-08T17:47:43.724316476+00:00 stderr F I1208 17:47:43.724250 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:47:43.724316476+00:00 stderr F I1208 17:47:43.724281 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:47:44.734030691+00:00 stderr F I1208 17:47:44.733984 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:47:44.734030691+00:00 stderr F I1208 17:47:44.734008 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:47:48.129249110+00:00 stderr F I1208 17:47:48.129185 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:47:48.129249110+00:00 stderr F I1208 17:47:48.129209 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:48:27.905978391+00:00 stderr F I1208 17:48:27.902281 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:48:43.725200850+00:00 stderr F I1208 17:48:43.725138 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:48:43.725329834+00:00 stderr F I1208 17:48:43.725303 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:49:26.020190799+00:00 stderr F I1208 17:49:26.020088 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:49:26.021809770+00:00 stderr F I1208 17:49:26.021744 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:49:26.021809770+00:00 stderr F I1208 17:49:26.021770 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:49:26.027768061+00:00 stderr F I1208 17:49:26.027683 1 healthcheck.go:84] fs available: 59814432768, total capacity: 85292941312, percentage available: 
70.13, number of free inodes: 41544827 2025-12-08T17:49:26.027768061+00:00 stderr F I1208 17:49:26.027718 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T17:49:26.027768061+00:00 stderr F I1208 17:49:26.027747 1 nodeserver.go:330] Capacity: 85292941312 Used: 25478508544 Available: 59814432768 Inodes: 41679680 Free inodes: 41544827 Used inodes: 134853 2025-12-08T17:49:27.915054819+00:00 stderr F I1208 17:49:27.914945 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:49:43.726287345+00:00 stderr F I1208 17:49:43.726201 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:49:43.726287345+00:00 stderr F I1208 17:49:43.726233 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:50:27.930604101+00:00 stderr F I1208 17:50:27.930457 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:50:43.726183664+00:00 stderr F I1208 17:50:43.726123 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:50:43.726255485+00:00 stderr F I1208 17:50:43.726245 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:51:23.110788477+00:00 stderr F I1208 17:51:23.110741 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:51:23.111816254+00:00 stderr F I1208 17:51:23.111801 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:51:23.111866144+00:00 stderr F I1208 17:51:23.111844 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:51:23.118761086+00:00 stderr F I1208 17:51:23.118730 1 healthcheck.go:84] fs available: 59774234624, total capacity: 85292941312, percentage available: 70.08, number of free inodes: 41544823 2025-12-08T17:51:23.118806347+00:00 stderr F I1208 17:51:23.118796 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T17:51:23.118834297+00:00 stderr F I1208 17:51:23.118825 1 nodeserver.go:330] Capacity: 85292941312 Used: 25518706688 Available: 59774234624 Inodes: 41679680 Free inodes: 41544823 Used inodes: 134857 2025-12-08T17:51:27.943432608+00:00 stderr F I1208 17:51:27.943396 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:51:43.727319246+00:00 stderr F I1208 17:51:43.727219 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:51:43.727319246+00:00 stderr F I1208 17:51:43.727245 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:52:27.965929530+00:00 stderr F I1208 17:52:27.965820 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:52:33.922999827+00:00 stderr F I1208 17:52:33.922014 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:52:33.923150412+00:00 stderr F I1208 17:52:33.923101 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:52:33.923150412+00:00 stderr F I1208 17:52:33.923115 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 
VolumePath:/var/lib/kubelet/pods/1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:52:33.929777378+00:00 stderr F I1208 17:52:33.929697 1 healthcheck.go:84] fs available: 59776679936, total capacity: 85292941312, percentage available: 70.08, number of free inodes: 41544823 2025-12-08T17:52:33.929777378+00:00 stderr F I1208 17:52:33.929717 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T17:52:33.929777378+00:00 stderr F I1208 17:52:33.929728 1 nodeserver.go:330] Capacity: 85292941312 Used: 25516261376 Available: 59776679936 Inodes: 41679680 Free inodes: 41544823 Used inodes: 134857 2025-12-08T17:52:43.727850523+00:00 stderr F I1208 17:52:43.727757 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:52:43.727850523+00:00 stderr F I1208 17:52:43.727789 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:53:27.980325923+00:00 stderr F I1208 17:53:27.980214 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:53:37.447230670+00:00 stderr F I1208 17:53:37.447100 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:53:37.449105071+00:00 stderr F I1208 17:53:37.448896 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:53:37.449105071+00:00 stderr F I1208 17:53:37.448914 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:53:37.458862427+00:00 stderr F I1208 17:53:37.458776 1 healthcheck.go:84] fs available: 59773878272, total capacity: 85292941312, percentage available: 70.08, number of free inodes: 41544823 2025-12-08T17:53:37.458862427+00:00 stderr F I1208 17:53:37.458818 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T17:53:37.458862427+00:00 stderr F I1208 17:53:37.458835 1 nodeserver.go:330] Capacity: 85292941312 Used: 25519063040 Available: 59773878272 Inodes: 41679680 Free inodes: 41544823 Used inodes: 134857 2025-12-08T17:53:43.734063731+00:00 stderr F I1208 17:53:43.730035 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:53:43.734063731+00:00 stderr F I1208 17:53:43.730109 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:54:27.996507381+00:00 stderr F I1208 17:54:27.996380 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:54:43.729429148+00:00 stderr F I1208 17:54:43.729376 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:54:43.729429148+00:00 stderr F I1208 17:54:43.729397 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:54:51.298464730+00:00 stderr F I1208 17:54:51.298414 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:54:51.299892078+00:00 stderr F I1208 17:54:51.299828 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:54:51.301202734+00:00 stderr F I1208 17:54:51.301174 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 
2025-12-08T17:54:51.304271326+00:00 stderr F I1208 17:54:51.304247 1 server.go:104] GRPC call: /csi.v1.Node/NodePublishVolume 2025-12-08T17:54:51.304323207+00:00 stderr F I1208 17:54:51.304265 1 nodeserver.go:82] Node Publish Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 PublishContext:map[] StagingTargetPath: TargetPath:/var/lib/kubelet/pods/82c8be84-d9b0-44df-99be-57f994255a0b/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount VolumeCapability:mount:<> access_mode: Readonly:false Secrets:map[] VolumeContext:map[csi.storage.k8s.io/ephemeral:false csi.storage.k8s.io/pod.name:image-registry-5d9d95bf5b-cmjbz csi.storage.k8s.io/pod.namespace:openshift-image-registry csi.storage.k8s.io/pod.uid:82c8be84-d9b0-44df-99be-57f994255a0b csi.storage.k8s.io/pv/name:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 csi.storage.k8s.io/pvc/name:crc-image-registry-storage csi.storage.k8s.io/pvc/namespace:openshift-image-registry csi.storage.k8s.io/serviceAccount.name:registry storage.kubernetes.io/csiProvisionerIdentity:1762159825768-6575-kubevirt.io.hostpath-provisioner-crc storagePool:local] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:55:23.369987279+00:00 stderr F I1208 17:55:23.369947 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:55:23.370870493+00:00 stderr F I1208 17:55:23.370853 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:55:23.370942225+00:00 stderr F I1208 17:55:23.370917 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/82c8be84-d9b0-44df-99be-57f994255a0b/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:55:23.377193293+00:00 stderr F I1208 17:55:23.377076 1 healthcheck.go:84] fs available: 58967060480, total capacity: 85292941312, percentage available: 69.13, number of free inodes: 41539659 2025-12-08T17:55:23.377193293+00:00 stderr F I1208 17:55:23.377100 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T17:55:23.377193293+00:00 stderr F I1208 17:55:23.377111 1 nodeserver.go:330] Capacity: 85292941312 Used: 26325880832 Available: 58967060480 Inodes: 41679680 Free inodes: 41539658 Used inodes: 140022 2025-12-08T17:55:28.008149192+00:00 stderr F I1208 17:55:28.008102 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:55:30.550111043+00:00 stderr F I1208 17:55:30.549968 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:55:30.552036926+00:00 stderr F I1208 17:55:30.551996 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:55:30.552036926+00:00 stderr F I1208 17:55:30.552022 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:55:30.557683731+00:00 stderr F I1208 17:55:30.557633 1 healthcheck.go:84] fs available: 58642419712, total capacity: 85292941312, percentage available: 68.75, number of free inodes: 41531903 2025-12-08T17:55:30.557683731+00:00 stderr F I1208 17:55:30.557659 1 nodeserver.go:321] Healthy state: 
pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T17:55:30.557683731+00:00 stderr F I1208 17:55:30.557673 1 nodeserver.go:330] Capacity: 85292941312 Used: 26650521600 Available: 58642419712 Inodes: 41679680 Free inodes: 41531903 Used inodes: 147777 2025-12-08T17:55:39.414753077+00:00 stderr F I1208 17:55:39.414699 1 server.go:104] GRPC call: /csi.v1.Node/NodeUnpublishVolume 2025-12-08T17:55:39.414753077+00:00 stderr F I1208 17:55:39.414718 1 nodeserver.go:199] Node Unpublish Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 TargetPath:/var/lib/kubelet/pods/1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:55:39.414753077+00:00 stderr F I1208 17:55:39.414732 1 nodeserver.go:206] Unmounting path: /var/lib/kubelet/pods/1a6cf2c2-bdc0-4d0c-b1e5-9c640c87cbfc/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount 2025-12-08T17:55:43.732343760+00:00 stderr F I1208 17:55:43.732266 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:55:43.732424602+00:00 stderr F I1208 17:55:43.732412 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:56:28.019919713+00:00 stderr F I1208 17:56:28.019836 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:56:32.942687713+00:00 stderr F I1208 17:56:32.942510 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:56:32.945477786+00:00 stderr F I1208 17:56:32.944476 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:56:32.945477786+00:00 stderr F I1208 17:56:32.944490 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/82c8be84-d9b0-44df-99be-57f994255a0b/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:56:32.950963129+00:00 stderr F I1208 17:56:32.950911 1 healthcheck.go:84] fs available: 57065418752, total capacity: 85292941312, percentage available: 66.91, number of free inodes: 41515344 2025-12-08T17:56:32.950963129+00:00 stderr F I1208 17:56:32.950944 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T17:56:32.950963129+00:00 stderr F I1208 17:56:32.950956 1 nodeserver.go:330] Capacity: 85292941312 Used: 28227522560 Available: 57065418752 Inodes: 41679680 Free inodes: 41515344 Used inodes: 164336 2025-12-08T17:56:43.730909021+00:00 stderr F I1208 17:56:43.730737 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:56:43.730909021+00:00 stderr F I1208 17:56:43.730760 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:57:28.037158586+00:00 stderr F I1208 17:57:28.036513 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:57:37.579953586+00:00 stderr F I1208 17:57:37.579558 1 server.go:104] GRPC call: /csi.v1.Controller/CreateVolume 2025-12-08T17:57:37.579953586+00:00 stderr F I1208 17:57:37.579579 1 controllerserver.go:93] Create Volume Request: {Name:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 CapacityRange:required_bytes:20000000000 VolumeCapabilities:[mount:<> access_mode: ] Parameters:map[csi.storage.k8s.io/pv/name:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 
csi.storage.k8s.io/pvc/name:prometheus-default-db-prometheus-default-0 csi.storage.k8s.io/pvc/namespace:service-telemetry storagePool:local] Secrets:map[] VolumeContentSource: AccessibilityRequirements:requisite: > preferred: > MutableParameters:map[] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:57:37.582237645+00:00 stderr F I1208 17:57:37.582163 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:57:37.582237645+00:00 stderr F I1208 17:57:37.582180 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:57:38.859691402+00:00 stderr F I1208 17:57:38.851401 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:57:38.859691402+00:00 stderr F I1208 17:57:38.852973 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:57:38.859691402+00:00 stderr F I1208 17:57:38.854097 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:57:38.859691402+00:00 stderr F I1208 17:57:38.854951 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:57:38.859691402+00:00 stderr F I1208 17:57:38.855763 1 server.go:104] GRPC call: /csi.v1.Node/NodePublishVolume 2025-12-08T17:57:38.859691402+00:00 stderr F I1208 17:57:38.855770 1 nodeserver.go:82] Node Publish Request: {VolumeId:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 PublishContext:map[] StagingTargetPath: TargetPath:/var/lib/kubelet/pods/3d62a6f6-b57c-48e0-9279-d8dadd01a921/volumes/kubernetes.io~csi/pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6/mount VolumeCapability:mount:<> access_mode: Readonly:false Secrets:map[] VolumeContext:map[csi.storage.k8s.io/ephemeral:false csi.storage.k8s.io/pod.name:prometheus-default-0 csi.storage.k8s.io/pod.namespace:service-telemetry csi.storage.k8s.io/pod.uid:3d62a6f6-b57c-48e0-9279-d8dadd01a921 csi.storage.k8s.io/pv/name:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 csi.storage.k8s.io/pvc/name:prometheus-default-db-prometheus-default-0 csi.storage.k8s.io/pvc/namespace:service-telemetry csi.storage.k8s.io/serviceAccount.name:prometheus-stf storage.kubernetes.io/csiProvisionerIdentity:1765215883532-6720-kubevirt.io.hostpath-provisioner-crc storagePool:local] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:57:43.730930591+00:00 stderr F I1208 17:57:43.730791 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:57:43.730930591+00:00 stderr F I1208 17:57:43.730846 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:57:44.734563821+00:00 stderr F I1208 17:57:44.734496 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:57:44.735793002+00:00 stderr F I1208 17:57:44.735751 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:57:44.735793002+00:00 stderr F I1208 17:57:44.735769 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/82c8be84-d9b0-44df-99be-57f994255a0b/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:57:44.746133340+00:00 stderr F I1208 17:57:44.746040 1 healthcheck.go:84] fs available: 55426367488, total capacity: 85292941312, percentage available: 64.98, number of free inodes: 41444923 2025-12-08T17:57:44.746133340+00:00 stderr F I1208 17:57:44.746066 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 
2025-12-08T17:57:44.746133340+00:00 stderr F I1208 17:57:44.746077 1 nodeserver.go:330] Capacity: 85292941312 Used: 29866573824 Available: 55426367488 Inodes: 41679680 Free inodes: 41444923 Used inodes: 234757 2025-12-08T17:57:51.349303983+00:00 stderr F I1208 17:57:51.349231 1 server.go:104] GRPC call: /csi.v1.Controller/CreateVolume 2025-12-08T17:57:51.350588346+00:00 stderr F I1208 17:57:51.349256 1 controllerserver.go:93] Create Volume Request: {Name:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 CapacityRange:required_bytes:20000000000 VolumeCapabilities:[mount:<> access_mode: ] Parameters:map[csi.storage.k8s.io/pv/name:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 csi.storage.k8s.io/pvc/name:alertmanager-default-db-alertmanager-default-0 csi.storage.k8s.io/pvc/namespace:service-telemetry storagePool:local] Secrets:map[] VolumeContentSource: AccessibilityRequirements:requisite: > preferred: > MutableParameters:map[] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:57:51.355908513+00:00 stderr F I1208 17:57:51.355856 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:57:51.355937854+00:00 stderr F I1208 17:57:51.355925 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:57:52.677079850+00:00 stderr F I1208 17:57:52.676673 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:57:52.683779673+00:00 stderr F I1208 17:57:52.678844 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:57:52.683779673+00:00 stderr F I1208 17:57:52.680454 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:57:52.683779673+00:00 stderr F I1208 17:57:52.681690 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:57:52.683779673+00:00 stderr F I1208 17:57:52.682853 1 server.go:104] GRPC call: /csi.v1.Node/NodePublishVolume 2025-12-08T17:57:52.683779673+00:00 stderr F I1208 17:57:52.682864 1 nodeserver.go:82] Node Publish Request: {VolumeId:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 PublishContext:map[] StagingTargetPath: TargetPath:/var/lib/kubelet/pods/81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/volumes/kubernetes.io~csi/pvc-1b15df9e-01ca-4097-a731-1c1b05c63480/mount VolumeCapability:mount:<> access_mode: Readonly:false Secrets:map[] VolumeContext:map[csi.storage.k8s.io/ephemeral:false csi.storage.k8s.io/pod.name:alertmanager-default-0 csi.storage.k8s.io/pod.namespace:service-telemetry csi.storage.k8s.io/pod.uid:81e17e77-b0f9-4df6-8c85-e06d1fd7a46a csi.storage.k8s.io/pv/name:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 csi.storage.k8s.io/pvc/name:alertmanager-default-db-alertmanager-default-0 csi.storage.k8s.io/pvc/namespace:service-telemetry csi.storage.k8s.io/serviceAccount.name:alertmanager-stf storage.kubernetes.io/csiProvisionerIdentity:1765215883532-6720-kubevirt.io.hostpath-provisioner-crc storagePool:local] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:58:23.376129966+00:00 stderr F I1208 17:58:23.376057 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:58:23.376447265+00:00 stderr F I1208 17:58:23.376389 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:58:23.377394789+00:00 stderr F I1208 17:58:23.377358 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:58:23.377394789+00:00 stderr F I1208 17:58:23.377376 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 
VolumePath:/var/lib/kubelet/pods/3d62a6f6-b57c-48e0-9279-d8dadd01a921/volumes/kubernetes.io~csi/pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:58:23.377750148+00:00 stderr F I1208 17:58:23.377713 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:58:23.377750148+00:00 stderr F I1208 17:58:23.377732 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 VolumePath:/var/lib/kubelet/pods/81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/volumes/kubernetes.io~csi/pvc-1b15df9e-01ca-4097-a731-1c1b05c63480/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:58:23.385832948+00:00 stderr F I1208 17:58:23.385747 1 healthcheck.go:84] fs available: 53041078272, total capacity: 85292941312, percentage available: 62.19, number of free inodes: 41397418 2025-12-08T17:58:23.385832948+00:00 stderr F I1208 17:58:23.385777 1 nodeserver.go:321] Healthy state: pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 Volume: true 2025-12-08T17:58:23.385832948+00:00 stderr F I1208 17:58:23.385792 1 nodeserver.go:330] Capacity: 85292941312 Used: 32251822080 Available: 53041119232 Inodes: 41679680 Free inodes: 41397418 Used inodes: 282262 2025-12-08T17:58:23.390045046+00:00 stderr F I1208 17:58:23.389986 1 healthcheck.go:84] fs available: 53044088832, total capacity: 85292941312, percentage available: 62.19, number of free inodes: 41397418 2025-12-08T17:58:23.390045046+00:00 stderr F I1208 17:58:23.390033 1 nodeserver.go:321] Healthy state: pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 Volume: true 2025-12-08T17:58:23.390064686+00:00 stderr F I1208 17:58:23.390058 1 nodeserver.go:330] Capacity: 85292941312 Used: 32248770560 Available: 53044170752 Inodes: 41679680 Free inodes: 41397418 Used inodes: 282262 2025-12-08T17:58:28.051929637+00:00 stderr F I1208 17:58:28.050856 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:58:43.736961276+00:00 stderr F I1208 17:58:43.734825 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:58:43.736961276+00:00 stderr F I1208 17:58:43.734849 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:59:27.453781017+00:00 stderr F I1208 17:59:27.453288 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:59:27.454857676+00:00 stderr F I1208 17:59:27.454791 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:59:27.454857676+00:00 stderr F I1208 17:59:27.454820 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 VolumePath:/var/lib/kubelet/pods/81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/volumes/kubernetes.io~csi/pvc-1b15df9e-01ca-4097-a731-1c1b05c63480/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:59:27.468124055+00:00 stderr F I1208 17:59:27.468028 1 healthcheck.go:84] fs available: 51692396544, total capacity: 85292941312, percentage available: 60.61, number of free inodes: 41342502 2025-12-08T17:59:27.468124055+00:00 stderr F I1208 17:59:27.468061 1 nodeserver.go:321] Healthy state: pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 Volume: true 2025-12-08T17:59:27.468124055+00:00 stderr F I1208 17:59:27.468075 1 nodeserver.go:330] Capacity: 85292941312 Used: 33600544768 Available: 51692396544 Inodes: 41679680 Free inodes: 41342501 Used inodes: 337179 
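The healthcheck.go and nodeserver.go entries above report, per volume, the filesystem capacity, used and available bytes, the percentage available, and inode counts (for example 53041078272 available out of 85292941312 total is the logged 62.19%). Below is a minimal Go sketch of how such figures can be derived from a statfs(2) call on the volume mount path; it is an illustrative stand-in under that assumption, not the driver's actual healthcheck code, and the default path is only a placeholder.

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// Hypothetical default; pass a volume mount path (for example the TargetPath
	// from a NodePublishVolume entry) as the first argument to inspect a real volume.
	path := "/"
	if len(os.Args) > 1 {
		path = os.Args[1]
	}

	var st syscall.Statfs_t
	if err := syscall.Statfs(path, &st); err != nil {
		fmt.Fprintln(os.Stderr, "statfs:", err)
		os.Exit(1)
	}

	bsize := uint64(st.Bsize)
	capacity := st.Blocks * bsize          // "total capacity" / "Capacity" in the log
	available := st.Bavail * bsize         // "fs available" / "Available"
	used := (st.Blocks - st.Bfree) * bsize // "Used"
	pctAvailable := float64(available) / float64(capacity) * 100

	fmt.Printf("Capacity: %d Used: %d Available: %d (%.2f%% available)\n",
		capacity, used, available, pctAvailable)
	fmt.Printf("Inodes: %d Free inodes: %d Used inodes: %d\n",
		st.Files, st.Ffree, st.Files-st.Ffree)
}
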
2025-12-08T17:59:28.072330770+00:00 stderr F I1208 17:59:28.065143 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T17:59:41.414181491+00:00 stderr F I1208 17:59:41.413535 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:59:41.415391724+00:00 stderr F I1208 17:59:41.415352 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:59:41.415391724+00:00 stderr F I1208 17:59:41.415368 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 VolumePath:/var/lib/kubelet/pods/3d62a6f6-b57c-48e0-9279-d8dadd01a921/volumes/kubernetes.io~csi/pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:59:41.423687643+00:00 stderr F I1208 17:59:41.423555 1 healthcheck.go:84] fs available: 51940139008, total capacity: 85292941312, percentage available: 60.90, number of free inodes: 41334406 2025-12-08T17:59:41.423687643+00:00 stderr F I1208 17:59:41.423590 1 nodeserver.go:321] Healthy state: pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 Volume: true 2025-12-08T17:59:41.423687643+00:00 stderr F I1208 17:59:41.423602 1 nodeserver.go:330] Capacity: 85292941312 Used: 33352720384 Available: 51940220928 Inodes: 41679680 Free inodes: 41334406 Used inodes: 345274 2025-12-08T17:59:43.733132827+00:00 stderr F I1208 17:59:43.732855 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:59:43.733132827+00:00 stderr F I1208 17:59:43.732952 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T17:59:44.426345447+00:00 stderr F I1208 17:59:44.426200 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T17:59:44.427607620+00:00 stderr F I1208 17:59:44.427557 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T17:59:44.427607620+00:00 stderr F I1208 17:59:44.427575 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/82c8be84-d9b0-44df-99be-57f994255a0b/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:59:44.438079786+00:00 stderr F I1208 17:59:44.437983 1 healthcheck.go:84] fs available: 52358438912, total capacity: 85292941312, percentage available: 61.39, number of free inodes: 41334371 2025-12-08T17:59:44.438079786+00:00 stderr F I1208 17:59:44.438006 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T17:59:44.438079786+00:00 stderr F I1208 17:59:44.438016 1 nodeserver.go:330] Capacity: 85292941312 Used: 32934502400 Available: 52358438912 Inodes: 41679680 Free inodes: 41334371 Used inodes: 345309 2025-12-08T18:00:28.080863442+00:00 stderr F I1208 18:00:28.080791 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T18:00:42.894053105+00:00 stderr F I1208 18:00:42.893468 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T18:00:42.895767190+00:00 stderr F I1208 18:00:42.895710 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T18:00:42.895784740+00:00 stderr F I1208 18:00:42.895748 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 
VolumePath:/var/lib/kubelet/pods/81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/volumes/kubernetes.io~csi/pvc-1b15df9e-01ca-4097-a731-1c1b05c63480/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T18:00:42.903204576+00:00 stderr F I1208 18:00:42.903113 1 healthcheck.go:84] fs available: 52345769984, total capacity: 85292941312, percentage available: 61.37, number of free inodes: 41334001 2025-12-08T18:00:42.903204576+00:00 stderr F I1208 18:00:42.903153 1 nodeserver.go:321] Healthy state: pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 Volume: true 2025-12-08T18:00:42.903204576+00:00 stderr F I1208 18:00:42.903176 1 nodeserver.go:330] Capacity: 85292941312 Used: 32947171328 Available: 52345769984 Inodes: 41679680 Free inodes: 41334001 Used inodes: 345679 2025-12-08T18:00:43.733092230+00:00 stderr F I1208 18:00:43.733021 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T18:00:43.733092230+00:00 stderr F I1208 18:00:43.733046 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T18:00:49.046128004+00:00 stderr F I1208 18:00:49.045610 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T18:00:49.047371227+00:00 stderr F I1208 18:00:49.047331 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T18:00:49.047398788+00:00 stderr F I1208 18:00:49.047356 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 VolumePath:/var/lib/kubelet/pods/3d62a6f6-b57c-48e0-9279-d8dadd01a921/volumes/kubernetes.io~csi/pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T18:00:49.059074888+00:00 stderr F I1208 18:00:49.059009 1 healthcheck.go:84] fs available: 52345765888, total capacity: 85292941312, percentage available: 61.37, number of free inodes: 41334001 2025-12-08T18:00:49.059074888+00:00 stderr F I1208 18:00:49.059040 1 nodeserver.go:321] Healthy state: pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 Volume: true 2025-12-08T18:00:49.059074888+00:00 stderr F I1208 18:00:49.059058 1 nodeserver.go:330] Capacity: 85292941312 Used: 32947175424 Available: 52345765888 Inodes: 41679680 Free inodes: 41334001 Used inodes: 345679 2025-12-08T18:01:06.164019362+00:00 stderr F I1208 18:01:06.163901 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T18:01:06.165431200+00:00 stderr F I1208 18:01:06.165356 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T18:01:06.165431200+00:00 stderr F I1208 18:01:06.165372 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/82c8be84-d9b0-44df-99be-57f994255a0b/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T18:01:06.173507076+00:00 stderr F I1208 18:01:06.173430 1 healthcheck.go:84] fs available: 52348825600, total capacity: 85292941312, percentage available: 61.38, number of free inodes: 41334000 2025-12-08T18:01:06.173507076+00:00 stderr F I1208 18:01:06.173466 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T18:01:06.173507076+00:00 stderr F I1208 18:01:06.173481 1 nodeserver.go:330] Capacity: 85292941312 Used: 32944115712 Available: 52348825600 Inodes: 41679680 Free inodes: 41334000 Used inodes: 345680 2025-12-08T18:01:28.091287409+00:00 stderr F I1208 
18:01:28.091213 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T18:01:43.736038060+00:00 stderr F I1208 18:01:43.735952 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T18:01:43.736038060+00:00 stderr F I1208 18:01:43.735989 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T18:02:16.251264490+00:00 stderr F I1208 18:02:16.251160 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T18:02:16.252415401+00:00 stderr F I1208 18:02:16.252353 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T18:02:16.252415401+00:00 stderr F I1208 18:02:16.252386 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 VolumePath:/var/lib/kubelet/pods/81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/volumes/kubernetes.io~csi/pvc-1b15df9e-01ca-4097-a731-1c1b05c63480/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T18:02:16.259356465+00:00 stderr F I1208 18:02:16.259299 1 healthcheck.go:84] fs available: 52348301312, total capacity: 85292941312, percentage available: 61.37, number of free inodes: 41333999 2025-12-08T18:02:16.259356465+00:00 stderr F I1208 18:02:16.259325 1 nodeserver.go:321] Healthy state: pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 Volume: true 2025-12-08T18:02:16.259356465+00:00 stderr F I1208 18:02:16.259336 1 nodeserver.go:330] Capacity: 85292941312 Used: 32944640000 Available: 52348301312 Inodes: 41679680 Free inodes: 41333999 Used inodes: 345681 2025-12-08T18:02:28.101613947+00:00 stderr F I1208 18:02:28.101531 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T18:02:43.736641573+00:00 stderr F I1208 18:02:43.736522 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T18:02:43.736641573+00:00 stderr F I1208 18:02:43.736590 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T18:02:44.522846186+00:00 stderr F I1208 18:02:44.522751 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T18:02:44.524116151+00:00 stderr F I1208 18:02:44.524075 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T18:02:44.524116151+00:00 stderr F I1208 18:02:44.524090 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/82c8be84-d9b0-44df-99be-57f994255a0b/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T18:02:44.530313615+00:00 stderr F I1208 18:02:44.530201 1 healthcheck.go:84] fs available: 51902656512, total capacity: 85292941312, percentage available: 60.85, number of free inodes: 41326261 2025-12-08T18:02:44.530313615+00:00 stderr F I1208 18:02:44.530222 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T18:02:44.530313615+00:00 stderr F I1208 18:02:44.530232 1 nodeserver.go:330] Capacity: 85292941312 Used: 33390116864 Available: 51902824448 Inodes: 41679680 Free inodes: 41326260 Used inodes: 353420 2025-12-08T18:02:46.055926805+00:00 stderr F I1208 18:02:46.055042 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T18:02:46.057749534+00:00 stderr F I1208 18:02:46.056805 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T18:02:46.057749534+00:00 stderr F I1208 
18:02:46.056822 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 VolumePath:/var/lib/kubelet/pods/3d62a6f6-b57c-48e0-9279-d8dadd01a921/volumes/kubernetes.io~csi/pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T18:02:46.065000348+00:00 stderr F I1208 18:02:46.064934 1 healthcheck.go:84] fs available: 51643248640, total capacity: 85292941312, percentage available: 60.55, number of free inodes: 41324318 2025-12-08T18:02:46.065000348+00:00 stderr F I1208 18:02:46.064965 1 nodeserver.go:321] Healthy state: pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 Volume: true 2025-12-08T18:02:46.065000348+00:00 stderr F I1208 18:02:46.064979 1 nodeserver.go:330] Capacity: 85292941312 Used: 33649717248 Available: 51643224064 Inodes: 41679680 Free inodes: 41324318 Used inodes: 355362 2025-12-08T18:03:28.117870669+00:00 stderr F I1208 18:03:28.117772 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T18:03:43.737249972+00:00 stderr F I1208 18:03:43.737050 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T18:03:43.737249972+00:00 stderr F I1208 18:03:43.737090 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T18:03:43.810514055+00:00 stderr F I1208 18:03:43.809590 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T18:03:43.817693657+00:00 stderr F I1208 18:03:43.814633 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T18:03:43.817693657+00:00 stderr F I1208 18:03:43.814655 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 VolumePath:/var/lib/kubelet/pods/81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/volumes/kubernetes.io~csi/pvc-1b15df9e-01ca-4097-a731-1c1b05c63480/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T18:03:43.822362913+00:00 stderr F I1208 18:03:43.822306 1 healthcheck.go:84] fs available: 51706470400, total capacity: 85292941312, percentage available: 60.62, number of free inodes: 41323670 2025-12-08T18:03:43.822362913+00:00 stderr F I1208 18:03:43.822336 1 nodeserver.go:321] Healthy state: pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 Volume: true 2025-12-08T18:03:43.822362913+00:00 stderr F I1208 18:03:43.822347 1 nodeserver.go:330] Capacity: 85292941312 Used: 33586470912 Available: 51706470400 Inodes: 41679680 Free inodes: 41323670 Used inodes: 356010 2025-12-08T18:04:01.961478357+00:00 stderr F I1208 18:04:01.961031 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T18:04:01.962022431+00:00 stderr F I1208 18:04:01.961993 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T18:04:01.962022431+00:00 stderr F I1208 18:04:01.962006 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 VolumePath:/var/lib/kubelet/pods/3d62a6f6-b57c-48e0-9279-d8dadd01a921/volumes/kubernetes.io~csi/pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T18:04:01.972638373+00:00 stderr F I1208 18:04:01.972599 1 healthcheck.go:84] fs available: 51708379136, total capacity: 85292941312, percentage available: 60.62, number of free inodes: 41323591 2025-12-08T18:04:01.972638373+00:00 stderr F I1208 18:04:01.972620 1 nodeserver.go:321] Healthy state: 
pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 Volume: true 2025-12-08T18:04:01.972672494+00:00 stderr F I1208 18:04:01.972640 1 nodeserver.go:330] Capacity: 85292941312 Used: 33584562176 Available: 51708379136 Inodes: 41679680 Free inodes: 41323591 Used inodes: 356089 2025-12-08T18:04:16.936128929+00:00 stderr F I1208 18:04:16.935532 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T18:04:16.937680910+00:00 stderr F I1208 18:04:16.937543 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T18:04:16.937680910+00:00 stderr F I1208 18:04:16.937560 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 VolumePath:/var/lib/kubelet/pods/82c8be84-d9b0-44df-99be-57f994255a0b/volumes/kubernetes.io~csi/pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T18:04:16.950523111+00:00 stderr F I1208 18:04:16.950430 1 healthcheck.go:84] fs available: 51704455168, total capacity: 85292941312, percentage available: 60.62, number of free inodes: 41323521 2025-12-08T18:04:16.950523111+00:00 stderr F I1208 18:04:16.950468 1 nodeserver.go:321] Healthy state: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 Volume: true 2025-12-08T18:04:16.950523111+00:00 stderr F I1208 18:04:16.950484 1 nodeserver.go:330] Capacity: 85292941312 Used: 33588486144 Available: 51704455168 Inodes: 41679680 Free inodes: 41323521 Used inodes: 356159 2025-12-08T18:04:28.132866088+00:00 stderr F I1208 18:04:28.132366 1 utils.go:221] pool (local, /csi-data-dir), shares path with OS which can lead to node disk pressure 2025-12-08T18:04:43.737476397+00:00 stderr F I1208 18:04:43.737421 1 server.go:104] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T18:04:43.737476397+00:00 stderr F I1208 18:04:43.737441 1 controllerserver.go:230] Checking capacity for storage pool local 2025-12-08T18:04:55.682637135+00:00 stderr F I1208 18:04:55.682561 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetCapabilities 2025-12-08T18:04:55.684491453+00:00 stderr F I1208 18:04:55.684411 1 server.go:104] GRPC call: /csi.v1.Node/NodeGetVolumeStats 2025-12-08T18:04:55.684491453+00:00 stderr F I1208 18:04:55.684460 1 nodeserver.go:314] Node Get Volume Stats Request: {VolumeId:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 VolumePath:/var/lib/kubelet/pods/81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/volumes/kubernetes.io~csi/pvc-1b15df9e-01ca-4097-a731-1c1b05c63480/mount StagingTargetPath: XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T18:04:55.692241419+00:00 stderr F I1208 18:04:55.692137 1 healthcheck.go:84] fs available: 50629181440, total capacity: 85292941312, percentage available: 59.36, number of free inodes: 41322246 2025-12-08T18:04:55.692241419+00:00 stderr F I1208 18:04:55.692167 1 nodeserver.go:321] Healthy state: pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 Volume: true 2025-12-08T18:04:55.692241419+00:00 stderr F I1208 18:04:55.692181 1 nodeserver.go:330] Capacity: 85292941312 Used: 34663759872 Available: 50629181440 Inodes: 41679680 Free inodes: 41322246 Used inodes: 357434 ././@LongLink0000644000000000000000000000026200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/csi-provisioner/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_c0000755000175000017500000000000015115611521033162 5ustar 
zuulzuul././@LongLink0000644000000000000000000000026700000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/csi-provisioner/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_c0000644000175000017500000035703415115611514033202 0ustar zuulzuul2025-12-08T17:44:43.307322675+00:00 stderr F W1208 17:44:43.306746 1 feature_gate.go:241] Setting GA feature gate Topology=true. It will be removed in a future release. 2025-12-08T17:44:43.307322675+00:00 stderr F I1208 17:44:43.306954 1 feature_gate.go:249] feature gates: &{map[Topology:true]} 2025-12-08T17:44:43.307322675+00:00 stderr F I1208 17:44:43.307120 1 csi-provisioner.go:154] Version: 66d31bec20f1f15d0916c3b833d3aec6422942ca 2025-12-08T17:44:43.307322675+00:00 stderr F I1208 17:44:43.307127 1 csi-provisioner.go:177] Building kube configs for running in cluster... 2025-12-08T17:44:43.494006129+00:00 stderr F I1208 17:44:43.493517 1 connection.go:215] Connecting to unix:///csi/csi.sock 2025-12-08T17:44:43.512382681+00:00 stderr F I1208 17:44:43.512301 1 common.go:138] Probing CSI driver for readiness 2025-12-08T17:44:43.512382681+00:00 stderr F I1208 17:44:43.512329 1 connection.go:244] GRPC call: /csi.v1.Identity/Probe 2025-12-08T17:44:43.524855857+00:00 stderr F I1208 17:44:43.512335 1 connection.go:245] GRPC request: {} 2025-12-08T17:44:43.530350650+00:00 stderr F I1208 17:44:43.530312 1 connection.go:251] GRPC response: {} 2025-12-08T17:44:43.530350650+00:00 stderr F I1208 17:44:43.530334 1 connection.go:252] GRPC error: 2025-12-08T17:44:43.530350650+00:00 stderr F I1208 17:44:43.530345 1 connection.go:244] GRPC call: /csi.v1.Identity/GetPluginInfo 2025-12-08T17:44:43.530373321+00:00 stderr F I1208 17:44:43.530349 1 connection.go:245] GRPC request: {} 2025-12-08T17:44:43.531132713+00:00 stderr F I1208 17:44:43.531029 1 connection.go:251] GRPC response: {"name":"kubevirt.io.hostpath-provisioner","vendor_version":"latest"} 2025-12-08T17:44:43.531132713+00:00 stderr F I1208 17:44:43.531040 1 connection.go:252] GRPC error: 2025-12-08T17:44:43.531132713+00:00 stderr F I1208 17:44:43.531049 1 csi-provisioner.go:230] Detected CSI driver kubevirt.io.hostpath-provisioner 2025-12-08T17:44:43.531132713+00:00 stderr F I1208 17:44:43.531056 1 connection.go:244] GRPC call: /csi.v1.Identity/GetPluginCapabilities 2025-12-08T17:44:43.531132713+00:00 stderr F I1208 17:44:43.531059 1 connection.go:245] GRPC request: {} 2025-12-08T17:44:43.531744030+00:00 stderr F I1208 17:44:43.531685 1 connection.go:251] GRPC response: {"capabilities":[{"Type":{"Service":{"type":1}}},{"Type":{"Service":{"type":2}}}]} 2025-12-08T17:44:43.531744030+00:00 stderr F I1208 17:44:43.531697 1 connection.go:252] GRPC error: 2025-12-08T17:44:43.531744030+00:00 stderr F I1208 17:44:43.531705 1 connection.go:244] GRPC call: /csi.v1.Controller/ControllerGetCapabilities 2025-12-08T17:44:43.531744030+00:00 stderr F I1208 17:44:43.531709 1 connection.go:245] GRPC request: {} 2025-12-08T17:44:43.532215553+00:00 stderr F I1208 17:44:43.532180 1 connection.go:251] GRPC response: {"capabilities":[{"Type":{"Rpc":{"type":1}}},{"Type":{"Rpc":{"type":12}}},{"Type":{"Rpc":{"type":4}}},{"Type":{"Rpc":{"type":3}}},{"Type":{"Rpc":{"type":11}}}]} 2025-12-08T17:44:43.532215553+00:00 stderr F I1208 17:44:43.532191 1 connection.go:252] GRPC error: 2025-12-08T17:44:43.537474949+00:00 stderr F I1208 17:44:43.536722 1 
csi-provisioner.go:302] CSI driver does not support PUBLISH_UNPUBLISH_VOLUME, not watching VolumeAttachments 2025-12-08T17:44:43.537474949+00:00 stderr F I1208 17:44:43.537068 1 connection.go:244] GRPC call: /csi.v1.Node/NodeGetInfo 2025-12-08T17:44:43.537474949+00:00 stderr F I1208 17:44:43.537072 1 connection.go:245] GRPC request: {} 2025-12-08T17:44:43.537495350+00:00 stderr F I1208 17:44:43.537471 1 connection.go:251] GRPC response: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"node_id":"crc"} 2025-12-08T17:44:43.537495350+00:00 stderr F I1208 17:44:43.537477 1 connection.go:252] GRPC error: 2025-12-08T17:44:43.567667699+00:00 stderr F I1208 17:44:43.537501 1 csi-provisioner.go:351] using local topology with Node = &Node{ObjectMeta:{crc 0 0001-01-01 00:00:00 +0000 UTC map[topology.hostpath.csi/node:crc] map[] [] [] []},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{},Allocatable:ResourceList{},Phase:,Conditions:[]NodeCondition{},Addresses:[]NodeAddress{},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:0,},},NodeInfo:NodeSystemInfo{MachineID:,SystemUUID:,BootID:,KernelVersion:,OSImage:,ContainerRuntimeVersion:,KubeletVersion:,KubeProxyVersion:,OperatingSystem:,Architecture:,},Images:[]ContainerImage{},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} and CSINode = &CSINode{ObjectMeta:{crc 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},Spec:CSINodeSpec{Drivers:[]CSINodeDriver{CSINodeDriver{Name:kubevirt.io.hostpath-provisioner,NodeID:crc,TopologyKeys:[topology.hostpath.csi/node],Allocatable:nil,},},},} 2025-12-08T17:44:43.616278682+00:00 stderr F I1208 17:44:43.616218 1 csi-provisioner.go:464] using apps/v1/DaemonSet csi-hostpathplugin as owner of CSIStorageCapacity objects 2025-12-08T17:44:43.616278682+00:00 stderr F I1208 17:44:43.616262 1 csi-provisioner.go:483] producing CSIStorageCapacity objects with fixed topology segment [topology.hostpath.csi/node: crc] 2025-12-08T17:44:43.619253254+00:00 stderr F I1208 17:44:43.619206 1 csi-provisioner.go:529] using the CSIStorageCapacity v1 API 2025-12-08T17:44:43.619327536+00:00 stderr F I1208 17:44:43.619297 1 capacity.go:339] Capacity Controller: topology changed: added [0xc0005a9878 = topology.hostpath.csi/node: crc], removed [] 2025-12-08T17:44:43.619787419+00:00 stderr F I1208 17:44:43.619728 1 controller.go:732] Using saving PVs to API server in background 2025-12-08T17:44:43.620129518+00:00 stderr F I1208 17:44:43.620104 1 reflector.go:289] Starting reflector *v1.PersistentVolumeClaim (15m0s) from k8s.io/client-go/informers/factory.go:150 2025-12-08T17:44:43.620129518+00:00 stderr F I1208 17:44:43.620111 1 reflector.go:289] Starting reflector *v1.CSIStorageCapacity (1h0m0s) from k8s.io/client-go/informers/factory.go:150 2025-12-08T17:44:43.620129518+00:00 stderr F I1208 17:44:43.620121 1 reflector.go:325] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:150 2025-12-08T17:44:43.620129518+00:00 stderr F I1208 17:44:43.620125 1 reflector.go:325] Listing and watching *v1.CSIStorageCapacity from k8s.io/client-go/informers/factory.go:150 2025-12-08T17:44:43.620145459+00:00 stderr F I1208 17:44:43.620123 1 reflector.go:289] Starting reflector *v1.StorageClass (1h0m0s) from k8s.io/client-go/informers/factory.go:150 2025-12-08T17:44:43.620145459+00:00 stderr F I1208 17:44:43.620136 1 reflector.go:325] Listing and 
watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:150 2025-12-08T17:44:43.651683347+00:00 stderr F I1208 17:44:43.651629 1 capacity.go:373] Capacity Controller: storage class crc-csi-hostpath-provisioner was updated or added 2025-12-08T17:44:43.651683347+00:00 stderr F I1208 17:44:43.651657 1 capacity.go:480] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:44:43.720252515+00:00 stderr F I1208 17:44:43.720175 1 shared_informer.go:341] caches populated 2025-12-08T17:44:43.720252515+00:00 stderr F I1208 17:44:43.720206 1 shared_informer.go:341] caches populated 2025-12-08T17:44:43.720252515+00:00 stderr F I1208 17:44:43.720229 1 controller.go:811] Starting provisioner controller kubevirt.io.hostpath-provisioner_csi-hostpathplugin-qrls7_ef5ab378-87ec-4002-9e9f-3235e89de6f8! 2025-12-08T17:44:43.720309186+00:00 stderr F I1208 17:44:43.720287 1 capacity.go:243] Starting Capacity Controller 2025-12-08T17:44:43.720309186+00:00 stderr F I1208 17:44:43.720301 1 shared_informer.go:341] caches populated 2025-12-08T17:44:43.720371578+00:00 stderr F I1208 17:44:43.720308 1 capacity.go:339] Capacity Controller: topology changed: added [0xc0005a9878 = topology.hostpath.csi/node: crc], removed [] 2025-12-08T17:44:43.720371578+00:00 stderr F I1208 17:44:43.720358 1 capacity.go:480] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:44:43.720403029+00:00 stderr F I1208 17:44:43.720372 1 capacity.go:279] Initial number of topology segments 1, storage classes 1, potential CSIStorageCapacity objects 1 2025-12-08T17:44:43.720403029+00:00 stderr F I1208 17:44:43.720379 1 capacity.go:290] Checking for existing CSIStorageCapacity objects 2025-12-08T17:44:43.720450880+00:00 stderr F I1208 17:44:43.720426 1 capacity.go:725] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 35673 matches {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:44:43.720450880+00:00 stderr F I1208 17:44:43.720441 1 capacity.go:255] Started Capacity Controller 2025-12-08T17:44:43.720464050+00:00 stderr F I1208 17:44:43.720456 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:44:43.720493041+00:00 stderr F I1208 17:44:43.720475 1 volume_store.go:97] Starting save volume queue 2025-12-08T17:44:43.720665156+00:00 stderr F I1208 17:44:43.720629 1 reflector.go:289] Starting reflector *v1.PersistentVolume (15m0s) from sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845 2025-12-08T17:44:43.720665156+00:00 stderr F I1208 17:44:43.720645 1 reflector.go:325] Listing and watching *v1.PersistentVolume from sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845 2025-12-08T17:44:43.721263012+00:00 stderr F I1208 17:44:43.720991 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 35673 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:44:43.721263012+00:00 stderr F I1208 17:44:43.721159 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:44:43.721263012+00:00 stderr F I1208 17:44:43.721233 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:44:43.721544671+00:00 stderr F I1208 17:44:43.721239 1 
connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:44:43.722216400+00:00 stderr F I1208 17:44:43.722094 1 reflector.go:289] Starting reflector *v1.StorageClass (15m0s) from sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848 2025-12-08T17:44:43.722216400+00:00 stderr F I1208 17:44:43.722148 1 reflector.go:325] Listing and watching *v1.StorageClass from sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848 2025-12-08T17:44:43.722588980+00:00 stderr F I1208 17:44:43.722537 1 connection.go:251] GRPC response: {"available_capacity":59309539328,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:44:43.722588980+00:00 stderr F I1208 17:44:43.722561 1 connection.go:252] GRPC error: 2025-12-08T17:44:43.722666522+00:00 stderr F I1208 17:44:43.722583 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 57919472Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:44:43.727185528+00:00 stderr F I1208 17:44:43.727124 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 38404 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 57919472Ki 2025-12-08T17:44:43.727546318+00:00 stderr F I1208 17:44:43.727478 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 38404 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:44:43.825921985+00:00 stderr F I1208 17:44:43.821222 1 shared_informer.go:341] caches populated 2025-12-08T17:44:43.829903056+00:00 stderr F I1208 17:44:43.821851 1 controller.go:860] Started provisioner controller kubevirt.io.hostpath-provisioner_csi-hostpathplugin-qrls7_ef5ab378-87ec-4002-9e9f-3235e89de6f8! 
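The capacity controller entries above take the byte values returned by /csi.v1.Controller/GetCapacity and record them as binary-kibibyte quantities: 59309539328 bytes becomes the "new capacity 57919472Ki" and 85292941312 bytes the "new maximumVolumeSize 83293888Ki". The short Go check below reproduces that conversion; it is plain arithmetic for illustration, not code from csi-provisioner.

package main

import "fmt"

// toKi converts a byte count into the plain binary-kibibyte form that appears
// in the capacity controller log lines above (bytes divided by 1024).
func toKi(bytes int64) string {
	return fmt.Sprintf("%dKi", bytes/1024)
}

func main() {
	fmt.Println(toKi(59309539328)) // available_capacity  -> 57919472Ki ("new capacity")
	fmt.Println(toKi(85292941312)) // maximum_volume_size -> 83293888Ki ("new maximumVolumeSize")
}
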
2025-12-08T17:44:43.853979086+00:00 stderr F I1208 17:44:43.828769 1 controller.go:1152] handleProtectionFinalizer Volume : &PersistentVolume{ObjectMeta:{pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 fc1bbaba-db8e-4b91-8f7b-815ce1e79968 24587 0 2025-11-03 08:50:26 +0000 UTC map[] map[pv.kubernetes.io/provisioned-by:kubevirt.io.hostpath-provisioner volume.kubernetes.io/provisioner-deletion-secret-name: volume.kubernetes.io/provisioner-deletion-secret-namespace:] [] [kubernetes.io/pv-protection] [{csi-provisioner Update v1 2025-11-03 08:50:26 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:pv.kubernetes.io/provisioned-by":{},"f:volume.kubernetes.io/provisioner-deletion-secret-name":{},"f:volume.kubernetes.io/provisioner-deletion-secret-namespace":{}}},"f:spec":{"f:accessModes":{},"f:capacity":{".":{},"f:storage":{}},"f:claimRef":{".":{},"f:apiVersion":{},"f:kind":{},"f:name":{},"f:namespace":{},"f:resourceVersion":{},"f:uid":{}},"f:csi":{".":{},"f:driver":{},"f:volumeAttributes":{".":{},"f:csi.storage.k8s.io/pv/name":{},"f:csi.storage.k8s.io/pvc/name":{},"f:csi.storage.k8s.io/pvc/namespace":{},"f:storage.kubernetes.io/csiProvisionerIdentity":{},"f:storagePool":{}},"f:volumeHandle":{}},"f:nodeAffinity":{".":{},"f:required":{}},"f:persistentVolumeReclaimPolicy":{},"f:storageClassName":{},"f:volumeMode":{}}} } {kube-controller-manager Update v1 2025-11-03 08:50:26 +0000 UTC FieldsV1 {"f:status":{"f:phase":{}}} status}]},Spec:PersistentVolumeSpec{Capacity:ResourceList{storage: {{32212254720 0} {} 30Gi BinarySI},},PersistentVolumeSource:PersistentVolumeSource{GCEPersistentDisk:nil,AWSElasticBlockStore:nil,HostPath:nil,Glusterfs:nil,NFS:nil,RBD:nil,ISCSI:nil,Cinder:nil,CephFS:nil,FC:nil,Flocker:nil,FlexVolume:nil,AzureFile:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Local:nil,StorageOS:nil,CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2,csi.storage.k8s.io/pvc/name: crc-image-registry-storage,csi.storage.k8s.io/pvc/namespace: openshift-image-registry,storage.kubernetes.io/csiProvisionerIdentity: 1762159825768-6575-kubevirt.io.hostpath-provisioner-crc,storagePool: local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,},},AccessModes:[ReadWriteMany],ClaimRef:&ObjectReference{Kind:PersistentVolumeClaim,Namespace:openshift-image-registry,Name:crc-image-registry-storage,UID:b21f41aa-58d4-44b1-aeaa-280a8e32ddf2,APIVersion:v1,ResourceVersion:22386,FieldPath:,},PersistentVolumeReclaimPolicy:Retain,StorageClassName:crc-csi-hostpath-provisioner,MountOptions:[],VolumeMode:*Filesystem,NodeAffinity:&VolumeNodeAffinity{Required:&NodeSelector{NodeSelectorTerms:[]NodeSelectorTerm{NodeSelectorTerm{MatchExpressions:[]NodeSelectorRequirement{NodeSelectorRequirement{Key:topology.hostpath.csi/node,Operator:In,Values:[crc],},},MatchFields:[]NodeSelectorRequirement{},},},},},},Status:PersistentVolumeStatus{Phase:Bound,Message:,Reason:,LastPhaseTransitionTime:2025-11-03 08:50:26 +0000 UTC,},} 2025-12-08T17:44:43.853979086+00:00 stderr F I1208 17:44:43.849768 1 controller.go:1239] shouldDelete volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" 2025-12-08T17:44:43.853979086+00:00 stderr F I1208 17:44:43.849776 1 controller.go:1260] shouldDelete volume 
"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" is false: PersistentVolumePhase is not Released 2025-12-08T17:45:43.721530928+00:00 stderr F I1208 17:45:43.721428 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:45:43.721530928+00:00 stderr F I1208 17:45:43.721512 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:45:43.721604380+00:00 stderr F I1208 17:45:43.721559 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:45:43.722192647+00:00 stderr F I1208 17:45:43.721567 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:45:43.723231858+00:00 stderr F I1208 17:45:43.723182 1 connection.go:251] GRPC response: {"available_capacity":59840512000,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:45:43.723231858+00:00 stderr F I1208 17:45:43.723213 1 connection.go:252] GRPC error: 2025-12-08T17:45:43.723276510+00:00 stderr F I1208 17:45:43.723244 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58438000Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:45:43.729087714+00:00 stderr F I1208 17:45:43.728996 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 38783 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:45:43.729783445+00:00 stderr F I1208 17:45:43.729726 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 38783 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58438000Ki 2025-12-08T17:45:52.345359937+00:00 stderr F I1208 17:45:52.328743 1 reflector.go:790] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845: Watch close - *v1.PersistentVolume total 1 items received 2025-12-08T17:45:52.345359937+00:00 stderr F I1208 17:45:52.329502 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.PersistentVolumeClaim total 1 items received 2025-12-08T17:45:52.369076158+00:00 stderr F I1208 17:45:52.369022 1 reflector.go:790] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848: Watch close - *v1.StorageClass total 1 items received 2025-12-08T17:45:52.369301835+00:00 stderr F I1208 17:45:52.369257 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.StorageClass total 1 items received 2025-12-08T17:45:52.372192843+00:00 stderr F I1208 17:45:52.372069 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.PersistentVolumeClaim returned Get "https://10.217.4.1:443/api/v1/persistentvolumeclaims?allowWatchBookmarks=true&resourceVersion=38779&timeout=8m42s&timeoutSeconds=522&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:52.372192843+00:00 stderr F I1208 17:45:52.372103 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.CSIStorageCapacity total 3 items received 2025-12-08T17:45:52.372756020+00:00 stderr F I1208 17:45:52.372387 1 reflector.go:421] 
sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845: watch of *v1.PersistentVolume returned Get "https://10.217.4.1:443/api/v1/persistentvolumes?allowWatchBookmarks=true&resourceVersion=38775&timeout=6m56s&timeoutSeconds=416&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:52.419968567+00:00 stderr F I1208 17:45:52.413150 1 reflector.go:421] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=8m14s&timeoutSeconds=494&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:52.419968567+00:00 stderr F I1208 17:45:52.413154 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=8m17s&timeoutSeconds=497&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:52.419968567+00:00 stderr F I1208 17:45:52.418474 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.CSIStorageCapacity returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/namespaces/hostpath-provisioner/csistoragecapacities?allowWatchBookmarks=true&labelSelector=csi.storage.k8s.io%2Fdrivername%3Dkubevirt.io.hostpath-provisioner%2Ccsi.storage.k8s.io%2Fmanaged-by%3Dexternal-provisioner-crc&resourceVersion=38783&timeout=6m33s&timeoutSeconds=393&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:53.330871998+00:00 stderr F I1208 17:45:53.330775 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.PersistentVolumeClaim returned Get "https://10.217.4.1:443/api/v1/persistentvolumeclaims?allowWatchBookmarks=true&resourceVersion=38779&timeout=8m36s&timeoutSeconds=516&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:53.357179847+00:00 stderr F I1208 17:45:53.357068 1 reflector.go:421] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845: watch of *v1.PersistentVolume returned Get "https://10.217.4.1:443/api/v1/persistentvolumes?allowWatchBookmarks=true&resourceVersion=38775&timeout=7m12s&timeoutSeconds=432&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:53.825727431+00:00 stderr F I1208 17:45:53.825656 1 reflector.go:421] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=6m56s&timeoutSeconds=416&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:53.958706373+00:00 stderr F I1208 17:45:53.958628 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=9m35s&timeoutSeconds=575&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:54.019670643+00:00 stderr F I1208 17:45:54.019607 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.CSIStorageCapacity returned Get 
"https://10.217.4.1:443/apis/storage.k8s.io/v1/namespaces/hostpath-provisioner/csistoragecapacities?allowWatchBookmarks=true&labelSelector=csi.storage.k8s.io%2Fdrivername%3Dkubevirt.io.hostpath-provisioner%2Ccsi.storage.k8s.io%2Fmanaged-by%3Dexternal-provisioner-crc&resourceVersion=38783&timeout=7m5s&timeoutSeconds=425&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:55.688472053+00:00 stderr F I1208 17:45:55.688414 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.CSIStorageCapacity returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/namespaces/hostpath-provisioner/csistoragecapacities?allowWatchBookmarks=true&labelSelector=csi.storage.k8s.io%2Fdrivername%3Dkubevirt.io.hostpath-provisioner%2Ccsi.storage.k8s.io%2Fmanaged-by%3Dexternal-provisioner-crc&resourceVersion=38783&timeout=7m23s&timeoutSeconds=443&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:56.175866823+00:00 stderr F I1208 17:45:56.175799 1 reflector.go:421] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=5m58s&timeoutSeconds=358&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:56.288012079+00:00 stderr F I1208 17:45:56.287928 1 reflector.go:421] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845: watch of *v1.PersistentVolume returned Get "https://10.217.4.1:443/api/v1/persistentvolumes?allowWatchBookmarks=true&resourceVersion=38775&timeout=7m5s&timeoutSeconds=425&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:56.483851647+00:00 stderr F I1208 17:45:56.483771 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.PersistentVolumeClaim returned Get "https://10.217.4.1:443/api/v1/persistentvolumeclaims?allowWatchBookmarks=true&resourceVersion=38779&timeout=6m10s&timeoutSeconds=370&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:56.737531782+00:00 stderr F I1208 17:45:56.737446 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=8m25s&timeoutSeconds=505&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:45:59.965219144+00:00 stderr F I1208 17:45:59.965093 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.CSIStorageCapacity returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/namespaces/hostpath-provisioner/csistoragecapacities?allowWatchBookmarks=true&labelSelector=csi.storage.k8s.io%2Fdrivername%3Dkubevirt.io.hostpath-provisioner%2Ccsi.storage.k8s.io%2Fmanaged-by%3Dexternal-provisioner-crc&resourceVersion=38783&timeout=5m41s&timeoutSeconds=341&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:00.272982102+00:00 stderr F I1208 17:46:00.272906 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=9m2s&timeoutSeconds=542&watch=true": dial tcp 10.217.4.1:443: connect: 
connection refused - backing off 2025-12-08T17:46:02.223019213+00:00 stderr F I1208 17:46:02.222842 1 reflector.go:421] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845: watch of *v1.PersistentVolume returned Get "https://10.217.4.1:443/api/v1/persistentvolumes?allowWatchBookmarks=true&resourceVersion=38775&timeout=6m11s&timeoutSeconds=371&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:02.366222021+00:00 stderr F I1208 17:46:02.366101 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.PersistentVolumeClaim returned Get "https://10.217.4.1:443/api/v1/persistentvolumeclaims?allowWatchBookmarks=true&resourceVersion=38779&timeout=5m23s&timeoutSeconds=323&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:02.550609165+00:00 stderr F I1208 17:46:02.550533 1 reflector.go:421] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=7m36s&timeoutSeconds=456&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:06.778080315+00:00 stderr F I1208 17:46:06.777999 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=8m48s&timeoutSeconds=528&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:08.748671303+00:00 stderr F I1208 17:46:08.748530 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.CSIStorageCapacity returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/namespaces/hostpath-provisioner/csistoragecapacities?allowWatchBookmarks=true&labelSelector=csi.storage.k8s.io%2Fdrivername%3Dkubevirt.io.hostpath-provisioner%2Ccsi.storage.k8s.io%2Fmanaged-by%3Dexternal-provisioner-crc&resourceVersion=38783&timeout=6m0s&timeoutSeconds=360&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:09.860164386+00:00 stderr F I1208 17:46:09.860064 1 reflector.go:421] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=5m39s&timeoutSeconds=339&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:12.582383296+00:00 stderr F I1208 17:46:12.582285 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.PersistentVolumeClaim returned Get "https://10.217.4.1:443/api/v1/persistentvolumeclaims?allowWatchBookmarks=true&resourceVersion=38779&timeout=5m13s&timeoutSeconds=313&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:14.478634303+00:00 stderr F I1208 17:46:14.478543 1 reflector.go:421] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845: watch of *v1.PersistentVolume returned Get "https://10.217.4.1:443/api/v1/persistentvolumes?allowWatchBookmarks=true&resourceVersion=38775&timeout=9m29s&timeoutSeconds=569&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:21.812202593+00:00 stderr F I1208 17:46:21.812088 1 
reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=6m53s&timeoutSeconds=413&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:25.755854534+00:00 stderr F I1208 17:46:25.755780 1 reflector.go:421] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848: watch of *v1.StorageClass returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true&resourceVersion=38781&timeout=7m42s&timeoutSeconds=462&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:26.910314956+00:00 stderr F I1208 17:46:26.910221 1 reflector.go:421] k8s.io/client-go/informers/factory.go:150: watch of *v1.CSIStorageCapacity returned Get "https://10.217.4.1:443/apis/storage.k8s.io/v1/namespaces/hostpath-provisioner/csistoragecapacities?allowWatchBookmarks=true&labelSelector=csi.storage.k8s.io%2Fdrivername%3Dkubevirt.io.hostpath-provisioner%2Ccsi.storage.k8s.io%2Fmanaged-by%3Dexternal-provisioner-crc&resourceVersion=38783&timeout=9m14s&timeoutSeconds=554&watch=true": dial tcp 10.217.4.1:443: connect: connection refused - backing off 2025-12-08T17:46:31.363815611+00:00 stderr F I1208 17:46:31.363747 1 reflector.go:445] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845: watch of *v1.PersistentVolume closed with: too old resource version: 38775 (38840) 2025-12-08T17:46:33.543825545+00:00 stderr F I1208 17:46:33.543708 1 reflector.go:445] k8s.io/client-go/informers/factory.go:150: watch of *v1.PersistentVolumeClaim closed with: too old resource version: 38779 (38840) 2025-12-08T17:46:43.722570787+00:00 stderr F I1208 17:46:43.722470 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:46:43.722570787+00:00 stderr F I1208 17:46:43.722543 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:46:43.722668390+00:00 stderr F I1208 17:46:43.722588 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:46:43.722684711+00:00 stderr F I1208 17:46:43.722594 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:46:43.723519936+00:00 stderr F I1208 17:46:43.723484 1 connection.go:251] GRPC response: {"available_capacity":59838320640,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:46:43.723519936+00:00 stderr F I1208 17:46:43.723499 1 connection.go:252] GRPC error: 2025-12-08T17:46:43.723570697+00:00 stderr F I1208 17:46:43.723517 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58435860Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:46:43.746821705+00:00 stderr F I1208 17:46:43.746607 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 38984 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58435860Ki 2025-12-08T17:46:58.838400320+00:00 stderr F I1208 17:46:58.838298 1 reflector.go:325] Listing and watching 
*v1.PersistentVolume from sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845 2025-12-08T17:46:58.844994757+00:00 stderr F I1208 17:46:58.844633 1 controller.go:1152] handleProtectionFinalizer Volume : &PersistentVolume{ObjectMeta:{pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 fc1bbaba-db8e-4b91-8f7b-815ce1e79968 24587 0 2025-11-03 08:50:26 +0000 UTC map[] map[pv.kubernetes.io/provisioned-by:kubevirt.io.hostpath-provisioner volume.kubernetes.io/provisioner-deletion-secret-name: volume.kubernetes.io/provisioner-deletion-secret-namespace:] [] [kubernetes.io/pv-protection] [{csi-provisioner Update v1 2025-11-03 08:50:26 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:pv.kubernetes.io/provisioned-by":{},"f:volume.kubernetes.io/provisioner-deletion-secret-name":{},"f:volume.kubernetes.io/provisioner-deletion-secret-namespace":{}}},"f:spec":{"f:accessModes":{},"f:capacity":{".":{},"f:storage":{}},"f:claimRef":{".":{},"f:apiVersion":{},"f:kind":{},"f:name":{},"f:namespace":{},"f:resourceVersion":{},"f:uid":{}},"f:csi":{".":{},"f:driver":{},"f:volumeAttributes":{".":{},"f:csi.storage.k8s.io/pv/name":{},"f:csi.storage.k8s.io/pvc/name":{},"f:csi.storage.k8s.io/pvc/namespace":{},"f:storage.kubernetes.io/csiProvisionerIdentity":{},"f:storagePool":{}},"f:volumeHandle":{}},"f:nodeAffinity":{".":{},"f:required":{}},"f:persistentVolumeReclaimPolicy":{},"f:storageClassName":{},"f:volumeMode":{}}} } {kube-controller-manager Update v1 2025-11-03 08:50:26 +0000 UTC FieldsV1 {"f:status":{"f:phase":{}}} status}]},Spec:PersistentVolumeSpec{Capacity:ResourceList{storage: {{32212254720 0} {} 30Gi BinarySI},},PersistentVolumeSource:PersistentVolumeSource{GCEPersistentDisk:nil,AWSElasticBlockStore:nil,HostPath:nil,Glusterfs:nil,NFS:nil,RBD:nil,ISCSI:nil,Cinder:nil,CephFS:nil,FC:nil,Flocker:nil,FlexVolume:nil,AzureFile:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Local:nil,StorageOS:nil,CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2,csi.storage.k8s.io/pvc/name: crc-image-registry-storage,csi.storage.k8s.io/pvc/namespace: openshift-image-registry,storage.kubernetes.io/csiProvisionerIdentity: 1762159825768-6575-kubevirt.io.hostpath-provisioner-crc,storagePool: local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,},},AccessModes:[ReadWriteMany],ClaimRef:&ObjectReference{Kind:PersistentVolumeClaim,Namespace:openshift-image-registry,Name:crc-image-registry-storage,UID:b21f41aa-58d4-44b1-aeaa-280a8e32ddf2,APIVersion:v1,ResourceVersion:22386,FieldPath:,},PersistentVolumeReclaimPolicy:Retain,StorageClassName:crc-csi-hostpath-provisioner,MountOptions:[],VolumeMode:*Filesystem,NodeAffinity:&VolumeNodeAffinity{Required:&NodeSelector{NodeSelectorTerms:[]NodeSelectorTerm{NodeSelectorTerm{MatchExpressions:[]NodeSelectorRequirement{NodeSelectorRequirement{Key:topology.hostpath.csi/node,Operator:In,Values:[crc],},},MatchFields:[]NodeSelectorRequirement{},},},},},},Status:PersistentVolumeStatus{Phase:Bound,Message:,Reason:,LastPhaseTransitionTime:2025-11-03 08:50:26 +0000 UTC,},} 2025-12-08T17:46:58.845045679+00:00 stderr F I1208 17:46:58.845002 1 controller.go:1239] shouldDelete volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" 
2025-12-08T17:46:58.845045679+00:00 stderr F I1208 17:46:58.845018 1 controller.go:1260] shouldDelete volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" is false: PersistentVolumePhase is not Released 2025-12-08T17:46:59.187295812+00:00 stderr F I1208 17:46:59.187215 1 reflector.go:445] k8s.io/client-go/informers/factory.go:150: watch of *v1.StorageClass closed with: too old resource version: 38781 (38840) 2025-12-08T17:47:01.437950961+00:00 stderr F I1208 17:47:01.437592 1 reflector.go:445] k8s.io/client-go/informers/factory.go:150: watch of *v1.CSIStorageCapacity closed with: too old resource version: 38783 (38840) 2025-12-08T17:47:01.999953012+00:00 stderr F I1208 17:47:01.999860 1 reflector.go:325] Listing and watching *v1.PersistentVolumeClaim from k8s.io/client-go/informers/factory.go:150 2025-12-08T17:47:14.912833189+00:00 stderr F I1208 17:47:14.912753 1 reflector.go:445] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848: watch of *v1.StorageClass closed with: too old resource version: 38781 (38840) 2025-12-08T17:47:43.723457590+00:00 stderr F I1208 17:47:43.723367 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:47:43.723517481+00:00 stderr F I1208 17:47:43.723470 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:47:43.723565923+00:00 stderr F I1208 17:47:43.723548 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:47:43.723751949+00:00 stderr F I1208 17:47:43.723561 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:47:43.724674297+00:00 stderr F I1208 17:47:43.724642 1 connection.go:251] GRPC response: {"available_capacity":59820294144,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:47:43.724674297+00:00 stderr F I1208 17:47:43.724656 1 connection.go:252] GRPC error: 2025-12-08T17:47:43.724695278+00:00 stderr F I1208 17:47:43.724670 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58418256Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:47:43.732431762+00:00 stderr F E1208 17:47:43.732383 1 capacity.go:551] update CSIStorageCapacity for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}: Operation cannot be fulfilled on csistoragecapacities.storage.k8s.io "csisc-k4gvk": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:43.732431762+00:00 stderr F W1208 17:47:43.732400 1 capacity.go:552] Retrying capacity.workItem{segment:(*topology.Segment)(0xc0005a9878), storageClassName:"crc-csi-hostpath-provisioner"} after 0 failures 2025-12-08T17:47:44.293505734+00:00 stderr F I1208 17:47:44.293408 1 reflector.go:325] Listing and watching *v1.CSIStorageCapacity from k8s.io/client-go/informers/factory.go:150 2025-12-08T17:47:44.296613642+00:00 stderr F I1208 17:47:44.296544 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 38984 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:47:44.733586448+00:00 stderr F I1208 17:47:44.733489 1 capacity.go:574] Capacity 
Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:47:44.733586448+00:00 stderr F I1208 17:47:44.733542 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:47:44.733640369+00:00 stderr F I1208 17:47:44.733547 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:47:44.734254528+00:00 stderr F I1208 17:47:44.734215 1 connection.go:251] GRPC response: {"available_capacity":59820244992,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:47:44.734254528+00:00 stderr F I1208 17:47:44.734230 1 connection.go:252] GRPC error: 2025-12-08T17:47:44.734286459+00:00 stderr F I1208 17:47:44.734249 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58418208Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:47:44.739228215+00:00 stderr F I1208 17:47:44.739165 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 39360 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58418208Ki 2025-12-08T17:47:44.739321008+00:00 stderr F I1208 17:47:44.739287 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 39360 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:47:48.126305097+00:00 stderr F I1208 17:47:48.126211 1 reflector.go:325] Listing and watching *v1.StorageClass from k8s.io/client-go/informers/factory.go:150 2025-12-08T17:47:48.128371202+00:00 stderr F I1208 17:47:48.128241 1 capacity.go:373] Capacity Controller: storage class crc-csi-hostpath-provisioner was updated or added 2025-12-08T17:47:48.128371202+00:00 stderr F I1208 17:47:48.128293 1 capacity.go:480] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:47:48.128371202+00:00 stderr F I1208 17:47:48.128333 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:47:48.128445454+00:00 stderr F I1208 17:47:48.128369 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:47:48.128527476+00:00 stderr F I1208 17:47:48.128379 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:47:48.129628732+00:00 stderr F I1208 17:47:48.129562 1 connection.go:251] GRPC response: {"available_capacity":59820240896,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:47:48.129628732+00:00 stderr F I1208 17:47:48.129584 1 connection.go:252] GRPC error: 2025-12-08T17:47:48.129628732+00:00 stderr F I1208 17:47:48.129611 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58418204Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:47:48.136089915+00:00 stderr F I1208 17:47:48.136025 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 39374 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 
2025-12-08T17:47:48.136142526+00:00 stderr F I1208 17:47:48.136115 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 39374 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58418204Ki 2025-12-08T17:48:06.175086649+00:00 stderr F I1208 17:48:06.175006 1 reflector.go:325] Listing and watching *v1.StorageClass from sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848 2025-12-08T17:48:43.724554699+00:00 stderr F I1208 17:48:43.724441 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:48:43.724554699+00:00 stderr F I1208 17:48:43.724521 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:48:43.724634382+00:00 stderr F I1208 17:48:43.724556 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:48:43.724698984+00:00 stderr F I1208 17:48:43.724565 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:48:43.726247061+00:00 stderr F I1208 17:48:43.726190 1 connection.go:251] GRPC response: {"available_capacity":59816443904,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:48:43.726282542+00:00 stderr F I1208 17:48:43.726248 1 connection.go:252] GRPC error: 2025-12-08T17:48:43.726324423+00:00 stderr F I1208 17:48:43.726279 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58414496Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:48:43.733835541+00:00 stderr F I1208 17:48:43.733761 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 39724 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58414496Ki 2025-12-08T17:48:43.734045117+00:00 stderr F I1208 17:48:43.733952 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 39724 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:49:43.725491114+00:00 stderr F I1208 17:49:43.725403 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:49:43.725491114+00:00 stderr F I1208 17:49:43.725461 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:49:43.725549937+00:00 stderr F I1208 17:49:43.725518 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:49:43.725711313+00:00 stderr F I1208 17:49:43.725526 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:49:43.726790013+00:00 stderr F I1208 17:49:43.726767 1 connection.go:251] GRPC response: {"available_capacity":59817975808,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:49:43.726824914+00:00 stderr F I1208 17:49:43.726815 1 connection.go:252] GRPC error: 2025-12-08T17:49:43.726944899+00:00 stderr F I1208 17:49:43.726855 1 
capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58415992Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:49:43.735842669+00:00 stderr F I1208 17:49:43.735797 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 39813 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:49:43.736524454+00:00 stderr F I1208 17:49:43.736437 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 39813 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58415992Ki 2025-12-08T17:50:43.725594930+00:00 stderr F I1208 17:50:43.725528 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:50:43.725660143+00:00 stderr F I1208 17:50:43.725605 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:50:43.725660143+00:00 stderr F I1208 17:50:43.725629 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:50:43.725769125+00:00 stderr F I1208 17:50:43.725633 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:50:43.726822267+00:00 stderr F I1208 17:50:43.726758 1 connection.go:251] GRPC response: {"available_capacity":59775893504,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:50:43.726822267+00:00 stderr F I1208 17:50:43.726785 1 connection.go:252] GRPC error: 2025-12-08T17:50:43.726841548+00:00 stderr F I1208 17:50:43.726816 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58374896Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:50:43.732708874+00:00 stderr F I1208 17:50:43.732648 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 39904 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:50:43.732920879+00:00 stderr F I1208 17:50:43.732867 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 39904 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58374896Ki 2025-12-08T17:51:43.726578984+00:00 stderr F I1208 17:51:43.726470 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:51:43.726578984+00:00 stderr F I1208 17:51:43.726566 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:51:43.726652255+00:00 stderr F I1208 17:51:43.726619 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:51:43.726816468+00:00 stderr F I1208 17:51:43.726628 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:51:43.727825874+00:00 stderr F I1208 17:51:43.727756 1 connection.go:251] GRPC response: 
{"available_capacity":59776974848,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:51:43.727825874+00:00 stderr F I1208 17:51:43.727779 1 connection.go:252] GRPC error: 2025-12-08T17:51:43.727855594+00:00 stderr F I1208 17:51:43.727813 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58375952Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:51:43.734214577+00:00 stderr F I1208 17:51:43.734132 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 40008 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:51:43.734414250+00:00 stderr F I1208 17:51:43.734358 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 40008 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58375952Ki 2025-12-08T17:52:15.005596555+00:00 stderr F I1208 17:52:15.005469 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.PersistentVolumeClaim total 6 items received 2025-12-08T17:52:43.727295538+00:00 stderr F I1208 17:52:43.727177 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:52:43.727295538+00:00 stderr F I1208 17:52:43.727258 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:52:43.727383070+00:00 stderr F I1208 17:52:43.727293 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:52:43.727383070+00:00 stderr F I1208 17:52:43.727299 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:52:43.728490181+00:00 stderr F I1208 17:52:43.728401 1 connection.go:251] GRPC response: {"available_capacity":59775676416,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:52:43.728490181+00:00 stderr F I1208 17:52:43.728428 1 connection.go:252] GRPC error: 2025-12-08T17:52:43.728490181+00:00 stderr F I1208 17:52:43.728458 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58374684Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:52:43.735505287+00:00 stderr F I1208 17:52:43.735437 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 40100 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58374684Ki 2025-12-08T17:52:43.735505287+00:00 stderr F I1208 17:52:43.735443 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 40100 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:53:05.130641480+00:00 stderr F I1208 17:53:05.130028 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.StorageClass total 7 items received 2025-12-08T17:53:43.727780760+00:00 stderr F I1208 17:53:43.727668 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:53:43.727780760+00:00 stderr F I1208 17:53:43.727734 1 capacity.go:574] Capacity 
Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:53:43.727780760+00:00 stderr F I1208 17:53:43.727764 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:53:43.727992515+00:00 stderr F I1208 17:53:43.727768 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:53:43.732439517+00:00 stderr F I1208 17:53:43.732239 1 connection.go:251] GRPC response: {"available_capacity":59775012864,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:53:43.732439517+00:00 stderr F I1208 17:53:43.732260 1 connection.go:252] GRPC error: 2025-12-08T17:53:43.732439517+00:00 stderr F I1208 17:53:43.732280 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58374036Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:53:43.742979193+00:00 stderr F I1208 17:53:43.742643 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 40521 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:53:43.743241820+00:00 stderr F I1208 17:53:43.743087 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 40521 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58374036Ki 2025-12-08T17:54:43.728557215+00:00 stderr F I1208 17:54:43.728496 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:54:43.728677058+00:00 stderr F I1208 17:54:43.728661 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:54:43.728730180+00:00 stderr F I1208 17:54:43.728716 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:54:43.728836953+00:00 stderr F I1208 17:54:43.728745 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:54:43.729898161+00:00 stderr F I1208 17:54:43.729844 1 connection.go:251] GRPC response: {"available_capacity":59767336960,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:54:43.729898161+00:00 stderr F I1208 17:54:43.729860 1 connection.go:252] GRPC error: 2025-12-08T17:54:43.729918761+00:00 stderr F I1208 17:54:43.729899 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 58366540Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:54:43.736117148+00:00 stderr F I1208 17:54:43.736060 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 40705 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 58366540Ki 2025-12-08T17:54:43.736308003+00:00 stderr F I1208 17:54:43.736188 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 40705 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:55:10.180983259+00:00 stderr F I1208 17:55:10.180909 1 
reflector.go:790] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848: Watch close - *v1.StorageClass total 9 items received 2025-12-08T17:55:43.731276201+00:00 stderr F I1208 17:55:43.729582 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:55:43.731276201+00:00 stderr F I1208 17:55:43.729643 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:55:43.731276201+00:00 stderr F I1208 17:55:43.729668 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:55:43.731276201+00:00 stderr F I1208 17:55:43.729673 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:55:43.735676111+00:00 stderr F I1208 17:55:43.733085 1 connection.go:251] GRPC response: {"available_capacity":58374017024,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:55:43.735676111+00:00 stderr F I1208 17:55:43.733107 1 connection.go:252] GRPC error: 2025-12-08T17:55:43.735676111+00:00 stderr F I1208 17:55:43.733133 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 57005876Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:55:43.739336281+00:00 stderr F I1208 17:55:43.739210 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 42902 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:55:43.739570129+00:00 stderr F I1208 17:55:43.739514 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 42902 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 57005876Ki 2025-12-08T17:56:02.848051801+00:00 stderr F I1208 17:56:02.846946 1 reflector.go:790] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845: Watch close - *v1.PersistentVolume total 10 items received 2025-12-08T17:56:10.298579731+00:00 stderr F I1208 17:56:10.298494 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.CSIStorageCapacity total 20 items received 2025-12-08T17:56:43.730919581+00:00 stderr F I1208 17:56:43.730025 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:56:43.730919581+00:00 stderr F I1208 17:56:43.730071 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:56:43.730919581+00:00 stderr F I1208 17:56:43.730093 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:56:43.730919581+00:00 stderr F I1208 17:56:43.730097 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:56:43.731220270+00:00 stderr F I1208 17:56:43.731179 1 connection.go:251] GRPC response: {"available_capacity":56942952448,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:56:43.731220270+00:00 stderr F I1208 17:56:43.731195 1 
connection.go:252] GRPC error: 2025-12-08T17:56:43.731234100+00:00 stderr F I1208 17:56:43.731212 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 55608352Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:56:43.738505879+00:00 stderr F I1208 17:56:43.738431 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 44000 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 55608352Ki 2025-12-08T17:56:43.738538900+00:00 stderr F I1208 17:56:43.738500 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 44000 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:57:37.578417956+00:00 stderr F I1208 17:57:37.578374 1 controller.go:1366] provision "service-telemetry/prometheus-default-db-prometheus-default-0" class "crc-csi-hostpath-provisioner": started 2025-12-08T17:57:37.578609900+00:00 stderr F I1208 17:57:37.578597 1 connection.go:244] GRPC call: /csi.v1.Controller/CreateVolume 2025-12-08T17:57:37.578937649+00:00 stderr F I1208 17:57:37.578776 1 event.go:298] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"service-telemetry", Name:"prometheus-default-db-prometheus-default-0", UID:"e87a7084-eea8-46c1-a85e-77b652e25ad6", APIVersion:"v1", ResourceVersion:"45024", FieldPath:""}): type: 'Normal' reason: 'Provisioning' External provisioner is provisioning volume for claim "service-telemetry/prometheus-default-db-prometheus-default-0" 2025-12-08T17:57:37.578995810+00:00 stderr F I1208 17:57:37.578625 1 connection.go:245] GRPC request: {"accessibility_requirements":{"preferred":[{"segments":{"topology.hostpath.csi/node":"crc"}}],"requisite":[{"segments":{"topology.hostpath.csi/node":"crc"}}]},"capacity_range":{"required_bytes":20000000000},"name":"pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6","parameters":{"csi.storage.k8s.io/pv/name":"pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6","csi.storage.k8s.io/pvc/name":"prometheus-default-db-prometheus-default-0","csi.storage.k8s.io/pvc/namespace":"service-telemetry","storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":{}},"access_mode":{"mode":1}}]} 2025-12-08T17:57:37.580796017+00:00 stderr F I1208 17:57:37.580766 1 connection.go:251] GRPC response: {"volume":{"accessible_topology":[{"segments":{"topology.hostpath.csi/node":"crc"}}],"capacity_bytes":84825604096,"volume_context":{"csi.storage.k8s.io/pv/name":"pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6","csi.storage.k8s.io/pvc/name":"prometheus-default-db-prometheus-default-0","csi.storage.k8s.io/pvc/namespace":"service-telemetry","storagePool":"local"},"volume_id":"pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6"}} 2025-12-08T17:57:37.580796017+00:00 stderr F I1208 17:57:37.580791 1 connection.go:252] GRPC error: 2025-12-08T17:57:37.580868109+00:00 stderr F I1208 17:57:37.580818 1 controller.go:826] create volume rep: {CapacityBytes:84825604096 VolumeId:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 VolumeContext:map[csi.storage.k8s.io/pv/name:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 csi.storage.k8s.io/pvc/name:prometheus-default-db-prometheus-default-0 csi.storage.k8s.io/pvc/namespace:service-telemetry storagePool:local] ContentSource: AccessibleTopology:[segments: ] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:57:37.581269049+00:00 stderr F I1208 17:57:37.580871 1 controller.go:898] createVolumeOperation: set 
annotation [volume.kubernetes.io/provisioner-deletion-secret-namespace/volume.kubernetes.io/provisioner-deletion-secret-name] on pv [pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6]. 2025-12-08T17:57:37.581269049+00:00 stderr F I1208 17:57:37.581252 1 controller.go:923] successfully created PV pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 for PVC prometheus-default-db-prometheus-default-0 and csi volume name pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 2025-12-08T17:57:37.581432663+00:00 stderr F I1208 17:57:37.581264 1 controller.go:939] successfully created PV {GCEPersistentDisk:nil AWSElasticBlockStore:nil HostPath:nil Glusterfs:nil NFS:nil RBD:nil ISCSI:nil Cinder:nil CephFS:nil FC:nil Flocker:nil FlexVolume:nil AzureFile:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil PortworxVolume:nil ScaleIO:nil Local:nil StorageOS:nil CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6,csi.storage.k8s.io/pvc/name: prometheus-default-db-prometheus-default-0,csi.storage.k8s.io/pvc/namespace: service-telemetry,storage.kubernetes.io/csiProvisionerIdentity: 1765215883532-6720-kubevirt.io.hostpath-provisioner-crc,storagePool: local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,}} 2025-12-08T17:57:37.581432663+00:00 stderr F I1208 17:57:37.581422 1 capacity.go:424] Capacity Controller: skipping refresh: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} because of the topology 2025-12-08T17:57:37.581480615+00:00 stderr F I1208 17:57:37.581463 1 controller.go:1449] provision "service-telemetry/prometheus-default-db-prometheus-default-0" class "crc-csi-hostpath-provisioner": volume "pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6" provisioned 2025-12-08T17:57:37.581487835+00:00 stderr F I1208 17:57:37.581481 1 controller.go:1462] provision "service-telemetry/prometheus-default-db-prometheus-default-0" class "crc-csi-hostpath-provisioner": succeeded 2025-12-08T17:57:37.581545456+00:00 stderr F I1208 17:57:37.581486 1 volume_store.go:154] Saving volume pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 2025-12-08T17:57:37.581693770+00:00 stderr F I1208 17:57:37.581660 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:57:37.581744751+00:00 stderr F I1208 17:57:37.581713 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:57:37.581927766+00:00 stderr F I1208 17:57:37.581726 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:57:37.582487521+00:00 stderr F I1208 17:57:37.582441 1 connection.go:251] GRPC response: {"available_capacity":55470637056,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:57:37.582487521+00:00 stderr F I1208 17:57:37.582462 1 connection.go:252] GRPC error: 2025-12-08T17:57:37.582516472+00:00 stderr F I1208 17:57:37.582488 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 54170544Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:57:37.588744673+00:00 stderr F I1208 
17:57:37.588476 1 controller.go:1152] handleProtectionFinalizer Volume : &PersistentVolume{ObjectMeta:{pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 303b951d-8ea6-44ce-a886-47c5892e6da7 45027 0 2025-12-08 17:57:37 +0000 UTC map[] map[pv.kubernetes.io/provisioned-by:kubevirt.io.hostpath-provisioner volume.kubernetes.io/provisioner-deletion-secret-name: volume.kubernetes.io/provisioner-deletion-secret-namespace:] [] [kubernetes.io/pv-protection] [{csi-provisioner Update v1 2025-12-08 17:57:37 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:pv.kubernetes.io/provisioned-by":{},"f:volume.kubernetes.io/provisioner-deletion-secret-name":{},"f:volume.kubernetes.io/provisioner-deletion-secret-namespace":{}}},"f:spec":{"f:accessModes":{},"f:capacity":{".":{},"f:storage":{}},"f:claimRef":{".":{},"f:apiVersion":{},"f:kind":{},"f:name":{},"f:namespace":{},"f:resourceVersion":{},"f:uid":{}},"f:csi":{".":{},"f:driver":{},"f:volumeAttributes":{".":{},"f:csi.storage.k8s.io/pv/name":{},"f:csi.storage.k8s.io/pvc/name":{},"f:csi.storage.k8s.io/pvc/namespace":{},"f:storage.kubernetes.io/csiProvisionerIdentity":{},"f:storagePool":{}},"f:volumeHandle":{}},"f:nodeAffinity":{".":{},"f:required":{}},"f:persistentVolumeReclaimPolicy":{},"f:storageClassName":{},"f:volumeMode":{}}} }]},Spec:PersistentVolumeSpec{Capacity:ResourceList{storage: {{84825604096 0} {} 79Gi BinarySI},},PersistentVolumeSource:PersistentVolumeSource{GCEPersistentDisk:nil,AWSElasticBlockStore:nil,HostPath:nil,Glusterfs:nil,NFS:nil,RBD:nil,ISCSI:nil,Cinder:nil,CephFS:nil,FC:nil,Flocker:nil,FlexVolume:nil,AzureFile:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Local:nil,StorageOS:nil,CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6,csi.storage.k8s.io/pvc/name: prometheus-default-db-prometheus-default-0,csi.storage.k8s.io/pvc/namespace: service-telemetry,storage.kubernetes.io/csiProvisionerIdentity: 1765215883532-6720-kubevirt.io.hostpath-provisioner-crc,storagePool: local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,},},AccessModes:[ReadWriteOnce],ClaimRef:&ObjectReference{Kind:PersistentVolumeClaim,Namespace:service-telemetry,Name:prometheus-default-db-prometheus-default-0,UID:e87a7084-eea8-46c1-a85e-77b652e25ad6,APIVersion:v1,ResourceVersion:45024,FieldPath:,},PersistentVolumeReclaimPolicy:Retain,StorageClassName:crc-csi-hostpath-provisioner,MountOptions:[],VolumeMode:*Filesystem,NodeAffinity:&VolumeNodeAffinity{Required:&NodeSelector{NodeSelectorTerms:[]NodeSelectorTerm{NodeSelectorTerm{MatchExpressions:[]NodeSelectorRequirement{NodeSelectorRequirement{Key:topology.hostpath.csi/node,Operator:In,Values:[crc],},},MatchFields:[]NodeSelectorRequirement{},},},},},},Status:PersistentVolumeStatus{Phase:Pending,Message:,Reason:,LastPhaseTransitionTime:2025-12-08 17:57:37 +0000 UTC,},} 2025-12-08T17:57:37.588744673+00:00 stderr F I1208 17:57:37.588715 1 controller.go:1239] shouldDelete volume "pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6" 2025-12-08T17:57:37.588744673+00:00 stderr F I1208 17:57:37.588725 1 controller.go:1260] shouldDelete volume "pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6" is false: PersistentVolumePhase is not Released 2025-12-08T17:57:37.588824985+00:00 stderr F I1208 17:57:37.588685 1 
volume_store.go:157] Volume pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 saved 2025-12-08T17:57:37.588923457+00:00 stderr F I1208 17:57:37.588869 1 controller.go:1069] Claim processing succeeded, removing PVC e87a7084-eea8-46c1-a85e-77b652e25ad6 from claims in progress 2025-12-08T17:57:37.589166764+00:00 stderr F I1208 17:57:37.589127 1 event.go:298] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"service-telemetry", Name:"prometheus-default-db-prometheus-default-0", UID:"e87a7084-eea8-46c1-a85e-77b652e25ad6", APIVersion:"v1", ResourceVersion:"45024", FieldPath:""}): type: 'Normal' reason: 'ProvisioningSucceeded' Successfully provisioned volume pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 2025-12-08T17:57:37.590480888+00:00 stderr F I1208 17:57:37.590438 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 45028 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:57:37.590738935+00:00 stderr F I1208 17:57:37.590713 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 45028 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 54170544Ki 2025-12-08T17:57:37.593482536+00:00 stderr F I1208 17:57:37.593261 1 controller.go:1152] handleProtectionFinalizer Volume : &PersistentVolume{ObjectMeta:{pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 303b951d-8ea6-44ce-a886-47c5892e6da7 45029 0 2025-12-08 17:57:37 +0000 UTC map[] map[pv.kubernetes.io/provisioned-by:kubevirt.io.hostpath-provisioner volume.kubernetes.io/provisioner-deletion-secret-name: volume.kubernetes.io/provisioner-deletion-secret-namespace:] [] [kubernetes.io/pv-protection] [{csi-provisioner Update v1 2025-12-08 17:57:37 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:pv.kubernetes.io/provisioned-by":{},"f:volume.kubernetes.io/provisioner-deletion-secret-name":{},"f:volume.kubernetes.io/provisioner-deletion-secret-namespace":{}}},"f:spec":{"f:accessModes":{},"f:capacity":{".":{},"f:storage":{}},"f:claimRef":{".":{},"f:apiVersion":{},"f:kind":{},"f:name":{},"f:namespace":{},"f:resourceVersion":{},"f:uid":{}},"f:csi":{".":{},"f:driver":{},"f:volumeAttributes":{".":{},"f:csi.storage.k8s.io/pv/name":{},"f:csi.storage.k8s.io/pvc/name":{},"f:csi.storage.k8s.io/pvc/namespace":{},"f:storage.kubernetes.io/csiProvisionerIdentity":{},"f:storagePool":{}},"f:volumeHandle":{}},"f:nodeAffinity":{".":{},"f:required":{}},"f:persistentVolumeReclaimPolicy":{},"f:storageClassName":{},"f:volumeMode":{}}} } {kube-controller-manager Update v1 2025-12-08 17:57:37 +0000 UTC FieldsV1 {"f:status":{"f:phase":{}}} status}]},Spec:PersistentVolumeSpec{Capacity:ResourceList{storage: {{84825604096 0} {} 79Gi BinarySI},},PersistentVolumeSource:PersistentVolumeSource{GCEPersistentDisk:nil,AWSElasticBlockStore:nil,HostPath:nil,Glusterfs:nil,NFS:nil,RBD:nil,ISCSI:nil,Cinder:nil,CephFS:nil,FC:nil,Flocker:nil,FlexVolume:nil,AzureFile:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Local:nil,StorageOS:nil,CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6,csi.storage.k8s.io/pvc/name: prometheus-default-db-prometheus-default-0,csi.storage.k8s.io/pvc/namespace: service-telemetry,storage.kubernetes.io/csiProvisionerIdentity: 
1765215883532-6720-kubevirt.io.hostpath-provisioner-crc,storagePool: local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,},},AccessModes:[ReadWriteOnce],ClaimRef:&ObjectReference{Kind:PersistentVolumeClaim,Namespace:service-telemetry,Name:prometheus-default-db-prometheus-default-0,UID:e87a7084-eea8-46c1-a85e-77b652e25ad6,APIVersion:v1,ResourceVersion:45024,FieldPath:,},PersistentVolumeReclaimPolicy:Retain,StorageClassName:crc-csi-hostpath-provisioner,MountOptions:[],VolumeMode:*Filesystem,NodeAffinity:&VolumeNodeAffinity{Required:&NodeSelector{NodeSelectorTerms:[]NodeSelectorTerm{NodeSelectorTerm{MatchExpressions:[]NodeSelectorRequirement{NodeSelectorRequirement{Key:topology.hostpath.csi/node,Operator:In,Values:[crc],},},MatchFields:[]NodeSelectorRequirement{},},},},},},Status:PersistentVolumeStatus{Phase:Bound,Message:,Reason:,LastPhaseTransitionTime:2025-12-08 17:57:37 +0000 UTC,},} 2025-12-08T17:57:37.593482536+00:00 stderr F I1208 17:57:37.593466 1 controller.go:1239] shouldDelete volume "pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6" 2025-12-08T17:57:37.593504636+00:00 stderr F I1208 17:57:37.593477 1 controller.go:1260] shouldDelete volume "pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6" is false: PersistentVolumePhase is not Released 2025-12-08T17:57:43.730315905+00:00 stderr F I1208 17:57:43.730197 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:57:43.730315905+00:00 stderr F I1208 17:57:43.730276 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:57:43.730386847+00:00 stderr F I1208 17:57:43.730310 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:57:43.730449418+00:00 stderr F I1208 17:57:43.730318 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:57:43.731551967+00:00 stderr F I1208 17:57:43.731478 1 connection.go:251] GRPC response: {"available_capacity":55427313664,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:57:43.731551967+00:00 stderr F I1208 17:57:43.731509 1 connection.go:252] GRPC error: 2025-12-08T17:57:43.731591458+00:00 stderr F I1208 17:57:43.731543 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 54128236Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:57:43.736566596+00:00 stderr F I1208 17:57:43.736234 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 45085 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:57:43.736566596+00:00 stderr F I1208 17:57:43.736496 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 45085 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 54128236Ki 2025-12-08T17:57:51.349391865+00:00 stderr F I1208 17:57:51.345089 1 controller.go:1366] provision "service-telemetry/alertmanager-default-db-alertmanager-default-0" class "crc-csi-hostpath-provisioner": started 2025-12-08T17:57:51.349391865+00:00 stderr F I1208 17:57:51.345176 1 connection.go:244] GRPC call: 
/csi.v1.Controller/CreateVolume 2025-12-08T17:57:51.349391865+00:00 stderr F I1208 17:57:51.345183 1 connection.go:245] GRPC request: {"accessibility_requirements":{"preferred":[{"segments":{"topology.hostpath.csi/node":"crc"}}],"requisite":[{"segments":{"topology.hostpath.csi/node":"crc"}}]},"capacity_range":{"required_bytes":20000000000},"name":"pvc-1b15df9e-01ca-4097-a731-1c1b05c63480","parameters":{"csi.storage.k8s.io/pv/name":"pvc-1b15df9e-01ca-4097-a731-1c1b05c63480","csi.storage.k8s.io/pvc/name":"alertmanager-default-db-alertmanager-default-0","csi.storage.k8s.io/pvc/namespace":"service-telemetry","storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":{}},"access_mode":{"mode":1}}]} 2025-12-08T17:57:51.349391865+00:00 stderr F I1208 17:57:51.345709 1 event.go:298] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"service-telemetry", Name:"alertmanager-default-db-alertmanager-default-0", UID:"1b15df9e-01ca-4097-a731-1c1b05c63480", APIVersion:"v1", ResourceVersion:"45171", FieldPath:""}): type: 'Normal' reason: 'Provisioning' External provisioner is provisioning volume for claim "service-telemetry/alertmanager-default-db-alertmanager-default-0" 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.354940 1 connection.go:251] GRPC response: {"volume":{"accessible_topology":[{"segments":{"topology.hostpath.csi/node":"crc"}}],"capacity_bytes":84825604096,"volume_context":{"csi.storage.k8s.io/pv/name":"pvc-1b15df9e-01ca-4097-a731-1c1b05c63480","csi.storage.k8s.io/pvc/name":"alertmanager-default-db-alertmanager-default-0","csi.storage.k8s.io/pvc/namespace":"service-telemetry","storagePool":"local"},"volume_id":"pvc-1b15df9e-01ca-4097-a731-1c1b05c63480"}} 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.354963 1 connection.go:252] GRPC error: 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.354971 1 controller.go:826] create volume rep: {CapacityBytes:84825604096 VolumeId:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 VolumeContext:map[csi.storage.k8s.io/pv/name:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 csi.storage.k8s.io/pvc/name:alertmanager-default-db-alertmanager-default-0 csi.storage.k8s.io/pvc/namespace:service-telemetry storagePool:local] ContentSource: AccessibleTopology:[segments: ] XXX_NoUnkeyedLiteral:{} XXX_unrecognized:[] XXX_sizecache:0} 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.355023 1 controller.go:898] createVolumeOperation: set annotation [volume.kubernetes.io/provisioner-deletion-secret-namespace/volume.kubernetes.io/provisioner-deletion-secret-name] on pv [pvc-1b15df9e-01ca-4097-a731-1c1b05c63480]. 
2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.355036 1 controller.go:923] successfully created PV pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 for PVC alertmanager-default-db-alertmanager-default-0 and csi volume name pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.355047 1 controller.go:939] successfully created PV {GCEPersistentDisk:nil AWSElasticBlockStore:nil HostPath:nil Glusterfs:nil NFS:nil RBD:nil ISCSI:nil Cinder:nil CephFS:nil FC:nil Flocker:nil FlexVolume:nil AzureFile:nil VsphereVolume:nil Quobyte:nil AzureDisk:nil PhotonPersistentDisk:nil PortworxVolume:nil ScaleIO:nil Local:nil StorageOS:nil CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-1b15df9e-01ca-4097-a731-1c1b05c63480,csi.storage.k8s.io/pvc/name: alertmanager-default-db-alertmanager-default-0,csi.storage.k8s.io/pvc/namespace: service-telemetry,storage.kubernetes.io/csiProvisionerIdentity: 1765215883532-6720-kubevirt.io.hostpath-provisioner-crc,storagePool: local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,}} 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.355149 1 capacity.go:424] Capacity Controller: skipping refresh: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} because of the topology 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.355172 1 controller.go:1449] provision "service-telemetry/alertmanager-default-db-alertmanager-default-0" class "crc-csi-hostpath-provisioner": volume "pvc-1b15df9e-01ca-4097-a731-1c1b05c63480" provisioned 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.355182 1 controller.go:1462] provision "service-telemetry/alertmanager-default-db-alertmanager-default-0" class "crc-csi-hostpath-provisioner": succeeded 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.355187 1 volume_store.go:154] Saving volume pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.355509 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:57:51.355559845+00:00 stderr F I1208 17:57:51.355528 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:57:51.355611856+00:00 stderr F I1208 17:57:51.355533 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:57:51.356179510+00:00 stderr F I1208 17:57:51.356102 1 connection.go:251] GRPC response: {"available_capacity":55258386432,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:57:51.356179510+00:00 stderr F I1208 17:57:51.356114 1 connection.go:252] GRPC error: 2025-12-08T17:57:51.356179510+00:00 stderr F I1208 17:57:51.356131 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 53963268Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:57:51.363047178+00:00 stderr F I1208 17:57:51.361800 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 45177 for {segment:0xc0005a9878 
storageClassName:crc-csi-hostpath-provisioner} with capacity 53963268Ki 2025-12-08T17:57:51.363047178+00:00 stderr F I1208 17:57:51.362054 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 45177 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:57:51.363047178+00:00 stderr F I1208 17:57:51.362331 1 controller.go:1152] handleProtectionFinalizer Volume : &PersistentVolume{ObjectMeta:{pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 7687530c-dc50-4224-bbd7-e0ba882c5a3a 45178 0 2025-12-08 17:57:51 +0000 UTC map[] map[pv.kubernetes.io/provisioned-by:kubevirt.io.hostpath-provisioner volume.kubernetes.io/provisioner-deletion-secret-name: volume.kubernetes.io/provisioner-deletion-secret-namespace:] [] [kubernetes.io/pv-protection] [{csi-provisioner Update v1 2025-12-08 17:57:51 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:pv.kubernetes.io/provisioned-by":{},"f:volume.kubernetes.io/provisioner-deletion-secret-name":{},"f:volume.kubernetes.io/provisioner-deletion-secret-namespace":{}}},"f:spec":{"f:accessModes":{},"f:capacity":{".":{},"f:storage":{}},"f:claimRef":{".":{},"f:apiVersion":{},"f:kind":{},"f:name":{},"f:namespace":{},"f:resourceVersion":{},"f:uid":{}},"f:csi":{".":{},"f:driver":{},"f:volumeAttributes":{".":{},"f:csi.storage.k8s.io/pv/name":{},"f:csi.storage.k8s.io/pvc/name":{},"f:csi.storage.k8s.io/pvc/namespace":{},"f:storage.kubernetes.io/csiProvisionerIdentity":{},"f:storagePool":{}},"f:volumeHandle":{}},"f:nodeAffinity":{".":{},"f:required":{}},"f:persistentVolumeReclaimPolicy":{},"f:storageClassName":{},"f:volumeMode":{}}} }]},Spec:PersistentVolumeSpec{Capacity:ResourceList{storage: {{84825604096 0} {} 79Gi BinarySI},},PersistentVolumeSource:PersistentVolumeSource{GCEPersistentDisk:nil,AWSElasticBlockStore:nil,HostPath:nil,Glusterfs:nil,NFS:nil,RBD:nil,ISCSI:nil,Cinder:nil,CephFS:nil,FC:nil,Flocker:nil,FlexVolume:nil,AzureFile:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Local:nil,StorageOS:nil,CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-1b15df9e-01ca-4097-a731-1c1b05c63480,csi.storage.k8s.io/pvc/name: alertmanager-default-db-alertmanager-default-0,csi.storage.k8s.io/pvc/namespace: service-telemetry,storage.kubernetes.io/csiProvisionerIdentity: 1765215883532-6720-kubevirt.io.hostpath-provisioner-crc,storagePool: local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,},},AccessModes:[ReadWriteOnce],ClaimRef:&ObjectReference{Kind:PersistentVolumeClaim,Namespace:service-telemetry,Name:alertmanager-default-db-alertmanager-default-0,UID:1b15df9e-01ca-4097-a731-1c1b05c63480,APIVersion:v1,ResourceVersion:45171,FieldPath:,},PersistentVolumeReclaimPolicy:Retain,StorageClassName:crc-csi-hostpath-provisioner,MountOptions:[],VolumeMode:*Filesystem,NodeAffinity:&VolumeNodeAffinity{Required:&NodeSelector{NodeSelectorTerms:[]NodeSelectorTerm{NodeSelectorTerm{MatchExpressions:[]NodeSelectorRequirement{NodeSelectorRequirement{Key:topology.hostpath.csi/node,Operator:In,Values:[crc],},},MatchFields:[]NodeSelectorRequirement{},},},},},},Status:PersistentVolumeStatus{Phase:Pending,Message:,Reason:,LastPhaseTransitionTime:2025-12-08 17:57:51 +0000 UTC,},} 
2025-12-08T17:57:51.363047178+00:00 stderr F I1208 17:57:51.362505 1 controller.go:1239] shouldDelete volume "pvc-1b15df9e-01ca-4097-a731-1c1b05c63480" 2025-12-08T17:57:51.363047178+00:00 stderr F I1208 17:57:51.362513 1 controller.go:1260] shouldDelete volume "pvc-1b15df9e-01ca-4097-a731-1c1b05c63480" is false: PersistentVolumePhase is not Released 2025-12-08T17:57:51.363047178+00:00 stderr F I1208 17:57:51.362713 1 volume_store.go:157] Volume pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 saved 2025-12-08T17:57:51.363047178+00:00 stderr F I1208 17:57:51.362742 1 controller.go:1069] Claim processing succeeded, removing PVC 1b15df9e-01ca-4097-a731-1c1b05c63480 from claims in progress 2025-12-08T17:57:51.363047178+00:00 stderr F I1208 17:57:51.362967 1 event.go:298] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"service-telemetry", Name:"alertmanager-default-db-alertmanager-default-0", UID:"1b15df9e-01ca-4097-a731-1c1b05c63480", APIVersion:"v1", ResourceVersion:"45171", FieldPath:""}): type: 'Normal' reason: 'ProvisioningSucceeded' Successfully provisioned volume pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 2025-12-08T17:57:51.365408390+00:00 stderr F I1208 17:57:51.365255 1 controller.go:1152] handleProtectionFinalizer Volume : &PersistentVolume{ObjectMeta:{pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 7687530c-dc50-4224-bbd7-e0ba882c5a3a 45179 0 2025-12-08 17:57:51 +0000 UTC map[] map[pv.kubernetes.io/provisioned-by:kubevirt.io.hostpath-provisioner volume.kubernetes.io/provisioner-deletion-secret-name: volume.kubernetes.io/provisioner-deletion-secret-namespace:] [] [kubernetes.io/pv-protection] [{csi-provisioner Update v1 2025-12-08 17:57:51 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:pv.kubernetes.io/provisioned-by":{},"f:volume.kubernetes.io/provisioner-deletion-secret-name":{},"f:volume.kubernetes.io/provisioner-deletion-secret-namespace":{}}},"f:spec":{"f:accessModes":{},"f:capacity":{".":{},"f:storage":{}},"f:claimRef":{".":{},"f:apiVersion":{},"f:kind":{},"f:name":{},"f:namespace":{},"f:resourceVersion":{},"f:uid":{}},"f:csi":{".":{},"f:driver":{},"f:volumeAttributes":{".":{},"f:csi.storage.k8s.io/pv/name":{},"f:csi.storage.k8s.io/pvc/name":{},"f:csi.storage.k8s.io/pvc/namespace":{},"f:storage.kubernetes.io/csiProvisionerIdentity":{},"f:storagePool":{}},"f:volumeHandle":{}},"f:nodeAffinity":{".":{},"f:required":{}},"f:persistentVolumeReclaimPolicy":{},"f:storageClassName":{},"f:volumeMode":{}}} } {kube-controller-manager Update v1 2025-12-08 17:57:51 +0000 UTC FieldsV1 {"f:status":{"f:phase":{}}} status}]},Spec:PersistentVolumeSpec{Capacity:ResourceList{storage: {{84825604096 0} {} 79Gi BinarySI},},PersistentVolumeSource:PersistentVolumeSource{GCEPersistentDisk:nil,AWSElasticBlockStore:nil,HostPath:nil,Glusterfs:nil,NFS:nil,RBD:nil,ISCSI:nil,Cinder:nil,CephFS:nil,FC:nil,Flocker:nil,FlexVolume:nil,AzureFile:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Local:nil,StorageOS:nil,CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-1b15df9e-01ca-4097-a731-1c1b05c63480,csi.storage.k8s.io/pvc/name: alertmanager-default-db-alertmanager-default-0,csi.storage.k8s.io/pvc/namespace: service-telemetry,storage.kubernetes.io/csiProvisionerIdentity: 1765215883532-6720-kubevirt.io.hostpath-provisioner-crc,storagePool: 
local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,},},AccessModes:[ReadWriteOnce],ClaimRef:&ObjectReference{Kind:PersistentVolumeClaim,Namespace:service-telemetry,Name:alertmanager-default-db-alertmanager-default-0,UID:1b15df9e-01ca-4097-a731-1c1b05c63480,APIVersion:v1,ResourceVersion:45171,FieldPath:,},PersistentVolumeReclaimPolicy:Retain,StorageClassName:crc-csi-hostpath-provisioner,MountOptions:[],VolumeMode:*Filesystem,NodeAffinity:&VolumeNodeAffinity{Required:&NodeSelector{NodeSelectorTerms:[]NodeSelectorTerm{NodeSelectorTerm{MatchExpressions:[]NodeSelectorRequirement{NodeSelectorRequirement{Key:topology.hostpath.csi/node,Operator:In,Values:[crc],},},MatchFields:[]NodeSelectorRequirement{},},},},},},Status:PersistentVolumeStatus{Phase:Bound,Message:,Reason:,LastPhaseTransitionTime:2025-12-08 17:57:51 +0000 UTC,},} 2025-12-08T17:57:51.365408390+00:00 stderr F I1208 17:57:51.365394 1 controller.go:1239] shouldDelete volume "pvc-1b15df9e-01ca-4097-a731-1c1b05c63480" 2025-12-08T17:57:51.365408390+00:00 stderr F I1208 17:57:51.365401 1 controller.go:1260] shouldDelete volume "pvc-1b15df9e-01ca-4097-a731-1c1b05c63480" is false: PersistentVolumePhase is not Released 2025-12-08T17:58:43.731023099+00:00 stderr F I1208 17:58:43.730959 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T17:58:43.731149913+00:00 stderr F I1208 17:58:43.731132 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:58:43.731211025+00:00 stderr F I1208 17:58:43.731198 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:58:43.731335628+00:00 stderr F I1208 17:58:43.731231 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:58:43.737264514+00:00 stderr F I1208 17:58:43.737226 1 connection.go:251] GRPC response: {"available_capacity":53694816256,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:58:43.737354697+00:00 stderr F I1208 17:58:43.737341 1 connection.go:252] GRPC error: 2025-12-08T17:58:43.737418908+00:00 stderr F I1208 17:58:43.737393 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 52436344Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:58:43.746986700+00:00 stderr F I1208 17:58:43.746928 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 45757 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:58:43.747335999+00:00 stderr F I1208 17:58:43.747312 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 45757 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 52436344Ki 2025-12-08T17:58:58.010934945+00:00 stderr F I1208 17:58:58.009262 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.PersistentVolumeClaim total 18 items received 2025-12-08T17:59:43.732184522+00:00 stderr F I1208 17:59:43.732033 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for 
periodic update 2025-12-08T17:59:43.732184522+00:00 stderr F I1208 17:59:43.732116 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T17:59:43.732184522+00:00 stderr F I1208 17:59:43.732148 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T17:59:43.732335236+00:00 stderr F I1208 17:59:43.732154 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T17:59:43.733624920+00:00 stderr F I1208 17:59:43.733549 1 connection.go:251] GRPC response: {"available_capacity":52358459392,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T17:59:43.733624920+00:00 stderr F I1208 17:59:43.733571 1 connection.go:252] GRPC error: 2025-12-08T17:59:43.733624920+00:00 stderr F I1208 17:59:43.733597 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 51131308Ki, new maximumVolumeSize 83293888Ki 2025-12-08T17:59:43.739533056+00:00 stderr F I1208 17:59:43.739399 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 46034 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 51131308Ki 2025-12-08T17:59:43.739597378+00:00 stderr F I1208 17:59:43.739556 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 46034 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:00:43.732586597+00:00 stderr F I1208 18:00:43.732482 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T18:00:43.732586597+00:00 stderr F I1208 18:00:43.732560 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:00:43.732655349+00:00 stderr F I1208 18:00:43.732591 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T18:00:43.732700550+00:00 stderr F I1208 18:00:43.732597 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T18:00:43.733258614+00:00 stderr F I1208 18:00:43.733218 1 connection.go:251] GRPC response: {"available_capacity":52345769984,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T18:00:43.733258614+00:00 stderr F I1208 18:00:43.733230 1 connection.go:252] GRPC error: 2025-12-08T18:00:43.733258614+00:00 stderr F I1208 18:00:43.733245 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 51118916Ki, new maximumVolumeSize 83293888Ki 2025-12-08T18:00:43.738262086+00:00 stderr F I1208 18:00:43.738207 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 46241 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:00:43.738262086+00:00 stderr F I1208 18:00:43.738238 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 46241 for {segment:0xc0005a9878 
storageClassName:crc-csi-hostpath-provisioner} with capacity 51118916Ki 2025-12-08T18:00:44.183800063+00:00 stderr F I1208 18:00:44.183715 1 reflector.go:790] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:848: Watch close - *v1.StorageClass total 6 items received 2025-12-08T18:01:43.734185321+00:00 stderr F I1208 18:01:43.733654 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T18:01:43.734185321+00:00 stderr F I1208 18:01:43.733800 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:01:43.734461208+00:00 stderr F I1208 18:01:43.734397 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T18:01:43.735308441+00:00 stderr F I1208 18:01:43.734423 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T18:01:43.736701397+00:00 stderr F I1208 18:01:43.736649 1 connection.go:251] GRPC response: {"available_capacity":52347576320,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T18:01:43.736701397+00:00 stderr F I1208 18:01:43.736684 1 connection.go:252] GRPC error: 2025-12-08T18:01:43.736762810+00:00 stderr F I1208 18:01:43.736717 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 51120680Ki, new maximumVolumeSize 83293888Ki 2025-12-08T18:01:43.744805353+00:00 stderr F I1208 18:01:43.744670 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 46407 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:01:43.744862885+00:00 stderr F I1208 18:01:43.744806 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 46407 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 51120680Ki 2025-12-08T18:01:58.845908595+00:00 stderr F I1208 18:01:58.845730 1 reflector.go:378] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845: forcing resync 2025-12-08T18:01:58.846212083+00:00 stderr F I1208 18:01:58.845847 1 controller.go:1152] handleProtectionFinalizer Volume : &PersistentVolume{ObjectMeta:{pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 fc1bbaba-db8e-4b91-8f7b-815ce1e79968 24587 0 2025-11-03 08:50:26 +0000 UTC map[] map[pv.kubernetes.io/provisioned-by:kubevirt.io.hostpath-provisioner volume.kubernetes.io/provisioner-deletion-secret-name: volume.kubernetes.io/provisioner-deletion-secret-namespace:] [] [kubernetes.io/pv-protection] [{csi-provisioner Update v1 2025-11-03 08:50:26 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:pv.kubernetes.io/provisioned-by":{},"f:volume.kubernetes.io/provisioner-deletion-secret-name":{},"f:volume.kubernetes.io/provisioner-deletion-secret-namespace":{}}},"f:spec":{"f:accessModes":{},"f:capacity":{".":{},"f:storage":{}},"f:claimRef":{".":{},"f:apiVersion":{},"f:kind":{},"f:name":{},"f:namespace":{},"f:resourceVersion":{},"f:uid":{}},"f:csi":{".":{},"f:driver":{},"f:volumeAttributes":{".":{},"f:csi.storage.k8s.io/pv/name":{},"f:csi.storage.k8s.io/pvc/name":{},"f:csi.storage.k8s.io/pvc/namespace":{},"f:storage.kubernetes.io/csiProvisionerIdentity":{},"f:storagePool":{}},"f:volumeHandle":{}},"f:nodeAffinity":{".":{},"f:required":{}},"f:persistentVolumeReclaimPolicy":{},"f:storageClassName":{},"f:volumeMode":{}}} } {kube-controller-manager Update v1 2025-11-03 08:50:26 +0000 UTC FieldsV1 {"f:status":{"f:phase":{}}} status}]},Spec:PersistentVolumeSpec{Capacity:ResourceList{storage: {{32212254720 0} {} 30Gi BinarySI},},PersistentVolumeSource:PersistentVolumeSource{GCEPersistentDisk:nil,AWSElasticBlockStore:nil,HostPath:nil,Glusterfs:nil,NFS:nil,RBD:nil,ISCSI:nil,Cinder:nil,CephFS:nil,FC:nil,Flocker:nil,FlexVolume:nil,AzureFile:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Local:nil,StorageOS:nil,CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2,csi.storage.k8s.io/pvc/name: crc-image-registry-storage,csi.storage.k8s.io/pvc/namespace: openshift-image-registry,storage.kubernetes.io/csiProvisionerIdentity: 1762159825768-6575-kubevirt.io.hostpath-provisioner-crc,storagePool: local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,},},AccessModes:[ReadWriteMany],ClaimRef:&ObjectReference{Kind:PersistentVolumeClaim,Namespace:openshift-image-registry,Name:crc-image-registry-storage,UID:b21f41aa-58d4-44b1-aeaa-280a8e32ddf2,APIVersion:v1,ResourceVersion:22386,FieldPath:,},PersistentVolumeReclaimPolicy:Retain,StorageClassName:crc-csi-hostpath-provisioner,MountOptions:[],VolumeMode:*Filesystem,NodeAffinity:&VolumeNodeAffinity{Required:&NodeSelector{NodeSelectorTerms:[]NodeSelectorTerm{NodeSelectorTerm{MatchExpressions:[]NodeSelectorRequirement{NodeSelectorRequirement{Key:topology.hostpath.csi/node,Operator:In,Values:[crc],},},MatchFields:[]NodeSelectorRequirement{},},},},},},Status:PersistentVolumeStatus{Phase:Bound,Message:,Reason:,LastPhaseTransitionTime:2025-11-03 08:50:26 +0000 UTC,},} 2025-12-08T18:01:58.846212083+00:00 stderr F I1208 18:01:58.846193 1 controller.go:1239] shouldDelete volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" 2025-12-08T18:01:58.846212083+00:00 stderr F I1208 18:01:58.846205 1 controller.go:1260] shouldDelete volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" is false: PersistentVolumePhase is not Released 2025-12-08T18:01:58.846396357+00:00 stderr F I1208 18:01:58.845955 1 controller.go:1152] handleProtectionFinalizer Volume : &PersistentVolume{ObjectMeta:{pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6 303b951d-8ea6-44ce-a886-47c5892e6da7 45029 0 2025-12-08 17:57:37 +0000 UTC map[] map[pv.kubernetes.io/provisioned-by:kubevirt.io.hostpath-provisioner volume.kubernetes.io/provisioner-deletion-secret-name: volume.kubernetes.io/provisioner-deletion-secret-namespace:] [] 
[kubernetes.io/pv-protection] [{csi-provisioner Update v1 2025-12-08 17:57:37 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:pv.kubernetes.io/provisioned-by":{},"f:volume.kubernetes.io/provisioner-deletion-secret-name":{},"f:volume.kubernetes.io/provisioner-deletion-secret-namespace":{}}},"f:spec":{"f:accessModes":{},"f:capacity":{".":{},"f:storage":{}},"f:claimRef":{".":{},"f:apiVersion":{},"f:kind":{},"f:name":{},"f:namespace":{},"f:resourceVersion":{},"f:uid":{}},"f:csi":{".":{},"f:driver":{},"f:volumeAttributes":{".":{},"f:csi.storage.k8s.io/pv/name":{},"f:csi.storage.k8s.io/pvc/name":{},"f:csi.storage.k8s.io/pvc/namespace":{},"f:storage.kubernetes.io/csiProvisionerIdentity":{},"f:storagePool":{}},"f:volumeHandle":{}},"f:nodeAffinity":{".":{},"f:required":{}},"f:persistentVolumeReclaimPolicy":{},"f:storageClassName":{},"f:volumeMode":{}}} } {kube-controller-manager Update v1 2025-12-08 17:57:37 +0000 UTC FieldsV1 {"f:status":{"f:phase":{}}} status}]},Spec:PersistentVolumeSpec{Capacity:ResourceList{storage: {{84825604096 0} {} 79Gi BinarySI},},PersistentVolumeSource:PersistentVolumeSource{GCEPersistentDisk:nil,AWSElasticBlockStore:nil,HostPath:nil,Glusterfs:nil,NFS:nil,RBD:nil,ISCSI:nil,Cinder:nil,CephFS:nil,FC:nil,Flocker:nil,FlexVolume:nil,AzureFile:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Local:nil,StorageOS:nil,CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6,csi.storage.k8s.io/pvc/name: prometheus-default-db-prometheus-default-0,csi.storage.k8s.io/pvc/namespace: service-telemetry,storage.kubernetes.io/csiProvisionerIdentity: 1765215883532-6720-kubevirt.io.hostpath-provisioner-crc,storagePool: local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,},},AccessModes:[ReadWriteOnce],ClaimRef:&ObjectReference{Kind:PersistentVolumeClaim,Namespace:service-telemetry,Name:prometheus-default-db-prometheus-default-0,UID:e87a7084-eea8-46c1-a85e-77b652e25ad6,APIVersion:v1,ResourceVersion:45024,FieldPath:,},PersistentVolumeReclaimPolicy:Retain,StorageClassName:crc-csi-hostpath-provisioner,MountOptions:[],VolumeMode:*Filesystem,NodeAffinity:&VolumeNodeAffinity{Required:&NodeSelector{NodeSelectorTerms:[]NodeSelectorTerm{NodeSelectorTerm{MatchExpressions:[]NodeSelectorRequirement{NodeSelectorRequirement{Key:topology.hostpath.csi/node,Operator:In,Values:[crc],},},MatchFields:[]NodeSelectorRequirement{},},},},},},Status:PersistentVolumeStatus{Phase:Bound,Message:,Reason:,LastPhaseTransitionTime:2025-12-08 17:57:37 +0000 UTC,},} 2025-12-08T18:01:58.846396357+00:00 stderr F I1208 18:01:58.846242 1 controller.go:1152] handleProtectionFinalizer Volume : &PersistentVolume{ObjectMeta:{pvc-1b15df9e-01ca-4097-a731-1c1b05c63480 7687530c-dc50-4224-bbd7-e0ba882c5a3a 45179 0 2025-12-08 17:57:51 +0000 UTC map[] map[pv.kubernetes.io/provisioned-by:kubevirt.io.hostpath-provisioner volume.kubernetes.io/provisioner-deletion-secret-name: volume.kubernetes.io/provisioner-deletion-secret-namespace:] [] [kubernetes.io/pv-protection] [{csi-provisioner Update v1 2025-12-08 17:57:51 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:pv.kubernetes.io/provisioned-by":{},"f:volume.kubernetes.io/provisioner-deletion-secret-name":{},"f:volume.kubernetes.io/provisioner-deletion-secret-namespace":{}}},"f:spec":{"f:accessModes":{},"f:capacity":{".":{},"f:storage":{}},"f:claimRef":{".":{},"f:apiVersion":{},"f:kind":{},"f:name":{},"f:namespace":{},"f:resourceVersion":{},"f:uid":{}},"f:csi":{".":{},"f:driver":{},"f:volumeAttributes":{".":{},"f:csi.storage.k8s.io/pv/name":{},"f:csi.storage.k8s.io/pvc/name":{},"f:csi.storage.k8s.io/pvc/namespace":{},"f:storage.kubernetes.io/csiProvisionerIdentity":{},"f:storagePool":{}},"f:volumeHandle":{}},"f:nodeAffinity":{".":{},"f:required":{}},"f:persistentVolumeReclaimPolicy":{},"f:storageClassName":{},"f:volumeMode":{}}} } {kube-controller-manager Update v1 2025-12-08 17:57:51 +0000 UTC FieldsV1 {"f:status":{"f:phase":{}}} status}]},Spec:PersistentVolumeSpec{Capacity:ResourceList{storage: {{84825604096 0} {} 79Gi BinarySI},},PersistentVolumeSource:PersistentVolumeSource{GCEPersistentDisk:nil,AWSElasticBlockStore:nil,HostPath:nil,Glusterfs:nil,NFS:nil,RBD:nil,ISCSI:nil,Cinder:nil,CephFS:nil,FC:nil,Flocker:nil,FlexVolume:nil,AzureFile:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Local:nil,StorageOS:nil,CSI:&CSIPersistentVolumeSource{Driver:kubevirt.io.hostpath-provisioner,VolumeHandle:pvc-1b15df9e-01ca-4097-a731-1c1b05c63480,ReadOnly:false,FSType:,VolumeAttributes:map[string]string{csi.storage.k8s.io/pv/name: pvc-1b15df9e-01ca-4097-a731-1c1b05c63480,csi.storage.k8s.io/pvc/name: alertmanager-default-db-alertmanager-default-0,csi.storage.k8s.io/pvc/namespace: service-telemetry,storage.kubernetes.io/csiProvisionerIdentity: 1765215883532-6720-kubevirt.io.hostpath-provisioner-crc,storagePool: local,},ControllerPublishSecretRef:nil,NodeStageSecretRef:nil,NodePublishSecretRef:nil,ControllerExpandSecretRef:nil,NodeExpandSecretRef:nil,},},AccessModes:[ReadWriteOnce],ClaimRef:&ObjectReference{Kind:PersistentVolumeClaim,Namespace:service-telemetry,Name:alertmanager-default-db-alertmanager-default-0,UID:1b15df9e-01ca-4097-a731-1c1b05c63480,APIVersion:v1,ResourceVersion:45171,FieldPath:,},PersistentVolumeReclaimPolicy:Retain,StorageClassName:crc-csi-hostpath-provisioner,MountOptions:[],VolumeMode:*Filesystem,NodeAffinity:&VolumeNodeAffinity{Required:&NodeSelector{NodeSelectorTerms:[]NodeSelectorTerm{NodeSelectorTerm{MatchExpressions:[]NodeSelectorRequirement{NodeSelectorRequirement{Key:topology.hostpath.csi/node,Operator:In,Values:[crc],},},MatchFields:[]NodeSelectorRequirement{},},},},},},Status:PersistentVolumeStatus{Phase:Bound,Message:,Reason:,LastPhaseTransitionTime:2025-12-08 17:57:51 +0000 UTC,},} 2025-12-08T18:01:58.846396357+00:00 stderr F I1208 18:01:58.846371 1 controller.go:1239] shouldDelete volume "pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6" 2025-12-08T18:01:58.846396357+00:00 stderr F I1208 18:01:58.846377 1 controller.go:1239] shouldDelete volume "pvc-1b15df9e-01ca-4097-a731-1c1b05c63480" 2025-12-08T18:01:58.846417148+00:00 stderr F I1208 18:01:58.846383 1 controller.go:1260] shouldDelete volume "pvc-e87a7084-eea8-46c1-a85e-77b652e25ad6" is false: PersistentVolumePhase is not Released 2025-12-08T18:01:58.846417148+00:00 stderr F I1208 18:01:58.846391 1 controller.go:1260] shouldDelete volume "pvc-1b15df9e-01ca-4097-a731-1c1b05c63480" is false: PersistentVolumePhase is not Released 2025-12-08T18:02:02.003124968+00:00 stderr F I1208 18:02:02.003025 1 reflector.go:378] 
k8s.io/client-go/informers/factory.go:150: forcing resync 2025-12-08T18:02:04.301152395+00:00 stderr F I1208 18:02:04.301062 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.CSIStorageCapacity total 14 items received 2025-12-08T18:02:43.734853376+00:00 stderr F I1208 18:02:43.734750 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T18:02:43.734917297+00:00 stderr F I1208 18:02:43.734853 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:02:43.734917297+00:00 stderr F I1208 18:02:43.734909 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T18:02:43.735094212+00:00 stderr F I1208 18:02:43.734919 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T18:02:43.737386143+00:00 stderr F I1208 18:02:43.737333 1 connection.go:251] GRPC response: {"available_capacity":52063207424,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T18:02:43.737418674+00:00 stderr F I1208 18:02:43.737409 1 connection.go:252] GRPC error: 2025-12-08T18:02:43.737482516+00:00 stderr F I1208 18:02:43.737447 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 50842976Ki, new maximumVolumeSize 83293888Ki 2025-12-08T18:02:43.747016299+00:00 stderr F I1208 18:02:43.746037 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 46577 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:02:43.747016299+00:00 stderr F I1208 18:02:43.746096 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 46577 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 50842976Ki 2025-12-08T18:03:02.136266866+00:00 stderr F I1208 18:03:02.136183 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.StorageClass total 10 items received 2025-12-08T18:03:43.737131558+00:00 stderr F I1208 18:03:43.736098 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T18:03:43.737131558+00:00 stderr F I1208 18:03:43.736162 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:03:43.737131558+00:00 stderr F I1208 18:03:43.736195 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T18:03:43.737131558+00:00 stderr F I1208 18:03:43.736203 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T18:03:43.739036869+00:00 stderr F I1208 18:03:43.738523 1 connection.go:251] GRPC response: {"available_capacity":51706494976,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T18:03:43.739036869+00:00 stderr F I1208 18:03:43.738544 1 connection.go:252] GRPC error: 2025-12-08T18:03:43.739036869+00:00 stderr F I1208 18:03:43.738602 1 capacity.go:667] Capacity Controller: 
updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 50494624Ki, new maximumVolumeSize 83293888Ki 2025-12-08T18:03:43.744673350+00:00 stderr F I1208 18:03:43.744650 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 46714 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:03:43.744955097+00:00 stderr F I1208 18:03:43.744784 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 46714 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 50494624Ki 2025-12-08T18:04:20.851629538+00:00 stderr F I1208 18:04:20.851523 1 reflector.go:790] sigs.k8s.io/sig-storage-lib-external-provisioner/v9/controller/controller.go:845: Watch close - *v1.PersistentVolume total 14 items received 2025-12-08T18:04:43.737062376+00:00 stderr F I1208 18:04:43.736980 1 capacity.go:518] Capacity Controller: enqueuing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} for periodic update 2025-12-08T18:04:43.737062376+00:00 stderr F I1208 18:04:43.737035 1 capacity.go:574] Capacity Controller: refreshing {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:04:43.737116297+00:00 stderr F I1208 18:04:43.737061 1 connection.go:244] GRPC call: /csi.v1.Controller/GetCapacity 2025-12-08T18:04:43.737166029+00:00 stderr F I1208 18:04:43.737066 1 connection.go:245] GRPC request: {"accessible_topology":{"segments":{"topology.hostpath.csi/node":"crc"}},"parameters":{"storagePool":"local"},"volume_capabilities":[{"AccessType":{"Mount":null},"access_mode":{}}]} 2025-12-08T18:04:43.737945739+00:00 stderr F I1208 18:04:43.737914 1 connection.go:251] GRPC response: {"available_capacity":51592912896,"maximum_volume_size":{"value":85292941312},"minimum_volume_size":{}} 2025-12-08T18:04:43.737945739+00:00 stderr F I1208 18:04:43.737933 1 connection.go:252] GRPC error: 2025-12-08T18:04:43.737965199+00:00 stderr F I1208 18:04:43.737950 1 capacity.go:667] Capacity Controller: updating csisc-k4gvk for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner}, new capacity 50383704Ki, new maximumVolumeSize 83293888Ki 2025-12-08T18:04:43.744236146+00:00 stderr F I1208 18:04:43.744192 1 capacity.go:715] Capacity Controller: CSIStorageCapacity csisc-k4gvk with resource version 46880 is already known to match {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} 2025-12-08T18:04:43.744699468+00:00 stderr F I1208 18:04:43.744670 1 capacity.go:672] Capacity Controller: updated csisc-k4gvk with new resource version 46880 for {segment:0xc0005a9878 storageClassName:crc-csi-hostpath-provisioner} with capacity 50383704Ki
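The records above are the capacity controller's steady-state loop: roughly once a minute it enqueues the {segment, storageClassName} pair, refreshes it by calling /csi.v1.Controller/GetCapacity against the hostpath driver, and rewrites the csisc-k4gvk CSIStorageCapacity object with the returned available_capacity and maximum_volume_size. The following is a minimal Go sketch of that gRPC call using the CSI spec client; the socket path and timeout are assumptions (the provisioner normally takes the endpoint from its --csi-address flag).

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumption: driver socket location; the provisioner normally gets this from --csi-address.
	conn, err := grpc.Dial("unix:///csi/csi.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Mirrors the GRPC request printed by connection.go:245 above.
	resp, err := csi.NewControllerClient(conn).GetCapacity(ctx, &csi.GetCapacityRequest{
		VolumeCapabilities: []*csi.VolumeCapability{{
			AccessType: &csi.VolumeCapability_Mount{}, // "AccessType":{"Mount":null}
			AccessMode: &csi.VolumeCapability_AccessMode{},
		}},
		Parameters: map[string]string{"storagePool": "local"},
		AccessibleTopology: &csi.Topology{
			Segments: map[string]string{"topology.hostpath.csi/node": "crc"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Corresponds to available_capacity and maximum_volume_size in the logged response.
	fmt.Println(resp.GetAvailableCapacity(), resp.GetMaximumVolumeSize().GetValue())
}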
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-565b79b866-6gkgz_dbad8204-9790-4f15-a74c-0149d19a4785/kube-storage-version-migrator-operator/0.log
2025-12-08T17:44:24.129420312+00:00 stderr F I1208 17:44:24.125915 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:44:24.133909674+00:00 stderr F I1208 17:44:24.133377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.
2025-12-08T17:44:24.134514731+00:00 stderr F I1208 17:44:24.133951 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:24.326379584+00:00 stderr F I1208 17:44:24.325851 1 builder.go:304] openshift-kube-storage-version-migrator-operator version 4.20.0-202510211040.p2.g5adc142.assembly.stream.el9-5adc142-5adc14299739bc64c8812cbab0b0ff2d12863602 2025-12-08T17:44:25.592771188+00:00 stderr F I1208 17:44:25.592166 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:25.592771188+00:00 stderr F W1208 17:44:25.592727 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:25.592771188+00:00 stderr F W1208 17:44:25.592736 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:25.592771188+00:00 stderr F W1208 17:44:25.592743 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:25.592771188+00:00 stderr F W1208 17:44:25.592748 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:25.592771188+00:00 stderr F W1208 17:44:25.592756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:25.592771188+00:00 stderr F W1208 17:44:25.592759 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:44:25.601449004+00:00 stderr F I1208 17:44:25.598690 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:25.601449004+00:00 stderr F I1208 17:44:25.598961 1 leaderelection.go:254] attempting to acquire leader lease openshift-kube-storage-version-migrator-operator/openshift-kube-storage-version-migrator-operator-lock... 
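At this point the operator is acquiring a coordination.k8s.io/v1 Lease named openshift-kube-storage-version-migrator-operator-lock and will only start its controllers once it holds it. A minimal client-go sketch of that pattern follows; the lease durations are illustrative only (the operator computes its own tolerances, as the leaderelection.go:121 record notes), and error handling is reduced to log.Fatal.

package main

import (
	"context"
	"log"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	identity, _ := os.Hostname()

	// The Lease named in the log records above.
	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Namespace: "openshift-kube-storage-version-migrator-operator",
			Name:      "openshift-kube-storage-version-migrator-operator-lock",
		},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: identity},
	}

	// Durations are illustrative; the operator derives its own (see leaderelection.go:121 above).
	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:            lock,
		LeaseDuration:   137 * time.Second,
		RenewDeadline:   107 * time.Second,
		RetryPeriod:     26 * time.Second,
		ReleaseOnCancel: true,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { log.Println("became leader; starting controllers") },
			OnStoppedLeading: func() { log.Println("lost leadership; shutting down") },
		},
	})
}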
2025-12-08T17:44:25.605513666+00:00 stderr F I1208 17:44:25.605463 1 secure_serving.go:213] Serving securely on [::]:8443 2025-12-08T17:44:25.609003251+00:00 stderr F I1208 17:44:25.608960 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:25.609134594+00:00 stderr F I1208 17:44:25.609082 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:25.609298169+00:00 stderr F I1208 17:44:25.609276 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:25.609667539+00:00 stderr F I1208 17:44:25.609638 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:25.611449318+00:00 stderr F I1208 17:44:25.611059 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:25.611649253+00:00 stderr F I1208 17:44:25.611619 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 2025-12-08T17:44:25.613792912+00:00 stderr F I1208 17:44:25.611996 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file 2025-12-08T17:44:25.613792912+00:00 stderr F I1208 17:44:25.613741 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController 2025-12-08T17:44:25.661076842+00:00 stderr F I1208 17:44:25.659928 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-kube-storage-version-migrator-operator", Name:"openshift-kube-storage-version-migrator-operator-lock", UID:"e7876336-887a-41b4-ab4c-7734d4110fad", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37632", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' kube-storage-version-migrator-operator-565b79b866-6gkgz_eb881855-3852-4463-9a4d-4908cb6a1f24 became leader 2025-12-08T17:44:25.663146787+00:00 stderr F I1208 17:44:25.657280 1 leaderelection.go:268] successfully acquired lease openshift-kube-storage-version-migrator-operator/openshift-kube-storage-version-migrator-operator-lock 2025-12-08T17:44:25.712095913+00:00 stderr F I1208 17:44:25.705726 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:25.712095913+00:00 stderr F I1208 17:44:25.706162 1 base_controller.go:76] Waiting for caches to sync for KubeStorageVersionMigrator 2025-12-08T17:44:25.712095913+00:00 stderr F I1208 17:44:25.706196 1 base_controller.go:76] Waiting for caches to sync for StaticConditionsController 2025-12-08T17:44:25.712095913+00:00 stderr F I1208 17:44:25.706215 1 base_controller.go:76] Waiting for caches to sync for kube-storage-version-migrator-RemoveStaleConditions 2025-12-08T17:44:25.712095913+00:00 stderr F I1208 17:44:25.706453 1 base_controller.go:76] Waiting for caches to sync for KubeStorageVersionMigratorStaticResources-StaticResources 2025-12-08T17:44:25.712591486+00:00 stderr F I1208 17:44:25.712539 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 2025-12-08T17:44:25.721769586+00:00 stderr F I1208 17:44:25.721724 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file 
2025-12-08T17:44:25.722363452+00:00 stderr F I1208 17:44:25.722336 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_kube-storage-version-migrator 2025-12-08T17:44:25.722840666+00:00 stderr F I1208 17:44:25.722823 1 shared_informer.go:320] Caches are synced for RequestHeaderAuthRequestController 2025-12-08T17:44:25.806865868+00:00 stderr F I1208 17:44:25.806408 1 base_controller.go:82] Caches are synced for KubeStorageVersionMigrator 2025-12-08T17:44:25.806865868+00:00 stderr F I1208 17:44:25.806520 1 base_controller.go:119] Starting #1 worker of KubeStorageVersionMigrator controller ... 2025-12-08T17:44:25.807057223+00:00 stderr F I1208 17:44:25.807032 1 base_controller.go:82] Caches are synced for KubeStorageVersionMigratorStaticResources-StaticResources 2025-12-08T17:44:25.807115595+00:00 stderr F I1208 17:44:25.807080 1 base_controller.go:119] Starting #1 worker of KubeStorageVersionMigratorStaticResources-StaticResources controller ... 2025-12-08T17:44:25.807177747+00:00 stderr F I1208 17:44:25.807167 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:25.807215238+00:00 stderr F I1208 17:44:25.807194 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 2025-12-08T17:44:25.808591925+00:00 stderr F I1208 17:44:25.808020 1 base_controller.go:82] Caches are synced for kube-storage-version-migrator-RemoveStaleConditions 2025-12-08T17:44:25.808591925+00:00 stderr F I1208 17:44:25.808073 1 base_controller.go:119] Starting #1 worker of kube-storage-version-migrator-RemoveStaleConditions controller ... 2025-12-08T17:44:25.808591925+00:00 stderr F I1208 17:44:25.808260 1 base_controller.go:82] Caches are synced for StaticConditionsController 2025-12-08T17:44:25.808591925+00:00 stderr F I1208 17:44:25.808268 1 base_controller.go:119] Starting #1 worker of StaticConditionsController controller ... 2025-12-08T17:44:25.822915705+00:00 stderr F I1208 17:44:25.822526 1 base_controller.go:82] Caches are synced for StatusSyncer_kube-storage-version-migrator 2025-12-08T17:44:25.822997547+00:00 stderr F I1208 17:44:25.822985 1 base_controller.go:119] Starting #1 worker of StatusSyncer_kube-storage-version-migrator controller ... 
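The paired "Waiting for caches to sync" / "Caches are synced" messages above are the standard shared-informer startup handshake: each controller blocks until its informers have completed an initial List/Watch before launching workers. Here is a minimal client-go sketch of that handshake; the Deployments informer is only an example stand-in for the resource types these controllers actually watch.

package main

import (
	"context"
	"log"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	deployments := factory.Apps().V1().Deployments().Informer()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	factory.Start(ctx.Done())
	log.Println("Waiting for caches to sync")
	if !cache.WaitForCacheSync(ctx.Done(), deployments.HasSynced) {
		log.Fatal("caches did not sync")
	}
	log.Println("Caches are synced; starting workers")
	// Worker goroutines that consume the informer's store/queue would start here.
}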
2025-12-08T17:44:25.874092542+00:00 stderr F W1208 17:44:25.874034 1 dynamic_operator_client.go:352] .status.conditions["KubeStorageVersionMigratorAvailable"].reason is missing; this will eventually be fatal 2025-12-08T17:44:25.874092542+00:00 stderr F W1208 17:44:25.874058 1 dynamic_operator_client.go:355] .status.conditions["KubeStorageVersionMigratorAvailable"].message is missing; this will eventually be fatal 2025-12-08T17:44:25.874092542+00:00 stderr F W1208 17:44:25.874063 1 dynamic_operator_client.go:352] .status.conditions["KubeStorageVersionMigratorProgressing"].reason is missing; this will eventually be fatal 2025-12-08T17:44:25.874092542+00:00 stderr F W1208 17:44:25.874069 1 dynamic_operator_client.go:355] .status.conditions["KubeStorageVersionMigratorProgressing"].message is missing; this will eventually be fatal 2025-12-08T17:44:25.888389112+00:00 stderr F I1208 17:44:25.887274 1 status_controller.go:225] clusteroperator/kube-storage-version-migrator diff {"status":{"conditions":[{"lastTransitionTime":"2025-11-02T07:52:00Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:00Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:00Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:25.906697891+00:00 stderr F I1208 17:44:25.906627 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-storage-version-migrator-operator", Name:"kube-storage-version-migrator-operator", UID:"af746821-921a-4842-94da-28c08769612a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-storage-version-migrator changed: Progressing changed from True to False ("All is well"),Available changed from False to True ("All is well") 2025-12-08T17:44:30.614785973+00:00 stderr F I1208 17:44:30.614002 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.613954631 +0000 UTC))" 2025-12-08T17:44:30.614785973+00:00 stderr F I1208 17:44:30.614596 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.614581098 +0000 UTC))" 2025-12-08T17:44:30.614785973+00:00 stderr F I1208 17:44:30.614614 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 
17:44:30.614603738 +0000 UTC))" 2025-12-08T17:44:30.614785973+00:00 stderr F I1208 17:44:30.614631 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.614621449 +0000 UTC))" 2025-12-08T17:44:30.614785973+00:00 stderr F I1208 17:44:30.614649 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.614636599 +0000 UTC))" 2025-12-08T17:44:30.614785973+00:00 stderr F I1208 17:44:30.614666 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.614654759 +0000 UTC))" 2025-12-08T17:44:30.614785973+00:00 stderr F I1208 17:44:30.614693 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.61468199 +0000 UTC))" 2025-12-08T17:44:30.614785973+00:00 stderr F I1208 17:44:30.614710 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.614698941 +0000 UTC))" 2025-12-08T17:44:30.614785973+00:00 stderr F I1208 17:44:30.614726 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.614715361 +0000 UTC))" 2025-12-08T17:44:30.614785973+00:00 stderr F I1208 17:44:30.614748 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.614733242 +0000 UTC))" 
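The tlsconfig.go records above show the operator's metrics listener on :8443 reloading its client-CA bundle (sourced from the kube-system/extension-apiserver-authentication ConfigMap) and its service-serving certificate. Below is a static, simplified Go equivalent of the resulting server TLS configuration; the certificate paths are taken from the certName fields above, while reading the CA bundle from a local PEM file instead of watching the ConfigMap is an assumption for brevity.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"net/http"
	"os"
)

func main() {
	// Paths come from the "Loaded serving cert" record above.
	cert, err := tls.LoadX509KeyPair(
		"/var/run/secrets/serving-cert/tls.crt",
		"/var/run/secrets/serving-cert/tls.key",
	)
	if err != nil {
		log.Fatal(err)
	}

	// Assumption: the client-ca bundle has been written to a local PEM file; the operator
	// actually watches the kube-system/extension-apiserver-authentication ConfigMap.
	caPEM, err := os.ReadFile("client-ca-file.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("no client CA certificates parsed")
	}

	srv := &http.Server{
		Addr: ":8443",
		TLSConfig: &tls.Config{
			Certificates: []tls.Certificate{cert},
			ClientCAs:    pool,
			ClientAuth:   tls.VerifyClientCertIfGiven,
			MinVersion:   tls.VersionTLS12,
		},
	}
	// Certificate and key are already in TLSConfig, so the file arguments can be empty.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}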
2025-12-08T17:44:30.616119159+00:00 stderr F I1208 17:44:30.615027 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-kube-storage-version-migrator-operator.svc\" [serving] validServingFor=[metrics.openshift-kube-storage-version-migrator-operator.svc,metrics.openshift-kube-storage-version-migrator-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:14 +0000 UTC to 2027-11-02 07:52:15 +0000 UTC (now=2025-12-08 17:44:30.615001089 +0000 UTC))" 2025-12-08T17:44:30.616119159+00:00 stderr F I1208 17:44:30.615216 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215865\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215864\" (2025-12-08 16:44:24 +0000 UTC to 2026-12-08 16:44:24 +0000 UTC (now=2025-12-08 17:44:30.615202484 +0000 UTC))" 2025-12-08T17:45:16.042362607+00:00 stderr F I1208 17:45:16.042125 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.042093549 +0000 UTC))" 2025-12-08T17:45:16.042362607+00:00 stderr F I1208 17:45:16.042299 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.042281304 +0000 UTC))" 2025-12-08T17:45:16.042362607+00:00 stderr F I1208 17:45:16.042313 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.042305655 +0000 UTC))" 2025-12-08T17:45:16.042362607+00:00 stderr F I1208 17:45:16.042325 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.042318165 +0000 UTC))" 2025-12-08T17:45:16.042362607+00:00 stderr F I1208 17:45:16.042337 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.042329046 +0000 UTC))" 2025-12-08T17:45:16.042362607+00:00 stderr F I1208 17:45:16.042351 1 tlsconfig.go:181] "Loaded client CA" index=5 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.042343196 +0000 UTC))" 2025-12-08T17:45:16.042385487+00:00 stderr F I1208 17:45:16.042364 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.042355066 +0000 UTC))" 2025-12-08T17:45:16.042385487+00:00 stderr F I1208 17:45:16.042376 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.042368847 +0000 UTC))" 2025-12-08T17:45:16.042398328+00:00 stderr F I1208 17:45:16.042388 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.042380497 +0000 UTC))" 2025-12-08T17:45:16.042417348+00:00 stderr F I1208 17:45:16.042401 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.042394738 +0000 UTC))" 2025-12-08T17:45:16.042424618+00:00 stderr F I1208 17:45:16.042415 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.042406118 +0000 UTC))" 2025-12-08T17:45:16.043657242+00:00 stderr F I1208 17:45:16.042588 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-kube-storage-version-migrator-operator.svc\" [serving] validServingFor=[metrics.openshift-kube-storage-version-migrator-operator.svc,metrics.openshift-kube-storage-version-migrator-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:14 +0000 UTC to 2027-11-02 07:52:15 +0000 UTC (now=2025-12-08 17:45:16.042576323 +0000 UTC))" 2025-12-08T17:45:16.043657242+00:00 
stderr F I1208 17:45:16.042719 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215865\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215864\" (2025-12-08 16:44:24 +0000 UTC to 2026-12-08 16:44:24 +0000 UTC (now=2025-12-08 17:45:16.042709366 +0000 UTC))" 2025-12-08T17:46:25.681926655+00:00 stderr F E1208 17:46:25.680408 1 leaderelection.go:429] Failed to update lock optimitically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-storage-version-migrator-operator/leases/openshift-kube-storage-version-migrator-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:25.681926655+00:00 stderr F E1208 17:46:25.681829 1 leaderelection.go:436] error retrieving resource lock openshift-kube-storage-version-migrator-operator/openshift-kube-storage-version-migrator-operator-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-storage-version-migrator-operator/leases/openshift-kube-storage-version-migrator-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.813452143+00:00 stderr F W1208 17:46:25.813372 1 base_controller.go:242] Updating status of "KubeStorageVersionMigrator" failed: unable to ApplyStatus for operator using fieldManager "KubeStorageVersionMigrator-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigrator-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.814032020+00:00 stderr F E1208 17:46:25.813987 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigrator reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-kube-storage-version-migrator/deployments/migrator\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.816357000+00:00 stderr F E1208 17:46:25.816321 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigratorStaticResources-StaticResources reconciliation failed: [\"kube-storage-version-migrator/namespace.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator/serviceaccounts/kube-storage-version-migrator-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/roles.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/storage-version-migration-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeStorageVersionMigratorStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigratorStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:25.826161644+00:00 stderr F W1208 17:46:25.826090 1 base_controller.go:242] Updating status of "KubeStorageVersionMigrator" failed: unable to ApplyStatus for operator using fieldManager 
"KubeStorageVersionMigrator-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigrator-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.826186225+00:00 stderr F E1208 17:46:25.826156 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigrator reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-kube-storage-version-migrator/deployments/migrator\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.829433833+00:00 stderr F E1208 17:46:25.829389 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigratorStaticResources-StaticResources reconciliation failed: [\"kube-storage-version-migrator/namespace.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator/serviceaccounts/kube-storage-version-migrator-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/roles.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/storage-version-migration-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeStorageVersionMigratorStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigratorStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:25.848303119+00:00 stderr F W1208 17:46:25.848236 1 base_controller.go:242] Updating status of "KubeStorageVersionMigrator" failed: unable to ApplyStatus for operator using fieldManager "KubeStorageVersionMigrator-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigrator-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.848303119+00:00 stderr F E1208 17:46:25.848286 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigrator reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-kube-storage-version-migrator/deployments/migrator\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.850195686+00:00 stderr F E1208 17:46:25.850135 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigratorStaticResources-StaticResources reconciliation failed: [\"kube-storage-version-migrator/namespace.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator/serviceaccounts/kube-storage-version-migrator-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/roles.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/storage-version-migration-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, 
unable to ApplyStatus for operator using fieldManager \"KubeStorageVersionMigratorStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigratorStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:25.874042991+00:00 stderr F W1208 17:46:25.873976 1 base_controller.go:242] Updating status of "KubeStorageVersionMigrator" failed: unable to ApplyStatus for operator using fieldManager "KubeStorageVersionMigrator-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigrator-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.874042991+00:00 stderr F E1208 17:46:25.874028 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigrator reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-kube-storage-version-migrator/deployments/migrator\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.876516225+00:00 stderr F E1208 17:46:25.876466 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigratorStaticResources-StaticResources reconciliation failed: [\"kube-storage-version-migrator/namespace.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator/serviceaccounts/kube-storage-version-migrator-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/roles.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/storage-version-migration-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeStorageVersionMigratorStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigratorStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:25.921361431+00:00 stderr F W1208 17:46:25.921314 1 base_controller.go:242] Updating status of "KubeStorageVersionMigrator" failed: unable to ApplyStatus for operator using fieldManager "KubeStorageVersionMigrator-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigrator-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.921409613+00:00 stderr F E1208 17:46:25.921367 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigrator reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-kube-storage-version-migrator/deployments/migrator\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.925053883+00:00 stderr F E1208 17:46:25.925026 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigratorStaticResources-StaticResources reconciliation failed: [\"kube-storage-version-migrator/namespace.yaml\" (string): Get 
\"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator/serviceaccounts/kube-storage-version-migrator-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/roles.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/storage-version-migration-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeStorageVersionMigratorStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigratorStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:26.014515958+00:00 stderr F W1208 17:46:26.014406 1 base_controller.go:242] Updating status of "KubeStorageVersionMigrator" failed: unable to ApplyStatus for operator using fieldManager "KubeStorageVersionMigrator-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigrator-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.014515958+00:00 stderr F E1208 17:46:26.014453 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigrator reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-kube-storage-version-migrator/deployments/migrator\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.216349006+00:00 stderr F W1208 17:46:26.216245 1 base_controller.go:242] Updating status of "KubeStorageVersionMigrator" failed: unable to ApplyStatus for operator using fieldManager "KubeStorageVersionMigrator-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigrator-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.216349006+00:00 stderr F E1208 17:46:26.216313 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigrator reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-kube-storage-version-migrator/deployments/migrator\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.414845105+00:00 stderr F E1208 17:46:26.413977 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigratorStaticResources-StaticResources reconciliation failed: [\"kube-storage-version-migrator/namespace.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator/serviceaccounts/kube-storage-version-migrator-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/roles.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/storage-version-migration-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager 
\"KubeStorageVersionMigratorStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigratorStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:26.614733814+00:00 stderr F W1208 17:46:26.614593 1 base_controller.go:242] Updating status of "KubeStorageVersionMigrator" failed: unable to ApplyStatus for operator using fieldManager "KubeStorageVersionMigrator-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigrator-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.614733814+00:00 stderr F E1208 17:46:26.614700 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigrator reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-kube-storage-version-migrator/deployments/migrator\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.814818079+00:00 stderr F E1208 17:46:26.814739 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigratorStaticResources-StaticResources reconciliation failed: [\"kube-storage-version-migrator/namespace.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator/serviceaccounts/kube-storage-version-migrator-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/roles.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/storage-version-migration-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeStorageVersionMigratorStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigratorStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:27.141200126+00:00 stderr F E1208 17:46:27.141125 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigratorStaticResources-StaticResources reconciliation failed: [\"kube-storage-version-migrator/namespace.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator/serviceaccounts/kube-storage-version-migrator-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/roles.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/storage-version-migration-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeStorageVersionMigratorStaticResources-StaticResources\": Patch 
\"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigratorStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:27.265831277+00:00 stderr F W1208 17:46:27.265786 1 base_controller.go:242] Updating status of "KubeStorageVersionMigrator" failed: unable to ApplyStatus for operator using fieldManager "KubeStorageVersionMigrator-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigrator-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:27.265959071+00:00 stderr F E1208 17:46:27.265946 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigrator reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-kube-storage-version-migrator/deployments/migrator\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.787501636+00:00 stderr F E1208 17:46:27.787452 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigratorStaticResources-StaticResources reconciliation failed: [\"kube-storage-version-migrator/namespace.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator/serviceaccounts/kube-storage-version-migrator-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/roles.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/storage-version-migration-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeStorageVersionMigratorStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigratorStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:28.559993082+00:00 stderr F W1208 17:46:28.557582 1 base_controller.go:242] Updating status of "KubeStorageVersionMigrator" failed: unable to ApplyStatus for operator using fieldManager "KubeStorageVersionMigrator-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigrator-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:28.559993082+00:00 stderr F E1208 17:46:28.557974 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigrator reconciliation failed: Get \"https://10.217.4.1:443/apis/apps/v1/namespaces/openshift-kube-storage-version-migrator/deployments/migrator\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:29.074957039+00:00 stderr F E1208 17:46:29.074573 1 base_controller.go:279] "Unhandled Error" err="KubeStorageVersionMigratorStaticResources-StaticResources reconciliation failed: [\"kube-storage-version-migrator/namespace.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, 
\"kube-storage-version-migrator/serviceaccount.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-storage-version-migrator/serviceaccounts/kube-storage-version-migrator-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"kube-storage-version-migrator/roles.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/storage-version-migration-migrator\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeStorageVersionMigratorStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubestorageversionmigrators/cluster/status?fieldManager=KubeStorageVersionMigratorStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:47:06.320669375+00:00 stderr F I1208 17:47:06.319781 1 reflector.go:368] Caches populated for *v1.Secret from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243 2025-12-08T17:47:12.842377893+00:00 stderr F I1208 17:47:12.841424 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243 2025-12-08T17:47:23.312221224+00:00 stderr F I1208 17:47:23.311664 1 reflector.go:368] Caches populated for *v1.Deployment from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243 2025-12-08T17:47:24.108130629+00:00 stderr F I1208 17:47:24.107565 1 reflector.go:368] Caches populated for operator.openshift.io/v1, Resource=kubestorageversionmigrators from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243 2025-12-08T17:47:28.942690246+00:00 stderr F I1208 17:47:28.942072 1 reflector.go:368] Caches populated for *v1.ClusterOperator from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243 2025-12-08T17:48:00.675061833+00:00 stderr F I1208 17:48:00.674360 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243 2025-12-08T17:48:06.938032073+00:00 stderr F I1208 17:48:06.937577 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243 ././@LongLink0000644000000000000000000000026100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-oper0000755000175000017500000000000015115611514033104 5ustar zuulzuul././@LongLink0000644000000000000000000000030200000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8/network-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-oper0000755000175000017500000000000015115611521033102 5ustar zuulzuul././@LongLink0000644000000000000000000000030700000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8/network-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-oper0000644000175000017500000300571115115611514033115 0ustar zuulzuul2025-12-08T17:44:02.458514715+00:00 stderr F I1208 17:44:02.449084 1 cmd.go:253] Using 
service-serving-cert provided certificates 2025-12-08T17:44:02.458792073+00:00 stderr F I1208 17:44:02.458725 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:02.461794085+00:00 stderr F I1208 17:44:02.461739 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:02.490324713+00:00 stderr F I1208 17:44:02.489995 1 builder.go:304] network-operator version 4.20.0-202510211040.p2.gb0393aa.assembly.stream.el9-b0393aa-b0393aa3e67302d89e91b8f7b1013b6d2e317f04 2025-12-08T17:44:03.081714265+00:00 stderr F I1208 17:44:03.081590 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:03.081714265+00:00 stderr F W1208 17:44:03.081636 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:03.081714265+00:00 stderr F W1208 17:44:03.081647 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:03.081714265+00:00 stderr F W1208 17:44:03.081657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:03.081714265+00:00 stderr F W1208 17:44:03.081665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:03.081714265+00:00 stderr F W1208 17:44:03.081672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:03.081714265+00:00 stderr F W1208 17:44:03.081678 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:44:03.085233871+00:00 stderr F I1208 17:44:03.085150 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:03.085688493+00:00 stderr F I1208 17:44:03.085645 1 leaderelection.go:257] attempting to acquire leader lease openshift-network-operator/network-operator-lock... 
2025-12-08T17:44:03.086104204+00:00 stderr F I1208 17:44:03.086061 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:03.086448463+00:00 stderr F I1208 17:44:03.086388 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:03.086448463+00:00 stderr F I1208 17:44:03.086419 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:03.086995088+00:00 stderr F I1208 17:44:03.086944 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:03.087100921+00:00 stderr F I1208 17:44:03.087060 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:03.087112071+00:00 stderr F I1208 17:44:03.086946 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:03.087323438+00:00 stderr F I1208 17:44:03.087258 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:03.087605856+00:00 stderr F I1208 17:44:03.087562 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:03.087789661+00:00 stderr F I1208 17:44:03.087344 1 secure_serving.go:211] Serving securely on [::]:9104 2025-12-08T17:44:03.092579390+00:00 stderr F I1208 17:44:03.092487 1 leaderelection.go:271] successfully acquired lease openshift-network-operator/network-operator-lock 2025-12-08T17:44:03.092752575+00:00 stderr F I1208 17:44:03.092693 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-network-operator", Name:"network-operator-lock", UID:"3b926046-f029-4c42-859b-6c530a3e535e", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"36476", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' crc_ecd2c522-70a0-4510-8a56-53fce2bc8cd9 became leader 2025-12-08T17:44:03.122433235+00:00 stderr F I1208 17:44:03.121907 1 operator.go:104] Creating status manager for stand-alone cluster 2025-12-08T17:44:03.122433235+00:00 stderr F I1208 17:44:03.122151 1 operator.go:108] Fetching cluster feature gates... 2025-12-08T17:44:03.122564469+00:00 stderr F I1208 17:44:03.122526 1 operator.go:126] Waiting for feature gates initialization... 
2025-12-08T17:44:03.122652431+00:00 stderr F I1208 17:44:03.122616 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:03.127408121+00:00 stderr F I1208 17:44:03.127326 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-network-operator", Name:"network-operator", UID:"2c897060-d3cf-4d7f-8d38-ef464b7a697a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:03.127408121+00:00 stderr F I1208 17:44:03.127231 1 operator.go:133] FeatureGates initialized: knownFeatureGates=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter 
ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:03.127465273+00:00 stderr F I1208 17:44:03.127397 1 operator.go:145] Adding controller-runtime controllers 2025-12-08T17:44:03.141914307+00:00 stderr F I1208 17:44:03.141844 1 client.go:241] Starting informers... 2025-12-08T17:44:03.142137493+00:00 stderr F I1208 17:44:03.142085 1 client.go:252] Waiting for informers to sync... 
2025-12-08T17:44:03.210175318+00:00 stderr F I1208 17:44:03.210010 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:03.210175318+00:00 stderr F I1208 17:44:03.210092 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:03.223366218+00:00 stderr F I1208 17:44:03.223046 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:03.344295537+00:00 stderr F I1208 17:44:03.344206 1 client.go:273] Informers started and synced 2025-12-08T17:44:03.344295537+00:00 stderr F I1208 17:44:03.344262 1 operator.go:169] Starting controller-manager 2025-12-08T17:44:03.345354616+00:00 stderr F I1208 17:44:03.345308 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:03.345354616+00:00 stderr F I1208 17:44:03.345327 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:03.345354616+00:00 stderr F I1208 17:44:03.345342 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 2025-12-08T17:44:03.345408548+00:00 stderr F I1208 17:44:03.345369 1 base_controller.go:76] Waiting for caches to sync for cluster-network-operator-ManagementState 2025-12-08T17:44:03.345408548+00:00 stderr F I1208 17:44:03.345404 1 base_controller.go:82] Caches are synced for cluster-network-operator-ManagementState 2025-12-08T17:44:03.345428048+00:00 stderr F I1208 17:44:03.345413 1 base_controller.go:119] Starting #1 worker of cluster-network-operator-ManagementState controller ... 2025-12-08T17:44:03.345562172+00:00 stderr F I1208 17:44:03.345531 1 base_controller.go:76] Waiting for caches to sync for ConnectivityCheckController 2025-12-08T17:44:03.347521224+00:00 stderr F I1208 17:44:03.347485 1 controller.go:246] "Starting EventSource" controller="egress-router-controller" source="kind source: *v1.EgressRouter" 2025-12-08T17:44:03.348297246+00:00 stderr F I1208 17:44:03.348255 1 controller.go:246] "Starting EventSource" controller="proxyconfig-controller" source="kind source: *v1.Proxy" 2025-12-08T17:44:03.348520002+00:00 stderr F I1208 17:44:03.348485 1 controller.go:246] "Starting EventSource" controller="clusterconfig-controller" source="kind source: *v1.Network" 2025-12-08T17:44:03.348641666+00:00 stderr F I1208 17:44:03.348607 1 controller.go:246] "Starting EventSource" controller="proxyconfig-controller" source="informer source: 0xc00045cc60" 2025-12-08T17:44:03.348834071+00:00 stderr F I1208 17:44:03.348786 1 controller.go:246] "Starting EventSource" controller="pki-controller" source="kind source: *v1.OperatorPKI" 2025-12-08T17:44:03.348983075+00:00 stderr F I1208 17:44:03.348946 1 controller.go:246] "Starting EventSource" controller="operconfig-controller" source="kind source: *v1.Node" 2025-12-08T17:44:03.349136179+00:00 stderr F I1208 17:44:03.349103 1 controller.go:246] "Starting EventSource" controller="signer-controller" source="kind source: *v1.CertificateSigningRequest" 2025-12-08T17:44:03.349241592+00:00 stderr F I1208 17:44:03.349200 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.349241592+00:00 stderr F I1208 17:44:03.349226 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 
2025-12-08T17:44:03.349475578+00:00 stderr F I1208 17:44:03.349437 1 controller.go:246] "Starting EventSource" controller="configmap-trust-bundle-injector-controller" source="informer source: 0xc00045d080" 2025-12-08T17:44:03.349528549+00:00 stderr F I1208 17:44:03.349506 1 controller.go:246] "Starting EventSource" controller="configmap-trust-bundle-injector-controller" source="informer source: 0xc00045cfd0" 2025-12-08T17:44:03.349549310+00:00 stderr F I1208 17:44:03.349539 1 controller.go:186] "Starting Controller" controller="configmap-trust-bundle-injector-controller" 2025-12-08T17:44:03.349560180+00:00 stderr F I1208 17:44:03.349555 1 controller.go:195] "Starting workers" controller="configmap-trust-bundle-injector-controller" worker count=1 2025-12-08T17:44:03.349613792+00:00 stderr F I1208 17:44:03.349586 1 controller.go:246] "Starting EventSource" controller="operconfig-controller" source="kind source: *v1.Network" 2025-12-08T17:44:03.349613792+00:00 stderr F I1208 17:44:03.349510 1 controller.go:246] "Starting EventSource" controller="infrastructureconfig-controller" source="kind source: *v1.Infrastructure" 2025-12-08T17:44:03.349656493+00:00 stderr F I1208 17:44:03.349638 1 controller.go:246] "Starting EventSource" controller="operconfig-controller" source="kind source: *v1.Network" 2025-12-08T17:44:03.349705834+00:00 stderr F I1208 17:44:03.349670 1 controller.go:246] "Starting EventSource" controller="operconfig-controller" source="informer source: 0xc00045ce70" 2025-12-08T17:44:03.349705834+00:00 stderr F I1208 17:44:03.349684 1 controller.go:246] "Starting EventSource" controller="ingress-config-controller" source="kind source: *v1.IngressController" 2025-12-08T17:44:03.349794907+00:00 stderr F I1208 17:44:03.349746 1 log.go:245] openshift-network-operator/mtu changed, triggering operconf reconciliation 2025-12-08T17:44:03.349794907+00:00 stderr F I1208 17:44:03.349767 1 controller.go:246] "Starting EventSource" controller="dashboard-controller" source="informer source: 0xc00045d600" 2025-12-08T17:44:03.349794907+00:00 stderr F I1208 17:44:03.349779 1 log.go:245] Reconciling configmap from openshift-config-managed/trusted-ca-bundle 2025-12-08T17:44:03.349809687+00:00 stderr F I1208 17:44:03.349803 1 controller.go:186] "Starting Controller" controller="dashboard-controller" 2025-12-08T17:44:03.349821027+00:00 stderr F I1208 17:44:03.349814 1 controller.go:195] "Starting workers" controller="dashboard-controller" worker count=1 2025-12-08T17:44:03.349908870+00:00 stderr F I1208 17:44:03.349870 1 controller.go:246] "Starting EventSource" controller="allowlist-controller" source="informer source: 0xc00045d4a0" 2025-12-08T17:44:03.349926320+00:00 stderr F I1208 17:44:03.349914 1 controller.go:186] "Starting Controller" controller="allowlist-controller" 2025-12-08T17:44:03.349926320+00:00 stderr F I1208 17:44:03.349921 1 controller.go:195] "Starting workers" controller="allowlist-controller" worker count=1 2025-12-08T17:44:03.349978762+00:00 stderr F I1208 17:44:03.349960 1 controller.go:246] "Starting EventSource" controller="machineconfig-watcher" source="kind source: *v1.MachineConfig" 2025-12-08T17:44:03.350325031+00:00 stderr F I1208 17:44:03.350279 1 controller.go:246] "Starting EventSource" controller="pod-watcher" source="informer source: 0xc00045d810" 2025-12-08T17:44:03.350340311+00:00 stderr F I1208 17:44:03.350330 1 dashboard_controller.go:117] Reconcile dashboards 2025-12-08T17:44:03.350578998+00:00 stderr F I1208 17:44:03.350539 1 controller.go:246] "Starting 
EventSource" controller="pod-watcher" source="informer source: 0xc00045d6b0" 2025-12-08T17:44:03.350578998+00:00 stderr F I1208 17:44:03.350555 1 controller.go:246] "Starting EventSource" controller="pod-watcher" source="informer source: 0xc00045d760" 2025-12-08T17:44:03.350578998+00:00 stderr F I1208 17:44:03.350572 1 controller.go:186] "Starting Controller" controller="pod-watcher" 2025-12-08T17:44:03.350595048+00:00 stderr F I1208 17:44:03.350579 1 controller.go:195] "Starting workers" controller="pod-watcher" worker count=1 2025-12-08T17:44:03.350595048+00:00 stderr F I1208 17:44:03.350578 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=openshiftapiservers" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.350662991+00:00 stderr F I1208 17:44:03.350632 1 controller.go:246] "Starting EventSource" controller="machineconfigpool-watcher" source="kind source: *v1.MachineConfigPool" 2025-12-08T17:44:03.350819505+00:00 stderr F I1208 17:44:03.350773 1 log.go:245] openshift-network-operator/openshift-service-ca.crt changed, triggering operconf reconciliation 2025-12-08T17:44:03.350819505+00:00 stderr F I1208 17:44:03.350790 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-admission-controller updated, re-generating status 2025-12-08T17:44:03.350819505+00:00 stderr F I1208 17:44:03.350809 1 log.go:245] openshift-network-operator/iptables-alerter-script changed, triggering operconf reconciliation 2025-12-08T17:44:03.350834676+00:00 stderr F I1208 17:44:03.350820 1 log.go:245] openshift-network-operator/kube-root-ca.crt changed, triggering operconf reconciliation 2025-12-08T17:44:03.350846276+00:00 stderr F I1208 17:44:03.350839 1 pod_watcher.go:132] Operand /, Kind= openshift-network-console/networking-console-plugin updated, re-generating status 2025-12-08T17:44:03.350857466+00:00 stderr F I1208 17:44:03.350850 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.352948983+00:00 stderr F I1208 17:44:03.352895 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:44:03.352948983+00:00 stderr F I1208 17:44:03.352924 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus updated, re-generating status 2025-12-08T17:44:03.352948983+00:00 stderr F I1208 17:44:03.352934 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-additional-cni-plugins updated, re-generating status 2025-12-08T17:44:03.352948983+00:00 stderr F I1208 17:44:03.352940 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/network-metrics-daemon updated, re-generating status 2025-12-08T17:44:03.352977723+00:00 stderr F I1208 17:44:03.352947 1 pod_watcher.go:132] Operand /, Kind= openshift-network-node-identity/network-node-identity updated, re-generating status 2025-12-08T17:44:03.352977723+00:00 stderr F I1208 17:44:03.352953 1 pod_watcher.go:132] Operand /, Kind= openshift-network-operator/iptables-alerter updated, re-generating status 2025-12-08T17:44:03.356839269+00:00 stderr F I1208 17:44:03.356773 1 log.go:245] trusted-ca-bundle changed, updating 13 configMaps 2025-12-08T17:44:03.356839269+00:00 stderr F I1208 17:44:03.356834 1 log.go:245] ConfigMap openshift-console/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.356869900+00:00 stderr F I1208 17:44:03.356858 1 log.go:245] ConfigMap openshift-kube-apiserver/trusted-ca-bundle ca-bundle.crt unchanged, 
skipping 2025-12-08T17:44:03.356928902+00:00 stderr F I1208 17:44:03.356909 1 log.go:245] ConfigMap openshift-machine-api/mao-trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.356938212+00:00 stderr F I1208 17:44:03.356932 1 log.go:245] ConfigMap openshift-marketplace/marketplace-trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.356990613+00:00 stderr F I1208 17:44:03.356956 1 log.go:245] ConfigMap openshift-apiserver/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.356998924+00:00 stderr F I1208 17:44:03.356989 1 log.go:245] ConfigMap openshift-authentication-operator/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.357034665+00:00 stderr F I1208 17:44:03.357013 1 log.go:245] ConfigMap openshift-authentication/v4-0-config-system-trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.357062565+00:00 stderr F I1208 17:44:03.357043 1 log.go:245] ConfigMap openshift-console-operator/trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.357092356+00:00 stderr F I1208 17:44:03.357072 1 log.go:245] ConfigMap openshift-controller-manager/openshift-global-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.357121517+00:00 stderr F I1208 17:44:03.357102 1 log.go:245] ConfigMap openshift-image-registry/trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.357151278+00:00 stderr F I1208 17:44:03.357131 1 log.go:245] ConfigMap openshift-ingress-operator/trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.357180459+00:00 stderr F I1208 17:44:03.357161 1 log.go:245] ConfigMap openshift-kube-controller-manager/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.357211629+00:00 stderr F I1208 17:44:03.357191 1 log.go:245] ConfigMap openshift-apiserver-operator/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:03.357524988+00:00 stderr F I1208 17:44:03.357488 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.357722363+00:00 stderr F I1208 17:44:03.357686 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:03.357810956+00:00 stderr F I1208 17:44:03.357778 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.358143534+00:00 stderr F I1208 17:44:03.358102 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.358143534+00:00 stderr F I1208 17:44:03.358115 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.358327339+00:00 stderr F I1208 17:44:03.358280 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:03.358340920+00:00 stderr F I1208 17:44:03.358326 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:03.363122520+00:00 stderr F I1208 17:44:03.363058 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.363804759+00:00 stderr F I1208 17:44:03.363777 1 reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 
2025-12-08T17:44:03.364795456+00:00 stderr F I1208 17:44:03.364744 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.365322381+00:00 stderr F I1208 17:44:03.365280 1 reflector.go:430] "Caches populated" type="*v1.EgressRouter" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.365362702+00:00 stderr F I1208 17:44:03.365336 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.365508096+00:00 stderr F I1208 17:44:03.365472 1 reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.365738332+00:00 stderr F I1208 17:44:03.365536 1 dashboard_controller.go:143] Applying dashboards manifests 2025-12-08T17:44:03.365738332+00:00 stderr F I1208 17:44:03.365564 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.365857375+00:00 stderr F I1208 17:44:03.365701 1 allowlist_controller.go:106] Reconcile allowlist for openshift-multus/cni-sysctl-allowlist 2025-12-08T17:44:03.366571294+00:00 stderr F I1208 17:44:03.366507 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.366919984+00:00 stderr F I1208 17:44:03.366868 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.368251661+00:00 stderr F I1208 17:44:03.368219 1 reflector.go:430] "Caches populated" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.368327373+00:00 stderr F I1208 17:44:03.367153 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.369479014+00:00 stderr F I1208 17:44:03.369421 1 reflector.go:430] "Caches populated" type="*v1.IngressController" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.369522285+00:00 stderr F I1208 17:44:03.369489 1 reflector.go:430] "Caches populated" type="*v1alpha1.PodNetworkConnectivityCheck" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.371395066+00:00 stderr F I1208 17:44:03.371252 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:03.371554360+00:00 stderr F I1208 17:44:03.371518 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:03.372694961+00:00 stderr F I1208 17:44:03.372650 1 reflector.go:430] "Caches populated" type="*v1.OperatorPKI" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.372745412+00:00 stderr F I1208 17:44:03.372675 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.375061706+00:00 stderr F I1208 17:44:03.374043 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:03.375061706+00:00 stderr F I1208 17:44:03.374525 1 reflector.go:430] "Caches populated" type="*v1.MachineConfigPool" 
reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.390413255+00:00 stderr F I1208 17:44:03.390326 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-ovn-health 2025-12-08T17:44:03.392864762+00:00 stderr F I1208 17:44:03.392828 1 reflector.go:430] "Caches populated" type="*v1.MachineConfig" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.395779972+00:00 stderr F I1208 17:44:03.393993 1 warning_handler.go:64] "unknown field \"spec.template.spec.volumes[0].configMap.namespace\"" controller="allowlist-controller" object="openshift-multus/cni-sysctl-allowlist" namespace="openshift-multus" name="cni-sysctl-allowlist" reconcileID="dcce349d-dfa6-41e6-85c4-64204947ce76" 2025-12-08T17:44:03.395779972+00:00 stderr F I1208 17:44:03.394012 1 warning_handler.go:64] "unknown field \"spec.template.spec.volumes[0].defaultMode\"" controller="allowlist-controller" object="openshift-multus/cni-sysctl-allowlist" namespace="openshift-multus" name="cni-sysctl-allowlist" reconcileID="dcce349d-dfa6-41e6-85c4-64204947ce76" 2025-12-08T17:44:03.397374745+00:00 stderr F I1208 17:44:03.396232 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-additional-cni-plugins updated, re-generating status 2025-12-08T17:44:03.397374745+00:00 stderr F I1208 17:44:03.396253 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-additional-cni-plugins updated, re-generating status 2025-12-08T17:44:03.405030614+00:00 stderr F I1208 17:44:03.402410 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-ovn-health was successful 2025-12-08T17:44:03.405030614+00:00 stderr F I1208 17:44:03.402451 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-network-stats 2025-12-08T17:44:03.417126903+00:00 stderr F I1208 17:44:03.417056 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-network-stats was successful 2025-12-08T17:44:03.433837959+00:00 stderr F I1208 17:44:03.433786 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.433837959+00:00 stderr F I1208 17:44:03.433809 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.434019554+00:00 stderr F I1208 17:44:03.433971 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:44:03.434019554+00:00 stderr F I1208 17:44:03.433981 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:44:03.448611542+00:00 stderr F I1208 17:44:03.448554 1 controller.go:186] "Starting Controller" controller="egress-router-controller" 2025-12-08T17:44:03.448611542+00:00 stderr F I1208 17:44:03.448582 1 controller.go:195] "Starting workers" controller="egress-router-controller" worker count=1 2025-12-08T17:44:03.448611542+00:00 stderr F I1208 17:44:03.448585 1 controller.go:186] "Starting Controller" controller="proxyconfig-controller" 2025-12-08T17:44:03.448611542+00:00 stderr F I1208 17:44:03.448602 1 controller.go:195] "Starting workers" controller="proxyconfig-controller" worker count=1 2025-12-08T17:44:03.448656564+00:00 stderr F I1208 17:44:03.448638 1 controller.go:186] "Starting Controller" controller="clusterconfig-controller" 
2025-12-08T17:44:03.448656564+00:00 stderr F I1208 17:44:03.448646 1 controller.go:195] "Starting workers" controller="clusterconfig-controller" worker count=1 2025-12-08T17:44:03.448778427+00:00 stderr F I1208 17:44:03.448744 1 log.go:245] Reconciling Network.config.openshift.io cluster 2025-12-08T17:44:03.448950272+00:00 stderr F I1208 17:44:03.448927 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/admin-acks' 2025-12-08T17:44:03.449395494+00:00 stderr F I1208 17:44:03.449193 1 log.go:245] /crc changed, triggering operconf reconciliation 2025-12-08T17:44:03.449395494+00:00 stderr F I1208 17:44:03.449224 1 controller.go:186] "Starting Controller" controller="pki-controller" 2025-12-08T17:44:03.449395494+00:00 stderr F I1208 17:44:03.449231 1 controller.go:195] "Starting workers" controller="pki-controller" worker count=1 2025-12-08T17:44:03.449395494+00:00 stderr F I1208 17:44:03.449272 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-network-node-identity/network-node-identity 2025-12-08T17:44:03.449633840+00:00 stderr F I1208 17:44:03.449604 1 controller.go:186] "Starting Controller" controller="signer-controller" 2025-12-08T17:44:03.449633840+00:00 stderr F I1208 17:44:03.449624 1 controller.go:195] "Starting workers" controller="signer-controller" worker count=1 2025-12-08T17:44:03.450344069+00:00 stderr F I1208 17:44:03.449944 1 controller.go:186] "Starting Controller" controller="infrastructureconfig-controller" 2025-12-08T17:44:03.450344069+00:00 stderr F I1208 17:44:03.449957 1 controller.go:195] "Starting workers" controller="infrastructureconfig-controller" worker count=1 2025-12-08T17:44:03.450344069+00:00 stderr F I1208 17:44:03.450015 1 controller.go:186] "Starting Controller" controller="ingress-config-controller" 2025-12-08T17:44:03.450344069+00:00 stderr F I1208 17:44:03.450021 1 controller.go:195] "Starting workers" controller="ingress-config-controller" worker count=1 2025-12-08T17:44:03.450344069+00:00 stderr F I1208 17:44:03.450049 1 log.go:245] Reconciling update to IngressController openshift-ingress-operator/default 2025-12-08T17:44:03.453383682+00:00 stderr F I1208 17:44:03.453345 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.455095869+00:00 stderr F I1208 17:44:03.454257 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.455926212+00:00 stderr F I1208 17:44:03.455893 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.456100936+00:00 stderr F I1208 17:44:03.456082 1 log.go:245] configmap 'openshift-config/admin-acks' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:44:03.456132547+00:00 stderr F I1208 17:44:03.456116 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/admin-kubeconfig-client-ca' 2025-12-08T17:44:03.457000591+00:00 stderr F I1208 17:44:03.456971 1 controller.go:186] "Starting Controller" controller="machineconfigpool-watcher" 2025-12-08T17:44:03.457015801+00:00 stderr F I1208 17:44:03.456999 1 controller.go:195] "Starting workers" controller="machineconfigpool-watcher" worker count=1 2025-12-08T17:44:03.460455186+00:00 stderr F I1208 17:44:03.460411 1 log.go:245] configmap 'openshift-config/admin-kubeconfig-client-ca' name 
differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:44:03.460476576+00:00 stderr F I1208 17:44:03.460471 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/etcd-ca-bundle' 2025-12-08T17:44:03.463787486+00:00 stderr F I1208 17:44:03.463739 1 log.go:245] configmap 'openshift-config/etcd-ca-bundle' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:44:03.463803807+00:00 stderr F I1208 17:44:03.463786 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/installer-images' 2025-12-08T17:44:03.469482052+00:00 stderr F I1208 17:44:03.469433 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.469482052+00:00 stderr F I1208 17:44:03.469457 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.471135527+00:00 stderr F I1208 17:44:03.469760 1 log.go:245] configmap 'openshift-config/installer-images' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:44:03.471135527+00:00 stderr F I1208 17:44:03.469797 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/kube-root-ca.crt' 2025-12-08T17:44:03.471135527+00:00 stderr F I1208 17:44:03.469997 1 warning_handler.go:64] "spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:03.475498626+00:00 stderr F I1208 17:44:03.474702 1 log.go:245] configmap 'openshift-config/kube-root-ca.crt' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:44:03.475498626+00:00 stderr F I1208 17:44:03.474756 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/openshift-install-manifests' 2025-12-08T17:44:03.490490434+00:00 stderr F I1208 17:44:03.490440 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.490556066+00:00 stderr F I1208 17:44:03.490542 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.490647618+00:00 stderr F I1208 17:44:03.490611 1 log.go:245] configmap 'openshift-config/openshift-install-manifests' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:44:03.490691011+00:00 stderr F I1208 17:44:03.490671 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/openshift-service-ca.crt' 2025-12-08T17:44:03.498856562+00:00 stderr F I1208 17:44:03.498693 1 log.go:245] configmap 'openshift-config/openshift-service-ca.crt' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:44:03.498856562+00:00 stderr F I1208 17:44:03.498744 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/etcd-serving-ca' 2025-12-08T17:44:03.504804776+00:00 stderr F I1208 17:44:03.504744 1 log.go:245] configmap 'openshift-config/etcd-serving-ca' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:44:03.504804776+00:00 stderr F I1208 17:44:03.504794 1 log.go:245] Reconciling additional trust bundle configmap 
'openshift-config/initial-kube-apiserver-server-ca' 2025-12-08T17:44:03.505746921+00:00 stderr F I1208 17:44:03.505703 1 reflector.go:430] "Caches populated" type="*v1.CustomResourceDefinition" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:03.520263137+00:00 stderr F I1208 17:44:03.520189 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:44:03.520263137+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:03.520263137+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:03.520263137+00:00 stderr F reason: Unknown 2025-12-08T17:44:03.520263137+00:00 stderr F status: "False" 2025-12-08T17:44:03.520263137+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:03.520263137+00:00 stderr F - lastTransitionTime: "2025-11-03T08:57:46Z" 2025-12-08T17:44:03.520263137+00:00 stderr F status: "False" 2025-12-08T17:44:03.520263137+00:00 stderr F type: Degraded 2025-12-08T17:44:03.520263137+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:03.520263137+00:00 stderr F status: "True" 2025-12-08T17:44:03.520263137+00:00 stderr F type: Upgradeable 2025-12-08T17:44:03.520263137+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:03.520263137+00:00 stderr F message: |- 2025-12-08T17:44:03.520263137+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.520263137+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:03.520263137+00:00 stderr F DaemonSet "/openshift-network-operator/iptables-alerter" is waiting for other operators to become ready 2025-12-08T17:44:03.520263137+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.520263137+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:03.520263137+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:03.520263137+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.520263137+00:00 stderr F reason: Deploying 2025-12-08T17:44:03.520263137+00:00 stderr F status: "True" 2025-12-08T17:44:03.520263137+00:00 stderr F type: Progressing 2025-12-08T17:44:03.520263137+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:03.520263137+00:00 stderr F status: "True" 2025-12-08T17:44:03.520263137+00:00 stderr F type: Available 2025-12-08T17:44:03.520984326+00:00 stderr F I1208 17:44:03.520938 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:44:03.536008247+00:00 stderr F I1208 17:44:03.535238 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:44:03.536008247+00:00 stderr F - lastTransitionTime: "2025-11-03T08:57:46Z" 2025-12-08T17:44:03.536008247+00:00 stderr F status: "False" 2025-12-08T17:44:03.536008247+00:00 stderr F type: Degraded 2025-12-08T17:44:03.536008247+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:03.536008247+00:00 stderr F status: "True" 2025-12-08T17:44:03.536008247+00:00 stderr F type: Upgradeable 2025-12-08T17:44:03.536008247+00:00 stderr F - 
lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:03.536008247+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:03.536008247+00:00 stderr F reason: Unknown 2025-12-08T17:44:03.536008247+00:00 stderr F status: "False" 2025-12-08T17:44:03.536008247+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:03.536008247+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:03.536008247+00:00 stderr F message: |- 2025-12-08T17:44:03.536008247+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.536008247+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:03.536008247+00:00 stderr F DaemonSet "/openshift-network-operator/iptables-alerter" is waiting for other operators to become ready 2025-12-08T17:44:03.536008247+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.536008247+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:03.536008247+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:03.536008247+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.536008247+00:00 stderr F reason: Deploying 2025-12-08T17:44:03.536008247+00:00 stderr F status: "True" 2025-12-08T17:44:03.536008247+00:00 stderr F type: Progressing 2025-12-08T17:44:03.536008247+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:03.536008247+00:00 stderr F status: "True" 2025-12-08T17:44:03.536008247+00:00 stderr F type: Available 2025-12-08T17:44:03.546391979+00:00 stderr F I1208 17:44:03.545872 1 base_controller.go:82] Caches are synced for ConnectivityCheckController 2025-12-08T17:44:03.546391979+00:00 stderr F I1208 17:44:03.545917 1 base_controller.go:119] Starting #1 worker of ConnectivityCheckController controller ... 
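Editor's note (added, not part of the captured log): the repeated "name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped" entries above come from the operator comparing each configmap in openshift-config against the cluster Proxy's spec.trustedCA reference and only rebuilding the trusted CA bundle for that one configmap. The Go sketch below is illustrative only; the ProxySpec stand-in type is hypothetical and this is not the cluster-network-operator's actual code.

// Illustrative sketch only (editor's addition). ProxySpec mirrors the shape of
// the cluster Proxy's spec.trustedCA reference that the skip messages compare against.
package main

import "fmt"

type ProxySpec struct {
	TrustedCAName string // name of the configmap referenced by spec.trustedCA
}

// shouldReconcileTrustBundle reports whether a configmap in openshift-config is
// the one named by the proxy's trustedCA; anything else is skipped, which is
// exactly what the "reconciliation will be skipped" entries record.
func shouldReconcileTrustBundle(p ProxySpec, configMapName string) bool {
	if p.TrustedCAName == "" {
		return false // trustedCA not set
	}
	return p.TrustedCAName == configMapName
}

func main() {
	p := ProxySpec{} // trustedCA not set on this cluster
	fmt.Println(shouldReconcileTrustBundle(p, "etcd-ca-bundle")) // false
}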
2025-12-08T17:44:03.546619295+00:00 stderr F I1208 17:44:03.546585 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:44:03.546619295+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:03.546619295+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:03.546619295+00:00 stderr F reason: Unknown 2025-12-08T17:44:03.546619295+00:00 stderr F status: "False" 2025-12-08T17:44:03.546619295+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:03.546619295+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:03.546619295+00:00 stderr F message: |- 2025-12-08T17:44:03.546619295+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:03.546619295+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:03.546619295+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" rollout is not making progress - last change 2025-11-03T09:40:47Z 2025-12-08T17:44:03.546619295+00:00 stderr F reason: RolloutHung 2025-12-08T17:44:03.546619295+00:00 stderr F status: "True" 2025-12-08T17:44:03.546619295+00:00 stderr F type: Degraded 2025-12-08T17:44:03.546619295+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:03.546619295+00:00 stderr F status: "True" 2025-12-08T17:44:03.546619295+00:00 stderr F type: Upgradeable 2025-12-08T17:44:03.546619295+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:03.546619295+00:00 stderr F message: |- 2025-12-08T17:44:03.546619295+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.546619295+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:03.546619295+00:00 stderr F DaemonSet "/openshift-network-operator/iptables-alerter" is waiting for other operators to become ready 2025-12-08T17:44:03.546619295+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.546619295+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:03.546619295+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:03.546619295+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.546619295+00:00 stderr F reason: Deploying 2025-12-08T17:44:03.546619295+00:00 stderr F status: "True" 2025-12-08T17:44:03.546619295+00:00 stderr F type: Progressing 2025-12-08T17:44:03.546619295+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:03.546619295+00:00 stderr F status: "True" 2025-12-08T17:44:03.546619295+00:00 stderr F type: Available 2025-12-08T17:44:03.546723279+00:00 stderr F I1208 17:44:03.546668 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:44:03.549386931+00:00 stderr F I1208 17:44:03.549344 1 log.go:245] unable to determine openshift-apiserver apiserver service endpoints: no openshift-apiserver api endpoints found 2025-12-08T17:44:03.550235734+00:00 stderr F I1208 
17:44:03.550207 1 log.go:245] successful reconciliation 2025-12-08T17:44:03.550352808+00:00 stderr F I1208 17:44:03.550329 1 controller.go:186] "Starting Controller" controller="machineconfig-watcher" 2025-12-08T17:44:03.550352808+00:00 stderr F I1208 17:44:03.550346 1 controller.go:195] "Starting workers" controller="machineconfig-watcher" worker count=1 2025-12-08T17:44:03.550386519+00:00 stderr F I1208 17:44:03.550369 1 controller.go:186] "Starting Controller" controller="operconfig-controller" 2025-12-08T17:44:03.550386519+00:00 stderr F I1208 17:44:03.550381 1 controller.go:195] "Starting workers" controller="operconfig-controller" worker count=1 2025-12-08T17:44:03.550446700+00:00 stderr F I1208 17:44:03.550429 1 log.go:245] Reconciling Network.operator.openshift.io cluster 2025-12-08T17:44:03.563496035+00:00 stderr F I1208 17:44:03.563441 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:44:03.563496035+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:03.563496035+00:00 stderr F message: |- 2025-12-08T17:44:03.563496035+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:03.563496035+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:03.563496035+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" rollout is not making progress - last change 2025-11-03T09:40:47Z 2025-12-08T17:44:03.563496035+00:00 stderr F reason: RolloutHung 2025-12-08T17:44:03.563496035+00:00 stderr F status: "True" 2025-12-08T17:44:03.563496035+00:00 stderr F type: Degraded 2025-12-08T17:44:03.563496035+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:03.563496035+00:00 stderr F status: "True" 2025-12-08T17:44:03.563496035+00:00 stderr F type: Upgradeable 2025-12-08T17:44:03.563496035+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:03.563496035+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:03.563496035+00:00 stderr F reason: Unknown 2025-12-08T17:44:03.563496035+00:00 stderr F status: "False" 2025-12-08T17:44:03.563496035+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:03.563496035+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:03.563496035+00:00 stderr F message: |- 2025-12-08T17:44:03.563496035+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.563496035+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:03.563496035+00:00 stderr F DaemonSet "/openshift-network-operator/iptables-alerter" is waiting for other operators to become ready 2025-12-08T17:44:03.563496035+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.563496035+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:03.563496035+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:03.563496035+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" is not available (awaiting 1 nodes) 
2025-12-08T17:44:03.563496035+00:00 stderr F reason: Deploying 2025-12-08T17:44:03.563496035+00:00 stderr F status: "True" 2025-12-08T17:44:03.563496035+00:00 stderr F type: Progressing 2025-12-08T17:44:03.563496035+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:03.563496035+00:00 stderr F status: "True" 2025-12-08T17:44:03.563496035+00:00 stderr F type: Available 2025-12-08T17:44:03.571918206+00:00 stderr F I1208 17:44:03.568281 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:44:03.574631779+00:00 stderr F I1208 17:44:03.574583 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:03.582769801+00:00 stderr F I1208 17:44:03.582286 1 warning_handler.go:64] "spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:03.582769801+00:00 stderr F I1208 17:44:03.582756 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:03.582840003+00:00 stderr F I1208 17:44:03.582822 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.582901045+00:00 stderr F I1208 17:44:03.582869 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.589445114+00:00 stderr F I1208 17:44:03.588251 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T17:44:03.589759543+00:00 stderr F I1208 17:44:03.589738 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.589798124+00:00 stderr F I1208 17:44:03.589787 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:44:03.590088341+00:00 stderr F I1208 17:44:03.590070 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:44:03.600629009+00:00 stderr F I1208 17:44:03.595113 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:44:03.600629009+00:00 stderr F I1208 17:44:03.600057 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:44:03.600629009+00:00 stderr F I1208 17:44:03.600249 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:03.600629009+00:00 stderr F I1208 17:44:03.600421 1 log.go:245] Apply / Create of (operator.openshift.io/v1, Kind=Network) /cluster was successful 2025-12-08T17:44:03.600629009+00:00 stderr F I1208 17:44:03.600436 1 log.go:245] Successfully updated Operator config from Cluster config 2025-12-08T17:44:03.654765705+00:00 stderr F I1208 17:44:03.653730 1 log.go:245] configmap 'openshift-config/initial-kube-apiserver-server-ca' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 
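Editor's note (added, not part of the captured log): the Degraded condition above uses reason RolloutHung when a DaemonSet or Deployment has stopped making progress since a recorded "last change" time. The sketch below is an assumed, minimal version of that heuristic using the standard apps/v1 DaemonSet status fields; the threshold value and the lastChange bookkeeping are hypothetical, and the operator's real logic is more detailed.

// Illustrative sketch only (editor's addition), assuming a simple staleness check.
package main

import (
	"fmt"
	"time"

	appsv1 "k8s.io/api/apps/v1"
)

// rolloutHung is a hypothetical helper: a DaemonSet that still has unavailable
// or outdated pods and whose status has not changed for longer than threshold
// is treated as hung, matching the "rollout is not making progress" message.
func rolloutHung(ds *appsv1.DaemonSet, lastChange time.Time, threshold time.Duration) bool {
	st := ds.Status
	if st.DesiredNumberScheduled == 0 {
		return false
	}
	behind := st.NumberUnavailable > 0 || st.UpdatedNumberScheduled < st.DesiredNumberScheduled
	return behind && time.Since(lastChange) > threshold
}

func main() {
	ds := &appsv1.DaemonSet{}
	ds.Status.DesiredNumberScheduled = 1
	ds.Status.NumberUnavailable = 1
	// Status last changed long ago, well past any reasonable grace period.
	fmt.Println(rolloutHung(ds, time.Now().Add(-24*time.Hour), 10*time.Minute)) // true
}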
2025-12-08T17:44:03.654765705+00:00 stderr F I1208 17:44:03.653788 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/registry-certs' 2025-12-08T17:44:03.752434140+00:00 stderr F I1208 17:44:03.752381 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:44:03.752434140+00:00 stderr F I1208 17:44:03.752409 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:44:03.752434140+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:03.752434140+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:03.752434140+00:00 stderr F reason: Unknown 2025-12-08T17:44:03.752434140+00:00 stderr F status: "False" 2025-12-08T17:44:03.752434140+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:03.752434140+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:03.752434140+00:00 stderr F message: |- 2025-12-08T17:44:03.752434140+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:03.752434140+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:03.752434140+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" rollout is not making progress - last change 2025-11-03T09:40:47Z 2025-12-08T17:44:03.752434140+00:00 stderr F reason: RolloutHung 2025-12-08T17:44:03.752434140+00:00 stderr F status: "True" 2025-12-08T17:44:03.752434140+00:00 stderr F type: Degraded 2025-12-08T17:44:03.752434140+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:03.752434140+00:00 stderr F status: "True" 2025-12-08T17:44:03.752434140+00:00 stderr F type: Upgradeable 2025-12-08T17:44:03.752434140+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:03.752434140+00:00 stderr F message: |- 2025-12-08T17:44:03.752434140+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.752434140+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:03.752434140+00:00 stderr F DaemonSet "/openshift-network-operator/iptables-alerter" is waiting for other operators to become ready 2025-12-08T17:44:03.752434140+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.752434140+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:03.752434140+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:03.752434140+00:00 stderr F reason: Deploying 2025-12-08T17:44:03.752434140+00:00 stderr F status: "True" 2025-12-08T17:44:03.752434140+00:00 stderr F type: Progressing 2025-12-08T17:44:03.752434140+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:03.752434140+00:00 stderr F status: "True" 2025-12-08T17:44:03.752434140+00:00 stderr F type: Available 2025-12-08T17:44:03.973457559+00:00 stderr F I1208 17:44:03.973343 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:44:03.973457559+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 
2025-12-08T17:44:03.973457559+00:00 stderr F message: |- 2025-12-08T17:44:03.973457559+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:03.973457559+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:03.973457559+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" rollout is not making progress - last change 2025-11-03T09:40:47Z 2025-12-08T17:44:03.973457559+00:00 stderr F reason: RolloutHung 2025-12-08T17:44:03.973457559+00:00 stderr F status: "True" 2025-12-08T17:44:03.973457559+00:00 stderr F type: Degraded 2025-12-08T17:44:03.973457559+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:03.973457559+00:00 stderr F status: "True" 2025-12-08T17:44:03.973457559+00:00 stderr F type: Upgradeable 2025-12-08T17:44:03.973457559+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:03.973457559+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:03.973457559+00:00 stderr F reason: Unknown 2025-12-08T17:44:03.973457559+00:00 stderr F status: "False" 2025-12-08T17:44:03.973457559+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:03.973457559+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:03.973457559+00:00 stderr F message: |- 2025-12-08T17:44:03.973457559+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.973457559+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:03.973457559+00:00 stderr F DaemonSet "/openshift-network-operator/iptables-alerter" is waiting for other operators to become ready 2025-12-08T17:44:03.973457559+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:03.973457559+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:03.973457559+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:03.973457559+00:00 stderr F reason: Deploying 2025-12-08T17:44:03.973457559+00:00 stderr F status: "True" 2025-12-08T17:44:03.973457559+00:00 stderr F type: Progressing 2025-12-08T17:44:03.973457559+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:03.973457559+00:00 stderr F status: "True" 2025-12-08T17:44:03.973457559+00:00 stderr F type: Available 2025-12-08T17:44:03.974391804+00:00 stderr F I1208 17:44:03.974317 1 log.go:245] configmap 'openshift-config/registry-certs' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:44:03.974422605+00:00 stderr F I1208 17:44:03.974398 1 log.go:245] Reconciling proxy 'cluster' 2025-12-08T17:44:04.055009633+00:00 stderr F I1208 17:44:04.054867 1 log.go:245] httpProxy, httpsProxy and noProxy not defined for proxy 'cluster'; validation will be skipped 2025-12-08T17:44:04.170330469+00:00 stderr F I1208 17:44:04.170203 1 log.go:245] Reconciling proxy 'cluster' complete 2025-12-08T17:44:04.359396806+00:00 stderr F I1208 17:44:04.359287 1 log.go:245] Network operator config updated with conditions: 
2025-12-08T17:44:04.359396806+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:04.359396806+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:04.359396806+00:00 stderr F reason: Unknown 2025-12-08T17:44:04.359396806+00:00 stderr F status: "False" 2025-12-08T17:44:04.359396806+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:04.359396806+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:04.359396806+00:00 stderr F message: |- 2025-12-08T17:44:04.359396806+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:04.359396806+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:04.359396806+00:00 stderr F reason: RolloutHung 2025-12-08T17:44:04.359396806+00:00 stderr F status: "True" 2025-12-08T17:44:04.359396806+00:00 stderr F type: Degraded 2025-12-08T17:44:04.359396806+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:04.359396806+00:00 stderr F status: "True" 2025-12-08T17:44:04.359396806+00:00 stderr F type: Upgradeable 2025-12-08T17:44:04.359396806+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:04.359396806+00:00 stderr F message: |- 2025-12-08T17:44:04.359396806+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" is not available (awaiting 1 nodes) 2025-12-08T17:44:04.359396806+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:04.359396806+00:00 stderr F DaemonSet "/openshift-network-operator/iptables-alerter" is waiting for other operators to become ready 2025-12-08T17:44:04.359396806+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:04.359396806+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:04.359396806+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:04.359396806+00:00 stderr F reason: Deploying 2025-12-08T17:44:04.359396806+00:00 stderr F status: "True" 2025-12-08T17:44:04.359396806+00:00 stderr F type: Progressing 2025-12-08T17:44:04.359396806+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:04.359396806+00:00 stderr F status: "True" 2025-12-08T17:44:04.359396806+00:00 stderr F type: Available 2025-12-08T17:44:04.360153597+00:00 stderr F I1208 17:44:04.360080 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:44:04.579011466+00:00 stderr F I1208 17:44:04.578171 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:44:04.579011466+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:04.579011466+00:00 stderr F message: |- 2025-12-08T17:44:04.579011466+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:04.579011466+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:04.579011466+00:00 stderr F reason: RolloutHung 
2025-12-08T17:44:04.579011466+00:00 stderr F status: "True" 2025-12-08T17:44:04.579011466+00:00 stderr F type: Degraded 2025-12-08T17:44:04.579011466+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:04.579011466+00:00 stderr F status: "True" 2025-12-08T17:44:04.579011466+00:00 stderr F type: Upgradeable 2025-12-08T17:44:04.579011466+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:04.579011466+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:04.579011466+00:00 stderr F reason: Unknown 2025-12-08T17:44:04.579011466+00:00 stderr F status: "False" 2025-12-08T17:44:04.579011466+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:04.579011466+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:04.579011466+00:00 stderr F message: |- 2025-12-08T17:44:04.579011466+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" is not available (awaiting 1 nodes) 2025-12-08T17:44:04.579011466+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:04.579011466+00:00 stderr F DaemonSet "/openshift-network-operator/iptables-alerter" is waiting for other operators to become ready 2025-12-08T17:44:04.579011466+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:04.579011466+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:04.579011466+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:04.579011466+00:00 stderr F reason: Deploying 2025-12-08T17:44:04.579011466+00:00 stderr F status: "True" 2025-12-08T17:44:04.579011466+00:00 stderr F type: Progressing 2025-12-08T17:44:04.579011466+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:04.579011466+00:00 stderr F status: "True" 2025-12-08T17:44:04.579011466+00:00 stderr F type: Available 2025-12-08T17:44:04.757855515+00:00 stderr F I1208 17:44:04.757778 1 log.go:245] Reconciling configmap from openshift-kube-apiserver/trusted-ca-bundle 2025-12-08T17:44:04.760790255+00:00 stderr F I1208 17:44:04.760763 1 log.go:245] ConfigMap openshift-kube-apiserver/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:05.158468572+00:00 stderr F I1208 17:44:05.158403 1 dashboard_controller.go:117] Reconcile dashboards 2025-12-08T17:44:05.163484279+00:00 stderr F I1208 17:44:05.163420 1 dashboard_controller.go:143] Applying dashboards manifests 2025-12-08T17:44:05.175676121+00:00 stderr F I1208 17:44:05.175593 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-ovn-health 2025-12-08T17:44:05.189200141+00:00 stderr F I1208 17:44:05.189136 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-ovn-health was successful 2025-12-08T17:44:05.189264432+00:00 stderr F I1208 17:44:05.189254 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-network-stats 2025-12-08T17:44:05.206768229+00:00 stderr F I1208 17:44:05.206640 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-network-stats was successful 2025-12-08T17:44:05.359699011+00:00 stderr F I1208 17:44:05.359613 1 log.go:245] Reconciling 
pki.network.operator.openshift.io openshift-ovn-kubernetes/ovn 2025-12-08T17:44:05.363115494+00:00 stderr F I1208 17:44:05.363031 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:05.363307049+00:00 stderr F I1208 17:44:05.363255 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:05.454752184+00:00 stderr F I1208 17:44:05.454695 1 pod_watcher.go:132] Operand /, Kind= openshift-network-operator/iptables-alerter updated, re-generating status 2025-12-08T17:44:05.454845037+00:00 stderr F I1208 17:44:05.454830 1 pod_watcher.go:132] Operand /, Kind= openshift-network-operator/iptables-alerter updated, re-generating status 2025-12-08T17:44:05.461296482+00:00 stderr F I1208 17:44:05.461266 1 log.go:245] successful reconciliation 2025-12-08T17:44:05.573308958+00:00 stderr F I1208 17:44:05.573264 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu-host=, the list of nodes are [] 2025-12-08T17:44:05.575816416+00:00 stderr F I1208 17:44:05.575794 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu=, the list of nodes are [] 2025-12-08T17:44:05.578315924+00:00 stderr F I1208 17:44:05.578292 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/smart-nic=, the list of nodes are [] 2025-12-08T17:44:05.756260708+00:00 stderr F I1208 17:44:05.756221 1 log.go:245] Reconciling Network.config.openshift.io cluster 2025-12-08T17:44:05.769720585+00:00 stderr F I1208 17:44:05.769663 1 ovn_kubernetes.go:956] OVN configuration is now &{GatewayMode: HyperShiftConfig:0xc00069acc0 DisableUDPAggregation:false DpuHostModeLabel:network.operator.openshift.io/dpu-host DpuHostModeNodes:[] DpuHostModeValue: DpuModeLabel:network.operator.openshift.io/dpu DpuModeNodes:[] SmartNicModeLabel:network.operator.openshift.io/smart-nic SmartNicModeNodes:[] SmartNicModeValue: MgmtPortResourceName: ConfigOverrides:map[]} 2025-12-08T17:44:06.174310751+00:00 stderr F I1208 17:44:06.174231 1 ovn_kubernetes.go:1728] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete; 1/1 scheduled; 1 available; generation 3 -> 3 2025-12-08T17:44:06.174310751+00:00 stderr F I1208 17:44:06.174279 1 ovn_kubernetes.go:1733] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete 2025-12-08T17:44:06.174310751+00:00 stderr F I1208 17:44:06.174294 1 ovn_kubernetes.go:1248] ovnkube-control-plane deployment status: progressing=false 2025-12-08T17:44:06.178219848+00:00 stderr F I1208 17:44:06.178179 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout progressing; 1/1 scheduled; 1 unavailable; 0 available; generation 2 -> 2 2025-12-08T17:44:06.178219848+00:00 stderr F W1208 17:44:06.178209 1 ovn_kubernetes.go:1708] daemonset openshift-ovn-kubernetes/ovnkube-node rollout seems to have hung with 0/1 behind, force-continuing 2025-12-08T17:44:06.178242359+00:00 stderr F I1208 17:44:06.178218 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout progressing; 1/1 scheduled; 1 unavailable; 0 available; generation 2 -> 2 2025-12-08T17:44:06.178242359+00:00 stderr F W1208 17:44:06.178227 1 ovn_kubernetes.go:1708] daemonset openshift-ovn-kubernetes/ovnkube-node rollout seems to have hung with 0/1 behind, force-continuing 2025-12-08T17:44:06.178272539+00:00 stderr F I1208 17:44:06.178248 1 ovn_kubernetes.go:1279] ovnkube-node DaemonSet status: 
progressing=false 2025-12-08T17:44:06.357829457+00:00 stderr F I1208 17:44:06.357719 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T17:44:06.371667504+00:00 stderr F I1208 17:44:06.371619 1 ovn_kubernetes.go:1321] Found the DefaultV4MasqueradeSubnet(169.254.0.0/17) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:44:06.371704875+00:00 stderr F I1208 17:44:06.371669 1 ovn_kubernetes.go:1318] Found the DefaultV6MasqueradeSubnet(fd69::/112) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:44:06.375650513+00:00 stderr F I1208 17:44:06.375613 1 log.go:245] Apply / Create of (operator.openshift.io/v1, Kind=Network) /cluster was successful 2025-12-08T17:44:06.375650513+00:00 stderr F I1208 17:44:06.375641 1 log.go:245] Successfully updated Operator config from Cluster config 2025-12-08T17:44:06.376279911+00:00 stderr F I1208 17:44:06.376256 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:44:06.569952623+00:00 stderr F I1208 17:44:06.569897 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T17:44:06.584807708+00:00 stderr F I1208 17:44:06.584747 1 log.go:245] Failed to update the operator configuration: could not apply (/, Kind=) /cluster, err: failed to apply / update (operator.openshift.io/v1, Kind=Network) /cluster: Operation cannot be fulfilled on networks.operator.openshift.io "cluster": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:44:06.584858170+00:00 stderr F E1208 17:44:06.584823 1 controller.go:353] "Reconciler error" err="could not apply (/, Kind=) /cluster, err: failed to apply / update (operator.openshift.io/v1, Kind=Network) /cluster: Operation cannot be fulfilled on networks.operator.openshift.io \"cluster\": the object has been modified; please apply your changes to the latest version and try again" controller="operconfig-controller" object="cluster" namespace="" name="cluster" reconcileID="f1ea7756-79a4-4c09-9472-0bac0b3261a5" 2025-12-08T17:44:06.589634800+00:00 stderr F I1208 17:44:06.589592 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:44:06.589634800+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:06.589634800+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:06.589634800+00:00 stderr F reason: Unknown 2025-12-08T17:44:06.589634800+00:00 stderr F status: "False" 2025-12-08T17:44:06.589634800+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:06.589634800+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:06.589634800+00:00 stderr F message: |- 2025-12-08T17:44:06.589634800+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:06.589634800+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:06.589634800+00:00 stderr F reason: RolloutHung 2025-12-08T17:44:06.589634800+00:00 stderr F status: "True" 2025-12-08T17:44:06.589634800+00:00 stderr F type: Degraded 2025-12-08T17:44:06.589634800+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:06.589634800+00:00 stderr F status: "True" 2025-12-08T17:44:06.589634800+00:00 stderr F type: 
Upgradeable 2025-12-08T17:44:06.589634800+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:06.589634800+00:00 stderr F message: |- 2025-12-08T17:44:06.589634800+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" is not available (awaiting 1 nodes) 2025-12-08T17:44:06.589634800+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:06.589634800+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:06.589634800+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:06.589634800+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:06.589634800+00:00 stderr F reason: Deploying 2025-12-08T17:44:06.589634800+00:00 stderr F status: "True" 2025-12-08T17:44:06.589634800+00:00 stderr F type: Progressing 2025-12-08T17:44:06.589634800+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:06.589634800+00:00 stderr F status: "True" 2025-12-08T17:44:06.589634800+00:00 stderr F type: Available 2025-12-08T17:44:06.590489543+00:00 stderr F I1208 17:44:06.590411 1 log.go:245] Reconciling Network.operator.openshift.io cluster 2025-12-08T17:44:06.591221003+00:00 stderr F I1208 17:44:06.591180 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:44:06.981936851+00:00 stderr F I1208 17:44:06.981313 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:44:06.981936851+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:06.981936851+00:00 stderr F message: |- 2025-12-08T17:44:06.981936851+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:06.981936851+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:06.981936851+00:00 stderr F reason: RolloutHung 2025-12-08T17:44:06.981936851+00:00 stderr F status: "True" 2025-12-08T17:44:06.981936851+00:00 stderr F type: Degraded 2025-12-08T17:44:06.981936851+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:06.981936851+00:00 stderr F status: "True" 2025-12-08T17:44:06.981936851+00:00 stderr F type: Upgradeable 2025-12-08T17:44:06.981936851+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:06.981936851+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:06.981936851+00:00 stderr F reason: Unknown 2025-12-08T17:44:06.981936851+00:00 stderr F status: "False" 2025-12-08T17:44:06.981936851+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:06.981936851+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:06.981936851+00:00 stderr F message: |- 2025-12-08T17:44:06.981936851+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" is not available (awaiting 1 nodes) 2025-12-08T17:44:06.981936851+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:06.981936851+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 
2025-12-08T17:44:06.981936851+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:06.981936851+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:06.981936851+00:00 stderr F reason: Deploying 2025-12-08T17:44:06.981936851+00:00 stderr F status: "True" 2025-12-08T17:44:06.981936851+00:00 stderr F type: Progressing 2025-12-08T17:44:06.981936851+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:06.981936851+00:00 stderr F status: "True" 2025-12-08T17:44:06.981936851+00:00 stderr F type: Available 2025-12-08T17:44:07.974339431+00:00 stderr F I1208 17:44:07.974260 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu-host=, the list of nodes are [] 2025-12-08T17:44:07.976686415+00:00 stderr F I1208 17:44:07.976613 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu=, the list of nodes are [] 2025-12-08T17:44:07.980159039+00:00 stderr F I1208 17:44:07.980092 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/smart-nic=, the list of nodes are [] 2025-12-08T17:44:08.160044936+00:00 stderr F I1208 17:44:08.159989 1 log.go:245] Reconciling configmap from openshift-machine-api/mao-trusted-ca 2025-12-08T17:44:08.164837057+00:00 stderr F I1208 17:44:08.164769 1 log.go:245] ConfigMap openshift-machine-api/mao-trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:08.169681779+00:00 stderr F I1208 17:44:08.169583 1 ovn_kubernetes.go:956] OVN configuration is now &{GatewayMode: HyperShiftConfig:0xc003016f00 DisableUDPAggregation:false DpuHostModeLabel:network.operator.openshift.io/dpu-host DpuHostModeNodes:[] DpuHostModeValue: DpuModeLabel:network.operator.openshift.io/dpu DpuModeNodes:[] SmartNicModeLabel:network.operator.openshift.io/smart-nic SmartNicModeNodes:[] SmartNicModeValue: MgmtPortResourceName: ConfigOverrides:map[]} 2025-12-08T17:44:08.358501299+00:00 stderr F I1208 17:44:08.358437 1 dashboard_controller.go:117] Reconcile dashboards 2025-12-08T17:44:08.363569138+00:00 stderr F I1208 17:44:08.363537 1 dashboard_controller.go:143] Applying dashboards manifests 2025-12-08T17:44:08.372170262+00:00 stderr F I1208 17:44:08.372088 1 ovn_kubernetes.go:1728] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete; 1/1 scheduled; 1 available; generation 3 -> 3 2025-12-08T17:44:08.372170262+00:00 stderr F I1208 17:44:08.372125 1 ovn_kubernetes.go:1733] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete 2025-12-08T17:44:08.372170262+00:00 stderr F I1208 17:44:08.372138 1 ovn_kubernetes.go:1248] ovnkube-control-plane deployment status: progressing=false 2025-12-08T17:44:08.372588974+00:00 stderr F I1208 17:44:08.372564 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-ovn-health 2025-12-08T17:44:08.377269182+00:00 stderr F I1208 17:44:08.377230 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout progressing; 1/1 scheduled; 1 unavailable; 0 available; generation 2 -> 2 2025-12-08T17:44:08.377269182+00:00 stderr F W1208 17:44:08.377260 1 ovn_kubernetes.go:1708] daemonset openshift-ovn-kubernetes/ovnkube-node rollout seems to have hung with 0/1 behind, force-continuing 2025-12-08T17:44:08.377321023+00:00 stderr F I1208 17:44:08.377271 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout progressing; 1/1 
scheduled; 1 unavailable; 0 available; generation 2 -> 2 2025-12-08T17:44:08.377321023+00:00 stderr F W1208 17:44:08.377281 1 ovn_kubernetes.go:1708] daemonset openshift-ovn-kubernetes/ovnkube-node rollout seems to have hung with 0/1 behind, force-continuing 2025-12-08T17:44:08.377321023+00:00 stderr F I1208 17:44:08.377305 1 ovn_kubernetes.go:1279] ovnkube-node DaemonSet status: progressing=false 2025-12-08T17:44:08.389606828+00:00 stderr F I1208 17:44:08.389534 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-ovn-health was successful 2025-12-08T17:44:08.389606828+00:00 stderr F I1208 17:44:08.389577 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-network-stats 2025-12-08T17:44:08.414682812+00:00 stderr F I1208 17:44:08.414564 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/grafana-dashboard-network-stats was successful 2025-12-08T17:44:08.557547549+00:00 stderr F I1208 17:44:08.557504 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/signer 2025-12-08T17:44:08.560968042+00:00 stderr F I1208 17:44:08.560514 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:08.561200849+00:00 stderr F I1208 17:44:08.561153 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:08.569327170+00:00 stderr F I1208 17:44:08.569246 1 ovn_kubernetes.go:1321] Found the DefaultV4MasqueradeSubnet(169.254.0.0/17) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:44:08.569327170+00:00 stderr F I1208 17:44:08.569278 1 ovn_kubernetes.go:1318] Found the DefaultV6MasqueradeSubnet(fd69::/112) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:44:08.658638136+00:00 stderr F I1208 17:44:08.658568 1 log.go:245] successful reconciliation 2025-12-08T17:44:08.770443796+00:00 stderr F I1208 17:44:08.770319 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T17:44:08.799454208+00:00 stderr F I1208 17:44:08.799343 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:44:08.800870626+00:00 stderr F I1208 17:44:08.800639 1 log.go:245] Apply / Create of (operator.openshift.io/v1, Kind=Network) /cluster was successful 2025-12-08T17:44:08.800870626+00:00 stderr F I1208 17:44:08.800803 1 log.go:245] Starting render phase 2025-12-08T17:44:08.832599941+00:00 stderr F I1208 17:44:08.832521 1 ovn_kubernetes.go:344] OVN_EGRESSIP_HEALTHCHECK_PORT env var is not defined. 
Using: 9107 2025-12-08T17:44:08.882912394+00:00 stderr F I1208 17:44:08.882814 1 ovn_kubernetes.go:1457] IP family mode: node=single-stack, controlPlane=single-stack 2025-12-08T17:44:08.882912394+00:00 stderr F I1208 17:44:08.882843 1 ovn_kubernetes.go:1429] IP family change: updateNode=true, updateControlPlane=true 2025-12-08T17:44:08.882912394+00:00 stderr F I1208 17:44:08.882871 1 ovn_kubernetes.go:1601] OVN-Kubernetes control-plane and node already at release version 4.20.1; no changes required 2025-12-08T17:44:08.882963305+00:00 stderr F I1208 17:44:08.882923 1 ovn_kubernetes.go:531] ovnk components: ovnkube-node: isRunning=true, update=true; ovnkube-control-plane: isRunning=true, update=true 2025-12-08T17:44:08.972368354+00:00 stderr F I1208 17:44:08.972274 1 ovn_kubernetes.go:1693] daemonset openshift-network-node-identity/network-node-identity rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 1 -> 1 2025-12-08T17:44:08.972368354+00:00 stderr F I1208 17:44:08.972317 1 ovn_kubernetes.go:1698] daemonset openshift-network-node-identity/network-node-identity rollout complete 2025-12-08T17:44:09.223270728+00:00 stderr F I1208 17:44:09.223160 1 log.go:245] Render phase done, rendered 126 objects 2025-12-08T17:44:09.533434579+00:00 stderr F I1208 17:44:09.533321 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-additional-cni-plugins updated, re-generating status 2025-12-08T17:44:09.533434579+00:00 stderr F I1208 17:44:09.533371 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-additional-cni-plugins updated, re-generating status 2025-12-08T17:44:09.959210983+00:00 stderr F I1208 17:44:09.959142 1 log.go:245] Reconciling configmap from openshift-marketplace/marketplace-trusted-ca 2025-12-08T17:44:09.962782990+00:00 stderr F I1208 17:44:09.962736 1 log.go:245] ConfigMap openshift-marketplace/marketplace-trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:10.773523354+00:00 stderr F I1208 17:44:10.772439 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-additional-cni-plugins updated, re-generating status 2025-12-08T17:44:10.773523354+00:00 stderr F I1208 17:44:10.772475 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-additional-cni-plugins updated, re-generating status 2025-12-08T17:44:11.173768592+00:00 stderr F I1208 17:44:11.173715 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:44:11.174615525+00:00 stderr F I1208 17:44:11.174384 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:44:11.174615525+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:11.174615525+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:11.174615525+00:00 stderr F reason: Unknown 2025-12-08T17:44:11.174615525+00:00 stderr F status: "False" 2025-12-08T17:44:11.174615525+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:11.174615525+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:11.174615525+00:00 stderr F message: |- 2025-12-08T17:44:11.174615525+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:11.174615525+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:11.174615525+00:00 stderr F reason: RolloutHung 
2025-12-08T17:44:11.174615525+00:00 stderr F status: "True" 2025-12-08T17:44:11.174615525+00:00 stderr F type: Degraded 2025-12-08T17:44:11.174615525+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:11.174615525+00:00 stderr F status: "True" 2025-12-08T17:44:11.174615525+00:00 stderr F type: Upgradeable 2025-12-08T17:44:11.174615525+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:11.174615525+00:00 stderr F message: |- 2025-12-08T17:44:11.174615525+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:11.174615525+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:11.174615525+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:11.174615525+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:11.174615525+00:00 stderr F reason: Deploying 2025-12-08T17:44:11.174615525+00:00 stderr F status: "True" 2025-12-08T17:44:11.174615525+00:00 stderr F type: Progressing 2025-12-08T17:44:11.174615525+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:11.174615525+00:00 stderr F status: "True" 2025-12-08T17:44:11.174615525+00:00 stderr F type: Available 2025-12-08T17:44:11.563907173+00:00 stderr F I1208 17:44:11.563833 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:44:11.563907173+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:11.563907173+00:00 stderr F message: |- 2025-12-08T17:44:11.563907173+00:00 stderr F DaemonSet "/openshift-multus/multus-additional-cni-plugins" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:11.563907173+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:11.563907173+00:00 stderr F reason: RolloutHung 2025-12-08T17:44:11.563907173+00:00 stderr F status: "True" 2025-12-08T17:44:11.563907173+00:00 stderr F type: Degraded 2025-12-08T17:44:11.563907173+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:11.563907173+00:00 stderr F status: "True" 2025-12-08T17:44:11.563907173+00:00 stderr F type: Upgradeable 2025-12-08T17:44:11.563907173+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:11.563907173+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:11.563907173+00:00 stderr F reason: Unknown 2025-12-08T17:44:11.563907173+00:00 stderr F status: "False" 2025-12-08T17:44:11.563907173+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:11.563907173+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:11.563907173+00:00 stderr F message: |- 2025-12-08T17:44:11.563907173+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:11.563907173+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:11.563907173+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:11.563907173+00:00 stderr F Deployment 
"/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:11.563907173+00:00 stderr F reason: Deploying 2025-12-08T17:44:11.563907173+00:00 stderr F status: "True" 2025-12-08T17:44:11.563907173+00:00 stderr F type: Progressing 2025-12-08T17:44:11.563907173+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:11.563907173+00:00 stderr F status: "True" 2025-12-08T17:44:11.563907173+00:00 stderr F type: Available 2025-12-08T17:44:11.574122032+00:00 stderr F I1208 17:44:11.574084 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:44:11.574122032+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:44:11.574122032+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:44:11.574122032+00:00 stderr F reason: Unknown 2025-12-08T17:44:11.574122032+00:00 stderr F status: "False" 2025-12-08T17:44:11.574122032+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:44:11.574122032+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:11.574122032+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making 2025-12-08T17:44:11.574122032+00:00 stderr F progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:11.574122032+00:00 stderr F reason: RolloutHung 2025-12-08T17:44:11.574122032+00:00 stderr F status: "True" 2025-12-08T17:44:11.574122032+00:00 stderr F type: Degraded 2025-12-08T17:44:11.574122032+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:44:11.574122032+00:00 stderr F status: "True" 2025-12-08T17:44:11.574122032+00:00 stderr F type: Upgradeable 2025-12-08T17:44:11.574122032+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z" 2025-12-08T17:44:11.574122032+00:00 stderr F message: |- 2025-12-08T17:44:11.574122032+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:44:11.574122032+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready 2025-12-08T17:44:11.574122032+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready 2025-12-08T17:44:11.574122032+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready 2025-12-08T17:44:11.574122032+00:00 stderr F reason: Deploying 2025-12-08T17:44:11.574122032+00:00 stderr F status: "True" 2025-12-08T17:44:11.574122032+00:00 stderr F type: Progressing 2025-12-08T17:44:11.574122032+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:44:11.574122032+00:00 stderr F status: "True" 2025-12-08T17:44:11.574122032+00:00 stderr F type: Available 2025-12-08T17:44:11.574757110+00:00 stderr F I1208 17:44:11.574706 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:44:11.972020216+00:00 stderr F I1208 17:44:11.971940 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:44:11.972020216+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z" 2025-12-08T17:44:11.972020216+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making 2025-12-08T17:44:11.972020216+00:00 stderr F progress - last change 2025-11-03T09:40:45Z 2025-12-08T17:44:11.972020216+00:00 stderr F reason: RolloutHung 
2025-12-08T17:44:11.972020216+00:00 stderr F status: "True"
2025-12-08T17:44:11.972020216+00:00 stderr F type: Degraded
2025-12-08T17:44:11.972020216+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:11.972020216+00:00 stderr F status: "True"
2025-12-08T17:44:11.972020216+00:00 stderr F type: Upgradeable
2025-12-08T17:44:11.972020216+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:11.972020216+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:11.972020216+00:00 stderr F reason: Unknown
2025-12-08T17:44:11.972020216+00:00 stderr F status: "False"
2025-12-08T17:44:11.972020216+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:11.972020216+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:11.972020216+00:00 stderr F message: |-
2025-12-08T17:44:11.972020216+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes)
2025-12-08T17:44:11.972020216+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready
2025-12-08T17:44:11.972020216+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready
2025-12-08T17:44:11.972020216+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready
2025-12-08T17:44:11.972020216+00:00 stderr F reason: Deploying
2025-12-08T17:44:11.972020216+00:00 stderr F status: "True"
2025-12-08T17:44:11.972020216+00:00 stderr F type: Progressing
2025-12-08T17:44:11.972020216+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:11.972020216+00:00 stderr F status: "True"
2025-12-08T17:44:11.972020216+00:00 stderr F type: Available
2025-12-08T17:44:12.159281164+00:00 stderr F I1208 17:44:12.159213 1 log.go:245] Reconciling configmap from openshift-apiserver/trusted-ca-bundle
2025-12-08T17:44:12.162214514+00:00 stderr F I1208 17:44:12.162165 1 log.go:245] ConfigMap openshift-apiserver/trusted-ca-bundle ca-bundle.crt unchanged, skipping
2025-12-08T17:44:12.791840008+00:00 stderr F I1208 17:44:12.790865 1 log.go:245] Network operator config updated with conditions:
2025-12-08T17:44:12.791840008+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:12.791840008+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:12.791840008+00:00 stderr F reason: Unknown
2025-12-08T17:44:12.791840008+00:00 stderr F status: "False"
2025-12-08T17:44:12.791840008+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:12.791840008+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z"
2025-12-08T17:44:12.791840008+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making
2025-12-08T17:44:12.791840008+00:00 stderr F progress - last change 2025-11-03T09:40:45Z
2025-12-08T17:44:12.791840008+00:00 stderr F reason: RolloutHung
2025-12-08T17:44:12.791840008+00:00 stderr F status: "True"
2025-12-08T17:44:12.791840008+00:00 stderr F type: Degraded
2025-12-08T17:44:12.791840008+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:12.791840008+00:00 stderr F status: "True"
2025-12-08T17:44:12.791840008+00:00 stderr F type: Upgradeable
2025-12-08T17:44:12.791840008+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:12.791840008+00:00 stderr F message: |-
2025-12-08T17:44:12.791840008+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready
2025-12-08T17:44:12.791840008+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes)
2025-12-08T17:44:12.791840008+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready
2025-12-08T17:44:12.791840008+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready
2025-12-08T17:44:12.791840008+00:00 stderr F reason: Deploying
2025-12-08T17:44:12.791840008+00:00 stderr F status: "True"
2025-12-08T17:44:12.791840008+00:00 stderr F type: Progressing
2025-12-08T17:44:12.791840008+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:12.791840008+00:00 stderr F status: "True"
2025-12-08T17:44:12.791840008+00:00 stderr F type: Available
2025-12-08T17:44:12.792394073+00:00 stderr F I1208 17:44:12.792345 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged
2025-12-08T17:44:13.174111535+00:00 stderr F I1208 17:44:13.174000 1 log.go:245] ClusterOperator config status updated with conditions:
2025-12-08T17:44:13.174111535+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z"
2025-12-08T17:44:13.174111535+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making
2025-12-08T17:44:13.174111535+00:00 stderr F progress - last change 2025-11-03T09:40:45Z
2025-12-08T17:44:13.174111535+00:00 stderr F reason: RolloutHung
2025-12-08T17:44:13.174111535+00:00 stderr F status: "True"
2025-12-08T17:44:13.174111535+00:00 stderr F type: Degraded
2025-12-08T17:44:13.174111535+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:13.174111535+00:00 stderr F status: "True"
2025-12-08T17:44:13.174111535+00:00 stderr F type: Upgradeable
2025-12-08T17:44:13.174111535+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:13.174111535+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:13.174111535+00:00 stderr F reason: Unknown
2025-12-08T17:44:13.174111535+00:00 stderr F status: "False"
2025-12-08T17:44:13.174111535+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:13.174111535+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:13.174111535+00:00 stderr F message: |-
2025-12-08T17:44:13.174111535+00:00 stderr F DaemonSet "/openshift-multus/network-metrics-daemon" is waiting for other operators to become ready
2025-12-08T17:44:13.174111535+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes)
2025-12-08T17:44:13.174111535+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready
2025-12-08T17:44:13.174111535+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready
2025-12-08T17:44:13.174111535+00:00 stderr F reason: Deploying
2025-12-08T17:44:13.174111535+00:00 stderr F status: "True"
2025-12-08T17:44:13.174111535+00:00 stderr F type: Progressing
2025-12-08T17:44:13.174111535+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:13.174111535+00:00 stderr F status: "True"
2025-12-08T17:44:13.174111535+00:00 stderr F type: Available
2025-12-08T17:44:13.358361331+00:00 stderr F I1208 17:44:13.358311 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster 2025-12-08T17:44:13.364482788+00:00 stderr F I1208 17:44:13.364442 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster was successful 2025-12-08T17:44:13.364582581+00:00 stderr F I1208 17:44:13.364563 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io 2025-12-08T17:44:13.381798900+00:00 stderr F I1208 17:44:13.381690 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io was successful 2025-12-08T17:44:13.381798900+00:00 stderr F I1208 17:44:13.381749 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io 2025-12-08T17:44:13.389816149+00:00 stderr F I1208 17:44:13.389719 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io was successful 2025-12-08T17:44:13.389816149+00:00 stderr F I1208 17:44:13.389785 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io 2025-12-08T17:44:13.401013344+00:00 stderr F I1208 17:44:13.400735 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io was successful 2025-12-08T17:44:13.401066786+00:00 stderr F I1208 17:44:13.401039 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io 2025-12-08T17:44:13.408468168+00:00 stderr F I1208 17:44:13.408431 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io was successful 2025-12-08T17:44:13.408488319+00:00 stderr F I1208 17:44:13.408473 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-multus 2025-12-08T17:44:13.414630836+00:00 stderr F I1208 17:44:13.414600 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-multus was successful 2025-12-08T17:44:13.414711648+00:00 stderr F I1208 17:44:13.414698 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus 2025-12-08T17:44:13.420102455+00:00 stderr F I1208 17:44:13.420059 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus was successful 2025-12-08T17:44:13.420127526+00:00 stderr F I1208 17:44:13.420120 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools 2025-12-08T17:44:13.423706273+00:00 stderr F I1208 17:44:13.423684 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools was successful 2025-12-08T17:44:13.423764375+00:00 stderr F I1208 17:44:13.423754 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus 2025-12-08T17:44:13.431195408+00:00 stderr F I1208 17:44:13.431131 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus was successful 2025-12-08T17:44:13.431245999+00:00 stderr F I1208 17:44:13.431219 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient 2025-12-08T17:44:13.436162464+00:00 stderr F I1208 17:44:13.436131 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, 
Kind=ClusterRoleBinding) /multus-transient was successful 2025-12-08T17:44:13.436242606+00:00 stderr F I1208 17:44:13.436230 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group 2025-12-08T17:44:13.561965136+00:00 stderr F I1208 17:44:13.561722 1 log.go:245] Reconciling configmap from openshift-authentication-operator/trusted-ca-bundle 2025-12-08T17:44:13.562650054+00:00 stderr F I1208 17:44:13.562621 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group was successful 2025-12-08T17:44:13.562720606+00:00 stderr F I1208 17:44:13.562702 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools 2025-12-08T17:44:13.563886947+00:00 stderr F I1208 17:44:13.563845 1 log.go:245] ConfigMap openshift-authentication-operator/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:13.758553687+00:00 stderr F I1208 17:44:13.758473 1 log.go:245] Reconciling configmap from openshift-authentication/v4-0-config-system-trusted-ca-bundle 2025-12-08T17:44:13.760353026+00:00 stderr F I1208 17:44:13.760311 1 log.go:245] ConfigMap openshift-authentication/v4-0-config-system-trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:13.763172083+00:00 stderr F I1208 17:44:13.763146 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools was successful 2025-12-08T17:44:13.763190553+00:00 stderr F I1208 17:44:13.763179 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools 2025-12-08T17:44:13.962391817+00:00 stderr F I1208 17:44:13.962195 1 log.go:245] Reconciling configmap from openshift-console-operator/trusted-ca 2025-12-08T17:44:13.965035219+00:00 stderr F I1208 17:44:13.964350 1 log.go:245] ConfigMap openshift-console-operator/trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:13.966702084+00:00 stderr F I1208 17:44:13.966646 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools was successful 2025-12-08T17:44:13.966702084+00:00 stderr F I1208 17:44:13.966680 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers 2025-12-08T17:44:14.161458017+00:00 stderr F I1208 17:44:14.161377 1 log.go:245] Reconciling configmap from openshift-controller-manager/openshift-global-ca 2025-12-08T17:44:14.169625070+00:00 stderr F I1208 17:44:14.169541 1 log.go:245] ConfigMap openshift-controller-manager/openshift-global-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:14.173346381+00:00 stderr F I1208 17:44:14.173294 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers was successful 2025-12-08T17:44:14.173346381+00:00 stderr F I1208 17:44:14.173335 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts 2025-12-08T17:44:14.365933395+00:00 stderr F I1208 17:44:14.363067 1 log.go:245] Reconciling configmap from openshift-image-registry/trusted-ca 2025-12-08T17:44:14.365933395+00:00 stderr F I1208 17:44:14.363524 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts was successful 2025-12-08T17:44:14.365933395+00:00 stderr F I1208 17:44:14.363579 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts 2025-12-08T17:44:14.369103531+00:00 stderr F I1208 
17:44:14.367712 1 log.go:245] ConfigMap openshift-image-registry/trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:14.562518027+00:00 stderr F I1208 17:44:14.562412 1 log.go:245] Reconciling configmap from openshift-ingress-operator/trusted-ca 2025-12-08T17:44:14.565581860+00:00 stderr F I1208 17:44:14.565499 1 log.go:245] ConfigMap openshift-ingress-operator/trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:44:14.566544786+00:00 stderr F I1208 17:44:14.566468 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts was successful 2025-12-08T17:44:14.566628528+00:00 stderr F I1208 17:44:14.566575 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni 2025-12-08T17:44:14.668397575+00:00 stderr F I1208 17:44:14.668314 1 log.go:245] unable to determine openshift-apiserver apiserver service endpoints: no openshift-apiserver api endpoints found 2025-12-08T17:44:14.675577170+00:00 stderr F I1208 17:44:14.675507 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:44:14.683012424+00:00 stderr F I1208 17:44:14.682934 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:14.688183475+00:00 stderr F I1208 17:44:14.688125 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:14.692542323+00:00 stderr F I1208 17:44:14.692462 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:44:14.698015532+00:00 stderr F I1208 17:44:14.697943 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:44:14.703193144+00:00 stderr F I1208 17:44:14.703129 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:14.758702418+00:00 stderr F I1208 17:44:14.758594 1 log.go:245] Reconciling configmap from openshift-kube-controller-manager/trusted-ca-bundle 2025-12-08T17:44:14.761394822+00:00 stderr F I1208 17:44:14.761302 1 log.go:245] ConfigMap openshift-kube-controller-manager/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:14.765361269+00:00 stderr F I1208 17:44:14.765281 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni was successful 2025-12-08T17:44:14.765407201+00:00 stderr F I1208 17:44:14.765377 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni 2025-12-08T17:44:14.964067189+00:00 stderr F I1208 17:44:14.963981 1 log.go:245] Reconciling configmap from openshift-apiserver-operator/trusted-ca-bundle 2025-12-08T17:44:14.966146776+00:00 stderr F I1208 17:44:14.966075 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni was successful 2025-12-08T17:44:14.966181047+00:00 stderr F I1208 17:44:14.966143 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project 2025-12-08T17:44:14.969234861+00:00 stderr 
F I1208 17:44:14.969132 1 log.go:245] ConfigMap openshift-apiserver-operator/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:15.163583051+00:00 stderr F I1208 17:44:15.163472 1 log.go:245] Reconciling configmap from openshift-console/trusted-ca-bundle 2025-12-08T17:44:15.165153905+00:00 stderr F I1208 17:44:15.165042 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project was successful 2025-12-08T17:44:15.165153905+00:00 stderr F I1208 17:44:15.165113 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist 2025-12-08T17:44:15.167059227+00:00 stderr F I1208 17:44:15.166973 1 log.go:245] ConfigMap openshift-console/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:44:15.364829931+00:00 stderr F I1208 17:44:15.364791 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist was successful 2025-12-08T17:44:15.364949974+00:00 stderr F I1208 17:44:15.364936 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources 2025-12-08T17:44:15.566577724+00:00 stderr F I1208 17:44:15.566533 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources was successful 2025-12-08T17:44:15.566675577+00:00 stderr F I1208 17:44:15.566661 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config 2025-12-08T17:44:15.765638434+00:00 stderr F I1208 17:44:15.765591 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config was successful 2025-12-08T17:44:15.765738046+00:00 stderr F I1208 17:44:15.765724 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config 2025-12-08T17:44:15.962158194+00:00 stderr F I1208 17:44:15.962095 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config was successful 2025-12-08T17:44:15.962158194+00:00 stderr F I1208 17:44:15.962145 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus 2025-12-08T17:44:16.181290472+00:00 stderr F I1208 17:44:16.181241 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus was successful 2025-12-08T17:44:16.181320443+00:00 stderr F I1208 17:44:16.181299 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins 2025-12-08T17:44:16.385633666+00:00 stderr F I1208 17:44:16.385528 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins was successful 2025-12-08T17:44:16.385633666+00:00 stderr F I1208 17:44:16.385601 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa 2025-12-08T17:44:16.565719958+00:00 stderr F I1208 17:44:16.565626 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa was successful 2025-12-08T17:44:16.565719958+00:00 stderr F I1208 17:44:16.565688 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role 2025-12-08T17:44:16.767397399+00:00 stderr F I1208 17:44:16.765914 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role was successful 2025-12-08T17:44:16.767397399+00:00 stderr F I1208 17:44:16.766020 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding 2025-12-08T17:44:16.963224441+00:00 stderr F I1208 17:44:16.963101 1 log.go:245] 
Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding was successful 2025-12-08T17:44:16.963224441+00:00 stderr F I1208 17:44:16.963165 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon 2025-12-08T17:44:17.172026986+00:00 stderr F I1208 17:44:17.171952 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon was successful 2025-12-08T17:44:17.172069897+00:00 stderr F I1208 17:44:17.172039 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network 2025-12-08T17:44:17.367864148+00:00 stderr F I1208 17:44:17.367732 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network was successful 2025-12-08T17:44:17.367864148+00:00 stderr F I1208 17:44:17.367853 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/network-metrics-service 2025-12-08T17:44:17.567479242+00:00 stderr F I1208 17:44:17.567416 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/network-metrics-service was successful 2025-12-08T17:44:17.567544374+00:00 stderr F I1208 17:44:17.567496 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:44:17.765407211+00:00 stderr F I1208 17:44:17.765301 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:44:17.765407211+00:00 stderr F I1208 17:44:17.765368 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:44:17.966423825+00:00 stderr F I1208 17:44:17.966347 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:44:17.966423825+00:00 stderr F I1208 17:44:17.966400 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/multus-admission-controller 2025-12-08T17:44:18.164224270+00:00 stderr F I1208 17:44:18.163577 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/multus-admission-controller was successful 2025-12-08T17:44:18.164224270+00:00 stderr F I1208 17:44:18.163616 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ac 2025-12-08T17:44:18.364000170+00:00 stderr F I1208 17:44:18.363083 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ac was successful 2025-12-08T17:44:18.364000170+00:00 stderr F I1208 17:44:18.363128 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook 2025-12-08T17:44:18.565235409+00:00 stderr F I1208 17:44:18.564499 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook was successful 2025-12-08T17:44:18.565235409+00:00 stderr F I1208 17:44:18.564544 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook 2025-12-08T17:44:18.763109266+00:00 stderr F I1208 17:44:18.763038 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook was successful 2025-12-08T17:44:18.763135406+00:00 stderr F I1208 17:44:18.763116 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io 2025-12-08T17:44:18.968790147+00:00 stderr F I1208 
17:44:18.966401 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io was successful 2025-12-08T17:44:18.968790147+00:00 stderr F I1208 17:44:18.966440 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller 2025-12-08T17:44:19.166420387+00:00 stderr F I1208 17:44:19.166207 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller was successful 2025-12-08T17:44:19.166420387+00:00 stderr F I1208 17:44:19.166252 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller 2025-12-08T17:44:19.365115057+00:00 stderr F I1208 17:44:19.365047 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller was successful 2025-12-08T17:44:19.365143967+00:00 stderr F I1208 17:44:19.365121 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:44:19.566606853+00:00 stderr F I1208 17:44:19.566544 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:44:19.566631394+00:00 stderr F I1208 17:44:19.566601 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:44:19.766953638+00:00 stderr F I1208 17:44:19.766785 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:44:19.767034470+00:00 stderr F I1208 17:44:19.767022 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules 2025-12-08T17:44:19.966258334+00:00 stderr F I1208 17:44:19.966226 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules was successful 2025-12-08T17:44:19.966327366+00:00 stderr F I1208 17:44:19.966317 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-ovn-kubernetes 2025-12-08T17:44:20.172397767+00:00 stderr F I1208 17:44:20.172357 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-ovn-kubernetes was successful 2025-12-08T17:44:20.172493339+00:00 stderr F I1208 17:44:20.172479 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org 2025-12-08T17:44:20.383921097+00:00 stderr F I1208 17:44:20.380946 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org was successful 2025-12-08T17:44:20.383921097+00:00 stderr F I1208 17:44:20.381024 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org 2025-12-08T17:44:20.574932327+00:00 stderr F I1208 17:44:20.572133 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org was successful 2025-12-08T17:44:20.574932327+00:00 stderr F I1208 17:44:20.572209 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org 2025-12-08T17:44:20.772740123+00:00 stderr F I1208 17:44:20.772699 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org was successful 2025-12-08T17:44:20.772830115+00:00 stderr F I1208 17:44:20.772820 1 log.go:245] reconciling 
(apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org 2025-12-08T17:44:20.975420171+00:00 stderr F I1208 17:44:20.974956 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org was successful 2025-12-08T17:44:20.975518894+00:00 stderr F I1208 17:44:20.975507 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org 2025-12-08T17:44:21.171432188+00:00 stderr F I1208 17:44:21.170350 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org was successful 2025-12-08T17:44:21.171432188+00:00 stderr F I1208 17:44:21.170409 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:44:21.412356169+00:00 stderr F I1208 17:44:21.412265 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:44:21.412356169+00:00 stderr F I1208 17:44:21.412318 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:44:21.653827516+00:00 stderr F I1208 17:44:21.651783 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:44:21.653827516+00:00 stderr F I1208 17:44:21.651843 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io 2025-12-08T17:44:21.768036301+00:00 stderr F I1208 17:44:21.767637 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io was successful 2025-12-08T17:44:21.768036301+00:00 stderr F I1208 17:44:21.767678 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org 2025-12-08T17:44:21.999380621+00:00 stderr F I1208 17:44:21.997778 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:44:21.999380621+00:00 stderr F I1208 17:44:21.997816 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org 2025-12-08T17:44:22.268936525+00:00 stderr F I1208 17:44:22.267338 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:44:22.268936525+00:00 stderr F I1208 17:44:22.267376 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:44:22.372305454+00:00 stderr F I1208 17:44:22.372126 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:44:22.372305454+00:00 stderr F I1208 17:44:22.372193 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited 2025-12-08T17:44:22.566015298+00:00 stderr F I1208 17:44:22.565629 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:44:22.566015298+00:00 stderr F I1208 17:44:22.565683 1 log.go:245] 
reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited 2025-12-08T17:44:22.774263558+00:00 stderr F I1208 17:44:22.770125 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited was successful 2025-12-08T17:44:22.774263558+00:00 stderr F I1208 17:44:22.770168 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited 2025-12-08T17:44:22.974641664+00:00 stderr F I1208 17:44:22.973888 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:44:22.974641664+00:00 stderr F I1208 17:44:22.973935 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited 2025-12-08T17:44:23.169916620+00:00 stderr F I1208 17:44:23.169538 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited was successful 2025-12-08T17:44:23.169916620+00:00 stderr F I1208 17:44:23.169577 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy 2025-12-08T17:44:23.365929646+00:00 stderr F I1208 17:44:23.365771 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy was successful 2025-12-08T17:44:23.365929646+00:00 stderr F I1208 17:44:23.365808 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy 2025-12-08T17:44:23.415351595+00:00 stderr F I1208 17:44:23.415307 1 allowlist_controller.go:149] Successfully updated sysctl allowlist 2025-12-08T17:44:23.423563939+00:00 stderr F I1208 17:44:23.423518 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.434425685+00:00 stderr F I1208 17:44:23.434389 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.445088286+00:00 stderr F I1208 17:44:23.444051 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.454914814+00:00 stderr F I1208 17:44:23.452095 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.463018055+00:00 stderr F I1208 17:44:23.462373 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.471761423+00:00 stderr F I1208 17:44:23.471322 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.499245673+00:00 stderr F I1208 17:44:23.498538 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.507266322+00:00 stderr F I1208 17:44:23.507227 1 log.go:245] The check 
PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.523439773+00:00 stderr F I1208 17:44:23.523401 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.536429317+00:00 stderr F I1208 17:44:23.536397 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.571197416+00:00 stderr F I1208 17:44:23.571154 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy was successful 2025-12-08T17:44:23.571287848+00:00 stderr F I1208 17:44:23.571274 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config 2025-12-08T17:44:23.628425277+00:00 stderr F I1208 17:44:23.628384 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.786955961+00:00 stderr F I1208 17:44:23.785525 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config was successful 2025-12-08T17:44:23.786955961+00:00 stderr F I1208 17:44:23.785561 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:44:23.837826199+00:00 stderr F I1208 17:44:23.837785 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:23.989407803+00:00 stderr F I1208 17:44:23.986158 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:44:23.989407803+00:00 stderr F I1208 17:44:23.986227 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:44:24.174963704+00:00 stderr F I1208 17:44:24.174925 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:44:24.175047927+00:00 stderr F I1208 17:44:24.175038 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:44:24.368191285+00:00 stderr F I1208 17:44:24.368149 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:44:24.368304178+00:00 stderr F I1208 17:44:24.368291 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:44:24.578388019+00:00 stderr F I1208 17:44:24.578347 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:44:24.578480101+00:00 stderr F I1208 17:44:24.578469 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:44:24.766000176+00:00 stderr F I1208 17:44:24.765287 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, 
Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful
2025-12-08T17:44:24.766000176+00:00 stderr F I1208 17:44:24.765326 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn
2025-12-08T17:44:24.981720990+00:00 stderr F I1208 17:44:24.981676 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn was successful
2025-12-08T17:44:24.981720990+00:00 stderr F I1208 17:44:24.981714 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer
2025-12-08T17:44:24.982579443+00:00 stderr F I1208 17:44:24.982532 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/network-metrics-daemon updated, re-generating status
2025-12-08T17:44:24.982579443+00:00 stderr F I1208 17:44:24.982562 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/network-metrics-daemon updated, re-generating status
2025-12-08T17:44:25.051919025+00:00 stderr F I1208 17:44:25.051650 1 log.go:245] Network operator config updated with conditions:
2025-12-08T17:44:25.051919025+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:25.051919025+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:25.051919025+00:00 stderr F reason: Unknown
2025-12-08T17:44:25.051919025+00:00 stderr F status: "False"
2025-12-08T17:44:25.051919025+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:25.051919025+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z"
2025-12-08T17:44:25.051919025+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making
2025-12-08T17:44:25.051919025+00:00 stderr F progress - last change 2025-11-03T09:40:45Z
2025-12-08T17:44:25.051919025+00:00 stderr F reason: RolloutHung
2025-12-08T17:44:25.051919025+00:00 stderr F status: "True"
2025-12-08T17:44:25.051919025+00:00 stderr F type: Degraded
2025-12-08T17:44:25.051919025+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:25.051919025+00:00 stderr F status: "True"
2025-12-08T17:44:25.051919025+00:00 stderr F type: Upgradeable
2025-12-08T17:44:25.051919025+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:25.051919025+00:00 stderr F message: |-
2025-12-08T17:44:25.051919025+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes)
2025-12-08T17:44:25.051919025+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready
2025-12-08T17:44:25.051919025+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready
2025-12-08T17:44:25.051919025+00:00 stderr F reason: Deploying
2025-12-08T17:44:25.051919025+00:00 stderr F status: "True"
2025-12-08T17:44:25.051919025+00:00 stderr F type: Progressing
2025-12-08T17:44:25.051919025+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:25.051919025+00:00 stderr F status: "True"
2025-12-08T17:44:25.051919025+00:00 stderr F type: Available
2025-12-08T17:44:25.053688254+00:00 stderr F I1208 17:44:25.052192 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged
2025-12-08T17:44:25.101728164+00:00 stderr F I1208 17:44:25.099623 1 log.go:245] ClusterOperator config status updated with conditions:
2025-12-08T17:44:25.101728164+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z"
2025-12-08T17:44:25.101728164+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making
2025-12-08T17:44:25.101728164+00:00 stderr F progress - last change 2025-11-03T09:40:45Z
2025-12-08T17:44:25.101728164+00:00 stderr F reason: RolloutHung
2025-12-08T17:44:25.101728164+00:00 stderr F status: "True"
2025-12-08T17:44:25.101728164+00:00 stderr F type: Degraded
2025-12-08T17:44:25.101728164+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:25.101728164+00:00 stderr F status: "True"
2025-12-08T17:44:25.101728164+00:00 stderr F type: Upgradeable
2025-12-08T17:44:25.101728164+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:25.101728164+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:25.101728164+00:00 stderr F reason: Unknown
2025-12-08T17:44:25.101728164+00:00 stderr F status: "False"
2025-12-08T17:44:25.101728164+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:25.101728164+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:25.101728164+00:00 stderr F message: |-
2025-12-08T17:44:25.101728164+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes)
2025-12-08T17:44:25.101728164+00:00 stderr F Deployment "/openshift-multus/multus-admission-controller" is waiting for other operators to become ready
2025-12-08T17:44:25.101728164+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready
2025-12-08T17:44:25.101728164+00:00 stderr F reason: Deploying
2025-12-08T17:44:25.101728164+00:00 stderr F status: "True"
2025-12-08T17:44:25.101728164+00:00 stderr F type: Progressing
2025-12-08T17:44:25.101728164+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:25.101728164+00:00 stderr F status: "True"
2025-12-08T17:44:25.101728164+00:00 stderr F type: Available
2025-12-08T17:44:25.165383411+00:00 stderr F I1208 17:44:25.165332 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer was successful
2025-12-08T17:44:25.165383411+00:00 stderr F I1208 17:44:25.165374 1 log.go:245] reconciling (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes
2025-12-08T17:44:25.373267721+00:00 stderr F I1208 17:44:25.371320 1 log.go:245] Apply / Create of (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes was successful
2025-12-08T17:44:25.373267721+00:00 stderr F I1208 17:44:25.371357 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader
2025-12-08T17:44:25.440697430+00:00 stderr F I1208 17:44:25.438825 1 log.go:245] Reconciling update to IngressController openshift-ingress-operator/default
2025-12-08T17:44:25.564283561+00:00 stderr F I1208 17:44:25.563105 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-admission-controller updated, re-generating status
2025-12-08T17:44:25.564283561+00:00 stderr F I1208 17:44:25.563129 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-admission-controller updated, re-generating status
2025-12-08T17:44:25.571588171+00:00 stderr F I1208 17:44:25.571536 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader was successful
2025-12-08T17:44:25.571618391+00:00 stderr F I1208 17:44:25.571588 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib
2025-12-08T17:44:25.625185702+00:00 stderr F I1208 17:44:25.624084 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied
2025-12-08T17:44:25.632059209+00:00 stderr F I1208 17:44:25.630343 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:25.643209804+00:00 stderr F I1208 17:44:25.641583 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:25.654916023+00:00 stderr F I1208 17:44:25.652401 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:25.665540213+00:00 stderr F I1208 17:44:25.665452 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:25.668951096+00:00 stderr F I1208 17:44:25.668759 1 log.go:245] Network operator config updated with conditions:
2025-12-08T17:44:25.668951096+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:25.668951096+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:25.668951096+00:00 stderr F reason: Unknown
2025-12-08T17:44:25.668951096+00:00 stderr F status: "False"
2025-12-08T17:44:25.668951096+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:25.668951096+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z"
2025-12-08T17:44:25.668951096+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making
2025-12-08T17:44:25.668951096+00:00 stderr F progress - last change 2025-11-03T09:40:45Z
2025-12-08T17:44:25.668951096+00:00 stderr F reason: RolloutHung
2025-12-08T17:44:25.668951096+00:00 stderr F status: "True"
2025-12-08T17:44:25.668951096+00:00 stderr F type: Degraded
2025-12-08T17:44:25.668951096+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:25.668951096+00:00 stderr F status: "True"
2025-12-08T17:44:25.668951096+00:00 stderr F type: Upgradeable
2025-12-08T17:44:25.668951096+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:25.668951096+00:00 stderr F message: |-
2025-12-08T17:44:25.668951096+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes)
2025-12-08T17:44:25.668951096+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready
2025-12-08T17:44:25.668951096+00:00 stderr F reason: Deploying
2025-12-08T17:44:25.668951096+00:00 stderr F status: "True"
2025-12-08T17:44:25.668951096+00:00 stderr F type: Progressing
2025-12-08T17:44:25.668951096+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:25.668951096+00:00 stderr F status: "True"
2025-12-08T17:44:25.668951096+00:00 stderr F type: Available
2025-12-08T17:44:25.669980654+00:00 stderr F I1208 17:44:25.669272 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged
2025-12-08T17:44:25.678924378+00:00 stderr F I1208 17:44:25.678185 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:25.686458334+00:00 stderr F I1208 17:44:25.685997 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied
2025-12-08T17:44:25.704452384+00:00 stderr F I1208 17:44:25.699396 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied
2025-12-08T17:44:25.707943019+00:00 stderr F I1208 17:44:25.707388 1 log.go:245] ClusterOperator config status updated with conditions:
2025-12-08T17:44:25.707943019+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z"
2025-12-08T17:44:25.707943019+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making
2025-12-08T17:44:25.707943019+00:00 stderr F progress - last change 2025-11-03T09:40:45Z
2025-12-08T17:44:25.707943019+00:00 stderr F reason: RolloutHung
2025-12-08T17:44:25.707943019+00:00 stderr F status: "True"
2025-12-08T17:44:25.707943019+00:00 stderr F type: Degraded
2025-12-08T17:44:25.707943019+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:25.707943019+00:00 stderr F status: "True"
2025-12-08T17:44:25.707943019+00:00 stderr F type: Upgradeable
2025-12-08T17:44:25.707943019+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:25.707943019+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:25.707943019+00:00 stderr F reason: Unknown
2025-12-08T17:44:25.707943019+00:00 stderr F status: "False"
2025-12-08T17:44:25.707943019+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:25.707943019+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:25.707943019+00:00 stderr F message: |-
2025-12-08T17:44:25.707943019+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes)
2025-12-08T17:44:25.707943019+00:00 stderr F Deployment "/openshift-network-console/networking-console-plugin" is waiting for other operators to become ready
2025-12-08T17:44:25.707943019+00:00 stderr F reason: Deploying
2025-12-08T17:44:25.707943019+00:00 stderr F status: "True"
2025-12-08T17:44:25.707943019+00:00 stderr F type: Progressing
2025-12-08T17:44:25.707943019+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:25.707943019+00:00 stderr F status: "True"
2025-12-08T17:44:25.707943019+00:00 stderr F type: Available
2025-12-08T17:44:25.712645828+00:00 stderr F I1208 17:44:25.712457 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:25.770963299+00:00 stderr F I1208 17:44:25.770524 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib was successful
2025-12-08T17:44:25.770963299+00:00 stderr F I1208 17:44:25.770575 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor
2025-12-08T17:44:25.968005463+00:00 stderr F I1208 17:44:25.967276 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor was successful
2025-12-08T17:44:25.968005463+00:00 stderr F I1208 17:44:25.967317 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer 2025-12-08T17:44:26.167963708+00:00 stderr F I1208 17:44:26.167299 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer was successful 2025-12-08T17:44:26.167963708+00:00 stderr F I1208 17:44:26.167338 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules 2025-12-08T17:44:26.367003207+00:00 stderr F I1208 17:44:26.366933 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules was successful 2025-12-08T17:44:26.367003207+00:00 stderr F I1208 17:44:26.366975 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules 2025-12-08T17:44:26.569118410+00:00 stderr F I1208 17:44:26.569039 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules was successful 2025-12-08T17:44:26.569118410+00:00 stderr F I1208 17:44:26.569083 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features 2025-12-08T17:44:26.762080024+00:00 stderr F I1208 17:44:26.761971 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features was successful 2025-12-08T17:44:26.762080024+00:00 stderr F I1208 17:44:26.762011 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics 2025-12-08T17:44:26.965644115+00:00 stderr F I1208 17:44:26.964922 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics was successful 2025-12-08T17:44:26.965776919+00:00 stderr F I1208 17:44:26.965762 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:44:27.173127855+00:00 stderr F I1208 17:44:27.172408 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:44:27.173127855+00:00 stderr F I1208 17:44:27.172461 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node 2025-12-08T17:44:27.371942898+00:00 stderr F I1208 17:44:27.370229 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node was successful 2025-12-08T17:44:27.371942898+00:00 stderr F I1208 17:44:27.370268 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:44:27.530579895+00:00 stderr F I1208 17:44:27.529326 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:44:27.553507551+00:00 stderr F I1208 17:44:27.550362 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:27.561477878+00:00 stderr F I1208 17:44:27.560909 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:27.573689201+00:00 stderr F I1208 
17:44:27.573284 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:27.575801419+00:00 stderr F I1208 17:44:27.575364 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:44:27.575801419+00:00 stderr F I1208 17:44:27.575403 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:44:27.585155674+00:00 stderr F I1208 17:44:27.584396 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:27.597085880+00:00 stderr F I1208 17:44:27.595139 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:27.606460065+00:00 stderr F I1208 17:44:27.605574 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:44:27.625073863+00:00 stderr F I1208 17:44:27.620550 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:44:27.628012203+00:00 stderr F I1208 17:44:27.627971 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:27.768519386+00:00 stderr F I1208 17:44:27.767648 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:44:27.768519386+00:00 stderr F I1208 17:44:27.767696 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:44:27.976755245+00:00 stderr F I1208 17:44:27.975298 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:44:27.976755245+00:00 stderr F I1208 17:44:27.975363 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-host-network 2025-12-08T17:44:28.165265777+00:00 stderr F I1208 17:44:28.165223 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-host-network was successful 2025-12-08T17:44:28.165265777+00:00 stderr F I1208 17:44:28.165261 1 log.go:245] reconciling (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas 2025-12-08T17:44:28.364691847+00:00 stderr F I1208 17:44:28.364649 1 log.go:245] Apply / Create of (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas was successful 2025-12-08T17:44:28.364722778+00:00 stderr F I1208 17:44:28.364691 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane 2025-12-08T17:44:28.572035973+00:00 stderr F I1208 17:44:28.571993 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane was successful 2025-12-08T17:44:28.572035973+00:00 stderr F I1208 17:44:28.572031 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node 2025-12-08T17:44:28.789641498+00:00 stderr F I1208 17:44:28.789582 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) 
openshift-ovn-kubernetes/ovnkube-node was successful 2025-12-08T17:44:28.789641498+00:00 stderr F I1208 17:44:28.789624 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label 2025-12-08T17:44:28.967201792+00:00 stderr F I1208 17:44:28.967136 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label was successful 2025-12-08T17:44:28.967201792+00:00 stderr F I1208 17:44:28.967178 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding 2025-12-08T17:44:29.163081895+00:00 stderr F I1208 17:44:29.162778 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding was successful 2025-12-08T17:44:29.163081895+00:00 stderr F I1208 17:44:29.162819 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-diagnostics 2025-12-08T17:44:29.367112081+00:00 stderr F I1208 17:44:29.365615 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-diagnostics was successful 2025-12-08T17:44:29.367112081+00:00 stderr F I1208 17:44:29.365669 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:44:29.562385166+00:00 stderr F I1208 17:44:29.562163 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:44:29.562385166+00:00 stderr F I1208 17:44:29.562210 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:44:29.763909143+00:00 stderr F I1208 17:44:29.763834 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:44:29.763909143+00:00 stderr F I1208 17:44:29.763898 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:44:29.962622114+00:00 stderr F I1208 17:44:29.962449 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:44:29.962622114+00:00 stderr F I1208 17:44:29.962488 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics 2025-12-08T17:44:30.163202695+00:00 stderr F I1208 17:44:30.162713 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics was successful 2025-12-08T17:44:30.163266097+00:00 stderr F I1208 17:44:30.163199 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics 2025-12-08T17:44:30.364242158+00:00 stderr F I1208 17:44:30.364198 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics was successful 2025-12-08T17:44:30.364282129+00:00 stderr F I1208 17:44:30.364239 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics 2025-12-08T17:44:30.568922982+00:00 stderr F I1208 17:44:30.565685 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics was successful 2025-12-08T17:44:30.568922982+00:00 stderr F I1208 17:44:30.565731 1 log.go:245] 
reconciling (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606531 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.606462506 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606567 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.606553798 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606589 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.606574299 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606605 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.606594259 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606621 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.6066103 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606637 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.60662605 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606653 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.606641881 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606669 1 
tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.606658631 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606691 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.606675122 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606711 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.606699872 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.606962 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"*.metrics.openshift-network-operator.svc\" [serving] validServingFor=[*.metrics.openshift-network-operator.svc,*.metrics.openshift-network-operator.svc.cluster.local,metrics.openshift-network-operator.svc,metrics.openshift-network-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:19 +0000 UTC to 2027-11-02 07:52:20 +0000 UTC (now=2025-12-08 17:44:30.606941109 +0000 UTC))" 2025-12-08T17:44:30.608416810+00:00 stderr F I1208 17:44:30.607126 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215843\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215842\" (2025-12-08 16:44:02 +0000 UTC to 2028-12-08 16:44:02 +0000 UTC (now=2025-12-08 17:44:30.607111513 +0000 UTC))" 2025-12-08T17:44:30.765479104+00:00 stderr F I1208 17:44:30.765420 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:44:30.765479104+00:00 stderr F I1208 17:44:30.765464 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-source 2025-12-08T17:44:30.963599467+00:00 stderr F I1208 17:44:30.963546 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:44:30.963599467+00:00 stderr F I1208 17:44:30.963593 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source 2025-12-08T17:44:31.164892648+00:00 stderr F I1208 17:44:31.164767 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:44:31.164892648+00:00 stderr F I1208 17:44:31.164808 1 log.go:245] 
reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:44:31.361966244+00:00 stderr F I1208 17:44:31.361916 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:44:31.361966244+00:00 stderr F I1208 17:44:31.361955 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:44:31.565273249+00:00 stderr F I1208 17:44:31.565193 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:44:31.565273249+00:00 stderr F I1208 17:44:31.565255 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target 2025-12-08T17:44:31.765595494+00:00 stderr F I1208 17:44:31.765543 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:44:31.765595494+00:00 stderr F I1208 17:44:31.765581 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-target 2025-12-08T17:44:31.963069080+00:00 stderr F I1208 17:44:31.963025 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:44:31.963098350+00:00 stderr F I1208 17:44:31.963071 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role 2025-12-08T17:44:32.163871197+00:00 stderr F I1208 17:44:32.163814 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role was successful 2025-12-08T17:44:32.163871197+00:00 stderr F I1208 17:44:32.163865 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding 2025-12-08T17:44:32.362505466+00:00 stderr F I1208 17:44:32.362449 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding was successful 2025-12-08T17:44:32.362505466+00:00 stderr F I1208 17:44:32.362495 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-node-identity 2025-12-08T17:44:32.570346615+00:00 stderr F I1208 17:44:32.569850 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-node-identity was successful 2025-12-08T17:44:32.570346615+00:00 stderr F I1208 17:44:32.570331 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity 2025-12-08T17:44:32.766305240+00:00 stderr F I1208 17:44:32.766256 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:44:32.766305240+00:00 stderr F I1208 17:44:32.766294 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity 2025-12-08T17:44:32.963277092+00:00 stderr F I1208 17:44:32.963236 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity was successful 2025-12-08T17:44:32.963309723+00:00 stderr F I1208 17:44:32.963277 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity 2025-12-08T17:44:33.162616280+00:00 stderr F I1208 17:44:33.162526 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity was successful 2025-12-08T17:44:33.162616280+00:00 stderr F I1208 17:44:33.162566 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:44:33.363221572+00:00 stderr F I1208 17:44:33.363176 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:44:33.363221572+00:00 stderr F I1208 17:44:33.363212 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:44:33.543925600+00:00 stderr F I1208 17:44:33.542178 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.552246037+00:00 stderr F I1208 17:44:33.552123 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.563659410+00:00 stderr F I1208 17:44:33.563606 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.564357018+00:00 stderr F I1208 17:44:33.564332 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:44:33.564387849+00:00 stderr F I1208 17:44:33.564362 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 2025-12-08T17:44:33.572175872+00:00 stderr F I1208 17:44:33.572036 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.579962544+00:00 stderr F I1208 17:44:33.579919 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.591047366+00:00 stderr F I1208 17:44:33.590984 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.598376426+00:00 stderr F I1208 17:44:33.596214 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.601968914+00:00 stderr F I1208 17:44:33.600289 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.608095551+00:00 stderr F I1208 17:44:33.607944 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.614220479+00:00 stderr F I1208 17:44:33.614174 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.738931280+00:00 stderr F I1208 17:44:33.738890 1 log.go:245] The check 
PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.762110072+00:00 stderr F I1208 17:44:33.762061 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 was successful 2025-12-08T17:44:33.762110072+00:00 stderr F I1208 17:44:33.762103 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm 2025-12-08T17:44:33.944929820+00:00 stderr F I1208 17:44:33.941360 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:33.970640691+00:00 stderr F I1208 17:44:33.968682 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm was successful 2025-12-08T17:44:33.970640691+00:00 stderr F I1208 17:44:33.968728 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity 2025-12-08T17:44:34.137398379+00:00 stderr F I1208 17:44:34.137336 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:34.162760560+00:00 stderr F I1208 17:44:34.162715 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:44:34.162780581+00:00 stderr F I1208 17:44:34.162761 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io 2025-12-08T17:44:34.337297322+00:00 stderr F I1208 17:44:34.337242 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:34.363988560+00:00 stderr F I1208 17:44:34.363928 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io was successful 2025-12-08T17:44:34.363988560+00:00 stderr F I1208 17:44:34.363978 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity 2025-12-08T17:44:34.537349148+00:00 stderr F I1208 17:44:34.537287 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:34.566996448+00:00 stderr F I1208 17:44:34.566938 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:44:34.566996448+00:00 stderr F I1208 17:44:34.566979 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules 2025-12-08T17:44:34.738686640+00:00 stderr F I1208 17:44:34.738628 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:34.774791845+00:00 stderr F I1208 17:44:34.774728 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules was successful 2025-12-08T17:44:34.774791845+00:00 stderr F I1208 
17:44:34.774770 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter 2025-12-08T17:44:34.938323006+00:00 stderr F I1208 17:44:34.938262 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:34.962455914+00:00 stderr F I1208 17:44:34.962415 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter was successful 2025-12-08T17:44:34.962547997+00:00 stderr F I1208 17:44:34.962535 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter 2025-12-08T17:44:35.142142546+00:00 stderr F I1208 17:44:35.142084 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:44:35.162535461+00:00 stderr F I1208 17:44:35.162487 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:44:35.162647705+00:00 stderr F I1208 17:44:35.162628 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter 2025-12-08T17:44:35.340397454+00:00 stderr F I1208 17:44:35.340347 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:44:35.361744105+00:00 stderr F I1208 17:44:35.361700 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter was successful 2025-12-08T17:44:35.361744105+00:00 stderr F I1208 17:44:35.361738 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script 2025-12-08T17:44:35.542641950+00:00 stderr F I1208 17:44:35.542577 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:35.563051476+00:00 stderr F I1208 17:44:35.562968 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script was successful 2025-12-08T17:44:35.563051476+00:00 stderr F I1208 17:44:35.563007 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter 2025-12-08T17:44:35.740207089+00:00 stderr F I1208 17:44:35.740153 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:44:35.767508383+00:00 stderr F I1208 17:44:35.767458 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:44:35.767508383+00:00 stderr F I1208 17:44:35.767499 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-console 2025-12-08T17:44:35.942691462+00:00 stderr F I1208 17:44:35.942193 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:35.967607962+00:00 stderr F I1208 17:44:35.967537 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-console was successful 2025-12-08T17:44:35.967642123+00:00 stderr F I1208 17:44:35.967610 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin 
2025-12-08T17:44:36.138033381+00:00 stderr F I1208 17:44:36.137918 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:36.162312683+00:00 stderr F I1208 17:44:36.162257 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:44:36.162312683+00:00 stderr F I1208 17:44:36.162305 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin 2025-12-08T17:44:36.337707447+00:00 stderr F I1208 17:44:36.337644 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:36.367673204+00:00 stderr F I1208 17:44:36.367621 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:44:36.367673204+00:00 stderr F I1208 17:44:36.367662 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-console/networking-console-plugin 2025-12-08T17:44:36.537241430+00:00 stderr F I1208 17:44:36.537186 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:36.566959801+00:00 stderr F I1208 17:44:36.566847 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:44:36.566959801+00:00 stderr F I1208 17:44:36.566913 1 log.go:245] reconciling (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin 2025-12-08T17:44:36.738365366+00:00 stderr F I1208 17:44:36.738312 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:36.764484858+00:00 stderr F I1208 17:44:36.764403 1 log.go:245] Apply / Create of (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin was successful 2025-12-08T17:44:36.774049040+00:00 stderr F I1208 17:44:36.774010 1 log.go:245] Operconfig Controller complete 2025-12-08T17:44:36.936922682+00:00 stderr F I1208 17:44:36.936837 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:37.139296532+00:00 stderr F I1208 17:44:37.139236 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:44:37.337796773+00:00 stderr F I1208 17:44:37.337734 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:44:37.538326172+00:00 stderr F I1208 17:44:37.538279 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:40.567698895+00:00 stderr F I1208 17:44:40.567630 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:44:40.567698895+00:00 stderr F I1208 17:44:40.567666 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:44:40.587183538+00:00 stderr F 
I1208 17:44:40.587120 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status
2025-12-08T17:44:40.587183538+00:00 stderr F I1208 17:44:40.587151 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status
2025-12-08T17:44:40.626128861+00:00 stderr F I1208 17:44:40.626047 1 log.go:245] Network operator config updated with conditions:
2025-12-08T17:44:40.626128861+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:40.626128861+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:40.626128861+00:00 stderr F reason: Unknown
2025-12-08T17:44:40.626128861+00:00 stderr F status: "False"
2025-12-08T17:44:40.626128861+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:40.626128861+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z"
2025-12-08T17:44:40.626128861+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making
2025-12-08T17:44:40.626128861+00:00 stderr F progress - last change 2025-11-03T09:40:45Z
2025-12-08T17:44:40.626128861+00:00 stderr F reason: RolloutHung
2025-12-08T17:44:40.626128861+00:00 stderr F status: "True"
2025-12-08T17:44:40.626128861+00:00 stderr F type: Degraded
2025-12-08T17:44:40.626128861+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:40.626128861+00:00 stderr F status: "True"
2025-12-08T17:44:40.626128861+00:00 stderr F type: Upgradeable
2025-12-08T17:44:40.626128861+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:40.626128861+00:00 stderr F message: Deployment "/openshift-network-console/networking-console-plugin" is waiting
2025-12-08T17:44:40.626128861+00:00 stderr F for other operators to become ready
2025-12-08T17:44:40.626128861+00:00 stderr F reason: Deploying
2025-12-08T17:44:40.626128861+00:00 stderr F status: "True"
2025-12-08T17:44:40.626128861+00:00 stderr F type: Progressing
2025-12-08T17:44:40.626128861+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:40.626128861+00:00 stderr F status: "True"
2025-12-08T17:44:40.626128861+00:00 stderr F type: Available
2025-12-08T17:44:40.626638395+00:00 stderr F I1208 17:44:40.626551 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged
2025-12-08T17:44:40.654190092+00:00 stderr F I1208 17:44:40.653918 1 log.go:245] ClusterOperator config status updated with conditions:
2025-12-08T17:44:40.654190092+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:03Z"
2025-12-08T17:44:40.654190092+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" rollout is not making
2025-12-08T17:44:40.654190092+00:00 stderr F progress - last change 2025-11-03T09:40:45Z
2025-12-08T17:44:40.654190092+00:00 stderr F reason: RolloutHung
2025-12-08T17:44:40.654190092+00:00 stderr F status: "True"
2025-12-08T17:44:40.654190092+00:00 stderr F type: Degraded
2025-12-08T17:44:40.654190092+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:40.654190092+00:00 stderr F status: "True"
2025-12-08T17:44:40.654190092+00:00 stderr F type: Upgradeable
2025-12-08T17:44:40.654190092+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:40.654190092+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:40.654190092+00:00 stderr F reason: Unknown
2025-12-08T17:44:40.654190092+00:00 stderr F status: "False"
2025-12-08T17:44:40.654190092+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:40.654190092+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:40.654190092+00:00 stderr F message: Deployment "/openshift-network-console/networking-console-plugin" is waiting
2025-12-08T17:44:40.654190092+00:00 stderr F for other operators to become ready
2025-12-08T17:44:40.654190092+00:00 stderr F reason: Deploying
2025-12-08T17:44:40.654190092+00:00 stderr F status: "True"
2025-12-08T17:44:40.654190092+00:00 stderr F type: Progressing
2025-12-08T17:44:40.654190092+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:40.654190092+00:00 stderr F status: "True"
2025-12-08T17:44:40.654190092+00:00 stderr F type: Available
2025-12-08T17:44:40.704918604+00:00 stderr F I1208 17:44:40.698836 1 log.go:245] Network operator config updated with conditions:
2025-12-08T17:44:40.704918604+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:40.704918604+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:40.704918604+00:00 stderr F reason: Unknown
2025-12-08T17:44:40.704918604+00:00 stderr F status: "False"
2025-12-08T17:44:40.704918604+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:40.704918604+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z"
2025-12-08T17:44:40.704918604+00:00 stderr F status: "False"
2025-12-08T17:44:40.704918604+00:00 stderr F type: Degraded
2025-12-08T17:44:40.704918604+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:40.704918604+00:00 stderr F status: "True"
2025-12-08T17:44:40.704918604+00:00 stderr F type: Upgradeable
2025-12-08T17:44:40.704918604+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:40.704918604+00:00 stderr F message: Deployment "/openshift-network-console/networking-console-plugin" is waiting
2025-12-08T17:44:40.704918604+00:00 stderr F for other operators to become ready
2025-12-08T17:44:40.704918604+00:00 stderr F reason: Deploying
2025-12-08T17:44:40.704918604+00:00 stderr F status: "True"
2025-12-08T17:44:40.704918604+00:00 stderr F type: Progressing
2025-12-08T17:44:40.704918604+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:40.704918604+00:00 stderr F status: "True"
2025-12-08T17:44:40.704918604+00:00 stderr F type: Available
2025-12-08T17:44:40.704918604+00:00 stderr F I1208 17:44:40.703897 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged
2025-12-08T17:44:40.730956958+00:00 stderr F I1208 17:44:40.730868 1 log.go:245] ClusterOperator config status updated with conditions:
2025-12-08T17:44:40.730956958+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z"
2025-12-08T17:44:40.730956958+00:00 stderr F status: "False"
2025-12-08T17:44:40.730956958+00:00 stderr F type: Degraded
2025-12-08T17:44:40.730956958+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:40.730956958+00:00 stderr F status: "True"
2025-12-08T17:44:40.730956958+00:00 stderr F type: Upgradeable
2025-12-08T17:44:40.730956958+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:40.730956958+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:40.730956958+00:00 stderr F reason: Unknown
2025-12-08T17:44:40.730956958+00:00 stderr F status: "False"
2025-12-08T17:44:40.730956958+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:40.730956958+00:00 stderr F - lastTransitionTime: "2025-11-03T09:40:45Z"
2025-12-08T17:44:40.730956958+00:00 stderr F message: Deployment "/openshift-network-console/networking-console-plugin" is waiting
2025-12-08T17:44:40.730956958+00:00 stderr F for other operators to become ready
2025-12-08T17:44:40.730956958+00:00 stderr F reason: Deploying
2025-12-08T17:44:40.730956958+00:00 stderr F status: "True"
2025-12-08T17:44:40.730956958+00:00 stderr F type: Progressing
2025-12-08T17:44:40.730956958+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:40.730956958+00:00 stderr F status: "True"
2025-12-08T17:44:40.730956958+00:00 stderr F type: Available
2025-12-08T17:44:41.495203253+00:00 stderr F I1208 17:44:41.495158 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.500282205+00:00 stderr F I1208 17:44:41.500247 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.503817463+00:00 stderr F I1208 17:44:41.503794 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.509970834+00:00 stderr F I1208 17:44:41.509308 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.513934575+00:00 stderr F I1208 17:44:41.513078 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.519863669+00:00 stderr F I1208 17:44:41.519827 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.524617821+00:00 stderr F I1208 17:44:41.524583 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.530185216+00:00 stderr F I1208 17:44:41.530148 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.537671145+00:00 stderr F I1208 17:44:41.537631 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.542217011+00:00 stderr F I1208 17:44:41.542190 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.696375031+00:00 stderr F I1208 17:44:41.696285 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied
2025-12-08T17:44:41.897545818+00:00 stderr F I1208 17:44:41.897096 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:42.095534667+00:00 stderr F I1208 17:44:42.095469 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:42.295262604+00:00 stderr F I1208 17:44:42.295218 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:42.495476226+00:00 stderr F I1208 17:44:42.495415 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:42.696234592+00:00 stderr F I1208 17:44:42.696177 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:42.896601777+00:00 stderr F I1208 17:44:42.896509 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:43.097992300+00:00 stderr F I1208 17:44:43.096694 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied
2025-12-08T17:44:43.294764085+00:00 stderr F I1208 17:44:43.294706 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied
2025-12-08T17:44:43.497216428+00:00 stderr F I1208 17:44:43.497148 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:43.697111471+00:00 stderr F I1208 17:44:43.697029 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied
2025-12-08T17:44:43.896116518+00:00 stderr F I1208 17:44:43.896064 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:44.094580431+00:00 stderr F I1208 17:44:44.094515 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:44.267346337+00:00 stderr F I1208 17:44:44.265544 1 pod_watcher.go:132] Operand /, Kind= openshift-network-console/networking-console-plugin updated, re-generating status
2025-12-08T17:44:44.267346337+00:00 stderr F I1208 17:44:44.265565 1 pod_watcher.go:132] Operand /, Kind= openshift-network-console/networking-console-plugin updated, re-generating status
2025-12-08T17:44:44.294387540+00:00 stderr F I1208 17:44:44.293687 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:44.318963244+00:00 stderr F I1208 17:44:44.318644 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged
2025-12-08T17:44:44.318963244+00:00 stderr F I1208 17:44:44.318805 1 log.go:245] Network operator config updated with conditions:
2025-12-08T17:44:44.318963244+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:44.318963244+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:44.318963244+00:00 stderr F reason: Unknown
2025-12-08T17:44:44.318963244+00:00 stderr F status: "False"
2025-12-08T17:44:44.318963244+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:44.318963244+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z"
2025-12-08T17:44:44.318963244+00:00 stderr F status: "False"
2025-12-08T17:44:44.318963244+00:00 stderr F type: Degraded
2025-12-08T17:44:44.318963244+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:44.318963244+00:00 stderr F status: "True"
2025-12-08T17:44:44.318963244+00:00 stderr F type: Upgradeable
2025-12-08T17:44:44.318963244+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:44Z"
2025-12-08T17:44:44.318963244+00:00 stderr F status: "False"
2025-12-08T17:44:44.318963244+00:00 stderr F type: Progressing
2025-12-08T17:44:44.318963244+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:44.318963244+00:00 stderr F status: "True"
2025-12-08T17:44:44.318963244+00:00 stderr F type: Available
2025-12-08T17:44:44.353498085+00:00 stderr F I1208 17:44:44.353452 1 log.go:245] ClusterOperator config status updated with conditions:
2025-12-08T17:44:44.353498085+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z"
2025-12-08T17:44:44.353498085+00:00 stderr F status: "False"
2025-12-08T17:44:44.353498085+00:00 stderr F type: Degraded
2025-12-08T17:44:44.353498085+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z"
2025-12-08T17:44:44.353498085+00:00 stderr F status: "True"
2025-12-08T17:44:44.353498085+00:00 stderr F type: Upgradeable
2025-12-08T17:44:44.353498085+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z"
2025-12-08T17:44:44.353498085+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator
2025-12-08T17:44:44.353498085+00:00 stderr F reason: Unknown
2025-12-08T17:44:44.353498085+00:00 stderr F status: "False"
2025-12-08T17:44:44.353498085+00:00 stderr F type: ManagementStateDegraded
2025-12-08T17:44:44.353498085+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:44Z"
2025-12-08T17:44:44.353498085+00:00 stderr F status: "False"
2025-12-08T17:44:44.353498085+00:00 stderr F type: Progressing
2025-12-08T17:44:44.353498085+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z"
2025-12-08T17:44:44.353498085+00:00 stderr F status: "True"
2025-12-08T17:44:44.353498085+00:00 stderr F type: Available
2025-12-08T17:44:44.496594036+00:00 stderr F I1208 17:44:44.496560 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:44.700487110+00:00 stderr F I1208 17:44:44.699863 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:44:44.895545648+00:00 stderr F I1208 17:44:44.895501 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:44:45.099343398+00:00 stderr F I1208 17:44:45.099253 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied
2025-12-08T17:44:45.298733857+00:00 stderr F I1208 17:44:45.298310 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied
2025-12-08T17:44:45.501420046+00:00 stderr F I1208
17:44:45.499845 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:45.699714254+00:00 stderr F I1208 17:44:45.699585 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:44:45.896098558+00:00 stderr F I1208 17:44:45.896054 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:46.096162574+00:00 stderr F I1208 17:44:46.095821 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:46.298885406+00:00 stderr F I1208 17:44:46.298824 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:46.495467965+00:00 stderr F I1208 17:44:46.495411 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:46.699424021+00:00 stderr F I1208 17:44:46.698509 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:46.897841241+00:00 stderr F I1208 17:44:46.897762 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:47.097060215+00:00 stderr F I1208 17:44:47.096862 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:44:47.300250218+00:00 stderr F I1208 17:44:47.300104 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:44:47.497153417+00:00 stderr F I1208 17:44:47.497078 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:59.681054644+00:00 stderr F I1208 17:44:59.680987 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:44:59.686618199+00:00 stderr F I1208 17:44:59.686575 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:59.691089923+00:00 stderr F I1208 17:44:59.691039 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:59.695664910+00:00 stderr F I1208 17:44:59.695625 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:59.699422195+00:00 stderr F I1208 17:44:59.699053 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 
2025-12-08T17:44:59.705741860+00:00 stderr F I1208 17:44:59.705698 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:44:59.709800144+00:00 stderr F I1208 17:44:59.709401 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:44:59.715720178+00:00 stderr F I1208 17:44:59.715673 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:44:59.722798145+00:00 stderr F I1208 17:44:59.722674 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:44:59.728166504+00:00 stderr F I1208 17:44:59.728121 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:45:08.525997664+00:00 stderr F I1208 17:45:08.525748 1 log.go:245] Reconciling update to IngressController openshift-ingress-operator/default 2025-12-08T17:45:11.932193801+00:00 stderr F I1208 17:45:11.930601 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/client-ca-custom' 2025-12-08T17:45:11.940643176+00:00 stderr F I1208 17:45:11.940452 1 log.go:245] configmap 'openshift-config/client-ca-custom' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:45:14.212027468+00:00 stderr F I1208 17:45:14.210743 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/admin-kubeconfig-client-ca' 2025-12-08T17:45:14.219808714+00:00 stderr F I1208 17:45:14.218478 1 log.go:245] configmap 'openshift-config/admin-kubeconfig-client-ca' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:45:15.877604942+00:00 stderr F I1208 17:45:15.877551 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:45:15.885700237+00:00 stderr F I1208 17:45:15.885669 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:45:15.890990935+00:00 stderr F I1208 17:45:15.889215 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:45:15.893243168+00:00 stderr F I1208 17:45:15.893211 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:45:15.896655072+00:00 stderr F I1208 17:45:15.896614 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:45:15.902497425+00:00 stderr F I1208 17:45:15.902472 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:45:15.906009902+00:00 stderr F I1208 17:45:15.905978 1 log.go:245] The check 
PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:45:15.910278072+00:00 stderr F I1208 17:45:15.910243 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:45:15.914771086+00:00 stderr F I1208 17:45:15.914742 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:45:15.918650015+00:00 stderr F I1208 17:45:15.918627 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:45:16.041416431+00:00 stderr F I1208 17:45:16.039420 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.039361553 +0000 UTC))" 2025-12-08T17:45:16.041416431+00:00 stderr F I1208 17:45:16.039472 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.039449156 +0000 UTC))" 2025-12-08T17:45:16.049822795+00:00 stderr F I1208 17:45:16.049118 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.049057493 +0000 UTC))" 2025-12-08T17:45:16.049822795+00:00 stderr F I1208 17:45:16.049173 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.049150585 +0000 UTC))" 2025-12-08T17:45:16.049822795+00:00 stderr F I1208 17:45:16.049201 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.049182736 +0000 UTC))" 2025-12-08T17:45:16.049822795+00:00 stderr F I1208 17:45:16.049234 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 
08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.049212987 +0000 UTC))" 2025-12-08T17:45:16.049822795+00:00 stderr F I1208 17:45:16.049263 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.049245658 +0000 UTC))" 2025-12-08T17:45:16.049822795+00:00 stderr F I1208 17:45:16.049292 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.049272278 +0000 UTC))" 2025-12-08T17:45:16.049822795+00:00 stderr F I1208 17:45:16.049321 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.049304279 +0000 UTC))" 2025-12-08T17:45:16.049822795+00:00 stderr F I1208 17:45:16.049353 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.04933592 +0000 UTC))" 2025-12-08T17:45:16.049822795+00:00 stderr F I1208 17:45:16.049379 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.049362651 +0000 UTC))" 2025-12-08T17:45:16.049822795+00:00 stderr F I1208 17:45:16.049790 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"*.metrics.openshift-network-operator.svc\" [serving] validServingFor=[*.metrics.openshift-network-operator.svc,*.metrics.openshift-network-operator.svc.cluster.local,metrics.openshift-network-operator.svc,metrics.openshift-network-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:19 +0000 UTC to 2027-11-02 07:52:20 +0000 UTC (now=2025-12-08 17:45:16.049760473 +0000 UTC))" 2025-12-08T17:45:16.050435571+00:00 stderr F I1208 17:45:16.050149 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215843\" [serving] validServingFor=[apiserver-loopback-client] 
issuer=\"apiserver-loopback-client-ca@1765215842\" (2025-12-08 16:44:02 +0000 UTC to 2028-12-08 16:44:02 +0000 UTC (now=2025-12-08 17:45:16.050112973 +0000 UTC))" 2025-12-08T17:45:16.081763193+00:00 stderr F I1208 17:45:16.081705 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:45:16.277414087+00:00 stderr F I1208 17:45:16.277373 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:45:16.476743403+00:00 stderr F I1208 17:45:16.476685 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:45:16.677111219+00:00 stderr F I1208 17:45:16.677058 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:45:16.877553436+00:00 stderr F I1208 17:45:16.877495 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:45:17.078852557+00:00 stderr F I1208 17:45:17.078792 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:45:17.275679283+00:00 stderr F I1208 17:45:17.275631 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:45:17.477306004+00:00 stderr F I1208 17:45:17.477251 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:45:17.678673697+00:00 stderr F I1208 17:45:17.678585 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:45:17.880455121+00:00 stderr F I1208 17:45:17.880367 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:45:18.078044470+00:00 stderr F I1208 17:45:18.077964 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:03.104106568+00:00 stderr F E1208 17:46:03.103934 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-network-operator/leases/network-operator-lock?timeout=4m0s": dial tcp 38.102.83.243:6443: connect: connection refused, falling back to slow path 2025-12-08T17:46:03.104645635+00:00 stderr F E1208 17:46:03.104598 1 leaderelection.go:436] error retrieving resource lock openshift-network-operator/network-operator-lock: Get "https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-network-operator/leases/network-operator-lock?timeout=4m0s": dial tcp 38.102.83.243:6443: connect: connection refused 2025-12-08T17:46:03.346855724+00:00 stderr F E1208 17:46:03.346731 1 base_controller.go:279] "Unhandled Error" err="cluster-network-operator-ManagementState reconciliation failed: unable to 
get network operator configuration: Get \"https://api-int.crc.testing:6443/apis/operator.openshift.io/v1/networks/cluster\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:46:03.355446732+00:00 stderr F E1208 17:46:03.355383 1 base_controller.go:279] "Unhandled Error" err="cluster-network-operator-ManagementState reconciliation failed: unable to get network operator configuration: Get \"https://api-int.crc.testing:6443/apis/operator.openshift.io/v1/networks/cluster\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:46:03.368314499+00:00 stderr F E1208 17:46:03.367892 1 base_controller.go:279] "Unhandled Error" err="cluster-network-operator-ManagementState reconciliation failed: unable to get network operator configuration: Get \"https://api-int.crc.testing:6443/apis/operator.openshift.io/v1/networks/cluster\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:46:03.393802764+00:00 stderr F E1208 17:46:03.393719 1 base_controller.go:279] "Unhandled Error" err="cluster-network-operator-ManagementState reconciliation failed: unable to get network operator configuration: Get \"https://api-int.crc.testing:6443/apis/operator.openshift.io/v1/networks/cluster\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:46:03.436149775+00:00 stderr F E1208 17:46:03.436076 1 base_controller.go:279] "Unhandled Error" err="cluster-network-operator-ManagementState reconciliation failed: unable to get network operator configuration: Get \"https://api-int.crc.testing:6443/apis/operator.openshift.io/v1/networks/cluster\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:46:03.519027673+00:00 stderr F E1208 17:46:03.518459 1 base_controller.go:279] "Unhandled Error" err="cluster-network-operator-ManagementState reconciliation failed: unable to get network operator configuration: Get \"https://api-int.crc.testing:6443/apis/operator.openshift.io/v1/networks/cluster\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:46:03.681774888+00:00 stderr F E1208 17:46:03.681661 1 base_controller.go:279] "Unhandled Error" err="cluster-network-operator-ManagementState reconciliation failed: unable to get network operator configuration: Get \"https://api-int.crc.testing:6443/apis/operator.openshift.io/v1/networks/cluster\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:46:09.648804582+00:00 stderr F I1208 17:46:09.648670 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:09.990098295+00:00 stderr F I1208 17:46:09.990013 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:14.130626228+00:00 stderr F I1208 17:46:14.129926 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:19.733745647+00:00 stderr F I1208 17:46:19.733682 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:20.926196128+00:00 stderr F I1208 17:46:20.926037 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:20.939297182+00:00 stderr F I1208 17:46:20.939217 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 
2025-12-08T17:46:20.946664163+00:00 stderr F I1208 17:46:20.946607 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:20.953663033+00:00 stderr F I1208 17:46:20.953605 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:20.959005703+00:00 stderr F I1208 17:46:20.958955 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:20.964288292+00:00 stderr F I1208 17:46:20.964235 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:20.972337473+00:00 stderr F I1208 17:46:20.972278 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:20.977624842+00:00 stderr F I1208 17:46:20.977549 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:20.983716185+00:00 stderr F I1208 17:46:20.983655 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:46:20.990363174+00:00 stderr F I1208 17:46:20.990305 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:46:20.997384775+00:00 stderr F I1208 17:46:20.997311 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:21.136681746+00:00 stderr F I1208 17:46:21.136621 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:22.500210683+00:00 stderr F I1208 17:46:22.500118 1 reflector.go:430] "Caches populated" type="*v1.DaemonSet" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:22.500344697+00:00 stderr F I1208 17:46:22.500286 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus updated, re-generating status 2025-12-08T17:46:22.500344697+00:00 stderr F I1208 17:46:22.500327 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus updated, re-generating status 2025-12-08T17:46:22.500344697+00:00 stderr F I1208 17:46:22.500340 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-additional-cni-plugins updated, re-generating status 2025-12-08T17:46:22.500363948+00:00 stderr F I1208 17:46:22.500346 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-additional-cni-plugins updated, re-generating status 2025-12-08T17:46:22.500363948+00:00 stderr F I1208 17:46:22.500359 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/network-metrics-daemon updated, re-generating status 2025-12-08T17:46:22.500377508+00:00 stderr F I1208 17:46:22.500365 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/network-metrics-daemon updated, re-generating status 2025-12-08T17:46:22.500377508+00:00 stderr F I1208 17:46:22.500372 1 pod_watcher.go:132] 
Operand /, Kind= openshift-network-node-identity/network-node-identity updated, re-generating status 2025-12-08T17:46:22.500390279+00:00 stderr F I1208 17:46:22.500378 1 pod_watcher.go:132] Operand /, Kind= openshift-network-node-identity/network-node-identity updated, re-generating status 2025-12-08T17:46:22.500402499+00:00 stderr F I1208 17:46:22.500388 1 pod_watcher.go:132] Operand /, Kind= openshift-network-operator/iptables-alerter updated, re-generating status 2025-12-08T17:46:22.500402499+00:00 stderr F I1208 17:46:22.500395 1 pod_watcher.go:132] Operand /, Kind= openshift-network-operator/iptables-alerter updated, re-generating status 2025-12-08T17:46:22.500415340+00:00 stderr F I1208 17:46:22.500403 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:46:22.500415340+00:00 stderr F I1208 17:46:22.500409 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:46:24.192753336+00:00 stderr F I1208 17:46:24.192666 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:24.687651801+00:00 stderr F I1208 17:46:24.687603 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:25.490196300+00:00 stderr F I1208 17:46:25.490123 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:25.490462709+00:00 stderr F I1208 17:46:25.490423 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-admission-controller updated, re-generating status 2025-12-08T17:46:25.490462709+00:00 stderr F I1208 17:46:25.490448 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus-admission-controller updated, re-generating status 2025-12-08T17:46:25.490483629+00:00 stderr F I1208 17:46:25.490461 1 pod_watcher.go:132] Operand /, Kind= openshift-network-console/networking-console-plugin updated, re-generating status 2025-12-08T17:46:25.490483629+00:00 stderr F I1208 17:46:25.490469 1 pod_watcher.go:132] Operand /, Kind= openshift-network-console/networking-console-plugin updated, re-generating status 2025-12-08T17:46:25.490497360+00:00 stderr F I1208 17:46:25.490481 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:46:25.490497360+00:00 stderr F I1208 17:46:25.490491 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:46:25.493692205+00:00 stderr F I1208 17:46:25.493643 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:25.690456281+00:00 stderr F I1208 17:46:25.690394 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:25.701616936+00:00 stderr F I1208 17:46:25.701538 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:46:25.706728740+00:00 stderr F I1208 17:46:25.706701 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 
2025-12-08T17:46:25.711813053+00:00 stderr F I1208 17:46:25.711775 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:25.717574755+00:00 stderr F I1208 17:46:25.717506 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:25.723589006+00:00 stderr F I1208 17:46:25.723560 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:25.731474083+00:00 stderr F I1208 17:46:25.731409 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:25.735959557+00:00 stderr F I1208 17:46:25.735924 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:25.741368079+00:00 stderr F I1208 17:46:25.741328 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:46:25.746728140+00:00 stderr F I1208 17:46:25.746690 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:46:25.753442091+00:00 stderr F I1208 17:46:25.753402 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:25.788239576+00:00 stderr F I1208 17:46:25.788195 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:25.901203377+00:00 stderr F I1208 17:46:25.901138 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:26.160849770+00:00 stderr F I1208 17:46:26.160699 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:26.252330377+00:00 stderr F I1208 17:46:26.252281 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:26.253357177+00:00 stderr F I1208 17:46:26.252829 1 log.go:245] Reconciling configmap from openshift-config-managed/trusted-ca-bundle 2025-12-08T17:46:26.256190432+00:00 stderr F I1208 17:46:26.256141 1 log.go:245] trusted-ca-bundle changed, updating 13 configMaps 2025-12-08T17:46:26.256232713+00:00 stderr F I1208 17:46:26.256211 1 log.go:245] ConfigMap openshift-image-registry/trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256261354+00:00 stderr F I1208 17:46:26.256244 1 log.go:245] ConfigMap openshift-ingress-operator/trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256290635+00:00 stderr F I1208 17:46:26.256273 1 log.go:245] ConfigMap openshift-kube-controller-manager/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256310376+00:00 stderr F I1208 17:46:26.256303 1 log.go:245] ConfigMap openshift-apiserver-operator/trusted-ca-bundle ca-bundle.crt unchanged, skipping 
2025-12-08T17:46:26.256357277+00:00 stderr F I1208 17:46:26.256333 1 log.go:245] ConfigMap openshift-console/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256385758+00:00 stderr F I1208 17:46:26.256366 1 log.go:245] ConfigMap openshift-kube-apiserver/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256413469+00:00 stderr F I1208 17:46:26.256396 1 log.go:245] ConfigMap openshift-machine-api/mao-trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256450500+00:00 stderr F I1208 17:46:26.256427 1 log.go:245] ConfigMap openshift-marketplace/marketplace-trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256479131+00:00 stderr F I1208 17:46:26.256460 1 log.go:245] ConfigMap openshift-apiserver/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256508021+00:00 stderr F I1208 17:46:26.256489 1 log.go:245] ConfigMap openshift-authentication-operator/trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256541922+00:00 stderr F I1208 17:46:26.256520 1 log.go:245] ConfigMap openshift-authentication/v4-0-config-system-trusted-ca-bundle ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256569053+00:00 stderr F I1208 17:46:26.256552 1 log.go:245] ConfigMap openshift-console-operator/trusted-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.256605384+00:00 stderr F I1208 17:46:26.256583 1 log.go:245] ConfigMap openshift-controller-manager/openshift-global-ca ca-bundle.crt unchanged, skipping 2025-12-08T17:46:26.483209586+00:00 stderr F I1208 17:46:26.483131 1 reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:26.483517405+00:00 stderr F I1208 17:46:26.483460 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:46:26.626415995+00:00 stderr F I1208 17:46:26.626329 1 reflector.go:430] "Caches populated" type="*v1.IngressController" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:26.626672672+00:00 stderr F I1208 17:46:26.626579 1 log.go:245] Reconciling update to IngressController openshift-ingress-operator/default 2025-12-08T17:46:26.878321646+00:00 stderr F I1208 17:46:26.878243 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:26.904040828+00:00 stderr F I1208 17:46:26.903962 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:27.035430071+00:00 stderr F I1208 17:46:27.035333 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:27.149278919+00:00 stderr F I1208 17:46:27.149229 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:27.149372272+00:00 stderr F I1208 17:46:27.149352 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:27.150771503+00:00 stderr F I1208 17:46:27.150729 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:27.158933908+00:00 stderr F I1208 17:46:27.158860 1 reflector.go:430] "Caches populated" type="*v1.StatefulSet" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 
2025-12-08T17:46:27.163450394+00:00 stderr F I1208 17:46:27.163398 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:46:27.172149775+00:00 stderr F I1208 17:46:27.172104 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:27.179351161+00:00 stderr F I1208 17:46:27.178782 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:27.187042132+00:00 stderr F I1208 17:46:27.186988 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:27.193093214+00:00 stderr F I1208 17:46:27.193039 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:27.207976041+00:00 stderr F I1208 17:46:27.207861 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:27.300117107+00:00 stderr F I1208 17:46:27.300058 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:27.305389264+00:00 stderr F I1208 17:46:27.305331 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:27.348607072+00:00 stderr F I1208 17:46:27.348525 1 reflector.go:430] "Caches populated" type="*v1.CustomResourceDefinition" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:27.419495709+00:00 stderr F I1208 17:46:27.419437 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:27.419669044+00:00 stderr F I1208 17:46:27.419640 1 log.go:245] Reconciling proxy 'cluster' 2025-12-08T17:46:27.422221601+00:00 stderr F I1208 17:46:27.422166 1 log.go:245] httpProxy, httpsProxy and noProxy not defined for proxy 'cluster'; validation will be skipped 2025-12-08T17:46:27.435036726+00:00 stderr F I1208 17:46:27.434993 1 log.go:245] Reconciling proxy 'cluster' complete 2025-12-08T17:46:27.502925674+00:00 stderr F I1208 17:46:27.501756 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:46:27.642989208+00:00 stderr F I1208 17:46:27.642924 1 reflector.go:430] "Caches populated" type="*v1.OperatorPKI" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:27.643930426+00:00 stderr F I1208 17:46:27.643121 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-network-node-identity/network-node-identity 2025-12-08T17:46:27.643930426+00:00 stderr F I1208 17:46:27.643392 1 log.go:245] successful reconciliation 2025-12-08T17:46:27.654200944+00:00 stderr F I1208 17:46:27.654140 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/ovn 2025-12-08T17:46:27.654405950+00:00 stderr F I1208 17:46:27.654371 1 log.go:245] successful reconciliation 
2025-12-08T17:46:27.667707750+00:00 stderr F I1208 17:46:27.667636 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/signer 2025-12-08T17:46:27.668243916+00:00 stderr F I1208 17:46:27.668204 1 log.go:245] successful reconciliation 2025-12-08T17:46:27.702422992+00:00 stderr F I1208 17:46:27.702331 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:46:27.901316442+00:00 stderr F I1208 17:46:27.901231 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:27.914093985+00:00 stderr F I1208 17:46:27.913983 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:28.039066606+00:00 stderr F I1208 17:46:28.038978 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:28.102653505+00:00 stderr F I1208 17:46:28.102587 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:28.305661309+00:00 stderr F I1208 17:46:28.305586 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:46:28.501485476+00:00 stderr F I1208 17:46:28.501416 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:28.702187570+00:00 stderr F I1208 17:46:28.702078 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:28.849391139+00:00 stderr F I1208 17:46:28.849332 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:28.867137922+00:00 stderr F I1208 17:46:28.867093 1 reflector.go:430] "Caches populated" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:28.922742511+00:00 stderr F I1208 17:46:28.899262 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:29.100859787+00:00 stderr F I1208 17:46:29.100804 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:29.236862979+00:00 stderr F I1208 17:46:29.236786 1 reflector.go:430] "Caches populated" type="*v1.MachineConfigPool" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:29.303357275+00:00 stderr F I1208 17:46:29.303265 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:29.498741789+00:00 stderr F I1208 17:46:29.498673 1 reflector.go:430] "Caches populated" type="*v1alpha1.PodNetworkConnectivityCheck" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:29.550964097+00:00 stderr F I1208 
17:46:29.550848 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:29.551023229+00:00 stderr F I1208 17:46:29.550989 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:29.552871565+00:00 stderr F I1208 17:46:29.552812 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:29.701408243+00:00 stderr F I1208 17:46:29.701357 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:29.902901161+00:00 stderr F I1208 17:46:29.902800 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:46:29.990098218+00:00 stderr F I1208 17:46:29.989408 1 reflector.go:430] "Caches populated" type="*v1.MachineConfig" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:30.102545213+00:00 stderr F I1208 17:46:30.102479 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:46:30.261211356+00:00 stderr F I1208 17:46:30.261158 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:30.299583307+00:00 stderr F I1208 17:46:30.299545 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:30.502125827+00:00 stderr F I1208 17:46:30.502091 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:30.701343277+00:00 stderr F I1208 17:46:30.701279 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:46:30.887023870+00:00 stderr F I1208 17:46:30.886943 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:30.887191705+00:00 stderr F I1208 17:46:30.887156 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/admin-acks' 2025-12-08T17:46:30.891694010+00:00 stderr F I1208 17:46:30.891630 1 log.go:245] configmap 'openshift-config/admin-acks' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:46:30.891722201+00:00 stderr F I1208 17:46:30.891699 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/admin-kubeconfig-client-ca' 2025-12-08T17:46:30.896074481+00:00 stderr F I1208 17:46:30.896013 1 log.go:245] configmap 'openshift-config/admin-kubeconfig-client-ca' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:46:30.896074481+00:00 stderr F I1208 17:46:30.896061 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/client-ca-custom' 2025-12-08T17:46:30.900375341+00:00 stderr F I1208 17:46:30.899671 1 log.go:245] configmap 'openshift-config/client-ca-custom' name differs from trustedCA of proxy 'cluster' or trustedCA not set; 
reconciliation will be skipped 2025-12-08T17:46:30.900375341+00:00 stderr F I1208 17:46:30.899732 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/etcd-ca-bundle' 2025-12-08T17:46:30.901214056+00:00 stderr F I1208 17:46:30.901170 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:30.903835235+00:00 stderr F I1208 17:46:30.903779 1 log.go:245] configmap 'openshift-config/etcd-ca-bundle' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:46:30.903859655+00:00 stderr F I1208 17:46:30.903830 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/etcd-serving-ca' 2025-12-08T17:46:30.907638408+00:00 stderr F I1208 17:46:30.907575 1 log.go:245] configmap 'openshift-config/etcd-serving-ca' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:46:30.907638408+00:00 stderr F I1208 17:46:30.907627 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/initial-kube-apiserver-server-ca' 2025-12-08T17:46:30.912389951+00:00 stderr F I1208 17:46:30.912323 1 log.go:245] configmap 'openshift-config/initial-kube-apiserver-server-ca' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:46:30.912389951+00:00 stderr F I1208 17:46:30.912382 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/installer-images' 2025-12-08T17:46:30.925994550+00:00 stderr F I1208 17:46:30.919463 1 log.go:245] configmap 'openshift-config/installer-images' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:46:30.925994550+00:00 stderr F I1208 17:46:30.919518 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/kube-root-ca.crt' 2025-12-08T17:46:30.925994550+00:00 stderr F I1208 17:46:30.925178 1 log.go:245] configmap 'openshift-config/kube-root-ca.crt' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:46:30.925994550+00:00 stderr F I1208 17:46:30.925434 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/openshift-install-manifests' 2025-12-08T17:46:30.933105713+00:00 stderr F I1208 17:46:30.933052 1 log.go:245] configmap 'openshift-config/openshift-install-manifests' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:46:30.933421383+00:00 stderr F I1208 17:46:30.933408 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/openshift-service-ca.crt' 2025-12-08T17:46:30.938820374+00:00 stderr F I1208 17:46:30.938775 1 log.go:245] configmap 'openshift-config/openshift-service-ca.crt' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:46:30.938961348+00:00 stderr F I1208 17:46:30.938949 1 log.go:245] Reconciling additional trust bundle configmap 'openshift-config/registry-certs' 2025-12-08T17:46:31.092922320+00:00 stderr F I1208 17:46:31.092807 1 log.go:245] configmap 'openshift-config/registry-certs' name differs from trustedCA of proxy 'cluster' or trustedCA not set; reconciliation will be skipped 2025-12-08T17:46:31.099203529+00:00 stderr F I1208 17:46:31.099170 1 log.go:245] The check 
PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:31.305001315+00:00 stderr F I1208 17:46:31.303481 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:31.499473203+00:00 stderr F I1208 17:46:31.499396 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:31.565289958+00:00 stderr F I1208 17:46:31.565201 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:31.703057953+00:00 stderr F I1208 17:46:31.703000 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:31.900131759+00:00 stderr F I1208 17:46:31.900093 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:31.939954604+00:00 stderr F I1208 17:46:31.939823 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:31.940031407+00:00 stderr F I1208 17:46:31.939954 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:31.941327845+00:00 stderr F I1208 17:46:31.941284 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:46:32.102222025+00:00 stderr F I1208 17:46:32.102132 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:46:32.301505106+00:00 stderr F I1208 17:46:32.301433 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:46:32.461084016+00:00 stderr F I1208 17:46:32.461017 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:32.502591342+00:00 stderr F I1208 17:46:32.502545 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:32.578696967+00:00 stderr F I1208 17:46:32.578616 1 reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:32.701515993+00:00 stderr F I1208 17:46:32.701412 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:32.904019891+00:00 stderr F I1208 17:46:32.902435 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:46:33.102989973+00:00 stderr F I1208 17:46:33.102914 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:33.231944234+00:00 stderr F I1208 17:46:33.231844 1 
reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:33.303835812+00:00 stderr F I1208 17:46:33.303759 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:33.305153621+00:00 stderr F I1208 17:46:33.305106 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:33.504031531+00:00 stderr F I1208 17:46:33.503978 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:33.605392623+00:00 stderr F I1208 17:46:33.605278 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=openshiftapiservers" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:33.612458346+00:00 stderr F I1208 17:46:33.612367 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:33.699303192+00:00 stderr F I1208 17:46:33.699247 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:33.759681894+00:00 stderr F I1208 17:46:33.759639 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:33.900039208+00:00 stderr F I1208 17:46:33.899996 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:34.099219256+00:00 stderr F I1208 17:46:34.099179 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:34.217263229+00:00 stderr F I1208 17:46:34.217198 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:34.301242900+00:00 stderr F I1208 17:46:34.301170 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:46:34.499806100+00:00 stderr F I1208 17:46:34.499726 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:46:34.701123822+00:00 stderr F I1208 17:46:34.701090 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:34.903382744+00:00 stderr F I1208 17:46:34.901102 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:34.960966282+00:00 stderr F I1208 17:46:34.960854 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:35.106034626+00:00 stderr F I1208 17:46:35.106001 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 
2025-12-08T17:46:35.288332098+00:00 stderr F I1208 17:46:35.288255 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:35.303913096+00:00 stderr F I1208 17:46:35.303813 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:35.502177386+00:00 stderr F I1208 17:46:35.502135 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:35.703076747+00:00 stderr F I1208 17:46:35.703025 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:35.900279426+00:00 stderr F I1208 17:46:35.900235 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:35.917779971+00:00 stderr F I1208 17:46:35.917703 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:36.026592268+00:00 stderr F I1208 17:46:36.026534 1 reflector.go:430] "Caches populated" type="*v1.EgressRouter" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:36.104483895+00:00 stderr F I1208 17:46:36.104412 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:36.300116127+00:00 stderr F I1208 17:46:36.300058 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:36.504843792+00:00 stderr F I1208 17:46:36.504783 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:46:36.704508226+00:00 stderr F I1208 17:46:36.704399 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:46:36.903987333+00:00 stderr F I1208 17:46:36.903917 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:37.102634876+00:00 stderr F I1208 17:46:37.102359 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:37.304049722+00:00 stderr F I1208 17:46:37.303995 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:46:37.503786296+00:00 stderr F I1208 17:46:37.503701 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:37.662593674+00:00 stderr F I1208 17:46:37.662557 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:46:37.700206932+00:00 stderr F I1208 17:46:37.700119 1 log.go:245] 
The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:37.902373230+00:00 stderr F I1208 17:46:37.902185 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:38.099750745+00:00 stderr F I1208 17:46:38.099687 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:38.302918543+00:00 stderr F I1208 17:46:38.302809 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:38.499372129+00:00 stderr F I1208 17:46:38.499306 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:38.700106545+00:00 stderr F I1208 17:46:38.700032 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:46:38.902616213+00:00 stderr F I1208 17:46:38.902554 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:46:39.101671088+00:00 stderr F I1208 17:46:39.101591 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:39.303697432+00:00 stderr F I1208 17:46:39.303640 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:39.503253822+00:00 stderr F I1208 17:46:39.503188 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:46:39.701256665+00:00 stderr F I1208 17:46:39.701198 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:39.902229267+00:00 stderr F I1208 17:46:39.902061 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:40.102107406+00:00 stderr F I1208 17:46:40.102043 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:40.301715859+00:00 stderr F I1208 17:46:40.301625 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:40.500952469+00:00 stderr F I1208 17:46:40.500865 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:40.701350393+00:00 stderr F I1208 17:46:40.701283 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:46:40.901982405+00:00 stderr F 
I1208 17:46:40.901869 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:46:41.104788012+00:00 stderr F I1208 17:46:41.104488 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:46:41.304665032+00:00 stderr F I1208 17:46:41.304585 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:46:41.502695576+00:00 stderr F I1208 17:46:41.502595 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:03.559991441+00:00 stderr F I1208 17:47:03.559920 1 log.go:245] Reconciling update to IngressController openshift-ingress-operator/default 2025-12-08T17:47:12.370422616+00:00 stderr F I1208 17:47:12.370344 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:47:12.376061203+00:00 stderr F I1208 17:47:12.375998 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:47:12.380523064+00:00 stderr F I1208 17:47:12.379412 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:12.386971967+00:00 stderr F I1208 17:47:12.386245 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:12.390954752+00:00 stderr F I1208 17:47:12.390228 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:12.396639671+00:00 stderr F I1208 17:47:12.396595 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:47:12.400843284+00:00 stderr F I1208 17:47:12.400819 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:12.405141418+00:00 stderr F I1208 17:47:12.405114 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:47:12.408896207+00:00 stderr F I1208 17:47:12.408846 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:47:12.413008116+00:00 stderr F I1208 17:47:12.412987 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:47:12.568737699+00:00 stderr F I1208 17:47:12.568670 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.012067667+00:00 stderr F I1208 17:47:22.012030 1 log.go:245] The check 
PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.017797777+00:00 stderr F I1208 17:47:22.017665 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.029170815+00:00 stderr F I1208 17:47:22.029131 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.034201884+00:00 stderr F I1208 17:47:22.033570 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.038270171+00:00 stderr F I1208 17:47:22.038243 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.043633710+00:00 stderr F I1208 17:47:22.043592 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.049022010+00:00 stderr F I1208 17:47:22.048994 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.053926864+00:00 stderr F I1208 17:47:22.053898 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.058859050+00:00 stderr F I1208 17:47:22.058801 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.064062853+00:00 stderr F I1208 17:47:22.064025 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:47:22.230275916+00:00 stderr F I1208 17:47:22.230227 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.699004714+00:00 stderr F I1208 17:47:29.696000 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.704762155+00:00 stderr F I1208 17:47:29.704691 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.709835285+00:00 stderr F I1208 17:47:29.709770 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.714854663+00:00 stderr F I1208 17:47:29.714387 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.722561125+00:00 stderr F I1208 17:47:29.722482 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.733544481+00:00 stderr F I1208 
17:47:29.731566 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.736097232+00:00 stderr F I1208 17:47:29.736046 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.740808410+00:00 stderr F I1208 17:47:29.740743 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.747021806+00:00 stderr F I1208 17:47:29.746984 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.751287239+00:00 stderr F I1208 17:47:29.751244 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:47:29.886962901+00:00 stderr F I1208 17:47:29.886892 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:47:36.774379480+00:00 stderr F I1208 17:47:36.774299 1 log.go:245] Reconciling Network.operator.openshift.io cluster 2025-12-08T17:47:36.896557176+00:00 stderr F I1208 17:47:36.896484 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu-host=, the list of nodes are [] 2025-12-08T17:47:36.898566440+00:00 stderr F I1208 17:47:36.898515 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu=, the list of nodes are [] 2025-12-08T17:47:36.900203771+00:00 stderr F I1208 17:47:36.900163 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/smart-nic=, the list of nodes are [] 2025-12-08T17:47:36.901811412+00:00 stderr F I1208 17:47:36.901762 1 ovn_kubernetes.go:956] OVN configuration is now &{GatewayMode: HyperShiftConfig:0xc003e26000 DisableUDPAggregation:false DpuHostModeLabel:network.operator.openshift.io/dpu-host DpuHostModeNodes:[] DpuHostModeValue: DpuModeLabel:network.operator.openshift.io/dpu DpuModeNodes:[] SmartNicModeLabel:network.operator.openshift.io/smart-nic SmartNicModeNodes:[] SmartNicModeValue: MgmtPortResourceName: ConfigOverrides:map[]} 2025-12-08T17:47:36.905905670+00:00 stderr F I1208 17:47:36.905818 1 ovn_kubernetes.go:1728] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete; 1/1 scheduled; 1 available; generation 3 -> 3 2025-12-08T17:47:36.905905670+00:00 stderr F I1208 17:47:36.905849 1 ovn_kubernetes.go:1733] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete 2025-12-08T17:47:36.905905670+00:00 stderr F I1208 17:47:36.905858 1 ovn_kubernetes.go:1248] ovnkube-control-plane deployment status: progressing=false 2025-12-08T17:47:36.908685408+00:00 stderr F I1208 17:47:36.908630 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 2 -> 2 2025-12-08T17:47:36.908685408+00:00 stderr F I1208 17:47:36.908650 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T17:47:36.908685408+00:00 stderr F I1208 17:47:36.908656 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 2 -> 2 
2025-12-08T17:47:36.908685408+00:00 stderr F I1208 17:47:36.908662 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T17:47:36.908707898+00:00 stderr F I1208 17:47:36.908685 1 ovn_kubernetes.go:1279] ovnkube-node DaemonSet status: progressing=false 2025-12-08T17:47:36.912059375+00:00 stderr F I1208 17:47:36.912018 1 ovn_kubernetes.go:1321] Found the DefaultV4MasqueradeSubnet(169.254.0.0/17) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:47:36.912059375+00:00 stderr F I1208 17:47:36.912041 1 ovn_kubernetes.go:1318] Found the DefaultV6MasqueradeSubnet(fd69::/112) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:47:36.994956124+00:00 stderr F I1208 17:47:36.994866 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T17:47:37.004396631+00:00 stderr F I1208 17:47:37.004340 1 log.go:245] Apply / Create of (operator.openshift.io/v1, Kind=Network) /cluster was successful 2025-12-08T17:47:37.004396631+00:00 stderr F I1208 17:47:37.004372 1 log.go:245] Starting render phase 2025-12-08T17:47:37.005614759+00:00 stderr F I1208 17:47:37.005565 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:47:37.014938043+00:00 stderr F I1208 17:47:37.014859 1 ovn_kubernetes.go:344] OVN_EGRESSIP_HEALTHCHECK_PORT env var is not defined. Using: 9107 2025-12-08T17:47:37.042721597+00:00 stderr F I1208 17:47:37.042650 1 ovn_kubernetes.go:1457] IP family mode: node=single-stack, controlPlane=single-stack 2025-12-08T17:47:37.042721597+00:00 stderr F I1208 17:47:37.042676 1 ovn_kubernetes.go:1429] IP family change: updateNode=true, updateControlPlane=true 2025-12-08T17:47:37.042721597+00:00 stderr F I1208 17:47:37.042695 1 ovn_kubernetes.go:1601] OVN-Kubernetes control-plane and node already at release version 4.20.1; no changes required 2025-12-08T17:47:37.042721597+00:00 stderr F I1208 17:47:37.042712 1 ovn_kubernetes.go:531] ovnk components: ovnkube-node: isRunning=true, update=true; ovnkube-control-plane: isRunning=true, update=true 2025-12-08T17:47:37.201395532+00:00 stderr F I1208 17:47:37.200816 1 ovn_kubernetes.go:1693] daemonset openshift-network-node-identity/network-node-identity rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 1 -> 1 2025-12-08T17:47:37.201395532+00:00 stderr F I1208 17:47:37.201333 1 ovn_kubernetes.go:1698] daemonset openshift-network-node-identity/network-node-identity rollout complete 2025-12-08T17:47:37.412730425+00:00 stderr F I1208 17:47:37.412605 1 log.go:245] Render phase done, rendered 126 objects 2025-12-08T17:47:37.428540463+00:00 stderr F I1208 17:47:37.428436 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster 2025-12-08T17:47:37.437053931+00:00 stderr F I1208 17:47:37.436957 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster was successful 2025-12-08T17:47:37.437053931+00:00 stderr F I1208 17:47:37.437018 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io 2025-12-08T17:47:37.454732397+00:00 stderr F I1208 17:47:37.454629 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io was successful 2025-12-08T17:47:37.454732397+00:00 stderr F I1208 17:47:37.454693 1 log.go:245] reconciling 
(apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io 2025-12-08T17:47:37.464238967+00:00 stderr F I1208 17:47:37.464160 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io was successful 2025-12-08T17:47:37.464238967+00:00 stderr F I1208 17:47:37.464208 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io 2025-12-08T17:47:37.471561847+00:00 stderr F I1208 17:47:37.471474 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io was successful 2025-12-08T17:47:37.471561847+00:00 stderr F I1208 17:47:37.471512 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io 2025-12-08T17:47:37.481630404+00:00 stderr F I1208 17:47:37.481530 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io was successful 2025-12-08T17:47:37.481630404+00:00 stderr F I1208 17:47:37.481582 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-multus 2025-12-08T17:47:37.488031715+00:00 stderr F I1208 17:47:37.487933 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-multus was successful 2025-12-08T17:47:37.488031715+00:00 stderr F I1208 17:47:37.488000 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus 2025-12-08T17:47:37.494018053+00:00 stderr F I1208 17:47:37.493936 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus was successful 2025-12-08T17:47:37.494018053+00:00 stderr F I1208 17:47:37.493985 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools 2025-12-08T17:47:37.499849667+00:00 stderr F I1208 17:47:37.499768 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools was successful 2025-12-08T17:47:37.499849667+00:00 stderr F I1208 17:47:37.499834 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus 2025-12-08T17:47:37.503576115+00:00 stderr F I1208 17:47:37.503499 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus was successful 2025-12-08T17:47:37.503576115+00:00 stderr F I1208 17:47:37.503541 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient 2025-12-08T17:47:37.508008434+00:00 stderr F I1208 17:47:37.507919 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient was successful 2025-12-08T17:47:37.508008434+00:00 stderr F I1208 17:47:37.507952 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group 2025-12-08T17:47:37.635014022+00:00 stderr F I1208 17:47:37.634782 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group was successful 2025-12-08T17:47:37.635014022+00:00 stderr F I1208 17:47:37.634822 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools 2025-12-08T17:47:37.836552337+00:00 stderr F I1208 17:47:37.836507 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools was successful 2025-12-08T17:47:37.836644080+00:00 stderr F I1208 17:47:37.836630 1 log.go:245] reconciling 
(rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools 2025-12-08T17:47:38.033362562+00:00 stderr F I1208 17:47:38.033324 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools was successful 2025-12-08T17:47:38.033440564+00:00 stderr F I1208 17:47:38.033430 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers 2025-12-08T17:47:38.237023273+00:00 stderr F I1208 17:47:38.236952 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers was successful 2025-12-08T17:47:38.237075955+00:00 stderr F I1208 17:47:38.237058 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts 2025-12-08T17:47:38.435593803+00:00 stderr F I1208 17:47:38.435525 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts was successful 2025-12-08T17:47:38.435749688+00:00 stderr F I1208 17:47:38.435724 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts 2025-12-08T17:47:38.635188177+00:00 stderr F I1208 17:47:38.635107 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts was successful 2025-12-08T17:47:38.635188177+00:00 stderr F I1208 17:47:38.635165 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni 2025-12-08T17:47:38.835903595+00:00 stderr F I1208 17:47:38.835842 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni was successful 2025-12-08T17:47:38.835983947+00:00 stderr F I1208 17:47:38.835972 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni 2025-12-08T17:47:39.034281539+00:00 stderr F I1208 17:47:39.034213 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni was successful 2025-12-08T17:47:39.034281539+00:00 stderr F I1208 17:47:39.034252 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project 2025-12-08T17:47:39.234248675+00:00 stderr F I1208 17:47:39.234149 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project was successful 2025-12-08T17:47:39.234248675+00:00 stderr F I1208 17:47:39.234219 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist 2025-12-08T17:47:39.433111775+00:00 stderr F I1208 17:47:39.433034 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist was successful 2025-12-08T17:47:39.433111775+00:00 stderr F I1208 17:47:39.433079 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources 2025-12-08T17:47:39.634855715+00:00 stderr F I1208 17:47:39.634795 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources was successful 2025-12-08T17:47:39.635024540+00:00 stderr F I1208 17:47:39.634999 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config 2025-12-08T17:47:39.834739027+00:00 stderr F I1208 17:47:39.834649 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config was successful 2025-12-08T17:47:39.834739027+00:00 stderr F I1208 17:47:39.834712 1 log.go:245] reconciling (/v1, Kind=ConfigMap) 
openshift-multus/multus-daemon-config 2025-12-08T17:47:40.035848648+00:00 stderr F I1208 17:47:40.035719 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config was successful 2025-12-08T17:47:40.035848648+00:00 stderr F I1208 17:47:40.035799 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus 2025-12-08T17:47:40.262091839+00:00 stderr F I1208 17:47:40.262039 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus was successful 2025-12-08T17:47:40.262218933+00:00 stderr F I1208 17:47:40.262200 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins 2025-12-08T17:47:40.461945421+00:00 stderr F I1208 17:47:40.461814 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins was successful 2025-12-08T17:47:40.462005053+00:00 stderr F I1208 17:47:40.461957 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa 2025-12-08T17:47:40.636423513+00:00 stderr F I1208 17:47:40.636333 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa was successful 2025-12-08T17:47:40.636423513+00:00 stderr F I1208 17:47:40.636401 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role 2025-12-08T17:47:40.834436877+00:00 stderr F I1208 17:47:40.834399 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role was successful 2025-12-08T17:47:40.834519609+00:00 stderr F I1208 17:47:40.834509 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding 2025-12-08T17:47:41.034702680+00:00 stderr F I1208 17:47:41.034598 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding was successful 2025-12-08T17:47:41.034702680+00:00 stderr F I1208 17:47:41.034666 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon 2025-12-08T17:47:41.244443613+00:00 stderr F I1208 17:47:41.244314 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon was successful 2025-12-08T17:47:41.244443613+00:00 stderr F I1208 17:47:41.244368 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network 2025-12-08T17:47:41.439815663+00:00 stderr F I1208 17:47:41.439753 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network was successful 2025-12-08T17:47:41.439868925+00:00 stderr F I1208 17:47:41.439825 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/network-metrics-service 2025-12-08T17:47:41.636572857+00:00 stderr F I1208 17:47:41.636533 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/network-metrics-service was successful 2025-12-08T17:47:41.636660209+00:00 stderr F I1208 17:47:41.636646 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:47:41.833520626+00:00 stderr F I1208 17:47:41.833411 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:47:41.833520626+00:00 stderr F I1208 17:47:41.833464 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:47:42.035498854+00:00 stderr F I1208 
17:47:42.035438 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:47:42.035642970+00:00 stderr F I1208 17:47:42.035621 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/multus-admission-controller 2025-12-08T17:47:42.236621195+00:00 stderr F I1208 17:47:42.236566 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/multus-admission-controller was successful 2025-12-08T17:47:42.236621195+00:00 stderr F I1208 17:47:42.236608 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ac 2025-12-08T17:47:42.435047422+00:00 stderr F I1208 17:47:42.434459 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ac was successful 2025-12-08T17:47:42.435047422+00:00 stderr F I1208 17:47:42.434968 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook 2025-12-08T17:47:42.633137748+00:00 stderr F I1208 17:47:42.633067 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook was successful 2025-12-08T17:47:42.633137748+00:00 stderr F I1208 17:47:42.633111 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook 2025-12-08T17:47:42.833902427+00:00 stderr F I1208 17:47:42.833806 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook was successful 2025-12-08T17:47:42.833943618+00:00 stderr F I1208 17:47:42.833927 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io 2025-12-08T17:47:43.043056841+00:00 stderr F I1208 17:47:43.042967 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io was successful 2025-12-08T17:47:43.043056841+00:00 stderr F I1208 17:47:43.043034 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller 2025-12-08T17:47:43.243896404+00:00 stderr F I1208 17:47:43.243667 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller was successful 2025-12-08T17:47:43.244005387+00:00 stderr F I1208 17:47:43.243986 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller 2025-12-08T17:47:43.434432702+00:00 stderr F I1208 17:47:43.434358 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller was successful 2025-12-08T17:47:43.434432702+00:00 stderr F I1208 17:47:43.434409 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:47:43.633773297+00:00 stderr F I1208 17:47:43.633707 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:47:43.633773297+00:00 stderr F I1208 17:47:43.633755 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:47:43.835725454+00:00 stderr F I1208 17:47:43.835674 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:47:43.835820907+00:00 stderr F I1208 17:47:43.835807 1 log.go:245] reconciling 
(monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules 2025-12-08T17:47:44.039231000+00:00 stderr F I1208 17:47:44.039121 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules was successful 2025-12-08T17:47:44.039231000+00:00 stderr F I1208 17:47:44.039191 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-ovn-kubernetes 2025-12-08T17:47:44.236976354+00:00 stderr F I1208 17:47:44.236898 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-ovn-kubernetes was successful 2025-12-08T17:47:44.236976354+00:00 stderr F I1208 17:47:44.236951 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org 2025-12-08T17:47:44.448691419+00:00 stderr F I1208 17:47:44.448603 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org was successful 2025-12-08T17:47:44.448691419+00:00 stderr F I1208 17:47:44.448669 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org 2025-12-08T17:47:44.639511116+00:00 stderr F I1208 17:47:44.639425 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org was successful 2025-12-08T17:47:44.639511116+00:00 stderr F I1208 17:47:44.639473 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org 2025-12-08T17:47:44.848703521+00:00 stderr F I1208 17:47:44.848646 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org was successful 2025-12-08T17:47:44.848735352+00:00 stderr F I1208 17:47:44.848703 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org 2025-12-08T17:47:45.045380912+00:00 stderr F I1208 17:47:45.045284 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org was successful 2025-12-08T17:47:45.045380912+00:00 stderr F I1208 17:47:45.045350 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org 2025-12-08T17:47:45.239480902+00:00 stderr F I1208 17:47:45.239388 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org was successful 2025-12-08T17:47:45.239480902+00:00 stderr F I1208 17:47:45.239464 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:47:45.491493725+00:00 stderr F I1208 17:47:45.491430 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:47:45.491641010+00:00 stderr F I1208 17:47:45.491619 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:47:45.681745144+00:00 stderr F I1208 17:47:45.681636 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:47:45.681745144+00:00 stderr F I1208 17:47:45.681696 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io 2025-12-08T17:47:45.842229486+00:00 
stderr F I1208 17:47:45.840818 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io was successful 2025-12-08T17:47:45.842229486+00:00 stderr F I1208 17:47:45.840859 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org 2025-12-08T17:47:46.053780896+00:00 stderr F I1208 17:47:46.053697 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:47:46.053833007+00:00 stderr F I1208 17:47:46.053781 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org 2025-12-08T17:47:46.286244844+00:00 stderr F I1208 17:47:46.286178 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:47:46.286280945+00:00 stderr F I1208 17:47:46.286247 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:47:46.432558669+00:00 stderr F I1208 17:47:46.432499 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:47:46.432558669+00:00 stderr F I1208 17:47:46.432536 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited 2025-12-08T17:47:46.634685732+00:00 stderr F I1208 17:47:46.634618 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:47:46.634685732+00:00 stderr F I1208 17:47:46.634666 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited 2025-12-08T17:47:46.833787709+00:00 stderr F I1208 17:47:46.833697 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited was successful 2025-12-08T17:47:46.833787709+00:00 stderr F I1208 17:47:46.833743 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited 2025-12-08T17:47:47.038045209+00:00 stderr F I1208 17:47:47.037961 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:47:47.038106901+00:00 stderr F I1208 17:47:47.038039 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited 2025-12-08T17:47:47.235749093+00:00 stderr F I1208 17:47:47.235683 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited was successful 2025-12-08T17:47:47.235749093+00:00 stderr F I1208 17:47:47.235734 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy 2025-12-08T17:47:47.440673504+00:00 stderr F I1208 17:47:47.440306 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy was successful 2025-12-08T17:47:47.440673504+00:00 stderr F I1208 17:47:47.440351 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy 
2025-12-08T17:47:47.634967140+00:00 stderr F I1208 17:47:47.634911 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy was successful 2025-12-08T17:47:47.635019621+00:00 stderr F I1208 17:47:47.634965 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config 2025-12-08T17:47:47.835934936+00:00 stderr F I1208 17:47:47.835328 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config was successful 2025-12-08T17:47:47.835934936+00:00 stderr F I1208 17:47:47.835872 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:47:48.035376634+00:00 stderr F I1208 17:47:48.035240 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:47:48.035376634+00:00 stderr F I1208 17:47:48.035310 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:47:48.237255469+00:00 stderr F I1208 17:47:48.237182 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:47:48.237255469+00:00 stderr F I1208 17:47:48.237230 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:47:48.434678445+00:00 stderr F I1208 17:47:48.434578 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:47:48.434678445+00:00 stderr F I1208 17:47:48.434621 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:47:48.633428591+00:00 stderr F I1208 17:47:48.633358 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:47:48.633428591+00:00 stderr F I1208 17:47:48.633413 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:47:48.833927182+00:00 stderr F I1208 17:47:48.833855 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:47:48.833985374+00:00 stderr F I1208 17:47:48.833926 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn 2025-12-08T17:47:49.034198907+00:00 stderr F I1208 17:47:49.034123 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn was successful 2025-12-08T17:47:49.034198907+00:00 stderr F I1208 17:47:49.034170 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer 2025-12-08T17:47:49.232796069+00:00 stderr F I1208 17:47:49.232725 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer was successful 2025-12-08T17:47:49.232796069+00:00 stderr F I1208 17:47:49.232766 1 log.go:245] reconciling (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes 
2025-12-08T17:47:49.436162770+00:00 stderr F I1208 17:47:49.436041 1 log.go:245] Apply / Create of (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes was successful 2025-12-08T17:47:49.436162770+00:00 stderr F I1208 17:47:49.436095 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader 2025-12-08T17:47:49.635332840+00:00 stderr F I1208 17:47:49.635259 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader was successful 2025-12-08T17:47:49.635332840+00:00 stderr F I1208 17:47:49.635299 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib 2025-12-08T17:47:49.840102536+00:00 stderr F I1208 17:47:49.839966 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib was successful 2025-12-08T17:47:49.840102536+00:00 stderr F I1208 17:47:49.840044 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor 2025-12-08T17:47:50.035788026+00:00 stderr F I1208 17:47:50.035692 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor was successful 2025-12-08T17:47:50.035788026+00:00 stderr F I1208 17:47:50.035768 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer 2025-12-08T17:47:50.234479150+00:00 stderr F I1208 17:47:50.234417 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer was successful 2025-12-08T17:47:50.234515431+00:00 stderr F I1208 17:47:50.234478 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules 2025-12-08T17:47:50.436337084+00:00 stderr F I1208 17:47:50.436265 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules was successful 2025-12-08T17:47:50.436337084+00:00 stderr F I1208 17:47:50.436309 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules 2025-12-08T17:47:50.642654739+00:00 stderr F I1208 17:47:50.642553 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules was successful 2025-12-08T17:47:50.642654739+00:00 stderr F I1208 17:47:50.642634 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features 2025-12-08T17:47:50.833705413+00:00 stderr F I1208 17:47:50.833626 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features was successful 2025-12-08T17:47:50.833705413+00:00 stderr F I1208 17:47:50.833687 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics 2025-12-08T17:47:51.035061912+00:00 stderr F I1208 17:47:51.035002 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics was successful 2025-12-08T17:47:51.035061912+00:00 stderr F I1208 17:47:51.035050 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:47:51.234423007+00:00 stderr F I1208 17:47:51.234350 1 log.go:245] Apply / Create of (/v1, Kind=Service) 
openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:47:51.234423007+00:00 stderr F I1208 17:47:51.234411 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node 2025-12-08T17:47:51.436519659+00:00 stderr F I1208 17:47:51.436425 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node was successful 2025-12-08T17:47:51.436519659+00:00 stderr F I1208 17:47:51.436477 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:47:51.637897548+00:00 stderr F I1208 17:47:51.637793 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:47:51.637897548+00:00 stderr F I1208 17:47:51.637840 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:47:51.834851498+00:00 stderr F I1208 17:47:51.834765 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:47:51.834851498+00:00 stderr F I1208 17:47:51.834828 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:47:52.035372051+00:00 stderr F I1208 17:47:52.035266 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:47:52.035372051+00:00 stderr F I1208 17:47:52.035331 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-host-network 2025-12-08T17:47:52.238061618+00:00 stderr F I1208 17:47:52.237960 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-host-network was successful 2025-12-08T17:47:52.238061618+00:00 stderr F I1208 17:47:52.238015 1 log.go:245] reconciling (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas 2025-12-08T17:47:52.435805781+00:00 stderr F I1208 17:47:52.435717 1 log.go:245] Apply / Create of (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas was successful 2025-12-08T17:47:52.435805781+00:00 stderr F I1208 17:47:52.435755 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane 2025-12-08T17:47:52.645575708+00:00 stderr F I1208 17:47:52.645492 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane was successful 2025-12-08T17:47:52.645575708+00:00 stderr F I1208 17:47:52.645546 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node 2025-12-08T17:47:52.877155654+00:00 stderr F I1208 17:47:52.877041 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node was successful 2025-12-08T17:47:52.877155654+00:00 stderr F I1208 17:47:52.877117 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label 2025-12-08T17:47:53.037344951+00:00 stderr F I1208 17:47:53.037264 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label was successful 2025-12-08T17:47:53.037344951+00:00 stderr F I1208 17:47:53.037307 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding 
2025-12-08T17:47:53.235543007+00:00 stderr F I1208 17:47:53.235422 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding was successful 2025-12-08T17:47:53.235725413+00:00 stderr F I1208 17:47:53.235659 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-diagnostics 2025-12-08T17:47:53.440545520+00:00 stderr F I1208 17:47:53.440469 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-diagnostics was successful 2025-12-08T17:47:53.440545520+00:00 stderr F I1208 17:47:53.440528 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:47:53.633080074+00:00 stderr F I1208 17:47:53.633032 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:47:53.633080074+00:00 stderr F I1208 17:47:53.633071 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:47:53.835609363+00:00 stderr F I1208 17:47:53.835515 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:47:53.835609363+00:00 stderr F I1208 17:47:53.835555 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:47:54.033617513+00:00 stderr F I1208 17:47:54.033526 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:47:54.033617513+00:00 stderr F I1208 17:47:54.033566 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics 2025-12-08T17:47:54.236319006+00:00 stderr F I1208 17:47:54.236215 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics was successful 2025-12-08T17:47:54.236319006+00:00 stderr F I1208 17:47:54.236255 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics 2025-12-08T17:47:54.435585535+00:00 stderr F I1208 17:47:54.435496 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics was successful 2025-12-08T17:47:54.435585535+00:00 stderr F I1208 17:47:54.435563 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics 2025-12-08T17:47:54.635805242+00:00 stderr F I1208 17:47:54.635713 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics was successful 2025-12-08T17:47:54.635805242+00:00 stderr F I1208 17:47:54.635790 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source 2025-12-08T17:47:54.847371044+00:00 stderr F I1208 17:47:54.847301 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:47:54.847544279+00:00 stderr F I1208 17:47:54.847518 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-source 2025-12-08T17:47:55.033446663+00:00 stderr F I1208 17:47:55.033323 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:47:55.033446663+00:00 stderr F I1208 
17:47:55.033372 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source 2025-12-08T17:47:55.239386124+00:00 stderr F I1208 17:47:55.238488 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:47:55.239386124+00:00 stderr F I1208 17:47:55.238543 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:47:55.435036744+00:00 stderr F I1208 17:47:55.434847 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:47:55.435036744+00:00 stderr F I1208 17:47:55.434938 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:47:55.633462137+00:00 stderr F I1208 17:47:55.633393 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:47:55.633462137+00:00 stderr F I1208 17:47:55.633448 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target 2025-12-08T17:47:55.837827820+00:00 stderr F I1208 17:47:55.837761 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:47:55.837827820+00:00 stderr F I1208 17:47:55.837816 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-target 2025-12-08T17:47:56.036685527+00:00 stderr F I1208 17:47:56.036611 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:47:56.036742078+00:00 stderr F I1208 17:47:56.036679 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role 2025-12-08T17:47:56.236541344+00:00 stderr F I1208 17:47:56.236440 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role was successful 2025-12-08T17:47:56.236541344+00:00 stderr F I1208 17:47:56.236521 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding 2025-12-08T17:47:56.434162192+00:00 stderr F I1208 17:47:56.434057 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding was successful 2025-12-08T17:47:56.434162192+00:00 stderr F I1208 17:47:56.434129 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-node-identity 2025-12-08T17:47:56.639567718+00:00 stderr F I1208 17:47:56.639479 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-node-identity was successful 2025-12-08T17:47:56.639567718+00:00 stderr F I1208 17:47:56.639553 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity 2025-12-08T17:47:56.834676250+00:00 stderr F I1208 17:47:56.834586 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:47:56.834676250+00:00 stderr F I1208 17:47:56.834657 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity 
2025-12-08T17:47:57.035933099+00:00 stderr F I1208 17:47:57.035423 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity was successful 2025-12-08T17:47:57.035989041+00:00 stderr F I1208 17:47:57.035952 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity 2025-12-08T17:47:57.234618961+00:00 stderr F I1208 17:47:57.233954 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity was successful 2025-12-08T17:47:57.234618961+00:00 stderr F I1208 17:47:57.234585 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:47:57.435428957+00:00 stderr F I1208 17:47:57.433611 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:47:57.435428957+00:00 stderr F I1208 17:47:57.433677 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:47:57.634109758+00:00 stderr F I1208 17:47:57.634000 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:47:57.634109758+00:00 stderr F I1208 17:47:57.634058 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 2025-12-08T17:47:57.834732197+00:00 stderr F I1208 17:47:57.834674 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 was successful 2025-12-08T17:47:57.834763668+00:00 stderr F I1208 17:47:57.834731 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm 2025-12-08T17:47:58.033210673+00:00 stderr F I1208 17:47:58.033049 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm was successful 2025-12-08T17:47:58.033210673+00:00 stderr F I1208 17:47:58.033095 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity 2025-12-08T17:47:58.425996387+00:00 stderr F I1208 17:47:58.425789 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:47:58.425996387+00:00 stderr F I1208 17:47:58.425830 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io 2025-12-08T17:47:58.434697370+00:00 stderr F I1208 17:47:58.434658 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io was successful 2025-12-08T17:47:58.434724981+00:00 stderr F I1208 17:47:58.434715 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity 2025-12-08T17:47:58.643640002+00:00 stderr F I1208 17:47:58.643555 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:47:58.643640002+00:00 stderr F I1208 17:47:58.643620 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) 
openshift-network-operator/openshift-network-operator-ipsec-rules 2025-12-08T17:47:58.835939650+00:00 stderr F I1208 17:47:58.835791 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules was successful 2025-12-08T17:47:58.835939650+00:00 stderr F I1208 17:47:58.835854 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter 2025-12-08T17:47:59.033863608+00:00 stderr F I1208 17:47:59.033786 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter was successful 2025-12-08T17:47:59.033863608+00:00 stderr F I1208 17:47:59.033845 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter 2025-12-08T17:47:59.236267973+00:00 stderr F I1208 17:47:59.236214 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:47:59.236267973+00:00 stderr F I1208 17:47:59.236250 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter 2025-12-08T17:47:59.435160650+00:00 stderr F I1208 17:47:59.434752 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter was successful 2025-12-08T17:47:59.435160650+00:00 stderr F I1208 17:47:59.434802 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script 2025-12-08T17:47:59.632651535+00:00 stderr F I1208 17:47:59.632607 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script was successful 2025-12-08T17:47:59.632651535+00:00 stderr F I1208 17:47:59.632646 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter 2025-12-08T17:47:59.840176154+00:00 stderr F I1208 17:47:59.840109 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:47:59.840176154+00:00 stderr F I1208 17:47:59.840150 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-console 2025-12-08T17:48:00.035488093+00:00 stderr F I1208 17:48:00.035407 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-console was successful 2025-12-08T17:48:00.035488093+00:00 stderr F I1208 17:48:00.035455 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin 2025-12-08T17:48:00.233015689+00:00 stderr F I1208 17:48:00.232965 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:48:00.233015689+00:00 stderr F I1208 17:48:00.233005 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin 2025-12-08T17:48:00.441561039+00:00 stderr F I1208 17:48:00.441493 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:48:00.441561039+00:00 stderr F I1208 17:48:00.441540 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-console/networking-console-plugin 2025-12-08T17:48:00.633742993+00:00 stderr F I1208 17:48:00.633674 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:48:00.633742993+00:00 stderr F I1208 17:48:00.633714 1 log.go:245] reconciling 
(console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin 2025-12-08T17:48:00.835457376+00:00 stderr F I1208 17:48:00.835355 1 log.go:245] Apply / Create of (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin was successful 2025-12-08T17:48:00.855706969+00:00 stderr F I1208 17:48:00.855640 1 log.go:245] Operconfig Controller complete 2025-12-08T17:49:05.360577064+00:00 stderr F I1208 17:49:05.360463 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-network-node-identity/network-node-identity 2025-12-08T17:49:05.361250248+00:00 stderr F I1208 17:49:05.361175 1 log.go:245] successful reconciliation 2025-12-08T17:49:08.558388008+00:00 stderr F I1208 17:49:08.558345 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/ovn 2025-12-08T17:49:08.558799043+00:00 stderr F I1208 17:49:08.558776 1 log.go:245] successful reconciliation 2025-12-08T17:49:10.356626710+00:00 stderr F I1208 17:49:10.356530 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/signer 2025-12-08T17:49:10.356844218+00:00 stderr F I1208 17:49:10.356821 1 log.go:245] successful reconciliation 2025-12-08T17:49:29.689012949+00:00 stderr F I1208 17:49:29.688727 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:49:29.697154992+00:00 stderr F I1208 17:49:29.696488 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:49:29.702689947+00:00 stderr F I1208 17:49:29.702624 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:49:29.709823862+00:00 stderr F I1208 17:49:29.709746 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:49:29.715067436+00:00 stderr F I1208 17:49:29.714984 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:49:29.726762170+00:00 stderr F I1208 17:49:29.726641 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:49:29.733072415+00:00 stderr F I1208 17:49:29.732919 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:49:29.742566997+00:00 stderr F I1208 17:49:29.742490 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:49:29.753571946+00:00 stderr F I1208 17:49:29.753518 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:49:29.759818008+00:00 stderr F I1208 17:49:29.759769 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:49:29.888496554+00:00 stderr F I1208 17:49:29.888412 1 log.go:245] The check 
PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:50:03.568717475+00:00 stderr F I1208 17:50:03.568607 1 log.go:245] Reconciling update to IngressController openshift-ingress-operator/default 2025-12-08T17:51:00.856774942+00:00 stderr F I1208 17:51:00.856704 1 log.go:245] Reconciling Network.operator.openshift.io cluster 2025-12-08T17:51:01.020049880+00:00 stderr F I1208 17:51:01.019965 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu-host=, the list of nodes are [] 2025-12-08T17:51:01.021833562+00:00 stderr F I1208 17:51:01.021796 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu=, the list of nodes are [] 2025-12-08T17:51:01.023615453+00:00 stderr F I1208 17:51:01.023582 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/smart-nic=, the list of nodes are [] 2025-12-08T17:51:01.025064145+00:00 stderr F I1208 17:51:01.025015 1 ovn_kubernetes.go:956] OVN configuration is now &{GatewayMode: HyperShiftConfig:0xc00551f440 DisableUDPAggregation:false DpuHostModeLabel:network.operator.openshift.io/dpu-host DpuHostModeNodes:[] DpuHostModeValue: DpuModeLabel:network.operator.openshift.io/dpu DpuModeNodes:[] SmartNicModeLabel:network.operator.openshift.io/smart-nic SmartNicModeNodes:[] SmartNicModeValue: MgmtPortResourceName: ConfigOverrides:map[]} 2025-12-08T17:51:01.029095372+00:00 stderr F I1208 17:51:01.029050 1 ovn_kubernetes.go:1728] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete; 1/1 scheduled; 1 available; generation 3 -> 3 2025-12-08T17:51:01.029095372+00:00 stderr F I1208 17:51:01.029074 1 ovn_kubernetes.go:1733] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete 2025-12-08T17:51:01.029095372+00:00 stderr F I1208 17:51:01.029080 1 ovn_kubernetes.go:1248] ovnkube-control-plane deployment status: progressing=false 2025-12-08T17:51:01.031863102+00:00 stderr F I1208 17:51:01.031764 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 2 -> 2 2025-12-08T17:51:01.031863102+00:00 stderr F I1208 17:51:01.031788 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T17:51:01.031863102+00:00 stderr F I1208 17:51:01.031795 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 2 -> 2 2025-12-08T17:51:01.031863102+00:00 stderr F I1208 17:51:01.031801 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T17:51:01.031863102+00:00 stderr F I1208 17:51:01.031815 1 ovn_kubernetes.go:1279] ovnkube-node DaemonSet status: progressing=false 2025-12-08T17:51:01.035446916+00:00 stderr F I1208 17:51:01.035370 1 ovn_kubernetes.go:1321] Found the DefaultV4MasqueradeSubnet(169.254.0.0/17) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:51:01.035446916+00:00 stderr F I1208 17:51:01.035393 1 ovn_kubernetes.go:1318] Found the DefaultV6MasqueradeSubnet(fd69::/112) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:51:01.072911861+00:00 stderr F I1208 17:51:01.072831 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T17:51:01.086907036+00:00 stderr F I1208 17:51:01.086857 1 log.go:245] Apply / Create of (operator.openshift.io/v1, 
Kind=Network) /cluster was successful 2025-12-08T17:51:01.086907036+00:00 stderr F I1208 17:51:01.086902 1 log.go:245] Starting render phase 2025-12-08T17:51:01.099658005+00:00 stderr F I1208 17:51:01.099598 1 ovn_kubernetes.go:344] OVN_EGRESSIP_HEALTHCHECK_PORT env var is not defined. Using: 9107 2025-12-08T17:51:01.136826852+00:00 stderr F I1208 17:51:01.136751 1 ovn_kubernetes.go:1457] IP family mode: node=single-stack, controlPlane=single-stack 2025-12-08T17:51:01.136826852+00:00 stderr F I1208 17:51:01.136777 1 ovn_kubernetes.go:1429] IP family change: updateNode=true, updateControlPlane=true 2025-12-08T17:51:01.136826852+00:00 stderr F I1208 17:51:01.136799 1 ovn_kubernetes.go:1601] OVN-Kubernetes control-plane and node already at release version 4.20.1; no changes required 2025-12-08T17:51:01.136892174+00:00 stderr F I1208 17:51:01.136827 1 ovn_kubernetes.go:531] ovnk components: ovnkube-node: isRunning=true, update=true; ovnkube-control-plane: isRunning=true, update=true 2025-12-08T17:51:01.275767095+00:00 stderr F I1208 17:51:01.275657 1 ovn_kubernetes.go:1693] daemonset openshift-network-node-identity/network-node-identity rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 1 -> 1 2025-12-08T17:51:01.275767095+00:00 stderr F I1208 17:51:01.275684 1 ovn_kubernetes.go:1698] daemonset openshift-network-node-identity/network-node-identity rollout complete 2025-12-08T17:51:01.483436628+00:00 stderr F I1208 17:51:01.483350 1 log.go:245] Render phase done, rendered 126 objects 2025-12-08T17:51:01.494043675+00:00 stderr F I1208 17:51:01.493966 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster 2025-12-08T17:51:01.498265178+00:00 stderr F I1208 17:51:01.498221 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster was successful 2025-12-08T17:51:01.498286269+00:00 stderr F I1208 17:51:01.498258 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io 2025-12-08T17:51:01.504505759+00:00 stderr F I1208 17:51:01.504434 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io was successful 2025-12-08T17:51:01.504505759+00:00 stderr F I1208 17:51:01.504496 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io 2025-12-08T17:51:01.510412310+00:00 stderr F I1208 17:51:01.510360 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io was successful 2025-12-08T17:51:01.510412310+00:00 stderr F I1208 17:51:01.510397 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io 2025-12-08T17:51:01.516693741+00:00 stderr F I1208 17:51:01.516604 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io was successful 2025-12-08T17:51:01.516693741+00:00 stderr F I1208 17:51:01.516631 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io 2025-12-08T17:51:01.523642103+00:00 stderr F I1208 17:51:01.523609 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io was successful 2025-12-08T17:51:01.523642103+00:00 stderr F I1208 
17:51:01.523635 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-multus 2025-12-08T17:51:01.528285667+00:00 stderr F I1208 17:51:01.528193 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-multus was successful 2025-12-08T17:51:01.528285667+00:00 stderr F I1208 17:51:01.528225 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus 2025-12-08T17:51:01.531718177+00:00 stderr F I1208 17:51:01.531347 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus was successful 2025-12-08T17:51:01.531718177+00:00 stderr F I1208 17:51:01.531711 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools 2025-12-08T17:51:01.535610529+00:00 stderr F I1208 17:51:01.535575 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools was successful 2025-12-08T17:51:01.535656961+00:00 stderr F I1208 17:51:01.535629 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus 2025-12-08T17:51:01.539863123+00:00 stderr F I1208 17:51:01.539817 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus was successful 2025-12-08T17:51:01.539863123+00:00 stderr F I1208 17:51:01.539852 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient 2025-12-08T17:51:01.542869440+00:00 stderr F I1208 17:51:01.542837 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient was successful 2025-12-08T17:51:01.542904871+00:00 stderr F I1208 17:51:01.542872 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group 2025-12-08T17:51:01.701287377+00:00 stderr F I1208 17:51:01.701222 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group was successful 2025-12-08T17:51:01.701287377+00:00 stderr F I1208 17:51:01.701267 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools 2025-12-08T17:51:01.908974501+00:00 stderr F I1208 17:51:01.907324 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools was successful 2025-12-08T17:51:01.908974501+00:00 stderr F I1208 17:51:01.907380 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools 2025-12-08T17:51:02.099074216+00:00 stderr F I1208 17:51:02.099014 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools was successful 2025-12-08T17:51:02.099074216+00:00 stderr F I1208 17:51:02.099052 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers 2025-12-08T17:51:02.299686426+00:00 stderr F I1208 17:51:02.299606 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers was successful 2025-12-08T17:51:02.299686426+00:00 stderr F I1208 17:51:02.299652 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts 2025-12-08T17:51:02.498573565+00:00 stderr F I1208 17:51:02.498521 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts was successful 2025-12-08T17:51:02.498601406+00:00 stderr F I1208 17:51:02.498579 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts 
2025-12-08T17:51:02.698835474+00:00 stderr F I1208 17:51:02.698765 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts was successful 2025-12-08T17:51:02.698835474+00:00 stderr F I1208 17:51:02.698809 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni 2025-12-08T17:51:02.900036820+00:00 stderr F I1208 17:51:02.899937 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni was successful 2025-12-08T17:51:02.900036820+00:00 stderr F I1208 17:51:02.899987 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni 2025-12-08T17:51:03.098624771+00:00 stderr F I1208 17:51:03.098552 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni was successful 2025-12-08T17:51:03.098624771+00:00 stderr F I1208 17:51:03.098604 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project 2025-12-08T17:51:03.299594961+00:00 stderr F I1208 17:51:03.299523 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project was successful 2025-12-08T17:51:03.299594961+00:00 stderr F I1208 17:51:03.299572 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist 2025-12-08T17:51:03.499439388+00:00 stderr F I1208 17:51:03.499360 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist was successful 2025-12-08T17:51:03.499492089+00:00 stderr F I1208 17:51:03.499424 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources 2025-12-08T17:51:03.699250763+00:00 stderr F I1208 17:51:03.699174 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources was successful 2025-12-08T17:51:03.699250763+00:00 stderr F I1208 17:51:03.699228 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config 2025-12-08T17:51:03.899702209+00:00 stderr F I1208 17:51:03.899629 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config was successful 2025-12-08T17:51:03.899702209+00:00 stderr F I1208 17:51:03.899686 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config 2025-12-08T17:51:04.099212926+00:00 stderr F I1208 17:51:04.099145 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config was successful 2025-12-08T17:51:04.099212926+00:00 stderr F I1208 17:51:04.099197 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus 2025-12-08T17:51:04.311354189+00:00 stderr F I1208 17:51:04.311293 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus was successful 2025-12-08T17:51:04.311388220+00:00 stderr F I1208 17:51:04.311356 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins 2025-12-08T17:51:04.513494783+00:00 stderr F I1208 17:51:04.513430 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins was successful 2025-12-08T17:51:04.513532694+00:00 stderr F I1208 17:51:04.513491 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa 2025-12-08T17:51:04.701000792+00:00 stderr F I1208 17:51:04.700952 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) 
openshift-multus/metrics-daemon-sa was successful 2025-12-08T17:51:04.701042564+00:00 stderr F I1208 17:51:04.701005 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role 2025-12-08T17:51:04.898304475+00:00 stderr F I1208 17:51:04.898224 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role was successful 2025-12-08T17:51:04.898304475+00:00 stderr F I1208 17:51:04.898282 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding 2025-12-08T17:51:05.100554693+00:00 stderr F I1208 17:51:05.100483 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding was successful 2025-12-08T17:51:05.100602184+00:00 stderr F I1208 17:51:05.100573 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon 2025-12-08T17:51:05.306729073+00:00 stderr F I1208 17:51:05.306652 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon was successful 2025-12-08T17:51:05.306729073+00:00 stderr F I1208 17:51:05.306708 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network 2025-12-08T17:51:05.499857846+00:00 stderr F I1208 17:51:05.499772 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network was successful 2025-12-08T17:51:05.499857846+00:00 stderr F I1208 17:51:05.499822 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/network-metrics-service 2025-12-08T17:51:05.700788234+00:00 stderr F I1208 17:51:05.700742 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/network-metrics-service was successful 2025-12-08T17:51:05.700788234+00:00 stderr F I1208 17:51:05.700780 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:51:05.900923479+00:00 stderr F I1208 17:51:05.900797 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:51:05.900969981+00:00 stderr F I1208 17:51:05.900948 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:51:06.099659685+00:00 stderr F I1208 17:51:06.099595 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:51:06.099700246+00:00 stderr F I1208 17:51:06.099654 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/multus-admission-controller 2025-12-08T17:51:06.302018673+00:00 stderr F I1208 17:51:06.301837 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/multus-admission-controller was successful 2025-12-08T17:51:06.302018673+00:00 stderr F I1208 17:51:06.301918 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ac 2025-12-08T17:51:06.500425676+00:00 stderr F I1208 17:51:06.500345 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ac was successful 2025-12-08T17:51:06.500487107+00:00 stderr F I1208 17:51:06.500433 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook 2025-12-08T17:51:06.702655812+00:00 stderr F I1208 17:51:06.702573 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook 
was successful 2025-12-08T17:51:06.702655812+00:00 stderr F I1208 17:51:06.702620 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook 2025-12-08T17:51:06.900063638+00:00 stderr F I1208 17:51:06.899351 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook was successful 2025-12-08T17:51:06.900063638+00:00 stderr F I1208 17:51:06.899389 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io 2025-12-08T17:51:07.100133438+00:00 stderr F I1208 17:51:07.100072 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io was successful 2025-12-08T17:51:07.100133438+00:00 stderr F I1208 17:51:07.100121 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller 2025-12-08T17:51:07.308363320+00:00 stderr F I1208 17:51:07.308290 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller was successful 2025-12-08T17:51:07.308396651+00:00 stderr F I1208 17:51:07.308358 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller 2025-12-08T17:51:07.501487508+00:00 stderr F I1208 17:51:07.501420 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller was successful 2025-12-08T17:51:07.501487508+00:00 stderr F I1208 17:51:07.501466 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:51:07.699481004+00:00 stderr F I1208 17:51:07.698942 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:51:07.699481004+00:00 stderr F I1208 17:51:07.699437 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:51:07.900232096+00:00 stderr F I1208 17:51:07.900154 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:51:07.900232096+00:00 stderr F I1208 17:51:07.900220 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules 2025-12-08T17:51:08.102626853+00:00 stderr F I1208 17:51:08.102503 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules was successful 2025-12-08T17:51:08.102626853+00:00 stderr F I1208 17:51:08.102564 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-ovn-kubernetes 2025-12-08T17:51:08.303483286+00:00 stderr F I1208 17:51:08.302992 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-ovn-kubernetes was successful 2025-12-08T17:51:08.303483286+00:00 stderr F I1208 17:51:08.303471 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org 2025-12-08T17:51:08.506302360+00:00 stderr F I1208 17:51:08.505770 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org was successful 2025-12-08T17:51:08.506302360+00:00 stderr F I1208 17:51:08.505816 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org 2025-12-08T17:51:08.713226321+00:00 
stderr F I1208 17:51:08.713128 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org was successful 2025-12-08T17:51:08.713226321+00:00 stderr F I1208 17:51:08.713186 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org 2025-12-08T17:51:08.910234992+00:00 stderr F I1208 17:51:08.910126 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org was successful 2025-12-08T17:51:08.910234992+00:00 stderr F I1208 17:51:08.910195 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org 2025-12-08T17:51:09.111202716+00:00 stderr F I1208 17:51:09.111121 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org was successful 2025-12-08T17:51:09.111202716+00:00 stderr F I1208 17:51:09.111183 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org 2025-12-08T17:51:09.305862700+00:00 stderr F I1208 17:51:09.305786 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org was successful 2025-12-08T17:51:09.305862700+00:00 stderr F I1208 17:51:09.305848 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:51:09.531918449+00:00 stderr F I1208 17:51:09.531817 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:51:09.531918449+00:00 stderr F I1208 17:51:09.531898 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:51:09.746820157+00:00 stderr F I1208 17:51:09.746755 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:51:09.746820157+00:00 stderr F I1208 17:51:09.746798 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io 2025-12-08T17:51:09.905084194+00:00 stderr F I1208 17:51:09.905007 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io was successful 2025-12-08T17:51:09.905084194+00:00 stderr F I1208 17:51:09.905073 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org 2025-12-08T17:51:10.120512062+00:00 stderr F I1208 17:51:10.120297 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:51:10.120574803+00:00 stderr F I1208 17:51:10.120551 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org 2025-12-08T17:51:10.353115095+00:00 stderr F I1208 17:51:10.353059 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:51:10.353115095+00:00 stderr F I1208 17:51:10.353103 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:51:10.499797226+00:00 stderr F I1208 
17:51:10.499696 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:51:10.499797226+00:00 stderr F I1208 17:51:10.499746 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited 2025-12-08T17:51:10.698108158+00:00 stderr F I1208 17:51:10.697973 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:51:10.698108158+00:00 stderr F I1208 17:51:10.698018 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited 2025-12-08T17:51:10.899692552+00:00 stderr F I1208 17:51:10.899628 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited was successful 2025-12-08T17:51:10.899692552+00:00 stderr F I1208 17:51:10.899675 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited 2025-12-08T17:51:11.103440662+00:00 stderr F I1208 17:51:11.103352 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:51:11.103440662+00:00 stderr F I1208 17:51:11.103413 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited 2025-12-08T17:51:11.299626679+00:00 stderr F I1208 17:51:11.299553 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited was successful 2025-12-08T17:51:11.299626679+00:00 stderr F I1208 17:51:11.299607 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy 2025-12-08T17:51:11.499496286+00:00 stderr F I1208 17:51:11.499433 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy was successful 2025-12-08T17:51:11.499496286+00:00 stderr F I1208 17:51:11.499488 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy 2025-12-08T17:51:11.701699600+00:00 stderr F I1208 17:51:11.701611 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy was successful 2025-12-08T17:51:11.701699600+00:00 stderr F I1208 17:51:11.701653 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config 2025-12-08T17:51:11.900682553+00:00 stderr F I1208 17:51:11.900600 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config was successful 2025-12-08T17:51:11.900682553+00:00 stderr F I1208 17:51:11.900655 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:51:12.099210498+00:00 stderr F I1208 17:51:12.099146 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:51:12.099210498+00:00 stderr F I1208 17:51:12.099192 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:51:12.301670867+00:00 stderr F I1208 
17:51:12.301615 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:51:12.301670867+00:00 stderr F I1208 17:51:12.301659 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:51:12.498264291+00:00 stderr F I1208 17:51:12.498204 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:51:12.498264291+00:00 stderr F I1208 17:51:12.498241 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:51:12.700228472+00:00 stderr F I1208 17:51:12.700148 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:51:12.700228472+00:00 stderr F I1208 17:51:12.700202 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:51:12.900569586+00:00 stderr F I1208 17:51:12.900501 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:51:12.900569586+00:00 stderr F I1208 17:51:12.900560 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn 2025-12-08T17:51:13.100417152+00:00 stderr F I1208 17:51:13.100334 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn was successful 2025-12-08T17:51:13.100417152+00:00 stderr F I1208 17:51:13.100382 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer 2025-12-08T17:51:13.299531087+00:00 stderr F I1208 17:51:13.299446 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer was successful 2025-12-08T17:51:13.299531087+00:00 stderr F I1208 17:51:13.299493 1 log.go:245] reconciling (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes 2025-12-08T17:51:13.499162240+00:00 stderr F I1208 17:51:13.499076 1 log.go:245] Apply / Create of (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes was successful 2025-12-08T17:51:13.499162240+00:00 stderr F I1208 17:51:13.499117 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader 2025-12-08T17:51:13.700392469+00:00 stderr F I1208 17:51:13.700311 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader was successful 2025-12-08T17:51:13.700446490+00:00 stderr F I1208 17:51:13.700386 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib 2025-12-08T17:51:13.908113732+00:00 stderr F I1208 17:51:13.908036 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib was successful 2025-12-08T17:51:13.908113732+00:00 stderr F I1208 17:51:13.908100 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor 2025-12-08T17:51:14.099581914+00:00 stderr F I1208 17:51:14.099524 1 log.go:245] 
Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor was successful 2025-12-08T17:51:14.099581914+00:00 stderr F I1208 17:51:14.099571 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer 2025-12-08T17:51:14.299777726+00:00 stderr F I1208 17:51:14.299708 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer was successful 2025-12-08T17:51:14.299777726+00:00 stderr F I1208 17:51:14.299748 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules 2025-12-08T17:51:14.501291519+00:00 stderr F I1208 17:51:14.501224 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules was successful 2025-12-08T17:51:14.501291519+00:00 stderr F I1208 17:51:14.501270 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules 2025-12-08T17:51:14.702064310+00:00 stderr F I1208 17:51:14.701996 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules was successful 2025-12-08T17:51:14.702064310+00:00 stderr F I1208 17:51:14.702051 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features 2025-12-08T17:51:14.900100928+00:00 stderr F I1208 17:51:14.900038 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features was successful 2025-12-08T17:51:14.900100928+00:00 stderr F I1208 17:51:14.900087 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics 2025-12-08T17:51:15.101450189+00:00 stderr F I1208 17:51:15.101381 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics was successful 2025-12-08T17:51:15.101450189+00:00 stderr F I1208 17:51:15.101440 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:51:15.300609214+00:00 stderr F I1208 17:51:15.300542 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:51:15.300797307+00:00 stderr F I1208 17:51:15.300612 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node 2025-12-08T17:51:15.500361289+00:00 stderr F I1208 17:51:15.500295 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node was successful 2025-12-08T17:51:15.500361289+00:00 stderr F I1208 17:51:15.500345 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:51:15.699870630+00:00 stderr F I1208 17:51:15.699804 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:51:15.699937781+00:00 stderr F I1208 17:51:15.699888 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:51:15.898550218+00:00 stderr F I1208 17:51:15.898483 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:51:15.898550218+00:00 stderr F I1208 
17:51:15.898524 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:51:16.099094966+00:00 stderr F I1208 17:51:16.099035 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:51:16.099144127+00:00 stderr F I1208 17:51:16.099113 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-host-network 2025-12-08T17:51:16.302376757+00:00 stderr F I1208 17:51:16.302295 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-host-network was successful 2025-12-08T17:51:16.302376757+00:00 stderr F I1208 17:51:16.302343 1 log.go:245] reconciling (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas 2025-12-08T17:51:16.499311557+00:00 stderr F I1208 17:51:16.499253 1 log.go:245] Apply / Create of (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas was successful 2025-12-08T17:51:16.499311557+00:00 stderr F I1208 17:51:16.499302 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane 2025-12-08T17:51:16.707076921+00:00 stderr F I1208 17:51:16.707004 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane was successful 2025-12-08T17:51:16.707076921+00:00 stderr F I1208 17:51:16.707056 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node 2025-12-08T17:51:16.924919047+00:00 stderr F I1208 17:51:16.924823 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node was successful 2025-12-08T17:51:16.924919047+00:00 stderr F I1208 17:51:16.924870 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label 2025-12-08T17:51:17.099706370+00:00 stderr F I1208 17:51:17.099640 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label was successful 2025-12-08T17:51:17.099706370+00:00 stderr F I1208 17:51:17.099680 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding 2025-12-08T17:51:17.299265272+00:00 stderr F I1208 17:51:17.299148 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding was successful 2025-12-08T17:51:17.299265272+00:00 stderr F I1208 17:51:17.299195 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-diagnostics 2025-12-08T17:51:17.500548271+00:00 stderr F I1208 17:51:17.500472 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-diagnostics was successful 2025-12-08T17:51:17.500548271+00:00 stderr F I1208 17:51:17.500519 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:51:17.699931090+00:00 stderr F I1208 17:51:17.699828 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:51:17.699931090+00:00 stderr F I1208 17:51:17.699899 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:51:17.900184964+00:00 stderr F I1208 17:51:17.900116 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) 
openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:51:17.900184964+00:00 stderr F I1208 17:51:17.900169 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:51:18.100659480+00:00 stderr F I1208 17:51:18.100563 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:51:18.100659480+00:00 stderr F I1208 17:51:18.100615 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics 2025-12-08T17:51:18.299262607+00:00 stderr F I1208 17:51:18.299157 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics was successful 2025-12-08T17:51:18.299262607+00:00 stderr F I1208 17:51:18.299203 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics 2025-12-08T17:51:18.499672743+00:00 stderr F I1208 17:51:18.499576 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics was successful 2025-12-08T17:51:18.499672743+00:00 stderr F I1208 17:51:18.499624 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics 2025-12-08T17:51:18.699751852+00:00 stderr F I1208 17:51:18.699662 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics was successful 2025-12-08T17:51:18.699751852+00:00 stderr F I1208 17:51:18.699716 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source 2025-12-08T17:51:18.905951802+00:00 stderr F I1208 17:51:18.905853 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:51:18.906004432+00:00 stderr F I1208 17:51:18.905983 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-source 2025-12-08T17:51:19.100941649+00:00 stderr F I1208 17:51:19.100841 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:51:19.100941649+00:00 stderr F I1208 17:51:19.100934 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source 2025-12-08T17:51:19.303183735+00:00 stderr F I1208 17:51:19.303088 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:51:19.303183735+00:00 stderr F I1208 17:51:19.303138 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:51:19.500594242+00:00 stderr F I1208 17:51:19.500504 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:51:19.500594242+00:00 stderr F I1208 17:51:19.500571 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:51:19.701048659+00:00 stderr F I1208 17:51:19.700937 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:51:19.701048659+00:00 stderr F I1208 17:51:19.701010 1 log.go:245] reconciling (apps/v1, 
Kind=DaemonSet) openshift-network-diagnostics/network-check-target 2025-12-08T17:51:19.904073176+00:00 stderr F I1208 17:51:19.904013 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:51:19.904073176+00:00 stderr F I1208 17:51:19.904054 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-target 2025-12-08T17:51:20.101500474+00:00 stderr F I1208 17:51:20.101421 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:51:20.101500474+00:00 stderr F I1208 17:51:20.101478 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role 2025-12-08T17:51:20.300262362+00:00 stderr F I1208 17:51:20.300198 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role was successful 2025-12-08T17:51:20.300308184+00:00 stderr F I1208 17:51:20.300261 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding 2025-12-08T17:51:20.502182153+00:00 stderr F I1208 17:51:20.502087 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding was successful 2025-12-08T17:51:20.502182153+00:00 stderr F I1208 17:51:20.502172 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-node-identity 2025-12-08T17:51:20.702741360+00:00 stderr F I1208 17:51:20.702655 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-node-identity was successful 2025-12-08T17:51:20.702741360+00:00 stderr F I1208 17:51:20.702707 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity 2025-12-08T17:51:20.901550420+00:00 stderr F I1208 17:51:20.901426 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:51:20.901550420+00:00 stderr F I1208 17:51:20.901495 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity 2025-12-08T17:51:21.100026265+00:00 stderr F I1208 17:51:21.099958 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity was successful 2025-12-08T17:51:21.100026265+00:00 stderr F I1208 17:51:21.100009 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity 2025-12-08T17:51:21.300157225+00:00 stderr F I1208 17:51:21.300076 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity was successful 2025-12-08T17:51:21.300157225+00:00 stderr F I1208 17:51:21.300137 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:51:21.499128048+00:00 stderr F I1208 17:51:21.499056 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:51:21.499128048+00:00 stderr F I1208 17:51:21.499108 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:51:21.699521983+00:00 stderr F I1208 17:51:21.699475 1 log.go:245] Apply / 
Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:51:21.699552354+00:00 stderr F I1208 17:51:21.699526 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 2025-12-08T17:51:21.899931229+00:00 stderr F I1208 17:51:21.899823 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 was successful 2025-12-08T17:51:21.899931229+00:00 stderr F I1208 17:51:21.899868 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm 2025-12-08T17:51:22.099218187+00:00 stderr F I1208 17:51:22.099165 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm was successful 2025-12-08T17:51:22.099218187+00:00 stderr F I1208 17:51:22.099207 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity 2025-12-08T17:51:22.299472399+00:00 stderr F I1208 17:51:22.299402 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:51:22.299472399+00:00 stderr F I1208 17:51:22.299448 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io 2025-12-08T17:51:22.499746193+00:00 stderr F I1208 17:51:22.499700 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io was successful 2025-12-08T17:51:22.499785264+00:00 stderr F I1208 17:51:22.499742 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity 2025-12-08T17:51:22.705557645+00:00 stderr F I1208 17:51:22.705509 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:51:22.705581315+00:00 stderr F I1208 17:51:22.705562 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules 2025-12-08T17:51:22.900181948+00:00 stderr F I1208 17:51:22.900105 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules was successful 2025-12-08T17:51:22.900181948+00:00 stderr F I1208 17:51:22.900151 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter 2025-12-08T17:51:23.100007234+00:00 stderr F I1208 17:51:23.099953 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter was successful 2025-12-08T17:51:23.100007234+00:00 stderr F I1208 17:51:23.099993 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter 2025-12-08T17:51:23.299821999+00:00 stderr F I1208 17:51:23.299738 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:51:23.299821999+00:00 stderr F I1208 17:51:23.299800 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter 2025-12-08T17:51:23.502342350+00:00 stderr F I1208 17:51:23.501289 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter was successful 2025-12-08T17:51:23.502342350+00:00 stderr F I1208 17:51:23.501370 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script 2025-12-08T17:51:23.698974144+00:00 stderr F I1208 17:51:23.698895 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script was successful 2025-12-08T17:51:23.698974144+00:00 stderr F I1208 17:51:23.698943 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter 2025-12-08T17:51:23.904904788+00:00 stderr F I1208 17:51:23.902078 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:51:23.904904788+00:00 stderr F I1208 17:51:23.902119 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-console 2025-12-08T17:51:24.100675059+00:00 stderr F I1208 17:51:24.100575 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-console was successful 2025-12-08T17:51:24.100675059+00:00 stderr F I1208 17:51:24.100631 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin 2025-12-08T17:51:24.298639475+00:00 stderr F I1208 17:51:24.298586 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:51:24.298684306+00:00 stderr F I1208 17:51:24.298635 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin 2025-12-08T17:51:24.504626940+00:00 stderr F I1208 17:51:24.504202 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:51:24.504694071+00:00 stderr F I1208 17:51:24.504625 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-console/networking-console-plugin 2025-12-08T17:51:24.700955650+00:00 stderr F I1208 17:51:24.699020 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:51:24.700955650+00:00 stderr F I1208 17:51:24.699059 1 log.go:245] reconciling (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin 2025-12-08T17:51:24.902631436+00:00 stderr F I1208 17:51:24.902569 1 log.go:245] Apply / Create of (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin was successful 2025-12-08T17:51:24.918450651+00:00 stderr F I1208 17:51:24.918375 1 log.go:245] Operconfig Controller complete 2025-12-08T17:51:30.555645510+00:00 stderr F I1208 17:51:30.555575 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:52:44.943056068+00:00 stderr F I1208 17:52:44.943001 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:53:03.578950721+00:00 stderr F I1208 17:53:03.578847 1 log.go:245] Reconciling update to IngressController openshift-ingress-operator/default 2025-12-08T17:53:18.154420453+00:00 stderr F I1208 17:53:18.154268 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:53:25.234510633+00:00 stderr F I1208 17:53:25.233505 1 log.go:245] Reconciling Network.operator.openshift.io cluster 2025-12-08T17:53:25.386556237+00:00 stderr F I1208 17:53:25.386480 1 ovn_kubernetes.go:791] For Label 
network.operator.openshift.io/dpu-host=, the list of nodes are [] 2025-12-08T17:53:25.389060615+00:00 stderr F I1208 17:53:25.389023 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu=, the list of nodes are [] 2025-12-08T17:53:25.391292686+00:00 stderr F I1208 17:53:25.391073 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/smart-nic=, the list of nodes are [] 2025-12-08T17:53:25.393073605+00:00 stderr F I1208 17:53:25.392979 1 ovn_kubernetes.go:956] OVN configuration is now &{GatewayMode: HyperShiftConfig:0xc004ace540 DisableUDPAggregation:false DpuHostModeLabel:network.operator.openshift.io/dpu-host DpuHostModeNodes:[] DpuHostModeValue: DpuModeLabel:network.operator.openshift.io/dpu DpuModeNodes:[] SmartNicModeLabel:network.operator.openshift.io/smart-nic SmartNicModeNodes:[] SmartNicModeValue: MgmtPortResourceName: ConfigOverrides:map[]} 2025-12-08T17:53:25.397272229+00:00 stderr F I1208 17:53:25.397224 1 ovn_kubernetes.go:1728] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete; 1/1 scheduled; 1 available; generation 3 -> 3 2025-12-08T17:53:25.397272229+00:00 stderr F I1208 17:53:25.397240 1 ovn_kubernetes.go:1733] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete 2025-12-08T17:53:25.397272229+00:00 stderr F I1208 17:53:25.397246 1 ovn_kubernetes.go:1248] ovnkube-control-plane deployment status: progressing=false 2025-12-08T17:53:25.399729916+00:00 stderr F I1208 17:53:25.399648 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 2 -> 2 2025-12-08T17:53:25.399729916+00:00 stderr F I1208 17:53:25.399678 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T17:53:25.399729916+00:00 stderr F I1208 17:53:25.399686 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 2 -> 2 2025-12-08T17:53:25.399729916+00:00 stderr F I1208 17:53:25.399695 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T17:53:25.399729916+00:00 stderr F I1208 17:53:25.399714 1 ovn_kubernetes.go:1279] ovnkube-node DaemonSet status: progressing=false 2025-12-08T17:53:25.403626841+00:00 stderr F I1208 17:53:25.403590 1 ovn_kubernetes.go:1321] Found the DefaultV4MasqueradeSubnet(169.254.0.0/17) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:53:25.403626841+00:00 stderr F I1208 17:53:25.403620 1 ovn_kubernetes.go:1318] Found the DefaultV6MasqueradeSubnet(fd69::/112) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:53:25.457178667+00:00 stderr F I1208 17:53:25.457123 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T17:53:25.470583392+00:00 stderr F I1208 17:53:25.470519 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:53:25.470900930+00:00 stderr F I1208 17:53:25.470863 1 log.go:245] Apply / Create of (operator.openshift.io/v1, Kind=Network) /cluster was successful 2025-12-08T17:53:25.470919761+00:00 stderr F I1208 17:53:25.470908 1 log.go:245] Starting render phase 2025-12-08T17:53:25.483914105+00:00 stderr F I1208 17:53:25.483846 1 ovn_kubernetes.go:344] OVN_EGRESSIP_HEALTHCHECK_PORT env var is not defined. 
Using: 9107 2025-12-08T17:53:25.521234209+00:00 stderr F I1208 17:53:25.521155 1 ovn_kubernetes.go:1457] IP family mode: node=single-stack, controlPlane=single-stack 2025-12-08T17:53:25.521234209+00:00 stderr F I1208 17:53:25.521179 1 ovn_kubernetes.go:1429] IP family change: updateNode=true, updateControlPlane=true 2025-12-08T17:53:25.521234209+00:00 stderr F I1208 17:53:25.521204 1 ovn_kubernetes.go:1601] OVN-Kubernetes control-plane and node already at release version 4.20.1; no changes required 2025-12-08T17:53:25.521300321+00:00 stderr F I1208 17:53:25.521229 1 ovn_kubernetes.go:531] ovnk components: ovnkube-node: isRunning=true, update=true; ovnkube-control-plane: isRunning=true, update=true 2025-12-08T17:53:25.659900350+00:00 stderr F I1208 17:53:25.659782 1 ovn_kubernetes.go:1693] daemonset openshift-network-node-identity/network-node-identity rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 1 -> 1 2025-12-08T17:53:25.659900350+00:00 stderr F I1208 17:53:25.659828 1 ovn_kubernetes.go:1698] daemonset openshift-network-node-identity/network-node-identity rollout complete 2025-12-08T17:53:25.865497140+00:00 stderr F I1208 17:53:25.865443 1 log.go:245] Render phase done, rendered 126 objects 2025-12-08T17:53:25.884653711+00:00 stderr F I1208 17:53:25.884575 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster 2025-12-08T17:53:25.889614066+00:00 stderr F I1208 17:53:25.889561 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster was successful 2025-12-08T17:53:25.889614066+00:00 stderr F I1208 17:53:25.889600 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io 2025-12-08T17:53:25.896404840+00:00 stderr F I1208 17:53:25.896368 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io was successful 2025-12-08T17:53:25.896404840+00:00 stderr F I1208 17:53:25.896393 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io 2025-12-08T17:53:25.906185347+00:00 stderr F I1208 17:53:25.906153 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io was successful 2025-12-08T17:53:25.906185347+00:00 stderr F I1208 17:53:25.906176 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io 2025-12-08T17:53:25.914689348+00:00 stderr F I1208 17:53:25.914654 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io was successful 2025-12-08T17:53:25.914689348+00:00 stderr F I1208 17:53:25.914675 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io 2025-12-08T17:53:25.921777870+00:00 stderr F I1208 17:53:25.921717 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io was successful 2025-12-08T17:53:25.921777870+00:00 stderr F I1208 17:53:25.921740 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-multus 2025-12-08T17:53:25.929741687+00:00 stderr F I1208 17:53:25.929698 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-multus was successful 2025-12-08T17:53:25.929741687+00:00 stderr F I1208 
17:53:25.929719 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus 2025-12-08T17:53:25.934838765+00:00 stderr F I1208 17:53:25.934790 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus was successful 2025-12-08T17:53:25.934838765+00:00 stderr F I1208 17:53:25.934811 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools 2025-12-08T17:53:25.940098109+00:00 stderr F I1208 17:53:25.940066 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools was successful 2025-12-08T17:53:25.940122599+00:00 stderr F I1208 17:53:25.940114 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus 2025-12-08T17:53:25.943861841+00:00 stderr F I1208 17:53:25.943803 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus was successful 2025-12-08T17:53:25.943861841+00:00 stderr F I1208 17:53:25.943828 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient 2025-12-08T17:53:25.947013396+00:00 stderr F I1208 17:53:25.946969 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient was successful 2025-12-08T17:53:25.947013396+00:00 stderr F I1208 17:53:25.946991 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group 2025-12-08T17:53:26.089009227+00:00 stderr F I1208 17:53:26.088861 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group was successful 2025-12-08T17:53:26.089009227+00:00 stderr F I1208 17:53:26.088925 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools 2025-12-08T17:53:26.296903990+00:00 stderr F I1208 17:53:26.295207 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools was successful 2025-12-08T17:53:26.296903990+00:00 stderr F I1208 17:53:26.295245 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools 2025-12-08T17:53:26.488799968+00:00 stderr F I1208 17:53:26.488746 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools was successful 2025-12-08T17:53:26.488799968+00:00 stderr F I1208 17:53:26.488790 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers 2025-12-08T17:53:26.690080841+00:00 stderr F I1208 17:53:26.690007 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers was successful 2025-12-08T17:53:26.690139723+00:00 stderr F I1208 17:53:26.690087 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts 2025-12-08T17:53:26.889165774+00:00 stderr F I1208 17:53:26.889092 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts was successful 2025-12-08T17:53:26.889165774+00:00 stderr F I1208 17:53:26.889138 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts 2025-12-08T17:53:27.090791436+00:00 stderr F I1208 17:53:27.090720 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts was successful 2025-12-08T17:53:27.090791436+00:00 stderr F I1208 17:53:27.090780 1 log.go:245] reconciling 
(rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni 2025-12-08T17:53:27.289439847+00:00 stderr F I1208 17:53:27.289381 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni was successful 2025-12-08T17:53:27.289439847+00:00 stderr F I1208 17:53:27.289425 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni 2025-12-08T17:53:27.490705219+00:00 stderr F I1208 17:53:27.490614 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni was successful 2025-12-08T17:53:27.490748641+00:00 stderr F I1208 17:53:27.490714 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project 2025-12-08T17:53:27.688900609+00:00 stderr F I1208 17:53:27.688809 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project was successful 2025-12-08T17:53:27.688900609+00:00 stderr F I1208 17:53:27.688855 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist 2025-12-08T17:53:27.890577522+00:00 stderr F I1208 17:53:27.890455 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist was successful 2025-12-08T17:53:27.890577522+00:00 stderr F I1208 17:53:27.890503 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources 2025-12-08T17:53:28.091282890+00:00 stderr F I1208 17:53:28.091199 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources was successful 2025-12-08T17:53:28.091282890+00:00 stderr F I1208 17:53:28.091245 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config 2025-12-08T17:53:28.290995600+00:00 stderr F I1208 17:53:28.290934 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config was successful 2025-12-08T17:53:28.291023351+00:00 stderr F I1208 17:53:28.291001 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config 2025-12-08T17:53:28.491296556+00:00 stderr F I1208 17:53:28.490288 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config was successful 2025-12-08T17:53:28.491296556+00:00 stderr F I1208 17:53:28.491119 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus 2025-12-08T17:53:28.704069561+00:00 stderr F I1208 17:53:28.703998 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus was successful 2025-12-08T17:53:28.704120882+00:00 stderr F I1208 17:53:28.704082 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins 2025-12-08T17:53:28.903676329+00:00 stderr F I1208 17:53:28.903605 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins was successful 2025-12-08T17:53:28.903676329+00:00 stderr F I1208 17:53:28.903661 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa 2025-12-08T17:53:29.090196350+00:00 stderr F I1208 17:53:29.090120 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa was successful 2025-12-08T17:53:29.090196350+00:00 stderr F I1208 17:53:29.090179 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role 2025-12-08T17:53:29.293947720+00:00 stderr F I1208 17:53:29.291811 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role was successful 2025-12-08T17:53:29.293947720+00:00 stderr F I1208 17:53:29.292255 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding 2025-12-08T17:53:29.491933644+00:00 stderr F I1208 17:53:29.491839 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding was successful 2025-12-08T17:53:29.491988386+00:00 stderr F I1208 17:53:29.491932 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon 2025-12-08T17:53:29.699838357+00:00 stderr F I1208 17:53:29.699721 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon was successful 2025-12-08T17:53:29.699838357+00:00 stderr F I1208 17:53:29.699768 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network 2025-12-08T17:53:29.891928699+00:00 stderr F I1208 17:53:29.891813 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network was successful 2025-12-08T17:53:29.891928699+00:00 stderr F I1208 17:53:29.891867 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/network-metrics-service 2025-12-08T17:53:30.090267082+00:00 stderr F I1208 17:53:30.090162 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/network-metrics-service was successful 2025-12-08T17:53:30.090267082+00:00 stderr F I1208 17:53:30.090213 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:53:30.288998476+00:00 stderr F I1208 17:53:30.288936 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:53:30.288998476+00:00 stderr F I1208 17:53:30.288977 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:53:30.490269859+00:00 stderr F I1208 17:53:30.490186 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:53:30.490269859+00:00 stderr F I1208 17:53:30.490248 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/multus-admission-controller 2025-12-08T17:53:30.692501967+00:00 stderr F I1208 17:53:30.692401 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/multus-admission-controller was successful 2025-12-08T17:53:30.692501967+00:00 stderr F I1208 17:53:30.692469 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ac 2025-12-08T17:53:30.892478815+00:00 stderr F I1208 17:53:30.892400 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ac was successful 2025-12-08T17:53:30.892556687+00:00 stderr F I1208 17:53:30.892484 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook 2025-12-08T17:53:31.093776928+00:00 stderr F I1208 17:53:31.093684 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook was successful 2025-12-08T17:53:31.093776928+00:00 stderr F I1208 17:53:31.093739 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook 2025-12-08T17:53:31.292350447+00:00 stderr F I1208 17:53:31.292286 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook was successful 2025-12-08T17:53:31.292545712+00:00 stderr F I1208 17:53:31.292523 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io 2025-12-08T17:53:31.491901564+00:00 stderr F I1208 17:53:31.491836 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io was successful 2025-12-08T17:53:31.491994776+00:00 stderr F I1208 17:53:31.491981 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller 2025-12-08T17:53:31.696159808+00:00 stderr F I1208 17:53:31.696041 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller was successful 2025-12-08T17:53:31.696159808+00:00 stderr F I1208 17:53:31.696113 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller 2025-12-08T17:53:31.893599925+00:00 stderr F I1208 17:53:31.893494 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller was successful 2025-12-08T17:53:31.893599925+00:00 stderr F I1208 17:53:31.893567 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:53:32.092331239+00:00 stderr F I1208 17:53:32.092263 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:53:32.092511364+00:00 stderr F I1208 17:53:32.092480 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:53:32.290723064+00:00 stderr F I1208 17:53:32.290659 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:53:32.290763055+00:00 stderr F I1208 17:53:32.290718 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules 2025-12-08T17:53:32.492806688+00:00 stderr F I1208 17:53:32.492729 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules was successful 2025-12-08T17:53:32.492806688+00:00 stderr F I1208 17:53:32.492791 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-ovn-kubernetes 2025-12-08T17:53:32.694702458+00:00 stderr F I1208 17:53:32.694612 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-ovn-kubernetes was successful 2025-12-08T17:53:32.694702458+00:00 stderr F I1208 17:53:32.694669 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org 2025-12-08T17:53:32.901429679+00:00 stderr F I1208 17:53:32.901356 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org was successful 2025-12-08T17:53:32.901429679+00:00 stderr F I1208 17:53:32.901413 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org 2025-12-08T17:53:33.102060234+00:00 stderr F I1208 17:53:33.102004 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org was successful 2025-12-08T17:53:33.102142606+00:00 stderr F I1208 17:53:33.102060 1 log.go:245] reconciling (apiextensions.k8s.io/v1, 
Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org 2025-12-08T17:53:33.304281103+00:00 stderr F I1208 17:53:33.304209 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org was successful 2025-12-08T17:53:33.304334794+00:00 stderr F I1208 17:53:33.304280 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org 2025-12-08T17:53:33.509314088+00:00 stderr F I1208 17:53:33.509224 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org was successful 2025-12-08T17:53:33.509314088+00:00 stderr F I1208 17:53:33.509287 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org 2025-12-08T17:53:33.700262329+00:00 stderr F I1208 17:53:33.700204 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org was successful 2025-12-08T17:53:33.700491956+00:00 stderr F I1208 17:53:33.700459 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:53:33.928385802+00:00 stderr F I1208 17:53:33.928278 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:53:33.928385802+00:00 stderr F I1208 17:53:33.928370 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:53:34.129538701+00:00 stderr F I1208 17:53:34.129471 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:53:34.129538701+00:00 stderr F I1208 17:53:34.129513 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io 2025-12-08T17:53:34.294779974+00:00 stderr F I1208 17:53:34.294713 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io was successful 2025-12-08T17:53:34.294974959+00:00 stderr F I1208 17:53:34.294951 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org 2025-12-08T17:53:34.529331412+00:00 stderr F I1208 17:53:34.529244 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:53:34.529331412+00:00 stderr F I1208 17:53:34.529294 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org 2025-12-08T17:53:34.743417172+00:00 stderr F I1208 17:53:34.743308 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:53:34.743417172+00:00 stderr F I1208 17:53:34.743399 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:53:34.891469488+00:00 stderr F I1208 17:53:34.891381 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:53:34.891529680+00:00 stderr F I1208 17:53:34.891474 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) 
openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited 2025-12-08T17:53:35.095240329+00:00 stderr F I1208 17:53:35.093098 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:53:35.095240329+00:00 stderr F I1208 17:53:35.093828 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited 2025-12-08T17:53:35.291997469+00:00 stderr F I1208 17:53:35.291868 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited was successful 2025-12-08T17:53:35.291997469+00:00 stderr F I1208 17:53:35.291984 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited 2025-12-08T17:53:35.493589360+00:00 stderr F I1208 17:53:35.493476 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:53:35.493589360+00:00 stderr F I1208 17:53:35.493538 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited 2025-12-08T17:53:35.691692917+00:00 stderr F I1208 17:53:35.691536 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited was successful 2025-12-08T17:53:35.691692917+00:00 stderr F I1208 17:53:35.691602 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy 2025-12-08T17:53:35.890902563+00:00 stderr F I1208 17:53:35.890776 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy was successful 2025-12-08T17:53:35.890902563+00:00 stderr F I1208 17:53:35.890844 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy 2025-12-08T17:53:36.090852290+00:00 stderr F I1208 17:53:36.090749 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy was successful 2025-12-08T17:53:36.090852290+00:00 stderr F I1208 17:53:36.090797 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config 2025-12-08T17:53:36.292417330+00:00 stderr F I1208 17:53:36.292290 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config was successful 2025-12-08T17:53:36.292417330+00:00 stderr F I1208 17:53:36.292362 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:53:36.490128687+00:00 stderr F I1208 17:53:36.490028 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:53:36.490128687+00:00 stderr F I1208 17:53:36.490095 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:53:36.694830713+00:00 stderr F I1208 17:53:36.694781 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:53:36.694996727+00:00 stderr F I1208 17:53:36.694976 1 log.go:245] reconciling 
(rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:53:36.891336355+00:00 stderr F I1208 17:53:36.891279 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:53:36.891472039+00:00 stderr F I1208 17:53:36.891450 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:53:37.092418063+00:00 stderr F I1208 17:53:37.092320 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:53:37.092418063+00:00 stderr F I1208 17:53:37.092379 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:53:37.289705947+00:00 stderr F I1208 17:53:37.289611 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:53:37.289705947+00:00 stderr F I1208 17:53:37.289679 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn 2025-12-08T17:53:37.491690189+00:00 stderr F I1208 17:53:37.491557 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn was successful 2025-12-08T17:53:37.491690189+00:00 stderr F I1208 17:53:37.491642 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer 2025-12-08T17:53:37.692716435+00:00 stderr F I1208 17:53:37.692654 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer was successful 2025-12-08T17:53:37.692716435+00:00 stderr F I1208 17:53:37.692709 1 log.go:245] reconciling (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes 2025-12-08T17:53:37.893505715+00:00 stderr F I1208 17:53:37.893421 1 log.go:245] Apply / Create of (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes was successful 2025-12-08T17:53:37.893505715+00:00 stderr F I1208 17:53:37.893484 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader 2025-12-08T17:53:38.091462997+00:00 stderr F I1208 17:53:38.091367 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader was successful 2025-12-08T17:53:38.091462997+00:00 stderr F I1208 17:53:38.091446 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib 2025-12-08T17:53:38.292677838+00:00 stderr F I1208 17:53:38.292587 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib was successful 2025-12-08T17:53:38.292677838+00:00 stderr F I1208 17:53:38.292629 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor 2025-12-08T17:53:38.491350140+00:00 stderr F I1208 17:53:38.491247 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor was successful 2025-12-08T17:53:38.491350140+00:00 stderr F I1208 17:53:38.491314 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, 
Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer 2025-12-08T17:53:38.690655129+00:00 stderr F I1208 17:53:38.690569 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer was successful 2025-12-08T17:53:38.690655129+00:00 stderr F I1208 17:53:38.690636 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules 2025-12-08T17:53:38.893336301+00:00 stderr F I1208 17:53:38.893247 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules was successful 2025-12-08T17:53:38.893390232+00:00 stderr F I1208 17:53:38.893329 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules 2025-12-08T17:53:39.096613547+00:00 stderr F I1208 17:53:39.096556 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules was successful 2025-12-08T17:53:39.096613547+00:00 stderr F I1208 17:53:39.096606 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features 2025-12-08T17:53:39.291108886+00:00 stderr F I1208 17:53:39.290999 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features was successful 2025-12-08T17:53:39.291108886+00:00 stderr F I1208 17:53:39.291067 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics 2025-12-08T17:53:39.491595737+00:00 stderr F I1208 17:53:39.491495 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics was successful 2025-12-08T17:53:39.491595737+00:00 stderr F I1208 17:53:39.491562 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:53:39.690936897+00:00 stderr F I1208 17:53:39.690837 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:53:39.691002759+00:00 stderr F I1208 17:53:39.690930 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node 2025-12-08T17:53:39.893949487+00:00 stderr F I1208 17:53:39.893845 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node was successful 2025-12-08T17:53:39.894025959+00:00 stderr F I1208 17:53:39.893971 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:53:40.093642207+00:00 stderr F I1208 17:53:40.093597 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:53:40.093738930+00:00 stderr F I1208 17:53:40.093727 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:53:40.290571711+00:00 stderr F I1208 17:53:40.290501 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:53:40.290607022+00:00 stderr F I1208 17:53:40.290574 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:53:40.491017112+00:00 stderr F I1208 17:53:40.490952 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:53:40.491162586+00:00 stderr F I1208 17:53:40.491138 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-host-network 2025-12-08T17:53:40.693200869+00:00 stderr F I1208 17:53:40.693140 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-host-network was successful 2025-12-08T17:53:40.693389314+00:00 stderr F I1208 17:53:40.693367 1 log.go:245] reconciling (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas 2025-12-08T17:53:40.892394995+00:00 stderr F I1208 17:53:40.892297 1 log.go:245] Apply / Create of (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas was successful 2025-12-08T17:53:40.892394995+00:00 stderr F I1208 17:53:40.892369 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane 2025-12-08T17:53:41.101312356+00:00 stderr F I1208 17:53:41.101254 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:41.101432359+00:00 stderr F I1208 17:53:41.101411 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:41.101844130+00:00 stderr F I1208 17:53:41.101774 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:53:41.103041232+00:00 stderr F I1208 17:53:41.102947 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane was successful 2025-12-08T17:53:41.103041232+00:00 stderr F I1208 17:53:41.103018 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node 2025-12-08T17:53:41.118167954+00:00 stderr F I1208 17:53:41.118105 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:41.118282597+00:00 stderr F I1208 17:53:41.118261 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:41.178648258+00:00 stderr F I1208 17:53:41.178577 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:41.178648258+00:00 stderr F I1208 17:53:41.178626 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:41.184396815+00:00 stderr F I1208 17:53:41.184339 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:53:41.185432283+00:00 stderr F I1208 17:53:41.184755 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:53:41.185432283+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:53:41.185432283+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:53:41.185432283+00:00 stderr F reason: Unknown 2025-12-08T17:53:41.185432283+00:00 stderr F status: "False" 2025-12-08T17:53:41.185432283+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:53:41.185432283+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:53:41.185432283+00:00 stderr F status: "False" 2025-12-08T17:53:41.185432283+00:00 stderr F type: Degraded 2025-12-08T17:53:41.185432283+00:00 stderr F - 
lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:53:41.185432283+00:00 stderr F status: "True" 2025-12-08T17:53:41.185432283+00:00 stderr F type: Upgradeable 2025-12-08T17:53:41.185432283+00:00 stderr F - lastTransitionTime: "2025-12-08T17:53:41Z" 2025-12-08T17:53:41.185432283+00:00 stderr F message: Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" update is 2025-12-08T17:53:41.185432283+00:00 stderr F being processed (generation 4, observed generation 3) 2025-12-08T17:53:41.185432283+00:00 stderr F reason: Deploying 2025-12-08T17:53:41.185432283+00:00 stderr F status: "True" 2025-12-08T17:53:41.185432283+00:00 stderr F type: Progressing 2025-12-08T17:53:41.185432283+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:53:41.185432283+00:00 stderr F status: "True" 2025-12-08T17:53:41.185432283+00:00 stderr F type: Available 2025-12-08T17:53:41.188530327+00:00 stderr F I1208 17:53:41.188486 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:41.188555848+00:00 stderr F I1208 17:53:41.188528 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:41.210904896+00:00 stderr F I1208 17:53:41.208610 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:53:41.210904896+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:53:41.210904896+00:00 stderr F status: "False" 2025-12-08T17:53:41.210904896+00:00 stderr F type: Degraded 2025-12-08T17:53:41.210904896+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:53:41.210904896+00:00 stderr F status: "True" 2025-12-08T17:53:41.210904896+00:00 stderr F type: Upgradeable 2025-12-08T17:53:41.210904896+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:53:41.210904896+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:53:41.210904896+00:00 stderr F reason: Unknown 2025-12-08T17:53:41.210904896+00:00 stderr F status: "False" 2025-12-08T17:53:41.210904896+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:53:41.210904896+00:00 stderr F - lastTransitionTime: "2025-12-08T17:53:41Z" 2025-12-08T17:53:41.210904896+00:00 stderr F message: Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" update is 2025-12-08T17:53:41.210904896+00:00 stderr F being processed (generation 4, observed generation 3) 2025-12-08T17:53:41.210904896+00:00 stderr F reason: Deploying 2025-12-08T17:53:41.210904896+00:00 stderr F status: "True" 2025-12-08T17:53:41.210904896+00:00 stderr F type: Progressing 2025-12-08T17:53:41.210904896+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:53:41.210904896+00:00 stderr F status: "True" 2025-12-08T17:53:41.210904896+00:00 stderr F type: Available 2025-12-08T17:53:41.218201174+00:00 stderr F I1208 17:53:41.218127 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:41.218201174+00:00 stderr F I1208 17:53:41.218151 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:41.271555205+00:00 stderr F I1208 17:53:41.270913 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:53:41.271555205+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:53:41.271555205+00:00 stderr 
F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:53:41.271555205+00:00 stderr F reason: Unknown 2025-12-08T17:53:41.271555205+00:00 stderr F status: "False" 2025-12-08T17:53:41.271555205+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:53:41.271555205+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:53:41.271555205+00:00 stderr F status: "False" 2025-12-08T17:53:41.271555205+00:00 stderr F type: Degraded 2025-12-08T17:53:41.271555205+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:53:41.271555205+00:00 stderr F status: "True" 2025-12-08T17:53:41.271555205+00:00 stderr F type: Upgradeable 2025-12-08T17:53:41.271555205+00:00 stderr F - lastTransitionTime: "2025-12-08T17:53:41Z" 2025-12-08T17:53:41.271555205+00:00 stderr F message: Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" is not available 2025-12-08T17:53:41.271555205+00:00 stderr F (awaiting 1 nodes) 2025-12-08T17:53:41.271555205+00:00 stderr F reason: Deploying 2025-12-08T17:53:41.271555205+00:00 stderr F status: "True" 2025-12-08T17:53:41.271555205+00:00 stderr F type: Progressing 2025-12-08T17:53:41.271555205+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:53:41.271555205+00:00 stderr F status: "True" 2025-12-08T17:53:41.271555205+00:00 stderr F type: Available 2025-12-08T17:53:41.271916365+00:00 stderr F I1208 17:53:41.271868 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:53:41.304271964+00:00 stderr F I1208 17:53:41.304192 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:53:41.304271964+00:00 stderr F I1208 17:53:41.304222 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:53:41.304271964+00:00 stderr F I1208 17:53:41.304233 1 warnings.go:110] "Warning: spec.template.spec.containers[3].ports[0]: duplicate port name \"https\" with spec.template.spec.containers[2].ports[0], services and probes that select ports by name will use spec.template.spec.containers[2].ports[0]" 2025-12-08T17:53:41.306960087+00:00 stderr F I1208 17:53:41.305684 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node was successful 2025-12-08T17:53:41.306960087+00:00 stderr F I1208 17:53:41.305729 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label 2025-12-08T17:53:41.316896758+00:00 stderr F I1208 17:53:41.316841 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:53:41.316896758+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:53:41.316896758+00:00 stderr F status: "False" 2025-12-08T17:53:41.316896758+00:00 stderr F type: Degraded 2025-12-08T17:53:41.316896758+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:53:41.316896758+00:00 stderr F status: "True" 2025-12-08T17:53:41.316896758+00:00 stderr F type: Upgradeable 2025-12-08T17:53:41.316896758+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:53:41.316896758+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:53:41.316896758+00:00 stderr F reason: Unknown 2025-12-08T17:53:41.316896758+00:00 stderr F status: "False" 2025-12-08T17:53:41.316896758+00:00 stderr F type: ManagementStateDegraded 
2025-12-08T17:53:41.316896758+00:00 stderr F - lastTransitionTime: "2025-12-08T17:53:41Z" 2025-12-08T17:53:41.316896758+00:00 stderr F message: Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" is not available 2025-12-08T17:53:41.316896758+00:00 stderr F (awaiting 1 nodes) 2025-12-08T17:53:41.316896758+00:00 stderr F reason: Deploying 2025-12-08T17:53:41.316896758+00:00 stderr F status: "True" 2025-12-08T17:53:41.316896758+00:00 stderr F type: Progressing 2025-12-08T17:53:41.316896758+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:53:41.316896758+00:00 stderr F status: "True" 2025-12-08T17:53:41.316896758+00:00 stderr F type: Available 2025-12-08T17:53:41.352066324+00:00 stderr F I1208 17:53:41.351535 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:53:41.352066324+00:00 stderr F I1208 17:53:41.351562 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:53:41.490061906+00:00 stderr F I1208 17:53:41.489853 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label was successful 2025-12-08T17:53:41.490061906+00:00 stderr F I1208 17:53:41.489927 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding 2025-12-08T17:53:41.690054204+00:00 stderr F I1208 17:53:41.689507 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding was successful 2025-12-08T17:53:41.690097365+00:00 stderr F I1208 17:53:41.690052 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-diagnostics 2025-12-08T17:53:41.726494935+00:00 stderr F I1208 17:53:41.726412 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:53:41.726494935+00:00 stderr F I1208 17:53:41.726447 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:53:41.738233994+00:00 stderr F I1208 17:53:41.737903 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus updated, re-generating status 2025-12-08T17:53:41.738233994+00:00 stderr F I1208 17:53:41.737936 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus updated, re-generating status 2025-12-08T17:53:41.743709793+00:00 stderr F I1208 17:53:41.743586 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:53:41.743709793+00:00 stderr F I1208 17:53:41.743613 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:53:41.890722040+00:00 stderr F I1208 17:53:41.890661 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-diagnostics was successful 2025-12-08T17:53:41.890722040+00:00 stderr F I1208 17:53:41.890710 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:53:42.090014279+00:00 stderr F I1208 17:53:42.089970 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:53:42.090044360+00:00 stderr F I1208 17:53:42.090010 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) 
openshift-network-diagnostics/network-diagnostics 2025-12-08T17:53:42.128495396+00:00 stderr F I1208 17:53:42.128406 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:53:42.128495396+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:53:42.128495396+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:53:42.128495396+00:00 stderr F reason: Unknown 2025-12-08T17:53:42.128495396+00:00 stderr F status: "False" 2025-12-08T17:53:42.128495396+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:53:42.128495396+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:53:42.128495396+00:00 stderr F status: "False" 2025-12-08T17:53:42.128495396+00:00 stderr F type: Degraded 2025-12-08T17:53:42.128495396+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:53:42.128495396+00:00 stderr F status: "True" 2025-12-08T17:53:42.128495396+00:00 stderr F type: Upgradeable 2025-12-08T17:53:42.128495396+00:00 stderr F - lastTransitionTime: "2025-12-08T17:53:41Z" 2025-12-08T17:53:42.128495396+00:00 stderr F message: |- 2025-12-08T17:53:42.128495396+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" update is rolling out (0 out of 1 updated) 2025-12-08T17:53:42.128495396+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" is not available (awaiting 1 nodes) 2025-12-08T17:53:42.128495396+00:00 stderr F reason: Deploying 2025-12-08T17:53:42.128495396+00:00 stderr F status: "True" 2025-12-08T17:53:42.128495396+00:00 stderr F type: Progressing 2025-12-08T17:53:42.128495396+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:53:42.128495396+00:00 stderr F status: "True" 2025-12-08T17:53:42.128495396+00:00 stderr F type: Available 2025-12-08T17:53:42.132624947+00:00 stderr F I1208 17:53:42.132590 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:53:42.290610333+00:00 stderr F I1208 17:53:42.289901 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:53:42.290610333+00:00 stderr F I1208 17:53:42.289956 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:53:42.492572184+00:00 stderr F I1208 17:53:42.492500 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:53:42.492572184+00:00 stderr F I1208 17:53:42.492561 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics 2025-12-08T17:53:42.524077061+00:00 stderr F I1208 17:53:42.523775 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:53:42.524077061+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:53:42.524077061+00:00 stderr F status: "False" 2025-12-08T17:53:42.524077061+00:00 stderr F type: Degraded 2025-12-08T17:53:42.524077061+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:53:42.524077061+00:00 stderr F status: "True" 2025-12-08T17:53:42.524077061+00:00 stderr F type: Upgradeable 2025-12-08T17:53:42.524077061+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:53:42.524077061+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 
2025-12-08T17:53:42.524077061+00:00 stderr F reason: Unknown 2025-12-08T17:53:42.524077061+00:00 stderr F status: "False" 2025-12-08T17:53:42.524077061+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:53:42.524077061+00:00 stderr F - lastTransitionTime: "2025-12-08T17:53:41Z" 2025-12-08T17:53:42.524077061+00:00 stderr F message: |- 2025-12-08T17:53:42.524077061+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" update is rolling out (0 out of 1 updated) 2025-12-08T17:53:42.524077061+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" is not available (awaiting 1 nodes) 2025-12-08T17:53:42.524077061+00:00 stderr F reason: Deploying 2025-12-08T17:53:42.524077061+00:00 stderr F status: "True" 2025-12-08T17:53:42.524077061+00:00 stderr F type: Progressing 2025-12-08T17:53:42.524077061+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:53:42.524077061+00:00 stderr F status: "True" 2025-12-08T17:53:42.524077061+00:00 stderr F type: Available 2025-12-08T17:53:42.690251239+00:00 stderr F I1208 17:53:42.690160 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics was successful 2025-12-08T17:53:42.690251239+00:00 stderr F I1208 17:53:42.690219 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics 2025-12-08T17:53:42.742386487+00:00 stderr F I1208 17:53:42.742338 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus updated, re-generating status 2025-12-08T17:53:42.742457229+00:00 stderr F I1208 17:53:42.742444 1 pod_watcher.go:132] Operand /, Kind= openshift-multus/multus updated, re-generating status 2025-12-08T17:53:42.790133546+00:00 stderr F I1208 17:53:42.789613 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:42.790200387+00:00 stderr F I1208 17:53:42.790190 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-control-plane updated, re-generating status 2025-12-08T17:53:42.889321712+00:00 stderr F I1208 17:53:42.889251 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics was successful 2025-12-08T17:53:42.889321712+00:00 stderr F I1208 17:53:42.889298 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics 2025-12-08T17:53:43.088364984+00:00 stderr F I1208 17:53:43.088302 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics was successful 2025-12-08T17:53:43.088364984+00:00 stderr F I1208 17:53:43.088352 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source 2025-12-08T17:53:43.292034122+00:00 stderr F I1208 17:53:43.291978 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:53:43.292034122+00:00 stderr F I1208 17:53:43.292024 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-source 2025-12-08T17:53:43.330862508+00:00 stderr F I1208 17:53:43.330794 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:53:43.330862508+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:53:43.330862508+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:53:43.330862508+00:00 stderr F reason: Unknown 
2025-12-08T17:53:43.330862508+00:00 stderr F status: "False" 2025-12-08T17:53:43.330862508+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:53:43.330862508+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:53:43.330862508+00:00 stderr F status: "False" 2025-12-08T17:53:43.330862508+00:00 stderr F type: Degraded 2025-12-08T17:53:43.330862508+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:53:43.330862508+00:00 stderr F status: "True" 2025-12-08T17:53:43.330862508+00:00 stderr F type: Upgradeable 2025-12-08T17:53:43.330862508+00:00 stderr F - lastTransitionTime: "2025-12-08T17:53:41Z" 2025-12-08T17:53:43.330862508+00:00 stderr F message: |- 2025-12-08T17:53:43.330862508+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:53:43.330862508+00:00 stderr F DaemonSet "/openshift-multus/multus" is not available (awaiting 1 nodes) 2025-12-08T17:53:43.330862508+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" is not available (awaiting 1 nodes) 2025-12-08T17:53:43.330862508+00:00 stderr F reason: Deploying 2025-12-08T17:53:43.330862508+00:00 stderr F status: "True" 2025-12-08T17:53:43.330862508+00:00 stderr F type: Progressing 2025-12-08T17:53:43.330862508+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:53:43.330862508+00:00 stderr F status: "True" 2025-12-08T17:53:43.330862508+00:00 stderr F type: Available 2025-12-08T17:53:43.331061233+00:00 stderr F I1208 17:53:43.331021 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:53:43.488896165+00:00 stderr F I1208 17:53:43.488841 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:53:43.488964286+00:00 stderr F I1208 17:53:43.488954 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source 2025-12-08T17:53:43.691021390+00:00 stderr F I1208 17:53:43.690982 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:53:43.691102792+00:00 stderr F I1208 17:53:43.691093 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:53:43.718223850+00:00 stderr F I1208 17:53:43.717977 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:53:43.718223850+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:53:43.718223850+00:00 stderr F status: "False" 2025-12-08T17:53:43.718223850+00:00 stderr F type: Degraded 2025-12-08T17:53:43.718223850+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:53:43.718223850+00:00 stderr F status: "True" 2025-12-08T17:53:43.718223850+00:00 stderr F type: Upgradeable 2025-12-08T17:53:43.718223850+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:53:43.718223850+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:53:43.718223850+00:00 stderr F reason: Unknown 2025-12-08T17:53:43.718223850+00:00 stderr F status: "False" 2025-12-08T17:53:43.718223850+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:53:43.718223850+00:00 stderr F - lastTransitionTime: "2025-12-08T17:53:41Z" 2025-12-08T17:53:43.718223850+00:00 stderr F message: |- 
2025-12-08T17:53:43.718223850+00:00 stderr F DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 1 nodes) 2025-12-08T17:53:43.718223850+00:00 stderr F DaemonSet "/openshift-multus/multus" is not available (awaiting 1 nodes) 2025-12-08T17:53:43.718223850+00:00 stderr F Deployment "/openshift-ovn-kubernetes/ovnkube-control-plane" is not available (awaiting 1 nodes) 2025-12-08T17:53:43.718223850+00:00 stderr F reason: Deploying 2025-12-08T17:53:43.718223850+00:00 stderr F status: "True" 2025-12-08T17:53:43.718223850+00:00 stderr F type: Progressing 2025-12-08T17:53:43.718223850+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:53:43.718223850+00:00 stderr F status: "True" 2025-12-08T17:53:43.718223850+00:00 stderr F type: Available 2025-12-08T17:53:43.889848186+00:00 stderr F I1208 17:53:43.889748 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:53:43.889848186+00:00 stderr F I1208 17:53:43.889787 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:53:44.089201407+00:00 stderr F I1208 17:53:44.089139 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:53:44.089201407+00:00 stderr F I1208 17:53:44.089192 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target 2025-12-08T17:53:44.297388778+00:00 stderr F I1208 17:53:44.297332 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:53:44.297518921+00:00 stderr F I1208 17:53:44.297492 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-target 2025-12-08T17:53:44.492659427+00:00 stderr F I1208 17:53:44.492601 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:53:44.492793510+00:00 stderr F I1208 17:53:44.492771 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role 2025-12-08T17:53:44.544532947+00:00 stderr F I1208 17:53:44.544434 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:53:44.545457262+00:00 stderr F I1208 17:53:44.545410 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:53:44.545457262+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:53:44.545457262+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:53:44.545457262+00:00 stderr F reason: Unknown 2025-12-08T17:53:44.545457262+00:00 stderr F status: "False" 2025-12-08T17:53:44.545457262+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:53:44.545457262+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:53:44.545457262+00:00 stderr F status: "False" 2025-12-08T17:53:44.545457262+00:00 stderr F type: Degraded 2025-12-08T17:53:44.545457262+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:53:44.545457262+00:00 stderr F status: "True" 2025-12-08T17:53:44.545457262+00:00 stderr F type: Upgradeable 2025-12-08T17:53:44.545457262+00:00 stderr F - lastTransitionTime: "2025-12-08T17:53:41Z" 2025-12-08T17:53:44.545457262+00:00 stderr F message: DaemonSet 
"/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 2025-12-08T17:53:44.545457262+00:00 stderr F 1 nodes) 2025-12-08T17:53:44.545457262+00:00 stderr F reason: Deploying 2025-12-08T17:53:44.545457262+00:00 stderr F status: "True" 2025-12-08T17:53:44.545457262+00:00 stderr F type: Progressing 2025-12-08T17:53:44.545457262+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:53:44.545457262+00:00 stderr F status: "True" 2025-12-08T17:53:44.545457262+00:00 stderr F type: Available 2025-12-08T17:53:44.692233814+00:00 stderr F I1208 17:53:44.692124 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role was successful 2025-12-08T17:53:44.692296395+00:00 stderr F I1208 17:53:44.692231 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding 2025-12-08T17:53:44.891872232+00:00 stderr F I1208 17:53:44.890563 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding was successful 2025-12-08T17:53:44.891872232+00:00 stderr F I1208 17:53:44.890627 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-node-identity 2025-12-08T17:53:44.922969827+00:00 stderr F I1208 17:53:44.921362 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:53:44.922969827+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:53:44.922969827+00:00 stderr F status: "False" 2025-12-08T17:53:44.922969827+00:00 stderr F type: Degraded 2025-12-08T17:53:44.922969827+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:53:44.922969827+00:00 stderr F status: "True" 2025-12-08T17:53:44.922969827+00:00 stderr F type: Upgradeable 2025-12-08T17:53:44.922969827+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:53:44.922969827+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:53:44.922969827+00:00 stderr F reason: Unknown 2025-12-08T17:53:44.922969827+00:00 stderr F status: "False" 2025-12-08T17:53:44.922969827+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:53:44.922969827+00:00 stderr F - lastTransitionTime: "2025-12-08T17:53:41Z" 2025-12-08T17:53:44.922969827+00:00 stderr F message: DaemonSet "/openshift-ovn-kubernetes/ovnkube-node" is not available (awaiting 2025-12-08T17:53:44.922969827+00:00 stderr F 1 nodes) 2025-12-08T17:53:44.922969827+00:00 stderr F reason: Deploying 2025-12-08T17:53:44.922969827+00:00 stderr F status: "True" 2025-12-08T17:53:44.922969827+00:00 stderr F type: Progressing 2025-12-08T17:53:44.922969827+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:53:44.922969827+00:00 stderr F status: "True" 2025-12-08T17:53:44.922969827+00:00 stderr F type: Available 2025-12-08T17:53:45.093146624+00:00 stderr F I1208 17:53:45.093088 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-node-identity was successful 2025-12-08T17:53:45.093272738+00:00 stderr F I1208 17:53:45.093247 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity 2025-12-08T17:53:45.290231913+00:00 stderr F I1208 17:53:45.290181 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:53:45.290270644+00:00 stderr F I1208 
17:53:45.290231 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity 2025-12-08T17:53:45.491252709+00:00 stderr F I1208 17:53:45.491204 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity was successful 2025-12-08T17:53:45.491384332+00:00 stderr F I1208 17:53:45.491347 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity 2025-12-08T17:53:45.691451212+00:00 stderr F I1208 17:53:45.691344 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity was successful 2025-12-08T17:53:45.691629557+00:00 stderr F I1208 17:53:45.691602 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:53:45.891223084+00:00 stderr F I1208 17:53:45.891157 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:53:45.891223084+00:00 stderr F I1208 17:53:45.891202 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:53:46.089080423+00:00 stderr F I1208 17:53:46.088618 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:53:46.089113914+00:00 stderr F I1208 17:53:46.089083 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 2025-12-08T17:53:46.291466757+00:00 stderr F I1208 17:53:46.289809 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 was successful 2025-12-08T17:53:46.291466757+00:00 stderr F I1208 17:53:46.289894 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm 2025-12-08T17:53:46.489028898+00:00 stderr F I1208 17:53:46.488988 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm was successful 2025-12-08T17:53:46.489107920+00:00 stderr F I1208 17:53:46.489098 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity 2025-12-08T17:53:46.691787171+00:00 stderr F I1208 17:53:46.691675 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:53:46.691787171+00:00 stderr F I1208 17:53:46.691752 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io 2025-12-08T17:53:46.894117973+00:00 stderr F I1208 17:53:46.894049 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io was successful 2025-12-08T17:53:46.894117973+00:00 stderr F I1208 17:53:46.894092 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity 2025-12-08T17:53:47.097273666+00:00 stderr F I1208 17:53:47.096791 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:53:47.097377349+00:00 stderr F 
I1208 17:53:47.097364 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules 2025-12-08T17:53:47.291822417+00:00 stderr F I1208 17:53:47.291742 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules was successful 2025-12-08T17:53:47.291822417+00:00 stderr F I1208 17:53:47.291806 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter 2025-12-08T17:53:47.491226068+00:00 stderr F I1208 17:53:47.491107 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter was successful 2025-12-08T17:53:47.491226068+00:00 stderr F I1208 17:53:47.491167 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter 2025-12-08T17:53:47.690718672+00:00 stderr F I1208 17:53:47.690637 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:53:47.690718672+00:00 stderr F I1208 17:53:47.690685 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter 2025-12-08T17:53:47.892681804+00:00 stderr F I1208 17:53:47.892598 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter was successful 2025-12-08T17:53:47.892845458+00:00 stderr F I1208 17:53:47.892817 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script 2025-12-08T17:53:48.094444580+00:00 stderr F I1208 17:53:48.094330 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script was successful 2025-12-08T17:53:48.094444580+00:00 stderr F I1208 17:53:48.094408 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter 2025-12-08T17:53:48.293264216+00:00 stderr F I1208 17:53:48.293222 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:53:48.293383889+00:00 stderr F I1208 17:53:48.293373 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-console 2025-12-08T17:53:48.492181244+00:00 stderr F I1208 17:53:48.492130 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-console was successful 2025-12-08T17:53:48.492294587+00:00 stderr F I1208 17:53:48.492281 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin 2025-12-08T17:53:48.691920625+00:00 stderr F I1208 17:53:48.691804 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:53:48.691920625+00:00 stderr F I1208 17:53:48.691869 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin 2025-12-08T17:53:48.894182665+00:00 stderr F I1208 17:53:48.894110 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:53:48.894245847+00:00 stderr F I1208 17:53:48.894181 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-console/networking-console-plugin 2025-12-08T17:53:49.093136754+00:00 stderr F I1208 17:53:49.093048 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-console/networking-console-plugin was successful 
2025-12-08T17:53:49.093136754+00:00 stderr F I1208 17:53:49.093094 1 log.go:245] reconciling (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin 2025-12-08T17:53:49.296643047+00:00 stderr F I1208 17:53:49.296524 1 log.go:245] Apply / Create of (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin was successful 2025-12-08T17:53:49.316725023+00:00 stderr F I1208 17:53:49.316630 1 log.go:245] Operconfig Controller complete 2025-12-08T17:54:05.376248911+00:00 stderr F I1208 17:54:05.375609 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-network-node-identity/network-node-identity 2025-12-08T17:54:05.376993571+00:00 stderr F I1208 17:54:05.376960 1 log.go:245] successful reconciliation 2025-12-08T17:54:08.572804616+00:00 stderr F I1208 17:54:08.572720 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/ovn 2025-12-08T17:54:08.573400842+00:00 stderr F I1208 17:54:08.573280 1 log.go:245] successful reconciliation 2025-12-08T17:54:10.369169729+00:00 stderr F I1208 17:54:10.369103 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/signer 2025-12-08T17:54:10.369665793+00:00 stderr F I1208 17:54:10.369627 1 log.go:245] successful reconciliation 2025-12-08T17:54:21.882647209+00:00 stderr F I1208 17:54:21.882546 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:54:21.882647209+00:00 stderr F I1208 17:54:21.882598 1 pod_watcher.go:132] Operand /, Kind= openshift-ovn-kubernetes/ovnkube-node updated, re-generating status 2025-12-08T17:54:21.947091074+00:00 stderr F I1208 17:54:21.944900 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:54:21.948912123+00:00 stderr F I1208 17:54:21.948827 1 log.go:245] Network operator config updated with conditions: 2025-12-08T17:54:21.948912123+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:54:21.948912123+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:54:21.948912123+00:00 stderr F reason: Unknown 2025-12-08T17:54:21.948912123+00:00 stderr F status: "False" 2025-12-08T17:54:21.948912123+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:54:21.948912123+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:54:21.948912123+00:00 stderr F status: "False" 2025-12-08T17:54:21.948912123+00:00 stderr F type: Degraded 2025-12-08T17:54:21.948912123+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:54:21.948912123+00:00 stderr F status: "True" 2025-12-08T17:54:21.948912123+00:00 stderr F type: Upgradeable 2025-12-08T17:54:21.948912123+00:00 stderr F - lastTransitionTime: "2025-12-08T17:54:21Z" 2025-12-08T17:54:21.948912123+00:00 stderr F status: "False" 2025-12-08T17:54:21.948912123+00:00 stderr F type: Progressing 2025-12-08T17:54:21.948912123+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:54:21.948912123+00:00 stderr F status: "True" 2025-12-08T17:54:21.948912123+00:00 stderr F type: Available 2025-12-08T17:54:21.971604014+00:00 stderr F I1208 17:54:21.967915 1 log.go:245] ClusterOperator config status updated with conditions: 2025-12-08T17:54:21.971604014+00:00 stderr F - lastTransitionTime: "2025-12-08T17:44:40Z" 2025-12-08T17:54:21.971604014+00:00 stderr F status: "False" 2025-12-08T17:54:21.971604014+00:00 stderr F type: Degraded 2025-12-08T17:54:21.971604014+00:00 
stderr F - lastTransitionTime: "2025-11-02T07:50:03Z" 2025-12-08T17:54:21.971604014+00:00 stderr F status: "True" 2025-12-08T17:54:21.971604014+00:00 stderr F type: Upgradeable 2025-12-08T17:54:21.971604014+00:00 stderr F - lastTransitionTime: "2025-11-02T07:50:15Z" 2025-12-08T17:54:21.971604014+00:00 stderr F message: Unsupported management state "" for cluster-network-operator operator 2025-12-08T17:54:21.971604014+00:00 stderr F reason: Unknown 2025-12-08T17:54:21.971604014+00:00 stderr F status: "False" 2025-12-08T17:54:21.971604014+00:00 stderr F type: ManagementStateDegraded 2025-12-08T17:54:21.971604014+00:00 stderr F - lastTransitionTime: "2025-12-08T17:54:21Z" 2025-12-08T17:54:21.971604014+00:00 stderr F status: "False" 2025-12-08T17:54:21.971604014+00:00 stderr F type: Progressing 2025-12-08T17:54:21.971604014+00:00 stderr F - lastTransitionTime: "2025-11-02T07:51:40Z" 2025-12-08T17:54:21.971604014+00:00 stderr F status: "True" 2025-12-08T17:54:21.971604014+00:00 stderr F type: Available 2025-12-08T17:54:24.918759105+00:00 stderr F I1208 17:54:24.918688 1 log.go:245] Reconciling Network.operator.openshift.io cluster 2025-12-08T17:54:25.132062395+00:00 stderr F I1208 17:54:25.132012 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu-host=, the list of nodes are [] 2025-12-08T17:54:25.133646668+00:00 stderr F I1208 17:54:25.133616 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu=, the list of nodes are [] 2025-12-08T17:54:25.135414306+00:00 stderr F I1208 17:54:25.135363 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/smart-nic=, the list of nodes are [] 2025-12-08T17:54:25.137338927+00:00 stderr F I1208 17:54:25.137283 1 ovn_kubernetes.go:956] OVN configuration is now &{GatewayMode: HyperShiftConfig:0xc002c63140 DisableUDPAggregation:false DpuHostModeLabel:network.operator.openshift.io/dpu-host DpuHostModeNodes:[] DpuHostModeValue: DpuModeLabel:network.operator.openshift.io/dpu DpuModeNodes:[] SmartNicModeLabel:network.operator.openshift.io/smart-nic SmartNicModeNodes:[] SmartNicModeValue: MgmtPortResourceName: ConfigOverrides:map[]} 2025-12-08T17:54:25.142388913+00:00 stderr F I1208 17:54:25.142351 1 ovn_kubernetes.go:1728] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete; 1/1 scheduled; 1 available; generation 4 -> 4 2025-12-08T17:54:25.142388913+00:00 stderr F I1208 17:54:25.142375 1 ovn_kubernetes.go:1733] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete 2025-12-08T17:54:25.142409194+00:00 stderr F I1208 17:54:25.142386 1 ovn_kubernetes.go:1248] ovnkube-control-plane deployment status: progressing=false 2025-12-08T17:54:25.144898721+00:00 stderr F I1208 17:54:25.144846 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 3 -> 3 2025-12-08T17:54:25.144898721+00:00 stderr F I1208 17:54:25.144865 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T17:54:25.144898721+00:00 stderr F I1208 17:54:25.144871 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 3 -> 3 2025-12-08T17:54:25.144898721+00:00 stderr F I1208 17:54:25.144894 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T17:54:25.144918471+00:00 stderr F I1208 17:54:25.144910 1 ovn_kubernetes.go:1279] ovnkube-node DaemonSet status: 
progressing=false 2025-12-08T17:54:25.147820849+00:00 stderr F I1208 17:54:25.147780 1 ovn_kubernetes.go:1321] Found the DefaultV4MasqueradeSubnet(169.254.0.0/17) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:54:25.147820849+00:00 stderr F I1208 17:54:25.147802 1 ovn_kubernetes.go:1318] Found the DefaultV6MasqueradeSubnet(fd69::/112) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:54:25.150558813+00:00 stderr F I1208 17:54:25.150519 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T17:54:25.163462221+00:00 stderr F I1208 17:54:25.163400 1 log.go:245] Apply / Create of (operator.openshift.io/v1, Kind=Network) /cluster was successful 2025-12-08T17:54:25.163462221+00:00 stderr F I1208 17:54:25.163424 1 log.go:245] Starting render phase 2025-12-08T17:54:25.164922139+00:00 stderr F I1208 17:54:25.164845 1 log.go:245] Skipping reconcile of Network.operator.openshift.io: spec unchanged 2025-12-08T17:54:25.171955219+00:00 stderr F I1208 17:54:25.171825 1 ovn_kubernetes.go:344] OVN_EGRESSIP_HEALTHCHECK_PORT env var is not defined. Using: 9107 2025-12-08T17:54:25.204275308+00:00 stderr F I1208 17:54:25.204205 1 ovn_kubernetes.go:1457] IP family mode: node=single-stack, controlPlane=single-stack 2025-12-08T17:54:25.204275308+00:00 stderr F I1208 17:54:25.204230 1 ovn_kubernetes.go:1429] IP family change: updateNode=true, updateControlPlane=true 2025-12-08T17:54:25.204275308+00:00 stderr F I1208 17:54:25.204251 1 ovn_kubernetes.go:1601] OVN-Kubernetes control-plane and node already at release version 4.20.1; no changes required 2025-12-08T17:54:25.204322890+00:00 stderr F I1208 17:54:25.204269 1 ovn_kubernetes.go:531] ovnk components: ovnkube-node: isRunning=true, update=true; ovnkube-control-plane: isRunning=true, update=true 2025-12-08T17:54:25.339399974+00:00 stderr F I1208 17:54:25.339307 1 ovn_kubernetes.go:1693] daemonset openshift-network-node-identity/network-node-identity rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 1 -> 1 2025-12-08T17:54:25.339399974+00:00 stderr F I1208 17:54:25.339336 1 ovn_kubernetes.go:1698] daemonset openshift-network-node-identity/network-node-identity rollout complete 2025-12-08T17:54:25.552146580+00:00 stderr F I1208 17:54:25.552077 1 log.go:245] Render phase done, rendered 126 objects 2025-12-08T17:54:25.569781105+00:00 stderr F I1208 17:54:25.569716 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster 2025-12-08T17:54:25.573761742+00:00 stderr F I1208 17:54:25.573714 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster was successful 2025-12-08T17:54:25.573761742+00:00 stderr F I1208 17:54:25.573743 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io 2025-12-08T17:54:25.580845093+00:00 stderr F I1208 17:54:25.580802 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io was successful 2025-12-08T17:54:25.580896414+00:00 stderr F I1208 17:54:25.580867 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io 2025-12-08T17:54:25.591996873+00:00 stderr F I1208 17:54:25.591912 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io was successful 
2025-12-08T17:54:25.591996873+00:00 stderr F I1208 17:54:25.591990 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io 2025-12-08T17:54:25.598966110+00:00 stderr F I1208 17:54:25.598919 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io was successful 2025-12-08T17:54:25.598966110+00:00 stderr F I1208 17:54:25.598961 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io 2025-12-08T17:54:25.605431864+00:00 stderr F I1208 17:54:25.605384 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io was successful 2025-12-08T17:54:25.605454044+00:00 stderr F I1208 17:54:25.605432 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-multus 2025-12-08T17:54:25.611345683+00:00 stderr F I1208 17:54:25.611305 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-multus was successful 2025-12-08T17:54:25.611363263+00:00 stderr F I1208 17:54:25.611355 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus 2025-12-08T17:54:25.615547376+00:00 stderr F I1208 17:54:25.615502 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus was successful 2025-12-08T17:54:25.615547376+00:00 stderr F I1208 17:54:25.615542 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools 2025-12-08T17:54:25.618690571+00:00 stderr F I1208 17:54:25.618643 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools was successful 2025-12-08T17:54:25.618690571+00:00 stderr F I1208 17:54:25.618676 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus 2025-12-08T17:54:25.622002960+00:00 stderr F I1208 17:54:25.621944 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus was successful 2025-12-08T17:54:25.622002960+00:00 stderr F I1208 17:54:25.621982 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient 2025-12-08T17:54:25.624726133+00:00 stderr F I1208 17:54:25.624685 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient was successful 2025-12-08T17:54:25.624726133+00:00 stderr F I1208 17:54:25.624720 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group 2025-12-08T17:54:25.774984517+00:00 stderr F I1208 17:54:25.774925 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group was successful 2025-12-08T17:54:25.775018528+00:00 stderr F I1208 17:54:25.774982 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools 2025-12-08T17:54:25.977360833+00:00 stderr F I1208 17:54:25.976942 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools was successful 2025-12-08T17:54:25.977360833+00:00 stderr F I1208 17:54:25.976984 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools 2025-12-08T17:54:26.175694571+00:00 stderr F I1208 17:54:26.175577 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools was successful 2025-12-08T17:54:26.175694571+00:00 
stderr F I1208 17:54:26.175633 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers 2025-12-08T17:54:26.374679306+00:00 stderr F I1208 17:54:26.374600 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers was successful 2025-12-08T17:54:26.374679306+00:00 stderr F I1208 17:54:26.374639 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts 2025-12-08T17:54:26.576270191+00:00 stderr F I1208 17:54:26.575701 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts was successful 2025-12-08T17:54:26.576270191+00:00 stderr F I1208 17:54:26.576243 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts 2025-12-08T17:54:26.776615662+00:00 stderr F I1208 17:54:26.776242 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts was successful 2025-12-08T17:54:26.776615662+00:00 stderr F I1208 17:54:26.776300 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni 2025-12-08T17:54:26.976539253+00:00 stderr F I1208 17:54:26.976456 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni was successful 2025-12-08T17:54:26.976539253+00:00 stderr F I1208 17:54:26.976507 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni 2025-12-08T17:54:27.176057422+00:00 stderr F I1208 17:54:27.175941 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni was successful 2025-12-08T17:54:27.176057422+00:00 stderr F I1208 17:54:27.176001 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project 2025-12-08T17:54:27.376698452+00:00 stderr F I1208 17:54:27.376633 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project was successful 2025-12-08T17:54:27.376753683+00:00 stderr F I1208 17:54:27.376707 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist 2025-12-08T17:54:27.577532926+00:00 stderr F I1208 17:54:27.577458 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist was successful 2025-12-08T17:54:27.577532926+00:00 stderr F I1208 17:54:27.577510 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources 2025-12-08T17:54:27.777645471+00:00 stderr F I1208 17:54:27.777566 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources was successful 2025-12-08T17:54:27.777645471+00:00 stderr F I1208 17:54:27.777620 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config 2025-12-08T17:54:27.977021237+00:00 stderr F I1208 17:54:27.976921 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config was successful 2025-12-08T17:54:27.977021237+00:00 stderr F I1208 17:54:27.976983 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config 2025-12-08T17:54:28.178118429+00:00 stderr F I1208 17:54:28.177576 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config was successful 2025-12-08T17:54:28.178118429+00:00 stderr F I1208 17:54:28.178053 1 log.go:245] reconciling (apps/v1, 
Kind=DaemonSet) openshift-multus/multus 2025-12-08T17:54:28.386954328+00:00 stderr F I1208 17:54:28.386842 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus was successful 2025-12-08T17:54:28.386954328+00:00 stderr F I1208 17:54:28.386923 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins 2025-12-08T17:54:28.591039640+00:00 stderr F I1208 17:54:28.590959 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins was successful 2025-12-08T17:54:28.591039640+00:00 stderr F I1208 17:54:28.590999 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa 2025-12-08T17:54:28.774888528+00:00 stderr F I1208 17:54:28.774830 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa was successful 2025-12-08T17:54:28.774888528+00:00 stderr F I1208 17:54:28.774867 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role 2025-12-08T17:54:28.974982253+00:00 stderr F I1208 17:54:28.974907 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role was successful 2025-12-08T17:54:28.974982253+00:00 stderr F I1208 17:54:28.974972 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding 2025-12-08T17:54:29.176203018+00:00 stderr F I1208 17:54:29.176142 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding was successful 2025-12-08T17:54:29.176371443+00:00 stderr F I1208 17:54:29.176344 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon 2025-12-08T17:54:29.384402401+00:00 stderr F I1208 17:54:29.384326 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon was successful 2025-12-08T17:54:29.384402401+00:00 stderr F I1208 17:54:29.384394 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network 2025-12-08T17:54:29.578526776+00:00 stderr F I1208 17:54:29.578454 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network was successful 2025-12-08T17:54:29.578559506+00:00 stderr F I1208 17:54:29.578526 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/network-metrics-service 2025-12-08T17:54:29.777443539+00:00 stderr F I1208 17:54:29.776869 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/network-metrics-service was successful 2025-12-08T17:54:29.777443539+00:00 stderr F I1208 17:54:29.777395 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:54:29.978668023+00:00 stderr F I1208 17:54:29.978575 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:54:29.978668023+00:00 stderr F I1208 17:54:29.978636 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:54:30.175074529+00:00 stderr F I1208 17:54:30.175000 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:54:30.175074529+00:00 stderr F I1208 17:54:30.175059 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/multus-admission-controller 
2025-12-08T17:54:30.375765200+00:00 stderr F I1208 17:54:30.375675 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/multus-admission-controller was successful 2025-12-08T17:54:30.375765200+00:00 stderr F I1208 17:54:30.375725 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ac 2025-12-08T17:54:30.576670967+00:00 stderr F I1208 17:54:30.576608 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ac was successful 2025-12-08T17:54:30.576721428+00:00 stderr F I1208 17:54:30.576664 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook 2025-12-08T17:54:30.775655512+00:00 stderr F I1208 17:54:30.775597 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook was successful 2025-12-08T17:54:30.775655512+00:00 stderr F I1208 17:54:30.775636 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook 2025-12-08T17:54:30.974325238+00:00 stderr F I1208 17:54:30.974266 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook was successful 2025-12-08T17:54:30.974325238+00:00 stderr F I1208 17:54:30.974318 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io 2025-12-08T17:54:31.175989105+00:00 stderr F I1208 17:54:31.175941 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io was successful 2025-12-08T17:54:31.175989105+00:00 stderr F I1208 17:54:31.175982 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller 2025-12-08T17:54:31.379028159+00:00 stderr F I1208 17:54:31.378943 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller was successful 2025-12-08T17:54:31.379072700+00:00 stderr F I1208 17:54:31.379026 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller 2025-12-08T17:54:31.575947748+00:00 stderr F I1208 17:54:31.575903 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller was successful 2025-12-08T17:54:31.576054511+00:00 stderr F I1208 17:54:31.576040 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:54:31.776407762+00:00 stderr F I1208 17:54:31.776338 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:54:31.776407762+00:00 stderr F I1208 17:54:31.776396 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:54:31.975223213+00:00 stderr F I1208 17:54:31.975133 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:54:31.975280095+00:00 stderr F I1208 17:54:31.975201 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules 2025-12-08T17:54:32.175907104+00:00 stderr F I1208 17:54:32.175845 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules was successful 
2025-12-08T17:54:32.175936785+00:00 stderr F I1208 17:54:32.175917 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-ovn-kubernetes 2025-12-08T17:54:32.377051107+00:00 stderr F I1208 17:54:32.376563 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-ovn-kubernetes was successful 2025-12-08T17:54:32.377051107+00:00 stderr F I1208 17:54:32.377025 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org 2025-12-08T17:54:32.582189828+00:00 stderr F I1208 17:54:32.582136 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org was successful 2025-12-08T17:54:32.582189828+00:00 stderr F I1208 17:54:32.582179 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org 2025-12-08T17:54:32.786843345+00:00 stderr F I1208 17:54:32.786747 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org was successful 2025-12-08T17:54:32.786843345+00:00 stderr F I1208 17:54:32.786800 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org 2025-12-08T17:54:32.985778558+00:00 stderr F I1208 17:54:32.985719 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org was successful 2025-12-08T17:54:32.985778558+00:00 stderr F I1208 17:54:32.985764 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org 2025-12-08T17:54:33.192537703+00:00 stderr F I1208 17:54:33.192448 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org was successful 2025-12-08T17:54:33.192537703+00:00 stderr F I1208 17:54:33.192490 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org 2025-12-08T17:54:33.386929154+00:00 stderr F I1208 17:54:33.386827 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org was successful 2025-12-08T17:54:33.386984695+00:00 stderr F I1208 17:54:33.386927 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:54:33.611029255+00:00 stderr F I1208 17:54:33.610939 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:54:33.611029255+00:00 stderr F I1208 17:54:33.611002 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:54:33.801654844+00:00 stderr F I1208 17:54:33.801589 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:54:33.801654844+00:00 stderr F I1208 17:54:33.801638 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io 2025-12-08T17:54:33.981200636+00:00 stderr F I1208 17:54:33.981102 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io was successful 2025-12-08T17:54:33.981200636+00:00 stderr F I1208 17:54:33.981155 1 log.go:245] reconciling (apiextensions.k8s.io/v1, 
Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org 2025-12-08T17:54:34.217293749+00:00 stderr F I1208 17:54:34.217212 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:54:34.217293749+00:00 stderr F I1208 17:54:34.217268 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org 2025-12-08T17:54:34.431395391+00:00 stderr F I1208 17:54:34.431306 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:54:34.431433002+00:00 stderr F I1208 17:54:34.431389 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:54:34.577050121+00:00 stderr F I1208 17:54:34.576953 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:54:34.577050121+00:00 stderr F I1208 17:54:34.577014 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited 2025-12-08T17:54:34.776492738+00:00 stderr F I1208 17:54:34.776098 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:54:34.776492738+00:00 stderr F I1208 17:54:34.776457 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited 2025-12-08T17:54:34.976374667+00:00 stderr F I1208 17:54:34.976273 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited was successful 2025-12-08T17:54:34.976374667+00:00 stderr F I1208 17:54:34.976344 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited 2025-12-08T17:54:35.179472023+00:00 stderr F I1208 17:54:35.179394 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:54:35.179472023+00:00 stderr F I1208 17:54:35.179459 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited 2025-12-08T17:54:35.376844554+00:00 stderr F I1208 17:54:35.376730 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited was successful 2025-12-08T17:54:35.376844554+00:00 stderr F I1208 17:54:35.376816 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy 2025-12-08T17:54:35.576802215+00:00 stderr F I1208 17:54:35.576694 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy was successful 2025-12-08T17:54:35.576802215+00:00 stderr F I1208 17:54:35.576764 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy 2025-12-08T17:54:35.778596105+00:00 stderr F I1208 17:54:35.778487 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy was successful 2025-12-08T17:54:35.778596105+00:00 stderr F I1208 17:54:35.778547 
1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config 2025-12-08T17:54:35.973846419+00:00 stderr F I1208 17:54:35.973763 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config was successful 2025-12-08T17:54:35.973846419+00:00 stderr F I1208 17:54:35.973804 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:54:36.176426692+00:00 stderr F I1208 17:54:36.176346 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:54:36.176426692+00:00 stderr F I1208 17:54:36.176400 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:54:36.379368422+00:00 stderr F I1208 17:54:36.379254 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:54:36.379368422+00:00 stderr F I1208 17:54:36.379328 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:54:36.576402635+00:00 stderr F I1208 17:54:36.576315 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:54:36.576402635+00:00 stderr F I1208 17:54:36.576367 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:54:36.777983380+00:00 stderr F I1208 17:54:36.776984 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:54:36.777983380+00:00 stderr F I1208 17:54:36.777047 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:54:36.974981321+00:00 stderr F I1208 17:54:36.974921 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:54:36.974981321+00:00 stderr F I1208 17:54:36.974971 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn 2025-12-08T17:54:37.177189652+00:00 stderr F I1208 17:54:37.177098 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn was successful 2025-12-08T17:54:37.177189652+00:00 stderr F I1208 17:54:37.177150 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer 2025-12-08T17:54:37.376800814+00:00 stderr F I1208 17:54:37.376723 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer was successful 2025-12-08T17:54:37.376840205+00:00 stderr F I1208 17:54:37.376797 1 log.go:245] reconciling (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes 2025-12-08T17:54:37.577071624+00:00 stderr F I1208 17:54:37.576994 1 log.go:245] Apply / Create of (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes was successful 2025-12-08T17:54:37.577071624+00:00 stderr F I1208 17:54:37.577034 1 log.go:245] reconciling 
(rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader 2025-12-08T17:54:37.777768594+00:00 stderr F I1208 17:54:37.777668 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader was successful 2025-12-08T17:54:37.777768594+00:00 stderr F I1208 17:54:37.777735 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib 2025-12-08T17:54:37.983153122+00:00 stderr F I1208 17:54:37.983060 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib was successful 2025-12-08T17:54:37.983153122+00:00 stderr F I1208 17:54:37.983136 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor 2025-12-08T17:54:38.177487301+00:00 stderr F I1208 17:54:38.177378 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor was successful 2025-12-08T17:54:38.177487301+00:00 stderr F I1208 17:54:38.177459 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer 2025-12-08T17:54:38.377668199+00:00 stderr F I1208 17:54:38.377555 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer was successful 2025-12-08T17:54:38.377668199+00:00 stderr F I1208 17:54:38.377624 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules 2025-12-08T17:54:38.578483582+00:00 stderr F I1208 17:54:38.578395 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules was successful 2025-12-08T17:54:38.578483582+00:00 stderr F I1208 17:54:38.578463 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules 2025-12-08T17:54:38.779707077+00:00 stderr F I1208 17:54:38.779628 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules was successful 2025-12-08T17:54:38.779707077+00:00 stderr F I1208 17:54:38.779694 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features 2025-12-08T17:54:38.976595045+00:00 stderr F I1208 17:54:38.976552 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features was successful 2025-12-08T17:54:38.976814831+00:00 stderr F I1208 17:54:38.976783 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics 2025-12-08T17:54:39.183234986+00:00 stderr F I1208 17:54:39.182936 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics was successful 2025-12-08T17:54:39.183402341+00:00 stderr F I1208 17:54:39.183381 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:54:39.374920175+00:00 stderr F I1208 17:54:39.374847 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:54:39.374920175+00:00 stderr F I1208 17:54:39.374910 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node 2025-12-08T17:54:39.580973579+00:00 stderr F I1208 17:54:39.580140 1 log.go:245] 
Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node was successful 2025-12-08T17:54:39.580973579+00:00 stderr F I1208 17:54:39.580204 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:54:39.777994861+00:00 stderr F I1208 17:54:39.777393 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:54:39.777994861+00:00 stderr F I1208 17:54:39.777454 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:54:39.977409858+00:00 stderr F I1208 17:54:39.977323 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:54:39.977409858+00:00 stderr F I1208 17:54:39.977386 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:54:40.176653860+00:00 stderr F I1208 17:54:40.176551 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:54:40.176653860+00:00 stderr F I1208 17:54:40.176628 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-host-network 2025-12-08T17:54:40.377858865+00:00 stderr F I1208 17:54:40.377786 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-host-network was successful 2025-12-08T17:54:40.377858865+00:00 stderr F I1208 17:54:40.377828 1 log.go:245] reconciling (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas 2025-12-08T17:54:40.577644771+00:00 stderr F I1208 17:54:40.577545 1 log.go:245] Apply / Create of (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas was successful 2025-12-08T17:54:40.577644771+00:00 stderr F I1208 17:54:40.577596 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane 2025-12-08T17:54:40.786818010+00:00 stderr F I1208 17:54:40.786713 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane was successful 2025-12-08T17:54:40.786818010+00:00 stderr F I1208 17:54:40.786791 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node 2025-12-08T17:54:41.013828629+00:00 stderr F I1208 17:54:41.013718 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node was successful 2025-12-08T17:54:41.013828629+00:00 stderr F I1208 17:54:41.013791 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label 2025-12-08T17:54:41.179412685+00:00 stderr F I1208 17:54:41.179324 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label was successful 2025-12-08T17:54:41.179412685+00:00 stderr F I1208 17:54:41.179401 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding 2025-12-08T17:54:41.380021063+00:00 stderr F I1208 17:54:41.375079 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding was successful 2025-12-08T17:54:41.380021063+00:00 stderr F I1208 17:54:41.375119 1 log.go:245] reconciling (/v1, Kind=Namespace) 
/openshift-network-diagnostics 2025-12-08T17:54:41.578808832+00:00 stderr F I1208 17:54:41.578748 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-diagnostics was successful 2025-12-08T17:54:41.578868994+00:00 stderr F I1208 17:54:41.578813 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:54:41.774622162+00:00 stderr F I1208 17:54:41.774561 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:54:41.774622162+00:00 stderr F I1208 17:54:41.774608 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:54:41.975051646+00:00 stderr F I1208 17:54:41.974997 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:54:41.975082287+00:00 stderr F I1208 17:54:41.975048 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:54:42.174863203+00:00 stderr F I1208 17:54:42.174777 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:54:42.174863203+00:00 stderr F I1208 17:54:42.174829 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics 2025-12-08T17:54:42.376983372+00:00 stderr F I1208 17:54:42.375684 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics was successful 2025-12-08T17:54:42.376983372+00:00 stderr F I1208 17:54:42.375721 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics 2025-12-08T17:54:42.574171120+00:00 stderr F I1208 17:54:42.574129 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics was successful 2025-12-08T17:54:42.574270962+00:00 stderr F I1208 17:54:42.574256 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics 2025-12-08T17:54:42.775125187+00:00 stderr F I1208 17:54:42.775030 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics was successful 2025-12-08T17:54:42.775125187+00:00 stderr F I1208 17:54:42.775085 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source 2025-12-08T17:54:42.978751857+00:00 stderr F I1208 17:54:42.978630 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:54:42.978751857+00:00 stderr F I1208 17:54:42.978682 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-source 2025-12-08T17:54:43.182798338+00:00 stderr F I1208 17:54:43.179379 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:54:43.183133047+00:00 stderr F I1208 17:54:43.183082 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source 2025-12-08T17:54:43.376489700+00:00 stderr F I1208 17:54:43.376417 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source was successful 
2025-12-08T17:54:43.376489700+00:00 stderr F I1208 17:54:43.376479 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:54:43.576677117+00:00 stderr F I1208 17:54:43.576615 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:54:43.576825591+00:00 stderr F I1208 17:54:43.576802 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:54:43.774429269+00:00 stderr F I1208 17:54:43.774355 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:54:43.774429269+00:00 stderr F I1208 17:54:43.774420 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target 2025-12-08T17:54:43.977781232+00:00 stderr F I1208 17:54:43.977253 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:54:43.977781232+00:00 stderr F I1208 17:54:43.977760 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-target 2025-12-08T17:54:44.175858782+00:00 stderr F I1208 17:54:44.175802 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:54:44.176061577+00:00 stderr F I1208 17:54:44.176028 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role 2025-12-08T17:54:44.375172865+00:00 stderr F I1208 17:54:44.374867 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role was successful 2025-12-08T17:54:44.375243588+00:00 stderr F I1208 17:54:44.375234 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding 2025-12-08T17:54:44.576484763+00:00 stderr F I1208 17:54:44.576399 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding was successful 2025-12-08T17:54:44.576484763+00:00 stderr F I1208 17:54:44.576451 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-node-identity 2025-12-08T17:54:44.780708849+00:00 stderr F I1208 17:54:44.780629 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-node-identity was successful 2025-12-08T17:54:44.780846773+00:00 stderr F I1208 17:54:44.780825 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity 2025-12-08T17:54:44.976861758+00:00 stderr F I1208 17:54:44.976804 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:54:44.977039303+00:00 stderr F I1208 17:54:44.977009 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity 2025-12-08T17:54:45.176713637+00:00 stderr F I1208 17:54:45.176614 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity was successful 2025-12-08T17:54:45.176713637+00:00 stderr F I1208 17:54:45.176693 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity 
2025-12-08T17:54:45.374807087+00:00 stderr F I1208 17:54:45.374771 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity was successful 2025-12-08T17:54:45.374971351+00:00 stderr F I1208 17:54:45.374959 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:54:45.574413359+00:00 stderr F I1208 17:54:45.574362 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:54:45.574532102+00:00 stderr F I1208 17:54:45.574519 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:54:45.777226557+00:00 stderr F I1208 17:54:45.777159 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:54:45.777226557+00:00 stderr F I1208 17:54:45.777211 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 2025-12-08T17:54:45.976074848+00:00 stderr F I1208 17:54:45.975974 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 was successful 2025-12-08T17:54:45.976074848+00:00 stderr F I1208 17:54:45.976044 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm 2025-12-08T17:54:46.173829390+00:00 stderr F I1208 17:54:46.173731 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm was successful 2025-12-08T17:54:46.173829390+00:00 stderr F I1208 17:54:46.173780 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity 2025-12-08T17:54:46.378562670+00:00 stderr F I1208 17:54:46.378454 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:54:46.378562670+00:00 stderr F I1208 17:54:46.378518 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io 2025-12-08T17:54:46.579445665+00:00 stderr F I1208 17:54:46.579326 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io was successful 2025-12-08T17:54:46.579445665+00:00 stderr F I1208 17:54:46.579391 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity 2025-12-08T17:54:46.785060679+00:00 stderr F I1208 17:54:46.784342 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:54:46.785060679+00:00 stderr F I1208 17:54:46.784434 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules 2025-12-08T17:54:46.974940879+00:00 stderr F I1208 17:54:46.974845 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules was successful 2025-12-08T17:54:46.974940879+00:00 stderr F I1208 17:54:46.974922 1 log.go:245] 
reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter 2025-12-08T17:54:47.175190358+00:00 stderr F I1208 17:54:47.175133 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter was successful 2025-12-08T17:54:47.175190358+00:00 stderr F I1208 17:54:47.175172 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter 2025-12-08T17:54:47.377673246+00:00 stderr F I1208 17:54:47.377117 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:54:47.377673246+00:00 stderr F I1208 17:54:47.377175 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter 2025-12-08T17:54:47.577297529+00:00 stderr F I1208 17:54:47.577234 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter was successful 2025-12-08T17:54:47.577297529+00:00 stderr F I1208 17:54:47.577289 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script 2025-12-08T17:54:47.778938756+00:00 stderr F I1208 17:54:47.778808 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script was successful 2025-12-08T17:54:47.778991817+00:00 stderr F I1208 17:54:47.778948 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter 2025-12-08T17:54:47.984566739+00:00 stderr F I1208 17:54:47.983977 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:54:47.984674542+00:00 stderr F I1208 17:54:47.984661 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-console 2025-12-08T17:54:48.177498171+00:00 stderr F I1208 17:54:48.177436 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-console was successful 2025-12-08T17:54:48.177498171+00:00 stderr F I1208 17:54:48.177489 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin 2025-12-08T17:54:48.375299704+00:00 stderr F I1208 17:54:48.375195 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:54:48.375299704+00:00 stderr F I1208 17:54:48.375247 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin 2025-12-08T17:54:48.584729310+00:00 stderr F I1208 17:54:48.584644 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:54:48.584729310+00:00 stderr F I1208 17:54:48.584717 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-console/networking-console-plugin 2025-12-08T17:54:48.780409216+00:00 stderr F I1208 17:54:48.780298 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:54:48.780409216+00:00 stderr F I1208 17:54:48.780366 1 log.go:245] reconciling (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin 2025-12-08T17:54:48.978848146+00:00 stderr F I1208 17:54:48.978736 1 log.go:245] Apply / Create of (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin was successful 2025-12-08T17:54:48.994182488+00:00 stderr F I1208 17:54:48.994090 1 log.go:245] Operconfig Controller complete 
2025-12-08T17:55:08.037069345+00:00 stderr F I1208 17:55:08.033726 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied
2025-12-08T17:55:08.052929263+00:00 stderr F I1208 17:55:08.052475 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:55:08.088306944+00:00 stderr F I1208 17:55:08.088217 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:55:08.115554028+00:00 stderr F I1208 17:55:08.112440 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:55:08.131336813+00:00 stderr F I1208 17:55:08.130494 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:55:08.138982358+00:00 stderr F I1208 17:55:08.137601 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:55:08.151365832+00:00 stderr F I1208 17:55:08.151319 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied
2025-12-08T17:55:08.160237870+00:00 stderr F I1208 17:55:08.160183 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied
2025-12-08T17:55:08.168191594+00:00 stderr F I1208 17:55:08.165344 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied
2025-12-08T17:55:08.173006804+00:00 stderr F I1208 17:55:08.172956 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied
2025-12-08T17:55:08.223682168+00:00 stderr F I1208 17:55:08.223613 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied
[... the same cycle of eleven PodNetworkConnectivityCheck "is applied" entries repeats at roughly 200 ms intervals, with occasional pauses, through 2025-12-08T17:55:49 ...]
2025-12-08T17:56:03.588362405+00:00 stderr F I1208 17:56:03.588286 1 log.go:245] Reconciling update to IngressController openshift-ingress-operator/default
[... the PodNetworkConnectivityCheck "is applied" cycles resume at 2025-12-08T17:56:20 and continue through 2025-12-08T17:56:45 ...]
2025-12-08T17:56:45.900514459+00:00 stderr F I1208 17:56:45.900454 1
log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:56:46.100757521+00:00 stderr F I1208 17:56:46.100708 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:46.303386327+00:00 stderr F I1208 17:56:46.303320 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:46.544839644+00:00 stderr F I1208 17:56:46.544632 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:46.699864207+00:00 stderr F I1208 17:56:46.699791 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:46.901706851+00:00 stderr F I1208 17:56:46.901641 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:47.112957412+00:00 stderr F I1208 17:56:47.107707 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:47.299484716+00:00 stderr F I1208 17:56:47.299416 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:56:47.499918344+00:00 stderr F I1208 17:56:47.499837 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:56:47.703487654+00:00 stderr F I1208 17:56:47.703036 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:47.902525305+00:00 stderr F I1208 17:56:47.902326 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:48.101397082+00:00 stderr F I1208 17:56:48.101322 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:56:48.300537516+00:00 stderr F I1208 17:56:48.300366 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:48.499997518+00:00 stderr F I1208 17:56:48.499942 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:48.701642718+00:00 stderr F I1208 17:56:48.701566 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:48.901406138+00:00 stderr F I1208 17:56:48.901345 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 
2025-12-08T17:56:49.102812381+00:00 stderr F I1208 17:56:49.102311 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:49.306835972+00:00 stderr F I1208 17:56:49.306481 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:49.500765550+00:00 stderr F I1208 17:56:49.500705 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:56:49.700240933+00:00 stderr F I1208 17:56:49.699695 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:56:49.901841751+00:00 stderr F I1208 17:56:49.901564 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:50.100128563+00:00 stderr F I1208 17:56:50.100059 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:50.580983255+00:00 stderr F I1208 17:56:50.579082 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:56:50.587178086+00:00 stderr F I1208 17:56:50.586173 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:50.700689526+00:00 stderr F I1208 17:56:50.700625 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:50.902515940+00:00 stderr F I1208 17:56:50.900950 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:51.103620326+00:00 stderr F I1208 17:56:51.103137 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:51.310637125+00:00 stderr F I1208 17:56:51.310294 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:51.500420915+00:00 stderr F I1208 17:56:51.500353 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:51.701067389+00:00 stderr F I1208 17:56:51.700609 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:56:51.914708461+00:00 stderr F I1208 17:56:51.913536 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:56:52.103467434+00:00 stderr F I1208 17:56:52.101399 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n 
openshift-network-diagnostics is applied 2025-12-08T17:56:52.299612129+00:00 stderr F I1208 17:56:52.299554 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:52.501522035+00:00 stderr F I1208 17:56:52.501462 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:56:52.700099695+00:00 stderr F I1208 17:56:52.700054 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:52.900659836+00:00 stderr F I1208 17:56:52.900609 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:53.099766229+00:00 stderr F I1208 17:56:53.099705 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:53.301008118+00:00 stderr F I1208 17:56:53.300317 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:53.502393341+00:00 stderr F I1208 17:56:53.502344 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:53.700099687+00:00 stderr F I1208 17:56:53.700044 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:53.902124966+00:00 stderr F I1208 17:56:53.901616 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:56:54.244397043+00:00 stderr F I1208 17:56:54.243571 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:56:54.300993960+00:00 stderr F I1208 17:56:54.300662 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:54.502950547+00:00 stderr F I1208 17:56:54.502020 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:54.832866661+00:00 stderr F I1208 17:56:54.832587 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:56:54.901582454+00:00 stderr F I1208 17:56:54.901482 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:55.100240346+00:00 stderr F I1208 17:56:55.100187 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:55.301142726+00:00 stderr F I1208 17:56:55.301096 1 log.go:245] The check 
PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:55.500675540+00:00 stderr F I1208 17:56:55.500631 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:55.703718395+00:00 stderr F I1208 17:56:55.703676 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:55.900315054+00:00 stderr F I1208 17:56:55.900255 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:56.102052055+00:00 stderr F I1208 17:56:56.102012 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:56:56.302041431+00:00 stderr F I1208 17:56:56.299972 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:56:56.499414018+00:00 stderr F I1208 17:56:56.499364 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:56.700297688+00:00 stderr F I1208 17:56:56.700237 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:56.903712914+00:00 stderr F I1208 17:56:56.902920 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:56:57.100790604+00:00 stderr F I1208 17:56:57.100735 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:57.301401827+00:00 stderr F I1208 17:56:57.301348 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:57.503973500+00:00 stderr F I1208 17:56:57.503919 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:57.701201653+00:00 stderr F I1208 17:56:57.701143 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:57.902389381+00:00 stderr F I1208 17:56:57.902300 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:58.100246412+00:00 stderr F I1208 17:56:58.100183 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:56:58.299401906+00:00 stderr F I1208 17:56:58.299334 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:56:58.499985408+00:00 stderr F I1208 
17:56:58.499922 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:56:58.699795579+00:00 stderr F I1208 17:56:58.699745 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:56:58.900281278+00:00 stderr F I1208 17:56:58.900207 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:57:33.840842405+00:00 stderr F I1208 17:57:33.840752 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:57:33.845383083+00:00 stderr F I1208 17:57:33.845323 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:57:33.850318140+00:00 stderr F I1208 17:57:33.850140 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:57:33.858561723+00:00 stderr F I1208 17:57:33.857348 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:57:33.864515957+00:00 stderr F I1208 17:57:33.864425 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:57:33.870812940+00:00 stderr F I1208 17:57:33.870749 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:57:33.882418640+00:00 stderr F I1208 17:57:33.882325 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:57:33.892298765+00:00 stderr F I1208 17:57:33.891441 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:57:33.900686732+00:00 stderr F I1208 17:57:33.899499 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:57:33.904458460+00:00 stderr F I1208 17:57:33.904402 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:57:34.039317175+00:00 stderr F I1208 17:57:34.039249 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T17:57:48.995711383+00:00 stderr F I1208 17:57:48.994952 1 log.go:245] Reconciling Network.operator.openshift.io cluster 2025-12-08T17:57:49.165406778+00:00 stderr F I1208 17:57:49.165319 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu-host=, the list of nodes are [] 2025-12-08T17:57:49.167593935+00:00 stderr F I1208 17:57:49.167523 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu=, the list of nodes are [] 
2025-12-08T17:57:49.174513454+00:00 stderr F I1208 17:57:49.174442 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/smart-nic=, the list of nodes are [] 2025-12-08T17:57:49.179066311+00:00 stderr F I1208 17:57:49.178986 1 ovn_kubernetes.go:956] OVN configuration is now &{GatewayMode: HyperShiftConfig:0xc0033f2900 DisableUDPAggregation:false DpuHostModeLabel:network.operator.openshift.io/dpu-host DpuHostModeNodes:[] DpuHostModeValue: DpuModeLabel:network.operator.openshift.io/dpu DpuModeNodes:[] SmartNicModeLabel:network.operator.openshift.io/smart-nic SmartNicModeNodes:[] SmartNicModeValue: MgmtPortResourceName: ConfigOverrides:map[]} 2025-12-08T17:57:49.185073357+00:00 stderr F I1208 17:57:49.185004 1 ovn_kubernetes.go:1728] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete; 1/1 scheduled; 1 available; generation 4 -> 4 2025-12-08T17:57:49.185073357+00:00 stderr F I1208 17:57:49.185039 1 ovn_kubernetes.go:1733] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete 2025-12-08T17:57:49.185073357+00:00 stderr F I1208 17:57:49.185049 1 ovn_kubernetes.go:1248] ovnkube-control-plane deployment status: progressing=false 2025-12-08T17:57:49.187295264+00:00 stderr F I1208 17:57:49.187242 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 3 -> 3 2025-12-08T17:57:49.187295264+00:00 stderr F I1208 17:57:49.187271 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T17:57:49.187295264+00:00 stderr F I1208 17:57:49.187278 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 3 -> 3 2025-12-08T17:57:49.187295264+00:00 stderr F I1208 17:57:49.187284 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T17:57:49.187330335+00:00 stderr F I1208 17:57:49.187303 1 ovn_kubernetes.go:1279] ovnkube-node DaemonSet status: progressing=false 2025-12-08T17:57:49.191276288+00:00 stderr F I1208 17:57:49.191223 1 ovn_kubernetes.go:1321] Found the DefaultV4MasqueradeSubnet(169.254.0.0/17) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:57:49.191276288+00:00 stderr F I1208 17:57:49.191255 1 ovn_kubernetes.go:1318] Found the DefaultV6MasqueradeSubnet(fd69::/112) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T17:57:49.219991920+00:00 stderr F I1208 17:57:49.219932 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T17:57:49.232789100+00:00 stderr F I1208 17:57:49.232734 1 log.go:245] Apply / Create of (operator.openshift.io/v1, Kind=Network) /cluster was successful 2025-12-08T17:57:49.232789100+00:00 stderr F I1208 17:57:49.232763 1 log.go:245] Starting render phase 2025-12-08T17:57:49.240989532+00:00 stderr F I1208 17:57:49.240944 1 ovn_kubernetes.go:344] OVN_EGRESSIP_HEALTHCHECK_PORT env var is not defined. 
Using: 9107 2025-12-08T17:57:49.273386100+00:00 stderr F I1208 17:57:49.273324 1 ovn_kubernetes.go:1457] IP family mode: node=single-stack, controlPlane=single-stack 2025-12-08T17:57:49.273386100+00:00 stderr F I1208 17:57:49.273345 1 ovn_kubernetes.go:1429] IP family change: updateNode=true, updateControlPlane=true 2025-12-08T17:57:49.273386100+00:00 stderr F I1208 17:57:49.273365 1 ovn_kubernetes.go:1601] OVN-Kubernetes control-plane and node already at release version 4.20.1; no changes required 2025-12-08T17:57:49.273436951+00:00 stderr F I1208 17:57:49.273386 1 ovn_kubernetes.go:531] ovnk components: ovnkube-node: isRunning=true, update=true; ovnkube-control-plane: isRunning=true, update=true 2025-12-08T17:57:49.422330259+00:00 stderr F I1208 17:57:49.422274 1 ovn_kubernetes.go:1693] daemonset openshift-network-node-identity/network-node-identity rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 1 -> 1 2025-12-08T17:57:49.422330259+00:00 stderr F I1208 17:57:49.422298 1 ovn_kubernetes.go:1698] daemonset openshift-network-node-identity/network-node-identity rollout complete 2025-12-08T17:57:49.637478650+00:00 stderr F I1208 17:57:49.637419 1 log.go:245] Render phase done, rendered 126 objects 2025-12-08T17:57:49.692823831+00:00 stderr F I1208 17:57:49.692753 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster 2025-12-08T17:57:49.698823795+00:00 stderr F I1208 17:57:49.698768 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster was successful 2025-12-08T17:57:49.698823795+00:00 stderr F I1208 17:57:49.698812 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io 2025-12-08T17:57:49.708214248+00:00 stderr F I1208 17:57:49.708152 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io was successful 2025-12-08T17:57:49.708251439+00:00 stderr F I1208 17:57:49.708217 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io 2025-12-08T17:57:49.718020932+00:00 stderr F I1208 17:57:49.717963 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io was successful 2025-12-08T17:57:49.718064743+00:00 stderr F I1208 17:57:49.718020 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io 2025-12-08T17:57:49.732625739+00:00 stderr F I1208 17:57:49.732523 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io was successful 2025-12-08T17:57:49.732683920+00:00 stderr F I1208 17:57:49.732619 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io 2025-12-08T17:57:49.741914319+00:00 stderr F I1208 17:57:49.741800 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io was successful 2025-12-08T17:57:49.741914319+00:00 stderr F I1208 17:57:49.741850 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-multus 2025-12-08T17:57:49.750363417+00:00 stderr F I1208 17:57:49.750289 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-multus was successful 2025-12-08T17:57:49.750363417+00:00 stderr F I1208 
17:57:49.750334 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus 2025-12-08T17:57:49.756543797+00:00 stderr F I1208 17:57:49.756502 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus was successful 2025-12-08T17:57:49.756576447+00:00 stderr F I1208 17:57:49.756548 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools 2025-12-08T17:57:49.761307860+00:00 stderr F I1208 17:57:49.761266 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools was successful 2025-12-08T17:57:49.761307860+00:00 stderr F I1208 17:57:49.761299 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus 2025-12-08T17:57:49.766337440+00:00 stderr F I1208 17:57:49.766286 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus was successful 2025-12-08T17:57:49.766371291+00:00 stderr F I1208 17:57:49.766347 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient 2025-12-08T17:57:49.771159575+00:00 stderr F I1208 17:57:49.771113 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient was successful 2025-12-08T17:57:49.771196836+00:00 stderr F I1208 17:57:49.771160 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group 2025-12-08T17:57:49.899223544+00:00 stderr F I1208 17:57:49.899161 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group was successful 2025-12-08T17:57:49.899223544+00:00 stderr F I1208 17:57:49.899208 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools 2025-12-08T17:57:50.097270013+00:00 stderr F I1208 17:57:50.097224 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools was successful 2025-12-08T17:57:50.097270013+00:00 stderr F I1208 17:57:50.097265 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools 2025-12-08T17:57:50.297311173+00:00 stderr F I1208 17:57:50.297243 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools was successful 2025-12-08T17:57:50.297311173+00:00 stderr F I1208 17:57:50.297297 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers 2025-12-08T17:57:50.497069557+00:00 stderr F I1208 17:57:50.496998 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers was successful 2025-12-08T17:57:50.497069557+00:00 stderr F I1208 17:57:50.497050 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts 2025-12-08T17:57:50.697175608+00:00 stderr F I1208 17:57:50.697130 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts was successful 2025-12-08T17:57:50.697175608+00:00 stderr F I1208 17:57:50.697170 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts 2025-12-08T17:57:50.899102987+00:00 stderr F I1208 17:57:50.899053 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts was successful 2025-12-08T17:57:50.899148158+00:00 stderr F I1208 17:57:50.899104 1 log.go:245] reconciling 
(rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni 2025-12-08T17:57:51.098866091+00:00 stderr F I1208 17:57:51.098799 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni was successful 2025-12-08T17:57:51.098866091+00:00 stderr F I1208 17:57:51.098851 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni 2025-12-08T17:57:51.296915529+00:00 stderr F I1208 17:57:51.296833 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni was successful 2025-12-08T17:57:51.296955470+00:00 stderr F I1208 17:57:51.296920 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project 2025-12-08T17:57:51.497187395+00:00 stderr F I1208 17:57:51.497144 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project was successful 2025-12-08T17:57:51.497187395+00:00 stderr F I1208 17:57:51.497181 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist 2025-12-08T17:57:51.725689891+00:00 stderr F I1208 17:57:51.725590 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist was successful 2025-12-08T17:57:51.725689891+00:00 stderr F I1208 17:57:51.725630 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources 2025-12-08T17:57:51.915573239+00:00 stderr F I1208 17:57:51.915504 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources was successful 2025-12-08T17:57:51.915573239+00:00 stderr F I1208 17:57:51.915543 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config 2025-12-08T17:57:52.097272315+00:00 stderr F I1208 17:57:52.096797 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config was successful 2025-12-08T17:57:52.097272315+00:00 stderr F I1208 17:57:52.097227 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config 2025-12-08T17:57:52.305579678+00:00 stderr F I1208 17:57:52.305518 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config was successful 2025-12-08T17:57:52.305579678+00:00 stderr F I1208 17:57:52.305559 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus 2025-12-08T17:57:52.505022134+00:00 stderr F I1208 17:57:52.504952 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus was successful 2025-12-08T17:57:52.505022134+00:00 stderr F I1208 17:57:52.505002 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins 2025-12-08T17:57:52.707182159+00:00 stderr F I1208 17:57:52.707056 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins was successful 2025-12-08T17:57:52.707182159+00:00 stderr F I1208 17:57:52.707092 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa 2025-12-08T17:57:52.898446662+00:00 stderr F I1208 17:57:52.898392 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa was successful 2025-12-08T17:57:52.898446662+00:00 stderr F I1208 17:57:52.898439 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role 2025-12-08T17:57:53.098020699+00:00 stderr F I1208 17:57:53.097958 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role was successful 2025-12-08T17:57:53.098020699+00:00 stderr F I1208 17:57:53.097998 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding 2025-12-08T17:57:53.298313276+00:00 stderr F I1208 17:57:53.298251 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding was successful 2025-12-08T17:57:53.298313276+00:00 stderr F I1208 17:57:53.298303 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon 2025-12-08T17:57:53.502923835+00:00 stderr F I1208 17:57:53.502809 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon was successful 2025-12-08T17:57:53.502923835+00:00 stderr F I1208 17:57:53.502912 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network 2025-12-08T17:57:53.702919673+00:00 stderr F I1208 17:57:53.698955 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network was successful 2025-12-08T17:57:53.702919673+00:00 stderr F I1208 17:57:53.699015 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/network-metrics-service 2025-12-08T17:57:53.898012816+00:00 stderr F I1208 17:57:53.897956 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/network-metrics-service was successful 2025-12-08T17:57:53.898012816+00:00 stderr F I1208 17:57:53.897992 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:57:54.098472036+00:00 stderr F I1208 17:57:54.098387 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:57:54.098472036+00:00 stderr F I1208 17:57:54.098435 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:57:54.308160416+00:00 stderr F I1208 17:57:54.308091 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:57:54.308160416+00:00 stderr F I1208 17:57:54.308146 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/multus-admission-controller 2025-12-08T17:57:54.499699067+00:00 stderr F I1208 17:57:54.499583 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/multus-admission-controller was successful 2025-12-08T17:57:54.499699067+00:00 stderr F I1208 17:57:54.499663 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ac 2025-12-08T17:57:54.769517120+00:00 stderr F I1208 17:57:54.697369 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ac was successful 2025-12-08T17:57:54.769517120+00:00 stderr F I1208 17:57:54.697416 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook 2025-12-08T17:57:54.900508326+00:00 stderr F I1208 17:57:54.900443 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook was successful 2025-12-08T17:57:54.900508326+00:00 stderr F I1208 17:57:54.900498 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook 2025-12-08T17:57:55.097738094+00:00 stderr F I1208 17:57:55.097228 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook was successful 2025-12-08T17:57:55.097738094+00:00 stderr F I1208 17:57:55.097269 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io 2025-12-08T17:57:55.299624571+00:00 stderr F I1208 17:57:55.299543 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io was successful 2025-12-08T17:57:55.299624571+00:00 stderr F I1208 17:57:55.299599 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller 2025-12-08T17:57:55.506388526+00:00 stderr F I1208 17:57:55.506328 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller was successful 2025-12-08T17:57:55.506388526+00:00 stderr F I1208 17:57:55.506375 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller 2025-12-08T17:57:55.699621890+00:00 stderr F I1208 17:57:55.699554 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller was successful 2025-12-08T17:57:55.699621890+00:00 stderr F I1208 17:57:55.699609 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T17:57:55.909144005+00:00 stderr F I1208 17:57:55.909091 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T17:57:55.909144005+00:00 stderr F I1208 17:57:55.909137 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T17:57:56.098076228+00:00 stderr F I1208 17:57:56.097993 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T17:57:56.098076228+00:00 stderr F I1208 17:57:56.098037 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules 2025-12-08T17:57:56.299155255+00:00 stderr F I1208 17:57:56.299089 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules was successful 2025-12-08T17:57:56.299186956+00:00 stderr F I1208 17:57:56.299161 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-ovn-kubernetes 2025-12-08T17:57:56.500382606+00:00 stderr F I1208 17:57:56.500327 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-ovn-kubernetes was successful 2025-12-08T17:57:56.500417447+00:00 stderr F I1208 17:57:56.500379 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org 2025-12-08T17:57:56.704041339+00:00 stderr F I1208 17:57:56.703982 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org was successful 2025-12-08T17:57:56.704041339+00:00 stderr F I1208 17:57:56.704024 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org 2025-12-08T17:57:56.902860088+00:00 stderr F I1208 17:57:56.902808 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org was successful 2025-12-08T17:57:56.902944000+00:00 stderr F I1208 17:57:56.902857 1 log.go:245] reconciling (apiextensions.k8s.io/v1, 
Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org 2025-12-08T17:57:57.105989418+00:00 stderr F I1208 17:57:57.105927 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org was successful 2025-12-08T17:57:57.105989418+00:00 stderr F I1208 17:57:57.105975 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org 2025-12-08T17:57:57.306272824+00:00 stderr F I1208 17:57:57.306217 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org was successful 2025-12-08T17:57:57.306272824+00:00 stderr F I1208 17:57:57.306266 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org 2025-12-08T17:57:57.506292735+00:00 stderr F I1208 17:57:57.506235 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org was successful 2025-12-08T17:57:57.506292735+00:00 stderr F I1208 17:57:57.506276 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:57:57.734458421+00:00 stderr F I1208 17:57:57.734403 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:57:57.734532513+00:00 stderr F I1208 17:57:57.734522 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io 2025-12-08T17:57:57.939916412+00:00 stderr F I1208 17:57:57.939852 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T17:57:57.939952643+00:00 stderr F I1208 17:57:57.939916 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io 2025-12-08T17:57:58.100506733+00:00 stderr F I1208 17:57:58.100463 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io was successful 2025-12-08T17:57:58.100583095+00:00 stderr F I1208 17:57:58.100572 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org 2025-12-08T17:57:58.324867831+00:00 stderr F I1208 17:57:58.324810 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:57:58.324867831+00:00 stderr F I1208 17:57:58.324850 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org 2025-12-08T17:57:58.541472309+00:00 stderr F I1208 17:57:58.540960 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org was successful 2025-12-08T17:57:58.541472309+00:00 stderr F I1208 17:57:58.541002 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:57:58.697922243+00:00 stderr F I1208 17:57:58.696988 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:57:58.697922243+00:00 stderr F I1208 17:57:58.697035 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) 
openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited 2025-12-08T17:57:58.897920542+00:00 stderr F I1208 17:57:58.897457 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:57:58.897920542+00:00 stderr F I1208 17:57:58.897507 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited 2025-12-08T17:57:59.100792665+00:00 stderr F I1208 17:57:59.100283 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited was successful 2025-12-08T17:57:59.100836867+00:00 stderr F I1208 17:57:59.100790 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited 2025-12-08T17:57:59.298604278+00:00 stderr F I1208 17:57:59.298508 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited was successful 2025-12-08T17:57:59.298604278+00:00 stderr F I1208 17:57:59.298559 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited 2025-12-08T17:57:59.497373376+00:00 stderr F I1208 17:57:59.497322 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited was successful 2025-12-08T17:57:59.497373376+00:00 stderr F I1208 17:57:59.497361 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy 2025-12-08T17:57:59.697583890+00:00 stderr F I1208 17:57:59.697522 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy was successful 2025-12-08T17:57:59.697620961+00:00 stderr F I1208 17:57:59.697588 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy 2025-12-08T17:57:59.897701522+00:00 stderr F I1208 17:57:59.896681 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy was successful 2025-12-08T17:57:59.897701522+00:00 stderr F I1208 17:57:59.896721 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config 2025-12-08T17:58:00.097782284+00:00 stderr F I1208 17:58:00.097724 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config was successful 2025-12-08T17:58:00.097782284+00:00 stderr F I1208 17:58:00.097764 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:58:00.298323526+00:00 stderr F I1208 17:58:00.298265 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:58:00.298323526+00:00 stderr F I1208 17:58:00.298308 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:58:00.502982707+00:00 stderr F I1208 17:58:00.500067 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:58:00.502982707+00:00 stderr F I1208 17:58:00.500114 1 log.go:245] reconciling 
(rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:58:00.697847122+00:00 stderr F I1208 17:58:00.697799 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:58:00.697914164+00:00 stderr F I1208 17:58:00.697844 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:58:00.897965575+00:00 stderr F I1208 17:58:00.897894 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:58:00.897965575+00:00 stderr F I1208 17:58:00.897932 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T17:58:01.100328945+00:00 stderr F I1208 17:58:01.100021 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T17:58:01.100328945+00:00 stderr F I1208 17:58:01.100069 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn 2025-12-08T17:58:01.297941552+00:00 stderr F I1208 17:58:01.297574 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn was successful 2025-12-08T17:58:01.297941552+00:00 stderr F I1208 17:58:01.297614 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer 2025-12-08T17:58:01.499347648+00:00 stderr F I1208 17:58:01.497778 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer was successful 2025-12-08T17:58:01.499347648+00:00 stderr F I1208 17:58:01.497832 1 log.go:245] reconciling (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes 2025-12-08T17:58:01.699119261+00:00 stderr F I1208 17:58:01.699075 1 log.go:245] Apply / Create of (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes was successful 2025-12-08T17:58:01.699150132+00:00 stderr F I1208 17:58:01.699127 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader 2025-12-08T17:58:01.897964440+00:00 stderr F I1208 17:58:01.896618 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader was successful 2025-12-08T17:58:01.897964440+00:00 stderr F I1208 17:58:01.896659 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib 2025-12-08T17:58:02.101531761+00:00 stderr F I1208 17:58:02.101184 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib was successful 2025-12-08T17:58:02.101531761+00:00 stderr F I1208 17:58:02.101518 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor 2025-12-08T17:58:02.297639990+00:00 stderr F I1208 17:58:02.297170 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor was successful 2025-12-08T17:58:02.297639990+00:00 stderr F I1208 17:58:02.297618 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, 
Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer 2025-12-08T17:58:02.497191857+00:00 stderr F I1208 17:58:02.497131 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer was successful 2025-12-08T17:58:02.497191857+00:00 stderr F I1208 17:58:02.497177 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules 2025-12-08T17:58:02.699862026+00:00 stderr F I1208 17:58:02.699819 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules was successful 2025-12-08T17:58:02.699955978+00:00 stderr F I1208 17:58:02.699861 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules 2025-12-08T17:58:02.907246316+00:00 stderr F I1208 17:58:02.907097 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules was successful 2025-12-08T17:58:02.907246316+00:00 stderr F I1208 17:58:02.907133 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features 2025-12-08T17:58:03.097592456+00:00 stderr F I1208 17:58:03.097523 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features was successful 2025-12-08T17:58:03.097592456+00:00 stderr F I1208 17:58:03.097579 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics 2025-12-08T17:58:03.298166609+00:00 stderr F I1208 17:58:03.298104 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics was successful 2025-12-08T17:58:03.298166609+00:00 stderr F I1208 17:58:03.298148 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T17:58:03.498211650+00:00 stderr F I1208 17:58:03.498110 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T17:58:03.498211650+00:00 stderr F I1208 17:58:03.498164 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node 2025-12-08T17:58:03.700369915+00:00 stderr F I1208 17:58:03.699817 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node was successful 2025-12-08T17:58:03.700369915+00:00 stderr F I1208 17:58:03.700358 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T17:58:03.898903556+00:00 stderr F I1208 17:58:03.898796 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T17:58:03.898903556+00:00 stderr F I1208 17:58:03.898844 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:58:04.098498105+00:00 stderr F I1208 17:58:04.098439 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:58:04.098498105+00:00 stderr F I1208 17:58:04.098491 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T17:58:04.297701723+00:00 stderr F I1208 17:58:04.297649 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T17:58:04.297751414+00:00 stderr F I1208 17:58:04.297725 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-host-network 2025-12-08T17:58:04.500739801+00:00 stderr F I1208 17:58:04.499956 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-host-network was successful 2025-12-08T17:58:04.500739801+00:00 stderr F I1208 17:58:04.499993 1 log.go:245] reconciling (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas 2025-12-08T17:58:04.700947375+00:00 stderr F I1208 17:58:04.699022 1 log.go:245] Apply / Create of (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas was successful 2025-12-08T17:58:04.700947375+00:00 stderr F I1208 17:58:04.699070 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane 2025-12-08T17:58:04.927529321+00:00 stderr F I1208 17:58:04.926667 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane was successful 2025-12-08T17:58:04.927529321+00:00 stderr F I1208 17:58:04.926765 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node 2025-12-08T17:58:05.126927975+00:00 stderr F I1208 17:58:05.126437 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node was successful 2025-12-08T17:58:05.126927975+00:00 stderr F I1208 17:58:05.126473 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label 2025-12-08T17:58:05.299203348+00:00 stderr F I1208 17:58:05.299131 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label was successful 2025-12-08T17:58:05.299203348+00:00 stderr F I1208 17:58:05.299188 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding 2025-12-08T17:58:05.497609755+00:00 stderr F I1208 17:58:05.497546 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding was successful 2025-12-08T17:58:05.497609755+00:00 stderr F I1208 17:58:05.497594 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-diagnostics 2025-12-08T17:58:05.700772976+00:00 stderr F I1208 17:58:05.699954 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-diagnostics was successful 2025-12-08T17:58:05.700772976+00:00 stderr F I1208 17:58:05.700025 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:58:05.897746747+00:00 stderr F I1208 17:58:05.897690 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:58:05.897746747+00:00 stderr F I1208 17:58:05.897730 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics 2025-12-08T17:58:06.105926018+00:00 stderr F I1208 17:58:06.100610 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:58:06.105926018+00:00 stderr F I1208 17:58:06.104220 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) 
openshift-network-diagnostics/network-diagnostics 2025-12-08T17:58:06.298923985+00:00 stderr F I1208 17:58:06.298263 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T17:58:06.298923985+00:00 stderr F I1208 17:58:06.298790 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics 2025-12-08T17:58:06.498997667+00:00 stderr F I1208 17:58:06.498832 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics was successful 2025-12-08T17:58:06.498997667+00:00 stderr F I1208 17:58:06.498893 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics 2025-12-08T17:58:06.700633138+00:00 stderr F I1208 17:58:06.700573 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics was successful 2025-12-08T17:58:06.700688510+00:00 stderr F I1208 17:58:06.700630 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics 2025-12-08T17:58:06.896836169+00:00 stderr F I1208 17:58:06.896767 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics was successful 2025-12-08T17:58:06.896901921+00:00 stderr F I1208 17:58:06.896846 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source 2025-12-08T17:58:07.102110035+00:00 stderr F I1208 17:58:07.102060 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:58:07.102110035+00:00 stderr F I1208 17:58:07.102102 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-source 2025-12-08T17:58:07.298417288+00:00 stderr F I1208 17:58:07.298367 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:58:07.298417288+00:00 stderr F I1208 17:58:07.298408 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source 2025-12-08T17:58:07.498119660+00:00 stderr F I1208 17:58:07.498066 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source was successful 2025-12-08T17:58:07.498119660+00:00 stderr F I1208 17:58:07.498105 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:58:07.697948215+00:00 stderr F I1208 17:58:07.697829 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:58:07.697948215+00:00 stderr F I1208 17:58:07.697871 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s 2025-12-08T17:58:07.897567784+00:00 stderr F I1208 17:58:07.897524 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T17:58:07.897648796+00:00 stderr F I1208 17:58:07.897577 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target 2025-12-08T17:58:08.101126845+00:00 stderr F I1208 17:58:08.100807 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) 
openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:58:08.101126845+00:00 stderr F I1208 17:58:08.101107 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-target 2025-12-08T17:58:08.298317821+00:00 stderr F I1208 17:58:08.298277 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-target was successful 2025-12-08T17:58:08.298397753+00:00 stderr F I1208 17:58:08.298388 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role 2025-12-08T17:58:08.496791171+00:00 stderr F I1208 17:58:08.496748 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role was successful 2025-12-08T17:58:08.496919835+00:00 stderr F I1208 17:58:08.496907 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding 2025-12-08T17:58:08.697140430+00:00 stderr F I1208 17:58:08.697102 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding was successful 2025-12-08T17:58:08.697280093+00:00 stderr F I1208 17:58:08.697222 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-node-identity 2025-12-08T17:58:08.898961266+00:00 stderr F I1208 17:58:08.898910 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-node-identity was successful 2025-12-08T17:58:08.898961266+00:00 stderr F I1208 17:58:08.898955 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity 2025-12-08T17:58:09.098691468+00:00 stderr F I1208 17:58:09.098621 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:58:09.098788080+00:00 stderr F I1208 17:58:09.098777 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity 2025-12-08T17:58:09.299640112+00:00 stderr F I1208 17:58:09.299596 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity was successful 2025-12-08T17:58:09.299758845+00:00 stderr F I1208 17:58:09.299746 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity 2025-12-08T17:58:09.497479775+00:00 stderr F I1208 17:58:09.497414 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity was successful 2025-12-08T17:58:09.497479775+00:00 stderr F I1208 17:58:09.497468 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:58:09.698222823+00:00 stderr F I1208 17:58:09.698176 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:58:09.698304685+00:00 stderr F I1208 17:58:09.698294 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases 2025-12-08T17:58:09.904316609+00:00 stderr F I1208 17:58:09.901832 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T17:58:09.904316609+00:00 stderr F I1208 
17:58:09.901896 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 2025-12-08T17:58:10.097913394+00:00 stderr F I1208 17:58:10.097798 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 was successful 2025-12-08T17:58:10.097913394+00:00 stderr F I1208 17:58:10.097847 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm 2025-12-08T17:58:10.300466109+00:00 stderr F I1208 17:58:10.299964 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm was successful 2025-12-08T17:58:10.300466109+00:00 stderr F I1208 17:58:10.300006 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity 2025-12-08T17:58:10.500139049+00:00 stderr F I1208 17:58:10.498550 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:58:10.500139049+00:00 stderr F I1208 17:58:10.498591 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io 2025-12-08T17:58:10.698984198+00:00 stderr F I1208 17:58:10.698930 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io was successful 2025-12-08T17:58:10.699016349+00:00 stderr F I1208 17:58:10.698981 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity 2025-12-08T17:58:10.901258106+00:00 stderr F I1208 17:58:10.901209 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity was successful 2025-12-08T17:58:10.901258106+00:00 stderr F I1208 17:58:10.901249 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules 2025-12-08T17:58:11.099339526+00:00 stderr F I1208 17:58:11.099291 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules was successful 2025-12-08T17:58:11.099387957+00:00 stderr F I1208 17:58:11.099336 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter 2025-12-08T17:58:11.297607030+00:00 stderr F I1208 17:58:11.297551 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter was successful 2025-12-08T17:58:11.297607030+00:00 stderr F I1208 17:58:11.297591 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter 2025-12-08T17:58:11.498223145+00:00 stderr F I1208 17:58:11.498161 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:58:11.498223145+00:00 stderr F I1208 17:58:11.498214 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter 2025-12-08T17:58:11.698819120+00:00 stderr F I1208 17:58:11.698760 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter was successful 2025-12-08T17:58:11.698849201+00:00 stderr F I1208 17:58:11.698816 1 log.go:245] 
reconciling (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script 2025-12-08T17:58:11.900571554+00:00 stderr F I1208 17:58:11.900533 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script was successful 2025-12-08T17:58:11.900669547+00:00 stderr F I1208 17:58:11.900656 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter 2025-12-08T17:58:12.105307906+00:00 stderr F I1208 17:58:12.105234 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter was successful 2025-12-08T17:58:12.105476390+00:00 stderr F I1208 17:58:12.105465 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-console 2025-12-08T17:58:12.299501384+00:00 stderr F I1208 17:58:12.299463 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-console was successful 2025-12-08T17:58:12.299575407+00:00 stderr F I1208 17:58:12.299565 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin 2025-12-08T17:58:12.496605359+00:00 stderr F I1208 17:58:12.496571 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:58:12.496690512+00:00 stderr F I1208 17:58:12.496680 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin 2025-12-08T17:58:12.702557802+00:00 stderr F I1208 17:58:12.702516 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:58:12.702647414+00:00 stderr F I1208 17:58:12.702634 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-console/networking-console-plugin 2025-12-08T17:58:12.899015829+00:00 stderr F I1208 17:58:12.898544 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-console/networking-console-plugin was successful 2025-12-08T17:58:12.899097412+00:00 stderr F I1208 17:58:12.899086 1 log.go:245] reconciling (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin 2025-12-08T17:58:13.098115716+00:00 stderr F I1208 17:58:13.098056 1 log.go:245] Apply / Create of (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin was successful 2025-12-08T17:58:13.110368722+00:00 stderr F I1208 17:58:13.110303 1 log.go:245] Operconfig Controller complete 2025-12-08T17:58:52.947931577+00:00 stderr F I1208 17:58:52.947845 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:59:00.556937906+00:00 stderr F I1208 17:59:00.556859 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:59:03.600594933+00:00 stderr F I1208 17:59:03.600527 1 log.go:245] Reconciling update to IngressController openshift-ingress-operator/default 2025-12-08T17:59:05.395582781+00:00 stderr F I1208 17:59:05.394770 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-network-node-identity/network-node-identity 2025-12-08T17:59:05.396089274+00:00 stderr F I1208 17:59:05.396025 1 log.go:245] successful reconciliation 2025-12-08T17:59:08.592001535+00:00 stderr F I1208 17:59:08.591924 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/ovn 2025-12-08T17:59:08.592567519+00:00 stderr F I1208 17:59:08.592533 1 log.go:245] successful reconciliation 2025-12-08T17:59:10.382300359+00:00 
stderr F I1208 17:59:10.381831 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/signer 2025-12-08T17:59:10.382596857+00:00 stderr F I1208 17:59:10.382574 1 log.go:245] successful reconciliation 2025-12-08T17:59:56.159954705+00:00 stderr F I1208 17:59:56.158452 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:59:59.678679890+00:00 stderr F I1208 17:59:59.678258 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T17:59:59.687758980+00:00 stderr F I1208 17:59:59.687606 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:59:59.692645698+00:00 stderr F I1208 17:59:59.692594 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:59:59.699310233+00:00 stderr F I1208 17:59:59.699250 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:59:59.704645144+00:00 stderr F I1208 17:59:59.704549 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:59:59.712146910+00:00 stderr F I1208 17:59:59.711716 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T17:59:59.720932701+00:00 stderr F I1208 17:59:59.720566 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T17:59:59.727075884+00:00 stderr F I1208 17:59:59.726822 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T17:59:59.732198228+00:00 stderr F I1208 17:59:59.732141 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T17:59:59.736550802+00:00 stderr F I1208 17:59:59.736489 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T18:00:00.021839455+00:00 stderr F I1208 18:00:00.021765 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T18:00:00.080521788+00:00 stderr F I1208 18:00:00.080464 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T18:00:00.272768933+00:00 stderr F I1208 18:00:00.272704 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T18:00:00.474661033+00:00 stderr F I1208 18:00:00.474142 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 
2025-12-08T18:00:00.704520127+00:00 stderr F I1208 18:00:00.704463 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T18:00:00.873189184+00:00 stderr F I1208 18:00:00.873112 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T18:00:01.074612331+00:00 stderr F I1208 18:00:01.074531 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T18:00:01.272662599+00:00 stderr F I1208 18:00:01.272496 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T18:00:01.474729502+00:00 stderr F I1208 18:00:01.474673 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T18:00:01.675908963+00:00 stderr F I1208 18:00:01.674852 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T18:00:01.874766793+00:00 stderr F I1208 18:00:01.874696 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T18:00:02.075195324+00:00 stderr F I1208 18:00:02.075133 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T18:01:13.111269568+00:00 stderr F I1208 18:01:13.111202 1 log.go:245] Reconciling Network.operator.openshift.io cluster 2025-12-08T18:01:13.300345806+00:00 stderr F I1208 18:01:13.300290 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu-host=, the list of nodes are [] 2025-12-08T18:01:13.302575436+00:00 stderr F I1208 18:01:13.302529 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu=, the list of nodes are [] 2025-12-08T18:01:13.305089982+00:00 stderr F I1208 18:01:13.305065 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/smart-nic=, the list of nodes are [] 2025-12-08T18:01:13.307616459+00:00 stderr F I1208 18:01:13.307558 1 ovn_kubernetes.go:956] OVN configuration is now &{GatewayMode: HyperShiftConfig:0xc0051403c0 DisableUDPAggregation:false DpuHostModeLabel:network.operator.openshift.io/dpu-host DpuHostModeNodes:[] DpuHostModeValue: DpuModeLabel:network.operator.openshift.io/dpu DpuModeNodes:[] SmartNicModeLabel:network.operator.openshift.io/smart-nic SmartNicModeNodes:[] SmartNicModeValue: MgmtPortResourceName: ConfigOverrides:map[]} 2025-12-08T18:01:13.312820608+00:00 stderr F I1208 18:01:13.312795 1 ovn_kubernetes.go:1728] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete; 1/1 scheduled; 1 available; generation 4 -> 4 2025-12-08T18:01:13.312869739+00:00 stderr F I1208 18:01:13.312856 1 ovn_kubernetes.go:1733] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete 2025-12-08T18:01:13.312935031+00:00 stderr F I1208 18:01:13.312921 1 ovn_kubernetes.go:1248] ovnkube-control-plane deployment status: progressing=false 2025-12-08T18:01:13.315965472+00:00 stderr F I1208 18:01:13.315850 1 ovn_kubernetes.go:1693] daemonset 
openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 3 -> 3 2025-12-08T18:01:13.315965472+00:00 stderr F I1208 18:01:13.315916 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T18:01:13.315965472+00:00 stderr F I1208 18:01:13.315927 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 3 -> 3 2025-12-08T18:01:13.315965472+00:00 stderr F I1208 18:01:13.315937 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T18:01:13.315965472+00:00 stderr F I1208 18:01:13.315959 1 ovn_kubernetes.go:1279] ovnkube-node DaemonSet status: progressing=false 2025-12-08T18:01:13.320126333+00:00 stderr F I1208 18:01:13.320056 1 ovn_kubernetes.go:1321] Found the DefaultV4MasqueradeSubnet(169.254.0.0/17) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T18:01:13.320126333+00:00 stderr F I1208 18:01:13.320103 1 ovn_kubernetes.go:1318] Found the DefaultV6MasqueradeSubnet(fd69::/112) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T18:01:13.331160777+00:00 stderr F I1208 18:01:13.331091 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T18:01:13.345137969+00:00 stderr F I1208 18:01:13.345097 1 log.go:245] Apply / Create of (operator.openshift.io/v1, Kind=Network) /cluster was successful 2025-12-08T18:01:13.345223362+00:00 stderr F I1208 18:01:13.345203 1 log.go:245] Starting render phase 2025-12-08T18:01:13.359464401+00:00 stderr F I1208 18:01:13.359419 1 ovn_kubernetes.go:344] OVN_EGRESSIP_HEALTHCHECK_PORT env var is not defined. 
Using: 9107 2025-12-08T18:01:13.399984960+00:00 stderr F I1208 18:01:13.399938 1 ovn_kubernetes.go:1457] IP family mode: node=single-stack, controlPlane=single-stack 2025-12-08T18:01:13.400053252+00:00 stderr F I1208 18:01:13.400041 1 ovn_kubernetes.go:1429] IP family change: updateNode=true, updateControlPlane=true 2025-12-08T18:01:13.400109973+00:00 stderr F I1208 18:01:13.400098 1 ovn_kubernetes.go:1601] OVN-Kubernetes control-plane and node already at release version 4.20.1; no changes required 2025-12-08T18:01:13.400159165+00:00 stderr F I1208 18:01:13.400147 1 ovn_kubernetes.go:531] ovnk components: ovnkube-node: isRunning=true, update=true; ovnkube-control-plane: isRunning=true, update=true 2025-12-08T18:01:13.535098860+00:00 stderr F I1208 18:01:13.535016 1 ovn_kubernetes.go:1693] daemonset openshift-network-node-identity/network-node-identity rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 1 -> 1 2025-12-08T18:01:13.535212113+00:00 stderr F I1208 18:01:13.535190 1 ovn_kubernetes.go:1698] daemonset openshift-network-node-identity/network-node-identity rollout complete 2025-12-08T18:01:13.746100621+00:00 stderr F I1208 18:01:13.746029 1 log.go:245] Render phase done, rendered 126 objects 2025-12-08T18:01:13.765139529+00:00 stderr F I1208 18:01:13.765070 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster 2025-12-08T18:01:13.769712270+00:00 stderr F I1208 18:01:13.769652 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster was successful 2025-12-08T18:01:13.769712270+00:00 stderr F I1208 18:01:13.769700 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io 2025-12-08T18:01:13.776038329+00:00 stderr F I1208 18:01:13.776001 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io was successful 2025-12-08T18:01:13.776038329+00:00 stderr F I1208 18:01:13.776031 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io 2025-12-08T18:01:13.782681596+00:00 stderr F I1208 18:01:13.782639 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io was successful 2025-12-08T18:01:13.782681596+00:00 stderr F I1208 18:01:13.782664 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io 2025-12-08T18:01:13.788273305+00:00 stderr F I1208 18:01:13.788232 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io was successful 2025-12-08T18:01:13.788273305+00:00 stderr F I1208 18:01:13.788255 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io 2025-12-08T18:01:13.795765955+00:00 stderr F I1208 18:01:13.795698 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io was successful 2025-12-08T18:01:13.795765955+00:00 stderr F I1208 18:01:13.795752 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-multus 2025-12-08T18:01:13.801020084+00:00 stderr F I1208 18:01:13.800963 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-multus was successful 2025-12-08T18:01:13.801020084+00:00 stderr F I1208 
18:01:13.800998 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus 2025-12-08T18:01:13.805157865+00:00 stderr F I1208 18:01:13.805112 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus was successful 2025-12-08T18:01:13.805157865+00:00 stderr F I1208 18:01:13.805150 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools 2025-12-08T18:01:13.809299155+00:00 stderr F I1208 18:01:13.809248 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools was successful 2025-12-08T18:01:13.809322055+00:00 stderr F I1208 18:01:13.809297 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus 2025-12-08T18:01:13.812718817+00:00 stderr F I1208 18:01:13.812680 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus was successful 2025-12-08T18:01:13.812718817+00:00 stderr F I1208 18:01:13.812712 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient 2025-12-08T18:01:13.815514731+00:00 stderr F I1208 18:01:13.815470 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient was successful 2025-12-08T18:01:13.815514731+00:00 stderr F I1208 18:01:13.815499 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group 2025-12-08T18:01:13.970508300+00:00 stderr F I1208 18:01:13.970427 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group was successful 2025-12-08T18:01:13.970508300+00:00 stderr F I1208 18:01:13.970484 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools 2025-12-08T18:01:14.172435929+00:00 stderr F I1208 18:01:14.172342 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools was successful 2025-12-08T18:01:14.172435929+00:00 stderr F I1208 18:01:14.172383 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools 2025-12-08T18:01:14.369658043+00:00 stderr F I1208 18:01:14.369596 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools was successful 2025-12-08T18:01:14.369658043+00:00 stderr F I1208 18:01:14.369638 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers 2025-12-08T18:01:14.570762211+00:00 stderr F I1208 18:01:14.570672 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers was successful 2025-12-08T18:01:14.570762211+00:00 stderr F I1208 18:01:14.570749 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts 2025-12-08T18:01:14.773158993+00:00 stderr F I1208 18:01:14.773088 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts was successful 2025-12-08T18:01:14.773158993+00:00 stderr F I1208 18:01:14.773130 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts 2025-12-08T18:01:14.972487614+00:00 stderr F I1208 18:01:14.972412 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts was successful 2025-12-08T18:01:14.972487614+00:00 stderr F I1208 18:01:14.972460 1 log.go:245] reconciling 
(rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni 2025-12-08T18:01:15.173813918+00:00 stderr F I1208 18:01:15.173717 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni was successful 2025-12-08T18:01:15.173867320+00:00 stderr F I1208 18:01:15.173808 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni 2025-12-08T18:01:15.372357187+00:00 stderr F I1208 18:01:15.372285 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni was successful 2025-12-08T18:01:15.372357187+00:00 stderr F I1208 18:01:15.372335 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project 2025-12-08T18:01:15.573238669+00:00 stderr F I1208 18:01:15.573142 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project was successful 2025-12-08T18:01:15.573238669+00:00 stderr F I1208 18:01:15.573189 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist 2025-12-08T18:01:15.771516182+00:00 stderr F I1208 18:01:15.771435 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist was successful 2025-12-08T18:01:15.771516182+00:00 stderr F I1208 18:01:15.771493 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources 2025-12-08T18:01:15.972627290+00:00 stderr F I1208 18:01:15.972533 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources was successful 2025-12-08T18:01:15.972627290+00:00 stderr F I1208 18:01:15.972610 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config 2025-12-08T18:01:16.170431529+00:00 stderr F I1208 18:01:16.170362 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config was successful 2025-12-08T18:01:16.170431529+00:00 stderr F I1208 18:01:16.170423 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config 2025-12-08T18:01:16.389779583+00:00 stderr F I1208 18:01:16.389688 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config was successful 2025-12-08T18:01:16.389779583+00:00 stderr F I1208 18:01:16.389770 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus 2025-12-08T18:01:16.579791775+00:00 stderr F I1208 18:01:16.579736 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus was successful 2025-12-08T18:01:16.579791775+00:00 stderr F I1208 18:01:16.579782 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins 2025-12-08T18:01:16.786335428+00:00 stderr F I1208 18:01:16.786241 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins was successful 2025-12-08T18:01:16.786335428+00:00 stderr F I1208 18:01:16.786318 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa 2025-12-08T18:01:16.970979217+00:00 stderr F I1208 18:01:16.970860 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa was successful 2025-12-08T18:01:16.970979217+00:00 stderr F I1208 18:01:16.970965 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role 2025-12-08T18:01:17.171443878+00:00 stderr F I1208 18:01:17.171348 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role was successful 2025-12-08T18:01:17.171481559+00:00 stderr F I1208 18:01:17.171449 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding 2025-12-08T18:01:17.371474327+00:00 stderr F I1208 18:01:17.371374 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding was successful 2025-12-08T18:01:17.371474327+00:00 stderr F I1208 18:01:17.371423 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon 2025-12-08T18:01:17.581167583+00:00 stderr F I1208 18:01:17.580590 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon was successful 2025-12-08T18:01:17.581167583+00:00 stderr F I1208 18:01:17.581135 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network 2025-12-08T18:01:17.771644128+00:00 stderr F I1208 18:01:17.771567 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network was successful 2025-12-08T18:01:17.771644128+00:00 stderr F I1208 18:01:17.771636 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/network-metrics-service 2025-12-08T18:01:17.972908840+00:00 stderr F I1208 18:01:17.972714 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/network-metrics-service was successful 2025-12-08T18:01:17.972908840+00:00 stderr F I1208 18:01:17.972778 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T18:01:18.170499594+00:00 stderr F I1208 18:01:18.170407 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T18:01:18.170499594+00:00 stderr F I1208 18:01:18.170459 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T18:01:18.369742092+00:00 stderr F I1208 18:01:18.369679 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T18:01:18.369742092+00:00 stderr F I1208 18:01:18.369725 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/multus-admission-controller 2025-12-08T18:01:18.576212673+00:00 stderr F I1208 18:01:18.576108 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/multus-admission-controller was successful 2025-12-08T18:01:18.576212673+00:00 stderr F I1208 18:01:18.576172 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ac 2025-12-08T18:01:18.770785217+00:00 stderr F I1208 18:01:18.770717 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ac was successful 2025-12-08T18:01:18.770831949+00:00 stderr F I1208 18:01:18.770780 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook 2025-12-08T18:01:18.971363880+00:00 stderr F I1208 18:01:18.970393 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook was successful 2025-12-08T18:01:18.971363880+00:00 stderr F I1208 18:01:18.970456 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook 2025-12-08T18:01:19.170493565+00:00 stderr F I1208 18:01:19.170407 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook was successful 2025-12-08T18:01:19.170493565+00:00 stderr F I1208 18:01:19.170452 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io 2025-12-08T18:01:19.371792369+00:00 stderr F I1208 18:01:19.371295 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io was successful 2025-12-08T18:01:19.371792369+00:00 stderr F I1208 18:01:19.371763 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller 2025-12-08T18:01:19.574313054+00:00 stderr F I1208 18:01:19.574197 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller was successful 2025-12-08T18:01:19.574386966+00:00 stderr F I1208 18:01:19.574318 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller 2025-12-08T18:01:19.775835853+00:00 stderr F I1208 18:01:19.775736 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller was successful 2025-12-08T18:01:19.775835853+00:00 stderr F I1208 18:01:19.775791 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T18:01:19.970364936+00:00 stderr F I1208 18:01:19.970294 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T18:01:19.970364936+00:00 stderr F I1208 18:01:19.970346 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T18:01:20.170373394+00:00 stderr F I1208 18:01:20.170240 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T18:01:20.170373394+00:00 stderr F I1208 18:01:20.170285 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules 2025-12-08T18:01:20.388247849+00:00 stderr F I1208 18:01:20.388132 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules was successful 2025-12-08T18:01:20.388247849+00:00 stderr F I1208 18:01:20.388195 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-ovn-kubernetes 2025-12-08T18:01:20.571791178+00:00 stderr F I1208 18:01:20.571707 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-ovn-kubernetes was successful 2025-12-08T18:01:20.571791178+00:00 stderr F I1208 18:01:20.571771 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org 2025-12-08T18:01:20.774929801+00:00 stderr F I1208 18:01:20.774832 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org was successful 2025-12-08T18:01:20.774929801+00:00 stderr F I1208 18:01:20.774913 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org 2025-12-08T18:01:20.979398988+00:00 stderr F I1208 18:01:20.979287 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org was successful 2025-12-08T18:01:20.979398988+00:00 stderr F I1208 18:01:20.979353 1 log.go:245] reconciling (apiextensions.k8s.io/v1, 
Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org 2025-12-08T18:01:21.183140926+00:00 stderr F I1208 18:01:21.183042 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org was successful 2025-12-08T18:01:21.183140926+00:00 stderr F I1208 18:01:21.183094 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org 2025-12-08T18:01:21.383027331+00:00 stderr F I1208 18:01:21.382963 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org was successful 2025-12-08T18:01:21.383027331+00:00 stderr F I1208 18:01:21.383009 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org 2025-12-08T18:01:21.581819367+00:00 stderr F I1208 18:01:21.581739 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org was successful 2025-12-08T18:01:21.581819367+00:00 stderr F I1208 18:01:21.581798 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io 2025-12-08T18:01:21.830997046+00:00 stderr F I1208 18:01:21.830918 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T18:01:21.830997046+00:00 stderr F I1208 18:01:21.830960 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io 2025-12-08T18:01:22.006646356+00:00 stderr F I1208 18:01:22.006568 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T18:01:22.006646356+00:00 stderr F I1208 18:01:22.006637 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io 2025-12-08T18:01:22.176237624+00:00 stderr F I1208 18:01:22.176161 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io was successful 2025-12-08T18:01:22.176237624+00:00 stderr F I1208 18:01:22.176226 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org 2025-12-08T18:01:22.405023999+00:00 stderr F I1208 18:01:22.404932 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org was successful 2025-12-08T18:01:22.405023999+00:00 stderr F I1208 18:01:22.405002 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org 2025-12-08T18:01:22.657406373+00:00 stderr F I1208 18:01:22.656942 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org was successful 2025-12-08T18:01:22.657406373+00:00 stderr F I1208 18:01:22.657391 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T18:01:22.771536314+00:00 stderr F I1208 18:01:22.771066 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T18:01:22.771536314+00:00 stderr F I1208 18:01:22.771394 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) 
openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited 2025-12-08T18:01:22.969670872+00:00 stderr F I1208 18:01:22.969566 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited was successful 2025-12-08T18:01:22.969670872+00:00 stderr F I1208 18:01:22.969620 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited 2025-12-08T18:01:23.173396660+00:00 stderr F I1208 18:01:23.172743 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited was successful 2025-12-08T18:01:23.173396660+00:00 stderr F I1208 18:01:23.173383 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited 2025-12-08T18:01:23.376107910+00:00 stderr F I1208 18:01:23.376028 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited was successful 2025-12-08T18:01:23.376107910+00:00 stderr F I1208 18:01:23.376087 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited 2025-12-08T18:01:23.569948675+00:00 stderr F I1208 18:01:23.569897 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited was successful 2025-12-08T18:01:23.569948675+00:00 stderr F I1208 18:01:23.569935 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy 2025-12-08T18:01:23.770708643+00:00 stderr F I1208 18:01:23.770632 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy was successful 2025-12-08T18:01:23.770708643+00:00 stderr F I1208 18:01:23.770684 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy 2025-12-08T18:01:23.971608305+00:00 stderr F I1208 18:01:23.971166 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy was successful 2025-12-08T18:01:23.971608305+00:00 stderr F I1208 18:01:23.971219 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config 2025-12-08T18:01:24.171340846+00:00 stderr F I1208 18:01:24.171231 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config was successful 2025-12-08T18:01:24.171340846+00:00 stderr F I1208 18:01:24.171272 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T18:01:24.370914114+00:00 stderr F I1208 18:01:24.370807 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T18:01:24.370914114+00:00 stderr F I1208 18:01:24.370858 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T18:01:24.572038681+00:00 stderr F I1208 18:01:24.571922 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T18:01:24.572090473+00:00 stderr F I1208 18:01:24.572034 1 log.go:245] reconciling 
(rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T18:01:24.769998766+00:00 stderr F I1208 18:01:24.769864 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T18:01:24.769998766+00:00 stderr F I1208 18:01:24.769976 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T18:01:24.969602723+00:00 stderr F I1208 18:01:24.969529 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T18:01:24.969602723+00:00 stderr F I1208 18:01:24.969576 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T18:01:25.171278486+00:00 stderr F I1208 18:01:25.171150 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T18:01:25.171278486+00:00 stderr F I1208 18:01:25.171236 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn 2025-12-08T18:01:25.373526714+00:00 stderr F I1208 18:01:25.373412 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn was successful 2025-12-08T18:01:25.373526714+00:00 stderr F I1208 18:01:25.373473 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer 2025-12-08T18:01:25.570955124+00:00 stderr F I1208 18:01:25.570862 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer was successful 2025-12-08T18:01:25.570955124+00:00 stderr F I1208 18:01:25.570942 1 log.go:245] reconciling (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes 2025-12-08T18:01:25.772952166+00:00 stderr F I1208 18:01:25.772813 1 log.go:245] Apply / Create of (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes was successful 2025-12-08T18:01:25.772952166+00:00 stderr F I1208 18:01:25.772859 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader 2025-12-08T18:01:25.972059480+00:00 stderr F I1208 18:01:25.971850 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader was successful 2025-12-08T18:01:25.972059480+00:00 stderr F I1208 18:01:25.971934 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib 2025-12-08T18:01:26.174563275+00:00 stderr F I1208 18:01:26.174478 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib was successful 2025-12-08T18:01:26.174563275+00:00 stderr F I1208 18:01:26.174540 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor 2025-12-08T18:01:26.372399666+00:00 stderr F I1208 18:01:26.372312 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor was successful 2025-12-08T18:01:26.372399666+00:00 stderr F I1208 18:01:26.372371 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, 
Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer 2025-12-08T18:01:26.569709472+00:00 stderr F I1208 18:01:26.569636 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer was successful 2025-12-08T18:01:26.569709472+00:00 stderr F I1208 18:01:26.569676 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules 2025-12-08T18:01:26.773149412+00:00 stderr F I1208 18:01:26.773070 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules was successful 2025-12-08T18:01:26.773149412+00:00 stderr F I1208 18:01:26.773116 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules 2025-12-08T18:01:26.973103389+00:00 stderr F I1208 18:01:26.973035 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules was successful 2025-12-08T18:01:26.973103389+00:00 stderr F I1208 18:01:26.973091 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features 2025-12-08T18:01:27.171403102+00:00 stderr F I1208 18:01:27.171339 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features was successful 2025-12-08T18:01:27.171403102+00:00 stderr F I1208 18:01:27.171394 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics 2025-12-08T18:01:27.371193165+00:00 stderr F I1208 18:01:27.371111 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics was successful 2025-12-08T18:01:27.371193165+00:00 stderr F I1208 18:01:27.371155 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T18:01:27.572923229+00:00 stderr F I1208 18:01:27.572844 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T18:01:27.573038262+00:00 stderr F I1208 18:01:27.572978 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node 2025-12-08T18:01:27.771281083+00:00 stderr F I1208 18:01:27.771232 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node was successful 2025-12-08T18:01:27.771389546+00:00 stderr F I1208 18:01:27.771374 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T18:01:27.971114967+00:00 stderr F I1208 18:01:27.970526 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T18:01:27.971114967+00:00 stderr F I1208 18:01:27.971061 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T18:01:28.170317744+00:00 stderr F I1208 18:01:28.169932 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T18:01:28.170317744+00:00 stderr F I1208 18:01:28.170297 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T18:01:28.369641904+00:00 stderr F I1208 18:01:28.369589 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T18:01:28.369751537+00:00 stderr F I1208 18:01:28.369739 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-host-network 2025-12-08T18:01:28.574399119+00:00 stderr F I1208 18:01:28.574325 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-host-network was successful 2025-12-08T18:01:28.574399119+00:00 stderr F I1208 18:01:28.574391 1 log.go:245] reconciling (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas 2025-12-08T18:01:28.772429605+00:00 stderr F I1208 18:01:28.772376 1 log.go:245] Apply / Create of (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas was successful 2025-12-08T18:01:28.772564229+00:00 stderr F I1208 18:01:28.772521 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane 2025-12-08T18:01:28.979025959+00:00 stderr F I1208 18:01:28.978966 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane was successful 2025-12-08T18:01:28.979025959+00:00 stderr F I1208 18:01:28.979004 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node 2025-12-08T18:01:29.191750326+00:00 stderr F I1208 18:01:29.191685 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node was successful 2025-12-08T18:01:29.191750326+00:00 stderr F I1208 18:01:29.191739 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label 2025-12-08T18:01:29.370610041+00:00 stderr F I1208 18:01:29.370566 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label was successful 2025-12-08T18:01:29.370699444+00:00 stderr F I1208 18:01:29.370689 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding 2025-12-08T18:01:29.570576890+00:00 stderr F I1208 18:01:29.570479 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding was successful 2025-12-08T18:01:29.570576890+00:00 stderr F I1208 18:01:29.570548 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-diagnostics 2025-12-08T18:01:29.773571527+00:00 stderr F I1208 18:01:29.773523 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-diagnostics was successful 2025-12-08T18:01:29.773763812+00:00 stderr F I1208 18:01:29.773749 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics 2025-12-08T18:01:29.969546208+00:00 stderr F I1208 18:01:29.969448 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T18:01:29.969546208+00:00 stderr F I1208 18:01:29.969508 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics 2025-12-08T18:01:30.171434517+00:00 stderr F I1208 18:01:30.171328 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T18:01:30.171434517+00:00 stderr F I1208 18:01:30.171408 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) 
openshift-network-diagnostics/network-diagnostics 2025-12-08T18:01:30.372318049+00:00 stderr F I1208 18:01:30.372232 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T18:01:30.372475473+00:00 stderr F I1208 18:01:30.372458 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics 2025-12-08T18:01:30.572140372+00:00 stderr F I1208 18:01:30.572062 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics was successful 2025-12-08T18:01:30.572140372+00:00 stderr F I1208 18:01:30.572120 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics 2025-12-08T18:01:30.770293302+00:00 stderr F I1208 18:01:30.770212 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics was successful 2025-12-08T18:01:30.770293302+00:00 stderr F I1208 18:01:30.770261 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics 2025-12-08T18:01:30.970013272+00:00 stderr F I1208 18:01:30.969963 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics was successful 2025-12-08T18:01:30.970042873+00:00 stderr F I1208 18:01:30.970010 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source 2025-12-08T18:01:31.174922181+00:00 stderr F I1208 18:01:31.174789 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source was successful 2025-12-08T18:01:31.174922181+00:00 stderr F I1208 18:01:31.174842 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-source 2025-12-08T18:01:31.373179204+00:00 stderr F I1208 18:01:31.373087 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-source was successful 2025-12-08T18:01:31.373179204+00:00 stderr F I1208 18:01:31.373150 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source 2025-12-08T18:01:31.571381354+00:00 stderr F I1208 18:01:31.570727 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source was successful 2025-12-08T18:01:31.571429816+00:00 stderr F I1208 18:01:31.571377 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s 2025-12-08T18:01:31.769562934+00:00 stderr F I1208 18:01:31.769486 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T18:01:31.769603335+00:00 stderr F I1208 18:01:31.769566 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s 2025-12-08T18:01:31.973821146+00:00 stderr F I1208 18:01:31.973300 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T18:01:31.973821146+00:00 stderr F I1208 18:01:31.973361 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target 2025-12-08T18:01:32.172730905+00:00 stderr F I1208 18:01:32.172677 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) 
openshift-network-diagnostics/network-check-target was successful 2025-12-08T18:01:32.172730905+00:00 stderr F I1208 18:01:32.172724 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-target 2025-12-08T18:01:32.372086486+00:00 stderr F I1208 18:01:32.372020 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-target was successful 2025-12-08T18:01:32.372086486+00:00 stderr F I1208 18:01:32.372071 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role 2025-12-08T18:01:32.569930366+00:00 stderr F I1208 18:01:32.569852 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role was successful 2025-12-08T18:01:32.569981138+00:00 stderr F I1208 18:01:32.569945 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding 2025-12-08T18:01:32.774830474+00:00 stderr F I1208 18:01:32.773401 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding was successful 2025-12-08T18:01:32.774870805+00:00 stderr F I1208 18:01:32.774863 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-node-identity 2025-12-08T18:01:32.976354493+00:00 stderr F I1208 18:01:32.975638 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-node-identity was successful 2025-12-08T18:01:32.976354493+00:00 stderr F I1208 18:01:32.976296 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity 2025-12-08T18:01:33.170632450+00:00 stderr F I1208 18:01:33.170555 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity was successful 2025-12-08T18:01:33.170632450+00:00 stderr F I1208 18:01:33.170602 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity 2025-12-08T18:01:33.370394901+00:00 stderr F I1208 18:01:33.370331 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity was successful 2025-12-08T18:01:33.370394901+00:00 stderr F I1208 18:01:33.370372 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity 2025-12-08T18:01:33.573621766+00:00 stderr F I1208 18:01:33.573512 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity was successful 2025-12-08T18:01:33.573621766+00:00 stderr F I1208 18:01:33.573578 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases 2025-12-08T18:01:33.771740654+00:00 stderr F I1208 18:01:33.771662 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T18:01:33.771740654+00:00 stderr F I1208 18:01:33.771715 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases 2025-12-08T18:01:33.969686698+00:00 stderr F I1208 18:01:33.969642 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T18:01:33.969686698+00:00 stderr F I1208 
18:01:33.969681 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 2025-12-08T18:01:34.172140021+00:00 stderr F I1208 18:01:34.172062 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2 was successful 2025-12-08T18:01:34.172140021+00:00 stderr F I1208 18:01:34.172118 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm 2025-12-08T18:01:34.370337101+00:00 stderr F I1208 18:01:34.370264 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-node-identity/ovnkube-identity-cm was successful 2025-12-08T18:01:34.370337101+00:00 stderr F I1208 18:01:34.370313 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity 2025-12-08T18:01:34.571949293+00:00 stderr F I1208 18:01:34.571822 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-network-node-identity/network-node-identity was successful 2025-12-08T18:01:34.571949293+00:00 stderr F I1208 18:01:34.571908 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io 2025-12-08T18:01:34.772046333+00:00 stderr F I1208 18:01:34.771987 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /network-node-identity.openshift.io was successful 2025-12-08T18:01:34.772046333+00:00 stderr F I1208 18:01:34.772033 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity 2025-12-08T18:01:34.976031728+00:00 stderr F I1208 18:01:34.975862 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-node-identity/network-node-identity was successful 2025-12-08T18:01:34.976031728+00:00 stderr F I1208 18:01:34.975971 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules 2025-12-08T18:01:35.175291167+00:00 stderr F I1208 18:01:35.175195 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-network-operator/openshift-network-operator-ipsec-rules was successful 2025-12-08T18:01:35.175291167+00:00 stderr F I1208 18:01:35.175258 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter 2025-12-08T18:01:35.369459300+00:00 stderr F I1208 18:01:35.369363 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-iptables-alerter was successful 2025-12-08T18:01:35.369459300+00:00 stderr F I1208 18:01:35.369406 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter 2025-12-08T18:01:35.570517656+00:00 stderr F I1208 18:01:35.570429 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-operator/iptables-alerter was successful 2025-12-08T18:01:35.570517656+00:00 stderr F I1208 18:01:35.570475 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter 2025-12-08T18:01:35.770787092+00:00 stderr F I1208 18:01:35.770704 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-iptables-alerter was successful 2025-12-08T18:01:35.770851294+00:00 stderr F I1208 18:01:35.770795 1 log.go:245] 
reconciling (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script 2025-12-08T18:01:35.973702507+00:00 stderr F I1208 18:01:35.973615 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/iptables-alerter-script was successful 2025-12-08T18:01:35.973702507+00:00 stderr F I1208 18:01:35.973690 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter 2025-12-08T18:01:36.176315766+00:00 stderr F I1208 18:01:36.176216 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-network-operator/iptables-alerter was successful 2025-12-08T18:01:36.176315766+00:00 stderr F I1208 18:01:36.176282 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-console 2025-12-08T18:01:36.374950478+00:00 stderr F I1208 18:01:36.374821 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-console was successful 2025-12-08T18:01:36.374998919+00:00 stderr F I1208 18:01:36.374946 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin 2025-12-08T18:01:36.569428099+00:00 stderr F I1208 18:01:36.569348 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-console/networking-console-plugin was successful 2025-12-08T18:01:36.569428099+00:00 stderr F I1208 18:01:36.569391 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin 2025-12-08T18:01:36.781973832+00:00 stderr F I1208 18:01:36.781818 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-console/networking-console-plugin was successful 2025-12-08T18:01:36.781973832+00:00 stderr F I1208 18:01:36.781905 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-console/networking-console-plugin 2025-12-08T18:01:36.971653955+00:00 stderr F I1208 18:01:36.971250 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-console/networking-console-plugin was successful 2025-12-08T18:01:36.971653955+00:00 stderr F I1208 18:01:36.971301 1 log.go:245] reconciling (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin 2025-12-08T18:01:37.169725812+00:00 stderr F I1208 18:01:37.169655 1 log.go:245] Apply / Create of (console.openshift.io/v1, Kind=ConsolePlugin) /networking-console-plugin was successful 2025-12-08T18:01:37.180354675+00:00 stderr F I1208 18:01:37.180245 1 log.go:245] Operconfig Controller complete 2025-12-08T18:02:03.611015330+00:00 stderr F I1208 18:02:03.610495 1 log.go:245] Reconciling update to IngressController openshift-ingress-operator/default 2025-12-08T18:03:01.643006733+00:00 stderr F I1208 18:03:01.641631 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-default-service-cluster-0 -n openshift-network-diagnostics is applied 2025-12-08T18:03:01.646847546+00:00 stderr F I1208 18:03:01.646830 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T18:03:01.651608554+00:00 stderr F I1208 18:03:01.651586 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T18:03:01.657768069+00:00 stderr F I1208 18:03:01.657731 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 
2025-12-08T18:03:01.663408090+00:00 stderr F I1208 18:03:01.663373 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-kubernetes-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T18:03:01.695592133+00:00 stderr F I1208 18:03:01.695545 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T18:03:01.702294343+00:00 stderr F I1208 18:03:01.702267 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-openshift-apiserver-endpoint-crc -n openshift-network-diagnostics is applied 2025-12-08T18:03:01.710586245+00:00 stderr F I1208 18:03:01.710557 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-external -n openshift-network-diagnostics is applied 2025-12-08T18:03:01.726799169+00:00 stderr F I1208 18:03:01.726762 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-load-balancer-api-internal -n openshift-network-diagnostics is applied 2025-12-08T18:03:01.735014069+00:00 stderr F I1208 18:03:01.734979 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-service-cluster -n openshift-network-diagnostics is applied 2025-12-08T18:03:01.847688509+00:00 stderr F I1208 18:03:01.847631 1 log.go:245] The check PodNetworkConnectivityCheck/network-check-source-crc-to-network-check-target-crc -n openshift-network-diagnostics is applied 2025-12-08T18:04:05.407054255+00:00 stderr F I1208 18:04:05.406661 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-network-node-identity/network-node-identity 2025-12-08T18:04:05.407184419+00:00 stderr F I1208 18:04:05.407139 1 log.go:245] successful reconciliation 2025-12-08T18:04:08.607763999+00:00 stderr F I1208 18:04:08.607708 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/ovn 2025-12-08T18:04:08.608140019+00:00 stderr F I1208 18:04:08.608116 1 log.go:245] successful reconciliation 2025-12-08T18:04:10.394616936+00:00 stderr F I1208 18:04:10.394114 1 log.go:245] Reconciling pki.network.operator.openshift.io openshift-ovn-kubernetes/signer 2025-12-08T18:04:10.394991117+00:00 stderr F I1208 18:04:10.394958 1 log.go:245] successful reconciliation 2025-12-08T18:04:37.180743890+00:00 stderr F I1208 18:04:37.180682 1 log.go:245] Reconciling Network.operator.openshift.io cluster 2025-12-08T18:04:37.301586245+00:00 stderr F I1208 18:04:37.301537 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu-host=, the list of nodes are [] 2025-12-08T18:04:37.302814968+00:00 stderr F I1208 18:04:37.302779 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/dpu=, the list of nodes are [] 2025-12-08T18:04:37.304123362+00:00 stderr F I1208 18:04:37.304100 1 ovn_kubernetes.go:791] For Label network.operator.openshift.io/smart-nic=, the list of nodes are [] 2025-12-08T18:04:37.305555221+00:00 stderr F I1208 18:04:37.305339 1 ovn_kubernetes.go:956] OVN configuration is now &{GatewayMode: HyperShiftConfig:0xc003e278c0 DisableUDPAggregation:false DpuHostModeLabel:network.operator.openshift.io/dpu-host DpuHostModeNodes:[] DpuHostModeValue: DpuModeLabel:network.operator.openshift.io/dpu DpuModeNodes:[] SmartNicModeLabel:network.operator.openshift.io/smart-nic SmartNicModeNodes:[] SmartNicModeValue: MgmtPortResourceName: ConfigOverrides:map[]} 2025-12-08T18:04:37.308893269+00:00 stderr F I1208 
18:04:37.308827 1 ovn_kubernetes.go:1728] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete; 1/1 scheduled; 1 available; generation 4 -> 4 2025-12-08T18:04:37.308893269+00:00 stderr F I1208 18:04:37.308850 1 ovn_kubernetes.go:1733] deployment openshift-ovn-kubernetes/ovnkube-control-plane rollout complete 2025-12-08T18:04:37.308893269+00:00 stderr F I1208 18:04:37.308856 1 ovn_kubernetes.go:1248] ovnkube-control-plane deployment status: progressing=false 2025-12-08T18:04:37.310698817+00:00 stderr F I1208 18:04:37.310634 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 3 -> 3 2025-12-08T18:04:37.310698817+00:00 stderr F I1208 18:04:37.310657 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T18:04:37.310698817+00:00 stderr F I1208 18:04:37.310663 1 ovn_kubernetes.go:1693] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 3 -> 3 2025-12-08T18:04:37.310698817+00:00 stderr F I1208 18:04:37.310668 1 ovn_kubernetes.go:1698] daemonset openshift-ovn-kubernetes/ovnkube-node rollout complete 2025-12-08T18:04:37.310698817+00:00 stderr F I1208 18:04:37.310683 1 ovn_kubernetes.go:1279] ovnkube-node DaemonSet status: progressing=false 2025-12-08T18:04:37.317147978+00:00 stderr F I1208 18:04:37.317092 1 ovn_kubernetes.go:1321] Found the DefaultV4MasqueradeSubnet(169.254.0.0/17) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T18:04:37.317210980+00:00 stderr F I1208 18:04:37.317180 1 ovn_kubernetes.go:1318] Found the DefaultV6MasqueradeSubnet(fd69::/112) in the "networkoperator.openshift.io/default-masquerade-network-cidrs" annotation 2025-12-08T18:04:37.391606532+00:00 stderr F I1208 18:04:37.391549 1 log.go:245] reconciling (operator.openshift.io/v1, Kind=Network) /cluster 2025-12-08T18:04:37.404652238+00:00 stderr F I1208 18:04:37.404602 1 log.go:245] Apply / Create of (operator.openshift.io/v1, Kind=Network) /cluster was successful 2025-12-08T18:04:37.404652238+00:00 stderr F I1208 18:04:37.404629 1 log.go:245] Starting render phase 2025-12-08T18:04:37.413175244+00:00 stderr F I1208 18:04:37.413133 1 ovn_kubernetes.go:344] OVN_EGRESSIP_HEALTHCHECK_PORT env var is not defined. 
Using: 9107 2025-12-08T18:04:37.448983594+00:00 stderr F I1208 18:04:37.448928 1 ovn_kubernetes.go:1457] IP family mode: node=single-stack, controlPlane=single-stack 2025-12-08T18:04:37.448983594+00:00 stderr F I1208 18:04:37.448949 1 ovn_kubernetes.go:1429] IP family change: updateNode=true, updateControlPlane=true 2025-12-08T18:04:37.448983594+00:00 stderr F I1208 18:04:37.448967 1 ovn_kubernetes.go:1601] OVN-Kubernetes control-plane and node already at release version 4.20.1; no changes required 2025-12-08T18:04:37.449018615+00:00 stderr F I1208 18:04:37.448987 1 ovn_kubernetes.go:531] ovnk components: ovnkube-node: isRunning=true, update=true; ovnkube-control-plane: isRunning=true, update=true 2025-12-08T18:04:37.595419647+00:00 stderr F I1208 18:04:37.595357 1 ovn_kubernetes.go:1693] daemonset openshift-network-node-identity/network-node-identity rollout complete; 1/1 scheduled; 0 unavailable; 1 available; generation 1 -> 1 2025-12-08T18:04:37.595419647+00:00 stderr F I1208 18:04:37.595391 1 ovn_kubernetes.go:1698] daemonset openshift-network-node-identity/network-node-identity rollout complete 2025-12-08T18:04:37.807027350+00:00 stderr F I1208 18:04:37.806978 1 log.go:245] Render phase done, rendered 126 objects 2025-12-08T18:04:37.820789245+00:00 stderr F I1208 18:04:37.820727 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster 2025-12-08T18:04:37.825644643+00:00 stderr F I1208 18:04:37.825583 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-network-operator/applied-cluster was successful 2025-12-08T18:04:37.825644643+00:00 stderr F I1208 18:04:37.825628 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io 2025-12-08T18:04:37.831122828+00:00 stderr F I1208 18:04:37.831011 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /network-attachment-definitions.k8s.cni.cncf.io was successful 2025-12-08T18:04:37.831122828+00:00 stderr F I1208 18:04:37.831053 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io 2025-12-08T18:04:37.836560402+00:00 stderr F I1208 18:04:37.836515 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ippools.whereabouts.cni.cncf.io was successful 2025-12-08T18:04:37.836589263+00:00 stderr F I1208 18:04:37.836562 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io 2025-12-08T18:04:37.841805842+00:00 stderr F I1208 18:04:37.841684 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /overlappingrangeipreservations.whereabouts.cni.cncf.io was successful 2025-12-08T18:04:37.841805842+00:00 stderr F I1208 18:04:37.841732 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io 2025-12-08T18:04:37.851237733+00:00 stderr F I1208 18:04:37.851188 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /nodeslicepools.whereabouts.cni.cncf.io was successful 2025-12-08T18:04:37.851237733+00:00 stderr F I1208 18:04:37.851226 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-multus 2025-12-08T18:04:37.856207944+00:00 stderr F I1208 18:04:37.856153 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-multus was successful 2025-12-08T18:04:37.856207944+00:00 stderr F I1208 
18:04:37.856192 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus 2025-12-08T18:04:37.861127204+00:00 stderr F I1208 18:04:37.861063 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus was successful 2025-12-08T18:04:37.861127204+00:00 stderr F I1208 18:04:37.861091 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools 2025-12-08T18:04:37.865452069+00:00 stderr F I1208 18:04:37.865371 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-ancillary-tools was successful 2025-12-08T18:04:37.865452069+00:00 stderr F I1208 18:04:37.865436 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus 2025-12-08T18:04:37.872116096+00:00 stderr F I1208 18:04:37.869043 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus was successful 2025-12-08T18:04:37.872116096+00:00 stderr F I1208 18:04:37.869078 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient 2025-12-08T18:04:37.873424590+00:00 stderr F I1208 18:04:37.873356 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-transient was successful 2025-12-08T18:04:37.873466941+00:00 stderr F I1208 18:04:37.873427 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group 2025-12-08T18:04:38.025604807+00:00 stderr F I1208 18:04:38.025063 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-group was successful 2025-12-08T18:04:38.025604807+00:00 stderr F I1208 18:04:38.025569 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools 2025-12-08T18:04:38.229207935+00:00 stderr F I1208 18:04:38.228461 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ancillary-tools was successful 2025-12-08T18:04:38.229207935+00:00 stderr F I1208 18:04:38.229096 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools 2025-12-08T18:04:38.428302456+00:00 stderr F I1208 18:04:38.428161 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-ancillary-tools was successful 2025-12-08T18:04:38.428302456+00:00 stderr F I1208 18:04:38.428277 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers 2025-12-08T18:04:38.634450163+00:00 stderr F I1208 18:04:38.634369 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-cluster-readers was successful 2025-12-08T18:04:38.634450163+00:00 stderr F I1208 18:04:38.634429 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts 2025-12-08T18:04:38.826072305+00:00 stderr F I1208 18:04:38.826011 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-whereabouts was successful 2025-12-08T18:04:38.826104916+00:00 stderr F I1208 18:04:38.826071 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts 2025-12-08T18:04:39.026241483+00:00 stderr F I1208 18:04:39.026177 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/multus-whereabouts was successful 2025-12-08T18:04:39.026278584+00:00 stderr F I1208 18:04:39.026245 1 log.go:245] reconciling 
(rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni 2025-12-08T18:04:39.228100667+00:00 stderr F I1208 18:04:39.228024 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /whereabouts-cni was successful 2025-12-08T18:04:39.228100667+00:00 stderr F I1208 18:04:39.228073 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni 2025-12-08T18:04:39.440996623+00:00 stderr F I1208 18:04:39.429416 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/whereabouts-cni was successful 2025-12-08T18:04:39.440996623+00:00 stderr F I1208 18:04:39.429464 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project 2025-12-08T18:04:39.626818411+00:00 stderr F I1208 18:04:39.626747 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /net-attach-def-project was successful 2025-12-08T18:04:39.626870842+00:00 stderr F I1208 18:04:39.626819 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist 2025-12-08T18:04:39.827953605+00:00 stderr F I1208 18:04:39.827831 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/default-cni-sysctl-allowlist was successful 2025-12-08T18:04:39.827953605+00:00 stderr F I1208 18:04:39.827901 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources 2025-12-08T18:04:40.027062605+00:00 stderr F I1208 18:04:40.026979 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/cni-copy-resources was successful 2025-12-08T18:04:40.027062605+00:00 stderr F I1208 18:04:40.027041 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config 2025-12-08T18:04:40.226002781+00:00 stderr F I1208 18:04:40.225936 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/whereabouts-flatfile-config was successful 2025-12-08T18:04:40.226002781+00:00 stderr F I1208 18:04:40.225989 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config 2025-12-08T18:04:40.425437630+00:00 stderr F I1208 18:04:40.425299 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-multus/multus-daemon-config was successful 2025-12-08T18:04:40.425437630+00:00 stderr F I1208 18:04:40.425372 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus 2025-12-08T18:04:40.648025404+00:00 stderr F I1208 18:04:40.647918 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus was successful 2025-12-08T18:04:40.648025404+00:00 stderr F I1208 18:04:40.647992 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins 2025-12-08T18:04:40.839147522+00:00 stderr F I1208 18:04:40.839060 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/multus-additional-cni-plugins was successful 2025-12-08T18:04:40.839147522+00:00 stderr F I1208 18:04:40.839115 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa 2025-12-08T18:04:41.029680025+00:00 stderr F I1208 18:04:41.029612 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/metrics-daemon-sa was successful 2025-12-08T18:04:41.029680025+00:00 stderr F I1208 18:04:41.029652 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role 2025-12-08T18:04:41.228063946+00:00 stderr F I1208 18:04:41.227950 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=ClusterRole) /metrics-daemon-role was successful 2025-12-08T18:04:41.228063946+00:00 stderr F I1208 18:04:41.228018 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding 2025-12-08T18:04:41.426399896+00:00 stderr F I1208 18:04:41.426325 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /metrics-daemon-sa-rolebinding was successful 2025-12-08T18:04:41.426399896+00:00 stderr F I1208 18:04:41.426390 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon 2025-12-08T18:04:41.640053942+00:00 stderr F I1208 18:04:41.639963 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-multus/network-metrics-daemon was successful 2025-12-08T18:04:41.640053942+00:00 stderr F I1208 18:04:41.640038 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network 2025-12-08T18:04:41.826921518+00:00 stderr F I1208 18:04:41.826796 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-network was successful 2025-12-08T18:04:41.826921518+00:00 stderr F I1208 18:04:41.826859 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/network-metrics-service 2025-12-08T18:04:42.029639774+00:00 stderr F I1208 18:04:42.029543 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/network-metrics-service was successful 2025-12-08T18:04:42.029639774+00:00 stderr F I1208 18:04:42.029602 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T18:04:42.226180976+00:00 stderr F I1208 18:04:42.226112 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T18:04:42.226226368+00:00 stderr F I1208 18:04:42.226183 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T18:04:42.425971095+00:00 stderr F I1208 18:04:42.425845 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T18:04:42.426011936+00:00 stderr F I1208 18:04:42.425968 1 log.go:245] reconciling (/v1, Kind=Service) openshift-multus/multus-admission-controller 2025-12-08T18:04:42.627988613+00:00 stderr F I1208 18:04:42.627921 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-multus/multus-admission-controller was successful 2025-12-08T18:04:42.627988613+00:00 stderr F I1208 18:04:42.627962 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-multus/multus-ac 2025-12-08T18:04:42.832028834+00:00 stderr F I1208 18:04:42.831957 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-multus/multus-ac was successful 2025-12-08T18:04:42.832057945+00:00 stderr F I1208 18:04:42.832033 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook 2025-12-08T18:04:43.030502337+00:00 stderr F I1208 18:04:43.029964 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /multus-admission-controller-webhook was successful 2025-12-08T18:04:43.030502337+00:00 stderr F I1208 18:04:43.030481 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook 2025-12-08T18:04:43.229035462+00:00 stderr F I1208 18:04:43.227823 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /multus-admission-controller-webhook was successful 2025-12-08T18:04:43.229035462+00:00 stderr F I1208 18:04:43.228354 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io 2025-12-08T18:04:43.428898303+00:00 stderr F I1208 18:04:43.428445 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingWebhookConfiguration) /multus.openshift.io was successful 2025-12-08T18:04:43.428898303+00:00 stderr F I1208 18:04:43.428495 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller 2025-12-08T18:04:43.629480512+00:00 stderr F I1208 18:04:43.629442 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-multus/multus-admission-controller was successful 2025-12-08T18:04:43.629551094+00:00 stderr F I1208 18:04:43.629541 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller 2025-12-08T18:04:43.829007704+00:00 stderr F I1208 18:04:43.827791 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-multus/monitor-multus-admission-controller was successful 2025-12-08T18:04:43.829007704+00:00 stderr F I1208 18:04:43.828244 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s 2025-12-08T18:04:44.025133465+00:00 stderr F I1208 18:04:44.025036 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-multus/prometheus-k8s was successful 2025-12-08T18:04:44.025133465+00:00 stderr F I1208 18:04:44.025076 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s 2025-12-08T18:04:44.231338384+00:00 stderr F I1208 18:04:44.231284 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-multus/prometheus-k8s was successful 2025-12-08T18:04:44.231338384+00:00 stderr F I1208 18:04:44.231330 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules 2025-12-08T18:04:44.430115285+00:00 stderr F I1208 18:04:44.430044 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-multus/prometheus-k8s-rules was successful 2025-12-08T18:04:44.430146036+00:00 stderr F I1208 18:04:44.430128 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-ovn-kubernetes 2025-12-08T18:04:44.631803764+00:00 stderr F I1208 18:04:44.631692 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-ovn-kubernetes was successful 2025-12-08T18:04:44.631803764+00:00 stderr F I1208 18:04:44.631776 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org 2025-12-08T18:04:44.845520522+00:00 stderr F I1208 18:04:44.845410 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressfirewalls.k8s.ovn.org was successful 2025-12-08T18:04:44.845520522+00:00 stderr F I1208 18:04:44.845503 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org 2025-12-08T18:04:45.042012103+00:00 stderr F I1208 18:04:45.041940 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressips.k8s.ovn.org was successful 2025-12-08T18:04:45.042012103+00:00 stderr F I1208 18:04:45.041993 1 log.go:245] reconciling (apiextensions.k8s.io/v1, 
Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org 2025-12-08T18:04:45.249145396+00:00 stderr F I1208 18:04:45.248781 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressqoses.k8s.ovn.org was successful 2025-12-08T18:04:45.249331931+00:00 stderr F I1208 18:04:45.249298 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org 2025-12-08T18:04:45.437282727+00:00 stderr F I1208 18:04:45.437229 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminpolicybasedexternalroutes.k8s.ovn.org was successful 2025-12-08T18:04:45.437282727+00:00 stderr F I1208 18:04:45.437271 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org 2025-12-08T18:04:45.629703989+00:00 stderr F I1208 18:04:45.629660 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /egressservices.k8s.ovn.org was successful 2025-12-08T18:04:45.629797941+00:00 stderr F I1208 18:04:45.629785 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io 2025-12-08T18:04:45.866147620+00:00 stderr F I1208 18:04:45.866103 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /adminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T18:04:45.866276694+00:00 stderr F I1208 18:04:45.866262 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io 2025-12-08T18:04:46.063414431+00:00 stderr F I1208 18:04:46.063371 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /baselineadminnetworkpolicies.policy.networking.k8s.io was successful 2025-12-08T18:04:46.063473943+00:00 stderr F I1208 18:04:46.063418 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io 2025-12-08T18:04:46.229589578+00:00 stderr F I1208 18:04:46.229523 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /ipamclaims.k8s.cni.cncf.io was successful 2025-12-08T18:04:46.229738472+00:00 stderr F I1208 18:04:46.229716 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org 2025-12-08T18:04:46.455068938+00:00 stderr F I1208 18:04:46.455010 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /userdefinednetworks.k8s.ovn.org was successful 2025-12-08T18:04:46.455119789+00:00 stderr F I1208 18:04:46.455076 1 log.go:245] reconciling (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org 2025-12-08T18:04:46.676397657+00:00 stderr F I1208 18:04:46.676302 1 log.go:245] Apply / Create of (apiextensions.k8s.io/v1, Kind=CustomResourceDefinition) /clusteruserdefinednetworks.k8s.ovn.org was successful 2025-12-08T18:04:46.676397657+00:00 stderr F I1208 18:04:46.676364 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T18:04:46.826957611+00:00 stderr F I1208 18:04:46.826888 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T18:04:46.826957611+00:00 stderr F I1208 18:04:46.826922 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) 
openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited 2025-12-08T18:04:47.027675803+00:00 stderr F I1208 18:04:47.026626 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-node-limited was successful 2025-12-08T18:04:47.027721364+00:00 stderr F I1208 18:04:47.027681 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited 2025-12-08T18:04:47.227843282+00:00 stderr F I1208 18:04:47.227782 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-nodes-identity-limited was successful 2025-12-08T18:04:47.227991006+00:00 stderr F I1208 18:04:47.227959 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited 2025-12-08T18:04:47.426593133+00:00 stderr F I1208 18:04:47.426503 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-node-limited was successful 2025-12-08T18:04:47.426593133+00:00 stderr F I1208 18:04:47.426561 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited 2025-12-08T18:04:47.627004707+00:00 stderr F I1208 18:04:47.626933 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-identity-limited was successful 2025-12-08T18:04:47.627004707+00:00 stderr F I1208 18:04:47.626976 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy 2025-12-08T18:04:47.827621038+00:00 stderr F I1208 18:04:47.827044 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-kube-rbac-proxy was successful 2025-12-08T18:04:47.827621038+00:00 stderr F I1208 18:04:47.827517 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy 2025-12-08T18:04:48.026437841+00:00 stderr F I1208 18:04:48.026367 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-node-kube-rbac-proxy was successful 2025-12-08T18:04:48.026437841+00:00 stderr F I1208 18:04:48.026424 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config 2025-12-08T18:04:48.226403194+00:00 stderr F I1208 18:04:48.226177 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-config was successful 2025-12-08T18:04:48.226403194+00:00 stderr F I1208 18:04:48.226276 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T18:04:48.427930678+00:00 stderr F I1208 18:04:48.427854 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T18:04:48.427991890+00:00 stderr F I1208 18:04:48.427950 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T18:04:48.627850200+00:00 stderr F I1208 18:04:48.627796 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T18:04:48.627850200+00:00 stderr F I1208 18:04:48.627841 1 log.go:245] reconciling 
(rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited 2025-12-08T18:04:48.826379845+00:00 stderr F I1208 18:04:48.826337 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T18:04:48.826471077+00:00 stderr F I1208 18:04:48.826461 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T18:04:49.027024186+00:00 stderr F I1208 18:04:49.026984 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T18:04:49.027162670+00:00 stderr F I1208 18:04:49.027151 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited 2025-12-08T18:04:49.226543767+00:00 stderr F I1208 18:04:49.226496 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/openshift-ovn-kubernetes-control-plane-limited was successful 2025-12-08T18:04:49.226593359+00:00 stderr F I1208 18:04:49.226549 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn 2025-12-08T18:04:49.427745433+00:00 stderr F I1208 18:04:49.427698 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/ovn was successful 2025-12-08T18:04:49.427795845+00:00 stderr F I1208 18:04:49.427755 1 log.go:245] reconciling (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer 2025-12-08T18:04:49.627060979+00:00 stderr F I1208 18:04:49.627020 1 log.go:245] Apply / Create of (network.operator.openshift.io/v1, Kind=OperatorPKI) openshift-ovn-kubernetes/signer was successful 2025-12-08T18:04:49.627137341+00:00 stderr F I1208 18:04:49.627128 1 log.go:245] reconciling (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes 2025-12-08T18:04:49.826722604+00:00 stderr F I1208 18:04:49.826669 1 log.go:245] Apply / Create of (flowcontrol.apiserver.k8s.io/v1, Kind=FlowSchema) /openshift-ovn-kubernetes was successful 2025-12-08T18:04:49.826759475+00:00 stderr F I1208 18:04:49.826725 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader 2025-12-08T18:04:50.025992939+00:00 stderr F I1208 18:04:50.025936 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-cluster-reader was successful 2025-12-08T18:04:50.025992939+00:00 stderr F I1208 18:04:50.025977 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib 2025-12-08T18:04:50.228609953+00:00 stderr F I1208 18:04:50.228539 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-ovn-kubernetes/ovnkube-script-lib was successful 2025-12-08T18:04:50.228609953+00:00 stderr F I1208 18:04:50.228584 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor 2025-12-08T18:04:50.424775045+00:00 stderr F I1208 18:04:50.424736 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-editor was successful 2025-12-08T18:04:50.424863217+00:00 stderr F I1208 18:04:50.424850 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, 
Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer 2025-12-08T18:04:50.625597891+00:00 stderr F I1208 18:04:50.625537 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /openshift-ovn-kubernetes-udn-viewer was successful 2025-12-08T18:04:50.625597891+00:00 stderr F I1208 18:04:50.625580 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules 2025-12-08T18:04:50.831613754+00:00 stderr F I1208 18:04:50.831542 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/master-rules was successful 2025-12-08T18:04:50.831613754+00:00 stderr F I1208 18:04:50.831599 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules 2025-12-08T18:04:51.032508802+00:00 stderr F I1208 18:04:51.032447 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=PrometheusRule) openshift-ovn-kubernetes/networking-rules was successful 2025-12-08T18:04:51.032508802+00:00 stderr F I1208 18:04:51.032496 1 log.go:245] reconciling (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features 2025-12-08T18:04:51.226296001+00:00 stderr F I1208 18:04:51.225558 1 log.go:245] Apply / Create of (/v1, Kind=ConfigMap) openshift-config-managed/openshift-network-features was successful 2025-12-08T18:04:51.226296001+00:00 stderr F I1208 18:04:51.226264 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics 2025-12-08T18:04:51.430084756+00:00 stderr F I1208 18:04:51.430025 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-control-plane-metrics was successful 2025-12-08T18:04:51.430166058+00:00 stderr F I1208 18:04:51.430155 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane 2025-12-08T18:04:51.626733331+00:00 stderr F I1208 18:04:51.626638 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-control-plane was successful 2025-12-08T18:04:51.626733331+00:00 stderr F I1208 18:04:51.626682 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node 2025-12-08T18:04:51.831780079+00:00 stderr F I1208 18:04:51.831707 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-ovn-kubernetes/monitor-ovn-node was successful 2025-12-08T18:04:51.831822210+00:00 stderr F I1208 18:04:51.831785 1 log.go:245] reconciling (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node 2025-12-08T18:04:52.029927304+00:00 stderr F I1208 18:04:52.029808 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-ovn-kubernetes/ovn-kubernetes-node was successful 2025-12-08T18:04:52.030028076+00:00 stderr F I1208 18:04:52.030012 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T18:04:52.227586995+00:00 stderr F I1208 18:04:52.227536 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T18:04:52.227701288+00:00 stderr F I1208 18:04:52.227687 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s 2025-12-08T18:04:52.428480083+00:00 stderr F I1208 18:04:52.428399 1 log.go:245] Apply / Create of 
(rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-ovn-kubernetes/prometheus-k8s was successful 2025-12-08T18:04:52.428480083+00:00 stderr F I1208 18:04:52.428466 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-host-network 2025-12-08T18:04:52.627510531+00:00 stderr F I1208 18:04:52.627441 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-host-network was successful 2025-12-08T18:04:52.627510531+00:00 stderr F I1208 18:04:52.627498 1 log.go:245] reconciling (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas 2025-12-08T18:04:52.828945804+00:00 stderr F I1208 18:04:52.828422 1 log.go:245] Apply / Create of (/v1, Kind=ResourceQuota) openshift-host-network/host-network-namespace-quotas was successful 2025-12-08T18:04:52.828945804+00:00 stderr F I1208 18:04:52.828479 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane 2025-12-08T18:04:53.032766349+00:00 stderr F I1208 18:04:53.032694 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-ovn-kubernetes/ovnkube-control-plane was successful 2025-12-08T18:04:53.032766349+00:00 stderr F I1208 18:04:53.032742 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node 2025-12-08T18:04:53.243731344+00:00 stderr F I1208 18:04:53.243671 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) openshift-ovn-kubernetes/ovnkube-node was successful 2025-12-08T18:04:53.243731344+00:00 stderr F I1208 18:04:53.243724 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label 2025-12-08T18:04:53.431122614+00:00 stderr F I1208 18:04:53.427016 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicy) /user-defined-networks-namespace-label was successful 2025-12-08T18:04:53.431122614+00:00 stderr F I1208 18:04:53.427069 1 log.go:245] reconciling (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding 2025-12-08T18:04:53.625845928+00:00 stderr F I1208 18:04:53.625781 1 log.go:245] Apply / Create of (admissionregistration.k8s.io/v1, Kind=ValidatingAdmissionPolicyBinding) /user-defined-networks-namespace-label-binding was successful 2025-12-08T18:04:53.625845928+00:00 stderr F I1208 18:04:53.625831 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-diagnostics 2025-12-08T18:04:53.827702051+00:00 stderr F I1208 18:04:53.827028 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-diagnostics was successful 2025-12-08T18:04:53.829088258+00:00 stderr F I1208 18:04:53.829072 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics 2025-12-08T18:04:54.026166264+00:00 stderr F I1208 18:04:54.026093 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T18:04:54.026166264+00:00 stderr F I1208 18:04:54.026145 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics 2025-12-08T18:04:54.227798911+00:00 stderr F I1208 18:04:54.227738 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T18:04:54.227798911+00:00 stderr F I1208 18:04:54.227783 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) 
openshift-network-diagnostics/network-diagnostics 2025-12-08T18:04:54.424363605+00:00 stderr F I1208 18:04:54.424306 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/network-diagnostics was successful 2025-12-08T18:04:54.424363605+00:00 stderr F I1208 18:04:54.424344 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics 2025-12-08T18:04:54.632960357+00:00 stderr F I1208 18:04:54.632305 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-diagnostics was successful 2025-12-08T18:04:54.632960357+00:00 stderr F I1208 18:04:54.632351 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics 2025-12-08T18:04:54.826546920+00:00 stderr F I1208 18:04:54.826400 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-diagnostics was successful 2025-12-08T18:04:54.826546920+00:00 stderr F I1208 18:04:54.826443 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics 2025-12-08T18:04:55.026953846+00:00 stderr F I1208 18:04:55.025996 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) kube-system/network-diagnostics was successful 2025-12-08T18:04:55.026953846+00:00 stderr F I1208 18:04:55.026856 1 log.go:245] reconciling (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source 2025-12-08T18:04:55.236434480+00:00 stderr F I1208 18:04:55.236381 1 log.go:245] Apply / Create of (apps/v1, Kind=Deployment) openshift-network-diagnostics/network-check-source was successful 2025-12-08T18:04:55.236434480+00:00 stderr F I1208 18:04:55.236426 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-source 2025-12-08T18:04:55.425993608+00:00 stderr F I1208 18:04:55.425931 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-source was successful 2025-12-08T18:04:55.426045399+00:00 stderr F I1208 18:04:55.426011 1 log.go:245] reconciling (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source 2025-12-08T18:04:55.627563034+00:00 stderr F I1208 18:04:55.627456 1 log.go:245] Apply / Create of (monitoring.coreos.com/v1, Kind=ServiceMonitor) openshift-network-diagnostics/network-check-source was successful 2025-12-08T18:04:55.627563034+00:00 stderr F I1208 18:04:55.627532 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s 2025-12-08T18:04:55.828269397+00:00 stderr F I1208 18:04:55.828196 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T18:04:55.828269397+00:00 stderr F I1208 18:04:55.828246 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s 2025-12-08T18:04:56.027465679+00:00 stderr F I1208 18:04:56.027402 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-diagnostics/prometheus-k8s was successful 2025-12-08T18:04:56.027465679+00:00 stderr F I1208 18:04:56.027445 1 log.go:245] reconciling (apps/v1, Kind=DaemonSet) openshift-network-diagnostics/network-check-target 2025-12-08T18:04:56.228714017+00:00 stderr F I1208 18:04:56.228604 1 log.go:245] Apply / Create of (apps/v1, Kind=DaemonSet) 
openshift-network-diagnostics/network-check-target was successful 2025-12-08T18:04:56.228714017+00:00 stderr F I1208 18:04:56.228693 1 log.go:245] reconciling (/v1, Kind=Service) openshift-network-diagnostics/network-check-target 2025-12-08T18:04:56.426078090+00:00 stderr F I1208 18:04:56.426017 1 log.go:245] Apply / Create of (/v1, Kind=Service) openshift-network-diagnostics/network-check-target was successful 2025-12-08T18:04:56.426078090+00:00 stderr F I1208 18:04:56.426069 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role 2025-12-08T18:04:56.625957571+00:00 stderr F I1208 18:04:56.625356 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-config-managed/openshift-network-public-role was successful 2025-12-08T18:04:56.625957571+00:00 stderr F I1208 18:04:56.625410 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding 2025-12-08T18:04:56.826231423+00:00 stderr F I1208 18:04:56.826115 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-config-managed/openshift-network-public-role-binding was successful 2025-12-08T18:04:56.826231423+00:00 stderr F I1208 18:04:56.826163 1 log.go:245] reconciling (/v1, Kind=Namespace) /openshift-network-node-identity 2025-12-08T18:04:57.028414834+00:00 stderr F I1208 18:04:57.028350 1 log.go:245] Apply / Create of (/v1, Kind=Namespace) /openshift-network-node-identity was successful 2025-12-08T18:04:57.028414834+00:00 stderr F I1208 18:04:57.028404 1 log.go:245] reconciling (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity 2025-12-08T18:04:57.229136138+00:00 stderr F I1208 18:04:57.228425 1 log.go:245] Apply / Create of (/v1, Kind=ServiceAccount) openshift-network-node-identity/network-node-identity was successful 2025-12-08T18:04:57.229136138+00:00 stderr F I1208 18:04:57.228472 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity 2025-12-08T18:04:57.425572597+00:00 stderr F I1208 18:04:57.425515 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRoleBinding) /network-node-identity was successful 2025-12-08T18:04:57.425572597+00:00 stderr F I1208 18:04:57.425555 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity 2025-12-08T18:04:57.625409487+00:00 stderr F I1208 18:04:57.625323 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=ClusterRole) /network-node-identity was successful 2025-12-08T18:04:57.625409487+00:00 stderr F I1208 18:04:57.625367 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases 2025-12-08T18:04:57.824580229+00:00 stderr F I1208 18:04:57.824517 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T18:04:57.824615550+00:00 stderr F I1208 18:04:57.824580 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases 2025-12-08T18:04:58.025681902+00:00 stderr F I1208 18:04:58.025631 1 log.go:245] Apply / Create of (rbac.authorization.k8s.io/v1, Kind=Role) openshift-network-node-identity/network-node-identity-leases was successful 2025-12-08T18:04:58.025713693+00:00 stderr F I1208 
18:04:58.025690 1 log.go:245] reconciling (rbac.authorization.k8s.io/v1, Kind=RoleBinding) openshift-network-node-identity/system:openshift:scc:hostnetwork-v2
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9_4356ed35-799c-4e39-a660-872291edf6cc/cert-manager-operator/0.log
2025-12-08T17:55:37.824273664+00:00 stderr F W1208 17:55:37.823851 1 cmd.go:257] Using insecure, self-signed certificates 2025-12-08T17:55:37.824517061+00:00 stderr F I1208 17:55:37.824323 1 crypto.go:594] Generating new CA for cert-manager-operator-signer@1765216537 cert, and key in /tmp/serving-cert-2819449867/serving-signer.crt, /tmp/serving-cert-2819449867/serving-signer.key 2025-12-08T17:55:37.824517061+00:00 stderr F Validity period of the certificate for "cert-manager-operator-signer@1765216537" is unset, resetting to 43800h0m0s! 2025-12-08T17:55:38.135563856+00:00 stderr F I1208 17:55:38.135259 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 
2025-12-08T17:55:38.135921705+00:00 stderr F I1208 17:55:38.135868 1 observer_polling.go:159] Starting file observer 2025-12-08T17:55:38.148739527+00:00 stderr F I1208 17:55:38.148679 1 builder.go:304] cert-manager-operator version - 2025-12-08T17:55:38.149716244+00:00 stderr F I1208 17:55:38.149668 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/tmp/serving-cert-2819449867/tls.crt::/tmp/serving-cert-2819449867/tls.key" 2025-12-08T17:55:38.946711454+00:00 stderr F I1208 17:55:38.946461 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:55:38.951907777+00:00 stderr F I1208 17:55:38.950947 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:55:38.951907777+00:00 stderr F I1208 17:55:38.950966 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:55:38.951907777+00:00 stderr F I1208 17:55:38.950983 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400 2025-12-08T17:55:38.951907777+00:00 stderr F I1208 17:55:38.950988 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200 2025-12-08T17:55:38.955010321+00:00 stderr F I1208 17:55:38.954962 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:55:38.955010321+00:00 stderr F I1208 17:55:38.954984 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:55:38.955010321+00:00 stderr F W1208 17:55:38.954997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:55:38.955010321+00:00 stderr F W1208 17:55:38.955003 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:55:38.955031452+00:00 stderr F W1208 17:55:38.955009 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:55:38.955031452+00:00 stderr F W1208 17:55:38.955012 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:55:38.955031452+00:00 stderr F W1208 17:55:38.955016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:55:38.955031452+00:00 stderr F W1208 17:55:38.955019 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:55:38.956459411+00:00 stderr F I1208 17:55:38.956438 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:55:38.956921563+00:00 stderr F I1208 17:55:38.956906 1 leaderelection.go:257] attempting to acquire leader lease cert-manager-operator/cert-manager-operator-lock... 
2025-12-08T17:55:38.957850729+00:00 stderr F I1208 17:55:38.957826 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-2819449867/tls.crt::/tmp/serving-cert-2819449867/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"cert-manager-operator-signer@1765216537\" (2025-12-08 17:55:37 +0000 UTC to 2025-12-08 17:55:38 +0000 UTC (now=2025-12-08 17:55:38.957796348 +0000 UTC))" 2025-12-08T17:55:38.958111426+00:00 stderr F I1208 17:55:38.958093 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765216538\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765216538\" (2025-12-08 16:55:38 +0000 UTC to 2028-12-08 16:55:38 +0000 UTC (now=2025-12-08 17:55:38.958068655 +0000 UTC))" 2025-12-08T17:55:38.958164278+00:00 stderr F I1208 17:55:38.958151 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:55:38.958207619+00:00 stderr F I1208 17:55:38.958196 1 genericapiserver.go:696] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:55:38.958251730+00:00 stderr F I1208 17:55:38.958240 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:55:38.958380294+00:00 stderr F I1208 17:55:38.958329 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:55:38.958471676+00:00 stderr F I1208 17:55:38.958428 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/tmp/serving-cert-2819449867/tls.crt::/tmp/serving-cert-2819449867/tls.key" 2025-12-08T17:55:38.958596110+00:00 stderr F I1208 17:55:38.958580 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:55:38.958723473+00:00 stderr F I1208 17:55:38.958710 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:55:38.958752134+00:00 stderr F I1208 17:55:38.958742 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:55:38.959159285+00:00 stderr F I1208 17:55:38.959128 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:55:38.959178465+00:00 stderr F I1208 17:55:38.959154 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:55:38.960017338+00:00 stderr F I1208 17:55:38.959999 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go:183" 2025-12-08T17:55:38.965358345+00:00 stderr F I1208 17:55:38.963189 1 leaderelection.go:271] successfully acquired lease cert-manager-operator/cert-manager-operator-lock 2025-12-08T17:55:38.965358345+00:00 stderr F I1208 17:55:38.963454 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:209" 2025-12-08T17:55:38.965358345+00:00 stderr F I1208 17:55:38.963588 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:209" 2025-12-08T17:55:38.965358345+00:00 stderr F I1208 
17:55:38.964383 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"cert-manager-operator", Name:"cert-manager-operator-lock", UID:"dff8c5f1-ff66-4ed9-96ca-e94b792df03e", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"42500", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' cert-manager-operator-controller-manager-64c74584c4-qtkx9_f03e831d-c739-48d0-9d60-0349dcdc2f41 became leader 2025-12-08T17:55:38.986957788+00:00 stderr F I1208 17:55:38.986048 1 setup_manager.go:52] "setting up operator manager" logger="setup-manager" controller="cert-manager-istio-csr-controller" 2025-12-08T17:55:38.986957788+00:00 stderr F I1208 17:55:38.986079 1 setup_manager.go:53] "controller" logger="setup-manager" version="" 2025-12-08T17:55:38.990920906+00:00 stderr F I1208 17:55:38.987237 1 base_controller.go:76] Waiting for caches to sync for cert-manager-controller-static-resources--StaticResources 2025-12-08T17:55:38.990920906+00:00 stderr F I1208 17:55:38.987361 1 base_controller.go:76] Waiting for caches to sync for cert-manager-controller-deployment 2025-12-08T17:55:38.990920906+00:00 stderr F I1208 17:55:38.987522 1 base_controller.go:76] Waiting for caches to sync for cert-manager-webhook-static-resources--StaticResources 2025-12-08T17:55:38.990920906+00:00 stderr F I1208 17:55:38.987535 1 base_controller.go:76] Waiting for caches to sync for cert-manager-webhook-deployment 2025-12-08T17:55:38.990920906+00:00 stderr F I1208 17:55:38.987713 1 base_controller.go:76] Waiting for caches to sync for cert-manager-cainjector-deployment 2025-12-08T17:55:38.990920906+00:00 stderr F I1208 17:55:38.987732 1 base_controller.go:76] Waiting for caches to sync for cert-manager-cainjector-static-resources--StaticResources 2025-12-08T17:55:38.990920906+00:00 stderr F I1208 17:55:38.987774 1 base_controller.go:76] Waiting for caches to sync for cert-manager-networkpolicy-user-defined 2025-12-08T17:55:38.990920906+00:00 stderr F I1208 17:55:38.987785 1 base_controller.go:76] Waiting for caches to sync for DefaultCertManager 2025-12-08T17:55:38.990920906+00:00 stderr F I1208 17:55:38.987930 1 base_controller.go:76] Waiting for caches to sync for cert-manager-networkpolicy-static-resources--StaticResources 2025-12-08T17:55:38.995923374+00:00 stderr F I1208 17:55:38.993199 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:38.995923374+00:00 stderr F I1208 17:55:38.993405 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:38.995923374+00:00 stderr F I1208 17:55:38.993572 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:38.995923374+00:00 stderr F I1208 17:55:38.994459 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:38.995923374+00:00 stderr F I1208 17:55:38.995228 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:38.995923374+00:00 stderr F I1208 17:55:38.995614 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:38.995923374+00:00 stderr F I1208 17:55:38.995852 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:38.999909554+00:00 stderr F I1208 
17:55:38.996261 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:55:38.999909554+00:00 stderr F I1208 17:55:38.996799 1 reflector.go:430] "Caches populated" type="*v1.NetworkPolicy" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:38.999909554+00:00 stderr F I1208 17:55:38.998998 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:39.002556646+00:00 stderr F I1208 17:55:39.000762 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:39.002556646+00:00 stderr F I1208 17:55:39.001167 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:39.007365017+00:00 stderr F I1208 17:55:39.007297 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:39.007916023+00:00 stderr F I1208 17:55:39.007798 1 reflector.go:430] "Caches populated" type="*v1alpha1.CertManager" reflector="github.com/openshift/cert-manager-operator/pkg/operator/informers/externalversions/factory.go:125" 2025-12-08T17:55:39.020647732+00:00 stderr F I1208 17:55:39.020574 1 server.go:208] "Starting metrics server" logger="controller-runtime.metrics" 2025-12-08T17:55:39.020946470+00:00 stderr F I1208 17:55:39.020815 1 server.go:247] "Serving metrics server" logger="controller-runtime.metrics" bindAddress=":8080" secure=false 2025-12-08T17:55:39.021018992+00:00 stderr F I1208 17:55:39.020978 1 recorder.go:104] "controller is starting" logger="operator-manager.events" type="Normal" object={"kind":"IstioCSR","apiVersion":"operator.openshift.io/v1alpha1"} reason="ControllerStarted" 2025-12-08T17:55:39.021442084+00:00 stderr F I1208 17:55:39.021408 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:55:39.021677430+00:00 stderr F I1208 17:55:39.021644 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1alpha1.IstioCSR" 2025-12-08T17:55:39.021688180+00:00 stderr F I1208 17:55:39.021679 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.Certificate" 2025-12-08T17:55:39.021696781+00:00 stderr F I1208 17:55:39.021690 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.Deployment" 2025-12-08T17:55:39.021705521+00:00 stderr F I1208 17:55:39.021700 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.ClusterRole" 2025-12-08T17:55:39.021727341+00:00 stderr F I1208 17:55:39.021711 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.ClusterRoleBinding" 
2025-12-08T17:55:39.021734262+00:00 stderr F I1208 17:55:39.021726 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.Role" 2025-12-08T17:55:39.021753272+00:00 stderr F I1208 17:55:39.021738 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.RoleBinding" 2025-12-08T17:55:39.021760072+00:00 stderr F I1208 17:55:39.021752 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.Service" 2025-12-08T17:55:39.021767132+00:00 stderr F I1208 17:55:39.021762 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.ServiceAccount" 2025-12-08T17:55:39.021794183+00:00 stderr F I1208 17:55:39.021779 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.ConfigMap" 2025-12-08T17:55:39.021813384+00:00 stderr F I1208 17:55:39.021797 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.PartialObjectMetadata" 2025-12-08T17:55:39.021829264+00:00 stderr F I1208 17:55:39.021814 1 controller.go:175] "Starting EventSource" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" source="kind source: *v1.NetworkPolicy" 2025-12-08T17:55:39.021829264+00:00 stderr F I1208 17:55:39.021824 1 controller.go:183] "Starting Controller" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" 2025-12-08T17:55:39.027512171+00:00 stderr F I1208 17:55:39.025042 1 reflector.go:430] "Caches populated" type="*v1.NetworkPolicy" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.027512171+00:00 stderr F I1208 17:55:39.025245 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.029145635+00:00 stderr F I1208 17:55:39.028725 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.029201327+00:00 stderr F I1208 17:55:39.029178 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.029697850+00:00 stderr F I1208 17:55:39.029667 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.035871380+00:00 stderr F I1208 17:55:39.035820 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.036180979+00:00 
stderr F I1208 17:55:39.036129 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.036322803+00:00 stderr F I1208 17:55:39.036296 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.038042229+00:00 stderr F I1208 17:55:39.038007 1 reflector.go:430] "Caches populated" type="*v1.PartialObjectMetadata" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.062926922+00:00 stderr F I1208 17:55:39.062680 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:55:39.062986164+00:00 stderr F I1208 17:55:39.062970 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:55:39.063014865+00:00 stderr F I1208 17:55:39.062681 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:55:39.063278312+00:00 stderr F I1208 17:55:39.063229 1 reflector.go:430] "Caches populated" type="*v1alpha1.IstioCSR" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.063307262+00:00 stderr F I1208 17:55:39.063281 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:55:39.06323369 +0000 UTC))" 2025-12-08T17:55:39.063606811+00:00 stderr F I1208 17:55:39.063564 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-2819449867/tls.crt::/tmp/serving-cert-2819449867/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"cert-manager-operator-signer@1765216537\" (2025-12-08 17:55:37 +0000 UTC to 2025-12-08 17:55:38 +0000 UTC (now=2025-12-08 17:55:39.063545039 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.063748 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765216538\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765216538\" (2025-12-08 16:55:38 +0000 UTC to 2028-12-08 16:55:38 +0000 UTC (now=2025-12-08 17:55:39.063730144 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064009 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:55:39.063992422 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064031 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" 
(2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:55:39.064019013 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064048 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:55:39.064036603 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064065 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:55:39.064055144 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064098 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:55:39.064071964 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064119 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:55:39.064105755 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064138 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:55:39.064123966 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064155 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:55:39.064143826 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064179 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC 
(now=2025-12-08 17:55:39.064161057 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064201 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:55:39.064190077 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064218 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:55:39.064207218 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064428 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-2819449867/tls.crt::/tmp/serving-cert-2819449867/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"cert-manager-operator-signer@1765216537\" (2025-12-08 17:55:37 +0000 UTC to 2025-12-08 17:55:38 +0000 UTC (now=2025-12-08 17:55:39.064413053 +0000 UTC))" 2025-12-08T17:55:39.065979386+00:00 stderr F I1208 17:55:39.064619 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765216538\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765216538\" (2025-12-08 16:55:38 +0000 UTC to 2028-12-08 16:55:38 +0000 UTC (now=2025-12-08 17:55:39.064603639 +0000 UTC))" 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.092363 1 base_controller.go:82] Caches are synced for cert-manager-controller-static-resources--StaticResources 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.092393 1 base_controller.go:119] Starting #1 worker of cert-manager-controller-static-resources--StaticResources controller ... 2025-12-08T17:55:39.094960072+00:00 stderr F E1208 17:55:39.092499 1 base_controller.go:279] "Unhandled Error" err="cert-manager-controller-static-resources--StaticResources reconciliation failed: certmanager.operator.openshift.io \"cluster\" not found" 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.093989 1 base_controller.go:82] Caches are synced for DefaultCertManager 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094012 1 base_controller.go:119] Starting #1 worker of DefaultCertManager controller ... 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094050 1 base_controller.go:82] Caches are synced for cert-manager-controller-deployment 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094054 1 base_controller.go:119] Starting #1 worker of cert-manager-controller-deployment controller ... 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094074 1 base_controller.go:82] Caches are synced for cert-manager-webhook-static-resources--StaticResources 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094082 1 base_controller.go:119] Starting #1 worker of cert-manager-webhook-static-resources--StaticResources controller ... 
2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094091 1 base_controller.go:82] Caches are synced for cert-manager-webhook-deployment 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094095 1 base_controller.go:119] Starting #1 worker of cert-manager-webhook-deployment controller ... 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094110 1 base_controller.go:82] Caches are synced for cert-manager-cainjector-static-resources--StaticResources 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094114 1 base_controller.go:119] Starting #1 worker of cert-manager-cainjector-static-resources--StaticResources controller ... 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094123 1 base_controller.go:82] Caches are synced for cert-manager-networkpolicy-user-defined 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094126 1 base_controller.go:119] Starting #1 worker of cert-manager-networkpolicy-user-defined controller ... 2025-12-08T17:55:39.094960072+00:00 stderr F E1208 17:55:39.094721 1 base_controller.go:279] "Unhandled Error" err="cert-manager-webhook-static-resources--StaticResources reconciliation failed: certmanager.operator.openshift.io \"cluster\" not found" 2025-12-08T17:55:39.094960072+00:00 stderr F E1208 17:55:39.094780 1 base_controller.go:279] "Unhandled Error" err="cert-manager-cainjector-static-resources--StaticResources reconciliation failed: certmanager.operator.openshift.io \"cluster\" not found" 2025-12-08T17:55:39.094960072+00:00 stderr F I1208 17:55:39.094798 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'StatusNotFound' Creating "cluster" certmanager 2025-12-08T17:55:39.095016263+00:00 stderr F I1208 17:55:39.094977 1 base_controller.go:82] Caches are synced for cert-manager-networkpolicy-static-resources--StaticResources 2025-12-08T17:55:39.095016263+00:00 stderr F I1208 17:55:39.094985 1 base_controller.go:119] Starting #1 worker of cert-manager-networkpolicy-static-resources--StaticResources controller ... 2025-12-08T17:55:39.095026894+00:00 stderr F I1208 17:55:39.095021 1 base_controller.go:82] Caches are synced for cert-manager-cainjector-deployment 2025-12-08T17:55:39.095034024+00:00 stderr F I1208 17:55:39.095026 1 base_controller.go:119] Starting #1 worker of cert-manager-cainjector-deployment controller ... 
2025-12-08T17:55:39.095623730+00:00 stderr F E1208 17:55:39.095071 1 base_controller.go:279] "Unhandled Error" err="cert-manager-networkpolicy-static-resources--StaticResources reconciliation failed: certmanager.operator.openshift.io \"cluster\" not found" 2025-12-08T17:55:39.105134370+00:00 stderr F E1208 17:55:39.101360 1 base_controller.go:279] "Unhandled Error" err="cert-manager-webhook-static-resources--StaticResources reconciliation failed: certmanager.operator.openshift.io \"cluster\" not found" 2025-12-08T17:55:39.105134370+00:00 stderr F E1208 17:55:39.101437 1 base_controller.go:279] "Unhandled Error" err="cert-manager-cainjector-static-resources--StaticResources reconciliation failed: certmanager.operator.openshift.io \"cluster\" not found" 2025-12-08T17:55:39.131918776+00:00 stderr F I1208 17:55:39.128667 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'MutatingWebhookConfigurationCreated' Created MutatingWebhookConfiguration.admissionregistration.k8s.io/cert-manager-webhook because it was missing 2025-12-08T17:55:39.138944848+00:00 stderr F I1208 17:55:39.135843 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'NamespaceCreated' Created Namespace/cert-manager because it was missing 2025-12-08T17:55:39.144418458+00:00 stderr F I1208 17:55:39.144112 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ValidatingWebhookConfigurationCreated' Created ValidatingWebhookConfiguration.admissionregistration.k8s.io/cert-manager-webhook because it was missing 2025-12-08T17:55:39.160461509+00:00 stderr F I1208 17:55:39.157954 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-cainjector because it was missing 2025-12-08T17:55:39.224200928+00:00 stderr F I1208 17:55:39.224021 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:39.320076979+00:00 stderr F I1208 17:55:39.317179 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-cluster-view because it was missing 2025-12-08T17:55:39.391221350+00:00 stderr F I1208 17:55:39.391159 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", 
FieldPath:""}): type: 'Warning' reason: 'RoleBindingCreateFailed' Failed to create RoleBinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving -n cert-manager: rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found 2025-12-08T17:55:39.423752513+00:00 stderr F I1208 17:55:39.423237 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentCreated' Created Deployment.apps/cert-manager -n cert-manager because it was missing 2025-12-08T17:55:40.044922458+00:00 stderr F I1208 17:55:40.044575 1 reflector.go:430] "Caches populated" type="*v1.Certificate" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:106" 2025-12-08T17:55:40.129479478+00:00 stderr F I1208 17:55:40.129394 1 controller.go:217] "Starting workers" logger="operator-manager" controller="cert-manager-istio-csr-controller" controllerGroup="operator.openshift.io" controllerKind="IstioCSR" worker count=1 2025-12-08T17:55:40.210327577+00:00 stderr F I1208 17:55:40.210267 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleBindingCreated' Created ClusterRoleBinding.rbac.authorization.k8s.io/cert-manager-cainjector because it was missing 2025-12-08T17:55:40.442006133+00:00 stderr F I1208 17:55:40.441931 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentCreated' Created Deployment.apps/cert-manager-webhook -n cert-manager because it was missing 2025-12-08T17:55:40.449747546+00:00 stderr F I1208 17:55:40.449657 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-controller-certificates because it was missing 2025-12-08T17:55:40.617342805+00:00 stderr F I1208 17:55:40.614888 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RoleCreated' Created Role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving -n cert-manager because it was missing 2025-12-08T17:55:40.620495862+00:00 stderr F I1208 17:55:40.620430 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ServiceAccountCreated' Created ServiceAccount/cert-manager-webhook -n cert-manager because it was missing 2025-12-08T17:55:41.390058638+00:00 stderr F I1208 17:55:41.389770 1 event.go:377] 
Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'RoleBindingCreateFailed' Failed to create RoleBinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection -n kube-system: rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found 2025-12-08T17:55:41.609350545+00:00 stderr F I1208 17:55:41.608961 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleBindingCreated' Created ClusterRoleBinding.rbac.authorization.k8s.io/cert-manager-controller-certificates because it was missing 2025-12-08T17:55:41.827913703+00:00 stderr F I1208 17:55:41.827753 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews because it was missing 2025-12-08T17:55:41.831790400+00:00 stderr F I1208 17:55:41.831080 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentCreated' Created Deployment.apps/cert-manager-cainjector -n cert-manager because it was missing 2025-12-08T17:55:42.678221986+00:00 stderr F I1208 17:55:42.676239 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RoleCreated' Created Role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection -n kube-system because it was missing 2025-12-08T17:55:42.688062915+00:00 stderr F I1208 17:55:42.687257 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ServiceAccountCreated' Created ServiceAccount/cert-manager-cainjector -n cert-manager because it was missing 2025-12-08T17:55:42.726671315+00:00 stderr F I1208 17:55:42.723693 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ServiceCreated' Created Service/cert-manager-cainjector -n cert-manager because it was missing 2025-12-08T17:55:42.847072738+00:00 stderr F I1208 17:55:42.845449 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 
'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-controller-challenges because it was missing 2025-12-08T17:55:43.022519423+00:00 stderr F I1208 17:55:43.022019 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleBindingCreated' Created ClusterRoleBinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews because it was missing 2025-12-08T17:55:43.043707674+00:00 stderr F I1208 17:55:43.043609 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ServiceCreated' Created Service/cert-manager-webhook -n cert-manager because it was missing 2025-12-08T17:55:43.401938604+00:00 stderr F I1208 17:55:43.401120 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleBindingCreated' Created ClusterRoleBinding.rbac.authorization.k8s.io/cert-manager-controller-challenges because it was missing 2025-12-08T17:55:43.805768054+00:00 stderr F I1208 17:55:43.805225 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers because it was missing 2025-12-08T17:55:44.189119734+00:00 stderr F I1208 17:55:44.189069 1 request.go:752] "Waited before sending request" delay="1.145620146s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster" 2025-12-08T17:55:44.211373364+00:00 stderr F I1208 17:55:44.211310 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleBindingCreated' Created ClusterRoleBinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers because it was missing 2025-12-08T17:55:44.605346155+00:00 stderr F I1208 17:55:44.605255 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim because it was missing 2025-12-08T17:55:44.797581840+00:00 stderr F E1208 17:55:44.797537 1 base_controller.go:279] "Unhandled Error" err="cert-manager-cainjector-static-resources--StaticResources reconciliation failed: \"cert-manager-deployment/cainjector/cert-manager-cainjector-leaderelection-rb.yaml\" (string): rolebindings.rbac.authorization.k8s.io 
\"cert-manager-cainjector:leaderelection\" not found" 2025-12-08T17:55:45.012497997+00:00 stderr F I1208 17:55:45.012437 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleBindingCreated' Created ClusterRoleBinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim because it was missing 2025-12-08T17:55:45.387523008+00:00 stderr F I1208 17:55:45.387344 1 request.go:752] "Waited before sending request" delay="1.192002238s" reason="client-side throttling, not priority and fairness" verb="PATCH" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster/status?fieldManager=cert-manager-webhook-static-resources--StaticResources&force=true" 2025-12-08T17:55:45.397611824+00:00 stderr F E1208 17:55:45.397536 1 base_controller.go:279] "Unhandled Error" err="cert-manager-webhook-static-resources--StaticResources reconciliation failed: \"cert-manager-deployment/webhook/cert-manager-webhook-dynamic-serving-rb.yaml\" (string): rolebindings.rbac.authorization.k8s.io \"cert-manager-webhook:dynamic-serving\" not found" 2025-12-08T17:55:45.805632280+00:00 stderr F I1208 17:55:45.805563 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-controller-issuers because it was missing 2025-12-08T17:55:46.605307194+00:00 stderr F I1208 17:55:46.605240 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RoleBindingCreated' Created RoleBinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving -n cert-manager because it was missing 2025-12-08T17:55:46.807220204+00:00 stderr F I1208 17:55:46.805713 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RoleBindingCreated' Created RoleBinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection -n kube-system because it was missing 2025-12-08T17:55:47.006559574+00:00 stderr F I1208 17:55:47.006484 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleBindingCreated' Created ClusterRoleBinding.rbac.authorization.k8s.io/cert-manager-controller-issuers because it was missing 2025-12-08T17:55:48.033958186+00:00 stderr F I1208 17:55:48.031959 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created 
ClusterRole.rbac.authorization.k8s.io/cert-manager-controller-orders because it was missing 2025-12-08T17:55:48.609939410+00:00 stderr F I1208 17:55:48.609840 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleBindingCreated' Created ClusterRoleBinding.rbac.authorization.k8s.io/cert-manager-controller-orders because it was missing 2025-12-08T17:55:49.011034496+00:00 stderr F I1208 17:55:49.010663 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-edit because it was missing 2025-12-08T17:55:49.387377183+00:00 stderr F I1208 17:55:49.387324 1 request.go:752] "Waited before sending request" delay="1.189200502s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster" 2025-12-08T17:55:49.393935063+00:00 stderr F I1208 17:55:49.393524 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'RoleBindingCreateFailed' Failed to create RoleBinding.rbac.authorization.k8s.io/cert-manager:leaderelection -n kube-system: rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found 2025-12-08T17:55:50.210773637+00:00 stderr F I1208 17:55:50.210686 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RoleCreated' Created Role.rbac.authorization.k8s.io/cert-manager:leaderelection -n kube-system because it was missing 2025-12-08T17:55:50.217619095+00:00 stderr F I1208 17:55:50.216208 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ServiceAccountCreated' Created ServiceAccount/cert-manager -n cert-manager because it was missing 2025-12-08T17:55:50.235055843+00:00 stderr F I1208 17:55:50.234741 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ServiceCreated' Created Service/cert-manager -n cert-manager because it was missing 2025-12-08T17:55:51.189862483+00:00 stderr F I1208 17:55:51.189776 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'RoleBindingCreateFailed' Failed to create 
RoleBinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest -n cert-manager: rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found 2025-12-08T17:55:52.008972778+00:00 stderr F I1208 17:55:52.008755 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RoleCreated' Created Role.rbac.authorization.k8s.io/cert-manager-tokenrequest -n cert-manager because it was missing 2025-12-08T17:55:52.604304925+00:00 stderr F I1208 17:55:52.604221 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-view because it was missing 2025-12-08T17:55:53.013980326+00:00 stderr F I1208 17:55:53.013706 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io because it was missing 2025-12-08T17:55:53.387094444+00:00 stderr F I1208 17:55:53.387001 1 request.go:752] "Waited before sending request" delay="1.18589338s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster" 2025-12-08T17:55:53.801658899+00:00 stderr F I1208 17:55:53.801281 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleBindingCreated' Created ClusterRoleBinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io because it was missing 2025-12-08T17:55:54.604181471+00:00 stderr F I1208 17:55:54.604116 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleCreated' Created ClusterRole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests because it was missing 2025-12-08T17:55:55.411075352+00:00 stderr F I1208 17:55:55.410625 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ClusterRoleBindingCreated' Created ClusterRoleBinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests because it was missing 2025-12-08T17:55:57.200461133+00:00 stderr F E1208 17:55:57.199469 1 base_controller.go:279] "Unhandled Error" err="cert-manager-controller-static-resources--StaticResources reconciliation failed: 
[\"cert-manager-deployment/controller/cert-manager-leaderelection-rb.yaml\" (string): rolebindings.rbac.authorization.k8s.io \"cert-manager:leaderelection\" not found, \"cert-manager-deployment/controller/cert-manager-cert-manager-tokenrequest-rb.yaml\" (string): rolebindings.rbac.authorization.k8s.io \"cert-manager-tokenrequest\" not found]" 2025-12-08T17:55:58.391486814+00:00 stderr F I1208 17:55:58.387427 1 request.go:752] "Waited before sending request" delay="1.175802543s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster" 2025-12-08T17:56:01.403778370+00:00 stderr F I1208 17:56:01.403242 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RoleBindingCreated' Created RoleBinding.rbac.authorization.k8s.io/cert-manager:leaderelection -n kube-system because it was missing 2025-12-08T17:56:02.037149320+00:00 stderr F I1208 17:56:02.036655 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RoleBindingCreated' Created RoleBinding.rbac.authorization.k8s.io/cert-manager-cert-manager-tokenrequest -n cert-manager because it was missing 2025-12-08T17:56:02.050926758+00:00 stderr F I1208 17:56:02.048733 1 admissionregistration.go:69] MutatingWebhookConfiguration "/cert-manager-webhook" changes: {"webhooks":[{"admissionReviewVersions":["v1"],"clientConfig":{"caBundle":"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJ3VENDQVVlZ0F3SUJBZ0lRSGUvb1NOMUZ4czlZbXZQUmVpdFhXVEFLQmdncWhrak9QUVFEQXpBaU1TQXcKSGdZRFZRUURFeGRqWlhKMExXMWhibUZuWlhJdGQyVmlhRzl2YXkxallUQWVGdzB5TlRFeU1EZ3hOelUxTlRkYQpGdzB5TmpFeU1EZ3hOelUxTlRkYU1DSXhJREFlQmdOVkJBTVRGMk5sY25RdGJXRnVZV2RsY2kxM1pXSm9iMjlyCkxXTmhNSFl3RUFZSEtvWkl6ajBDQVFZRks0RUVBQ0lEWWdBRTNCUEcwZXFZOVpFM3NDWVVPbkp1NEt3WWNLUkEKOHJmUGliQU5ESkNhZzJ6WFE0NlBhZkllSGlXdlkrM2lvSWZKMThFSVdwaDl2djhBRCtNY2lmWWtmMDZnWTFBUQpibU95OU5aT09vamRsWE9zeCtkSW15b0pmUlhwUTFzYTBxSFVvMEl3UURBT0JnTlZIUThCQWY4RUJBTUNBcVF3CkR3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWFyV0ZsZzJCaDR0UHRpMExwWmFpL3YwWVNIMHcKQ2dZSUtvWkl6ajBFQXdNRGFBQXdaUUl4QU9jRzR2SjVFanVVS2N6T0o4N05HSkpDWGw4VmdhWVlFamk0eWhFVgpYTHgvUVZSNGFNRWJpZE1tN3ZoMWRZQ2RBd0l3Qlo5VmsvYm5VNkhHSW9FS3ROS1VYWUtSSE1GU3ZtUGRkUHUzCjZpTkdUd3JSS0ZnK251eUU0SHBiWHQ5azJXYmgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=","service":{"name":"cert-manager-webhook","namespace":"cert-manager","path":"/mutate"}},"failurePolicy":"Fail","matchPolicy":"Equivalent","name":"webhook.cert-manager.io","rules":[{"apiGroups":["cert-manager.io"],"apiVersions":["v1"],"operations":["CREATE"],"resources":["certificaterequests"]}],"sideEffects":"None","timeoutSeconds":30}]} 2025-12-08T17:56:02.057021025+00:00 stderr F I1208 17:56:02.054967 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'MutatingWebhookConfigurationUpdated' Updated MutatingWebhookConfiguration.admissionregistration.k8s.io/cert-manager-webhook because it changed 
2025-12-08T17:56:02.060582284+00:00 stderr F I1208 17:56:02.060526 1 admissionregistration.go:144] ValidatingWebhookConfiguration "/cert-manager-webhook" changes: {"webhooks":[{"admissionReviewVersions":["v1"],"clientConfig":{"caBundle":"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJ3VENDQVVlZ0F3SUJBZ0lRSGUvb1NOMUZ4czlZbXZQUmVpdFhXVEFLQmdncWhrak9QUVFEQXpBaU1TQXcKSGdZRFZRUURFeGRqWlhKMExXMWhibUZuWlhJdGQyVmlhRzl2YXkxallUQWVGdzB5TlRFeU1EZ3hOelUxTlRkYQpGdzB5TmpFeU1EZ3hOelUxTlRkYU1DSXhJREFlQmdOVkJBTVRGMk5sY25RdGJXRnVZV2RsY2kxM1pXSm9iMjlyCkxXTmhNSFl3RUFZSEtvWkl6ajBDQVFZRks0RUVBQ0lEWWdBRTNCUEcwZXFZOVpFM3NDWVVPbkp1NEt3WWNLUkEKOHJmUGliQU5ESkNhZzJ6WFE0NlBhZkllSGlXdlkrM2lvSWZKMThFSVdwaDl2djhBRCtNY2lmWWtmMDZnWTFBUQpibU95OU5aT09vamRsWE9zeCtkSW15b0pmUlhwUTFzYTBxSFVvMEl3UURBT0JnTlZIUThCQWY4RUJBTUNBcVF3CkR3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWFyV0ZsZzJCaDR0UHRpMExwWmFpL3YwWVNIMHcKQ2dZSUtvWkl6ajBFQXdNRGFBQXdaUUl4QU9jRzR2SjVFanVVS2N6T0o4N05HSkpDWGw4VmdhWVlFamk0eWhFVgpYTHgvUVZSNGFNRWJpZE1tN3ZoMWRZQ2RBd0l3Qlo5VmsvYm5VNkhHSW9FS3ROS1VYWUtSSE1GU3ZtUGRkUHUzCjZpTkdUd3JSS0ZnK251eUU0SHBiWHQ5azJXYmgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=","service":{"name":"cert-manager-webhook","namespace":"cert-manager","path":"/validate"}},"failurePolicy":"Fail","matchPolicy":"Equivalent","name":"webhook.cert-manager.io","namespaceSelector":{"matchExpressions":[{"key":"cert-manager.io/disable-validation","operator":"NotIn","values":["true"]}]},"rules":[{"apiGroups":["cert-manager.io","acme.cert-manager.io"],"apiVersions":["v1"],"operations":["CREATE","UPDATE"],"resources":["*/*"]}],"sideEffects":"None","timeoutSeconds":30}]} 2025-12-08T17:56:02.066560307+00:00 stderr F I1208 17:56:02.066509 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"cert-manager-operator", Name:"cert-manager-operator-controller-manager", UID:"dd1dc99c-6454-4887-83ff-22c54d6488f9", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ValidatingWebhookConfigurationUpdated' Updated ValidatingWebhookConfiguration.admissionregistration.k8s.io/cert-manager-webhook because it changed 2025-12-08T17:56:05.987948969+00:00 stderr F I1208 17:56:05.987387 1 request.go:752] "Waited before sending request" delay="1.195989798s" reason="client-side throttling, not priority and fairness" verb="PATCH" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster/status?fieldManager=cert-manager-cainjector-deployment-Deployment&force=true" 2025-12-08T17:56:07.188003108+00:00 stderr F I1208 17:56:07.187938 1 request.go:752] "Waited before sending request" delay="1.190182968s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster" 2025-12-08T17:56:08.386867505+00:00 stderr F I1208 17:56:08.386781 1 request.go:752] "Waited before sending request" delay="1.194373974s" reason="client-side throttling, not priority and fairness" verb="PATCH" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster/status?fieldManager=cert-manager-cainjector-deployment-reportDegraded&force=true" 2025-12-08T17:56:09.388118999+00:00 stderr F I1208 17:56:09.387702 1 request.go:752] "Waited before sending request" delay="1.187261928s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster" 2025-12-08T17:56:12.388118288+00:00 stderr F I1208 17:56:12.387376 1 request.go:752] "Waited before sending request" 
delay="1.19316254s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster" 2025-12-08T17:56:13.389004882+00:00 stderr F I1208 17:56:13.387990 1 request.go:752] "Waited before sending request" delay="1.17966297s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster" 2025-12-08T17:56:14.395730187+00:00 stderr F I1208 17:56:14.395032 1 request.go:752] "Waited before sending request" delay="1.001934304s" reason="client-side throttling, not priority and fairness" verb="PATCH" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster/status?fieldManager=cert-manager-cainjector-deployment-Deployment&force=true" 2025-12-08T17:56:15.587870888+00:00 stderr F I1208 17:56:15.587572 1 request.go:752] "Waited before sending request" delay="1.179037223s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/apis/operator.openshift.io/v1alpha1/certmanagers/cluster"
[binary archive member follows: gzip-compressed pod log home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/registry-server/0.log.gz -- compressed binary content not reproducible as text]
ÿ7o¾ÿþíϾyûîàÁïþ~ýþ÷‡Ÿõ]ÿÞøË¯ïþv÷öýç¬øøïÿ×û¿¼}óãcØqÝ^÷ñ£yÀŽ7?ÿpùÏ·?þSÿ"Ù’â·ð»/üç­0÷‡®¬ðƒFùˆ³×ÿ øZMª$*, øO~Á_”•ˆsPBŽÔ)>_5|RHEf–>…'U†ïÞ|ÿÏãÃÚˆü%i¨¸%¤áœÐCÉ©¶®Ë£;v^‹2üûÛ‘à ‰ä õÀz°W YÀ~/ìÅ{°÷07`¿ö\-Ø'“û­°§&ìÅœf瀛3,z˜bP½T 'úû zä<9°mØ»Õã]b`߃} ô±Bƒý^Ð[ö¦’ ì÷zð5}ïÝcû‘á>—ùÓ ^<ønÌ„hb‚¤¨BöRži)°{7Ã>[°¯#õ#<ö7þÇä«dDôßØî•–÷-Ò•&ìœrŽp`0€´Ê àHˆý‰ À™PÕÃõbØ›©@Oð§LJ˜@b±˜@ËıBÐó7Ñü¥û¡0tzÌúEDæïÈx/Îü4ËØŸ{].±ðÚߘ ÞTx%<¿‰v¯ï}ؽ»A¯=؇›ÀäÛl¸³¥êO&ŽÁÞ‰~dÊDÀ~äcßôÓôï\âëyþ¹/)ؽ#3?/Y »wd¸ž•aX~›a/=Ø?þè%¢ÿK0!{|Ÿû¦oø>#‹½éņ6¿­ß=L¨+àLœû¡ò¬…6¿‰­^¼Ö‘ ÂBé——D*\ ‰€y•ÞßÏ€="À­W< T`ìÖ§ö¨{`ì‘õ?/ö¢^Èü6f‚µœ|:˜p½²Œ:ÀÖ.÷0!iáÄÏÐ\ ( ØÌDŽo>j@{a¿z¢¿˜/ÔÿfÖÿ$Uk>&ŽýIÙBÅo3¿·'Ï×Có%ðâÛùÅ×ì5Ž¥#k¿Æ¡ €½°×žj¯c`çmßÞÄ„ˆÂú—½;@zœ_n˜™™ ¸Hv€ŒtÝ£°b¦óïµTýÄ pŽãgš"óç…ÃÄu€‘™_¤ ¡02ó;^þšÀ~âNeIìþÙ¬þ×},Ǚω{~¹¨[Ÿf¾õKÓ×G~í“R`íoì[°—uürû_{Y" ‡<÷vø¼‡ ÁŠÏÈN_¡ƒd˜îÙá/¤bØðµwwO“ „-œõœŽ_.ÌzŒ¬ò s¨ÁçýÙžjô#úßzΧz˜p5þ L¿ãCd)/T€&öúˆ¸¬à‘Õ?`?· å,Xõ¶u …*ä…)Ÿ‘>€zb·ÛTì‹5PÚ ûž÷ÞuÃv<ÍÌùMSØOÜç)ÎÇ ¾ÿHßßmñBƒÿHìcqœþ­¯zödýqÄþ@¿ÿÄ说4qÝaëý^=*ð L€ œtÆW)UX »2.ÕsßU™(àÄ-Ç¢ ¨ÀÆ*ê'aTाT`{&¤!€ {Añ=aÉIaô BD¾V VÀn¬"ÕÄ„b†A4Ò ò ‡ ll¯ž\À+ÊÀ„­Ÿ†=ƒeŸiæµnˆщ-ÃšŽÆ‘™À‰ž¬°h¹byàÆLÐâ&èR‡C´W<žxPÅúP}UÀ›|â§3*pÒÒ¶8*°+¤++´¥¤86±dl+â‰61 ?鹕W Œ7†ìzL*0p°ñõxfK7f7ÅV²ÄléÄ4½ ·¤FŽ›pfÁ Þì€Dä‹e-`?qq°ÉñƒÐ'ºõ:íñÿ”­þßľ Ó#í7€# @ÍåŒ~À‘ß{[¡ì'N©ßÕéö³~SqöM> eG øÌ¯}^›½€ýÀöó•Çg£gc&hOß)ÈPá›8l~=ƒßÄ[1æÇïû‰å]¯\À~&öá²°!væ‹Ø5zâÀ^ýf³=i~.!Ìò̬é%‘)°Ÿ(ù)\Ž%O[·tö¼õÓ¢LÀš+Ád|0áLÀ„û™ßžæÏJuBUxëèÐÒèk ªÂ-_šÁX ¹õeaêaBiVCní ´XÈGV@% h#:™ˆ¡r4qÌ)§!ª›œÉ7"ö~ f¤ G¶‹9çb,„9h.$昜8ìbT %‚÷¬&ˆëÌT`àÈˆë Œ ÌUÉE™=¯AÍ%( ì]è)ڒĽ¨‘£Ãn†y’™ Ç+ U‘UK ö#«n$ ^0v„{rƒã03€`Z†µa##@H$Äþ/¿¾ûÛÝÛ¿þôË¿ýãÍ»woïF§€áK%€‘…ÀHRE `$öÉ\‚­Q#ZJÀêØÿ}üô3]þbÍÀ=ø­Gô¼úË•=À#ƒÕrÃ]˜‰µ¾XªÆ Çg²åëxû%l¿»ÉÏÿXuànr*Daµ@‚Ñ9iï>%¢'C ŠP†.Œ¼*$Ø”+{H l$ Á¦z2ö`Qdg „õ<9Õ'ÞA†ð;ÈJcƒ.œ€Ôô˜¥µº0y²<į9H0ù&qH­\Ln.sG½qv8P/B½q[#©'1ÔbW”š6%·4 Fl fw䢢%p N@‰#r·P⺾ð`Ü“&ÜC‚2–„.œ€ÞSeH¦Š…—æù‚´” S8’1¤:Ñ?J1feÄ„3T˜Ü{(Qá$bÛUèràÓÂ551d›Fˆž§£–“a’qb‰1ÍI3l/tážK·µŠxâF›“”Zƒ¢«•„槈GÔna[I:"Ä<Åž%(Ï@ Dˆ³·¤”^÷ ,èÂ(‘-3eR‹1ét7x9ZyrJû3‚/ê-Õ§ŠÈRDŠ3P‚µÅvª$'Ä3P‚Z_Çÿ/£÷jù¡ ÇÅÍšTì$ØÃr| “ñ`ÂöæóAlZöÓ°g@¯=¢ÿà蟗À„oë²ÎÍ"8DÑDø˜ A>ZÆ…¼W·¡£ójé/ Ì_:°W‘öó: ‚ùBØ84Õ‹Ãë-FŽ!ØWaPmkh!BJ’ã*÷ÄL ¤¢°qHzž‚eDVÝ:´ÜßóãmÀe°‡ÐMÊÇÿƒ Lؘ ÚÒUÆÄ>r³ ŒBg¶•©P}™àÜR>fQ2 0acMðžÌ@3)&ú†Bq݃ ì=°”û-[‰ñ½‹}ÊI°GÖã™â @…+x%"ÂØL2pÏ„žáEŠU¦{;‚=¯Cާu“!œÕ¸ÇU$ÔD×Ò2¨ÀDoXóø T÷}ò%WÊ‚„­-#o¡ÂJ‰@…U{æŽõxŠˆã‰°YrÐÑ@ǧO¼¥ ìÏy%óÀ^ÙpGyïR"õ0¡ÒLq¼à µ[ž ±ÜÌàÌL <Žœç“7þ'x>{„‚.J´ Æ VÑ€ì%yìñF¸u ©‡¶Ǭo( `½»€§(ä`k9Ð&˜V¢¸¸uq±)E(ybuL¸-´ç&œàÅØBºn³À¬‰]GDºÒ°Äfà›û\±°ºdc&pÏ|±P-”’&¶¡Yb.q3ì{žr¤~*p '× „Ý墑ٿDˆbôpdq@ª81z8òå§b¶PÚºÐãý©êNÎ5×  æ­, 2*š¸ÄüߎèÒ…"ÀH ÈlT‰“0.Ñ ]¢–fR^Ĥ—h{J]8[„‰¹&6—Ã>þ= ²Tpèâ ”p鉋±â ”¦PñtJ Tœ>‡„.œ%‡¼´TY¯·3.ö.C^<{ÔAÍd¡e×ÑRib3^n'Ü3ÀJì"L«‡ ‘†½·Ã_žnT'«Z&ÞGeybÌi×9Ç–FÎa ]8ƒ»ÐÄ=”0öÍýbÞó®<~Ž»BÎ/¼å1¡Å•† Ù;78e:€ 'º¸ÆgQ<)ÎÞÛ]8Ï“"z –ÐètáDºPäÐ…©£2 /I¼$?¾$馤!£÷¬Ù€  Bㄱ›·!»§…q©HPT8U<nÍT˜p˜*0<5 ‘£Âw/$_ý3zÕç-×Ö»^_A¢±ÓUPT¸ŠBÅI˜€,ò´Y$T`÷ÉH † `S'd`¶ 4­Y°®*°{@Ю1ÈÀÜ~4ÈÀö»ÕØ (@fË@0Ê Âûo뢕`˜po 3€OÐŽýwG1᫟i©ûg«èšòˆ¶a×/þ%!n>öÈn<å¶LîïŃ Âuk!/À°#4`tZ Ê ¨Ð9â†x€xØ5tYÌP¹3d`ÿ´ñ2 0!÷2à0Á„“«È !“Ã^øå¼Âï Æ×m¿þùÕÆ?÷oC3¶Ñ ¹,Ó$DãeEã,ØCn*|¡hŠš*ìÜÃÞÒ€î*QØš v& 58é{*°¿«@R ¨€ÔLø ‘H ÐŒmxT`üýÞ$Øûˆ3ž‹8âüp|º]€.@>iUsP”xè+ ('eÛ-ä {ç HA$ŽÈR‚“_Œ=ö¾ö'Š•Ë?ošBNñÈ…J=‹¯ „ZÕ€6–§c—ÅÛXÒTûÁ„íWì9#5ÀE¨ÀìÔÀ¼ Se`d2p•Z*€ WQðÀ„ûFw„0¡säLØ3^ ¨'@†Åž¦¬*ÜØE^(0Ï /÷>˜|öñ«Ÿ“úÙ.|ØE»ÊÑ_ü@>v*B‚ ·DM§ÿž È#ž=à„ @®[’ ˜è*KCNC ƒ,@$ÀÆAl@lx‘†èÂκ@ÐèÂæ$м%¡ Ÿ¾%Qt%PBá/ T¬…2tá“Pñr”øºðt|õ7[ñÙwðT*òÅ¿TdrÂùdJ@EΟ]€;»”ˆ ˆ 7B(J<è‰MC¤@ºàÐèÂÃ]žÐè1H€à€à€àðIp€½]€.@RBšü…”夠Ä(!--Ôé*¥†P1y˜âH]h¢Dô¬ó€.@æ’`ï}>Œ˜€9üû‘*† ŒUöc±Gô‹½Â€æ#ó¾Oæîa¨Tà>p¨¶:{ØÀ î| ¹Ž5.º  ä劯ìÌÃWÿêKü —^¢”7ÙK_ü!#›¼)OÂ451‚ÅA $š×DSb|€àTd!Bœ sÌ‹ž„…“çàÀÞ¡Á@Œ¾ƒà„$ ¼ðNxx…ÜA PâáÒÕ@¤@K‚ |E‡ONCtºà H€4$hQ‚J²**dûWý’-+žƒ„¡­™•" è‰/„áw=ŒvÑpœxpW–±ÍK ]@›û·|!#0L¸¯NÃØùz˜ 
P+J*0U¨ÀÇT°rØcÏ"ÀÔuûÀêåvì7Ø®ôµ?.3-ZŸQšñ[ÖØ=¾4cÀÛñéØ#s¸qQ *€Ì*0¼šTP¨°GìÇ|ïØãL2¿áÎqOVQ$T`¯¾R= öP[O£ô´™»¨9AÆ-C« ªZè'6’^w,ì.¸|®¯êPÿU Þsö$@*xK&(_*z€‘C Æ“À AAá[µ‹õØ„ ÂîTKº‚ ØRñàã ‰l¡åtáLW(¡ xJ"uDêx¤ŽyQGˆ@ˆød‘>ŒgPâ%¬gq2(±¿íK‘@"`j‡Š@°Þ䆨Ó*Mát`?®) Øã{s{'¸™qQÈÁX9 `Ù@葌œP…@n=-'³D!`ë]…q&@Ιÿû±{J€ýÜád`Äð)F ¤…iÄŽ5 BÏ=ä © dgo®”ðà4†mª -1L´Œ@‚-I œ'!LÂ÷÷¼È+4 )âÀØÆ>—Hb…íD“p,öØ`ø{°0öï}Çæ$D@$8gÉ€RœÐÏL4-qýv³Åµ-س¬bØ?{÷ˆåI˜€§ÀٛŠÛËiõ0Aß`ÈÁä !H$0[ ÜM.°xÈ îæž<½’ Ê dçØ?ØD‰â•h$›­ ºT$¸\@`%wQp`´}¤"ú4 AxMëF¨…¸jð;JX„"Ŧ‘¢…BìE˜GÛl)môZ è.$J`?Ò%ª ÃŒÁfÍ%-ó¦Â«ÕG_ÁÆ}Ù“ø1‡8n½˜°'°“ ˜&W:®TÌÌ ýØÁá0 5Q;^€~¢÷+–º€ýHPI|-$ý3¡ô$L€ <¿ØýU8™€ýÀm¢A<±“«†ìS\€ýÌ̯ŠCùí€ýYßþ®& ØO\!"~(>R`H.Ë{‚žUØ5;3‹£ïgfù'Ä ?»-›mÂÞ‚Ÿ†=L€×°@"˽w„HòñÀŠÙm™P—ê&¸ŒÜ.º· ô¸ÀYÊŒð^Ø÷œœ”"bú¸»Ù$H]‚2ÐȰ*×ÄRÀ9`µÀºxIaü{[9ðu‰È*”‘âáÖT¨*\ â‹p_*ð%{LB%c÷…,aò¬°2?”Ð9°=%È.+µ‡¬*¸T·« =$¬ÀAš»ÉåCX ÛÅO\z„AX+/ˆm_rQi’]² û˃ʅVOò(y½uyØUL.ÙÓ‚|¨j:äayàKJK_‚ªQr@v•µ‹hO"©©µ/ÌM§U{b„qbÄö”àu‘žS‡j‘µàIŸÂu¨¦`á¬aüìYŽ%GŽEwä0#ÓGîA é¿Þ?Ðþ²‡ŒR6ª‘L³öƒ‚êCª AáG——ƒ‘ø†}ƒõ„ _c)•éGŸD^¡R¼9!Fø5šâŽÂœ­c„7ÉCªP…Û;Ú÷…NèºàML¬L/„áÑÂà2ËyRu€0ø5znÌ„{ø ÐpÀFàuYO-:FÓog !Ò#!^ 0!Wµ<ÄŒœs,ÊGÈ„gOC;—æ`ümß×WuUõ”¨Ó<8V¶uG[{–ÁEz‰ÂÎOÖ³,J<©Mî=ûÖck­X̾í|ˆdõô1+§'³o{OAö˜ÆÛœ(ì- -ž1Ç”9Ù¸óîØìÉ$sÈk®#e„¹²å}­,1ŸâÓ‡Ü<‹¡Ï»fO°ø L _' k" $ßuP F¼ŸjÙæFˆ`ÜñA™ÿÚ»Zt¡g9K]ÚÓžˆXK”jÓ“Šü†.a°UÂàÂð& žÂÐã2× ¦Ý@BÇëßÙC˜@Nw‚Ç/&8œ´ ]@ a GùêQfË :P8Ff]Ñ3ÇBŒØ»²°€£@AtxL™xG¼ãwŸÐsšNzR©oD,ºøÈžuMY9Çdg×®/#zJMUžÅ8üÁ¡¥Q¥³ÂH&¶-DÛ¥a‡ @ˆøBŸ @ðxT ø„·¢ã 8  =gjÑ…ƒžÓZGþ cH¬b~W¿pÈBî‘BÕ4])7ɀÏ@@ãaJp¸Ä¶³È² MÄ’d!×®³î=.Ñg•²ëˆÊ¢k:ݸ%µ«.ô˜FO—¡è º³‰0›B'òÉ,Œ4?»²˜cú‚g'÷ωä>ù#Í=…ZZAAáÉ#Œº†ç$q8£ÚÜòÔA—,ŸƒPqF)[Ð1Mh@<¹/¹ÔB„ÛÔOžl¾éÈrúGØ…ÙcÌ=•M ' Q=¿ BÅá·£tù0¶þ?û™ì [L†ÙŽÈ%«‰pÍ xò5]9´Œ<â€e¾—zO}!—Õ¯U¢Ñ…/|ÑÂ@•KàwmIµv‡<ã ±az‹.ؼ‰œlßÚuû–¯+{FÚl¾ê Œ;ï"rtÔ–ÖAê`“ÂÖÛøÌç!( _¸AÁà !ØW^¥$ãÛSOÄà¾M¿¿’ ø I$(ÜAa] @Iù]r"Hº€.|l5ål)%åœÁÂÃuØ©ó; 8´ a×ZèºpLp@štÁLÐt€`j «rNÞrúéw .oÑ…ct!z&Ñüºp.¤¢ èB_¨û÷žIgò](?„dáð5ÎÈÂ1H¼ž$@â#Öò‚~UÜ,Nì:þ\‡@€.tͶ©¡ XÈ@xñÍòšˆ?к4ŠAˆ@†7$V*H€Ä$"®LbÅÃcżfÏè+0ú ,ÝÀ"•Nº-˜€‰ÇeFwŠ`00hGAÏêVÂA!þ`Ÿú¯_†Ÿþ­lÄøû©Bdä¬-À?ûŽÈȆ2"Ëa‚I‡o˯êjU€Â9 !‚äã ]ƒhJФ¾²€`×kåI¤M7I¥uÀx‹Ý_2`&>ÌÍË5Ö„ ˜øÁD]æzø‡Ã‹ÐèÂAHDËÖ8 IB¡¢å¢=ºp y•ã@âc¦Ùõº&’‰4Ç>i’V  oë!5º€.à!Ñ…º`—g" C" ÃG$tü¹Üò/ØpNz^öýÃÏxˆ1–ým‘ðÿFÐ?ÿ?ˆ,Š,¿2Ò„D©o¾UÜ&CAñe;ÂÔÑBÄ»Ó2îÑtá#n„ ¬ã¿³uiËÝd" -ôà¨A6ŸöŒæZ0ï§L\ðøG†·Ò“, „A„á­M+ѶAý¹ìò_›ŠúéO4–Ÿ±™ÿ'çò~öÑm‘ûиøâ³º#‘ä˜{R!&¾7³ á; ‘  ÿ) “€S4 ‚ (ÐøôÊr„Ú2u„‡CP—j€`’,þ\vð×)üô*læßÈÆAü&dþñ·K‘aÇ倶&èF¼êM] °5 2.-â£:øµ&LÀħÍÂR0ïL8:ï÷R£éâÙ¯3AÁê몶ò‹o+/!D@Â÷ ÈèÚå »¿šr  /l‚(ü—*@Ù䇽^÷‡× ›|z6J·Šq‡ˆºôÏ¥ýÿ‘ƒŸþc—¥Öç„ù—ùÇß®b?W¡—u=Ëýe&p_8^.  ïËÆ $@âm¬/ìL¼3sÁL¼3áØ ˜xg"›Æ(s¤'LìÏDÄU=O0jd¹€Äþ2¡×Ì ”·¿Ý_/£ÇY–Øš °/ ãòìyó[¶D•Hq„yОU•äZÔ­÷¬[÷¼â•a·4pÌf$äN*ZtA¦x Ͳ ¹´I&dhÍ G0‘=HøÌ%؇'ßÂ]¡Š‡Üõ}^˨µ,Y5ž|ÊLV†°‡ú$îÿQ æ¦5@â$fÏe±È08¢c•£Ç>øŠ‘$»&-½*‰Qæ a8B¢‰‰Û@,Cž- s°ÿq̼¼g+”äŒá4­èfçµzÊY‹¦Õþaµ•¤Êm8Éæ§ÑZÜ„ÎtäiFÓ›n!iÌÆ:fO_Sçýç“rÄ §ôÜf©bt9wírvÄ OŠœ@ðàSz¿‚C×á“–ýèÂÞºÐò ]8é€;€0L À5>62ïè€mDzÞP 礓¯  ˜x›¦— xëfÏ(üþÄaø¼ú)a&>Ê„Nb±Â1‘èÂ;Eù$>Ö%ÇUµþ?»Œô³Ÿ!+æúÛX%‘¥}‘œ¶ óÏŸÙñÞbdÂÂð…}ÏB†íjZ1‰0l>.·„áà ¹ÂA$ÞLd•+ž+äª%ÂðAÖý¹&Â@Ùi" ÃÛAn/\$L|dâõ%&`âm 2$@â#kR—ÄRS—‚/ƒ •‚Áá=8Œ?§ ýŸøéOYõ·mS¨ÈŽæ7eÿøa %›ø‰y„’$ò²®vHœ²^p¦‚H¼¨š6†€Ä1H˜L %†Ò€€Ô²G –«ýÚÁ øÂÙúž’ÔŠðäêÉOÿïÏ%=LØ(ùµ««ÃF‡–éàMï0uEõÔ¢=‡‘Mž1)—«'“ȱJ8¤wÄXµžBdê’™0±ÿë ¹Ê[bGŒ)êxÊ]“M=„¡'XÈ•3a&Þî)† Á‚`±„჋ÔkH€Ä›˜ ™€‰Oû(‘ øt'§ç]–roKI®‰0|òC`&>‹uÍé0o5)Í‚ ˜xojÅ„ ˜øTèð˜1ËÖ˜8€‰¼?‡Ÿ×U.'”²½¥–³be°#äÁC¶¿ƒt¡‰ˆ©¡"õvs²°©,T ®óCè º°zˆ¸%²°iû¢'6„i =¬'ïJÍ; ް>‚1ƒp‚A0íAÂÙ9º¯.Œ–Äa¡#6µ‰0Þ‚m! ‚d¦”Á£Ãl A•#ÂÌ (A5ä&$Ô­‰5óHg¨D6!C$ÎP‰¦<¢2Œ5G4gJ¨TL¦Ÿ]iÔ¥¿øÊbCiî‡ð]x½‡3ÚPî@˜ÔœÜØ‚ž7o¿‚CWæ] 8,t]x+2~]è)+¡ /-dYdYøˆÄZ¤èv]xG"–¡ è~]x×… ¼€.àÐ…w$ê˜úHt©Dô4-U<„KèGTÂ@$ÞúØ-)FÌ™œ¼>ãåDH¼»¬C ë<}GºpÌÁ©+ ]@¦¢ èÂG]ÈJ‰÷ÛÆ$š ñ)«p‰·ééáJ %¡]x7”‹D]èÙ‰.œ£ Õ3 ƒ.p7Y8HdH¼ß'…ˆx†ªŽ~f©:ïðäæÕï€]h›Zè—¬€€Ø@lxË"F‚H¼¯& RF  Ÿ0dY@…O¯r©.  
€`ÁàÐÿô$N*/(@“f¡á É?‡Ä_¿¤ ?ý;ETÖgøP‘óTä?$¡äY~žþ˜æw@€Ÿh«>$º€.Lt]x_ÝA¨‰÷Ôsa! ]]Ltá$ ‰.œ£ @püÛ bÃ9åjÏÈÂDÐ…OoªZrÉ9­Jºðä²ão€]h+;" È‚! ÈÂûGÐ…@Ð…w]Ptª#¦zú‡“–1$º€.€@¨(þÄ«ktaïàèºðÖ˜\ ïHÅgBEÏ ºp.ôÔD\K@â•ð–DÓj®±˜tÛ‰×} ˆ€ˆD¬kN øˆÄŠST‚ã S t]øtg¢¥«í#W:¥‡MuA[æÞ|ÞÌ@Ž(=´„ ×aKºðd¿àkˆ—  Gø…–*uz™¸¢ ›N»´$“eK}l Á<‚C ï~¡g ŽP±w¨Pt]øˆÄêécWÌt¥¾ðèwu•ÓÆRtሺã×!CÇ”•,<÷EÕ Á”ô5…#d!Zû?èÂsï^ßè %—Ü‚ÕÁ’P§W}Æ‹ª"B¼ŒêÕ…ÿA"kÙØ…]§šôÐ…¶4bv Q3ãèP‹…¼E¡râ¼Èé†àNZ-_8chK¨°;‰˜L;>ùÁÌ Á²aJ?‚±¦HØz=§Dž{Xâ†À§èLtá¿ÐCDÆ/:HdáìÛ¥7Q£„å ,kúo$dŒ,+ì®iD´@ .”N@BZdaj¤Qu|tuAæ ±‰]8cªI[¸3KMv;>¹K)Óc,º”‡œ›éA"ÇXEyáÙºP>çfÎÐoÉ,ïŸ8+˜‚~´.ˆ¨§“G0½ð ¾yÄ“Û"·YòˆCê -H¨äT¥îøàñ7ꯥ]èÂSÐÖ‚ÄZ+¿ðèÄAVY±˜jz´,¸ßAˆ¡&VAÿ%SY½ïð‚·@ ³¼^8b¨©ˆ%6ŒâÉO#n|FýG{ç¶k¹q£áW™'Ø(žÉ¹ÍͼE ˜¹˜`ž?Òê¤Ûmkk´±ªU‰° øÜv?Åú‹ª÷´wõ¯¤´Œ!Ž5xžãÝ\NË(ä¬%‰9>aœÓ/(Ô)âÑŽØ¿JéJuŠ˜cº µ 1«™ã];HŸD•Ò¾K‰•ª]H*±{X*/Ôˈ)±Qƒ8Ýõeçˆ v K‰àÁ£gu"ñê'© "‰•÷^Ó–$Ò¾'“rŒˆÖ¶JQý“Л\¨U©˜Aê)'Ë·àzaýì¼€±i ú…$±µú)’`0{oGå…É×n"ÒúpÄ’€–“œ´IÝF<Ù¼ ‚TûÛ¦H œrKi ÚV(j¼p×¼ 9"ðhå"/¨sŠ$Ð`û •¼Þ±3V¿0C^ð$I0R=šzv^Pq‰r5Ý_øÑ7ÀV„Êž0ŘÑs$Ò$ª_˜¢_H119 "Bž«…ü! t­£å]N)-$Z´z:9Ū…”)b`ÄVŠ˜¡RXI¢$ñó;ÊH18‘)[}”òÙÍCãfu]5ƒ$(ÅïÌdu‹}×¼Ò/°‚T¿0ÇwJSæO‚îVûXn›RŠƒ0z«!ä$W)Ž'µ¶{\*/Ü4/äˆÀ‘¤Aå…)>br´ôæ¡õUÊgû[Jõœ¶ƒª6¤}±¸QŠøùãsÓ…Ø à&Š*Ï-Ñ@¹|sŠI€H®þñ®ëÛR4`»ù±î(§°³äT Ú°•îš2<ò@ޭܝO6°l"ØN õÊÛfLnå¨DpOHäˆ@z •æ°¬ääÚ~ «'ÖO¶²m"°@­÷1SR$Á$ 5hœAl9ç5©Rñä¿f„P¥bŠå]œR*„š•$¦x “R)Ñkâôà-![ÿhõé¹9f ’rK),(­lðO~"ÂõlnޤÜR*±×³¹›Nœ&Aå…$IhÎXÚÔZ}6â¾ýBJ^0³©/Ýõâ:EÞÀê09ňI=EH[j¨ç´sÌRú'±Ð²¼ÞµT¤ÌU=ê6bI&ÜYÒöç” ËövIø‡¨M"‰*s{¢Kw7´x‰ †L^Bu?ãK¥ˆŸ|nMªRT»`•*/ütMY½C)âg/W¥¨JAÕAV^øÝ÷ SNÖ°•/þ¹¾øÒ@i 4p÷“$L"‚ê²$áT’(I¤{šJsíæêª{A‰ ú…*¿k!¥$Q’ø¹_ÐI82µZýùh››rD½¥|ð[Jj¾ Zê7Ç'S“Ak÷ƒ÷¿î"¯·”õ¼ö‡$PLê+Ö“8¢SνÕ&†Ld!€Ô|aм Ù@¸ÕŠVG˯õÛEªXÏîk­ßwI°[å…Ï©ÖúM!‰ÈXÇ­E«×wÍ ˜#‚@ôê¦èZJ©0 ¨ïÛ?ùŠ L(ØëKÖ5wü. W‘úlÀ]_SZм‘`]QM‘<ãÖ¢ rå…Ûž#rD *•æð;¦œ#‚5Èëžòɯ+{ˆ òÂRÖx„&äï•ŠÊ “Ï€‰*/LqO™" BsÅꞼ׉¶Æ¤î)«_ø$ÄP­ü O~gÝC•ÒòBÆÑ™T„kîød_2‹2W¿0Ç9"EŠÈ­U¿pWÿBJqPjÎRsÇ9î#"E¢ºý»*/Ü4/¤œ#6 q=½¯~á»$Œ‰£ü ·=G¤%®}M“¬üL‘D Qå…G÷ a!­æ åkú. ÔílY÷”wÍ ˜"2¬½¯7CŽAÊÄRK8~H‚M›HIbŠ÷)ý‰Yõ O^íG¤‚5_˜cî˜# òò5Ýö=eЏ5æZù9Ç×)SjKü“Ý ›\C´²ÂÛRNaÍ*/Ü4/HÆ Òæ­òBÝRþV,æT[_ŸüjŠTŒ-ÊÕ4ÇERÎjêÆÕ/<ù•5©³5­Ûˆ9¶¾f,ö£ØŽàµ}áÑó…`PÀº¸.WÓo$AÖ¸¶A?;/ˆR½²žâ6B2.¨›¸i#žìvdmÍjÈôd #ʦ‚2ÕJð’ ãr/Ìq‘¡!'€/<ùã,ìõÑÚ9Ž)•a'6“2Aß5/¤h€ƒÜjê8E·`iA€›TZxòtA@Ä¥vAÏ‘"ã!¼7 ï=»¯¼0ùm„°¢X#&yK™R*  Ô³û'ï|#Vhåjšã‘ mmë!Ëýh´n? {­jš¤]È8F(8…Ö£©G:BÉ8ª8Üõ,“ˆ ŠCÚMuÆxA™%Lk%Ëý%'KUnûçǪTÜ´Täˆ@ê‘õ[ý2º‡}4ˆ•«éÉþW¦í—Ê S¸šRn®›5)_ümW¸QŠÀ´|ñekú. 
eàæµÚñÑý‚*KÍæxL9^L É˽ðàñÂv‚hŠQcÇ $A>|ºÀ›"Dð²Ï¿–þö÷íOþú÷ÿýÇaŸüÓ¯¿ü÷ÿ¼ÁìÛÿŠüžÙë~ýå/ûǯÿÿõú½+ðøÒgÜÑmðþ“b;Éÿ¿ìþüí¿äÏÛßö×_þ#æY|¿ójtNÀe³•ã ¦t}e»¬©Ú¦Eª~J“]­Ý–æ‘:”-üñmÙ8¶['°±Ü²ðëQ‰\Ù|÷¯d…Hýö¿Ò#ïþøIùǵÎ"Õ„ÑD }‡æä‘šÀö.ÍÑh÷$¼¯–q¶«he@Õn0J*Ÿ4HÖ oKs‰@Ê“ÙÂ~°¡F ÁÙÂ:%º¥]8*©rFØoKs…HËL²ÙnY×GÆ«YÖ©©Ð-ïÂïSÅpMh ¿Cs‰HÊ)íîE`‘°«\§¤b·´{ìúÅ3𯷥¹B fë–ÍvO¤ˆ€~•í:%»¥ÝC×ïIóËè·¥¹D¤e{°½0ÛÝ(J¡ M¯²¥uj*u˻Ƕ_9’Þ–æ ‘:–-ˆg³Ý‡…lÛom¿°ŽíºYCáØöK'y—¬ùmi.©CÙd³Ý¢èÆí*[^§¦r·¼{hû•3˜Ž2/˜Ñ£Ð;0WÔ±h³Ñî6Qde½ŠvŠÊݲî¡é÷l¨Òi.¨CÙž]¦Žaû²‰2"‰^M²NE•ni÷˦_åsüo4 (\ß¡¹B¤Že †Ùl·,Ü<^í}eš*Ýòî¡é×>TóA4£‘_}l«š~G³OE‹/—(Ãv¨ñ¸6*Äu<¿ØÍŠŸlú=¹L×v[šóêh¶p2Äv¿~CÕPŒ«leHÕn4ýI'cü&êƒhn¿òÅÕu¸ªçw4Û³¢cؾ–Ã6ººu×ñüb7_(~âù=yÕïKs…HËÀ³Ùî×oÂÀtí:5ºåÝcÏïÙûE3¼-Í%"u(Û³U¿cØâ·,¬ÆÂÙâ:5»åݯ®úeuäÛÒ\!RDz…ÆÙl÷, Ž„bWÙ®SS±[ÞůÖT&lƒh†y{‡æ‘:”-h6[zyZd;ëÕi!­SS©[Þ=4ý’ŸÑt½-Í"u,[8ûÌÛ¶¯ëTP½š…išJÝòî¡é—O,JnÔÑDnâøÍ%"u(Û³WäcØ~3ýn½o»hQÂuL¿ØÍŠÇ¦_:Û]7âv¼Í"u0ÛHgûzz¡ W÷çã:¶_ìf ÅOl¿'KZÄ}i.©cÙ†&³}Ec#Ë—O6²NM•nyW¾ºðûšÜ¶®;‚Þ¡¹B¤f–Íöu²Ù¿œWO6²NM•ny÷Ðö{v–Áh÷¥¹D¤e ÍRÙÒË) Æ$Í®m\§u|¿ÔÍJŸø~ጦòmiΩ£Ùz6ÛÝ)jÄ~u;,­ãû¥nÞP:öýž.Q¢a4·šª½Ñª¾ßÑlA9™ík;,(™\tèÓ:¾_êæ ¥cßïÉFFEÓ͸½Cs…HËNbûÚ¸NÁ~ñ~´Žï—ºyCéØ÷‹gÉuDÞíDs‰HÊöìÕÛ¶»St+©MUà"[\§¦b·¼{ìû=™:hÿÛñ~4WˆÔÁlƒ³ÙîYX¹Y¸_e»NMÅny¿ºFi;Ëèmi.©CÙžÖÔ!l_NQvôhx‘-­SS©[Þýê²_r–ùF3@ãwh®©cÙE6Û×ë m„r9Rש©Ô-ïû~?:8ßæ:-&³Ý¢èíê°×)©Ü-í~Õö»w1~[š+Dê`¶AÙl÷,lŠ$põŽ×)©Ü-ïZCùÄêØýãòßijP³wh.©CÙÂÉ~Ø1låõø¢©Úå±¾¬SS¥[Þ•/>O'ðÛÒ\!RDz…“ úƒØîYM.dš*Ýòî¡í÷d $ÑLÑÜXúeレZSDzÝ:—T¶ü2Šasòk‘ÊëØ~¹›5”m¿'çT è¾À£Íù#u8ÛÀl¶ûwLšqÉU¶²N¤j7š_ý9S÷ÇÆÿ¦é ‚ôšKDêP¶Ùlá[6ò‹Ÿãul¿ÜÍÊǶߓæ—Yô¶0WÔ±hÓÙîI8Â÷ƒêU¶ë”Tè–v¿ºíWº ¬Í%"u([NfûÚË©]Ü:Éë¸~¹›3”?qýžÀDòÛÂ\!P£õÈfûZ¹ÞDåâý¯cúånÆPþÄúy¤Ró šÐ1Þ¡¹D¤Že«Ì–^kwÐ0^dKë”Tê–w¿ºì—bÄQ¦Í"u,[@Îf»l°9Z\­©´NM¥ny—¾¸˜„ÝoKs‰HÊvûINf˯ńÂâvuªÏëÔTî–w¡'w4$"|[š+DêX¶Àél÷,¬á[¾z²áuj*wË»‡®_9ñç+p±ô¢¹D¤e œŒöe ue¿š„e’*ÝÒî±éWOhÒ€´Û‹æ :˜mX6Û}X¸”€®Žõe’*ÝÒî—wý† :ô¢¹D¤Že{©]Øþóét=„A././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-utilities/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000027300000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-utilities/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000000015115611513033066 0ustar zuulzuul././@LongLink0000644000000000000000000000026400000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-content/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-content/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000000015115611513033066 0ustar zuulzuul././@LongLink0000644000000000000000000000030600000000000011602 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ff0000755000175000017500000000000015115611513032746 5ustar zuulzuul././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/util/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ff0000755000175000017500000000000015115611520032744 5ustar zuulzuul././@LongLink0000644000000000000000000000032000000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/util/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ff0000644000175000017500000000010715115611513032746 0ustar zuulzuul2025-12-08T17:56:30.900341355+00:00 stdout F '/bin/cpb' -> '/util/cpb' ././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/pull/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ff0000755000175000017500000000000015115611520032744 5ustar zuulzuul././@LongLink0000644000000000000000000000032000000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/pull/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ff0000644000175000017500000000364415115611513032757 0ustar zuulzuul2025-12-08T17:56:32.790958526+00:00 stdout F skipping a dir without errors: / 2025-12-08T17:56:32.790958526+00:00 stdout F skipping a dir without errors: /bundle 2025-12-08T17:56:32.790958526+00:00 stdout F skipping all files in the dir: /dev 2025-12-08T17:56:32.790958526+00:00 stdout F skipping a dir without errors: /etc 2025-12-08T17:56:32.791200932+00:00 stdout F skipping a dir without errors: /manifests 2025-12-08T17:56:32.791200932+00:00 stdout F skipping a dir without errors: /metadata 2025-12-08T17:56:32.791219203+00:00 stdout F skipping all files in the dir: /proc 2025-12-08T17:56:32.791271454+00:00 stdout F skipping a dir without errors: /run 2025-12-08T17:56:32.791286824+00:00 stdout F skipping a dir without errors: /run/secrets 2025-12-08T17:56:32.791324855+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm 2025-12-08T17:56:32.791342946+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/ca 2025-12-08T17:56:32.791415728+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/syspurpose 2025-12-08T17:56:32.791456269+00:00 stdout F skipping all files in the dir: /sys 2025-12-08T17:56:32.791474999+00:00 stdout F skipping a dir without errors: /util 2025-12-08T17:56:32.791525101+00:00 stdout F skipping a dir without errors: /var 
2025-12-08T17:56:32.791565572+00:00 stdout F skipping a dir without errors: /var/run 2025-12-08T17:56:32.791583442+00:00 stdout F skipping a dir without errors: /var/run/secrets 2025-12-08T17:56:32.791626373+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io 2025-12-08T17:56:32.791679906+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io/serviceaccount 2025-12-08T17:56:32.791720607+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io/serviceaccount/..2025_12_08_17_56_28.1488302032 2025-12-08T17:56:32.791819859+00:00 stdout F &{metadata/annotations.yaml manifests/} ././@LongLink0000644000000000000000000000031600000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/extract/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ff0000755000175000017500000000000015115611520032744 5ustar zuulzuul././@LongLink0000644000000000000000000000032300000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/extract/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ff0000644000175000017500000000114615115611513032752 0ustar zuulzuul2025-12-08T17:56:33.540687421+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Using in-cluster kube client config" 2025-12-08T17:56:33.551900953+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Reading file" file=/bundle/manifests/smart-gateway-operator.clusterserviceversion.yaml 2025-12-08T17:56:33.554753468+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Reading file" file=/bundle/manifests/smartgateway.infra.watch_smartgateways.yaml 2025-12-08T17:56:33.556724469+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="Reading file" file=/bundle/metadata/annotations.yaml ././@LongLink0000644000000000000000000000030100000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611514033053 5ustar zuulzuul././@LongLink0000644000000000000000000000032100000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611521033051 5ustar zuulzuul././@LongLink0000644000000000000000000000032600000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000644000175000017500000000203615115611514033056 0ustar zuulzuul2025-12-08T17:44:19.250916731+00:00 stderr F W1208 
17:44:19.250707 1 deprecated.go:66] 2025-12-08T17:44:19.250916731+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:19.250916731+00:00 stderr F 2025-12-08T17:44:19.250916731+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 2025-12-08T17:44:19.250916731+00:00 stderr F 2025-12-08T17:44:19.250916731+00:00 stderr F =============================================== 2025-12-08T17:44:19.250916731+00:00 stderr F 2025-12-08T17:44:19.251395915+00:00 stderr F I1208 17:44:19.251349 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:19.252348671+00:00 stderr F I1208 17:44:19.252333 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:19.252717621+00:00 stderr F I1208 17:44:19.252686 1 kube-rbac-proxy.go:397] Starting TCP socket on 0.0.0.0:8443 2025-12-08T17:44:19.253276226+00:00 stderr F I1208 17:44:19.253248 1 kube-rbac-proxy.go:404] Listening securely on 0.0.0.0:8443 ././@LongLink0000644000000000000000000000033000000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/package-server-manager/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611521033051 5ustar zuulzuul././@LongLink0000644000000000000000000000033500000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/package-server-manager/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000644000175000017500000001747515115611514033073 0ustar zuulzuul2025-12-08T17:44:24.243396651+00:00 stderr F 2025-12-08T17:44:24Z INFO setup starting manager 2025-12-08T17:44:24.262456591+00:00 stderr F 2025-12-08T17:44:24Z INFO controller-runtime.metrics Starting metrics server 2025-12-08T17:44:24.262456591+00:00 stderr F 2025-12-08T17:44:24Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":9090", "secure": false} 2025-12-08T17:44:24.262456591+00:00 stderr F 2025-12-08T17:44:24Z INFO starting server {"name": "pprof", "addr": "[::]:6060"} 2025-12-08T17:44:24.262456591+00:00 stderr F 2025-12-08T17:44:24Z INFO starting server {"name": "health probe", "addr": "[::]:8080"} 2025-12-08T17:44:24.262456591+00:00 stderr F I1208 17:44:24.261003 1 leaderelection.go:257] attempting to acquire leader lease openshift-operator-lifecycle-manager/packageserver-controller-lock... 
2025-12-08T17:44:24.314925542+00:00 stderr F I1208 17:44:24.312102 1 leaderelection.go:271] successfully acquired lease openshift-operator-lifecycle-manager/packageserver-controller-lock 2025-12-08T17:44:24.314925542+00:00 stderr F 2025-12-08T17:44:24Z DEBUG events package-server-manager-77f986bd66-d8qsj_e5059018-1058-4589-8dd5-33db024be099 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"openshift-operator-lifecycle-manager","name":"packageserver-controller-lock","uid":"84a9c24b-6b87-4131-a141-45b6e73e53e9","apiVersion":"coordination.k8s.io/v1","resourceVersion":"37484"}, "reason": "LeaderElection"} 2025-12-08T17:44:24.314925542+00:00 stderr F 2025-12-08T17:44:24Z INFO Starting EventSource {"controller": "clusterserviceversion", "controllerGroup": "operators.coreos.com", "controllerKind": "ClusterServiceVersion", "source": "kind source: *v1.Infrastructure"} 2025-12-08T17:44:24.314925542+00:00 stderr F 2025-12-08T17:44:24Z INFO Starting EventSource {"controller": "clusterserviceversion", "controllerGroup": "operators.coreos.com", "controllerKind": "ClusterServiceVersion", "source": "kind source: *v1alpha1.ClusterServiceVersion"} 2025-12-08T17:44:24.418973830+00:00 stderr F 2025-12-08T17:44:24Z INFO controllers.packageserver requeueing the packageserver deployment after encountering infrastructure event {"infrastructure": "cluster"} 2025-12-08T17:44:24.418973830+00:00 stderr F 2025-12-08T17:44:24Z INFO Starting Controller {"controller": "clusterserviceversion", "controllerGroup": "operators.coreos.com", "controllerKind": "ClusterServiceVersion"} 2025-12-08T17:44:24.418973830+00:00 stderr F 2025-12-08T17:44:24Z INFO Starting workers {"controller": "clusterserviceversion", "controllerGroup": "operators.coreos.com", "controllerKind": "ClusterServiceVersion", "worker count": 1} 2025-12-08T17:44:24.418973830+00:00 stderr F 2025-12-08T17:44:24Z INFO controllers.packageserver handling current request {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}, "request": "openshift-operator-lifecycle-manager/packageserver"} 2025-12-08T17:44:24.418973830+00:00 stderr F 2025-12-08T17:44:24Z INFO controllers.packageserver checking to see if required RBAC exists {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}} 2025-12-08T17:44:24.740310206+00:00 stderr F 2025-12-08T17:44:24Z INFO controllers.packageserver confimed required RBAC exists {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}} 2025-12-08T17:44:24.740310206+00:00 stderr F 2025-12-08T17:44:24Z INFO controllers.packageserver currently topology mode {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}, "highly available": false} 2025-12-08T17:44:24.776913194+00:00 stderr F 2025-12-08T17:44:24Z INFO controllers.packageserver reconciliation result {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}, "res": "unchanged"} 2025-12-08T17:44:24.776913194+00:00 stderr F 2025-12-08T17:44:24Z INFO controllers.packageserver finished request reconciliation {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}} 2025-12-08T17:46:24.341543332+00:00 stderr F E1208 17:46:24.340744 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-operator-lifecycle-manager/leases/packageserver-controller-lock?timeout=2m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling 
back to slow path 2025-12-08T17:46:24.342494531+00:00 stderr F E1208 17:46:24.342431 1 leaderelection.go:436] error retrieving resource lock openshift-operator-lifecycle-manager/packageserver-controller-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-operator-lifecycle-manager/leases/packageserver-controller-lock?timeout=2m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:59.942915119+00:00 stderr F 2025-12-08T17:46:59Z INFO controllers.packageserver requeueing the packageserver deployment after encountering infrastructure event {"infrastructure": "cluster"} 2025-12-08T17:46:59.943089094+00:00 stderr F 2025-12-08T17:46:59Z INFO controllers.packageserver requeueing the packageserver deployment after encountering infrastructure event {"infrastructure": "cluster"} 2025-12-08T17:46:59.943844578+00:00 stderr F 2025-12-08T17:46:59Z INFO controllers.packageserver handling current request {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}, "request": "openshift-operator-lifecycle-manager/packageserver"} 2025-12-08T17:46:59.943844578+00:00 stderr F 2025-12-08T17:46:59Z INFO controllers.packageserver checking to see if required RBAC exists {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}} 2025-12-08T17:46:59.944032013+00:00 stderr F 2025-12-08T17:46:59Z INFO controllers.packageserver confimed required RBAC exists {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}} 2025-12-08T17:46:59.944128616+00:00 stderr F 2025-12-08T17:46:59Z INFO controllers.packageserver currently topology mode {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}, "highly available": false} 2025-12-08T17:46:59.956611290+00:00 stderr F 2025-12-08T17:46:59Z INFO controllers.packageserver reconciliation result {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}, "res": "unchanged"} 2025-12-08T17:46:59.956611290+00:00 stderr F 2025-12-08T17:46:59Z INFO controllers.packageserver finished request reconciliation {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}} 2025-12-08T17:47:02.929032039+00:00 stderr F 2025-12-08T17:47:02Z INFO controllers.packageserver handling current request {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}, "request": "openshift-operator-lifecycle-manager/packageserver"} 2025-12-08T17:47:02.929032039+00:00 stderr F 2025-12-08T17:47:02Z INFO controllers.packageserver checking to see if required RBAC exists {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}} 2025-12-08T17:47:02.929032039+00:00 stderr F 2025-12-08T17:47:02Z INFO controllers.packageserver confimed required RBAC exists {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}} 2025-12-08T17:47:02.929032039+00:00 stderr F 2025-12-08T17:47:02Z INFO controllers.packageserver currently topology mode {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}, "highly available": false} 2025-12-08T17:47:02.940944514+00:00 stderr F 2025-12-08T17:47:02Z INFO controllers.packageserver reconciliation result {"csv": {"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}, "res": "unchanged"} 2025-12-08T17:47:02.940944514+00:00 stderr F 2025-12-08T17:47:02Z INFO controllers.packageserver finished request reconciliation {"csv": 
{"name":"packageserver","namespace":"openshift-operator-lifecycle-manager"}} ././@LongLink0000644000000000000000000000022600000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_node-res0000755000175000017500000000000015115611514033030 5ustar zuulzuul././@LongLink0000644000000000000000000000025000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756/dns-node-resolver/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_node-res0000755000175000017500000000000015115611521033026 5ustar zuulzuul././@LongLink0000644000000000000000000000025500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756/dns-node-resolver/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_node-res0000644000175000017500000000014015115611514033025 0ustar zuulzuul2025-12-08T17:44:35.715291290+00:00 stdout F /tmp/hosts.tmp /etc/hosts differ: char 159, line 3 ././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611513033076 5ustar zuulzuul././@LongLink0000644000000000000000000000032200000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/extract/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611520033074 5ustar zuulzuul././@LongLink0000644000000000000000000000032700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/extract/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000422515115611513033103 0ustar zuulzuul2025-12-08T17:55:06.587360762+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Using in-cluster kube client config" 2025-12-08T17:55:06.603093745+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/agents.agent.k8s.elastic.co.crd.yaml 2025-12-08T17:55:06.606452906+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/apmservers.apm.k8s.elastic.co.crd.yaml 2025-12-08T17:55:06.611455610+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/beats.beat.k8s.elastic.co.crd.yaml 2025-12-08T17:55:06.612831958+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/elasticmapsservers.maps.k8s.elastic.co.crd.yaml 
2025-12-08T17:55:06.614526173+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/elasticsearch-eck-operator-certified.clusterserviceversion.yaml 2025-12-08T17:55:06.615604703+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/elasticsearchautoscalers.autoscaling.k8s.elastic.co.crd.yaml 2025-12-08T17:55:06.616527387+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/elasticsearches.elasticsearch.k8s.elastic.co.crd.yaml 2025-12-08T17:55:06.622684363+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/enterprisesearches.enterprisesearch.k8s.elastic.co.crd.yaml 2025-12-08T17:55:06.624705667+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/kibanas.kibana.k8s.elastic.co.crd.yaml 2025-12-08T17:55:06.626857965+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/logstashes.logstash.k8s.elastic.co.crd.yaml 2025-12-08T17:55:06.629352462+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/manifests/stackconfigpolicies.stackconfigpolicy.k8s.elastic.co.crd.yaml 2025-12-08T17:55:06.629944518+00:00 stderr F time="2025-12-08T17:55:06Z" level=info msg="Reading file" file=/bundle/metadata/annotations.yaml ././@LongLink0000644000000000000000000000031700000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/pull/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611520033074 5ustar zuulzuul././@LongLink0000644000000000000000000000032400000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/pull/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000364415115611513033107 0ustar zuulzuul2025-12-08T17:55:06.307857321+00:00 stdout F skipping a dir without errors: / 2025-12-08T17:55:06.307857321+00:00 stdout F skipping a dir without errors: /bundle 2025-12-08T17:55:06.307857321+00:00 stdout F skipping all files in the dir: /dev 2025-12-08T17:55:06.308135268+00:00 stdout F skipping a dir without errors: /etc 2025-12-08T17:55:06.308135268+00:00 stdout F skipping a dir without errors: /manifests 2025-12-08T17:55:06.308230310+00:00 stdout F skipping a dir without errors: /metadata 2025-12-08T17:55:06.308390035+00:00 stdout F skipping all files in the dir: /proc 2025-12-08T17:55:06.308408205+00:00 stdout F skipping a dir without errors: /run 2025-12-08T17:55:06.308464517+00:00 stdout F skipping a dir without errors: /run/secrets 2025-12-08T17:55:06.308464517+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm 2025-12-08T17:55:06.308483607+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/ca 2025-12-08T17:55:06.308543009+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/syspurpose 2025-12-08T17:55:06.308562409+00:00 stdout F skipping all files in the dir: /sys 2025-12-08T17:55:06.308593520+00:00 stdout F skipping a dir without errors: /util 2025-12-08T17:55:06.308632661+00:00 
stdout F skipping a dir without errors: /var 2025-12-08T17:55:06.308650502+00:00 stdout F skipping a dir without errors: /var/run 2025-12-08T17:55:06.308690123+00:00 stdout F skipping a dir without errors: /var/run/secrets 2025-12-08T17:55:06.308708523+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io 2025-12-08T17:55:06.308757515+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io/serviceaccount 2025-12-08T17:55:06.308796416+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io/serviceaccount/..2025_12_08_17_55_04.1897565788 2025-12-08T17:55:06.308905068+00:00 stdout F &{metadata/annotations.yaml manifests/} ././@LongLink0000644000000000000000000000031700000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/util/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611520033074 5ustar zuulzuul././@LongLink0000644000000000000000000000032400000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/util/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000010715115611513033076 0ustar zuulzuul2025-12-08T17:55:05.332464112+00:00 stdout F '/bin/cpb' -> '/util/cpb' ././@LongLink0000644000000000000000000000030100000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-route-contro0000755000175000017500000000000015115611513033107 5ustar zuulzuul././@LongLink0000644000000000000000000000033200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2/route-controller-manager/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-route-contro0000755000175000017500000000000015115611520033105 5ustar zuulzuul././@LongLink0000644000000000000000000000033700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2/route-controller-manager/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-route-contro0000644000175000017500000004357515115611513033127 0ustar zuulzuul2025-12-08T17:48:05.226750887+00:00 stderr F I1208 17:48:05.226422 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:48:05.226956693+00:00 stderr F I1208 17:48:05.226729 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 
2025-12-08T17:48:05.229105299+00:00 stderr F I1208 17:48:05.228610 1 builder.go:304] route-controller-manager version 4.20.0-202510211040.p2.gbf2fa66.assembly.stream.el9-bf2fa66-bf2fa662f57f233d8541f94c4953e0dcd7a5ab20 2025-12-08T17:48:05.230131519+00:00 stderr F I1208 17:48:05.230070 1 observer_polling.go:159] Starting file observer 2025-12-08T17:48:05.231312936+00:00 stderr F I1208 17:48:05.231244 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:48:06.135741549+00:00 stderr F I1208 17:48:06.135194 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:48:06.140033689+00:00 stderr F I1208 17:48:06.139980 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:48:06.140033689+00:00 stderr F I1208 17:48:06.140000 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:48:06.140033689+00:00 stderr F I1208 17:48:06.140021 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400 2025-12-08T17:48:06.140033689+00:00 stderr F I1208 17:48:06.140026 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200 2025-12-08T17:48:06.144059621+00:00 stderr F I1208 17:48:06.143996 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:48:06.144059621+00:00 stderr F I1208 17:48:06.143995 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:48:06.144059621+00:00 stderr F W1208 17:48:06.144032 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:48:06.144059621+00:00 stderr F W1208 17:48:06.144036 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:48:06.144059621+00:00 stderr F W1208 17:48:06.144040 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:48:06.144059621+00:00 stderr F W1208 17:48:06.144044 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:48:06.144059621+00:00 stderr F W1208 17:48:06.144047 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:48:06.144059621+00:00 stderr F W1208 17:48:06.144050 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:48:06.149675430+00:00 stderr F I1208 17:48:06.149616 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:48:06.150341991+00:00 stderr F I1208 17:48:06.150296 1 leaderelection.go:257] attempting to acquire leader lease openshift-route-controller-manager/openshift-route-controllers... 
2025-12-08T17:48:06.152307520+00:00 stderr F I1208 17:48:06.152259 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:48:06.152377862+00:00 stderr F I1208 17:48:06.152340 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:48:06.152416783+00:00 stderr F I1208 17:48:06.152396 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:48:06.152427194+00:00 stderr F I1208 17:48:06.152414 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:48:06.152455555+00:00 stderr F I1208 17:48:06.152437 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:48:06.152455555+00:00 stderr F I1208 17:48:06.152448 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:48:06.153064504+00:00 stderr F I1208 17:48:06.153019 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"route-controller-manager.openshift-route-controller-manager.svc\" [serving] validServingFor=[route-controller-manager.openshift-route-controller-manager.svc,route-controller-manager.openshift-route-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:22 +0000 UTC to 2027-11-02 07:52:23 +0000 UTC (now=2025-12-08 17:48:06.152977341 +0000 UTC))" 2025-12-08T17:48:06.153231569+00:00 stderr F I1208 17:48:06.153194 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:48:06.153350962+00:00 stderr F I1208 17:48:06.153318 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765216086\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765216085\" (2025-12-08 16:48:05 +0000 UTC to 2028-12-08 16:48:05 +0000 UTC (now=2025-12-08 17:48:06.153293021 +0000 UTC))" 2025-12-08T17:48:06.153350962+00:00 stderr F I1208 17:48:06.153345 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:48:06.153390584+00:00 stderr F I1208 17:48:06.153371 1 genericapiserver.go:696] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:48:06.153416344+00:00 stderr F I1208 17:48:06.153400 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:48:06.159113657+00:00 stderr F I1208 17:48:06.159043 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:06.159283702+00:00 stderr F I1208 17:48:06.159230 1 leaderelection.go:271] successfully acquired lease openshift-route-controller-manager/openshift-route-controllers 2025-12-08T17:48:06.159378485+00:00 stderr F I1208 17:48:06.159327 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:06.159824118+00:00 stderr F I1208 17:48:06.159655 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", 
Namespace:"openshift-route-controller-manager", Name:"openshift-route-controllers", UID:"cf913b48-978b-4cc3-ac76-eb193fd84b87", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"39447", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' route-controller-manager-7dd6d6d8c8-wfznc_5f3225d6-677f-404d-82e2-f610e4e06b95 became leader 2025-12-08T17:48:06.161365875+00:00 stderr F I1208 17:48:06.160651 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:06.162775927+00:00 stderr F I1208 17:48:06.162716 1 controller_manager.go:36] Starting "openshift.io/ingress-ip" 2025-12-08T17:48:06.162775927+00:00 stderr F I1208 17:48:06.162737 1 controller_manager.go:46] Started "openshift.io/ingress-ip" 2025-12-08T17:48:06.162775927+00:00 stderr F I1208 17:48:06.162749 1 controller_manager.go:36] Starting "openshift.io/ingress-to-route" 2025-12-08T17:48:06.169750329+00:00 stderr F I1208 17:48:06.169662 1 ingress.go:262] ingress-to-route metrics registered with prometheus 2025-12-08T17:48:06.169750329+00:00 stderr F I1208 17:48:06.169700 1 controller_manager.go:46] Started "openshift.io/ingress-to-route" 2025-12-08T17:48:06.169750329+00:00 stderr F I1208 17:48:06.169712 1 controller_manager.go:48] Started Route Controllers 2025-12-08T17:48:06.170106449+00:00 stderr F I1208 17:48:06.170074 1 ingress.go:313] Starting controller 2025-12-08T17:48:06.176560804+00:00 stderr F I1208 17:48:06.176528 1 reflector.go:430] "Caches populated" type="*v1.IngressClass" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:06.176619926+00:00 stderr F I1208 17:48:06.176573 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:06.179637947+00:00 stderr F I1208 17:48:06.179602 1 reflector.go:430] "Caches populated" type="*v1.Route" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:06.179867834+00:00 stderr F I1208 17:48:06.179830 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:06.253440660+00:00 stderr F I1208 17:48:06.253378 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:48:06.253683728+00:00 stderr F I1208 17:48:06.253439 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:48:06.253773501+00:00 stderr F I1208 17:48:06.253466 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:48:06.254152452+00:00 stderr F I1208 17:48:06.254123 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:48:06.25407434 +0000 UTC))" 2025-12-08T17:48:06.254641737+00:00 stderr F I1208 17:48:06.254615 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 
certDetail="\"route-controller-manager.openshift-route-controller-manager.svc\" [serving] validServingFor=[route-controller-manager.openshift-route-controller-manager.svc,route-controller-manager.openshift-route-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:22 +0000 UTC to 2027-11-02 07:52:23 +0000 UTC (now=2025-12-08 17:48:06.254572585 +0000 UTC))" 2025-12-08T17:48:06.255053999+00:00 stderr F I1208 17:48:06.255029 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765216086\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765216085\" (2025-12-08 16:48:05 +0000 UTC to 2028-12-08 16:48:05 +0000 UTC (now=2025-12-08 17:48:06.254999718 +0000 UTC))" 2025-12-08T17:48:06.256132141+00:00 stderr F I1208 17:48:06.256109 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:48:06.25607888 +0000 UTC))" 2025-12-08T17:48:06.256195413+00:00 stderr F I1208 17:48:06.256181 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:48:06.256165622 +0000 UTC))" 2025-12-08T17:48:06.256258985+00:00 stderr F I1208 17:48:06.256245 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:48:06.256229864 +0000 UTC))" 2025-12-08T17:48:06.256383149+00:00 stderr F I1208 17:48:06.256366 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:48:06.256282846 +0000 UTC))" 2025-12-08T17:48:06.256432820+00:00 stderr F I1208 17:48:06.256420 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:48:06.25640373 +0000 UTC))" 2025-12-08T17:48:06.256485843+00:00 stderr F I1208 17:48:06.256473 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 
certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:48:06.256451451 +0000 UTC))" 2025-12-08T17:48:06.256535834+00:00 stderr F I1208 17:48:06.256523 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:48:06.256503864 +0000 UTC))" 2025-12-08T17:48:06.256593036+00:00 stderr F I1208 17:48:06.256579 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:48:06.256555345 +0000 UTC))" 2025-12-08T17:48:06.256643228+00:00 stderr F I1208 17:48:06.256630 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:48:06.256615657 +0000 UTC))" 2025-12-08T17:48:06.256695779+00:00 stderr F I1208 17:48:06.256683 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:48:06.256667818 +0000 UTC))" 2025-12-08T17:48:06.256782492+00:00 stderr F I1208 17:48:06.256764 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:48:06.25672646 +0000 UTC))" 2025-12-08T17:48:06.257126152+00:00 stderr F I1208 17:48:06.257103 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"route-controller-manager.openshift-route-controller-manager.svc\" [serving] validServingFor=[route-controller-manager.openshift-route-controller-manager.svc,route-controller-manager.openshift-route-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:22 +0000 UTC to 2027-11-02 07:52:23 +0000 UTC (now=2025-12-08 17:48:06.257080091 +0000 UTC))" 2025-12-08T17:48:06.257414581+00:00 stderr F I1208 17:48:06.257391 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" 
certDetail="\"apiserver-loopback-client@1765216086\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765216085\" (2025-12-08 16:48:05 +0000 UTC to 2028-12-08 16:48:05 +0000 UTC (now=2025-12-08 17:48:06.257373049 +0000 UTC))" 2025-12-08T17:48:06.279099487+00:00 stderr F I1208 17:48:06.279035 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" ././@LongLink0000644000000000000000000000024200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_netwo0000755000175000017500000000000015115611513033214 5ustar zuulzuul././@LongLink0000644000000000000000000000026200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_netwo0000755000175000017500000000000015115611520033212 5ustar zuulzuul././@LongLink0000644000000000000000000000026700000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_netwo0000644000175000017500000000202015115611513033210 0ustar zuulzuul2025-12-08T17:44:23.858386219+00:00 stderr F W1208 17:44:23.857918 1 deprecated.go:66] 2025-12-08T17:44:23.858386219+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:23.858386219+00:00 stderr F 2025-12-08T17:44:23.858386219+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 
2025-12-08T17:44:23.858386219+00:00 stderr F 2025-12-08T17:44:23.858386219+00:00 stderr F =============================================== 2025-12-08T17:44:23.858386219+00:00 stderr F 2025-12-08T17:44:23.858852762+00:00 stderr F I1208 17:44:23.858829 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:23.859817389+00:00 stderr F I1208 17:44:23.859793 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:23.860458996+00:00 stderr F I1208 17:44:23.860418 1 kube-rbac-proxy.go:397] Starting TCP socket on :8443 2025-12-08T17:44:23.860893868+00:00 stderr F I1208 17:44:23.860859 1 kube-rbac-proxy.go:404] Listening securely on :8443 ././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/network-metrics-daemon/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_netwo0000755000175000017500000000000015115611520033212 5ustar zuulzuul././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/network-metrics-daemon/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_netwo0000644000175000017500000012041515115611513033221 0ustar zuulzuul2025-12-08T17:44:21.847392276+00:00 stderr F I1208 17:44:21.846524 1 main.go:45] Version:2f68fe12a9b9bb7676d1f0933b6be632cd4deff2 2025-12-08T17:44:21.847392276+00:00 stderr F I1208 17:44:21.847193 1 main.go:46] Starting with config{ :9091 crc} 2025-12-08T17:44:21.847773336+00:00 stderr F W1208 17:44:21.847741 1 client_config.go:618] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 
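Editor's note: the client_config.go warning above ("Neither --kubeconfig nor --master was specified. Using the inClusterConfig.") is the standard client-go fallback: with no flags supplied, the daemon builds its REST config from the pod's service-account credentials. A minimal sketch of that fallback in Go, assuming only upstream k8s.io/client-go; the flag names are illustrative and not the daemon's actual options:

    package main

    import (
        "flag"
        "log"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Illustrative flags; the real daemon wires these through its own config struct.
        kubeconfig := flag.String("kubeconfig", "", "path to a kubeconfig file (optional)")
        master := flag.String("master", "", "API server URL (optional)")
        flag.Parse()

        // With both values empty, BuildConfigFromFlags logs the warning seen above
        // and falls back to the in-cluster service-account configuration.
        cfg, err := clientcmd.BuildConfigFromFlags(*master, *kubeconfig)
        if err != nil {
            log.Fatalf("building REST config: %v", err)
        }

        client, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            log.Fatalf("creating clientset: %v", err)
        }
        _ = client // hand the clientset to informers/controllers from here
    }

BuildConfigFromFlags emits this warning before delegating to rest.InClusterConfig(), which reads the service-account token and CA bundle mounted into the pod.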
2025-12-08T17:44:21.868012989+00:00 stderr F I1208 17:44:21.864788 1 controller.go:42] Setting up event handlers 2025-12-08T17:44:21.868012989+00:00 stderr F I1208 17:44:21.865227 1 podmetrics.go:101] Serving network metrics 2025-12-08T17:44:21.868012989+00:00 stderr F I1208 17:44:21.865235 1 controller.go:101] Starting pod controller 2025-12-08T17:44:21.868012989+00:00 stderr F I1208 17:44:21.865239 1 controller.go:104] Waiting for informer caches to sync 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065507 1 controller.go:109] Starting workers 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065567 1 controller.go:114] Started workers 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065798 1 controller.go:192] Received pod 'csi-hostpathplugin-qrls7' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065922 1 controller.go:151] Successfully synced 'hostpath-provisioner/csi-hostpathplugin-qrls7' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065933 1 controller.go:192] Received pod 'apiserver-9ddfb9f55-8h8fl' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065949 1 controller.go:151] Successfully synced 'openshift-apiserver/apiserver-9ddfb9f55-8h8fl' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065954 1 controller.go:192] Received pod 'authentication-operator-7f5c659b84-5scww' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065967 1 controller.go:151] Successfully synced 'openshift-authentication-operator/authentication-operator-7f5c659b84-5scww' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065974 1 controller.go:192] Received pod 'oauth-openshift-66458b6674-ztdrc' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065988 1 controller.go:151] Successfully synced 'openshift-authentication/oauth-openshift-66458b6674-ztdrc' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.065993 1 controller.go:192] Received pod 'cluster-samples-operator-6b564684c8-2cnx5' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066006 1 controller.go:151] Successfully synced 'openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-2cnx5' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066012 1 controller.go:192] Received pod 'openshift-config-operator-5777786469-v69x6' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066024 1 controller.go:151] Successfully synced 'openshift-config-operator/openshift-config-operator-5777786469-v69x6' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066028 1 controller.go:192] Received pod 'console-operator-67c89758df-79mps' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066040 1 controller.go:151] Successfully synced 'openshift-console-operator/console-operator-67c89758df-79mps' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066044 1 controller.go:192] Received pod 'console-64d44f6ddf-dhfvx' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066056 1 controller.go:151] Successfully synced 'openshift-console/console-64d44f6ddf-dhfvx' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066061 1 controller.go:192] Received pod 'downloads-747b44746d-x7wvx' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066079 1 controller.go:151] Successfully synced 'openshift-console/downloads-747b44746d-x7wvx' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066084 1 controller.go:192] Received pod 
'openshift-controller-manager-operator-686468bdd5-m5ltz' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066096 1 controller.go:151] Successfully synced 'openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-m5ltz' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066101 1 controller.go:192] Received pod 'controller-manager-65b6cccf98-6wjgz' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066123 1 controller.go:151] Successfully synced 'openshift-controller-manager/controller-manager-65b6cccf98-6wjgz' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066127 1 controller.go:192] Received pod 'dns-operator-799b87ffcd-9b988' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066139 1 controller.go:151] Successfully synced 'openshift-dns-operator/dns-operator-799b87ffcd-9b988' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066143 1 controller.go:192] Received pod 'dns-default-c5tbq' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066157 1 controller.go:151] Successfully synced 'openshift-dns/dns-default-c5tbq' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066162 1 controller.go:192] Received pod 'etcd-operator-69b85846b6-k26tc' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066174 1 controller.go:151] Successfully synced 'openshift-etcd-operator/etcd-operator-69b85846b6-k26tc' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066179 1 controller.go:192] Received pod 'cluster-image-registry-operator-86c45576b9-rwgjl' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066205 1 controller.go:151] Successfully synced 'openshift-image-registry/cluster-image-registry-operator-86c45576b9-rwgjl' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066210 1 controller.go:192] Received pod 'ingress-canary-psjrr' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066221 1 controller.go:151] Successfully synced 'openshift-ingress-canary/ingress-canary-psjrr' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066226 1 controller.go:192] Received pod 'ingress-operator-6b9cb4dbcf-2pwhz' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066238 1 controller.go:151] Successfully synced 'openshift-ingress-operator/ingress-operator-6b9cb4dbcf-2pwhz' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066243 1 controller.go:192] Received pod 'kube-apiserver-operator-575994946d-bhk9x' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066258 1 controller.go:151] Successfully synced 'openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-bhk9x' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066277 1 controller.go:192] Received pod 'kube-controller-manager-operator-69d5f845f8-6lgwk' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066300 1 controller.go:151] Successfully synced 'openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-6lgwk' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066304 1 controller.go:192] Received pod 'openshift-kube-scheduler-operator-54f497555d-gvb6q' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066317 1 controller.go:151] Successfully synced 'openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-gvb6q' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066322 1 controller.go:192] Received pod 'kube-storage-version-migrator-operator-565b79b866-6gkgz' 
2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066334 1 controller.go:151] Successfully synced 'openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-6gkgz' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066338 1 controller.go:192] Received pod 'migrator-866fcbc849-5pp5q' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066350 1 controller.go:151] Successfully synced 'openshift-kube-storage-version-migrator/migrator-866fcbc849-5pp5q' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066354 1 controller.go:192] Received pod 'control-plane-machine-set-operator-75ffdb6fcd-dhfht' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066367 1 controller.go:151] Successfully synced 'openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-dhfht' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066371 1 controller.go:192] Received pod 'machine-api-operator-755bb95488-5httz' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066385 1 controller.go:151] Successfully synced 'openshift-machine-api/machine-api-operator-755bb95488-5httz' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066390 1 controller.go:192] Received pod 'machine-config-controller-f9cdd68f7-p88k2' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066402 1 controller.go:151] Successfully synced 'openshift-machine-config-operator/machine-config-controller-f9cdd68f7-p88k2' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066407 1 controller.go:192] Received pod 'machine-config-operator-67c9d58cbb-4g75z' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066419 1 controller.go:151] Successfully synced 'openshift-machine-config-operator/machine-config-operator-67c9d58cbb-4g75z' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066424 1 controller.go:192] Received pod 'marketplace-operator-547dbd544d-85wdh' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066436 1 controller.go:151] Successfully synced 'openshift-marketplace/marketplace-operator-547dbd544d-85wdh' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066440 1 controller.go:192] Received pod 'multus-admission-controller-69db94689b-v9sxk' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066452 1 controller.go:151] Successfully synced 'openshift-multus/multus-admission-controller-69db94689b-v9sxk' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066456 1 controller.go:192] Received pod 'network-metrics-daemon-54w78' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066476 1 controller.go:151] Successfully synced 'openshift-multus/network-metrics-daemon-54w78' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066480 1 controller.go:192] Received pod 'networking-console-plugin-5ff7774fd9-nljh6' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066492 1 controller.go:151] Successfully synced 'openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066497 1 controller.go:192] Received pod 'network-check-source-5bb8f5cd97-xdvz5' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066509 1 controller.go:151] Successfully synced 'openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066513 1 controller.go:192] Received pod 'network-check-target-fhkjl' 
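Editor's note: the long run of "Received pod '...'" / "Successfully synced '...'" pairs in this log is the classic client-go informer-plus-workqueue controller shape. The sketch below reproduces that shape for a plain pod informer; it is illustrative only, not the network-metrics-daemon source, and the function name is made up:

    package main

    import (
        "fmt"
        "time"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
        "k8s.io/client-go/util/workqueue"
    )

    // runPodController wires a pod informer to a rate-limited workqueue and
    // drains it in a worker loop, the general pattern behind the log entries above.
    func runPodController(client kubernetes.Interface, stop <-chan struct{}) {
        factory := informers.NewSharedInformerFactory(client, 30*time.Second)
        podInformer := factory.Core().V1().Pods().Informer()
        queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

        podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                pod := obj.(*corev1.Pod)
                fmt.Printf("Received pod '%s'\n", pod.Name)
                if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
                    queue.Add(key) // enqueue namespace/name for the worker
                }
            },
        })

        factory.Start(stop)
        // Corresponds to "Waiting for informer caches to sync" earlier in the log.
        cache.WaitForCacheSync(stop, podInformer.HasSynced)

        // Corresponds to "Starting workers" / "Started workers".
        for {
            item, shutdown := queue.Get()
            if shutdown {
                return
            }
            key := item.(string)
            // A real controller reconciles the object here before reporting success.
            fmt.Printf("Successfully synced '%s'\n", key)
            queue.Forget(item)
            queue.Done(item)
        }
    }

The "Waiting for informer caches to sync" and worker-start lines earlier in this file correspond to the WaitForCacheSync call and the worker loop in the sketch.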
2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066524 1 controller.go:151] Successfully synced 'openshift-network-diagnostics/network-check-target-fhkjl' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066528 1 controller.go:192] Received pod 'apiserver-8596bd845d-rdv9c' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066539 1 controller.go:151] Successfully synced 'openshift-oauth-apiserver/apiserver-8596bd845d-rdv9c' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066543 1 controller.go:192] Received pod 'catalog-operator-75ff9f647d-bl822' 2025-12-08T17:44:22.068166338+00:00 stderr F I1208 17:44:22.066555 1 controller.go:151] Successfully synced 'openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-bl822' 2025-12-08T17:44:22.068166338+00:00 stderr P I1208 17:44:22.066560 1 2025-12-08T17:44:22.068247640+00:00 stderr F controller.go:192] Received pod 'collect-profiles-29420250-qhrfp' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066571 1 controller.go:151] Successfully synced 'openshift-operator-lifecycle-manager/collect-profiles-29420250-qhrfp' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066575 1 controller.go:192] Received pod 'olm-operator-5cdf44d969-ggh59' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066587 1 controller.go:151] Successfully synced 'openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-ggh59' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066591 1 controller.go:192] Received pod 'package-server-manager-77f986bd66-d8qsj' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066601 1 controller.go:151] Successfully synced 'openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-d8qsj' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066606 1 controller.go:192] Received pod 'packageserver-7d4fc7d867-4kjg6' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066623 1 controller.go:151] Successfully synced 'openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-4kjg6' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066627 1 controller.go:192] Received pod 'route-controller-manager-776cdc94d6-qkg2q' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066638 1 controller.go:151] Successfully synced 'openshift-route-controller-manager/route-controller-manager-776cdc94d6-qkg2q' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066642 1 controller.go:192] Received pod 'service-ca-operator-5b9c976747-cdz4v' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066653 1 controller.go:151] Successfully synced 'openshift-service-ca-operator/service-ca-operator-5b9c976747-cdz4v' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066657 1 controller.go:192] Received pod 'service-ca-74545575db-d69qv' 2025-12-08T17:44:22.068247640+00:00 stderr F I1208 17:44:22.066667 1 controller.go:151] Successfully synced 'openshift-service-ca/service-ca-74545575db-d69qv' 2025-12-08T17:44:22.075096267+00:00 stderr F I1208 17:44:22.070622 1 controller.go:192] Received pod 'openshift-apiserver-operator-846cbfc458-q6lj7' 2025-12-08T17:44:22.075096267+00:00 stderr F I1208 17:44:22.070727 1 controller.go:151] Successfully synced 'openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-q6lj7' 2025-12-08T17:44:23.446902515+00:00 stderr F I1208 17:44:23.445749 1 controller.go:192] Received pod 'certified-operators-lxwl6' 2025-12-08T17:44:23.446902515+00:00 stderr F I1208 
17:44:23.446442 1 controller.go:151] Successfully synced 'openshift-marketplace/certified-operators-lxwl6' 2025-12-08T17:44:23.773951126+00:00 stderr F I1208 17:44:23.771830 1 controller.go:192] Received pod 'community-operators-sb7gg' 2025-12-08T17:44:23.773977237+00:00 stderr F I1208 17:44:23.773946 1 controller.go:151] Successfully synced 'openshift-marketplace/community-operators-sb7gg' 2025-12-08T17:44:23.803947025+00:00 stderr F I1208 17:44:23.803625 1 controller.go:192] Received pod 'community-operators-r22jf' 2025-12-08T17:44:23.803947025+00:00 stderr F I1208 17:44:23.803662 1 controller.go:151] Successfully synced 'openshift-marketplace/community-operators-r22jf' 2025-12-08T17:44:24.018960429+00:00 stderr F I1208 17:44:24.018176 1 controller.go:192] Received pod 'certified-operators-n5vp7' 2025-12-08T17:44:24.018960429+00:00 stderr F I1208 17:44:24.018227 1 controller.go:151] Successfully synced 'openshift-marketplace/certified-operators-n5vp7' 2025-12-08T17:44:25.028467666+00:00 stderr F I1208 17:44:25.026921 1 controller.go:192] Received pod 'redhat-marketplace-rvglb' 2025-12-08T17:44:25.028467666+00:00 stderr F I1208 17:44:25.027470 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-marketplace-rvglb' 2025-12-08T17:44:25.556280312+00:00 stderr F I1208 17:44:25.554888 1 controller.go:192] Received pod 'redhat-marketplace-6m6rs' 2025-12-08T17:44:25.556280312+00:00 stderr F I1208 17:44:25.555421 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-marketplace-6m6rs' 2025-12-08T17:44:25.856128881+00:00 stderr F I1208 17:44:25.854603 1 controller.go:192] Received pod 'redhat-operators-zfv6j' 2025-12-08T17:44:25.856128881+00:00 stderr F I1208 17:44:25.855553 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-operators-zfv6j' 2025-12-08T17:44:26.155336143+00:00 stderr F I1208 17:44:26.152539 1 controller.go:192] Received pod 'revision-pruner-6-crc' 2025-12-08T17:44:26.155336143+00:00 stderr F I1208 17:44:26.152575 1 controller.go:151] Successfully synced 'openshift-kube-scheduler/revision-pruner-6-crc' 2025-12-08T17:44:26.294004936+00:00 stderr F I1208 17:44:26.292393 1 controller.go:192] Received pod 'redhat-operators-w7jrs' 2025-12-08T17:44:26.294004936+00:00 stderr F I1208 17:44:26.293449 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-operators-w7jrs' 2025-12-08T17:44:28.258844420+00:00 stderr F I1208 17:44:28.257296 1 controller.go:192] Received pod 'revision-pruner-11-crc' 2025-12-08T17:44:28.258844420+00:00 stderr F I1208 17:44:28.258169 1 controller.go:151] Successfully synced 'openshift-kube-apiserver/revision-pruner-11-crc' 2025-12-08T17:44:41.338488662+00:00 stderr F I1208 17:44:41.336808 1 controller.go:192] Received pod 'image-registry-66587d64c8-s6hn4' 2025-12-08T17:44:41.338488662+00:00 stderr F I1208 17:44:41.337368 1 controller.go:151] Successfully synced 'openshift-image-registry/image-registry-66587d64c8-s6hn4' 2025-12-08T17:44:58.960720080+00:00 stderr F I1208 17:44:58.960208 1 controller.go:151] Successfully synced 'openshift-multus/cni-sysctl-allowlist-ds-bdhnb' 2025-12-08T17:44:59.283480142+00:00 stderr F I1208 17:44:59.283407 1 controller.go:151] Successfully synced 'openshift-marketplace/community-operators-sb7gg' 2025-12-08T17:44:59.294396395+00:00 stderr F I1208 17:44:59.294345 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-marketplace-6m6rs' 2025-12-08T17:44:59.969195111+00:00 stderr F I1208 17:44:59.969123 1 controller.go:151] Successfully synced 
'openshift-marketplace/certified-operators-n5vp7' 2025-12-08T17:45:00.845027311+00:00 stderr F I1208 17:45:00.844394 1 controller.go:192] Received pod 'collect-profiles-29420265-vsxwc' 2025-12-08T17:45:00.845027311+00:00 stderr F I1208 17:45:00.844984 1 controller.go:151] Successfully synced 'openshift-operator-lifecycle-manager/collect-profiles-29420265-vsxwc' 2025-12-08T17:45:00.996537097+00:00 stderr F I1208 17:45:00.996468 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-operators-w7jrs' 2025-12-08T17:45:04.538987216+00:00 stderr F I1208 17:45:04.536293 1 controller.go:192] Received pod 'revision-pruner-12-crc' 2025-12-08T17:45:04.538987216+00:00 stderr F I1208 17:45:04.536925 1 controller.go:151] Successfully synced 'openshift-kube-apiserver/revision-pruner-12-crc' 2025-12-08T17:45:12.113433515+00:00 stderr F I1208 17:45:12.112833 1 controller.go:192] Received pod 'installer-12-crc' 2025-12-08T17:45:12.113484966+00:00 stderr F I1208 17:45:12.113443 1 controller.go:151] Successfully synced 'openshift-kube-apiserver/installer-12-crc' 2025-12-08T17:45:48.097622899+00:00 stderr F I1208 17:45:48.097190 1 controller.go:192] Received pod 'oauth-openshift-57ffdf54dd-5dg99' 2025-12-08T17:45:48.097701591+00:00 stderr F I1208 17:45:48.097616 1 controller.go:151] Successfully synced 'openshift-authentication/oauth-openshift-57ffdf54dd-5dg99' 2025-12-08T17:45:48.339593701+00:00 stderr F I1208 17:45:48.339534 1 controller.go:151] Successfully synced 'openshift-authentication/oauth-openshift-66458b6674-ztdrc' 2025-12-08T17:47:24.637859764+00:00 stderr F I1208 17:47:24.637240 1 controller.go:192] Received pod 'controller-manager-6cd9c44569-vhg58' 2025-12-08T17:47:24.637859764+00:00 stderr F I1208 17:47:24.637841 1 controller.go:151] Successfully synced 'openshift-controller-manager/controller-manager-6cd9c44569-vhg58' 2025-12-08T17:47:24.638212025+00:00 stderr F I1208 17:47:24.637857 1 controller.go:151] Successfully synced 'openshift-route-controller-manager/route-controller-manager-776cdc94d6-qkg2q' 2025-12-08T17:47:24.638212025+00:00 stderr F I1208 17:47:24.637863 1 controller.go:151] Successfully synced 'openshift-kube-apiserver/kube-apiserver-startup-monitor-crc' 2025-12-08T17:47:24.638212025+00:00 stderr F I1208 17:47:24.637633 1 controller.go:192] Received pod 'route-controller-manager-6975b9f87f-8vkdj' 2025-12-08T17:47:24.638212025+00:00 stderr F I1208 17:47:24.638038 1 controller.go:151] Successfully synced 'openshift-route-controller-manager/route-controller-manager-6975b9f87f-8vkdj' 2025-12-08T17:47:24.638212025+00:00 stderr F I1208 17:47:24.637913 1 controller.go:151] Successfully synced 'openshift-controller-manager/controller-manager-65b6cccf98-6wjgz' 2025-12-08T17:47:24.959590862+00:00 stderr F I1208 17:47:24.959168 1 controller.go:192] Received pod 'controller-manager-5cb6f9d449-mjxkv' 2025-12-08T17:47:24.959590862+00:00 stderr F I1208 17:47:24.959580 1 controller.go:151] Successfully synced 'openshift-controller-manager/controller-manager-5cb6f9d449-mjxkv' 2025-12-08T17:47:25.086487476+00:00 stderr F I1208 17:47:25.086422 1 controller.go:151] Successfully synced 'openshift-controller-manager/controller-manager-6cd9c44569-vhg58' 2025-12-08T17:48:04.518367355+00:00 stderr F I1208 17:48:04.517701 1 controller.go:151] Successfully synced 'openshift-route-controller-manager/route-controller-manager-6975b9f87f-8vkdj' 2025-12-08T17:48:05.018976731+00:00 stderr F I1208 17:48:05.018253 1 controller.go:192] Received pod 'route-controller-manager-7dd6d6d8c8-wfznc' 
2025-12-08T17:48:05.018976731+00:00 stderr F I1208 17:48:05.018932 1 controller.go:151] Successfully synced 'openshift-route-controller-manager/route-controller-manager-7dd6d6d8c8-wfznc' 2025-12-08T17:48:10.193254492+00:00 stderr F I1208 17:48:10.192659 1 controller.go:192] Received pod 'marketplace-operator-547dbd544d-6bbtn' 2025-12-08T17:48:10.193291953+00:00 stderr F I1208 17:48:10.193251 1 controller.go:151] Successfully synced 'openshift-marketplace/marketplace-operator-547dbd544d-6bbtn' 2025-12-08T17:48:10.578176508+00:00 stderr F I1208 17:48:10.577385 1 controller.go:151] Successfully synced 'openshift-marketplace/community-operators-r22jf' 2025-12-08T17:48:10.592390998+00:00 stderr F I1208 17:48:10.590024 1 controller.go:151] Successfully synced 'openshift-marketplace/certified-operators-lxwl6' 2025-12-08T17:48:10.607970879+00:00 stderr F I1208 17:48:10.607204 1 controller.go:151] Successfully synced 'openshift-marketplace/marketplace-operator-547dbd544d-85wdh' 2025-12-08T17:48:10.615549779+00:00 stderr F I1208 17:48:10.615439 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-marketplace-rvglb' 2025-12-08T17:48:10.628764098+00:00 stderr F I1208 17:48:10.628087 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-operators-zfv6j' 2025-12-08T17:48:11.911052075+00:00 stderr F I1208 17:48:11.910996 1 controller.go:192] Received pod 'certified-operators-58d6l' 2025-12-08T17:48:11.911098446+00:00 stderr F I1208 17:48:11.911049 1 controller.go:151] Successfully synced 'openshift-marketplace/certified-operators-58d6l' 2025-12-08T17:48:12.739219072+00:00 stderr F I1208 17:48:12.738574 1 controller.go:192] Received pod 'redhat-marketplace-xp5vr' 2025-12-08T17:48:12.739319565+00:00 stderr F I1208 17:48:12.739304 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-marketplace-xp5vr' 2025-12-08T17:48:14.332661331+00:00 stderr F I1208 17:48:14.326909 1 controller.go:192] Received pod 'redhat-operators-xpnf9' 2025-12-08T17:48:14.332661331+00:00 stderr F I1208 17:48:14.327440 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-operators-xpnf9' 2025-12-08T17:48:15.068964269+00:00 stderr F I1208 17:48:15.066371 1 controller.go:192] Received pod 'community-operators-zdvxg' 2025-12-08T17:48:15.068964269+00:00 stderr F I1208 17:48:15.067048 1 controller.go:151] Successfully synced 'openshift-marketplace/community-operators-zdvxg' 2025-12-08T17:53:41.746133739+00:00 stderr F I1208 17:53:41.745617 1 controller.go:151] Successfully synced 'openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-x68jp' 2025-12-08T17:53:42.011292509+00:00 stderr F I1208 17:53:42.010726 1 controller.go:151] Successfully synced 'openshift-ovn-kubernetes/ovnkube-node-wr4x4' 2025-12-08T17:54:28.970899983+00:00 stderr F I1208 17:54:28.970325 1 controller.go:192] Received pod 'certified-operators-tkpnz' 2025-12-08T17:54:28.970964155+00:00 stderr F I1208 17:54:28.970914 1 controller.go:151] Successfully synced 'openshift-marketplace/certified-operators-tkpnz' 2025-12-08T17:54:42.181815761+00:00 stderr F I1208 17:54:42.181311 1 controller.go:151] Successfully synced 'openshift-marketplace/certified-operators-tkpnz' 2025-12-08T17:54:42.404452552+00:00 stderr F I1208 17:54:42.404200 1 controller.go:192] Received pod 'redhat-operators-hl4hq' 2025-12-08T17:54:42.404452552+00:00 stderr F I1208 17:54:42.404429 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-operators-hl4hq' 2025-12-08T17:54:51.278790930+00:00 stderr F I1208 
17:54:51.278238 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-marketplace-xp5vr' 2025-12-08T17:54:51.701729232+00:00 stderr F I1208 17:54:51.701278 1 controller.go:192] Received pod 'image-registry-5d9d95bf5b-cmjbz' 2025-12-08T17:54:51.701784164+00:00 stderr F I1208 17:54:51.701726 1 controller.go:151] Successfully synced 'openshift-image-registry/image-registry-5d9d95bf5b-cmjbz' 2025-12-08T17:54:58.061848170+00:00 stderr F I1208 17:54:58.061296 1 controller.go:192] Received pod '6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5' 2025-12-08T17:54:58.061848170+00:00 stderr F I1208 17:54:58.061832 1 controller.go:151] Successfully synced 'openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5' 2025-12-08T17:54:59.641735107+00:00 stderr F I1208 17:54:59.640784 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-operators-hl4hq' 2025-12-08T17:55:04.973752858+00:00 stderr F I1208 17:55:04.972810 1 controller.go:192] Received pod '8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f' 2025-12-08T17:55:04.974015616+00:00 stderr F I1208 17:55:04.973958 1 controller.go:151] Successfully synced 'openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f' 2025-12-08T17:55:05.188204999+00:00 stderr F I1208 17:55:05.187430 1 controller.go:192] Received pod '1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj' 2025-12-08T17:55:05.188204999+00:00 stderr F I1208 17:55:05.187466 1 controller.go:151] Successfully synced 'openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj' 2025-12-08T17:55:16.310934101+00:00 stderr F I1208 17:55:16.310410 1 controller.go:192] Received pod 'obo-prometheus-operator-86648f486b-4j9kn' 2025-12-08T17:55:16.310934101+00:00 stderr F I1208 17:55:16.310855 1 controller.go:151] Successfully synced 'openshift-operators/obo-prometheus-operator-86648f486b-4j9kn' 2025-12-08T17:55:16.676236232+00:00 stderr F I1208 17:55:16.676199 1 controller.go:192] Received pod 'obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm' 2025-12-08T17:55:16.676324225+00:00 stderr F I1208 17:55:16.676314 1 controller.go:151] Successfully synced 'openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm' 2025-12-08T17:55:16.786507260+00:00 stderr F I1208 17:55:16.785382 1 controller.go:192] Received pod 'obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t' 2025-12-08T17:55:16.786634343+00:00 stderr F I1208 17:55:16.786621 1 controller.go:151] Successfully synced 'openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t' 2025-12-08T17:55:16.859141404+00:00 stderr F I1208 17:55:16.853829 1 controller.go:192] Received pod 'observability-operator-78c97476f4-mg4b2' 2025-12-08T17:55:16.859323019+00:00 stderr F I1208 17:55:16.859298 1 controller.go:151] Successfully synced 'openshift-operators/observability-operator-78c97476f4-mg4b2' 2025-12-08T17:55:16.871947339+00:00 stderr F I1208 17:55:16.865446 1 controller.go:192] Received pod 'perses-operator-68bdb49cbf-m2cdr' 2025-12-08T17:55:16.872139664+00:00 stderr F I1208 17:55:16.872117 1 controller.go:151] Successfully synced 'openshift-operators/perses-operator-68bdb49cbf-m2cdr' 2025-12-08T17:55:17.342394650+00:00 stderr F I1208 17:55:17.342353 1 controller.go:192] Received pod 'elastic-operator-c9c86658-4qchz' 2025-12-08T17:55:17.342496062+00:00 stderr F I1208 17:55:17.342483 1 controller.go:151] Successfully synced 
'service-telemetry/elastic-operator-c9c86658-4qchz' 2025-12-08T17:55:34.325997082+00:00 stderr F I1208 17:55:34.321806 1 controller.go:192] Received pod 'cert-manager-operator-controller-manager-64c74584c4-qtkx9' 2025-12-08T17:55:34.325997082+00:00 stderr F I1208 17:55:34.322282 1 controller.go:151] Successfully synced 'cert-manager-operator/cert-manager-operator-controller-manager-64c74584c4-qtkx9' 2025-12-08T17:55:39.793986982+00:00 stderr F I1208 17:55:39.792529 1 controller.go:151] Successfully synced 'openshift-image-registry/image-registry-66587d64c8-s6hn4' 2025-12-08T17:55:41.230072848+00:00 stderr F I1208 17:55:41.226799 1 controller.go:192] Received pod 'cert-manager-webhook-7894b5b9b4-wdn4b' 2025-12-08T17:55:41.230072848+00:00 stderr F I1208 17:55:41.229795 1 controller.go:151] Successfully synced 'cert-manager/cert-manager-webhook-7894b5b9b4-wdn4b' 2025-12-08T17:55:42.910797547+00:00 stderr F I1208 17:55:42.908232 1 controller.go:192] Received pod 'elasticsearch-es-default-0' 2025-12-08T17:55:42.910797547+00:00 stderr F I1208 17:55:42.908725 1 controller.go:151] Successfully synced 'service-telemetry/elasticsearch-es-default-0' 2025-12-08T17:55:43.945988342+00:00 stderr F I1208 17:55:43.943310 1 controller.go:192] Received pod 'cert-manager-cainjector-7dbf76d5c8-fdk5q' 2025-12-08T17:55:43.945988342+00:00 stderr F I1208 17:55:43.944170 1 controller.go:151] Successfully synced 'cert-manager/cert-manager-cainjector-7dbf76d5c8-fdk5q' 2025-12-08T17:56:00.842318185+00:00 stderr F I1208 17:56:00.841742 1 controller.go:192] Received pod 'cert-manager-858d87f86b-7q2ss' 2025-12-08T17:56:00.842318185+00:00 stderr F I1208 17:56:00.842286 1 controller.go:151] Successfully synced 'cert-manager/cert-manager-858d87f86b-7q2ss' 2025-12-08T17:56:09.944534397+00:00 stderr F I1208 17:56:09.944084 1 controller.go:192] Received pod 'infrawatch-operators-xmhcm' 2025-12-08T17:56:09.947483678+00:00 stderr F I1208 17:56:09.945769 1 controller.go:151] Successfully synced 'service-telemetry/infrawatch-operators-xmhcm' 2025-12-08T17:56:13.817968713+00:00 stderr F I1208 17:56:13.817586 1 controller.go:192] Received pod 'infrawatch-operators-tv99j' 2025-12-08T17:56:13.819982628+00:00 stderr F I1208 17:56:13.819927 1 controller.go:151] Successfully synced 'service-telemetry/infrawatch-operators-tv99j' 2025-12-08T17:56:14.079089418+00:00 stderr F I1208 17:56:14.079038 1 controller.go:151] Successfully synced 'service-telemetry/infrawatch-operators-xmhcm' 2025-12-08T17:56:29.213540780+00:00 stderr F I1208 17:56:29.212889 1 controller.go:192] Received pod '36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq' 2025-12-08T17:56:29.213540780+00:00 stderr F I1208 17:56:29.213428 1 controller.go:151] Successfully synced 'service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq' 2025-12-08T17:56:29.787974672+00:00 stderr F I1208 17:56:29.785567 1 controller.go:192] Received pod 'f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx' 2025-12-08T17:56:29.787974672+00:00 stderr F I1208 17:56:29.786467 1 controller.go:151] Successfully synced 'service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx' 2025-12-08T17:56:30.709685442+00:00 stderr F I1208 17:56:30.709642 1 controller.go:192] Received pod '6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj' 2025-12-08T17:56:30.709685442+00:00 stderr F I1208 17:56:30.709680 1 controller.go:151] Successfully synced 
'openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj' 2025-12-08T17:56:40.166278098+00:00 stderr F I1208 17:56:40.165512 1 controller.go:192] Received pod 'interconnect-operator-78b9bd8798-456sz' 2025-12-08T17:56:40.166278098+00:00 stderr F I1208 17:56:40.166135 1 controller.go:151] Successfully synced 'service-telemetry/interconnect-operator-78b9bd8798-456sz' 2025-12-08T17:56:41.789728422+00:00 stderr F I1208 17:56:41.789048 1 controller.go:192] Received pod 'service-telemetry-operator-79647f8775-zs8hl' 2025-12-08T17:56:41.789728422+00:00 stderr F I1208 17:56:41.789502 1 controller.go:151] Successfully synced 'service-telemetry/service-telemetry-operator-79647f8775-zs8hl' 2025-12-08T17:56:43.270495313+00:00 stderr F I1208 17:56:43.269980 1 controller.go:192] Received pod 'smart-gateway-operator-5cd794ff55-w8r45' 2025-12-08T17:56:43.270495313+00:00 stderr F I1208 17:56:43.270481 1 controller.go:151] Successfully synced 'service-telemetry/smart-gateway-operator-5cd794ff55-w8r45' 2025-12-08T17:57:29.350998964+00:00 stderr F I1208 17:57:29.349967 1 controller.go:192] Received pod 'default-interconnect-55bf8d5cb-76n5w' 2025-12-08T17:57:29.350998964+00:00 stderr F I1208 17:57:29.350511 1 controller.go:151] Successfully synced 'service-telemetry/default-interconnect-55bf8d5cb-76n5w' 2025-12-08T17:57:40.652415666+00:00 stderr F I1208 17:57:40.651918 1 controller.go:192] Received pod 'prometheus-default-0' 2025-12-08T17:57:40.652463257+00:00 stderr F I1208 17:57:40.652410 1 controller.go:151] Successfully synced 'service-telemetry/prometheus-default-0' 2025-12-08T17:57:49.131515252+00:00 stderr F I1208 17:57:49.130507 1 controller.go:192] Received pod 'default-snmp-webhook-6774d8dfbc-75fxn' 2025-12-08T17:57:49.131515252+00:00 stderr F I1208 17:57:49.131500 1 controller.go:151] Successfully synced 'service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn' 2025-12-08T17:57:57.991815653+00:00 stderr F I1208 17:57:57.991308 1 controller.go:192] Received pod 'alertmanager-default-0' 2025-12-08T17:57:57.991815653+00:00 stderr F I1208 17:57:57.991808 1 controller.go:151] Successfully synced 'service-telemetry/alertmanager-default-0' 2025-12-08T17:58:10.636574316+00:00 stderr F I1208 17:58:10.635313 1 controller.go:192] Received pod 'default-cloud1-coll-meter-smartgateway-787645d794-4zrzx' 2025-12-08T17:58:10.636574316+00:00 stderr F I1208 17:58:10.636518 1 controller.go:151] Successfully synced 'service-telemetry/default-cloud1-coll-meter-smartgateway-787645d794-4zrzx' 2025-12-08T17:58:11.592419930+00:00 stderr F I1208 17:58:11.592156 1 controller.go:192] Received pod 'default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v' 2025-12-08T17:58:11.592506592+00:00 stderr F I1208 17:58:11.592496 1 controller.go:151] Successfully synced 'service-telemetry/default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v' 2025-12-08T17:58:19.451921404+00:00 stderr F I1208 17:58:19.444856 1 controller.go:192] Received pod 'default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp' 2025-12-08T17:58:19.451921404+00:00 stderr F I1208 17:58:19.451241 1 controller.go:151] Successfully synced 'service-telemetry/default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp' 2025-12-08T17:58:20.900957304+00:00 stderr F I1208 17:58:20.900424 1 controller.go:192] Received pod 'default-cloud1-coll-event-smartgateway-d956b4648-jwkwn' 2025-12-08T17:58:20.900957304+00:00 stderr F I1208 17:58:20.900945 1 controller.go:151] Successfully synced 
'service-telemetry/default-cloud1-coll-event-smartgateway-d956b4648-jwkwn' 2025-12-08T17:58:34.284014978+00:00 stderr F I1208 17:58:34.283275 1 controller.go:192] Received pod 'default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk' 2025-12-08T17:58:34.284014978+00:00 stderr F I1208 17:58:34.284006 1 controller.go:151] Successfully synced 'service-telemetry/default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk' 2025-12-08T17:58:35.513838434+00:00 stderr F I1208 17:58:35.511929 1 controller.go:151] Successfully synced 'service-telemetry/default-interconnect-55bf8d5cb-76n5w' 2025-12-08T17:58:35.545397100+00:00 stderr F I1208 17:58:35.545022 1 controller.go:192] Received pod 'default-interconnect-55bf8d5cb-rwr2k' 2025-12-08T17:58:35.545397100+00:00 stderr F I1208 17:58:35.545064 1 controller.go:151] Successfully synced 'service-telemetry/default-interconnect-55bf8d5cb-rwr2k' 2025-12-08T17:59:06.838770457+00:00 stderr F I1208 17:59:06.837753 1 controller.go:192] Received pod 'qdr-test' 2025-12-08T17:59:06.838770457+00:00 stderr F I1208 17:59:06.838750 1 controller.go:151] Successfully synced 'service-telemetry/qdr-test' 2025-12-08T17:59:14.760750985+00:00 stderr F I1208 17:59:14.759237 1 controller.go:192] Received pod 'stf-smoketest-smoke1-pbhxq' 2025-12-08T17:59:14.760867808+00:00 stderr F I1208 17:59:14.760849 1 controller.go:151] Successfully synced 'service-telemetry/stf-smoketest-smoke1-pbhxq' 2025-12-08T17:59:15.279671692+00:00 stderr F I1208 17:59:15.277527 1 controller.go:192] Received pod 'curl' 2025-12-08T17:59:15.279671692+00:00 stderr F I1208 17:59:15.277700 1 controller.go:151] Successfully synced 'service-telemetry/curl' 2025-12-08T17:59:52.280945996+00:00 stderr F I1208 17:59:52.277463 1 controller.go:192] Received pod 'community-operators-jlbqc' 2025-12-08T17:59:52.280945996+00:00 stderr F I1208 17:59:52.279169 1 controller.go:151] Successfully synced 'openshift-marketplace/community-operators-jlbqc' 2025-12-08T18:00:00.842407744+00:00 stderr F I1208 18:00:00.841862 1 controller.go:192] Received pod 'collect-profiles-29420280-hxvtb' 2025-12-08T18:00:00.842468326+00:00 stderr F I1208 18:00:00.842421 1 controller.go:151] Successfully synced 'openshift-operator-lifecycle-manager/collect-profiles-29420280-hxvtb' 2025-12-08T18:00:05.325773537+00:00 stderr F I1208 18:00:05.325095 1 controller.go:151] Successfully synced 'openshift-marketplace/community-operators-jlbqc' 2025-12-08T18:01:18.845551128+00:00 stderr F I1208 18:01:18.845016 1 controller.go:192] Received pod 'infrawatch-operators-b88kp' 2025-12-08T18:01:18.845551128+00:00 stderr F I1208 18:01:18.845524 1 controller.go:151] Successfully synced 'service-telemetry/infrawatch-operators-b88kp' 2025-12-08T18:01:32.027087465+00:00 stderr F I1208 18:01:32.026380 1 controller.go:151] Successfully synced 'service-telemetry/infrawatch-operators-b88kp' 2025-12-08T18:02:41.306139570+00:00 stderr F I1208 18:02:41.305658 1 controller.go:192] Received pod 'must-gather-5cz8j' 2025-12-08T18:02:41.306178001+00:00 stderr F I1208 18:02:41.306152 1 controller.go:151] Successfully synced 'openshift-must-gather-gctth/must-gather-5cz8j' 2025-12-08T18:04:32.535172199+00:00 stderr F I1208 18:04:32.534454 1 controller.go:192] Received pod 'certified-operators-p8pz8' 2025-12-08T18:04:32.535245431+00:00 stderr F I1208 18:04:32.535168 1 controller.go:151] Successfully synced 'openshift-marketplace/certified-operators-p8pz8' 2025-12-08T18:04:43.537045821+00:00 stderr F I1208 18:04:43.536235 1 controller.go:192] Received pod 'redhat-operators-5gtms' 
2025-12-08T18:04:43.537045821+00:00 stderr F I1208 18:04:43.537013 1 controller.go:151] Successfully synced 'openshift-marketplace/redhat-operators-5gtms' 2025-12-08T18:04:46.650075519+00:00 stderr F I1208 18:04:46.647820 1 controller.go:151] Successfully synced 'openshift-marketplace/certified-operators-p8pz8' 2025-12-08T18:04:56.115922255+00:00 stderr F I1208 18:04:56.113134 1 controller.go:151] Successfully synced 'openshift-must-gather-gctth/must-gather-5cz8j' ././@LongLink0000644000000000000000000000024700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-mana0000755000175000017500000000000015115611514032747 5ustar zuulzuul././@LongLink0000644000000000000000000000027400000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170/cert-manager-webhook/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-mana0000755000175000017500000000000015115611521032745 5ustar zuulzuul././@LongLink0000644000000000000000000000030100000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170/cert-manager-webhook/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-mana0000644000175000017500000000766115115611514032763 0ustar zuulzuul2025-12-08T17:55:57.780647823+00:00 stderr F I1208 17:55:57.780442 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:57.780647823+00:00 stderr F I1208 17:55:57.780597 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:57.780647823+00:00 stderr F I1208 17:55:57.780602 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:57.780647823+00:00 stderr F I1208 17:55:57.780606 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:57.783809189+00:00 stderr F I1208 17:55:57.783778 1 webhook.go:132] "using dynamic certificate generating using CA stored in Secret resource" logger="cert-manager.webhook.webhook" secret_namespace="cert-manager" secret_name="cert-manager-webhook-ca" 2025-12-08T17:55:57.783809189+00:00 stderr F I1208 17:55:57.783797 1 webhook.go:144] "serving insecurely as tls certificate data not provided" logger="cert-manager.webhook.webhook" 2025-12-08T17:55:57.784379895+00:00 stderr F I1208 17:55:57.784356 1 server.go:192] "listening for insecure healthz connections" logger="cert-manager.webhook" address=6080 2025-12-08T17:55:57.784469787+00:00 stderr F I1208 17:55:57.784452 1 server.go:183] "Registering webhook" logger="cert-manager.controller-runtime.webhook" path="/mutate" 2025-12-08T17:55:57.784512398+00:00 stderr F I1208 17:55:57.784497 1 server.go:183] "Registering webhook" logger="cert-manager.controller-runtime.webhook" path="/validate" 2025-12-08T17:55:57.784546599+00:00 stderr F I1208 17:55:57.784535 1 server.go:208] "Starting metrics server" logger="cert-manager.controller-runtime.metrics" 2025-12-08T17:55:57.784670413+00:00 stderr F I1208 17:55:57.784652 
1 server.go:247] "Serving metrics server" logger="cert-manager.controller-runtime.metrics" bindAddress="0.0.0.0:9402" secure=false 2025-12-08T17:55:57.784796676+00:00 stderr F I1208 17:55:57.784753 1 server.go:191] "Starting webhook server" logger="cert-manager.controller-runtime.webhook" 2025-12-08T17:55:57.785097654+00:00 stderr F I1208 17:55:57.785026 1 server.go:242] "Serving webhook server" logger="cert-manager.controller-runtime.webhook" host="" port=10250 2025-12-08T17:55:57.803728546+00:00 stderr F E1208 17:55:57.803663 1 dynamic_source.go:221] "Failed to generate serving certificate, retrying..." err="no tls.Certificate available yet, try again later" logger="cert-manager" interval="1s" 2025-12-08T17:55:57.805063272+00:00 stderr F I1208 17:55:57.805015 1 reflector.go:376] Caches populated for *v1.Secret from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:55:57.886621831+00:00 stderr F I1208 17:55:57.886560 1 authority.go:273] "Will regenerate CA" logger="cert-manager" reason="CA secret not found" 2025-12-08T17:55:57.893993362+00:00 stderr F I1208 17:55:57.893918 1 authority.go:416] "Created new root CA Secret" logger="cert-manager" 2025-12-08T17:55:57.895110364+00:00 stderr F I1208 17:55:57.895065 1 authority.go:293] "Detected change in CA secret data, update current CA data and notify watches" logger="cert-manager" 2025-12-08T17:55:58.792582770+00:00 stderr F I1208 17:55:58.792500 1 dynamic_source.go:290] "Updated cert-manager TLS certificate" logger="cert-manager" DNSNames=["cert-manager-webhook","cert-manager-webhook.cert-manager","cert-manager-webhook.cert-manager.svc"] 2025-12-08T17:55:58.792627441+00:00 stderr F I1208 17:55:58.792591 1 dynamic_source.go:172] "Detected root CA rotation - regenerating serving certificates" logger="cert-manager" 2025-12-08T17:55:58.799442927+00:00 stderr F I1208 17:55:58.799381 1 dynamic_source.go:290] "Updated cert-manager TLS certificate" logger="cert-manager" DNSNames=["cert-manager-webhook","cert-manager-webhook.cert-manager","cert-manager-webhook.cert-manager.svc"] ././@LongLink0000644000000000000000000000026500000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-m0000755000175000017500000000000015115611514033065 5ustar zuulzuul././@LongLink0000644000000000000000000000031000000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f/controller-manager/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-m0000755000175000017500000000000015115611521033063 5ustar zuulzuul././@LongLink0000644000000000000000000000031500000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f/controller-manager/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-m0000644000175000017500000020257615115611521033101 0ustar zuulzuul2025-12-08T17:47:25.146860277+00:00 stderr F I1208 17:47:25.146742 1 cmd.go:253] Using service-serving-cert provided certificates 
2025-12-08T17:47:25.146860277+00:00 stderr F I1208 17:47:25.146787 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:47:25.147322462+00:00 stderr F I1208 17:47:25.147283 1 observer_polling.go:159] Starting file observer 2025-12-08T17:47:25.147757175+00:00 stderr F I1208 17:47:25.147712 1 builder.go:304] openshift-controller-manager version 4.20.0-202510211040.p2.gd9e543d.assembly.stream.el9-d9e543d-d9e543dd31e981f279c447e4f92f0dac3f665f9e 2025-12-08T17:47:25.148502438+00:00 stderr F I1208 17:47:25.148451 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:47:25.396680691+00:00 stderr F I1208 17:47:25.396581 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:47:25.403972731+00:00 stderr F I1208 17:47:25.402728 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:47:25.403972731+00:00 stderr F I1208 17:47:25.402765 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:47:25.403972731+00:00 stderr F I1208 17:47:25.402815 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400 2025-12-08T17:47:25.403972731+00:00 stderr F I1208 17:47:25.402830 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200 2025-12-08T17:47:25.409538236+00:00 stderr F I1208 17:47:25.409474 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:47:25.409538236+00:00 stderr F W1208 17:47:25.409508 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:47:25.409538236+00:00 stderr F W1208 17:47:25.409518 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:47:25.409538236+00:00 stderr F I1208 17:47:25.409521 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:47:25.409570207+00:00 stderr F W1208 17:47:25.409529 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:47:25.409570207+00:00 stderr F W1208 17:47:25.409562 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:47:25.409583648+00:00 stderr F W1208 17:47:25.409568 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:47:25.409583648+00:00 stderr F W1208 17:47:25.409574 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:47:25.412594662+00:00 stderr F I1208 17:47:25.412544 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:47:25.412972634+00:00 stderr F I1208 17:47:25.412933 1 leaderelection.go:257] attempting to acquire leader lease openshift-controller-manager/openshift-master-controllers... 
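Editor's note: the "attempting to acquire leader lease openshift-controller-manager/openshift-master-controllers..." line (and the later "successfully acquired lease" / "became leader" event) is client-go lease-based leader election. A hedged sketch of that mechanism, reusing the namespace and lease name from the log but with placeholder identity and timing values:

    package main

    import (
        "context"
        "log"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/leaderelection"
        "k8s.io/client-go/tools/leaderelection/resourcelock"
    )

    // runWithLease blocks until the Lease is acquired, then invokes run; it is an
    // illustrative sketch, not the operator's actual wiring or tuned durations.
    func runWithLease(ctx context.Context, client kubernetes.Interface, id string, run func(context.Context)) {
        lock := &resourcelock.LeaseLock{
            LeaseMeta: metav1.ObjectMeta{
                Namespace: "openshift-controller-manager",
                Name:      "openshift-master-controllers",
            },
            Client:     client.CoordinationV1(),
            LockConfig: resourcelock.ResourceLockConfig{Identity: id},
        }

        leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
            Lock:            lock,
            LeaseDuration:   137 * time.Second, // placeholder durations
            RenewDeadline:   107 * time.Second,
            RetryPeriod:     26 * time.Second,
            ReleaseOnCancel: true,
            Callbacks: leaderelection.LeaderCallbacks{
                OnStartedLeading: run, // start controllers only once the lease is held
                OnStoppedLeading: func() { log.Printf("%s lost the lease", id) },
            },
        })
    }

Controllers are started only inside OnStartedLeading, which is why the "Starting ..." / "Started ..." controller lines in this log follow the lease acquisition.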
2025-12-08T17:47:25.414910244+00:00 stderr F I1208 17:47:25.414841 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:47:25.414955827+00:00 stderr F I1208 17:47:25.414928 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:47:25.414984178+00:00 stderr F I1208 17:47:25.414966 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:47:25.414984178+00:00 stderr F I1208 17:47:25.414977 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:47:25.415014809+00:00 stderr F I1208 17:47:25.414993 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:47:25.415014809+00:00 stderr F I1208 17:47:25.415002 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:47:25.415074150+00:00 stderr F I1208 17:47:25.415040 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"controller-manager.openshift-controller-manager.svc\" [serving] validServingFor=[controller-manager.openshift-controller-manager.svc,controller-manager.openshift-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:47:25.415011279 +0000 UTC))" 2025-12-08T17:47:25.415315468+00:00 stderr F I1208 17:47:25.415283 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765216045\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765216045\" (2025-12-08 16:47:25 +0000 UTC to 2028-12-08 16:47:25 +0000 UTC (now=2025-12-08 17:47:25.415244686 +0000 UTC))" 2025-12-08T17:47:25.415315468+00:00 stderr F I1208 17:47:25.415305 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:47:25.415353389+00:00 stderr F I1208 17:47:25.415324 1 genericapiserver.go:696] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:47:25.415353389+00:00 stderr F I1208 17:47:25.415346 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:47:25.415448772+00:00 stderr F I1208 17:47:25.415417 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:47:25.417061832+00:00 stderr F I1208 17:47:25.417024 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:25.417083873+00:00 stderr F I1208 17:47:25.417024 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:25.419070836+00:00 stderr F I1208 17:47:25.418791 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:25.419569801+00:00 stderr F I1208 17:47:25.419533 1 leaderelection.go:271] successfully acquired lease 
openshift-controller-manager/openshift-master-controllers 2025-12-08T17:47:25.419821269+00:00 stderr F I1208 17:47:25.419758 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-controller-manager", Name:"openshift-master-controllers", UID:"cc6e5479-520b-4462-9d3d-114ac8ace3c5", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"39283", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' controller-manager-5cb6f9d449-mjxkv_0d113863-5b3a-498c-aca2-924550d62737 became leader 2025-12-08T17:47:25.420637586+00:00 stderr F I1208 17:47:25.420597 1 controller_manager.go:35] DeploymentConfig controller using images from "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:472481b81b280ece6218fbb410c2a32ea6c826e5ac56b95f5935fa37773be0af" 2025-12-08T17:47:25.420637586+00:00 stderr F I1208 17:47:25.420610 1 controller_manager.go:41] Build controller using images from "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c2a80d6dd943dbbb5c0bc63f4aa17d55e44dbde22a3ea4e6a41a32930dc4ac77" 2025-12-08T17:47:25.421759921+00:00 stderr F W1208 17:47:25.421681 1 controller_manager.go:88] "openshift.io/default-rolebindings" is disabled 2025-12-08T17:47:25.421759921+00:00 stderr F I1208 17:47:25.421695 1 controller_manager.go:92] Starting "openshift.io/build-config-change" 2025-12-08T17:47:25.425106806+00:00 stderr F I1208 17:47:25.425061 1 controller_manager.go:101] Started "openshift.io/build-config-change" 2025-12-08T17:47:25.425106806+00:00 stderr F I1208 17:47:25.425071 1 controller_manager.go:92] Starting "openshift.io/builder-rolebindings" 2025-12-08T17:47:25.427358157+00:00 stderr F I1208 17:47:25.426730 1 controller_manager.go:101] Started "openshift.io/builder-rolebindings" 2025-12-08T17:47:25.427358157+00:00 stderr F I1208 17:47:25.426743 1 controller_manager.go:92] Starting "openshift.io/deploymentconfig" 2025-12-08T17:47:25.427358157+00:00 stderr F I1208 17:47:25.426851 1 defaultrolebindings.go:154] Starting BuilderRoleBindingController 2025-12-08T17:47:25.427358157+00:00 stderr F I1208 17:47:25.426861 1 shared_informer.go:350] "Waiting for caches to sync" controller="BuilderRoleBindingController" 2025-12-08T17:47:25.430178076+00:00 stderr F I1208 17:47:25.430079 1 controller_manager.go:101] Started "openshift.io/deploymentconfig" 2025-12-08T17:47:25.430178076+00:00 stderr F I1208 17:47:25.430099 1 controller_manager.go:92] Starting "openshift.io/deployer-rolebindings" 2025-12-08T17:47:25.430266368+00:00 stderr F I1208 17:47:25.430220 1 factory.go:78] Starting deploymentconfig controller 2025-12-08T17:47:25.433771918+00:00 stderr F I1208 17:47:25.433715 1 controller_manager.go:101] Started "openshift.io/deployer-rolebindings" 2025-12-08T17:47:25.433771918+00:00 stderr F I1208 17:47:25.433736 1 controller_manager.go:92] Starting "openshift.io/image-trigger" 2025-12-08T17:47:25.434047477+00:00 stderr F I1208 17:47:25.433915 1 defaultrolebindings.go:154] Starting DeployerRoleBindingController 2025-12-08T17:47:25.434047477+00:00 stderr F I1208 17:47:25.433932 1 shared_informer.go:350] "Waiting for caches to sync" controller="DeployerRoleBindingController" 2025-12-08T17:47:25.444704193+00:00 stderr F I1208 17:47:25.444613 1 controller_manager.go:101] Started "openshift.io/image-trigger" 2025-12-08T17:47:25.444704193+00:00 stderr F I1208 17:47:25.444633 1 controller_manager.go:92] Starting "openshift.io/unidling" 2025-12-08T17:47:25.444792885+00:00 stderr F I1208 17:47:25.444755 1 image_trigger_controller.go:229] Starting trigger controller 
2025-12-08T17:47:25.515623075+00:00 stderr F I1208 17:47:25.515550 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:47:25.515662506+00:00 stderr F I1208 17:47:25.515651 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:47:25.515769870+00:00 stderr F I1208 17:47:25.515739 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:47:25.515941616+00:00 stderr F I1208 17:47:25.515910 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:47:25.515866273 +0000 UTC))" 2025-12-08T17:47:25.516172413+00:00 stderr F I1208 17:47:25.516150 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"controller-manager.openshift-controller-manager.svc\" [serving] validServingFor=[controller-manager.openshift-controller-manager.svc,controller-manager.openshift-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:47:25.516132852 +0000 UTC))" 2025-12-08T17:47:25.516434691+00:00 stderr F I1208 17:47:25.516407 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765216045\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765216045\" (2025-12-08 16:47:25 +0000 UTC to 2028-12-08 16:47:25 +0000 UTC (now=2025-12-08 17:47:25.516361679 +0000 UTC))" 2025-12-08T17:47:25.516947777+00:00 stderr F I1208 17:47:25.516920 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:47:25.516870915 +0000 UTC))" 2025-12-08T17:47:25.516963757+00:00 stderr F I1208 17:47:25.516955 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:47:25.516939257 +0000 UTC))" 2025-12-08T17:47:25.516993988+00:00 stderr F I1208 17:47:25.516978 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:47:25.516963397 +0000 UTC))" 2025-12-08T17:47:25.517038190+00:00 stderr F I1208 17:47:25.517013 1 
tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:47:25.516989898 +0000 UTC))" 2025-12-08T17:47:25.517072081+00:00 stderr F I1208 17:47:25.517057 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:47:25.51702946 +0000 UTC))" 2025-12-08T17:47:25.517100002+00:00 stderr F I1208 17:47:25.517085 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:47:25.517069191 +0000 UTC))" 2025-12-08T17:47:25.517428822+00:00 stderr F I1208 17:47:25.517397 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:47:25.517095552 +0000 UTC))" 2025-12-08T17:47:25.517441262+00:00 stderr F I1208 17:47:25.517434 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:47:25.517418432 +0000 UTC))" 2025-12-08T17:47:25.517458943+00:00 stderr F I1208 17:47:25.517453 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:47:25.517441082 +0000 UTC))" 2025-12-08T17:47:25.517618198+00:00 stderr F I1208 17:47:25.517595 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:47:25.517582987 +0000 UTC))" 2025-12-08T17:47:25.517627818+00:00 stderr F I1208 17:47:25.517618 1 tlsconfig.go:181] "Loaded client CA" index=10 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:47:25.517606317 +0000 UTC))" 2025-12-08T17:47:25.518008350+00:00 stderr F I1208 17:47:25.517938 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"controller-manager.openshift-controller-manager.svc\" [serving] validServingFor=[controller-manager.openshift-controller-manager.svc,controller-manager.openshift-controller-manager.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:47:25.517921307 +0000 UTC))" 2025-12-08T17:47:25.518229827+00:00 stderr F I1208 17:47:25.518172 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765216045\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765216045\" (2025-12-08 16:47:25 +0000 UTC to 2028-12-08 16:47:25 +0000 UTC (now=2025-12-08 17:47:25.518157575 +0000 UTC))" 2025-12-08T17:47:25.625726491+00:00 stderr F I1208 17:47:25.625655 1 controller_manager.go:101] Started "openshift.io/unidling" 2025-12-08T17:47:25.625726491+00:00 stderr F I1208 17:47:25.625682 1 controller_manager.go:92] Starting "openshift.io/origin-namespace" 2025-12-08T17:47:25.826468240+00:00 stderr F I1208 17:47:25.826420 1 controller_manager.go:101] Started "openshift.io/origin-namespace" 2025-12-08T17:47:25.826542162+00:00 stderr F I1208 17:47:25.826529 1 controller_manager.go:92] Starting "openshift.io/build" 2025-12-08T17:47:26.564579405+00:00 stderr F I1208 17:47:26.564507 1 reflector.go:430] "Caches populated" type="*v1.Event" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:26.625132132+00:00 stderr F I1208 17:47:26.625033 1 controller_manager.go:101] Started "openshift.io/build" 2025-12-08T17:47:26.625132132+00:00 stderr F I1208 17:47:26.625063 1 controller_manager.go:92] Starting "openshift.io/serviceaccount-pull-secrets" 2025-12-08T17:47:26.826455719+00:00 stderr F I1208 17:47:26.826174 1 controller_manager.go:101] Started "openshift.io/serviceaccount-pull-secrets" 2025-12-08T17:47:26.826455719+00:00 stderr F I1208 17:47:26.826411 1 controller_manager.go:92] Starting "openshift.io/deployer-serviceaccount" 2025-12-08T17:47:26.826455719+00:00 stderr F I1208 17:47:26.826200 1 keyid_observation_controller.go:164] "Starting controller" name="openshift.io/internal-image-registry-pull-secrets_kids" 2025-12-08T17:47:26.826506640+00:00 stderr F I1208 17:47:26.826450 1 shared_informer.go:350] "Waiting for caches to sync" controller="openshift.io/internal-image-registry-pull-secrets_kids" 2025-12-08T17:47:26.826506640+00:00 stderr F I1208 17:47:26.826245 1 service_account_controller.go:338] "Starting controller" name="openshift.io/internal-image-registry-pull-secrets_service-account" 2025-12-08T17:47:26.826506640+00:00 stderr F I1208 17:47:26.826470 1 shared_informer.go:350] "Waiting for caches to sync" controller="openshift.io/internal-image-registry-pull-secrets_service-account" 2025-12-08T17:47:26.826506640+00:00 stderr F I1208 17:47:26.826255 1 
image_pull_secret_controller.go:372] "Starting controller" name="openshift.io/internal-image-registry-pull-secrets_image-pull-secret" 2025-12-08T17:47:26.826506640+00:00 stderr F I1208 17:47:26.826483 1 shared_informer.go:350] "Waiting for caches to sync" controller="openshift.io/internal-image-registry-pull-secrets_image-pull-secret" 2025-12-08T17:47:26.826506640+00:00 stderr F I1208 17:47:26.826260 1 legacy_token_secret_controller.go:103] "Starting controller" name="openshift.io/internal-image-registry-pull-secrets_legacy-token-secret" 2025-12-08T17:47:26.826506640+00:00 stderr F I1208 17:47:26.826493 1 shared_informer.go:350] "Waiting for caches to sync" controller="openshift.io/internal-image-registry-pull-secrets_legacy-token-secret" 2025-12-08T17:47:26.826506640+00:00 stderr F I1208 17:47:26.826274 1 registry_urls_observation_controller.go:140] "Starting controller" name="openshift.io/internal-image-registry-pull-secrets_urls" 2025-12-08T17:47:26.826519471+00:00 stderr F I1208 17:47:26.826503 1 shared_informer.go:350] "Waiting for caches to sync" controller="openshift.io/internal-image-registry-pull-secrets_urls" 2025-12-08T17:47:26.826519471+00:00 stderr F I1208 17:47:26.826318 1 legacy_image_pull_secret_controller.go:131] "Starting controller" name="openshift.io/internal-image-registry-pull-secrets_legacy-image-pull-secret" 2025-12-08T17:47:26.826528491+00:00 stderr F I1208 17:47:26.826515 1 shared_informer.go:350] "Waiting for caches to sync" controller="openshift.io/internal-image-registry-pull-secrets_legacy-image-pull-secret" 2025-12-08T17:47:27.025747613+00:00 stderr F I1208 17:47:27.025669 1 controller_manager.go:101] Started "openshift.io/deployer-serviceaccount" 2025-12-08T17:47:27.025747613+00:00 stderr F I1208 17:47:27.025701 1 controller_manager.go:92] Starting "openshift.io/deployer" 2025-12-08T17:47:27.025783424+00:00 stderr F I1208 17:47:27.025775 1 serviceaccounts_controller.go:114] "Starting service account controller" 2025-12-08T17:47:27.025793524+00:00 stderr F I1208 17:47:27.025788 1 shared_informer.go:350] "Waiting for caches to sync" controller="service account" 2025-12-08T17:47:27.226370038+00:00 stderr F I1208 17:47:27.225983 1 controller_manager.go:101] Started "openshift.io/deployer" 2025-12-08T17:47:27.226370038+00:00 stderr F I1208 17:47:27.226335 1 controller_manager.go:92] Starting "openshift.io/image-import" 2025-12-08T17:47:27.226402619+00:00 stderr F I1208 17:47:27.226050 1 factory.go:73] Starting deployer controller 2025-12-08T17:47:27.429573935+00:00 stderr F I1208 17:47:27.429481 1 imagestream_controller.go:66] Starting image stream controller 2025-12-08T17:47:27.627047001+00:00 stderr F I1208 17:47:27.626327 1 controller_manager.go:101] Started "openshift.io/image-import" 2025-12-08T17:47:27.627047001+00:00 stderr F I1208 17:47:27.626365 1 controller_manager.go:92] Starting "openshift.io/image-signature-import" 2025-12-08T17:47:27.627047001+00:00 stderr F I1208 17:47:27.626383 1 scheduled_image_controller.go:68] Starting scheduled import controller 2025-12-08T17:47:27.826026924+00:00 stderr F I1208 17:47:27.825935 1 controller_manager.go:101] Started "openshift.io/image-signature-import" 2025-12-08T17:47:27.826026924+00:00 stderr F I1208 17:47:27.825983 1 controller_manager.go:92] Starting "openshift.io/templateinstancefinalizer" 2025-12-08T17:47:28.224810008+00:00 stderr F I1208 17:47:28.224752 1 controller_manager.go:101] Started "openshift.io/templateinstancefinalizer" 2025-12-08T17:47:28.225059416+00:00 stderr F I1208 17:47:28.225032 1 
controller_manager.go:92] Starting "openshift.io/serviceaccount" 2025-12-08T17:47:28.225171600+00:00 stderr F I1208 17:47:28.225151 1 serviceaccount.go:16] openshift.io/serviceaccount: no managed names specified 2025-12-08T17:47:28.225287513+00:00 stderr F W1208 17:47:28.225251 1 controller_manager.go:98] Skipping "openshift.io/serviceaccount" 2025-12-08T17:47:28.225354285+00:00 stderr F I1208 17:47:28.225335 1 controller_manager.go:92] Starting "openshift.io/builder-serviceaccount" 2025-12-08T17:47:28.225472799+00:00 stderr F I1208 17:47:28.224988 1 templateinstance_finalizer.go:189] TemplateInstanceFinalizer controller waiting for cache sync 2025-12-08T17:47:28.425084612+00:00 stderr F I1208 17:47:28.425002 1 controller_manager.go:101] Started "openshift.io/builder-serviceaccount" 2025-12-08T17:47:28.425084612+00:00 stderr F I1208 17:47:28.425037 1 controller_manager.go:92] Starting "openshift.io/image-puller-rolebindings" 2025-12-08T17:47:28.425145564+00:00 stderr F I1208 17:47:28.425096 1 serviceaccounts_controller.go:114] "Starting service account controller" 2025-12-08T17:47:28.425145564+00:00 stderr F I1208 17:47:28.425113 1 shared_informer.go:350] "Waiting for caches to sync" controller="service account" 2025-12-08T17:47:28.625957386+00:00 stderr F I1208 17:47:28.625867 1 controller_manager.go:101] Started "openshift.io/image-puller-rolebindings" 2025-12-08T17:47:28.626030388+00:00 stderr F I1208 17:47:28.626014 1 controller_manager.go:92] Starting "openshift.io/templateinstance" 2025-12-08T17:47:28.626106080+00:00 stderr F I1208 17:47:28.626091 1 defaultrolebindings.go:154] Starting ImagePullerRoleBindingController 2025-12-08T17:47:28.626149632+00:00 stderr F I1208 17:47:28.626134 1 shared_informer.go:350] "Waiting for caches to sync" controller="ImagePullerRoleBindingController" 2025-12-08T17:47:29.625675146+00:00 stderr F I1208 17:47:29.625630 1 controller_manager.go:101] Started "openshift.io/templateinstance" 2025-12-08T17:47:29.625742528+00:00 stderr F I1208 17:47:29.625733 1 controller_manager.go:106] Started Origin Controllers 2025-12-08T17:47:29.631816619+00:00 stderr F I1208 17:47:29.631663 1 reflector.go:430] "Caches populated" type="*v1.ImageDigestMirrorSet" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.632156340+00:00 stderr F I1208 17:47:29.632087 1 reflector.go:430] "Caches populated" type="*v1.ImageTagMirrorSet" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.633442300+00:00 stderr F I1208 17:47:29.633396 1 reflector.go:430] "Caches populated" type="*v1.Build" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.637602491+00:00 stderr F I1208 17:47:29.636490 1 reflector.go:430] "Caches populated" type="*v1.CronJob" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.637602491+00:00 stderr F I1208 17:47:29.637040 1 reflector.go:430] "Caches populated" type="*v1.Image" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.637620332+00:00 stderr F I1208 17:47:29.637602 1 reflector.go:430] "Caches populated" type="*v1.DaemonSet" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.641255896+00:00 stderr F I1208 17:47:29.641205 1 reflector.go:430] "Caches populated" type="*v1alpha1.ImageContentSourcePolicy" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.642148094+00:00 stderr F I1208 17:47:29.642115 1 reflector.go:430] "Caches 
populated" type="*v1.Proxy" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.642415903+00:00 stderr F I1208 17:47:29.642382 1 reflector.go:430] "Caches populated" type="*v1.StatefulSet" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.644217899+00:00 stderr F I1208 17:47:29.644169 1 warnings.go:110] "Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+" 2025-12-08T17:47:29.644779367+00:00 stderr F I1208 17:47:29.644733 1 reflector.go:430] "Caches populated" type="*v1.TemplateInstance" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.650455125+00:00 stderr F I1208 17:47:29.650405 1 reflector.go:430] "Caches populated" type="*v1.BuildConfig" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.650455125+00:00 stderr F I1208 17:47:29.650445 1 reflector.go:430] "Caches populated" type="*v1.Build" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.653082798+00:00 stderr F I1208 17:47:29.653043 1 reflector.go:430] "Caches populated" type="*v1.DeploymentConfig" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.653671157+00:00 stderr F I1208 17:47:29.653644 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.654429361+00:00 stderr F I1208 17:47:29.654283 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.678587801+00:00 stderr F I1208 17:47:29.674824 1 warnings.go:110] "Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+" 2025-12-08T17:47:29.686434098+00:00 stderr F I1208 17:47:29.686387 1 reflector.go:430] "Caches populated" type="*v1.ImageStream" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.727414918+00:00 stderr F I1208 17:47:29.726279 1 templateinstance_finalizer.go:194] Starting TemplateInstanceFinalizer controller 2025-12-08T17:47:29.727414918+00:00 stderr F I1208 17:47:29.726404 1 buildconfig_controller.go:212] Starting buildconfig controller 2025-12-08T17:47:29.727414918+00:00 stderr F I1208 17:47:29.726490 1 templateinstance_controller.go:297] Starting TemplateInstance controller 2025-12-08T17:47:29.731305791+00:00 stderr F I1208 17:47:29.731111 1 reflector.go:430] "Caches populated" type="*v1.Image" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.824807435+00:00 stderr F I1208 17:47:29.824744 1 reflector.go:430] "Caches populated" type="*v1.ReplicationController" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.830334739+00:00 stderr F I1208 17:47:29.830292 1 factory.go:85] deploymentconfig controller caches are synced. Starting workers. 2025-12-08T17:47:30.145966174+00:00 stderr F I1208 17:47:30.145901 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:30.240500480+00:00 stderr F I1208 17:47:30.240446 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:30.326488377+00:00 stderr F I1208 17:47:30.326421 1 factory.go:80] Deployer controller caches are synced. Starting workers. 
2025-12-08T17:47:30.434148836+00:00 stderr F I1208 17:47:30.433584 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:30.631248241+00:00 stderr F I1208 17:47:30.631185 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:30.822132459+00:00 stderr F I1208 17:47:30.822038 1 request.go:752] "Waited before sending request" delay="1.192770768s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/secrets?limit=500&resourceVersion=0" 2025-12-08T17:47:30.888656083+00:00 stderr F I1208 17:47:30.888577 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:30.926827715+00:00 stderr F I1208 17:47:30.926750 1 shared_informer.go:357] "Caches are synced" controller="openshift.io/internal-image-registry-pull-secrets_image-pull-secret" 2025-12-08T17:47:30.926827715+00:00 stderr F I1208 17:47:30.926784 1 shared_informer.go:357] "Caches are synced" controller="openshift.io/internal-image-registry-pull-secrets_legacy-token-secret" 2025-12-08T17:47:30.926827715+00:00 stderr F I1208 17:47:30.926785 1 image_pull_secret_controller.go:398] Waiting for service account token signing cert to be observed 2025-12-08T17:47:30.926827715+00:00 stderr F I1208 17:47:30.926750 1 shared_informer.go:357] "Caches are synced" controller="openshift.io/internal-image-registry-pull-secrets_legacy-image-pull-secret" 2025-12-08T17:47:30.926900437+00:00 stderr F I1208 17:47:30.926813 1 shared_informer.go:357] "Caches are synced" controller="openshift.io/internal-image-registry-pull-secrets_kids" 2025-12-08T17:47:30.926900437+00:00 stderr F I1208 17:47:30.926818 1 shared_informer.go:357] "Caches are synced" controller="openshift.io/internal-image-registry-pull-secrets_service-account" 2025-12-08T17:47:30.926900437+00:00 stderr F I1208 17:47:30.926847 1 image_pull_secret_controller.go:384] Waiting for image registry urls to be observed 2025-12-08T17:47:30.926900437+00:00 stderr F I1208 17:47:30.926847 1 keyid_observation_controller.go:172] "Started controller" name="openshift.io/internal-image-registry-pull-secrets_kids" 2025-12-08T17:47:30.926900437+00:00 stderr F I1208 17:47:30.926826 1 legacy_image_pull_secret_controller.go:138] "Started controller" name="openshift.io/internal-image-registry-pull-secrets_legacy-image-pull-secret" 2025-12-08T17:47:30.926900437+00:00 stderr F I1208 17:47:30.926859 1 service_account_controller.go:345] "Started controller" name="openshift.io/internal-image-registry-pull-secrets_service-account" 2025-12-08T17:47:30.926900437+00:00 stderr F I1208 17:47:30.926821 1 legacy_token_secret_controller.go:110] "Started controller" name="openshift.io/internal-image-registry-pull-secrets_legacy-token-secret" 2025-12-08T17:47:30.927126174+00:00 stderr F I1208 17:47:30.927093 1 image_pull_secret_controller.go:401] "Observed service account token signing certs" kids=["aV377pYUivc_NpjUTRV8mkI5FRM9rTZehB0Fpev8Yjk"] 2025-12-08T17:47:31.028962600+00:00 stderr F I1208 17:47:31.028896 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:31.125441957+00:00 stderr F I1208 17:47:31.125342 1 build_controller.go:502] Starting build controller 2025-12-08T17:47:31.125441957+00:00 stderr F I1208 17:47:31.125390 1 
build_controller.go:504] OpenShift image registry hostname: image-registry.openshift-image-registry.svc:5000 2025-12-08T17:47:31.225124185+00:00 stderr F I1208 17:47:31.225069 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:31.227039395+00:00 stderr F I1208 17:47:31.227014 1 shared_informer.go:357] "Caches are synced" controller="openshift.io/internal-image-registry-pull-secrets_urls" 2025-12-08T17:47:31.227039395+00:00 stderr F I1208 17:47:31.227031 1 registry_urls_observation_controller.go:147] "Started controller" name="openshift.io/internal-image-registry-pull-secrets_urls" 2025-12-08T17:47:31.227102387+00:00 stderr F I1208 17:47:31.227081 1 image_pull_secret_controller.go:388] "Observed image registry urls" urls=["10.217.5.148:5000","default-route-openshift-image-registry.apps-crc.testing","image-registry.openshift-image-registry.svc.cluster.local:5000","image-registry.openshift-image-registry.svc:5000"] 2025-12-08T17:47:31.227113557+00:00 stderr F I1208 17:47:31.227098 1 image_pull_secret_controller.go:445] "Started controller" name="openshift.io/internal-image-registry-pull-secrets_image-pull-secret" 2025-12-08T17:47:31.425374049+00:00 stderr F I1208 17:47:31.425303 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:31.429537080+00:00 stderr F I1208 17:47:31.426283 1 shared_informer.go:357] "Caches are synced" controller="ImagePullerRoleBindingController" 2025-12-08T17:47:31.429537080+00:00 stderr F I1208 17:47:31.427013 1 shared_informer.go:357] "Caches are synced" controller="BuilderRoleBindingController" 2025-12-08T17:47:31.434821156+00:00 stderr F I1208 17:47:31.434753 1 shared_informer.go:357] "Caches are synced" controller="DeployerRoleBindingController" 2025-12-08T17:47:31.525868432+00:00 stderr F I1208 17:47:31.525773 1 shared_informer.go:357] "Caches are synced" controller="service account" 2025-12-08T17:47:31.526050918+00:00 stderr F I1208 17:47:31.526020 1 shared_informer.go:357] "Caches are synced" controller="service account" 2025-12-08T17:53:18.737830585+00:00 stderr F I1208 17:53:18.737072 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openstack" name="builder-dockercfg-99spk" expected=4 actual=0 2025-12-08T17:53:18.737871467+00:00 stderr F I1208 17:53:18.737831 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openstack" name="builder-dockercfg-99spk" serviceaccount="builder" 2025-12-08T17:53:18.737933538+00:00 stderr F I1208 17:53:18.737170 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openstack" name="deployer-dockercfg-l2v7l" expected=4 actual=0 2025-12-08T17:53:18.737944038+00:00 stderr F I1208 17:53:18.737928 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openstack" name="deployer-dockercfg-l2v7l" serviceaccount="deployer" 2025-12-08T17:53:18.739743178+00:00 stderr F I1208 17:53:18.739701 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openstack" name="default-dockercfg-st742" expected=4 actual=0 2025-12-08T17:53:18.739743178+00:00 stderr F I1208 17:53:18.739730 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openstack" name="default-dockercfg-st742" 
serviceaccount="default" 2025-12-08T17:53:19.360746922+00:00 stderr F I1208 17:53:19.360665 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openstack-operators" name="default-dockercfg-rvbrp" expected=4 actual=0 2025-12-08T17:53:19.360746922+00:00 stderr F I1208 17:53:19.360691 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openstack-operators" name="default-dockercfg-rvbrp" serviceaccount="default" 2025-12-08T17:53:19.362470900+00:00 stderr F I1208 17:53:19.362404 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openstack-operators" name="builder-dockercfg-f4vbv" expected=4 actual=0 2025-12-08T17:53:19.362470900+00:00 stderr F I1208 17:53:19.362424 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openstack-operators" name="builder-dockercfg-f4vbv" serviceaccount="builder" 2025-12-08T17:53:19.363316212+00:00 stderr F I1208 17:53:19.363241 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openstack-operators" name="deployer-dockercfg-r2w8n" expected=4 actual=0 2025-12-08T17:53:19.363316212+00:00 stderr F I1208 17:53:19.363256 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openstack-operators" name="deployer-dockercfg-r2w8n" serviceaccount="deployer" 2025-12-08T17:54:28.333174751+00:00 stderr F I1208 17:54:28.331213 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="deployer-dockercfg-s4kbm" expected=4 actual=0 2025-12-08T17:54:28.333174751+00:00 stderr F I1208 17:54:28.333126 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="deployer-dockercfg-s4kbm" serviceaccount="deployer" 2025-12-08T17:54:28.333259713+00:00 stderr F I1208 17:54:28.332327 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="builder-dockercfg-m8htt" expected=4 actual=0 2025-12-08T17:54:28.333259713+00:00 stderr F I1208 17:54:28.333221 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="builder-dockercfg-m8htt" serviceaccount="builder" 2025-12-08T17:54:28.333434958+00:00 stderr F I1208 17:54:28.332359 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="default-dockercfg-t7fjv" expected=4 actual=0 2025-12-08T17:54:28.333434958+00:00 stderr F I1208 17:54:28.333410 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="default-dockercfg-t7fjv" serviceaccount="default" 2025-12-08T17:54:35.676057046+00:00 stderr F I1208 17:54:35.675407 1 warnings.go:110] "Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+" 2025-12-08T17:54:53.171042582+00:00 stderr F I1208 17:54:53.169728 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="cert-manager-operator" name="builder-dockercfg-854ns" expected=4 actual=0 2025-12-08T17:54:53.171042582+00:00 stderr F I1208 17:54:53.170367 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" 
ns="cert-manager-operator" name="builder-dockercfg-854ns" serviceaccount="builder" 2025-12-08T17:54:53.171042582+00:00 stderr F I1208 17:54:53.169805 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="cert-manager-operator" name="deployer-dockercfg-fxnz5" expected=4 actual=0 2025-12-08T17:54:53.171042582+00:00 stderr F I1208 17:54:53.170723 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="cert-manager-operator" name="deployer-dockercfg-fxnz5" serviceaccount="deployer" 2025-12-08T17:54:53.173940630+00:00 stderr F I1208 17:54:53.173892 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="cert-manager-operator" name="default-dockercfg-lfngl" expected=4 actual=0 2025-12-08T17:54:53.173940630+00:00 stderr F I1208 17:54:53.173926 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="cert-manager-operator" name="default-dockercfg-lfngl" serviceaccount="default" 2025-12-08T17:55:12.170254932+00:00 stderr F I1208 17:55:12.168557 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openshift-operators" name="perses-dockercfg-47clb" expected=4 actual=0 2025-12-08T17:55:12.170254932+00:00 stderr F I1208 17:55:12.169074 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openshift-operators" name="perses-dockercfg-47clb" serviceaccount="perses" 2025-12-08T17:55:12.633454768+00:00 stderr F I1208 17:55:12.633380 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openshift-operators" name="obo-prometheus-operator-dockercfg-4qph4" expected=4 actual=0 2025-12-08T17:55:12.633454768+00:00 stderr F I1208 17:55:12.633422 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openshift-operators" name="obo-prometheus-operator-dockercfg-4qph4" serviceaccount="obo-prometheus-operator" 2025-12-08T17:55:13.653572709+00:00 stderr F I1208 17:55:13.653179 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openshift-operators" name="obo-prometheus-operator-admission-webhook-dockercfg-wpwsd" expected=4 actual=0 2025-12-08T17:55:13.653572709+00:00 stderr F I1208 17:55:13.653544 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openshift-operators" name="obo-prometheus-operator-admission-webhook-dockercfg-wpwsd" serviceaccount="obo-prometheus-operator-admission-webhook" 2025-12-08T17:55:14.221639727+00:00 stderr F I1208 17:55:14.221321 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openshift-operators" name="observability-operator-sa-dockercfg-lq686" expected=4 actual=0 2025-12-08T17:55:14.221639727+00:00 stderr F I1208 17:55:14.221603 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openshift-operators" name="observability-operator-sa-dockercfg-lq686" serviceaccount="observability-operator-sa" 2025-12-08T17:55:14.442656925+00:00 stderr F I1208 17:55:14.442592 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="elastic-operator-dockercfg-2vdv5" expected=4 actual=0 2025-12-08T17:55:14.442656925+00:00 stderr F 
I1208 17:55:14.442615 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="elastic-operator-dockercfg-2vdv5" serviceaccount="elastic-operator" 2025-12-08T17:55:15.039099534+00:00 stderr F I1208 17:55:15.039029 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openshift-operators" name="perses-operator-dockercfg-8bg9q" expected=4 actual=0 2025-12-08T17:55:15.039099534+00:00 stderr F I1208 17:55:15.039069 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openshift-operators" name="perses-operator-dockercfg-8bg9q" serviceaccount="perses-operator" 2025-12-08T17:55:28.229082845+00:00 stderr F I1208 17:55:28.228435 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="cert-manager-operator" name="cert-manager-operator-controller-manager-dockercfg-r7685" expected=4 actual=0 2025-12-08T17:55:28.229082845+00:00 stderr F I1208 17:55:28.229060 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="cert-manager-operator" name="cert-manager-operator-controller-manager-dockercfg-r7685" serviceaccount="cert-manager-operator-controller-manager" 2025-12-08T17:55:39.143975426+00:00 stderr F I1208 17:55:39.143257 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="cert-manager" name="builder-dockercfg-np96d" expected=4 actual=0 2025-12-08T17:55:39.143975426+00:00 stderr F I1208 17:55:39.143771 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="cert-manager" name="builder-dockercfg-np96d" serviceaccount="builder" 2025-12-08T17:55:39.144019067+00:00 stderr F I1208 17:55:39.143471 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="cert-manager" name="default-dockercfg-zmbrj" expected=4 actual=0 2025-12-08T17:55:39.144019067+00:00 stderr F I1208 17:55:39.144001 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="cert-manager" name="default-dockercfg-zmbrj" serviceaccount="default" 2025-12-08T17:55:39.148412868+00:00 stderr F I1208 17:55:39.148295 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="cert-manager" name="deployer-dockercfg-g5kmt" expected=4 actual=0 2025-12-08T17:55:39.148412868+00:00 stderr F I1208 17:55:39.148328 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="cert-manager" name="deployer-dockercfg-g5kmt" serviceaccount="deployer" 2025-12-08T17:55:40.634999549+00:00 stderr F I1208 17:55:40.634661 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="cert-manager" name="cert-manager-webhook-dockercfg-p6v7n" expected=4 actual=0 2025-12-08T17:55:40.634999549+00:00 stderr F I1208 17:55:40.634969 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="cert-manager" name="cert-manager-webhook-dockercfg-p6v7n" serviceaccount="cert-manager-webhook" 2025-12-08T17:55:42.702641575+00:00 stderr F I1208 17:55:42.701447 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="cert-manager" name="cert-manager-cainjector-dockercfg-ktkxz" expected=4 actual=0 2025-12-08T17:55:42.702730177+00:00 
stderr F I1208 17:55:42.702713 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="cert-manager" name="cert-manager-cainjector-dockercfg-ktkxz" serviceaccount="cert-manager-cainjector" 2025-12-08T17:55:50.223922927+00:00 stderr F I1208 17:55:50.223404 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="cert-manager" name="cert-manager-dockercfg-pkxzc" expected=4 actual=0 2025-12-08T17:55:50.224005559+00:00 stderr F I1208 17:55:50.223995 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="cert-manager" name="cert-manager-dockercfg-pkxzc" serviceaccount="cert-manager" 2025-12-08T17:56:07.102505732+00:00 stderr F I1208 17:56:07.101767 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="infrawatch-operators-dockercfg-bcx4t" expected=4 actual=0 2025-12-08T17:56:07.102505732+00:00 stderr F I1208 17:56:07.102490 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="infrawatch-operators-dockercfg-bcx4t" serviceaccount="infrawatch-operators" 2025-12-08T17:56:38.692033788+00:00 stderr F I1208 17:56:38.691458 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="interconnect-operator-dockercfg-xvhmb" expected=4 actual=0 2025-12-08T17:56:38.692033788+00:00 stderr F I1208 17:56:38.691989 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="interconnect-operator-dockercfg-xvhmb" serviceaccount="interconnect-operator" 2025-12-08T17:56:39.890455605+00:00 stderr F I1208 17:56:39.889142 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="service-telemetry-operator-dockercfg-tqm5c" expected=4 actual=0 2025-12-08T17:56:39.890455605+00:00 stderr F I1208 17:56:39.889676 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="service-telemetry-operator-dockercfg-tqm5c" serviceaccount="service-telemetry-operator" 2025-12-08T17:56:41.291440655+00:00 stderr F I1208 17:56:41.290901 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="smart-gateway-operator-dockercfg-7jw7l" expected=4 actual=0 2025-12-08T17:56:41.291440655+00:00 stderr F I1208 17:56:41.291412 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="smart-gateway-operator-dockercfg-7jw7l" serviceaccount="smart-gateway-operator" 2025-12-08T17:56:59.263706227+00:00 stderr F I1208 17:56:59.263113 1 reflector.go:430] "Caches populated" type="*v1.Event" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:57:28.515287657+00:00 stderr F I1208 17:57:28.514726 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="default-interconnect-dockercfg-nxt7g" expected=4 actual=0 2025-12-08T17:57:28.515287657+00:00 stderr F I1208 17:57:28.515239 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="default-interconnect-dockercfg-nxt7g" serviceaccount="default-interconnect" 
2025-12-08T17:57:30.895949569+00:00 stderr F I1208 17:57:30.895506 1 image_pull_secret_controller.go:434] "Observed service account token signing certs" kids=["aV377pYUivc_NpjUTRV8mkI5FRM9rTZehB0Fpev8Yjk"] 2025-12-08T17:57:31.226568443+00:00 stderr F I1208 17:57:31.226507 1 image_pull_secret_controller.go:424] "Observed image registry urls" urls=["10.217.5.148:5000","default-route-openshift-image-registry.apps-crc.testing","image-registry.openshift-image-registry.svc.cluster.local:5000","image-registry.openshift-image-registry.svc:5000"] 2025-12-08T17:57:31.782084581+00:00 stderr F I1208 17:57:31.781766 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="prometheus-stf-dockercfg-p6qm4" expected=4 actual=0 2025-12-08T17:57:31.782084581+00:00 stderr F I1208 17:57:31.781793 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="prometheus-stf-dockercfg-p6qm4" serviceaccount="prometheus-stf" 2025-12-08T17:57:41.281297278+00:00 stderr F I1208 17:57:41.280451 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="stf-prometheus-reader-dockercfg-8qhnd" expected=4 actual=0 2025-12-08T17:57:41.281297278+00:00 stderr F I1208 17:57:41.281204 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="stf-prometheus-reader-dockercfg-8qhnd" serviceaccount="stf-prometheus-reader" 2025-12-08T17:57:50.153280951+00:00 stderr F I1208 17:57:50.152731 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="alertmanager-stf-dockercfg-5zfwx" expected=4 actual=0 2025-12-08T17:57:50.153280951+00:00 stderr F I1208 17:57:50.153261 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="alertmanager-stf-dockercfg-5zfwx" serviceaccount="alertmanager-stf" 2025-12-08T17:58:02.015210610+00:00 stderr F I1208 17:58:02.014734 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="service-telemetry" name="smart-gateway-dockercfg-vjrnk" expected=4 actual=0 2025-12-08T17:58:02.015210610+00:00 stderr F I1208 17:58:02.015181 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="service-telemetry" name="smart-gateway-dockercfg-vjrnk" serviceaccount="smart-gateway" 2025-12-08T17:59:54.684636998+00:00 stderr F I1208 17:59:54.684135 1 warnings.go:110] "Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+" 2025-12-08T18:02:40.533602410+00:00 stderr F I1208 18:02:40.533149 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openshift-must-gather-gctth" name="default-dockercfg-ddssk" expected=4 actual=0 2025-12-08T18:02:40.533602410+00:00 stderr F I1208 18:02:40.533550 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openshift-must-gather-gctth" name="default-dockercfg-ddssk" serviceaccount="default" 2025-12-08T18:02:40.539314662+00:00 stderr F I1208 18:02:40.538867 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openshift-must-gather-gctth" name="builder-dockercfg-r5zhx" expected=4 
actual=0 2025-12-08T18:02:40.539314662+00:00 stderr F I1208 18:02:40.538939 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openshift-must-gather-gctth" name="builder-dockercfg-r5zhx" serviceaccount="builder" 2025-12-08T18:02:40.546578386+00:00 stderr F I1208 18:02:40.545206 1 image_pull_secret_controller.go:294] "Internal registry pull secret auth data does not contain the correct number of entries" ns="openshift-must-gather-gctth" name="deployer-dockercfg-f8h45" expected=4 actual=0 2025-12-08T18:02:40.546578386+00:00 stderr F I1208 18:02:40.545228 1 image_pull_secret_controller.go:176] "Refreshing image pull secret" ns="openshift-must-gather-gctth" name="deployer-dockercfg-f8h45" serviceaccount="deployer" 2025-12-08T18:04:56.181199817+00:00 stderr F E1208 18:04:56.180553 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/builder failed with : unable to update managed image pull secret: se**********ts \"builder-dockercfg-r5zhx\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.182443079+00:00 stderr F E1208 18:04:56.182392 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/default failed with : unable to update managed image pull secret: se**********ts \"default-dockercfg-ddssk\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.184757981+00:00 stderr F E1208 18:04:56.184697 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/deployer failed with : unable to update managed image pull secret: se**********ts \"deployer-dockercfg-f8h45\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.191802058+00:00 stderr F E1208 18:04:56.191732 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/builder failed with : unable to update managed image pull secret: se**********ts \"builder-dockercfg-r5zhx\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.192623629+00:00 stderr F E1208 18:04:56.192578 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/default failed with : unable to update managed image pull secret: se**********ts \"default-dockercfg-ddssk\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.193894723+00:00 stderr F E1208 18:04:56.193821 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/deployer failed with : unable to update managed image pull secret: se**********ts \"deployer-dockercfg-f8h45\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.208117160+00:00 stderr F E1208 18:04:56.208032 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/default failed with : unable to update managed image pull secret: se**********ts \"default-dockercfg-ddssk\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.208309465+00:00 stderr F E1208 18:04:56.208269 1 service_account_controller.go:368] "Unhandled Error" 
err="openshift-must-gather-gctth/builder failed with : unable to update managed image pull secret: se**********ts \"builder-dockercfg-r5zhx\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.208780578+00:00 stderr F E1208 18:04:56.208734 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/deployer failed with : unable to update managed image pull secret: se**********ts \"deployer-dockercfg-f8h45\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.235714542+00:00 stderr F E1208 18:04:56.235629 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/builder failed with : unable to update managed image pull secret: se**********ts \"builder-dockercfg-r5zhx\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.241478695+00:00 stderr F E1208 18:04:56.241407 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/deployer failed with : unable to update managed image pull secret: se**********ts \"deployer-dockercfg-f8h45\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.241586347+00:00 stderr F E1208 18:04:56.241564 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/default failed with : unable to update managed image pull secret: se**********ts \"default-dockercfg-ddssk\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.282114143+00:00 stderr F E1208 18:04:56.282049 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/builder failed with : unable to update managed image pull secret: se**********ts \"builder-dockercfg-r5zhx\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.286794076+00:00 stderr F E1208 18:04:56.286720 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/default failed with : unable to update managed image pull secret: se**********ts \"default-dockercfg-ddssk\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.287645800+00:00 stderr F E1208 18:04:56.287605 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/deployer failed with : unable to update managed image pull secret: se**********ts \"deployer-dockercfg-f8h45\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.366947442+00:00 stderr F E1208 18:04:56.366887 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/builder failed with : unable to update managed image pull secret: se**********ts \"builder-dockercfg-r5zhx\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.371737340+00:00 stderr F E1208 18:04:56.371672 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/default failed with : unable to update managed image pull secret: se**********ts 
\"default-dockercfg-ddssk\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.372529230+00:00 stderr F E1208 18:04:56.372497 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/deployer failed with : unable to update managed image pull secret: se**********ts \"deployer-dockercfg-f8h45\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.532098922+00:00 stderr F E1208 18:04:56.532014 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/builder failed with : unable to update managed image pull secret: se**********ts \"builder-dockercfg-r5zhx\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.537444634+00:00 stderr F E1208 18:04:56.537379 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/deployer failed with : unable to update managed image pull secret: se**********ts \"deployer-dockercfg-f8h45\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" 2025-12-08T18:04:56.538574214+00:00 stderr F E1208 18:04:56.538526 1 service_account_controller.go:368] "Unhandled Error" err="openshift-must-gather-gctth/default failed with : unable to update managed image pull secret: se**********ts \"default-dockercfg-ddssk\" is forbidden: unable to create new content in namespace openshift-must-gather-gctth because it is being terminated" ././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage0000755000175000017500000000000015115611513033037 5ustar zuulzuul././@LongLink0000644000000000000000000000027700000000000011611 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/migrator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage0000755000175000017500000000000015115611520033035 5ustar zuulzuul././@LongLink0000644000000000000000000000030400000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/migrator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage0000644000175000017500000000352315115611513033044 0ustar zuulzuul2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.812933 2 migrator.go:18] FLAG: --add_dir_header="false" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814476 2 migrator.go:18] FLAG: --alsologtostderr="true" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814482 2 migrator.go:18] FLAG: --kube-api-burst="1000" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814488 2 migrator.go:18] FLAG: --kube-api-qps="40" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814496 2 migrator.go:18] FLAG: --kubeconfig="" 
2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814501 2 migrator.go:18] FLAG: --log_backtrace_at=":0" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814518 2 migrator.go:18] FLAG: --log_dir="" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814522 2 migrator.go:18] FLAG: --log_file="" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814525 2 migrator.go:18] FLAG: --log_file_max_size="1800" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814528 2 migrator.go:18] FLAG: --logtostderr="true" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814531 2 migrator.go:18] FLAG: --one_output="false" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814534 2 migrator.go:18] FLAG: --skip_headers="false" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814536 2 migrator.go:18] FLAG: --skip_log_headers="false" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814540 2 migrator.go:18] FLAG: --stderrthreshold="2" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814543 2 migrator.go:18] FLAG: --v="2" 2025-12-08T17:44:19.820939100+00:00 stderr F I1208 17:44:19.814586 2 migrator.go:18] FLAG: --vmodule=""
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/graceful-termination/0.log
2025-12-08T17:44:21.448687030+00:00 stdout F Waiting for termination...
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-target-fhkjl_17b87002-b798-480a-8e17-83053d698239/network-check-target-container/0.log
2025-12-08T17:44:42.894378405+00:00 stdout F serving on 8080
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_marketplace-operator-547dbd544d-6bbtn_c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1/marketplace-operator/0.log
2025-12-08T17:48:10.471160010+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Go Version: go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" 2025-12-08T17:48:10.471160010+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Go OS/Arch: linux/amd64" 2025-12-08T17:48:10.471352376+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[metrics] Registering marketplace metrics" 2025-12-08T17:48:10.471352376+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[metrics] Serving marketplace metrics" 2025-12-08T17:48:10.471929243+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="TLS keys set, using https for metrics" 2025-12-08T17:48:10.528911898+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Config API is available" 2025-12-08T17:48:10.529003630+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="setting up scheme"
2025-12-08T17:48:10.600717480+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="setting up health checks" 2025-12-08T17:48:10.604690120+00:00 stderr F I1208 17:48:10.603528 1 leaderelection.go:257] attempting to acquire leader lease openshift-marketplace/marketplace-operator-lock... 2025-12-08T17:48:10.611683841+00:00 stderr F I1208 17:48:10.611628 1 leaderelection.go:271] successfully acquired lease openshift-marketplace/marketplace-operator-lock 2025-12-08T17:48:10.612385362+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="became leader: marketplace-operator-547dbd544d-6bbtn" 2025-12-08T17:48:10.612427454+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="registering components" 2025-12-08T17:48:10.612457055+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="setting up the marketplace clusteroperator status reporter" 2025-12-08T17:48:10.632095759+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="setting up controllers" 2025-12-08T17:48:10.634089169+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="starting the marketplace clusteroperator status reporter" 2025-12-08T17:48:10.634164022+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="starting manager" 2025-12-08T17:48:10.634362438+00:00 stderr F {"level":"info","ts":"2025-12-08T17:48:10Z","msg":"starting server","name":"pprof","addr":"[::]:6060"} 2025-12-08T17:48:10.634754789+00:00 stderr F {"level":"info","ts":"2025-12-08T17:48:10Z","msg":"Starting EventSource","controller":"catalogsource-controller","controllerGroup":"operators.coreos.com","controllerKind":"CatalogSource","source":"kind source: *v1alpha1.CatalogSource"} 2025-12-08T17:48:10.640205284+00:00 stderr F {"level":"info","ts":"2025-12-08T17:48:10Z","msg":"Starting EventSource","controller":"operatorhub-controller","controllerGroup":"config.openshift.io","controllerKind":"OperatorHub","source":"kind source: *v1.OperatorHub"} 2025-12-08T17:48:10.640763982+00:00 stderr F {"level":"info","ts":"2025-12-08T17:48:10Z","msg":"Starting EventSource","controller":"configmap-controller","controllerGroup":"","controllerKind":"ConfigMap","source":"kind source: *v1.ConfigMap"} 2025-12-08T17:48:10.736186779+00:00 stderr F {"level":"info","ts":"2025-12-08T17:48:10Z","msg":"Starting Controller","controller":"catalogsource-controller","controllerGroup":"operators.coreos.com","controllerKind":"CatalogSource"} 2025-12-08T17:48:10.736186779+00:00 stderr F {"level":"info","ts":"2025-12-08T17:48:10Z","msg":"Starting workers","controller":"catalogsource-controller","controllerGroup":"operators.coreos.com","controllerKind":"CatalogSource","worker count":1} 2025-12-08T17:48:10.742602733+00:00 stderr F {"level":"info","ts":"2025-12-08T17:48:10Z","msg":"Starting Controller","controller":"operatorhub-controller","controllerGroup":"config.openshift.io","controllerKind":"OperatorHub"} 2025-12-08T17:48:10.742642534+00:00 stderr F {"level":"info","ts":"2025-12-08T17:48:10Z","msg":"Starting workers","controller":"operatorhub-controller","controllerGroup":"config.openshift.io","controllerKind":"OperatorHub","worker count":1} 2025-12-08T17:48:10.742700326+00:00 stderr F {"level":"info","ts":"2025-12-08T17:48:10Z","msg":"Starting Controller","controller":"configmap-controller","controllerGroup":"","controllerKind":"ConfigMap"} 2025-12-08T17:48:10.742700326+00:00 stderr F {"level":"info","ts":"2025-12-08T17:48:10Z","msg":"Starting workers","controller":"configmap-controller","controllerGroup":"","controllerKind":"ConfigMap","worker count":1} 
2025-12-08T17:48:10.743446718+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.744356426+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling ConfigMap openshift-marketplace/marketplace-trusted-ca" 2025-12-08T17:48:10.744667675+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.744718427+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.744749278+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.746055827+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.746194531+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[ca] Certificate Authorization ConfigMap openshift-marketplace/marketplace-trusted-ca is in sync with disk." name=marketplace-trusted-ca type=ConfigMap 2025-12-08T17:48:10.756715520+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.756795132+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.756831443+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.757894335+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.758026919+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.766201406+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.766365711+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.766505085+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.766593348+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.766692581+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.776967462+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.777193509+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.777292882+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same 
as the default spec" 2025-12-08T17:48:10.777372434+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.777440056+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.783604514+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.783739028+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.783968014+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.784693236+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.785367866+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.790488671+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.790630306+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.790800011+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.791376828+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.791541843+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.802091372+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.802199406+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.802250757+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.802296018+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.802334170+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.808253839+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.808864598+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.810050083+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the 
same as the default spec" 2025-12-08T17:48:10.810149196+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.810218798+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.817298683+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.817416757+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.817492529+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.817550821+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.817601042+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.824260883+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.824395687+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.824684356+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.824779409+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.824851141+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.830180982+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.830381758+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.830454120+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.830563724+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.830638586+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.839459533+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.839607608+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.839676420+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec 
is the same as the default spec" 2025-12-08T17:48:10.839720071+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.839766693+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.845718673+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.845963530+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.846091474+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.846175126+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.846260139+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.851367224+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:48:10.851534169+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.851613491+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.855625902+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:10.855726325+00:00 stderr F time="2025-12-08T17:48:10Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:11.161232118+00:00 stderr F time="2025-12-08T17:48:11Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:12.156026516+00:00 stderr F time="2025-12-08T17:48:12Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:13.554231140+00:00 stderr F time="2025-12-08T17:48:13Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:14.556257876+00:00 stderr F time="2025-12-08T17:48:14Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:17.547246831+00:00 stderr F time="2025-12-08T17:48:17Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:18.552249398+00:00 stderr F time="2025-12-08T17:48:18Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:24.552422987+00:00 stderr F time="2025-12-08T17:48:24Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 
2025-12-08T17:48:25.552597148+00:00 stderr F time="2025-12-08T17:48:25Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:26.547642063+00:00 stderr F time="2025-12-08T17:48:26Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:27.547839805+00:00 stderr F time="2025-12-08T17:48:27Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:48:28.548889832+00:00 stderr F time="2025-12-08T17:48:28Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:29.553404685+00:00 stderr F time="2025-12-08T17:48:29Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:48:36.603406856+00:00 stderr F time="2025-12-08T17:48:36Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:14.420267710+00:00 stderr F time="2025-12-08T17:54:14Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:15.635344168+00:00 stderr F time="2025-12-08T17:54:15Z" level=info msg="[defaults] CatalogSource redhat-marketplace is annotated and its spec is the same as the default spec" 2025-12-08T17:54:28.446317725+00:00 stderr F time="2025-12-08T17:54:28Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:41.500312240+00:00 stderr F time="2025-12-08T17:54:41Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.004565069+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:54:50.005502884+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.005582536+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.007057457+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.016428189+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] Deleting CatalogSource redhat-marketplace" 2025-12-08T17:54:50.024314731+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:54:50.024513356+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.024737782+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.024826525+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.047226427+00:00 stderr F time="2025-12-08T17:54:50Z" level=info 
msg="Reconciling OperatorHub cluster" 2025-12-08T17:54:50.047226427+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.047226427+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.047226427+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.057352020+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:54:50.058208113+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.059144698+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.059305973+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.075961860+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:54:50.076109635+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.076190717+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.076385273+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.090737909+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="Reconciling OperatorHub cluster" 2025-12-08T17:54:50.090737909+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.090737909+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:54:50.090737909+00:00 stderr F time="2025-12-08T17:54:50Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:56:33.494414133+00:00 stderr F time="2025-12-08T17:56:33Z" level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:59:50.676944154+00:00 stderr F time="2025-12-08T17:59:50Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T17:59:51.696042594+00:00 stderr F time="2025-12-08T17:59:51Z" level=info msg="[defaults] CatalogSource community-operators is annotated and its spec is the same as the default spec" 2025-12-08T18:04:31.740155485+00:00 stderr F time="2025-12-08T18:04:31Z" level=info msg="[defaults] CatalogSource certified-operators is annotated and its spec is the same as the default spec" 2025-12-08T18:04:43.046922043+00:00 stderr F time="2025-12-08T18:04:43Z" 
level=info msg="[defaults] CatalogSource redhat-operators is annotated and its spec is the same as the default spec" ././@LongLink0000644000000000000000000000024400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000755000175000017500000000000015115611513033044 5ustar zuulzuul././@LongLink0000644000000000000000000000025300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab/pruner/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000755000175000017500000000000015115611520033042 5ustar zuulzuul././@LongLink0000644000000000000000000000026000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab/pruner/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000644000175000017500000000364715115611513033060 0ustar zuulzuul2025-12-08T17:45:05.612676751+00:00 stderr F I1208 17:45:05.612326 1 cmd.go:39] &{ true {false} prune true map[cert-dir:0xc00080abe0 max-eligible-revision:0xc00080a960 protected-revisions:0xc00080aa00 resource-dir:0xc00080aaa0 static-pod-name:0xc00080ab40 v:0xc00080bcc0] [0xc00080bcc0 0xc00080a960 0xc00080aa00 0xc00080aaa0 0xc00080abe0 0xc00080ab40] [] map[cert-dir:0xc00080abe0 help:0xc0009901e0 log-flush-frequency:0xc00080bc20 max-eligible-revision:0xc00080a960 protected-revisions:0xc00080aa00 resource-dir:0xc00080aaa0 static-pod-name:0xc00080ab40 v:0xc00080bcc0 vmodule:0xc00080bd60] [0xc00080a960 0xc00080aa00 0xc00080aaa0 0xc00080ab40 0xc00080abe0 0xc00080bc20 0xc00080bcc0 0xc00080bd60 0xc0009901e0] [0xc00080abe0 0xc0009901e0 0xc00080bc20 0xc00080a960 0xc00080aa00 0xc00080aaa0 0xc00080ab40 0xc00080bcc0 0xc00080bd60] map[104:0xc0009901e0 118:0xc00080bcc0] [] -1 0 0xc0004f2f30 true 0xae3c00 []} 2025-12-08T17:45:05.612676751+00:00 stderr F I1208 17:45:05.612636 1 cmd.go:40] (*prune.PruneOptions)(0xc00060eb40)({ 2025-12-08T17:45:05.612676751+00:00 stderr F MaxEligibleRevision: (int) 12, 2025-12-08T17:45:05.612676751+00:00 stderr F ProtectedRevisions: ([]int) (len=6 cap=6) { 2025-12-08T17:45:05.612676751+00:00 stderr F (int) 7, 2025-12-08T17:45:05.612676751+00:00 stderr F (int) 8, 2025-12-08T17:45:05.612676751+00:00 stderr F (int) 9, 2025-12-08T17:45:05.612676751+00:00 stderr F (int) 10, 2025-12-08T17:45:05.612676751+00:00 stderr F (int) 11, 2025-12-08T17:45:05.612676751+00:00 stderr F (int) 12 2025-12-08T17:45:05.612676751+00:00 stderr F }, 2025-12-08T17:45:05.612676751+00:00 stderr F ResourceDir: (string) (len=36) "/etc/kubernetes/static-pod-resources", 2025-12-08T17:45:05.612676751+00:00 stderr F CertDir: (string) (len=20) "kube-apiserver-certs", 2025-12-08T17:45:05.612676751+00:00 stderr F StaticPodName: (string) (len=18) "kube-apiserver-pod" 2025-12-08T17:45:05.612676751+00:00 stderr F }) ././@LongLink0000644000000000000000000000024200000000000011601 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_ap0000755000175000017500000000000015115611514033130 5ustar zuulzuul././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_ap0000755000175000017500000000000015115611521033126 5ustar zuulzuul././@LongLink0000644000000000000000000000027300000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_ap0000644000175000017500000033207515115611514033144 0ustar zuulzuul2025-12-08T17:44:24.571257915+00:00 stdout F Copying system trust bundle 2025-12-08T17:44:25.653562096+00:00 stderr F W1208 17:44:25.653495 1 feature_gate.go:350] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 2025-12-08T17:44:25.653562096+00:00 stderr F W1208 17:44:25.653520 1 feature_gate.go:352] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. 2025-12-08T17:44:25.653562096+00:00 stderr F W1208 17:44:25.653528 1 feature_gate.go:352] Setting GA feature gate RouteExternalCertificate=true. It will be removed in a future release. 
2025-12-08T17:44:25.653562096+00:00 stderr F I1208 17:44:25.653534 1 feature_gate.go:385] feature gates: {map[DynamicResourceAllocation:false EventedPLEG:false ImageVolume:true KMSv1:true MaxUnavailableStatefulSet:false MutatingAdmissionPolicy:false NodeSwap:false ProcMountType:true RouteExternalCertificate:true SELinuxMount:false ServiceAccountTokenNodeBinding:true TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:true UserNamespacesSupport:true VolumeAttributesClass:false]} 2025-12-08T17:44:25.657326079+00:00 stderr F W1208 17:44:25.657259 1 config.go:124] Ignoring unknown FeatureGate "NetworkLiveMigration" 2025-12-08T17:44:25.657326079+00:00 stderr F W1208 17:44:25.657299 1 config.go:124] Ignoring unknown FeatureGate "AzureDedicatedHosts" 2025-12-08T17:44:25.657326079+00:00 stderr F W1208 17:44:25.657303 1 config.go:124] Ignoring unknown FeatureGate "DyanmicServiceEndpointIBMCloud" 2025-12-08T17:44:25.657326079+00:00 stderr F W1208 17:44:25.657307 1 config.go:124] Ignoring unknown FeatureGate "IrreconcilableMachineConfig" 2025-12-08T17:44:25.657326079+00:00 stderr F W1208 17:44:25.657311 1 config.go:124] Ignoring unknown FeatureGate "SignatureStores" 2025-12-08T17:44:25.657326079+00:00 stderr F W1208 17:44:25.657314 1 config.go:124] Ignoring unknown FeatureGate "AzureWorkloadIdentity" 2025-12-08T17:44:25.657326079+00:00 stderr F W1208 17:44:25.657316 1 config.go:124] Ignoring unknown FeatureGate "BuildCSIVolumes" 2025-12-08T17:44:25.657326079+00:00 stderr F W1208 17:44:25.657319 1 config.go:124] Ignoring unknown FeatureGate "Example2" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657322 1 config.go:124] Ignoring unknown FeatureGate "GCPClusterHostedDNS" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657326 1 config.go:124] Ignoring unknown FeatureGate "OpenShiftPodSecurityAdmission" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657333 1 config.go:124] Ignoring unknown FeatureGate "ShortCertRotation" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657336 1 config.go:124] Ignoring unknown FeatureGate "VSphereHostVMGroupZonal" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657339 1 config.go:124] Ignoring unknown FeatureGate "StoragePerformantSecurityPolicy" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657342 1 config.go:124] Ignoring unknown FeatureGate "MixedCPUsAllocation" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657344 1 config.go:124] Ignoring unknown FeatureGate "NewOLMPreflightPermissionChecks" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657347 1 config.go:124] Ignoring unknown FeatureGate "VolumeGroupSnapshot" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657353 1 config.go:124] Ignoring unknown FeatureGate "VSphereMultiNetworks" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657356 1 config.go:124] Ignoring unknown FeatureGate "BootImageSkewEnforcement" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657359 1 config.go:124] Ignoring unknown FeatureGate "MultiDiskSetup" 2025-12-08T17:44:25.657366370+00:00 stderr F W1208 17:44:25.657362 1 config.go:124] Ignoring unknown FeatureGate "GatewayAPIController" 2025-12-08T17:44:25.657378970+00:00 stderr F W1208 17:44:25.657365 1 config.go:124] Ignoring unknown FeatureGate "SetEIPForNLBIngressController" 2025-12-08T17:44:25.657378970+00:00 stderr F W1208 17:44:25.657368 1 config.go:124] Ignoring unknown FeatureGate "UpgradeStatus" 
2025-12-08T17:44:25.657378970+00:00 stderr F W1208 17:44:25.657370 1 config.go:124] Ignoring unknown FeatureGate "MultiArchInstallAzure" 2025-12-08T17:44:25.657378970+00:00 stderr F W1208 17:44:25.657373 1 config.go:124] Ignoring unknown FeatureGate "SigstoreImageVerificationPKI" 2025-12-08T17:44:25.657378970+00:00 stderr F W1208 17:44:25.657375 1 config.go:124] Ignoring unknown FeatureGate "NetworkSegmentation" 2025-12-08T17:44:25.657409921+00:00 stderr F W1208 17:44:25.657379 1 config.go:124] Ignoring unknown FeatureGate "AWSClusterHostedDNSInstall" 2025-12-08T17:44:25.657409921+00:00 stderr F W1208 17:44:25.657381 1 config.go:124] Ignoring unknown FeatureGate "ClusterAPIInstallIBMCloud" 2025-12-08T17:44:25.657409921+00:00 stderr F W1208 17:44:25.657384 1 config.go:124] Ignoring unknown FeatureGate "InsightsConfigAPI" 2025-12-08T17:44:25.657409921+00:00 stderr F W1208 17:44:25.657386 1 config.go:124] Ignoring unknown FeatureGate "AWSDedicatedHosts" 2025-12-08T17:44:25.657409921+00:00 stderr F W1208 17:44:25.657392 1 config.go:124] Ignoring unknown FeatureGate "NewOLMCatalogdAPIV1Metas" 2025-12-08T17:44:25.657409921+00:00 stderr F W1208 17:44:25.657395 1 config.go:124] Ignoring unknown FeatureGate "NewOLMOwnSingleNamespace" 2025-12-08T17:44:25.657409921+00:00 stderr F W1208 17:44:25.657398 1 config.go:124] Ignoring unknown FeatureGate "AdditionalRoutingCapabilities" 2025-12-08T17:44:25.657409921+00:00 stderr F W1208 17:44:25.657401 1 config.go:124] Ignoring unknown FeatureGate "RouteAdvertisements" 2025-12-08T17:44:25.657409921+00:00 stderr F W1208 17:44:25.657404 1 config.go:124] Ignoring unknown FeatureGate "ClusterMonitoringConfig" 2025-12-08T17:44:25.657409921+00:00 stderr F W1208 17:44:25.657406 1 config.go:124] Ignoring unknown FeatureGate "InsightsConfig" 2025-12-08T17:44:25.657420981+00:00 stderr F W1208 17:44:25.657409 1 config.go:124] Ignoring unknown FeatureGate "MachineAPIMigration" 2025-12-08T17:44:25.657420981+00:00 stderr F W1208 17:44:25.657412 1 config.go:124] Ignoring unknown FeatureGate "MinimumKubeletVersion" 2025-12-08T17:44:25.657420981+00:00 stderr F W1208 17:44:25.657414 1 config.go:124] Ignoring unknown FeatureGate "NoRegistryClusterOperations" 2025-12-08T17:44:25.657420981+00:00 stderr F W1208 17:44:25.657417 1 config.go:124] Ignoring unknown FeatureGate "ManagedBootImagesAWS" 2025-12-08T17:44:25.657430151+00:00 stderr F W1208 17:44:25.657419 1 config.go:124] Ignoring unknown FeatureGate "AzureMultiDisk" 2025-12-08T17:44:25.657430151+00:00 stderr F W1208 17:44:25.657423 1 config.go:124] Ignoring unknown FeatureGate "ImageModeStatusReporting" 2025-12-08T17:44:25.657430151+00:00 stderr F W1208 17:44:25.657426 1 config.go:124] Ignoring unknown FeatureGate "ManagedBootImagesAzure" 2025-12-08T17:44:25.657438812+00:00 stderr F W1208 17:44:25.657429 1 config.go:124] Ignoring unknown FeatureGate "PreconfiguredUDNAddresses" 2025-12-08T17:44:25.657438812+00:00 stderr F W1208 17:44:25.657432 1 config.go:124] Ignoring unknown FeatureGate "HighlyAvailableArbiter" 2025-12-08T17:44:25.657438812+00:00 stderr F W1208 17:44:25.657435 1 config.go:124] Ignoring unknown FeatureGate "ManagedBootImages" 2025-12-08T17:44:25.657447892+00:00 stderr F W1208 17:44:25.657437 1 config.go:124] Ignoring unknown FeatureGate "AWSClusterHostedDNS" 2025-12-08T17:44:25.657447892+00:00 stderr F W1208 17:44:25.657440 1 config.go:124] Ignoring unknown FeatureGate "GCPClusterHostedDNSInstall" 2025-12-08T17:44:25.657447892+00:00 stderr F W1208 17:44:25.657442 1 config.go:124] Ignoring unknown FeatureGate 
"InsightsOnDemandDataGather" 2025-12-08T17:44:25.657447892+00:00 stderr F W1208 17:44:25.657445 1 config.go:124] Ignoring unknown FeatureGate "KMSEncryptionProvider" 2025-12-08T17:44:25.657457322+00:00 stderr F W1208 17:44:25.657447 1 config.go:124] Ignoring unknown FeatureGate "MachineAPIOperatorDisableMachineHealthCheckController" 2025-12-08T17:44:25.657457322+00:00 stderr F W1208 17:44:25.657452 1 config.go:124] Ignoring unknown FeatureGate "NetworkDiagnosticsConfig" 2025-12-08T17:44:25.657482293+00:00 stderr F W1208 17:44:25.657455 1 config.go:124] Ignoring unknown FeatureGate "NewOLM" 2025-12-08T17:44:25.657482293+00:00 stderr F W1208 17:44:25.657458 1 config.go:124] Ignoring unknown FeatureGate "AzureClusterHostedDNSInstall" 2025-12-08T17:44:25.657482293+00:00 stderr F W1208 17:44:25.657461 1 config.go:124] Ignoring unknown FeatureGate "BootcNodeManagement" 2025-12-08T17:44:25.657482293+00:00 stderr F W1208 17:44:25.657463 1 config.go:124] Ignoring unknown FeatureGate "ExternalOIDCWithUIDAndExtraClaimMappings" 2025-12-08T17:44:25.657482293+00:00 stderr F W1208 17:44:25.657466 1 config.go:124] Ignoring unknown FeatureGate "IngressControllerDynamicConfigurationManager" 2025-12-08T17:44:25.657482293+00:00 stderr F W1208 17:44:25.657469 1 config.go:124] Ignoring unknown FeatureGate "ConsolePluginContentSecurityPolicy" 2025-12-08T17:44:25.657482293+00:00 stderr F W1208 17:44:25.657471 1 config.go:124] Ignoring unknown FeatureGate "IngressControllerLBSubnetsAWS" 2025-12-08T17:44:25.657482293+00:00 stderr F W1208 17:44:25.657474 1 config.go:124] Ignoring unknown FeatureGate "PinnedImages" 2025-12-08T17:44:25.657482293+00:00 stderr F W1208 17:44:25.657477 1 config.go:124] Ignoring unknown FeatureGate "ClusterAPIInstall" 2025-12-08T17:44:25.657482293+00:00 stderr F W1208 17:44:25.657479 1 config.go:124] Ignoring unknown FeatureGate "NewOLMWebhookProviderOpenshiftServiceCA" 2025-12-08T17:44:25.657496793+00:00 stderr F W1208 17:44:25.657481 1 config.go:124] Ignoring unknown FeatureGate "VSphereConfigurableMaxAllowedBlockVolumesPerNode" 2025-12-08T17:44:25.657496793+00:00 stderr F W1208 17:44:25.657484 1 config.go:124] Ignoring unknown FeatureGate "AdminNetworkPolicy" 2025-12-08T17:44:25.657496793+00:00 stderr F W1208 17:44:25.657487 1 config.go:124] Ignoring unknown FeatureGate "MetricsCollectionProfiles" 2025-12-08T17:44:25.657496793+00:00 stderr F W1208 17:44:25.657489 1 config.go:124] Ignoring unknown FeatureGate "AutomatedEtcdBackup" 2025-12-08T17:44:25.657496793+00:00 stderr F W1208 17:44:25.657492 1 config.go:124] Ignoring unknown FeatureGate "ClusterVersionOperatorConfiguration" 2025-12-08T17:44:25.657506803+00:00 stderr F W1208 17:44:25.657494 1 config.go:124] Ignoring unknown FeatureGate "ExternalOIDC" 2025-12-08T17:44:25.657506803+00:00 stderr F W1208 17:44:25.657498 1 config.go:124] Ignoring unknown FeatureGate "GCPCustomAPIEndpoints" 2025-12-08T17:44:25.657506803+00:00 stderr F W1208 17:44:25.657500 1 config.go:124] Ignoring unknown FeatureGate "MachineConfigNodes" 2025-12-08T17:44:25.657506803+00:00 stderr F W1208 17:44:25.657503 1 config.go:124] Ignoring unknown FeatureGate "SigstoreImageVerification" 2025-12-08T17:44:25.657515984+00:00 stderr F W1208 17:44:25.657505 1 config.go:124] Ignoring unknown FeatureGate "DualReplica" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657522 1 config.go:124] Ignoring unknown FeatureGate "EtcdBackendQuota" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657536 1 config.go:124] Ignoring unknown FeatureGate 
"ImageStreamImportMode" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657541 1 config.go:124] Ignoring unknown FeatureGate "VSphereMixedNodeEnv" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657544 1 config.go:124] Ignoring unknown FeatureGate "Example" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657547 1 config.go:124] Ignoring unknown FeatureGate "GatewayAPI" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657549 1 config.go:124] Ignoring unknown FeatureGate "VSphereMultiDisk" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657552 1 config.go:124] Ignoring unknown FeatureGate "AWSServiceLBNetworkSecurityGroup" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657554 1 config.go:124] Ignoring unknown FeatureGate "DNSNameResolver" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657557 1 config.go:124] Ignoring unknown FeatureGate "GCPCustomAPIEndpointsInstall" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657561 1 config.go:124] Ignoring unknown FeatureGate "AlibabaPlatform" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657563 1 config.go:124] Ignoring unknown FeatureGate "CPMSMachineNamePrefix" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657566 1 config.go:124] Ignoring unknown FeatureGate "ExternalSnapshotMetadata" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657569 1 config.go:124] Ignoring unknown FeatureGate "ManagedBootImagesvSphere" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657571 1 config.go:124] Ignoring unknown FeatureGate "NutanixMultiSubnets" 2025-12-08T17:44:25.657902624+00:00 stderr F W1208 17:44:25.657574 1 config.go:124] Ignoring unknown FeatureGate "OVNObservability" 2025-12-08T17:44:25.674720193+00:00 stderr F I1208 17:44:25.671698 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:27.065820368+00:00 stderr F I1208 17:44:27.065748 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:44:27.079580094+00:00 stderr F I1208 17:44:27.079522 1 audit.go:340] Using audit backend: ignoreErrors 2025-12-08T17:44:27.081587239+00:00 stderr F I1208 17:44:27.081359 1 plugins.go:83] "Registered admission plugin" plugin="NamespaceLifecycle" 2025-12-08T17:44:27.081587239+00:00 stderr F I1208 17:44:27.081376 1 plugins.go:83] "Registered admission plugin" plugin="ValidatingAdmissionWebhook" 2025-12-08T17:44:27.081587239+00:00 stderr F I1208 17:44:27.081381 1 plugins.go:83] "Registered admission plugin" plugin="MutatingAdmissionWebhook" 2025-12-08T17:44:27.081587239+00:00 stderr F I1208 17:44:27.081386 1 plugins.go:83] "Registered admission plugin" plugin="ValidatingAdmissionPolicy" 2025-12-08T17:44:27.081587239+00:00 stderr F I1208 17:44:27.081389 1 plugins.go:83] "Registered admission plugin" plugin="MutatingAdmissionPolicy" 2025-12-08T17:44:27.083072259+00:00 stderr F I1208 17:44:27.082343 1 admission.go:48] Admission plugin "project.openshift.io/ProjectRequestLimit" is not configured so it will be disabled. 2025-12-08T17:44:27.096181347+00:00 stderr F I1208 17:44:27.094059 1 plugins.go:157] Loaded 5 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,build.openshift.io/BuildConfigSecretInjector,image.openshift.io/ImageLimitRange,image.openshift.io/ImagePolicy,MutatingAdmissionWebhook. 
2025-12-08T17:44:27.096181347+00:00 stderr F I1208 17:44:27.094513 1 plugins.go:160] Loaded 9 validating admission controller(s) successfully in the following order: OwnerReferencesPermissionEnforcement,build.openshift.io/BuildConfigSecretInjector,build.openshift.io/BuildByStrategy,image.openshift.io/ImageLimitRange,image.openshift.io/ImagePolicy,quota.openshift.io/ClusterResourceQuota,route.openshift.io/RequiredRouteAnnotations,ValidatingAdmissionWebhook,ResourceQuota. 2025-12-08T17:44:27.096181347+00:00 stderr F I1208 17:44:27.095523 1 maxinflight.go:139] "Initialized nonMutatingChan" len=3000 2025-12-08T17:44:27.096181347+00:00 stderr F I1208 17:44:27.095534 1 maxinflight.go:145] "Initialized mutatingChan" len=1500 2025-12-08T17:44:27.096181347+00:00 stderr F I1208 17:44:27.095556 1 maxinflight.go:116] "Set denominator for readonly requests" limit=3000 2025-12-08T17:44:27.096181347+00:00 stderr F I1208 17:44:27.095560 1 maxinflight.go:120] "Set denominator for mutating requests" limit=1500 2025-12-08T17:44:27.132070366+00:00 stderr F I1208 17:44:27.131997 1 store.go:1663] "Monitoring resource count at path" resource="deploymentconfigs.apps.openshift.io" path="//deploymentconfigs" 2025-12-08T17:44:27.138253504+00:00 stderr F I1208 17:44:27.138213 1 cacher.go:469] cacher (deploymentconfigs.apps.openshift.io): initialized 2025-12-08T17:44:27.138432489+00:00 stderr F I1208 17:44:27.138399 1 reflector.go:430] "Caches populated" type="*apps.DeploymentConfig" reflector="storage/cacher.go:/deploymentconfigs" 2025-12-08T17:44:27.147965149+00:00 stderr F I1208 17:44:27.147910 1 handler.go:288] Adding GroupVersion apps.openshift.io v1 to ResourceManager 2025-12-08T17:44:27.148059151+00:00 stderr F I1208 17:44:27.148032 1 maxinflight.go:139] "Initialized nonMutatingChan" len=3000 2025-12-08T17:44:27.148059151+00:00 stderr F I1208 17:44:27.148047 1 maxinflight.go:145] "Initialized mutatingChan" len=1500 2025-12-08T17:44:27.164709645+00:00 stderr F I1208 17:44:27.160276 1 handler.go:288] Adding GroupVersion authorization.openshift.io v1 to ResourceManager 2025-12-08T17:44:27.164709645+00:00 stderr F I1208 17:44:27.160411 1 maxinflight.go:139] "Initialized nonMutatingChan" len=3000 2025-12-08T17:44:27.164709645+00:00 stderr F I1208 17:44:27.160423 1 maxinflight.go:145] "Initialized mutatingChan" len=1500 2025-12-08T17:44:27.171455280+00:00 stderr F I1208 17:44:27.170834 1 handler.go:288] Adding GroupVersion project.openshift.io v1 to ResourceManager 2025-12-08T17:44:27.171455280+00:00 stderr F I1208 17:44:27.171196 1 maxinflight.go:139] "Initialized nonMutatingChan" len=3000 2025-12-08T17:44:27.171455280+00:00 stderr F I1208 17:44:27.171213 1 maxinflight.go:145] "Initialized mutatingChan" len=1500 2025-12-08T17:44:27.187536768+00:00 stderr F I1208 17:44:27.187492 1 store.go:1663] "Monitoring resource count at path" resource="routes.route.openshift.io" path="//routes" 2025-12-08T17:44:27.192475513+00:00 stderr F I1208 17:44:27.189494 1 handler.go:288] Adding GroupVersion route.openshift.io v1 to ResourceManager 2025-12-08T17:44:27.192475513+00:00 stderr F I1208 17:44:27.189580 1 maxinflight.go:139] "Initialized nonMutatingChan" len=3000 2025-12-08T17:44:27.192475513+00:00 stderr F I1208 17:44:27.189587 1 maxinflight.go:145] "Initialized mutatingChan" len=1500 2025-12-08T17:44:27.192475513+00:00 stderr F I1208 17:44:27.191184 1 cacher.go:469] cacher (routes.route.openshift.io): initialized 2025-12-08T17:44:27.192475513+00:00 stderr F I1208 17:44:27.191214 1 reflector.go:430] "Caches populated" 
type="*route.Route" reflector="storage/cacher.go:/routes" 2025-12-08T17:44:27.220420265+00:00 stderr F I1208 17:44:27.215697 1 store.go:1663] "Monitoring resource count at path" resource="rangeallocations.security.openshift.io" path="//rangeallocations" 2025-12-08T17:44:27.221866115+00:00 stderr F I1208 17:44:27.221829 1 handler.go:288] Adding GroupVersion security.openshift.io v1 to ResourceManager 2025-12-08T17:44:27.222535573+00:00 stderr F I1208 17:44:27.221993 1 maxinflight.go:139] "Initialized nonMutatingChan" len=3000 2025-12-08T17:44:27.222535573+00:00 stderr F I1208 17:44:27.222006 1 maxinflight.go:145] "Initialized mutatingChan" len=1500 2025-12-08T17:44:27.225864844+00:00 stderr F I1208 17:44:27.224543 1 cacher.go:469] cacher (rangeallocations.security.openshift.io): initialized 2025-12-08T17:44:27.225864844+00:00 stderr F I1208 17:44:27.224577 1 reflector.go:430] "Caches populated" type="*security.RangeAllocation" reflector="storage/cacher.go:/rangeallocations" 2025-12-08T17:44:27.239604569+00:00 stderr F I1208 17:44:27.239523 1 store.go:1663] "Monitoring resource count at path" resource="templates.template.openshift.io" path="//templates" 2025-12-08T17:44:27.258912185+00:00 stderr F I1208 17:44:27.258615 1 cacher.go:469] cacher (templates.template.openshift.io): initialized 2025-12-08T17:44:27.258912185+00:00 stderr F I1208 17:44:27.258655 1 reflector.go:430] "Caches populated" type="*template.Template" reflector="storage/cacher.go:/templates" 2025-12-08T17:44:27.263371097+00:00 stderr F I1208 17:44:27.263033 1 store.go:1663] "Monitoring resource count at path" resource="templateinstances.template.openshift.io" path="//templateinstances" 2025-12-08T17:44:27.265242348+00:00 stderr F I1208 17:44:27.265150 1 cacher.go:469] cacher (templateinstances.template.openshift.io): initialized 2025-12-08T17:44:27.265242348+00:00 stderr F I1208 17:44:27.265204 1 reflector.go:430] "Caches populated" type="*template.TemplateInstance" reflector="storage/cacher.go:/templateinstances" 2025-12-08T17:44:27.273037901+00:00 stderr F I1208 17:44:27.272784 1 store.go:1663] "Monitoring resource count at path" resource="brokertemplateinstances.template.openshift.io" path="//brokertemplateinstances" 2025-12-08T17:44:27.279225519+00:00 stderr F I1208 17:44:27.277971 1 cacher.go:469] cacher (brokertemplateinstances.template.openshift.io): initialized 2025-12-08T17:44:27.279225519+00:00 stderr F I1208 17:44:27.278005 1 reflector.go:430] "Caches populated" type="*template.BrokerTemplateInstance" reflector="storage/cacher.go:/brokertemplateinstances" 2025-12-08T17:44:27.283257850+00:00 stderr F I1208 17:44:27.283211 1 handler.go:288] Adding GroupVersion template.openshift.io v1 to ResourceManager 2025-12-08T17:44:27.286933620+00:00 stderr F I1208 17:44:27.283337 1 maxinflight.go:139] "Initialized nonMutatingChan" len=3000 2025-12-08T17:44:27.286933620+00:00 stderr F I1208 17:44:27.283348 1 maxinflight.go:145] "Initialized mutatingChan" len=1500 2025-12-08T17:44:27.306938225+00:00 stderr F I1208 17:44:27.303290 1 store.go:1663] "Monitoring resource count at path" resource="builds.build.openshift.io" path="//builds" 2025-12-08T17:44:27.306938225+00:00 stderr F I1208 17:44:27.304826 1 cacher.go:469] cacher (builds.build.openshift.io): initialized 2025-12-08T17:44:27.306938225+00:00 stderr F I1208 17:44:27.304845 1 reflector.go:430] "Caches populated" type="*build.Build" reflector="storage/cacher.go:/builds" 2025-12-08T17:44:27.321200394+00:00 stderr F I1208 17:44:27.321133 1 store.go:1663] "Monitoring resource 
count at path" resource="buildconfigs.build.openshift.io" path="//buildconfigs" 2025-12-08T17:44:27.325538303+00:00 stderr F I1208 17:44:27.325411 1 cacher.go:469] cacher (buildconfigs.build.openshift.io): initialized 2025-12-08T17:44:27.325538303+00:00 stderr F I1208 17:44:27.325470 1 reflector.go:430] "Caches populated" type="*build.BuildConfig" reflector="storage/cacher.go:/buildconfigs" 2025-12-08T17:44:27.326517039+00:00 stderr F I1208 17:44:27.326488 1 handler.go:288] Adding GroupVersion build.openshift.io v1 to ResourceManager 2025-12-08T17:44:27.326630822+00:00 stderr F I1208 17:44:27.326608 1 maxinflight.go:139] "Initialized nonMutatingChan" len=3000 2025-12-08T17:44:27.326630822+00:00 stderr F I1208 17:44:27.326620 1 maxinflight.go:145] "Initialized mutatingChan" len=1500 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367496 1 apiserver.go:156] reading image import ca path: /var/run/configmaps/image-import-ca, incoming err: 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367534 1 apiserver.go:161] skipping dir or symlink: /var/run/configmaps/image-import-ca 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367568 1 apiserver.go:156] reading image import ca path: /var/run/configmaps/image-import-ca/..2025_12_08_17_44_15.797397511, incoming err: 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367572 1 apiserver.go:161] skipping dir or symlink: /var/run/configmaps/image-import-ca/..2025_12_08_17_44_15.797397511 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367584 1 apiserver.go:156] reading image import ca path: /var/run/configmaps/image-import-ca/..2025_12_08_17_44_15.797397511/default-route-openshift-image-registry.apps-crc.testing, incoming err: 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367643 1 apiserver.go:156] reading image import ca path: /var/run/configmaps/image-import-ca/..2025_12_08_17_44_15.797397511/image-registry.openshift-image-registry.svc..5000, incoming err: 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367689 1 apiserver.go:156] reading image import ca path: /var/run/configmaps/image-import-ca/..2025_12_08_17_44_15.797397511/image-registry.openshift-image-registry.svc.cluster.local..5000, incoming err: 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367737 1 apiserver.go:156] reading image import ca path: /var/run/configmaps/image-import-ca/..data, incoming err: 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367741 1 apiserver.go:161] skipping dir or symlink: /var/run/configmaps/image-import-ca/..data 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367752 1 apiserver.go:156] reading image import ca path: /var/run/configmaps/image-import-ca/default-route-openshift-image-registry.apps-crc.testing, incoming err: 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367755 1 apiserver.go:161] skipping dir or symlink: /var/run/configmaps/image-import-ca/default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367766 1 apiserver.go:156] reading image import ca path: /var/run/configmaps/image-import-ca/image-registry.openshift-image-registry.svc..5000, incoming err: 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367769 1 apiserver.go:161] skipping dir or symlink: /var/run/configmaps/image-import-ca/image-registry.openshift-image-registry.svc..5000 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367780 1 apiserver.go:156] reading image import ca path: 
/var/run/configmaps/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000, incoming err: 2025-12-08T17:44:27.369950134+00:00 stderr F I1208 17:44:27.367785 1 apiserver.go:161] skipping dir or symlink: /var/run/configmaps/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:44:27.388488260+00:00 stderr F I1208 17:44:27.385003 1 store.go:1663] "Monitoring resource count at path" resource="images.image.openshift.io" path="//images" 2025-12-08T17:44:27.398398540+00:00 stderr F I1208 17:44:27.395674 1 store.go:1663] "Monitoring resource count at path" resource="imagestreams.image.openshift.io" path="//imagestreams" 2025-12-08T17:44:27.403420717+00:00 stderr F I1208 17:44:27.403366 1 handler.go:288] Adding GroupVersion image.openshift.io v1 to ResourceManager 2025-12-08T17:44:27.403420717+00:00 stderr F W1208 17:44:27.403386 1 genericapiserver.go:792] Skipping API image.openshift.io/1.0 because it has no resources. 2025-12-08T17:44:27.403420717+00:00 stderr F W1208 17:44:27.403393 1 genericapiserver.go:792] Skipping API image.openshift.io/pre012 because it has no resources. 2025-12-08T17:44:27.405907194+00:00 stderr F I1208 17:44:27.403612 1 maxinflight.go:139] "Initialized nonMutatingChan" len=3000 2025-12-08T17:44:27.405907194+00:00 stderr F I1208 17:44:27.403625 1 maxinflight.go:145] "Initialized mutatingChan" len=1500 2025-12-08T17:44:27.409980776+00:00 stderr F I1208 17:44:27.406954 1 handler.go:288] Adding GroupVersion quota.openshift.io v1 to ResourceManager 2025-12-08T17:44:27.409980776+00:00 stderr F I1208 17:44:27.407015 1 maxinflight.go:139] "Initialized nonMutatingChan" len=3000 2025-12-08T17:44:27.409980776+00:00 stderr F I1208 17:44:27.407022 1 maxinflight.go:145] "Initialized mutatingChan" len=1500 2025-12-08T17:44:27.433504777+00:00 stderr F I1208 17:44:27.429170 1 cacher.go:469] cacher (imagestreams.image.openshift.io): initialized 2025-12-08T17:44:27.433504777+00:00 stderr F I1208 17:44:27.429239 1 reflector.go:430] "Caches populated" type="*image.ImageStream" reflector="storage/cacher.go:/imagestreams" 2025-12-08T17:44:27.508044941+00:00 stderr F I1208 17:44:27.507113 1 cacher.go:469] cacher (images.image.openshift.io): initialized 2025-12-08T17:44:27.508044941+00:00 stderr F I1208 17:44:27.507157 1 reflector.go:430] "Caches populated" type="*image.Image" reflector="storage/cacher.go:/images" 2025-12-08T17:44:27.930554885+00:00 stderr F I1208 17:44:27.930503 1 server.go:50] Starting master on 0.0.0.0:8443 (v0.0.0-master+$Format:%H$) 2025-12-08T17:44:27.930737480+00:00 stderr F I1208 17:44:27.930699 1 genericapiserver.go:551] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.937475 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.937505 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.937537 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.937543 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.937556 1 
configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.937561 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.938483 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"api.openshift-apiserver.svc\" [serving] validServingFor=[api.openshift-apiserver.svc,api.openshift-apiserver.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:44:27.938449121 +0000 UTC))" 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.938683 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215866\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:44:27.938660827 +0000 UTC))" 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.938707 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.938736 1 genericapiserver.go:706] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.938805 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:27.939061558+00:00 stderr F I1208 17:44:27.939003 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:27.939213032+00:00 stderr F I1208 17:44:27.939172 1 clusterquotamapping.go:127] Starting ClusterQuotaMappingController controller 2025-12-08T17:44:27.939302264+00:00 stderr F I1208 17:44:27.939275 1 openshift_apiserver.go:603] Using default project node label selector: 2025-12-08T17:44:27.942903142+00:00 stderr F I1208 17:44:27.942847 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.943204340+00:00 stderr F I1208 17:44:27.943169 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.943521659+00:00 stderr F I1208 17:44:27.943489 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.944185287+00:00 stderr F I1208 17:44:27.944153 1 reflector.go:430] "Caches populated" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.944249119+00:00 stderr F I1208 17:44:27.944221 1 reflector.go:430] "Caches populated" type="*v1.LimitRange" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.944404993+00:00 stderr F I1208 17:44:27.944377 1 reflector.go:430] "Caches populated" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.946700025+00:00 stderr F I1208 17:44:27.946652 1 reflector.go:430] "Caches populated" 
type="*v1.ResourceQuota" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.947766395+00:00 stderr F I1208 17:44:27.947745 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.950730756+00:00 stderr F I1208 17:44:27.950370 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.950835978+00:00 stderr F I1208 17:44:27.950723 1 reflector.go:430] "Caches populated" type="*v1.ImageDigestMirrorSet" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.952745671+00:00 stderr F I1208 17:44:27.952695 1 reflector.go:430] "Caches populated" type="*v1.Route" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.953156502+00:00 stderr F I1208 17:44:27.953104 1 reflector.go:430] "Caches populated" type="*v1.ClusterResourceQuota" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.953399198+00:00 stderr F I1208 17:44:27.953359 1 reflector.go:430] "Caches populated" type="*v1alpha1.ImageContentSourcePolicy" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.953837930+00:00 stderr F I1208 17:44:27.953801 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.954648472+00:00 stderr F I1208 17:44:27.954611 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.954928950+00:00 stderr F I1208 17:44:27.954907 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.955141735+00:00 stderr F I1208 17:44:27.955098 1 reflector.go:430] "Caches populated" type="*v1.SecurityContextConstraints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.959449513+00:00 stderr F I1208 17:44:27.959413 1 reflector.go:430] "Caches populated" type="*v1.ImageTagMirrorSet" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.960966575+00:00 stderr F I1208 17:44:27.960919 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.970299729+00:00 stderr F I1208 17:44:27.970209 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.976675213+00:00 stderr F I1208 17:44:27.974466 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.988246108+00:00 stderr F I1208 17:44:27.988182 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:27.990682665+00:00 stderr F I1208 17:44:27.990614 1 reflector.go:430] "Caches populated" type="*v1.ImageStream" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:28.037704858+00:00 stderr F I1208 17:44:28.037643 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:28.037740459+00:00 stderr F I1208 17:44:28.037709 1 
shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:28.037772240+00:00 stderr F I1208 17:44:28.037714 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:28.037927004+00:00 stderr F I1208 17:44:28.037904 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:28.037864392 +0000 UTC))" 2025-12-08T17:44:28.038156500+00:00 stderr F I1208 17:44:28.038133 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"api.openshift-apiserver.svc\" [serving] validServingFor=[api.openshift-apiserver.svc,api.openshift-apiserver.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:44:28.038117869 +0000 UTC))" 2025-12-08T17:44:28.038395427+00:00 stderr F I1208 17:44:28.038316 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215866\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:44:28.038301454 +0000 UTC))" 2025-12-08T17:44:28.038453768+00:00 stderr F I1208 17:44:28.038436 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:28.038424287 +0000 UTC))" 2025-12-08T17:44:28.038461948+00:00 stderr F I1208 17:44:28.038454 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:28.038445798 +0000 UTC))" 2025-12-08T17:44:28.038480599+00:00 stderr F I1208 17:44:28.038466 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:28.038458658 +0000 UTC))" 2025-12-08T17:44:28.038489989+00:00 stderr F I1208 17:44:28.038483 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 
17:44:28.038475949 +0000 UTC))" 2025-12-08T17:44:28.038515390+00:00 stderr F I1208 17:44:28.038497 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:28.038487859 +0000 UTC))" 2025-12-08T17:44:28.038524350+00:00 stderr F I1208 17:44:28.038513 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:28.038503989 +0000 UTC))" 2025-12-08T17:44:28.038533800+00:00 stderr F I1208 17:44:28.038528 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:28.03851814 +0000 UTC))" 2025-12-08T17:44:28.038554881+00:00 stderr F I1208 17:44:28.038540 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:28.03853192 +0000 UTC))" 2025-12-08T17:44:28.038720395+00:00 stderr F I1208 17:44:28.038691 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"api.openshift-apiserver.svc\" [serving] validServingFor=[api.openshift-apiserver.svc,api.openshift-apiserver.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:44:28.038681594 +0000 UTC))" 2025-12-08T17:44:28.038893380+00:00 stderr F I1208 17:44:28.038862 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215866\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:44:28.038852729 +0000 UTC))" 2025-12-08T17:44:28.099065862+00:00 stderr F I1208 17:44:28.098987 1 reflector.go:430] "Caches populated" type="*etcd.ImageLayers" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:44:28.473625939+00:00 stderr F I1208 17:44:28.473558 1 healthz.go:280] poststarthook/authorization.openshift.io-bootstrapclusterroles,poststarthook/authorization.openshift.io-ensurenodebootstrap-sa check failed: livez 2025-12-08T17:44:28.473625939+00:00 stderr F [-]poststarthook/authorization.openshift.io-bootstrapclusterroles 
failed: not finished 2025-12-08T17:44:28.473625939+00:00 stderr F [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: not finished 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604205 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.604147273 +0000 UTC))" 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604245 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.604230665 +0000 UTC))" 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604261 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.604250385 +0000 UTC))" 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604277 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.604266366 +0000 UTC))" 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604296 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.604282516 +0000 UTC))" 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604312 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.604301137 +0000 UTC))" 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604331 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.604317317 +0000 UTC))" 
2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604350 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.604337388 +0000 UTC))" 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604367 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.604356098 +0000 UTC))" 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604387 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.604374849 +0000 UTC))" 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604613 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"api.openshift-apiserver.svc\" [serving] validServingFor=[api.openshift-apiserver.svc,api.openshift-apiserver.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:44:30.604595655 +0000 UTC))" 2025-12-08T17:44:30.606164028+00:00 stderr F I1208 17:44:30.604821 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215866\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:44:30.60480066 +0000 UTC))" 2025-12-08T17:44:39.698626734+00:00 stderr F E1208 17:44:39.698560 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:52f9b4df3f7833876ee502a6bff2539491db07e060b213b6a0a8fda0c4a881c1\": unexpected end of JSON input" 2025-12-08T17:44:40.015954423+00:00 stderr F E1208 17:44:40.015867 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:3bc55cb1bafdd281a784ec7950c8e95914079522f152f642e8172869e83b4585\": unexpected end of JSON input" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040138 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.040093123 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040163 1 tlsconfig.go:181] "Loaded client CA" index=1 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.040154435 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040181 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.040167815 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040194 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.040185666 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040208 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.040198396 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040221 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.040212067 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040238 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.040227667 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040276 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.040243037 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040291 1 tlsconfig.go:181] "Loaded client CA" index=8 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.040280688 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040306 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.040297309 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040327 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.040315689 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040539 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"api.openshift-apiserver.svc\" [serving] validServingFor=[api.openshift-apiserver.svc,api.openshift-apiserver.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:45:16.040527305 +0000 UTC))" 2025-12-08T17:45:16.041975046+00:00 stderr F I1208 17:45:16.040693 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215866\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:45:16.040674959 +0000 UTC))" 2025-12-08T17:46:05.324112953+00:00 stderr F E1208 17:46:05.324055 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.324176515+00:00 stderr F E1208 17:46:05.324147 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.333534236+00:00 stderr F E1208 17:46:05.333451 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.333584557+00:00 stderr F E1208 17:46:05.333534 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.340090743+00:00 stderr F E1208 17:46:05.340032 1 webhook.go:269] Failed to make webhook authorizer request: Post 
"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.340133765+00:00 stderr F E1208 17:46:05.340092 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.354041132+00:00 stderr F E1208 17:46:05.350158 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.354041132+00:00 stderr F E1208 17:46:05.350225 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.381996041+00:00 stderr F E1208 17:46:05.381315 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.381996041+00:00 stderr F E1208 17:46:05.381383 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.385250539+00:00 stderr F E1208 17:46:05.385171 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.385250539+00:00 stderr F E1208 17:46:05.385201 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.391317361+00:00 stderr F E1208 17:46:05.391258 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.391348512+00:00 stderr F E1208 17:46:05.391330 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.395604419+00:00 stderr F E1208 17:46:05.395568 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.395624599+00:00 stderr F E1208 17:46:05.395606 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:05.399231908+00:00 stderr F E1208 17:46:05.399188 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.399251309+00:00 stderr F E1208 17:46:05.399230 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: 
connection refused" 2025-12-08T17:46:06.314825231+00:00 stderr F E1208 17:46:06.313644 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.314825231+00:00 stderr F E1208 17:46:06.313714 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.320647675+00:00 stderr F E1208 17:46:06.319299 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.320647675+00:00 stderr F E1208 17:46:06.319343 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.324026667+00:00 stderr F E1208 17:46:06.323844 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.324026667+00:00 stderr F E1208 17:46:06.323944 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.331592154+00:00 stderr F E1208 17:46:06.331329 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.331592154+00:00 stderr F E1208 17:46:06.331394 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.336102879+00:00 stderr F E1208 17:46:06.336067 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.336129050+00:00 stderr F E1208 17:46:06.336107 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.339663005+00:00 stderr F E1208 17:46:06.339620 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.339663005+00:00 stderr F E1208 17:46:06.339655 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.349239303+00:00 stderr F E1208 17:46:06.349208 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.349302015+00:00 stderr F E1208 17:46:06.349275 1 errors.go:83] 
"Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.354144321+00:00 stderr F E1208 17:46:06.354105 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.354173472+00:00 stderr F E1208 17:46:06.354160 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.358908853+00:00 stderr F E1208 17:46:06.358237 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.358908853+00:00 stderr F E1208 17:46:06.358289 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.367975356+00:00 stderr F E1208 17:46:06.365276 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.367975356+00:00 stderr F E1208 17:46:06.365319 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.368974816+00:00 stderr F E1208 17:46:06.368919 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.368974816+00:00 stderr F E1208 17:46:06.368960 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.372664336+00:00 stderr F E1208 17:46:06.372307 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.372664336+00:00 stderr F E1208 17:46:06.372354 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.376746879+00:00 stderr F E1208 17:46:06.376675 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.376791161+00:00 stderr F E1208 17:46:06.376778 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.385920944+00:00 stderr F E1208 17:46:06.385494 1 webhook.go:269] Failed to make webhook authorizer request: Post 
"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.385920944+00:00 stderr F E1208 17:46:06.385553 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.398915455+00:00 stderr F E1208 17:46:06.398514 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.398915455+00:00 stderr F E1208 17:46:06.398593 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.410664587+00:00 stderr F E1208 17:46:06.410610 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.410707658+00:00 stderr F E1208 17:46:06.410663 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.414434050+00:00 stderr F E1208 17:46:06.414381 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.414494152+00:00 stderr F E1208 17:46:06.414457 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.418056948+00:00 stderr F E1208 17:46:06.418021 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.418072089+00:00 stderr F E1208 17:46:06.418064 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:06.884566871+00:00 stderr F E1208 17:46:06.884447 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.884749977+00:00 stderr F E1208 17:46:06.884719 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:12.164601766+00:00 stderr F E1208 17:46:12.164502 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:12.164781051+00:00 stderr F E1208 17:46:12.164729 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: 
connection refused" 2025-12-08T17:46:12.491223339+00:00 stderr F E1208 17:46:12.491117 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:12.491413145+00:00 stderr F E1208 17:46:12.491343 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:12.862344989+00:00 stderr F E1208 17:46:12.862263 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:12.862486263+00:00 stderr F E1208 17:46:12.862440 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:12.924628128+00:00 stderr F E1208 17:46:12.924569 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:12.924758362+00:00 stderr F E1208 17:46:12.924731 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:13.591723751+00:00 stderr F E1208 17:46:13.591630 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:13.591908467+00:00 stderr F E1208 17:46:13.591838 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:19.904682658+00:00 stderr F E1208 17:46:19.904616 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:19.904750340+00:00 stderr F E1208 17:46:19.904723 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.275391903+00:00 stderr F E1208 17:46:25.275254 1 webhook.go:269] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.275663601+00:00 stderr F E1208 17:46:25.275388 1 errors.go:83] "Unhandled Error" err="Post \"https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:06.323744021+00:00 stderr F I1208 17:47:06.323595 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:08.068711872+00:00 stderr F I1208 17:47:08.068641 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" 
reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:09.092127208+00:00 stderr F I1208 17:47:09.092057 1 reflector.go:430] "Caches populated" type="*v1.LimitRange" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:09.256936156+00:00 stderr F I1208 17:47:09.253290 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:10.296306615+00:00 stderr F I1208 17:47:10.296217 1 reflector.go:430] "Caches populated" type="*v1.ImageDigestMirrorSet" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:12.555964956+00:00 stderr F I1208 17:47:12.555909 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:16.950434221+00:00 stderr F I1208 17:47:16.950354 1 reflector.go:430] "Caches populated" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:18.399182666+00:00 stderr F I1208 17:47:18.398715 1 reflector.go:430] "Caches populated" type="*v1.ClusterResourceQuota" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:20.118145898+00:00 stderr F I1208 17:47:20.118041 1 reflector.go:430] "Caches populated" type="*v1.Ingress" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:36.182391835+00:00 stderr F I1208 17:47:36.182321 1 reflector.go:430] "Caches populated" type="*v1.ImageTagMirrorSet" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:38.163929811+00:00 stderr F I1208 17:47:38.163844 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:38.977855963+00:00 stderr F I1208 17:47:38.977758 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:40.274218031+00:00 stderr F I1208 17:47:40.274149 1 reflector.go:430] "Caches populated" type="*v1.ResourceQuota" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:40.387002302+00:00 stderr F I1208 17:47:40.386922 1 reflector.go:430] "Caches populated" type="*v1.SecurityContextConstraints" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:45.615844280+00:00 stderr F I1208 17:47:45.615757 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:47.853132077+00:00 stderr F I1208 17:47:47.852869 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:51.830127510+00:00 stderr F I1208 17:47:51.830032 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:52.841070042+00:00 stderr F I1208 17:47:52.840934 1 reflector.go:430] "Caches populated" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:53.522586362+00:00 stderr F I1208 17:47:53.522482 1 reflector.go:430] "Caches populated" type="*v1alpha1.ImageContentSourcePolicy" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:59.673786910+00:00 stderr F I1208 17:47:59.673738 1 reflector.go:430] "Caches 
populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:00.185999187+00:00 stderr F I1208 17:48:00.185933 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:53:26.685129626+00:00 stderr F E1208 17:53:26.685063 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:4f35566977c35306a8f2102841ceb7fa10a6d9ac47c079131caed5655140f9b2\": unexpected end of JSON input" 2025-12-08T17:53:27.304971320+00:00 stderr F E1208 17:53:27.304930 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:70a21b3f93c05843ce9d07f125b1464436caf01680bb733754a2a5df5bc3b11b\": unexpected end of JSON input" 2025-12-08T17:53:27.309061560+00:00 stderr F E1208 17:53:27.309035 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:7201e059b92acc55fe9fe1cc390d44e92f0e2af297fbe52b3f1bb56327f59624\": unexpected end of JSON input" 2025-12-08T17:53:27.311933519+00:00 stderr F E1208 17:53:27.311902 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:ec784f172735873a5893504e07c57dcbd56b7b049a395c5629c6058dbfac21a3\": unexpected end of JSON input" 2025-12-08T17:53:27.319211057+00:00 stderr F I1208 17:53:27.318925 1 trace.go:236] Trace[302812914]: "Create" accept:application/json, */*,audit-id:af20795e-8b64-464c-9398-dd706bcc12f5,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:26.275) (total time: 1043ms): 2025-12-08T17:53:27.319211057+00:00 stderr F Trace[302812914]: [1.043539894s] [1.043539894s] END 2025-12-08T17:53:27.408169686+00:00 stderr F E1208 17:53:27.408102 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:5dfcc5b000a1fab4be66bbd43e4db44b61176e2bcba9c24f6fe887dea9b7fd49\": unexpected end of JSON input" 2025-12-08T17:53:27.411822335+00:00 stderr F E1208 17:53:27.411776 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:b998c58c88dd98365531bacc631dc92deb73de17cd3b6f86466f421c409f8583\": unexpected end of JSON input" 2025-12-08T17:53:27.414283002+00:00 stderr F E1208 17:53:27.414246 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:ee797c115858fef35cad6ce8a13fc15b482d7672e37f485cd65579f009d51f0d\": unexpected end of JSON input" 2025-12-08T17:53:27.420156971+00:00 stderr F I1208 17:53:27.420102 1 trace.go:236] Trace[1808335229]: "Create" accept:application/json, */*,audit-id:3c6c2ca2-e269-478f-9feb-3053e8d09af1,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:26.552) (total time: 867ms): 2025-12-08T17:53:27.420156971+00:00 stderr F Trace[1808335229]: [867.093346ms] [867.093346ms] END 2025-12-08T17:53:28.080012134+00:00 stderr F E1208 17:53:28.079925 1 strategy.go:60] "Unhandled Error" err="unable to 
parse manifest for \"sha256:55dc61c31ea50a8f7a45e993a9b3220097974948b5cd1ab3f317e7702e8cb6fc\": unexpected end of JSON input" 2025-12-08T17:53:28.987861408+00:00 stderr F E1208 17:53:28.987778 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:a8e4081414cfa644e212ded354dfee12706e63afb19a27c0c0ae2c8c64e56ca6\": unexpected end of JSON input" 2025-12-08T17:53:28.991298031+00:00 stderr F E1208 17:53:28.991257 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:38c7e4f7dea04bb536f05d78e0107ebc2a3607cf030db7f5c249f13ce1f52d59\": unexpected end of JSON input" 2025-12-08T17:53:28.994655882+00:00 stderr F E1208 17:53:28.994612 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:d2f17aaf2f871fda5620466d69ac67b9c355c0bae5912a1dbef9a51ca8813e50\": unexpected end of JSON input" 2025-12-08T17:53:28.998009803+00:00 stderr F E1208 17:53:28.997988 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:e4be2fb7216f432632819b2441df42a5a0063f7f473c2923ca6912b2d64b7494\": unexpected end of JSON input" 2025-12-08T17:53:29.001047146+00:00 stderr F E1208 17:53:29.001004 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:14de89e89efc97aee3b50141108b7833708c3a93ad90bf89940025ab5267ba86\": unexpected end of JSON input" 2025-12-08T17:53:29.003542464+00:00 stderr F E1208 17:53:29.003507 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:f438230ed2c2e609d0d7dbc430ccf1e9bad2660e6410187fd6e9b14a2952e70b\": unexpected end of JSON input" 2025-12-08T17:53:29.006161235+00:00 stderr F E1208 17:53:29.006141 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:f953734d89252219c3dcd8f703ba8b58c9c8a0f5dfa9425c9e56ec0834f7d288\": unexpected end of JSON input" 2025-12-08T17:53:29.009169597+00:00 stderr F E1208 17:53:29.009150 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:e4223a60b887ec24cad7dd70fdb6c3f2c107fb7118331be6f45d626219cfe7f3\": unexpected end of JSON input" 2025-12-08T17:53:29.015753907+00:00 stderr F I1208 17:53:29.014314 1 trace.go:236] Trace[1754543681]: "Create" accept:application/json, */*,audit-id:45b56f27-c15b-43cc-b8e2-fe6550c75761,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:26.265) (total time: 2749ms): 2025-12-08T17:53:29.015753907+00:00 stderr F Trace[1754543681]: [2.74917079s] [2.74917079s] END 2025-12-08T17:53:29.025155822+00:00 stderr F E1208 17:53:29.025135 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:df8858f0c01ae1657a14234a94f6785cbb2fba7f12c9d0325f427a3f1284481b\": unexpected end of JSON input" 2025-12-08T17:53:29.030545108+00:00 stderr F E1208 17:53:29.030501 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:9b5d2fc574a13613f18fa983ac2901593c1e812836e918095bc3d15b6cc4ba57\": unexpected end of JSON input" 2025-12-08T17:53:29.033134449+00:00 stderr F E1208 17:53:29.033105 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:57ab1f0ad24e02143978fc79c5219a02c4d6a5a27225ee5454c85a47839b6ddc\": unexpected end of JSON input" 
2025-12-08T17:53:29.038744462+00:00 stderr F I1208 17:53:29.038666 1 trace.go:236] Trace[2031175986]: "Create" accept:application/json, */*,audit-id:9399d575-74b2-4ade-a7d0-e4235b89c5a2,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:27.834) (total time: 1204ms): 2025-12-08T17:53:29.038744462+00:00 stderr F Trace[2031175986]: [1.204228354s] [1.204228354s] END 2025-12-08T17:53:30.171198343+00:00 stderr F E1208 17:53:30.171129 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:7bcc365e0ba823ed020ee6e6c3e0c23be5871c8dea3f7f1a65029002c83f9e55\": unexpected end of JSON input" 2025-12-08T17:53:30.176051955+00:00 stderr F E1208 17:53:30.175985 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:6a9e81b2eea2f32f2750909b6aa037c2c2e68be3bc9daf3c7a3163c9e1df379f\": unexpected end of JSON input" 2025-12-08T17:53:30.179077127+00:00 stderr F E1208 17:53:30.178981 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:00cf28cf9a6c427962f922855a6cc32692c760764ce2ce7411cf605dd510367f\": unexpected end of JSON input" 2025-12-08T17:53:30.182702786+00:00 stderr F E1208 17:53:30.182617 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:2cee344e4cfcfdc9a117fd82baa6f2d5daa7eeed450e02cd5d5554b424410439\": unexpected end of JSON input" 2025-12-08T17:53:30.186281224+00:00 stderr F E1208 17:53:30.186191 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:aa02a20c2edf83a009746b45a0fd2e0b4a2b224fdef1581046f6afef38c0bee2\": unexpected end of JSON input" 2025-12-08T17:53:30.188379870+00:00 stderr F E1208 17:53:30.188301 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:496e23be70520863bce6f7cdc54d280aca2c133d06e992795c4dcbde1a9dd1ab\": unexpected end of JSON input" 2025-12-08T17:53:30.189271405+00:00 stderr F E1208 17:53:30.189215 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:59b88fb0c467ca43bf3c1af6bfd8777577638dd8079f995cdb20b6f4e20ce0b6\": unexpected end of JSON input" 2025-12-08T17:53:30.192335358+00:00 stderr F E1208 17:53:30.192284 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:603d10af5e3476add5b5726fdef893033869ae89824ee43949a46c9f004ef65d\": unexpected end of JSON input" 2025-12-08T17:53:30.192442641+00:00 stderr F E1208 17:53:30.192408 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:022488b1bf697b7dd8c393171a3247bef4ea545a9ab828501e72168f2aac9415\": unexpected end of JSON input" 2025-12-08T17:53:30.195386671+00:00 stderr F E1208 17:53:30.195357 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:eed7e29bf583e4f01e170bb9f22f2a78098bf15243269b670c307caa6813b783\": unexpected end of JSON input" 2025-12-08T17:53:30.195781652+00:00 stderr F E1208 17:53:30.195742 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:7164a06e9ba98a3ce9991bd7019512488efe30895175bb463e255f00eb9421fd\": unexpected end of JSON input" 2025-12-08T17:53:30.198624589+00:00 stderr F E1208 17:53:30.198582 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for 
\"sha256:b80a514f136f738736d6bf654dc3258c13b04a819e001dd8a39ef2f7475fd9d9\": unexpected end of JSON input" 2025-12-08T17:53:30.200195982+00:00 stderr F E1208 17:53:30.200151 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:81684e422367a075ac113e69ea11d8721416ce4bedea035e25313c5e726fd7d1\": unexpected end of JSON input" 2025-12-08T17:53:30.201582659+00:00 stderr F E1208 17:53:30.201546 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:7ef75cdbc399425105060771cb8e700198cc0bddcfb60bf4311bf87ea62fd440\": unexpected end of JSON input" 2025-12-08T17:53:30.203858281+00:00 stderr F E1208 17:53:30.203806 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:b838fa18dab68d43a19f0c329c3643850691b8f9915823c4f8d25685eb293a11\": unexpected end of JSON input" 2025-12-08T17:53:30.207082119+00:00 stderr F E1208 17:53:30.207043 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:8a5b580b76c2fc2dfe55d13bb0dd53e8c71d718fc1a3773264b1710f49060222\": unexpected end of JSON input" 2025-12-08T17:53:30.210550933+00:00 stderr F E1208 17:53:30.210474 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:2f59ad75b66a3169b0b03032afb09aa3cfa531dbd844e3d3a562246e7d09c282\": unexpected end of JSON input" 2025-12-08T17:53:30.213628646+00:00 stderr F I1208 17:53:30.213598 1 trace.go:236] Trace[830131108]: "Create" accept:application/json, */*,audit-id:53fef59e-c5d8-4c76-a004-2e253ef148b9,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:26.434) (total time: 3779ms): 2025-12-08T17:53:30.213628646+00:00 stderr F Trace[830131108]: [3.779366672s] [3.779366672s] END 2025-12-08T17:53:30.217143583+00:00 stderr F E1208 17:53:30.217090 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:9d759db3bb650e5367216ce261779c5a58693fc7ae10f21cd264011562bd746d\": unexpected end of JSON input" 2025-12-08T17:53:30.231082912+00:00 stderr F E1208 17:53:30.230994 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:bf5e518dba2aa935829d9db88d933a264e54ffbfa80041b41287fd70c1c35ba5\": unexpected end of JSON input" 2025-12-08T17:53:30.234694410+00:00 stderr F E1208 17:53:30.234645 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:f7ca08a8dda3610fcc10cc1fe5f5d0b9f8fc7a283b01975d0fe2c1e77ae06193\": unexpected end of JSON input" 2025-12-08T17:53:30.242212054+00:00 stderr F I1208 17:53:30.242186 1 trace.go:236] Trace[1809520582]: "Create" accept:application/json, */*,audit-id:9f2eb7c9-7a97-4875-aa33-4245f83a279c,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:26.394) (total time: 3847ms): 2025-12-08T17:53:30.242212054+00:00 stderr F Trace[1809520582]: [3.847377611s] [3.847377611s] END 2025-12-08T17:53:30.499798717+00:00 stderr F E1208 
17:53:30.499730 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:d186c94f8843f854d77b2b05d10efb0d272f88a4bf4f1d8ebe304428b9396392\": unexpected end of JSON input" 2025-12-08T17:53:30.505972156+00:00 stderr F E1208 17:53:30.505928 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:e37aeaeb0159194a9855350e13e399470f39ce340d6381069933742990741fb8\": unexpected end of JSON input" 2025-12-08T17:53:30.510537450+00:00 stderr F E1208 17:53:30.510446 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:f89a54e6d1340be8ddd84a602cb4f1f27c1983417f655941645bf11809d49f18\": unexpected end of JSON input" 2025-12-08T17:53:30.515023162+00:00 stderr F E1208 17:53:30.514980 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:739fac452e78a21a16b66e0451b85590b9e48ec7a1ed3887fbb9ed85cf564275\": unexpected end of JSON input" 2025-12-08T17:53:30.519432161+00:00 stderr F E1208 17:53:30.519393 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:0eea1d20aaa26041edf26b925fb204d839e5b93122190191893a0299b2e1b589\": unexpected end of JSON input" 2025-12-08T17:53:30.523471842+00:00 stderr F E1208 17:53:30.523434 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:3b94ccfa422b8ba0014302a3cfc6916b69f0f5a9dfd757b6704049834d4ff0ae\": unexpected end of JSON input" 2025-12-08T17:53:30.527389868+00:00 stderr F E1208 17:53:30.527338 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:46a4e73ddb085d1f36b39903ea13ba307bb958789707e9afde048764b3e3cae2\": unexpected end of JSON input" 2025-12-08T17:53:30.533097893+00:00 stderr F E1208 17:53:30.533061 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:bcb0e15cc9d2d3449f0b1acac7b0275035a80e1b3b835391b5464f7bf4553b89\": unexpected end of JSON input" 2025-12-08T17:53:30.543248349+00:00 stderr F I1208 17:53:30.542618 1 trace.go:236] Trace[1175727144]: "Create" accept:application/json, */*,audit-id:f50c099b-5b1d-4977-802c-34274821c38a,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:27.714) (total time: 2827ms): 2025-12-08T17:53:30.543248349+00:00 stderr F Trace[1175727144]: [2.827910102s] [2.827910102s] END 2025-12-08T17:53:30.681750515+00:00 stderr F E1208 17:53:30.681669 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:e851770fd181ef49193111f7afcdbf872ad23f3a8234e0e07a742c4ca2882c3d\": unexpected end of JSON input" 2025-12-08T17:53:30.685416295+00:00 stderr F E1208 17:53:30.685389 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:ce5c0becf829aca80734b4caf3ab6b76cb00f7d78f4e39fb136636a764dea7f6\": unexpected end of JSON input" 2025-12-08T17:53:30.689067664+00:00 stderr F E1208 17:53:30.689009 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:3f00540ce2a3a01d2a147a7d73825fe78697be213a050bd09edae36266d6bc40\": unexpected end of JSON input" 2025-12-08T17:53:30.692838706+00:00 stderr F E1208 17:53:30.692805 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for 
\"sha256:868224c3b7c309b9e04003af70a5563af8e4c662f0c53f2a7606e0573c9fad85\": unexpected end of JSON input" 2025-12-08T17:53:30.697670638+00:00 stderr F E1208 17:53:30.697642 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:0669a28577b41bb05c67492ef18a1d48a299ac54d1500df8f9f8f760ce4be24b\": unexpected end of JSON input" 2025-12-08T17:53:30.701012409+00:00 stderr F E1208 17:53:30.700964 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:9036a59a8275f9c205ef5fc674f38c0495275a1a7912029f9a784406bb00b1f5\": unexpected end of JSON input" 2025-12-08T17:53:30.705169232+00:00 stderr F E1208 17:53:30.705053 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:425e2c7c355bea32be238aa2c7bdd363b6ab3709412bdf095efe28a8f6c07d84\": unexpected end of JSON input" 2025-12-08T17:53:30.709317574+00:00 stderr F E1208 17:53:30.709233 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:67fee4b64b269f5666a1051d806635b675903ef56d07b7cc019d3d59ff1aa97c\": unexpected end of JSON input" 2025-12-08T17:53:30.713559770+00:00 stderr F E1208 17:53:30.713476 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:b85cbdbc289752c91ac7f468cffef916fe9ab01865f3e32cfcc44ccdd633b168\": unexpected end of JSON input" 2025-12-08T17:53:30.717199629+00:00 stderr F E1208 17:53:30.717160 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:663eb81388ae8f824e7920c272f6d2e2274cf6c140d61416607261cdce9d50e2\": unexpected end of JSON input" 2025-12-08T17:53:30.727990542+00:00 stderr F I1208 17:53:30.727935 1 trace.go:236] Trace[1566642012]: "Create" accept:application/json, */*,audit-id:daeee14e-5c79-47ec-a789-799f8fb2f78a,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:26.955) (total time: 3772ms): 2025-12-08T17:53:30.727990542+00:00 stderr F Trace[1566642012]: [3.772148387s] [3.772148387s] END 2025-12-08T17:53:30.850934055+00:00 stderr F E1208 17:53:30.850826 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:5fb3543c0d42146f0506c1ea4d09575131da6a2f27885729b7cfce13a0fa90e3\": unexpected end of JSON input" 2025-12-08T17:53:30.855940412+00:00 stderr F E1208 17:53:30.855868 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:1d68b58a73f4cf15fcd886ab39fddf18be923b52b24cb8ec3ab1da2d3e9bd5f6\": unexpected end of JSON input" 2025-12-08T17:53:30.861534154+00:00 stderr F E1208 17:53:30.861494 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:7de877b0e748cdb47cb702400f3ddaa3c3744a022887e2213c2bb27775ab4b25\": unexpected end of JSON input" 2025-12-08T17:53:30.866369805+00:00 stderr F E1208 17:53:30.866307 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:af9c08644ca057d83ef4b7d8de1489f01c5a52ff8670133b8a09162831b7fb34\": unexpected end of JSON input" 2025-12-08T17:53:30.870588760+00:00 stderr F E1208 17:53:30.870517 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:b053401886c06581d3c296855525cc13e0613100a596ed007bb69d5f8e972346\": unexpected end of JSON input" 2025-12-08T17:53:30.875036290+00:00 stderr 
F E1208 17:53:30.874972 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:61555b923dabe4ff734279ed1bdb9eb6d450c760e1cc04463cf88608ac8d1338\": unexpected end of JSON input" 2025-12-08T17:53:30.878792333+00:00 stderr F E1208 17:53:30.878747 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:9ab26cb4005e9b60fd6349950957bbd0120efba216036da53c547c6f1c9e5e7f\": unexpected end of JSON input" 2025-12-08T17:53:30.881772934+00:00 stderr F E1208 17:53:30.881721 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:431753c8a6a8541fdc0edd3385b2c765925d244fdd2347d2baa61303789696be\": unexpected end of JSON input" 2025-12-08T17:53:30.883265174+00:00 stderr F E1208 17:53:30.883180 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:2254dc2f421f496b504aafbbd8ea37e660652c4b6b4f9a0681664b10873be7fe\": unexpected end of JSON input" 2025-12-08T17:53:30.886252315+00:00 stderr F E1208 17:53:30.886142 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:64acf3403b5c2c85f7a28f326c63f1312b568db059c66d90b34e3c59fde3a74b\": unexpected end of JSON input" 2025-12-08T17:53:30.887334035+00:00 stderr F E1208 17:53:30.887214 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:e4b1599ba6e88f6df7c4e67d6397371d61b6829d926411184e9855e71e840b8c\": unexpected end of JSON input" 2025-12-08T17:53:30.890923062+00:00 stderr F E1208 17:53:30.890819 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:74051f86b00fb102e34276f03a310c16bc57b9c2a001a56ba66359e15ee48ba6\": unexpected end of JSON input" 2025-12-08T17:53:30.895201069+00:00 stderr F E1208 17:53:30.895104 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:33d4dff40514e91d86b42e90b24b09a5ca770d9f67657c936363d348cd33d188\": unexpected end of JSON input" 2025-12-08T17:53:30.900435851+00:00 stderr F E1208 17:53:30.900361 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:7711108ef60ef6f0536bfa26914af2afaf6455ce6e4c4abd391e31a2d95d0178\": unexpected end of JSON input" 2025-12-08T17:53:30.900484412+00:00 stderr F I1208 17:53:30.900431 1 trace.go:236] Trace[1100011299]: "Create" accept:application/json, */*,audit-id:8cfd47dc-e986-4eae-bac1-711da9268482,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:27.637) (total time: 3263ms): 2025-12-08T17:53:30.900484412+00:00 stderr F Trace[1100011299]: [3.263147665s] [3.263147665s] END 2025-12-08T17:53:30.904123992+00:00 stderr F E1208 17:53:30.904046 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:b163564be6ed5b80816e61a4ee31e42f42dbbf345253daac10ecc9fadf31baa3\": unexpected end of JSON input" 2025-12-08T17:53:30.907411901+00:00 stderr F E1208 17:53:30.907363 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:920ff7e5efc777cb523669c425fd7b553176c9f4b34a85ceddcb548c2ac5f78a\": unexpected end of JSON input" 2025-12-08T17:53:30.911014659+00:00 stderr F E1208 17:53:30.910953 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for 
\"sha256:32a5e806bd88b40568d46864fd313541498e38fabfc5afb5f3bdfe052c4b4c5f\": unexpected end of JSON input" 2025-12-08T17:53:30.916432956+00:00 stderr F E1208 17:53:30.916333 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:229ee7b88c5f700c95d557d0b37b8f78dbb6b125b188c3bf050cfdb32aec7962\": unexpected end of JSON input" 2025-12-08T17:53:30.919771847+00:00 stderr F E1208 17:53:30.919708 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:78bf175cecb15524b2ef81bff8cc11acdf7c0f74c08417f0e443483912e4878a\": unexpected end of JSON input" 2025-12-08T17:53:30.928658259+00:00 stderr F I1208 17:53:30.928566 1 trace.go:236] Trace[1593996513]: "Create" accept:application/json, */*,audit-id:8c8932f3-5c9d-4a76-95bd-a42c98d8ac98,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:26.994) (total time: 3933ms): 2025-12-08T17:53:30.928658259+00:00 stderr F Trace[1593996513]: [3.933898594s] [3.933898594s] END 2025-12-08T17:53:32.519425622+00:00 stderr F E1208 17:53:32.519223 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:2ae058ee7239213fb495491112be8cc7e6d6661864fd399deb27f23f50f05eb4\": unexpected end of JSON input" 2025-12-08T17:53:32.524325585+00:00 stderr F E1208 17:53:32.524233 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:db3f5192237bfdab2355304f17916e09bc29d6d529fdec48b09a08290ae35905\": unexpected end of JSON input" 2025-12-08T17:53:32.528929621+00:00 stderr F E1208 17:53:32.528844 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:b4cb02a4e7cb915b6890d592ed5b4ab67bcef19bf855029c95231f51dd071352\": unexpected end of JSON input" 2025-12-08T17:53:32.533566556+00:00 stderr F E1208 17:53:32.533480 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:fa9556628c15b8eb22cafccb737b3fbcecfd681a5c2cfea3302dd771c644a7db\": unexpected end of JSON input" 2025-12-08T17:53:32.538965543+00:00 stderr F E1208 17:53:32.538866 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:a0a6db2dcdb3d49e36bd0665e3e00f242a690391700e42cab14e86b154152bfd\": unexpected end of JSON input" 2025-12-08T17:53:32.542923031+00:00 stderr F E1208 17:53:32.542862 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:e90172ca0f09acf5db1721bd7df304dffd184e00145072132cb71c7f0797adf6\": unexpected end of JSON input" 2025-12-08T17:53:32.547067483+00:00 stderr F E1208 17:53:32.546987 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:421d1f6a10e263677b7687ccea8e4a59058e2e3c80585505eec9a9c2e6f9f40e\": unexpected end of JSON input" 2025-12-08T17:53:32.551151085+00:00 stderr F E1208 17:53:32.551087 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:6c009f430da02bdcff618a7dcd085d7d22547263eeebfb8d6377a4cf6f58769d\": unexpected end of JSON input" 2025-12-08T17:53:32.554947628+00:00 stderr F E1208 17:53:32.554807 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:dc84fed0f6f40975a2277c126438c8aa15c70eeac75981dbaa4b6b853eff61a6\": unexpected end of JSON input" 2025-12-08T17:53:32.557649411+00:00 stderr 
F E1208 17:53:32.557597 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:78af15475eac13d2ff439b33a9c3bdd39147858a824c420e8042fd5f35adce15\": unexpected end of JSON input"
2025-12-08T17:53:32.560792237+00:00 stderr F E1208 17:53:32.560760 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:06bbbf9272d5c5161f444388593e9bd8db793d8a2d95a50b429b3c0301fafcdd\": unexpected end of JSON input"
2025-12-08T17:53:32.563692665+00:00 stderr F E1208 17:53:32.563641 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:caba895933209aa9a4f3121f9ec8e5e8013398ab4f72bd3ff255227aad8d2c3e\": unexpected end of JSON input"
2025-12-08T17:53:32.567538041+00:00 stderr F E1208 17:53:32.567481 1 strategy.go:60] "Unhandled Error" err="unable to parse manifest for \"sha256:dbe9905fe2b20ed30b0e2d64543016fa9c145eeb5a678f720ba9d2055f0c9f88\": unexpected end of JSON input"
2025-12-08T17:53:32.578910570+00:00 stderr F I1208 17:53:32.578818 1 trace.go:236] Trace[1633430139]: "Create" accept:application/json, */*,audit-id:68c25e44-b959-4cb1-bcda-94aefeac62d0,client:10.217.0.62,api-group:image.openshift.io,api-version:v1,name:,subresource:,namespace:openshift,protocol:HTTP/2.0,resource:imagestreamimports,scope:resource,url:/apis/image.openshift.io/v1/namespaces/openshift/imagestreamimports,user-agent:openshift-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/system:serviceaccount:openshift-infra:image-import-controller,verb:POST (08-Dec-2025 17:53:27.240) (total time: 5338ms):
2025-12-08T17:53:32.578910570+00:00 stderr F Trace[1633430139]: [5.338088555s] [5.338088555s] END
2025-12-08T17:57:28.593749743+00:00 stderr F I1208 17:57:28.593222 1 controller.go:667] quota admission added evaluator for: routes.route.openshift.io
2025-12-08T17:57:28.593749743+00:00 stderr F I1208 17:57:28.593503 1 controller.go:667] quota admission added evaluator for: routes.route.openshift.io
[tar entry: home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/fix-audit-permissions/0.log (empty)]
[tar entry: home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver-check-endpoints/0.log]
2025-12-08T17:44:25.176434532+00:00 stderr F W1208 17:44:25.176257 1 cmd.go:257] Using insecure, self-signed certificates
2025-12-08T17:44:25.177348537+00:00 stderr F I1208 17:44:25.177321 1 crypto.go:594] Generating new CA for check-endpoints-signer@1765215865 cert, and key in /tmp/serving-cert-1883038990/serving-signer.crt, /tmp/serving-cert-1883038990/serving-signer.key
2025-12-08T17:44:25.177348537+00:00 stderr F Validity period of the certificate for "check-endpoints-signer@1765215865" is unset, resetting to 43800h0m0s!
2025-12-08T17:44:25.800405281+00:00 stderr F I1208 17:44:25.799893 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
2025-12-08T17:44:25.800405281+00:00 stderr F I1208 17:44:25.800367 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
2025-12-08T17:44:25.800405281+00:00 stderr F I1208 17:44:25.800373 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
2025-12-08T17:44:25.800405281+00:00 stderr F I1208 17:44:25.800378 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
2025-12-08T17:44:25.800405281+00:00 stderr F I1208 17:44:25.800382 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
2025-12-08T17:44:25.802130859+00:00 stderr F I1208 17:44:25.800117 1 observer_polling.go:159] Starting file observer
2025-12-08T17:44:25.829015552+00:00 stderr F I1208 17:44:25.828747 1 builder.go:304] check-endpoints version v0.0.0-unknown-c3d9642-c3d9642
2025-12-08T17:44:25.830266946+00:00 stderr F I1208 17:44:25.829844 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/tmp/serving-cert-1883038990/tls.crt::/tmp/serving-cert-1883038990/tls.key"
2025-12-08T17:44:26.280489237+00:00 stderr F I1208 17:44:26.280159 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController
2025-12-08T17:44:26.288783913+00:00 stderr F I1208 17:44:26.288711 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400
2025-12-08T17:44:26.288783913+00:00 stderr F I1208 17:44:26.288739 1 maxinflight.go:145] "Initialized mutatingChan" len=200
2025-12-08T17:44:26.288783913+00:00 stderr F I1208 17:44:26.288763 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400
2025-12-08T17:44:26.288783913+00:00 stderr F I1208 17:44:26.288771 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200
2025-12-08T17:44:26.298090447+00:00 stderr F I1208 17:44:26.298019 1 secure_serving.go:57] Forcing use of http/1.1 only
2025-12-08T17:44:26.298090447+00:00 stderr F W1208 17:44:26.298047 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.
2025-12-08T17:44:26.298090447+00:00 stderr F W1208 17:44:26.298053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.
2025-12-08T17:44:26.298090447+00:00 stderr F W1208 17:44:26.298057 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.
2025-12-08T17:44:26.298090447+00:00 stderr F W1208 17:44:26.298060 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:26.298090447+00:00 stderr F W1208 17:44:26.298063 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:26.298090447+00:00 stderr F W1208 17:44:26.298066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:44:26.298279112+00:00 stderr F I1208 17:44:26.298248 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:44:26.304063710+00:00 stderr F I1208 17:44:26.303995 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-1883038990/tls.crt::/tmp/serving-cert-1883038990/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"check-endpoints-signer@1765215865\" (2025-12-08 17:44:24 +0000 UTC to 2025-12-08 17:44:25 +0000 UTC (now=2025-12-08 17:44:26.303959397 +0000 UTC))" 2025-12-08T17:44:26.304180253+00:00 stderr F I1208 17:44:26.304157 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215865\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:44:26.304141302 +0000 UTC))" 2025-12-08T17:44:26.304180253+00:00 stderr F I1208 17:44:26.304176 1 secure_serving.go:211] Serving securely on [::]:17698 2025-12-08T17:44:26.304216184+00:00 stderr F I1208 17:44:26.304196 1 genericapiserver.go:696] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:44:26.304224214+00:00 stderr F I1208 17:44:26.304218 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:26.304273485+00:00 stderr F I1208 17:44:26.304251 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:26.304300726+00:00 stderr F I1208 17:44:26.304280 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/tmp/serving-cert-1883038990/tls.crt::/tmp/serving-cert-1883038990/tls.key" 2025-12-08T17:44:26.304372148+00:00 stderr F I1208 17:44:26.304352 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:26.304477741+00:00 stderr F I1208 17:44:26.304443 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:26.304477741+00:00 stderr F I1208 17:44:26.304455 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:26.304477741+00:00 stderr F I1208 17:44:26.304468 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:26.304477741+00:00 stderr F I1208 17:44:26.304473 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:26.305394437+00:00 stderr F I1208 17:44:26.305364 1 base_controller.go:76] Waiting for caches to sync for CheckEndpointsTimeToStart 2025-12-08T17:44:26.310322840+00:00 stderr F I1208 17:44:26.310284 1 reflector.go:430] 
"Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:26.310463794+00:00 stderr F I1208 17:44:26.310446 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:26.310585367+00:00 stderr F I1208 17:44:26.310553 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.406306 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.406376 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.406450 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.406681 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:26.406648758 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.406925 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-1883038990/tls.crt::/tmp/serving-cert-1883038990/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"check-endpoints-signer@1765215865\" (2025-12-08 17:44:24 +0000 UTC to 2025-12-08 17:44:25 +0000 UTC (now=2025-12-08 17:44:26.406908475 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.407101 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215865\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:44:26.40708408 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.407232 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:26.407216714 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.407252 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:26.407240964 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.407270 1 tlsconfig.go:181] "Loaded client CA" index=2 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:26.407257225 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.407296 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:26.407276375 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.407313 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:26.407301566 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.407330 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:26.407319886 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.407352 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:26.407337057 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.407369 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:26.407357527 +0000 UTC))" 2025-12-08T17:44:26.407955133+00:00 stderr F I1208 17:44:26.407550 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-1883038990/tls.crt::/tmp/serving-cert-1883038990/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"check-endpoints-signer@1765215865\" (2025-12-08 17:44:24 +0000 UTC to 2025-12-08 17:44:25 +0000 UTC (now=2025-12-08 17:44:26.407534372 +0000 UTC))" 2025-12-08T17:44:26.424281599+00:00 stderr F I1208 17:44:26.424198 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] 
issuer=\"apiserver-loopback-client-ca@1765215865\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:44:26.424132245 +0000 UTC))" 2025-12-08T17:44:26.586148895+00:00 stderr F I1208 17:44:26.585755 1 reflector.go:430] "Caches populated" type="*v1.CustomResourceDefinition" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:26.606106479+00:00 stderr F I1208 17:44:26.605985 1 base_controller.go:82] Caches are synced for CheckEndpointsTimeToStart 2025-12-08T17:44:26.606106479+00:00 stderr F I1208 17:44:26.606044 1 base_controller.go:119] Starting #1 worker of CheckEndpointsTimeToStart controller ... 2025-12-08T17:44:26.606174731+00:00 stderr F I1208 17:44:26.606156 1 base_controller.go:76] Waiting for caches to sync for CheckEndpointsStop 2025-12-08T17:44:26.606174731+00:00 stderr F I1208 17:44:26.606165 1 base_controller.go:82] Caches are synced for CheckEndpointsStop 2025-12-08T17:44:26.606174731+00:00 stderr F I1208 17:44:26.606169 1 base_controller.go:119] Starting #1 worker of CheckEndpointsStop controller ... 2025-12-08T17:44:26.606683224+00:00 stderr F I1208 17:44:26.606201 1 base_controller.go:181] Shutting down CheckEndpointsTimeToStart ... 2025-12-08T17:44:26.609054299+00:00 stderr F I1208 17:44:26.607227 1 base_controller.go:76] Waiting for caches to sync for check-endpoints 2025-12-08T17:44:26.609054299+00:00 stderr F I1208 17:44:26.607250 1 base_controller.go:123] Shutting down worker of CheckEndpointsTimeToStart controller ... 2025-12-08T17:44:26.609054299+00:00 stderr F I1208 17:44:26.607256 1 base_controller.go:113] All CheckEndpointsTimeToStart workers have been terminated 2025-12-08T17:44:26.610007755+00:00 stderr F I1208 17:44:26.609360 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:26.610007755+00:00 stderr F I1208 17:44:26.609791 1 reflector.go:430] "Caches populated" type="*v1alpha1.PodNetworkConnectivityCheck" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:26.708206613+00:00 stderr F I1208 17:44:26.707934 1 base_controller.go:82] Caches are synced for check-endpoints 2025-12-08T17:44:26.708206613+00:00 stderr F I1208 17:44:26.707959 1 base_controller.go:119] Starting #1 worker of check-endpoints controller ... 
2025-12-08T17:44:30.608678007+00:00 stderr F I1208 17:44:30.608426 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.608395229 +0000 UTC))" 2025-12-08T17:44:30.608678007+00:00 stderr F I1208 17:44:30.608667 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.608656586 +0000 UTC))" 2025-12-08T17:44:30.608700487+00:00 stderr F I1208 17:44:30.608679 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.608672176 +0000 UTC))" 2025-12-08T17:44:30.608700487+00:00 stderr F I1208 17:44:30.608691 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.608684057 +0000 UTC))" 2025-12-08T17:44:30.608722278+00:00 stderr F I1208 17:44:30.608706 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.608694727 +0000 UTC))" 2025-12-08T17:44:30.608729798+00:00 stderr F I1208 17:44:30.608723 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.608714848 +0000 UTC))" 2025-12-08T17:44:30.608750309+00:00 stderr F I1208 17:44:30.608735 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.608727188 +0000 UTC))" 2025-12-08T17:44:30.608757669+00:00 stderr F I1208 17:44:30.608749 1 tlsconfig.go:181] "Loaded client CA" index=7 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.608742488 +0000 UTC))" 2025-12-08T17:44:30.608766169+00:00 stderr F I1208 17:44:30.608761 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.608753829 +0000 UTC))" 2025-12-08T17:44:30.608782349+00:00 stderr F I1208 17:44:30.608773 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.608766379 +0000 UTC))" 2025-12-08T17:44:30.609533750+00:00 stderr F I1208 17:44:30.608982 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-1883038990/tls.crt::/tmp/serving-cert-1883038990/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"check-endpoints-signer@1765215865\" (2025-12-08 17:44:24 +0000 UTC to 2025-12-08 17:44:25 +0000 UTC (now=2025-12-08 17:44:30.608970214 +0000 UTC))" 2025-12-08T17:44:30.609533750+00:00 stderr F I1208 17:44:30.609149 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215865\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:44:30.609137809 +0000 UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.040484 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.040452013 +0000 UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.041037 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.04102357 +0000 UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.041050 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 
2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.04104195 +0000 UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.041063 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.041055501 +0000 UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.041075 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.041067491 +0000 UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.041088 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.041081001 +0000 UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.041101 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.041093322 +0000 UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.041113 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.041105782 +0000 UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.041126 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.041117332 +0000 UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.041139 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.041132163 +0000 
UTC))" 2025-12-08T17:45:16.041327278+00:00 stderr F I1208 17:45:16.041153 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.041145543 +0000 UTC))" 2025-12-08T17:45:16.041417131+00:00 stderr F I1208 17:45:16.041321 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-1883038990/tls.crt::/tmp/serving-cert-1883038990/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"check-endpoints-signer@1765215865\" (2025-12-08 17:44:24 +0000 UTC to 2025-12-08 17:44:25 +0000 UTC (now=2025-12-08 17:45:16.041303958 +0000 UTC))" 2025-12-08T17:45:16.045098813+00:00 stderr F I1208 17:45:16.041452 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215865\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:45:16.041442481 +0000 UTC))" 2025-12-08T17:47:05.235299658+00:00 stderr F I1208 17:47:05.234728 1 reflector.go:430] "Caches populated" type="*v1.CustomResourceDefinition" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:10.603599587+00:00 stderr F I1208 17:47:10.603026 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:17.267561293+00:00 stderr F I1208 17:47:17.266953 1 reflector.go:430] "Caches populated" type="*v1alpha1.PodNetworkConnectivityCheck" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:30.278950541+00:00 stderr F I1208 17:47:30.277844 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:38.110633734+00:00 stderr F I1208 17:47:38.110117 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:50.950767858+00:00 stderr F I1208 17:47:50.950254 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" ././@LongLink0000644000000000000000000000025300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd-operato0000755000175000017500000000000015115611514033036 5ustar zuulzuul././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f/etcd-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd-operato0000755000175000017500000000000015115611521033034 5ustar zuulzuul././@LongLink0000644000000000000000000000027600000000000011610 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f/etcd-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd-operato0000644000175000017500000036367515115611514033064 0ustar zuulzuul2025-12-08T17:44:22.773904018+00:00 stderr F I1208 17:44:22.773341 1 profiler.go:21] Starting profiling endpoint at http://127.0.0.1:6060/debug/pprof/ 2025-12-08T17:44:22.775890983+00:00 stderr F I1208 17:44:22.774687 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:22.787032477+00:00 stderr F I1208 17:44:22.786172 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:44:22.787032477+00:00 stderr F I1208 17:44:22.786225 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:22.788044854+00:00 stderr F I1208 17:44:22.787730 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:22.990666290+00:00 stderr F I1208 17:44:22.989071 1 builder.go:304] openshift-cluster-etcd-operator version 4.20.0-202510211040.p2.g49412ac.assembly.stream.el9-49412ac-49412ac13833adf0da4c44b9c9a0e91f8ac04e4d 2025-12-08T17:44:24.859031984+00:00 stderr F I1208 17:44:24.857770 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:24.859031984+00:00 stderr F W1208 17:44:24.858238 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:24.859031984+00:00 stderr F W1208 17:44:24.858245 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:24.859031984+00:00 stderr F W1208 17:44:24.858251 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:24.859031984+00:00 stderr F W1208 17:44:24.858254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:24.859031984+00:00 stderr F W1208 17:44:24.858257 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:24.859031984+00:00 stderr F W1208 17:44:24.858260 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 
2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.861641 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.861671 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.861805 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.861888 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.861932 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.862064 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.862292 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.862439 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.862663 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.862744 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:24.864953475+00:00 stderr F I1208 17:44:24.863083 1 leaderelection.go:257] attempting to acquire leader lease openshift-etcd-operator/openshift-cluster-etcd-operator-lock... 
2025-12-08T17:44:24.873121598+00:00 stderr F I1208 17:44:24.873059 1 leaderelection.go:271] successfully acquired lease openshift-etcd-operator/openshift-cluster-etcd-operator-lock 2025-12-08T17:44:24.876971013+00:00 stderr F I1208 17:44:24.873468 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-etcd-operator", Name:"openshift-cluster-etcd-operator-lock", UID:"96e3a435-a12f-40b1-a2b8-68a65c03c2b3", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37518", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' etcd-operator-69b85846b6-k26tc_30f42aba-4c9b-483d-a2c5-e3bc34684f81 became leader 2025-12-08T17:44:24.918197567+00:00 stderr F I1208 17:44:24.916797 1 starter.go:195] recorded cluster versions: map[etcd:4.20.1 operator:4.20.1 raw-internal:4.20.1] 2025-12-08T17:44:24.966136296+00:00 stderr F I1208 17:44:24.965072 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:24.966136296+00:00 stderr F I1208 17:44:24.965481 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:24.966136296+00:00 stderr F I1208 17:44:24.965658 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:24.967122622+00:00 stderr F E1208 17:44:24.967020 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:24.972067547+00:00 stderr F E1208 17:44:24.969966 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 2025-12-08T17:44:24.974227076+00:00 stderr F E1208 17:44:24.974196 1 static_resource_controller.go:221] "Unhandled Error" err="missing informer for namespace \"\"; no dynamic wiring added, time-based only." 
2025-12-08T17:44:24.979556191+00:00 stderr F I1208 17:44:24.976668 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:24.987482837+00:00 stderr F I1208 17:44:24.984925 1 starter.go:411] FeatureGates initializedenabled[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks]disabled[AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:24.987482837+00:00 stderr F I1208 17:44:24.985455 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", 
"ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:25.015799370+00:00 stderr F I1208 17:44:25.015750 1 starter.go:545] waiting for cluster version informer sync... 2025-12-08T17:44:25.037995015+00:00 stderr F I1208 17:44:25.036915 1 starter.go:568] Detected available machine API, starting vertical scaling related controllers and informers... 
2025-12-08T17:44:25.049157600+00:00 stderr F I1208 17:44:25.046947 1 base_controller.go:76] Waiting for caches to sync for ClusterMemberRemovalController 2025-12-08T17:44:25.049157600+00:00 stderr F I1208 17:44:25.046994 1 base_controller.go:76] Waiting for caches to sync for MachineDeletionHooksController 2025-12-08T17:44:25.050924268+00:00 stderr F I1208 17:44:25.050717 1 base_controller.go:76] Waiting for caches to sync for MissingStaticPodController 2025-12-08T17:44:25.060381836+00:00 stderr F I1208 17:44:25.058640 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_etcd 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.061955 1 base_controller.go:76] Waiting for caches to sync for etcd-Node 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062004 1 base_controller.go:76] Waiting for caches to sync for ConfigObserver 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062020 1 base_controller.go:76] Waiting for caches to sync for ClusterMemberController 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062097 1 base_controller.go:76] Waiting for caches to sync for EtcdMembersController 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062101 1 base_controller.go:82] Caches are synced for EtcdMembersController 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062107 1 base_controller.go:119] Starting #1 worker of EtcdMembersController controller ... 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062148 1 base_controller.go:76] Waiting for caches to sync for BootstrapTeardownController 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062158 1 base_controller.go:76] Waiting for caches to sync for etcd-UnsupportedConfigOverrides 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062167 1 base_controller.go:76] Waiting for caches to sync for ScriptController 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062179 1 base_controller.go:76] Waiting for caches to sync for DefragController 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062193 1 envvarcontroller.go:236] Starting EnvVarController 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062203 1 base_controller.go:76] Waiting for caches to sync for RevisionController 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062442 1 base_controller.go:76] Waiting for caches to sync for Installer 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062454 1 base_controller.go:76] Waiting for caches to sync for etcd-InstallerState 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062462 1 base_controller.go:76] Waiting for caches to sync for etcd-StaticPodState 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.062471 1 base_controller.go:76] Waiting for caches to sync for PruneController 2025-12-08T17:44:25.064004964+00:00 stderr F E1208 17:44:25.062667 1 base_controller.go:279] "Unhandled Error" err="EtcdMembersController reconciliation failed: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.063208 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ReportEtcdMembersErrorUpdatingStatus' etcds.operator.openshift.io "cluster" not found 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 
17:44:25.063589 1 base_controller.go:76] Waiting for caches to sync for BackingResourceController-StaticResources 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.063605 1 base_controller.go:76] Waiting for caches to sync for etcd-operator-UnsupportedConfigOverrides 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.063615 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:25.064004964+00:00 stderr F I1208 17:44:25.063625 1 base_controller.go:76] Waiting for caches to sync for GuardController 2025-12-08T17:44:25.064135138+00:00 stderr F I1208 17:44:25.064057 1 base_controller.go:76] Waiting for caches to sync for FSyncController 2025-12-08T17:44:25.064135138+00:00 stderr F I1208 17:44:25.064098 1 base_controller.go:82] Caches are synced for FSyncController 2025-12-08T17:44:25.064135138+00:00 stderr F I1208 17:44:25.064114 1 base_controller.go:119] Starting #1 worker of FSyncController controller ... 2025-12-08T17:44:25.066939965+00:00 stderr F I1208 17:44:25.065261 1 base_controller.go:76] Waiting for caches to sync for EtcdCertCleanerController 2025-12-08T17:44:25.066939965+00:00 stderr F I1208 17:44:25.065287 1 base_controller.go:82] Caches are synced for EtcdCertCleanerController 2025-12-08T17:44:25.066939965+00:00 stderr F I1208 17:44:25.065293 1 base_controller.go:119] Starting #1 worker of EtcdCertCleanerController controller ... 2025-12-08T17:44:25.066939965+00:00 stderr F I1208 17:44:25.065825 1 base_controller.go:76] Waiting for caches to sync for TargetConfigController 2025-12-08T17:44:25.066939965+00:00 stderr F I1208 17:44:25.065853 1 base_controller.go:76] Waiting for caches to sync for EtcdCertSignerController 2025-12-08T17:44:25.066939965+00:00 stderr F I1208 17:44:25.066016 1 base_controller.go:76] Waiting for caches to sync for etcd 2025-12-08T17:44:25.066939965+00:00 stderr F I1208 17:44:25.066030 1 base_controller.go:76] Waiting for caches to sync for EtcdEndpointsController 2025-12-08T17:44:25.077194455+00:00 stderr F I1208 17:44:25.067613 1 base_controller.go:76] Waiting for caches to sync for EtcdStaticResources-StaticResources 2025-12-08T17:44:25.077194455+00:00 stderr F E1208 17:44:25.074385 1 base_controller.go:279] "Unhandled Error" err="EtcdMembersController reconciliation failed: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.077194455+00:00 stderr F I1208 17:44:25.074417 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ReportEtcdMembersErrorUpdatingStatus' etcds.operator.openshift.io "cluster" not found 2025-12-08T17:44:25.100507070+00:00 stderr F E1208 17:44:25.100394 1 base_controller.go:279] "Unhandled Error" err="EtcdMembersController reconciliation failed: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.100507070+00:00 stderr F I1208 17:44:25.100469 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ReportEtcdMembersErrorUpdatingStatus' etcds.operator.openshift.io "cluster" not found 2025-12-08T17:44:25.156123657+00:00 stderr F I1208 17:44:25.156012 1 base_controller.go:82] Caches are synced for 
MachineDeletionHooksController 2025-12-08T17:44:25.156123657+00:00 stderr F I1208 17:44:25.156039 1 base_controller.go:119] Starting #1 worker of MachineDeletionHooksController controller ... 2025-12-08T17:44:25.163235022+00:00 stderr F I1208 17:44:25.161902 1 base_controller.go:82] Caches are synced for StatusSyncer_etcd 2025-12-08T17:44:25.163235022+00:00 stderr F I1208 17:44:25.161927 1 base_controller.go:119] Starting #1 worker of StatusSyncer_etcd controller ... 2025-12-08T17:44:25.175501306+00:00 stderr F I1208 17:44:25.175260 1 base_controller.go:82] Caches are synced for etcd-Node 2025-12-08T17:44:25.175501306+00:00 stderr F I1208 17:44:25.175288 1 base_controller.go:119] Starting #1 worker of etcd-Node controller ... 2025-12-08T17:44:25.177178192+00:00 stderr F I1208 17:44:25.177103 1 base_controller.go:82] Caches are synced for etcd-UnsupportedConfigOverrides 2025-12-08T17:44:25.177178192+00:00 stderr F I1208 17:44:25.177124 1 base_controller.go:119] Starting #1 worker of etcd-UnsupportedConfigOverrides controller ... 2025-12-08T17:44:25.177586073+00:00 stderr F I1208 17:44:25.177549 1 base_controller.go:82] Caches are synced for BootstrapTeardownController 2025-12-08T17:44:25.177596603+00:00 stderr F I1208 17:44:25.177582 1 base_controller.go:119] Starting #1 worker of BootstrapTeardownController controller ... 2025-12-08T17:44:25.177655255+00:00 stderr F I1208 17:44:25.177633 1 base_controller.go:82] Caches are synced for ScriptController 2025-12-08T17:44:25.177655255+00:00 stderr F I1208 17:44:25.177645 1 base_controller.go:119] Starting #1 worker of ScriptController controller ... 2025-12-08T17:44:25.177664095+00:00 stderr F I1208 17:44:25.177657 1 base_controller.go:82] Caches are synced for DefragController 2025-12-08T17:44:25.177671455+00:00 stderr F I1208 17:44:25.177662 1 base_controller.go:119] Starting #1 worker of DefragController controller ... 2025-12-08T17:44:25.177901101+00:00 stderr F E1208 17:44:25.177859 1 base_controller.go:279] "Unhandled Error" err="BootstrapTeardownController reconciliation failed: error while canRemoveEtcdBootstrap: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.207898220+00:00 stderr F I1208 17:44:25.178719 1 base_controller.go:82] Caches are synced for etcd-operator-UnsupportedConfigOverrides 2025-12-08T17:44:25.207898220+00:00 stderr F I1208 17:44:25.206990 1 base_controller.go:119] Starting #1 worker of etcd-operator-UnsupportedConfigOverrides controller ... 2025-12-08T17:44:25.207898220+00:00 stderr F I1208 17:44:25.178723 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:25.207898220+00:00 stderr F I1208 17:44:25.207557 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 
2025-12-08T17:44:25.207898220+00:00 stderr F I1208 17:44:25.204089 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/etcd changed: Degraded changed from False to True ("NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)") 2025-12-08T17:44:25.207898220+00:00 stderr F E1208 17:44:25.204858 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:25.209388010+00:00 stderr F E1208 17:44:25.209046 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:25.209388010+00:00 stderr F E1208 17:44:25.209218 1 base_controller.go:279] "Unhandled Error" err="BootstrapTeardownController reconciliation failed: error while canRemoveEtcdBootstrap: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.214848819+00:00 stderr F E1208 17:44:25.214566 1 base_controller.go:279] "Unhandled Error" err="BootstrapTeardownController reconciliation failed: error while canRemoveEtcdBootstrap: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.214996293+00:00 stderr F E1208 17:44:25.214970 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:25.219531997+00:00 stderr F E1208 17:44:25.219405 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:51920->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:25.226370723+00:00 stderr F I1208 17:44:25.226292 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/etcd changed: Degraded message changed from "NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)" to "NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)\nScriptControllerDegraded: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:25.237733953+00:00 stderr F E1208 17:44:25.237270 1 base_controller.go:279] "Unhandled Error" err="BootstrapTeardownController reconciliation failed: error while canRemoveEtcdBootstrap: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.237816755+00:00 stderr F E1208 17:44:25.237727 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:25.259453556+00:00 stderr F I1208 17:44:25.259015 1 
reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:25.280742057+00:00 stderr F E1208 17:44:25.280667 1 base_controller.go:279] "Unhandled Error" err="BootstrapTeardownController reconciliation failed: error while canRemoveEtcdBootstrap: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.281831316+00:00 stderr F E1208 17:44:25.281393 1 base_controller.go:279] "Unhandled Error" err="EtcdMembersController reconciliation failed: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.281831316+00:00 stderr F E1208 17:44:25.281682 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:25.288287593+00:00 stderr F E1208 17:44:25.288228 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:42638->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:25.293535355+00:00 stderr F I1208 17:44:25.291858 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'MasterNodesReadyChanged' All master nodes are ready 2025-12-08T17:44:25.305535033+00:00 stderr F E1208 17:44:25.305477 1 base_controller.go:279] "Unhandled Error" err="BootstrapTeardownController reconciliation failed: error while canRemoveEtcdBootstrap: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.305803031+00:00 stderr F E1208 17:44:25.305776 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:25.306349815+00:00 stderr F I1208 17:44:25.306322 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"EtcdMembersControllerDegraded: getting cache client could not retrieve endpoints: configmaps lister not synced\nNodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)\nScriptControllerDegraded: \"configmap/etcd-pod\": missing env var values","reason":"EtcdMembersController_ErrorUpdatingReportEtcdMembers::NodeController_MasterNodesReady::ScriptController_Error","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:25.317965892+00:00 stderr F 
I1208 17:44:25.317866 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'MasterNodesReadyChanged' All master nodes are ready 2025-12-08T17:44:25.319145705+00:00 stderr F E1208 17:44:25.319102 1 base_controller.go:279] "Unhandled Error" err="BootstrapTeardownController reconciliation failed: error while canRemoveEtcdBootstrap: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.328789757+00:00 stderr F E1208 17:44:25.321357 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:25.343568010+00:00 stderr F E1208 17:44:25.331207 1 base_controller.go:279] "Unhandled Error" err="EtcdMembersController reconciliation failed: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.353018879+00:00 stderr F I1208 17:44:25.346468 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"EtcdMembersControllerDegraded: getting cache client could not retrieve endpoints: configmaps lister not synced\nScriptControllerDegraded: \"configmap/etcd-pod\": missing env var values\nNodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:25.353018879+00:00 stderr F I1208 17:44:25.347461 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/etcd changed: Degraded message changed from "NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)\nScriptControllerDegraded: \"configmap/etcd-pod\": missing env var values" to "EtcdMembersControllerDegraded: getting cache client could not retrieve endpoints: configmaps lister not synced\nNodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)\nScriptControllerDegraded: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:25.365988852+00:00 stderr F E1208 17:44:25.364366 1 base_controller.go:279] "Unhandled Error" err="BootstrapTeardownController reconciliation failed: error while canRemoveEtcdBootstrap: getting cache client 
could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.367889344+00:00 stderr F E1208 17:44:25.366779 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:25.385045602+00:00 stderr F I1208 17:44:25.383362 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'MasterNodesReadyChanged' All master nodes are ready 2025-12-08T17:44:25.468158449+00:00 stderr F I1208 17:44:25.459505 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/etcd changed: Degraded changed from True to False ("EtcdMembersControllerDegraded: getting cache client could not retrieve endpoints: configmaps lister not synced\nScriptControllerDegraded: \"configmap/etcd-pod\": missing env var values\nNodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found") 2025-12-08T17:44:25.468158449+00:00 stderr F I1208 17:44:25.462652 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:25.468158449+00:00 stderr F I1208 17:44:25.462775 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:25.469263609+00:00 stderr F I1208 17:44:25.468972 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:44:25.483408275+00:00 stderr F E1208 17:44:25.482184 1 base_controller.go:279] "Unhandled Error" err="EtcdMembersController reconciliation failed: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.534395155+00:00 stderr F E1208 17:44:25.534076 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:60521->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:25.654043959+00:00 stderr F I1208 17:44:25.652586 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:25.660098334+00:00 stderr F E1208 17:44:25.659914 1 base_controller.go:279] "Unhandled Error" err="EtcdMembersController reconciliation failed: getting cache client could not retrieve endpoints: configmaps lister not synced" 2025-12-08T17:44:25.664161896+00:00 stderr F I1208 17:44:25.662595 1 base_controller.go:82] Caches are synced for etcd-InstallerState 2025-12-08T17:44:25.664161896+00:00 stderr F I1208 17:44:25.662622 1 base_controller.go:119] Starting #1 worker of etcd-InstallerState controller ... 2025-12-08T17:44:25.664161896+00:00 stderr F I1208 17:44:25.662652 1 base_controller.go:82] Caches are synced for etcd-StaticPodState 2025-12-08T17:44:25.664161896+00:00 stderr F I1208 17:44:25.662657 1 base_controller.go:119] Starting #1 worker of etcd-StaticPodState controller ... 
2025-12-08T17:44:25.664944307+00:00 stderr F E1208 17:44:25.664237 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:33418->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:25.664944307+00:00 stderr F I1208 17:44:25.664270 1 base_controller.go:82] Caches are synced for GuardController 2025-12-08T17:44:25.664944307+00:00 stderr F I1208 17:44:25.664279 1 base_controller.go:119] Starting #1 worker of GuardController controller ... 2025-12-08T17:44:25.797043920+00:00 stderr F E1208 17:44:25.796170 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:42065->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:25.855700310+00:00 stderr F I1208 17:44:25.854775 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:25.863012380+00:00 stderr F I1208 17:44:25.862934 1 base_controller.go:82] Caches are synced for PruneController 2025-12-08T17:44:25.863012380+00:00 stderr F I1208 17:44:25.862953 1 base_controller.go:119] Starting #1 worker of PruneController controller ... 2025-12-08T17:44:25.863190314+00:00 stderr F I1208 17:44:25.863161 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:44:25.863200965+00:00 stderr F I1208 17:44:25.863194 1 base_controller.go:82] Caches are synced for ConfigObserver 2025-12-08T17:44:25.863208615+00:00 stderr F I1208 17:44:25.863200 1 base_controller.go:119] Starting #1 worker of ConfigObserver controller ... 2025-12-08T17:44:25.863248506+00:00 stderr F I1208 17:44:25.863218 1 base_controller.go:82] Caches are synced for ClusterMemberController 2025-12-08T17:44:25.863248506+00:00 stderr F I1208 17:44:25.863229 1 base_controller.go:119] Starting #1 worker of ClusterMemberController controller ... 2025-12-08T17:44:25.863596485+00:00 stderr F I1208 17:44:25.863575 1 etcdcli_pool.go:70] creating a new cached client 2025-12-08T17:44:25.866799523+00:00 stderr F I1208 17:44:25.866056 1 base_controller.go:82] Caches are synced for EtcdEndpointsController 2025-12-08T17:44:25.866799523+00:00 stderr F I1208 17:44:25.866071 1 base_controller.go:119] Starting #1 worker of EtcdEndpointsController controller ... 2025-12-08T17:44:25.866799523+00:00 stderr F I1208 17:44:25.866120 1 etcdcli_pool.go:70] creating a new cached client 2025-12-08T17:44:25.892807332+00:00 stderr F E1208 17:44:25.891292 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:37019->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:25.948506631+00:00 stderr F I1208 17:44:25.948342 1 base_controller.go:82] Caches are synced for ClusterMemberRemovalController 2025-12-08T17:44:25.948506631+00:00 stderr F I1208 17:44:25.948378 1 base_controller.go:119] Starting #1 worker of ClusterMemberRemovalController controller ... 
2025-12-08T17:44:25.952455049+00:00 stderr F I1208 17:44:25.951980 1 base_controller.go:82] Caches are synced for MissingStaticPodController 2025-12-08T17:44:25.952455049+00:00 stderr F I1208 17:44:25.952032 1 base_controller.go:119] Starting #1 worker of MissingStaticPodController controller ... 2025-12-08T17:44:26.009956527+00:00 stderr F E1208 17:44:26.008439 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:26.027161637+00:00 stderr F I1208 17:44:26.027079 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"ScriptControllerDegraded: \"configmap/etcd-pod\": missing env var values\nNodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:26.029799549+00:00 stderr F I1208 17:44:26.029154 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:44:26.029799549+00:00 stderr F E1208 17:44:26.029691 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": missing env var values" 2025-12-08T17:44:26.034285491+00:00 stderr F I1208 17:44:26.032776 1 etcdcli_pool.go:70] creating a new cached client 2025-12-08T17:44:26.038428404+00:00 stderr F I1208 17:44:26.038340 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/etcd changed: Degraded message changed from "EtcdMembersControllerDegraded: getting cache client could not retrieve endpoints: configmaps lister not synced\nScriptControllerDegraded: \"configmap/etcd-pod\": missing env var values\nNodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found" to "ScriptControllerDegraded: \"configmap/etcd-pod\": missing env var values\nNodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found" 2025-12-08T17:44:26.065480502+00:00 stderr F I1208 17:44:26.063914 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:26.146474531+00:00 stderr F E1208 17:44:26.146414 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:38718->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:26.248683919+00:00 
stderr F I1208 17:44:26.248117 1 request.go:752] "Waited before sending request" delay="1.187621394s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts?limit=500&resourceVersion=0" 2025-12-08T17:44:26.252824342+00:00 stderr F I1208 17:44:26.252786 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:26.268266533+00:00 stderr F I1208 17:44:26.266924 1 base_controller.go:82] Caches are synced for BackingResourceController-StaticResources 2025-12-08T17:44:26.268266533+00:00 stderr F I1208 17:44:26.266953 1 base_controller.go:119] Starting #1 worker of BackingResourceController-StaticResources controller ... 2025-12-08T17:44:26.452049657+00:00 stderr F I1208 17:44:26.451207 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:26.470523461+00:00 stderr F I1208 17:44:26.468580 1 base_controller.go:82] Caches are synced for EtcdStaticResources-StaticResources 2025-12-08T17:44:26.470523461+00:00 stderr F I1208 17:44:26.468625 1 base_controller.go:119] Starting #1 worker of EtcdStaticResources-StaticResources controller ... 2025-12-08T17:44:26.487050771+00:00 stderr F E1208 17:44:26.486931 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:40415->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:26.651558518+00:00 stderr F I1208 17:44:26.651488 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:26.662931659+00:00 stderr F I1208 17:44:26.662772 1 base_controller.go:82] Caches are synced for Installer 2025-12-08T17:44:26.662931659+00:00 stderr F I1208 17:44:26.662797 1 base_controller.go:119] Starting #1 worker of Installer controller ... 2025-12-08T17:44:26.663061732+00:00 stderr F I1208 17:44:26.663039 1 base_controller.go:82] Caches are synced for RevisionController 2025-12-08T17:44:26.663094323+00:00 stderr F I1208 17:44:26.663084 1 base_controller.go:119] Starting #1 worker of RevisionController controller ... 2025-12-08T17:44:26.663159615+00:00 stderr F I1208 17:44:26.663149 1 envvarcontroller.go:242] caches synced 2025-12-08T17:44:26.666362812+00:00 stderr F I1208 17:44:26.666329 1 base_controller.go:82] Caches are synced for etcd 2025-12-08T17:44:26.666362812+00:00 stderr F I1208 17:44:26.666344 1 base_controller.go:119] Starting #1 worker of etcd controller ... 2025-12-08T17:44:26.666425774+00:00 stderr F I1208 17:44:26.666394 1 base_controller.go:82] Caches are synced for EtcdCertSignerController 2025-12-08T17:44:26.666425774+00:00 stderr F I1208 17:44:26.666420 1 base_controller.go:119] Starting #1 worker of EtcdCertSignerController controller ... 2025-12-08T17:44:26.666480885+00:00 stderr F I1208 17:44:26.666467 1 base_controller.go:82] Caches are synced for TargetConfigController 2025-12-08T17:44:26.666516206+00:00 stderr F I1208 17:44:26.666498 1 base_controller.go:119] Starting #1 worker of TargetConfigController controller ... 
2025-12-08T17:44:27.158275960+00:00 stderr F E1208 17:44:27.157598 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:49637->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:27.453152093+00:00 stderr F I1208 17:44:27.448812 1 request.go:752] "Waited before sending request" delay="1.181448776s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/installer-sa" 2025-12-08T17:44:28.075190790+00:00 stderr F I1208 17:44:28.074436 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:44:28.089890902+00:00 stderr F I1208 17:44:28.089783 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:28.104324515+00:00 stderr F I1208 17:44:28.104247 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/etcd changed: Degraded message changed from "ScriptControllerDegraded: \"configmap/etcd-pod\": missing env var values\nNodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found" to "NodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found" 2025-12-08T17:44:28.477789672+00:00 stderr F E1208 17:44:28.477144 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:50906->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:28.650944035+00:00 stderr F I1208 17:44:28.647463 1 request.go:752] "Waited before sending request" delay="1.593785623s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/pods/etcd-crc" 2025-12-08T17:44:29.648933937+00:00 stderr F I1208 17:44:29.648162 1 request.go:752] "Waited before sending request" delay="1.560700571s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-scripts" 2025-12-08T17:44:30.619797060+00:00 stderr F I1208 
17:44:30.608557 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.608512522 +0000 UTC))" 2025-12-08T17:44:30.619797060+00:00 stderr F I1208 17:44:30.619722 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.619669646 +0000 UTC))" 2025-12-08T17:44:30.619797060+00:00 stderr F I1208 17:44:30.619752 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.619738288 +0000 UTC))" 2025-12-08T17:44:30.619797060+00:00 stderr F I1208 17:44:30.619773 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.619762089 +0000 UTC))" 2025-12-08T17:44:30.619843711+00:00 stderr F I1208 17:44:30.619792 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.619778979 +0000 UTC))" 2025-12-08T17:44:30.619843711+00:00 stderr F I1208 17:44:30.619812 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.619801 +0000 UTC))" 2025-12-08T17:44:30.619843711+00:00 stderr F I1208 17:44:30.619835 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.61981768 +0000 UTC))" 2025-12-08T17:44:30.619868152+00:00 stderr F I1208 17:44:30.619856 1 tlsconfig.go:181] "Loaded client CA" index=7 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.619844231 +0000 UTC))" 2025-12-08T17:44:30.619964694+00:00 stderr F I1208 17:44:30.619907 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.619863682 +0000 UTC))" 2025-12-08T17:44:30.619964694+00:00 stderr F I1208 17:44:30.619930 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.619919393 +0000 UTC))" 2025-12-08T17:44:30.620238992+00:00 stderr F I1208 17:44:30.620209 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-etcd-operator.svc\" [serving] validServingFor=[metrics.openshift-etcd-operator.svc,metrics.openshift-etcd-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:12 +0000 UTC to 2027-11-02 07:52:13 +0000 UTC (now=2025-12-08 17:44:30.620191151 +0000 UTC))" 2025-12-08T17:44:30.620440607+00:00 stderr F I1208 17:44:30.620407 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215864\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:44:30.620391466 +0000 UTC))" 2025-12-08T17:44:30.653455928+00:00 stderr F I1208 17:44:30.650906 1 request.go:752] "Waited before sending request" delay="1.594154703s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/services/etcd" 2025-12-08T17:44:31.044522515+00:00 stderr F E1208 17:44:31.043804 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: read udp 10.217.0.38:45898->10.217.4.10:53: read: connection refused" 2025-12-08T17:44:36.173753405+00:00 stderr F E1208 17:44:36.173376 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:44:46.424143480+00:00 stderr F E1208 17:44:46.423613 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post 
\"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:45:06.912248602+00:00 stderr F E1208 17:45:06.911689 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:45:16.055683458+00:00 stderr F I1208 17:45:16.055246 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.055197204 +0000 UTC))" 2025-12-08T17:45:16.055731069+00:00 stderr F I1208 17:45:16.055682 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.055661847 +0000 UTC))" 2025-12-08T17:45:16.055731069+00:00 stderr F I1208 17:45:16.055708 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.055691678 +0000 UTC))" 2025-12-08T17:45:16.055738819+00:00 stderr F I1208 17:45:16.055731 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.055716188 +0000 UTC))" 2025-12-08T17:45:16.055770940+00:00 stderr F I1208 17:45:16.055756 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.055739839 +0000 UTC))" 2025-12-08T17:45:16.055801921+00:00 stderr F I1208 17:45:16.055788 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.05577005 +0000 UTC))" 2025-12-08T17:45:16.055834782+00:00 stderr F I1208 17:45:16.055820 1 tlsconfig.go:181] "Loaded client CA" index=6 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.055800971 +0000 UTC))" 2025-12-08T17:45:16.055864302+00:00 stderr F I1208 17:45:16.055851 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.055833932 +0000 UTC))" 2025-12-08T17:45:16.055926804+00:00 stderr F I1208 17:45:16.055912 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.055863602 +0000 UTC))" 2025-12-08T17:45:16.055960565+00:00 stderr F I1208 17:45:16.055947 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.055930554 +0000 UTC))" 2025-12-08T17:45:16.055991256+00:00 stderr F I1208 17:45:16.055978 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.055960565 +0000 UTC))" 2025-12-08T17:45:16.056691465+00:00 stderr F I1208 17:45:16.056364 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-etcd-operator.svc\" [serving] validServingFor=[metrics.openshift-etcd-operator.svc,metrics.openshift-etcd-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:12 +0000 UTC to 2027-11-02 07:52:13 +0000 UTC (now=2025-12-08 17:45:16.056339216 +0000 UTC))" 2025-12-08T17:45:16.056691465+00:00 stderr F I1208 17:45:16.056649 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215864\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:22 +0000 UTC to 2028-12-08 16:44:22 +0000 UTC (now=2025-12-08 17:45:16.056629634 +0000 UTC))" 2025-12-08T17:45:25.071340138+00:00 stderr F E1208 17:45:25.070926 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post 
\"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:45:47.881394238+00:00 stderr F E1208 17:45:47.880833 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:46:24.893819640+00:00 stderr F E1208 17:46:24.893263 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-etcd-operator/leases/openshift-cluster-etcd-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:24.895236212+00:00 stderr F E1208 17:46:24.895169 1 leaderelection.go:436] error retrieving resource lock openshift-etcd-operator/openshift-cluster-etcd-operator-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-etcd-operator/leases/openshift-cluster-etcd-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.077535434+00:00 stderr F E1208 17:46:25.077430 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:46:25.184045101+00:00 stderr F E1208 17:46:25.183936 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-scripts\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.184045101+00:00 stderr F I1208 17:46:25.183973 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ScriptControllerErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.184966649+00:00 stderr F E1208 17:46:25.184861 1 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd-operator/events\": dial tcp 10.217.4.1:443: connect: connection refused" event="&Event{ObjectMeta:{etcd-operator.187f4e9604b877bb openshift-etcd-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Deployment,Namespace:openshift-etcd-operator,Name:etcd-operator,UID:7bcc9069-5a71-4f51-8970-90dddeee56b2,APIVersion:apps/v1,ResourceVersion:,FieldPath:,},Reason:ScriptControllerErrorUpdatingStatus,Message:Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused,Source:EventSource{Component:openshift-cluster-etcd-operator-script-controller-scriptcontroller,Host:,},FirstTimestamp:2025-12-08 17:46:25.183782843 +0000 UTC m=+122.589095404,LastTimestamp:2025-12-08 17:46:25.183782843 +0000 UTC m=+122.589095404,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:openshift-cluster-etcd-operator-script-controller-scriptcontroller,ReportingInstance:,}" 2025-12-08T17:46:25.193953848+00:00 stderr F E1208 17:46:25.193871 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-scripts\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.194000480+00:00 stderr F I1208 17:46:25.193931 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ScriptControllerErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.209434983+00:00 stderr F E1208 17:46:25.209353 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-scripts\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.209481144+00:00 stderr F I1208 17:46:25.209432 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ScriptControllerErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.235144014+00:00 stderr F E1208 17:46:25.235056 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-scripts\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.235144014+00:00 stderr F I1208 17:46:25.235107 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ScriptControllerErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.281290660+00:00 stderr F E1208 17:46:25.281216 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-scripts\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.281323711+00:00 stderr F I1208 17:46:25.281272 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ScriptControllerErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.367398475+00:00 stderr F E1208 17:46:25.367298 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": 
Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-scripts\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.367398475+00:00 stderr F I1208 17:46:25.367354 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ScriptControllerErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.533571202+00:00 stderr F E1208 17:46:25.533447 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-scripts\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.533571202+00:00 stderr F I1208 17:46:25.533499 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ScriptControllerErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:25.664858353+00:00 stderr F E1208 17:46:25.664755 1 base_controller.go:279] "Unhandled Error" err="etcd-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.670816902+00:00 stderr F E1208 17:46:25.670726 1 base_controller.go:279] "Unhandled Error" err="etcd-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"etcd-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status?fieldManager=etcd-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.673350158+00:00 stderr F E1208 17:46:25.673171 1 base_controller.go:279] "Unhandled Error" err="etcd-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.680559524+00:00 stderr F E1208 17:46:25.680500 1 base_controller.go:279] "Unhandled Error" err="etcd-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"etcd-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status?fieldManager=etcd-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.686227114+00:00 stderr F E1208 17:46:25.686156 1 base_controller.go:279] "Unhandled Error" err="etcd-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.784337989+00:00 stderr F E1208 17:46:25.783373 1 base_controller.go:279] "Unhandled Error" err="etcd-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"etcd-StaticPodState\": Patch 
\"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status?fieldManager=etcd-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.981588950+00:00 stderr F E1208 17:46:25.981498 1 base_controller.go:279] "Unhandled Error" err="etcd-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.183637724+00:00 stderr F E1208 17:46:26.183558 1 base_controller.go:279] "Unhandled Error" err="etcd-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"etcd-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status?fieldManager=etcd-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.383590596+00:00 stderr F E1208 17:46:26.383521 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-scripts\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.383590596+00:00 stderr F I1208 17:46:26.383566 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ScriptControllerErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.585721234+00:00 stderr F E1208 17:46:26.585634 1 base_controller.go:279] "Unhandled Error" err="EtcdEndpointsController reconciliation failed: applying configmap update failed :Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.585813596+00:00 stderr F I1208 17:46:26.585760 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'EtcdEndpointsErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.587179477+00:00 stderr F E1208 17:46:26.587118 1 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd-operator/events\": dial tcp 10.217.4.1:443: connect: connection refused" event="&Event{ObjectMeta:{etcd-operator.187f4e9658459e7b openshift-etcd-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Deployment,Namespace:openshift-etcd-operator,Name:etcd-operator,UID:7bcc9069-5a71-4f51-8970-90dddeee56b2,APIVersion:apps/v1,ResourceVersion:,FieldPath:,},Reason:EtcdEndpointsErrorUpdatingStatus,Message:Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused,Source:EventSource{Component:openshift-cluster-etcd-operator-etcd-endpoints-controller-etcdendpointscontroller,Host:,},FirstTimestamp:2025-12-08 17:46:26.585542267 +0000 UTC m=+123.990854828,LastTimestamp:2025-12-08 
17:46:26.585542267 +0000 UTC m=+123.990854828,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:openshift-cluster-etcd-operator-etcd-endpoints-controller-etcdendpointscontroller,ReportingInstance:,}" 2025-12-08T17:46:26.780801928+00:00 stderr F E1208 17:46:26.780723 1 base_controller.go:279] "Unhandled Error" err="etcd-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.985104081+00:00 stderr F E1208 17:46:26.985030 1 base_controller.go:279] "Unhandled Error" err="etcd-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"etcd-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status?fieldManager=etcd-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.187758613+00:00 stderr F E1208 17:46:27.187564 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-etcd-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:27.583632636+00:00 stderr F E1208 17:46:27.583559 1 base_controller.go:279] "Unhandled Error" err="EtcdEndpointsController reconciliation failed: applying configmap update failed :Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.583676028+00:00 stderr F I1208 17:46:27.583639 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'EtcdEndpointsErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:27.780040702+00:00 stderr F I1208 17:46:27.779938 1 request.go:752] "Waited before sending request" delay="1.112323377s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-all-bundles" 2025-12-08T17:46:27.781094333+00:00 stderr F E1208 17:46:27.781037 1 base_controller.go:279] "Unhandled Error" err="EtcdCertSignerController reconciliation failed: could not get current etcd-all-bundles configmap Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-all-bundles\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.185233004+00:00 stderr F E1208 17:46:28.185110 1 base_controller.go:279] "Unhandled Error" err="etcd-InstallerState 
reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.382333880+00:00 stderr F E1208 17:46:28.382196 1 base_controller.go:279] "Unhandled Error" err="ScriptController reconciliation failed: \"configmap/etcd-pod\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-scripts\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.382333880+00:00 stderr F I1208 17:46:28.382242 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ScriptControllerErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:28.583562680+00:00 stderr F E1208 17:46:28.583252 1 base_controller.go:279] "Unhandled Error" err="etcd-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"etcd-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status?fieldManager=etcd-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.780447160+00:00 stderr F I1208 17:46:28.780368 1 request.go:752] "Waited before sending request" delay="1.585942983s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/installer-sa" 2025-12-08T17:46:28.785043828+00:00 stderr F E1208 17:46:28.784980 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-etcd-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:29.185625052+00:00 stderr F E1208 17:46:29.185186 1 base_controller.go:279] "Unhandled Error" err="EtcdEndpointsController reconciliation failed: applying configmap update failed :Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-endpoints\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:29.185625052+00:00 stderr F I1208 17:46:29.185225 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'EtcdEndpointsErrorUpdatingStatus' Put "https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:29.381423959+00:00 stderr F E1208 17:46:29.381333 1 base_controller.go:279] 
"Unhandled Error" err="EtcdCertSignerController reconciliation failed: could not get current etcd-all-bundles configmap Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-all-bundles\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:29.583232946+00:00 stderr F E1208 17:46:29.583160 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/etcds/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:29.980020405+00:00 stderr F I1208 17:46:29.979938 1 request.go:752] "Waited before sending request" delay="1.221978039s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/pods/etcd-crc" 2025-12-08T17:46:30.980331030+00:00 stderr F I1208 17:46:30.980254 1 request.go:752] "Waited before sending request" delay="1.39428095s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/configmaps/etcd-pod" 2025-12-08T17:46:31.403746619+00:00 stderr F E1208 17:46:31.403675 1 base_controller.go:279] "Unhandled Error" err="EtcdStaticResources-StaticResources reconciliation failed: [\"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused, \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:58.755897262+00:00 stderr F I1208 17:46:58.754595 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:00.275046733+00:00 stderr F I1208 17:47:00.274657 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:04.433772987+00:00 stderr F I1208 17:47:04.432955 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:04.513710663+00:00 stderr F I1208 17:47:04.513637 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:06.567537916+00:00 stderr F I1208 17:47:06.567447 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=etcds" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:06.569309521+00:00 stderr F I1208 17:47:06.569260 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:06.570702715+00:00 stderr F I1208 17:47:06.570494 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members 
found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:06.601223126+00:00 stderr F I1208 17:47:06.601133 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/etcd changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found" to "NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members found" 2025-12-08T17:47:06.601297289+00:00 stderr F I1208 17:47:06.601267 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:06.606596625+00:00 stderr F E1208 17:47:06.606275 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_etcd reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"etcd\": the object has been modified; please apply your changes to 
the latest version and try again" 2025-12-08T17:47:06.611168299+00:00 stderr F I1208 17:47:06.611027 1 reflector.go:430] "Caches populated" type="*v1.Etcd" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:06.613710639+00:00 stderr F I1208 17:47:06.613268 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:06.617984774+00:00 stderr F E1208 17:47:06.617677 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_etcd reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"etcd\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:06.629848847+00:00 stderr F I1208 17:47:06.629818 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:06.635797854+00:00 stderr F E1208 
17:47:06.635754 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_etcd reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"etcd\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:06.658117617+00:00 stderr F I1208 17:47:06.658062 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:06.662783574+00:00 stderr F E1208 17:47:06.662635 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_etcd reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"etcd\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:06.704720734+00:00 stderr F I1208 17:47:06.704652 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 
2025-12-08T17:47:06.710463735+00:00 stderr F E1208 17:47:06.710411 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_etcd reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"etcd\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:06.793260831+00:00 stderr F I1208 17:47:06.793199 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:06.802126310+00:00 stderr F E1208 17:47:06.800204 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_etcd reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"etcd\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:06.963520971+00:00 stderr F I1208 17:47:06.963428 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is 
well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:06.967854528+00:00 stderr F E1208 17:47:06.967786 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_etcd reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"etcd\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:07.290493334+00:00 stderr F I1208 17:47:07.290427 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:07.295160190+00:00 stderr F E1208 17:47:07.295122 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_etcd reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"etcd\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:07.816616196+00:00 stderr F I1208 17:47:07.816544 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:07.820665633+00:00 stderr F I1208 17:47:07.820582 1 etcdcli_pool.go:70] creating a new cached client 2025-12-08T17:47:08.272775545+00:00 stderr F I1208 17:47:08.272735 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:08.970678034+00:00 stderr F I1208 17:47:08.970288 1 request.go:752] "Waited before sending request" delay="1.153062367s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/installer-sa" 2025-12-08T17:47:09.970408285+00:00 stderr F I1208 17:47:09.970344 1 request.go:752] "Waited before sending request" delay="1.281006805s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/kube-system/configmaps?resourceVersion=38689" 2025-12-08T17:47:09.971688996+00:00 stderr F I1208 17:47:09.971642 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 
2025-12-08T17:47:11.772571626+00:00 stderr F I1208 17:47:11.772156 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:12.834430293+00:00 stderr F I1208 17:47:12.834332 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:12.835556868+00:00 stderr F I1208 17:47:12.835485 1 status_controller.go:230] clusteroperator/etcd diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:25Z","message":"NodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:05:28Z","message":"NodeInstallerProgressing: 1 node is at revision 2\nEtcdMembersProgressing: No unstarted etcd members found","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:59:32Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 2\nEtcdMembersAvailable: 1 members are available","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:51:49Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:49Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.846076289+00:00 stderr F I1208 17:47:12.845980 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-etcd-operator", Name:"etcd-operator", UID:"7bcc9069-5a71-4f51-8970-90dddeee56b2", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/etcd changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready\nEtcdStaticResourcesDegraded: \"etcd/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \"etcd/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-etcd/serviceaccounts/etcd-sa\": dial tcp 10.217.4.1:443: connect: connection refused\nEtcdStaticResourcesDegraded: \nEtcdMembersDegraded: No unhealthy members found" to "NodeControllerDegraded: All master nodes are ready\nEtcdMembersDegraded: No unhealthy members found" 2025-12-08T17:47:14.873919834+00:00 stderr F I1208 17:47:14.873858 1 reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:15.580974591+00:00 stderr F I1208 17:47:15.580558 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:16.116376845+00:00 stderr F I1208 17:47:16.116292 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:17.175347110+00:00 stderr F I1208 17:47:17.175056 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:17.372546419+00:00 stderr F I1208 17:47:17.372490 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:18.031494091+00:00 stderr F I1208 17:47:18.031385 1 reflector.go:430] "Caches populated" 
type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:18.390033338+00:00 stderr F I1208 17:47:18.389263 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:19.796640567+00:00 stderr F I1208 17:47:19.796202 1 reflector.go:430] "Caches populated" type="*v1.Job" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:20.172047044+00:00 stderr F I1208 17:47:20.171953 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:22.587766019+00:00 stderr F I1208 17:47:22.578051 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:25.072611809+00:00 stderr F E1208 17:47:25.072163 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:47:26.372278142+00:00 stderr F I1208 17:47:26.372169 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:34.159832927+00:00 stderr F I1208 17:47:34.159392 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:34.159917449+00:00 stderr F I1208 17:47:34.159869 1 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:34.162441749+00:00 stderr F I1208 17:47:34.162393 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:47:42.591719224+00:00 stderr F I1208 17:47:42.591238 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:43.340639489+00:00 stderr F I1208 17:47:43.340347 1 reflector.go:430] "Caches populated" type="*v1.PodDisruptionBudget" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:46.772036695+00:00 stderr F I1208 17:47:46.770945 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:47.745069396+00:00 stderr F I1208 17:47:47.744922 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:49.602180376+00:00 stderr F I1208 17:47:49.601825 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:49.781008115+00:00 stderr F I1208 17:47:49.780907 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:51.145471367+00:00 stderr F I1208 17:47:51.145142 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:51.147797050+00:00 stderr F I1208 17:47:51.147689 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:51.147951915+00:00 stderr F I1208 17:47:51.147863 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:51.148076659+00:00 stderr F I1208 17:47:51.148036 1 
prune_controller.go:277] Nothing to prune 2025-12-08T17:47:51.148210713+00:00 stderr F I1208 17:47:51.148173 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:47:55.899114494+00:00 stderr F I1208 17:47:55.899019 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:58.027776918+00:00 stderr F I1208 17:47:58.027652 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:59.759733839+00:00 stderr F I1208 17:47:59.756702 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:04.339278966+00:00 stderr F I1208 17:48:04.338533 1 reflector.go:430] "Caches populated" type="*v1.APIServer" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:04.452642366+00:00 stderr F I1208 17:48:04.452565 1 reflector.go:430] "Caches populated" type="*v1beta1.Machine" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:10.337602079+00:00 stderr F I1208 17:48:10.337153 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:25.074491962+00:00 stderr F E1208 17:48:25.074007 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:48:31.729061960+00:00 stderr F E1208 17:48:31.728654 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:49:25.082654157+00:00 stderr F E1208 17:49:25.081652 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:50:25.078279130+00:00 stderr F E1208 17:50:25.077669 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:51:25.081490485+00:00 stderr F E1208 17:51:25.080937 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:52:25.079317103+00:00 stderr F E1208 17:52:25.078641 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:52:34.166112511+00:00 stderr F I1208 17:52:34.165312 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T17:53:25.079403676+00:00 stderr F E1208 17:53:25.078627 1 base_controller.go:279] "Unhandled Error" err="FSyncController 
reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:54:25.080693053+00:00 stderr F E1208 17:54:25.080090 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:55:25.077067794+00:00 stderr F E1208 17:55:25.076516 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:56:25.081849981+00:00 stderr F E1208 17:56:25.081218 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:57:25.079129944+00:00 stderr F E1208 17:57:25.078174 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:57:51.147969279+00:00 stderr F I1208 17:57:51.146694 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.148286777+00:00 stderr F I1208 17:57:51.148262 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.148427171+00:00 stderr F I1208 17:57:51.148406 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.148957614+00:00 stderr F I1208 17:57:51.148923 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.149501399+00:00 stderr F I1208 17:57:51.149051 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.149501399+00:00 stderr F I1208 17:57:51.149167 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.149501399+00:00 stderr F I1208 17:57:51.149330 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.149501399+00:00 stderr F I1208 17:57:51.149488 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.149816297+00:00 stderr F I1208 17:57:51.149785 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.150705050+00:00 stderr F I1208 17:57:51.149981 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.150705050+00:00 stderr F I1208 17:57:51.150102 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.150705050+00:00 stderr F I1208 17:57:51.150318 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.150705050+00:00 stderr F I1208 17:57:51.150430 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.150705050+00:00 stderr F I1208 17:57:51.150542 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.150722580+00:00 stderr F I1208 17:57:51.150709 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.150867524+00:00 stderr F I1208 17:57:51.150841 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:57:51.151076920+00:00 stderr F I1208 17:57:51.151061 1 prune_controller.go:277] Nothing to prune 2025-12-08T17:58:25.081713999+00:00 stderr F E1208 17:58:25.081217 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post 
\"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T17:59:25.082685646+00:00 stderr F E1208 17:59:25.082106 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T18:00:25.086680303+00:00 stderr F E1208 18:00:25.085672 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T18:01:02.168506295+00:00 stderr F I1208 18:01:02.168002 1 warnings.go:110] "Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice" 2025-12-08T18:01:25.083466957+00:00 stderr F E1208 18:01:25.082936 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T18:02:25.088632524+00:00 stderr F E1208 18:02:25.088311 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T18:03:25.087691094+00:00 stderr F E1208 18:03:25.087108 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" 2025-12-08T18:04:25.089650032+00:00 stderr F E1208 18:04:25.089338 1 base_controller.go:279] "Unhandled Error" err="FSyncController reconciliation failed: Post \"https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query\": dial tcp: lookup thanos-querier.openshift-monitoring.svc on 10.217.4.10:53: no such host" ././@LongLink0000644000000000000000000000026200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611513033226 5ustar zuulzuul././@LongLink0000644000000000000000000000031600000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/multus-admission-controller/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611520033224 5ustar zuulzuul././@LongLink0000644000000000000000000000032300000000000011601 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/multus-admission-controller/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000644000175000017500000000237415115611513033236 0ustar zuulzuul2025-12-08T17:44:20.653396647+00:00 stderr F I1208 17:44:20.651567 1 main.go:57] starting net-attach-def-admission-controller webhook server 2025-12-08T17:44:20.657039356+00:00 stderr F W1208 17:44:20.655867 1 client_config.go:618] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 2025-12-08T17:44:20.661529389+00:00 stderr F W1208 17:44:20.661064 1 client_config.go:618] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 2025-12-08T17:44:20.666395952+00:00 stderr F I1208 17:44:20.664222 1 localmetrics.go:51] UPdating net-attach-def metrics for any with value 0 2025-12-08T17:44:20.666395952+00:00 stderr F I1208 17:44:20.664566 1 localmetrics.go:51] UPdating net-attach-def metrics for sriov with value 0 2025-12-08T17:44:20.666395952+00:00 stderr F I1208 17:44:20.664577 1 localmetrics.go:51] UPdating net-attach-def metrics for ib-sriov with value 0 2025-12-08T17:44:20.671777089+00:00 stderr F I1208 17:44:20.668956 1 controller.go:202] Starting net-attach-def-admission-controller 2025-12-08T17:44:20.772357982+00:00 stderr F I1208 17:44:20.772096 1 controller.go:211] net-attach-def-admission-controller synced and ready ././@LongLink0000644000000000000000000000030200000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611520033224 5ustar zuulzuul././@LongLink0000644000000000000000000000030700000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000644000175000017500000000202015115611513033222 0ustar zuulzuul2025-12-08T17:44:23.217782045+00:00 stderr F W1208 17:44:23.217246 1 deprecated.go:66] 2025-12-08T17:44:23.217782045+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:23.217782045+00:00 stderr F 2025-12-08T17:44:23.217782045+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 
2025-12-08T17:44:23.217782045+00:00 stderr F 2025-12-08T17:44:23.217782045+00:00 stderr F =============================================== 2025-12-08T17:44:23.217782045+00:00 stderr F 2025-12-08T17:44:23.219041040+00:00 stderr F I1208 17:44:23.218246 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:23.220991954+00:00 stderr F I1208 17:44:23.219913 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:23.223117391+00:00 stderr F I1208 17:44:23.221207 1 kube-rbac-proxy.go:397] Starting TCP socket on :8443 2025-12-08T17:44:23.223117391+00:00 stderr F I1208 17:44:23.221677 1 kube-rbac-proxy.go:404] Listening securely on :8443 ././@LongLink0000644000000000000000000000026000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_0000755000175000017500000000000015115611513032761 5ustar zuulzuul././@LongLink0000644000000000000000000000030000000000000011574 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_0000755000175000017500000000000015115611520032757 5ustar zuulzuul././@LongLink0000644000000000000000000000030500000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_0000644000175000017500000002025615115611513032770 0ustar zuulzuul2025-12-08T17:44:19.506437482+00:00 stderr F I1208 17:44:19.506015 1 flags.go:64] FLAG: --add-dir-header="false" 2025-12-08T17:44:19.508002814+00:00 stderr F I1208 17:44:19.506564 1 flags.go:64] FLAG: --allow-paths="[]" 2025-12-08T17:44:19.508045115+00:00 stderr F I1208 17:44:19.508032 1 flags.go:64] FLAG: --alsologtostderr="false" 2025-12-08T17:44:19.508067706+00:00 stderr F I1208 17:44:19.508058 1 flags.go:64] FLAG: --auth-header-fields-enabled="false" 2025-12-08T17:44:19.508101607+00:00 stderr F I1208 17:44:19.508081 1 flags.go:64] FLAG: --auth-header-groups-field-name="x-remote-groups" 2025-12-08T17:44:19.508122647+00:00 stderr F I1208 17:44:19.508113 1 flags.go:64] FLAG: --auth-header-groups-field-separator="|" 2025-12-08T17:44:19.508142438+00:00 stderr F I1208 17:44:19.508133 1 flags.go:64] FLAG: --auth-header-user-field-name="x-remote-user" 2025-12-08T17:44:19.508164879+00:00 stderr F I1208 17:44:19.508153 1 flags.go:64] FLAG: --auth-token-audiences="[]" 2025-12-08T17:44:19.508194739+00:00 stderr F I1208 17:44:19.508185 1 flags.go:64] FLAG: --client-ca-file="" 2025-12-08T17:44:19.508215490+00:00 stderr F I1208 17:44:19.508206 1 flags.go:64] FLAG: --config-file="/etc/kube-rbac-proxy/config-file.yaml" 2025-12-08T17:44:19.508235420+00:00 stderr F I1208 17:44:19.508227 1 flags.go:64] FLAG: --help="false" 2025-12-08T17:44:19.508255231+00:00 stderr F I1208 17:44:19.508247 1 flags.go:64] FLAG: --http2-disable="false" 2025-12-08T17:44:19.508276782+00:00 stderr F I1208 17:44:19.508266 1 flags.go:64] FLAG: --http2-max-concurrent-streams="100" 2025-12-08T17:44:19.508297142+00:00 
stderr F I1208 17:44:19.508288 1 flags.go:64] FLAG: --http2-max-size="262144" 2025-12-08T17:44:19.508348374+00:00 stderr F I1208 17:44:19.508336 1 flags.go:64] FLAG: --ignore-paths="[]" 2025-12-08T17:44:19.508371214+00:00 stderr F I1208 17:44:19.508361 1 flags.go:64] FLAG: --insecure-listen-address="" 2025-12-08T17:44:19.508393835+00:00 stderr F I1208 17:44:19.508383 1 flags.go:64] FLAG: --kube-api-burst="0" 2025-12-08T17:44:19.508418425+00:00 stderr F I1208 17:44:19.508406 1 flags.go:64] FLAG: --kube-api-qps="0" 2025-12-08T17:44:19.508445316+00:00 stderr F I1208 17:44:19.508435 1 flags.go:64] FLAG: --kubeconfig="" 2025-12-08T17:44:19.508466127+00:00 stderr F I1208 17:44:19.508457 1 flags.go:64] FLAG: --log-backtrace-at="" 2025-12-08T17:44:19.508485477+00:00 stderr F I1208 17:44:19.508477 1 flags.go:64] FLAG: --log-dir="" 2025-12-08T17:44:19.508504838+00:00 stderr F I1208 17:44:19.508496 1 flags.go:64] FLAG: --log-file="" 2025-12-08T17:44:19.508527038+00:00 stderr F I1208 17:44:19.508516 1 flags.go:64] FLAG: --log-file-max-size="0" 2025-12-08T17:44:19.508555959+00:00 stderr F I1208 17:44:19.508540 1 flags.go:64] FLAG: --log-flush-frequency="5s" 2025-12-08T17:44:19.508576790+00:00 stderr F I1208 17:44:19.508568 1 flags.go:64] FLAG: --logtostderr="true" 2025-12-08T17:44:19.508596260+00:00 stderr F I1208 17:44:19.508588 1 flags.go:64] FLAG: --oidc-ca-file="" 2025-12-08T17:44:19.508615661+00:00 stderr F I1208 17:44:19.508607 1 flags.go:64] FLAG: --oidc-clientID="" 2025-12-08T17:44:19.508635061+00:00 stderr F I1208 17:44:19.508626 1 flags.go:64] FLAG: --oidc-groups-claim="groups" 2025-12-08T17:44:19.508654232+00:00 stderr F I1208 17:44:19.508646 1 flags.go:64] FLAG: --oidc-groups-prefix="" 2025-12-08T17:44:19.508690203+00:00 stderr F I1208 17:44:19.508665 1 flags.go:64] FLAG: --oidc-issuer="" 2025-12-08T17:44:19.508714543+00:00 stderr F I1208 17:44:19.508702 1 flags.go:64] FLAG: --oidc-sign-alg="[RS256]" 2025-12-08T17:44:19.508734514+00:00 stderr F I1208 17:44:19.508726 1 flags.go:64] FLAG: --oidc-username-claim="email" 2025-12-08T17:44:19.508753844+00:00 stderr F I1208 17:44:19.508745 1 flags.go:64] FLAG: --oidc-username-prefix="" 2025-12-08T17:44:19.508773115+00:00 stderr F I1208 17:44:19.508765 1 flags.go:64] FLAG: --one-output="false" 2025-12-08T17:44:19.508792495+00:00 stderr F I1208 17:44:19.508784 1 flags.go:64] FLAG: --proxy-endpoints-port="0" 2025-12-08T17:44:19.508822616+00:00 stderr F I1208 17:44:19.508813 1 flags.go:64] FLAG: --secure-listen-address="0.0.0.0:8443" 2025-12-08T17:44:19.508842377+00:00 stderr F I1208 17:44:19.508834 1 flags.go:64] FLAG: --skip-headers="false" 2025-12-08T17:44:19.508861847+00:00 stderr F I1208 17:44:19.508853 1 flags.go:64] FLAG: --skip-log-headers="false" 2025-12-08T17:44:19.508899608+00:00 stderr F I1208 17:44:19.508889 1 flags.go:64] FLAG: --stderrthreshold="" 2025-12-08T17:44:19.508923569+00:00 stderr F I1208 17:44:19.508914 1 flags.go:64] FLAG: --tls-cert-file="/etc/tls/private/tls.crt" 2025-12-08T17:44:19.508952000+00:00 stderr F I1208 17:44:19.508934 1 flags.go:64] FLAG: --tls-cipher-suites="[TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305]" 2025-12-08T17:44:19.508973420+00:00 stderr F I1208 17:44:19.508963 1 flags.go:64] FLAG: --tls-min-version="VersionTLS12" 2025-12-08T17:44:19.508993292+00:00 stderr F I1208 17:44:19.508984 1 flags.go:64] FLAG: 
--tls-private-key-file="/etc/tls/private/tls.key" 2025-12-08T17:44:19.509014402+00:00 stderr F I1208 17:44:19.509004 1 flags.go:64] FLAG: --tls-reload-interval="1m0s" 2025-12-08T17:44:19.509034053+00:00 stderr F I1208 17:44:19.509025 1 flags.go:64] FLAG: --upstream="http://localhost:8080/" 2025-12-08T17:44:19.509053314+00:00 stderr F I1208 17:44:19.509045 1 flags.go:64] FLAG: --upstream-ca-file="" 2025-12-08T17:44:19.509072644+00:00 stderr F I1208 17:44:19.509064 1 flags.go:64] FLAG: --upstream-client-cert-file="" 2025-12-08T17:44:19.509091975+00:00 stderr F I1208 17:44:19.509083 1 flags.go:64] FLAG: --upstream-client-key-file="" 2025-12-08T17:44:19.509111235+00:00 stderr F I1208 17:44:19.509103 1 flags.go:64] FLAG: --upstream-force-h2c="false" 2025-12-08T17:44:19.509130866+00:00 stderr F I1208 17:44:19.509122 1 flags.go:64] FLAG: --v="3" 2025-12-08T17:44:19.509152506+00:00 stderr F I1208 17:44:19.509142 1 flags.go:64] FLAG: --version="false" 2025-12-08T17:44:19.509172437+00:00 stderr F I1208 17:44:19.509163 1 flags.go:64] FLAG: --vmodule="" 2025-12-08T17:44:19.509199357+00:00 stderr F W1208 17:44:19.509189 1 deprecated.go:66] 2025-12-08T17:44:19.509199357+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:19.509199357+00:00 stderr F 2025-12-08T17:44:19.509199357+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 2025-12-08T17:44:19.509199357+00:00 stderr F 2025-12-08T17:44:19.509199357+00:00 stderr F =============================================== 2025-12-08T17:44:19.509199357+00:00 stderr F 2025-12-08T17:44:19.509227748+00:00 stderr F I1208 17:44:19.509219 1 kube-rbac-proxy.go:532] Reading config file: /etc/kube-rbac-proxy/config-file.yaml 2025-12-08T17:44:19.510498132+00:00 stderr F I1208 17:44:19.510479 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:19.510534733+00:00 stderr F I1208 17:44:19.510525 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:19.510558674+00:00 stderr F I1208 17:44:19.510550 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:19.510581325+00:00 stderr F I1208 17:44:19.510573 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:19.510770820+00:00 stderr F I1208 17:44:19.510758 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:19.516505176+00:00 stderr F I1208 17:44:19.515934 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:19.517107402+00:00 stderr F I1208 17:44:19.516936 1 kube-rbac-proxy.go:397] Starting TCP socket on 0.0.0.0:8443 2025-12-08T17:44:19.518060579+00:00 stderr F I1208 17:44:19.517470 1 kube-rbac-proxy.go:404] Listening securely on 0.0.0.0:8443 ././@LongLink0000644000000000000000000000030500000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/machine-api-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_0000755000175000017500000000000015115611520032757 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/machine-api-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_0000644000175000017500000004354715115611513033000 0ustar zuulzuul2025-12-08T17:44:21.131908810+00:00 stderr F I1208 17:44:21.129828 1 start.go:74] Version: 4.20.0-202510211040.p2.g4a9b90e.assembly.stream.el9 2025-12-08T17:44:21.133328288+00:00 stderr F I1208 17:44:21.133277 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:21.133631666+00:00 stderr F I1208 17:44:21.133597 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:21.133631666+00:00 stderr F I1208 17:44:21.133617 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:21.133631666+00:00 stderr F I1208 17:44:21.133622 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:21.133631666+00:00 stderr F I1208 17:44:21.133627 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:21.133646976+00:00 stderr F I1208 17:44:21.133632 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:21.247615216+00:00 stderr F I1208 17:44:21.242150 1 leaderelection.go:257] attempting to acquire leader lease openshift-machine-api/machine-api-operator... 2025-12-08T17:44:21.269769190+00:00 stderr F I1208 17:44:21.269458 1 leaderelection.go:271] successfully acquired lease openshift-machine-api/machine-api-operator 2025-12-08T17:44:21.305373551+00:00 stderr F I1208 17:44:21.303633 1 operator.go:217] Starting Machine API Operator 2025-12-08T17:44:21.306525582+00:00 stderr F I1208 17:44:21.305776 1 reflector.go:357] "Starting reflector" type="*v1.DaemonSet" resyncPeriod="18m55.840437345s" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.306525582+00:00 stderr F I1208 17:44:21.305798 1 reflector.go:403] "Listing and watching" type="*v1.DaemonSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.306525582+00:00 stderr F I1208 17:44:21.305800 1 reflector.go:357] "Starting reflector" type="*v1.ClusterOperator" resyncPeriod="13m53.278805426s" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.306525582+00:00 stderr F I1208 17:44:21.305827 1 reflector.go:403] "Listing and watching" type="*v1.ClusterOperator" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.306525582+00:00 stderr F I1208 17:44:21.306108 1 reflector.go:357] "Starting reflector" type="*v1.MutatingWebhookConfiguration" resyncPeriod="18m55.840437345s" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.306525582+00:00 stderr F I1208 17:44:21.306131 1 reflector.go:403] "Listing and watching" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.306631745+00:00 stderr F I1208 17:44:21.306602 1 reflector.go:357] "Starting reflector" type="*v1.Deployment" resyncPeriod="18m55.840437345s" 
reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.306671916+00:00 stderr F I1208 17:44:21.306659 1 reflector.go:403] "Listing and watching" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.307017446+00:00 stderr F I1208 17:44:21.306981 1 reflector.go:357] "Starting reflector" type="*v1.FeatureGate" resyncPeriod="13m53.278805426s" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.307017446+00:00 stderr F I1208 17:44:21.307006 1 reflector.go:403] "Listing and watching" type="*v1.FeatureGate" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.307076347+00:00 stderr F I1208 17:44:21.307054 1 reflector.go:357] "Starting reflector" type="*v1.Proxy" resyncPeriod="13m53.278805426s" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.307076347+00:00 stderr F I1208 17:44:21.307069 1 reflector.go:403] "Listing and watching" type="*v1.Proxy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.307130049+00:00 stderr F I1208 17:44:21.307062 1 reflector.go:357] "Starting reflector" type="*v1.ClusterVersion" resyncPeriod="13m53.278805426s" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.307178640+00:00 stderr F I1208 17:44:21.307113 1 reflector.go:357] "Starting reflector" type="*v1.ValidatingWebhookConfiguration" resyncPeriod="18m55.840437345s" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.307188840+00:00 stderr F I1208 17:44:21.307182 1 reflector.go:403] "Listing and watching" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.307216521+00:00 stderr F I1208 17:44:21.307148 1 reflector.go:403] "Listing and watching" type="*v1.ClusterVersion" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.313496143+00:00 stderr F I1208 17:44:21.311372 1 reflector.go:357] "Starting reflector" type="*v1beta1.MachineSet" resyncPeriod="14m10.913774739s" reflector="github.com/openshift/client-go/machine/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.313496143+00:00 stderr F I1208 17:44:21.311397 1 reflector.go:403] "Listing and watching" type="*v1beta1.MachineSet" reflector="github.com/openshift/client-go/machine/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.313496143+00:00 stderr F I1208 17:44:21.311472 1 reflector.go:357] "Starting reflector" type="*v1beta1.Machine" resyncPeriod="14m10.913774739s" reflector="github.com/openshift/client-go/machine/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.313496143+00:00 stderr F I1208 17:44:21.311493 1 reflector.go:403] "Listing and watching" type="*v1beta1.Machine" reflector="github.com/openshift/client-go/machine/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.321720367+00:00 stderr F I1208 17:44:21.321001 1 reflector.go:430] "Caches populated" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.321720367+00:00 stderr F I1208 17:44:21.321504 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.321720367+00:00 stderr F I1208 17:44:21.321524 1 
reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.325022047+00:00 stderr F I1208 17:44:21.324383 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.341870447+00:00 stderr F I1208 17:44:21.341084 1 reflector.go:430] "Caches populated" type="*v1.DaemonSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.345714211+00:00 stderr F I1208 17:44:21.342820 1 reflector.go:430] "Caches populated" type="*v1beta1.MachineSet" reflector="github.com/openshift/client-go/machine/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.345714211+00:00 stderr F I1208 17:44:21.343282 1 reflector.go:430] "Caches populated" type="*v1beta1.Machine" reflector="github.com/openshift/client-go/machine/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.345714211+00:00 stderr F I1208 17:44:21.343709 1 reflector.go:430] "Caches populated" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:21.345714211+00:00 stderr F I1208 17:44:21.343830 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.351860219+00:00 stderr F I1208 17:44:21.351697 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:21.405131462+00:00 stderr F I1208 17:44:21.405051 1 operator.go:229] Synced up caches 2025-12-08T17:44:21.405131462+00:00 stderr F I1208 17:44:21.405088 1 operator.go:234] Started feature gate accessor 2025-12-08T17:44:21.405131462+00:00 stderr F I1208 17:44:21.405114 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:21.407334982+00:00 stderr F I1208 17:44:21.407144 1 start.go:123] Synced up machine api informer caches 2025-12-08T17:44:21.407334982+00:00 stderr F I1208 17:44:21.407179 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-machine-api", Name:"machine-api-operator", UID:"6e3281a2-74ca-4530-b743-ae9a62edcc78", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", 
"AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:21.440268211+00:00 stderr F I1208 17:44:21.440206 1 status.go:70] Syncing status: re-syncing 2025-12-08T17:44:21.449272076+00:00 stderr F I1208 17:44:21.449095 1 sync.go:78] Provider is NoOp, skipping synchronisation 2025-12-08T17:44:21.461864000+00:00 stderr F I1208 17:44:21.461404 1 status.go:100] Syncing status: available 2025-12-08T17:46:21.286470202+00:00 stderr F E1208 17:46:21.285765 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-machine-api/leases/machine-api-operator": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:21.287623296+00:00 stderr F E1208 17:46:21.287548 1 leaderelection.go:436] error retrieving resource lock openshift-machine-api/machine-api-operator: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-machine-api/leases/machine-api-operator": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:47:00.527829011+00:00 stderr F I1208 17:47:00.527183 1 reflector.go:403] "Listing and watching" type="*v1.Proxy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:00.533866571+00:00 stderr F I1208 17:47:00.533791 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:10.341802547+00:00 stderr F I1208 17:47:10.341197 1 reflector.go:403] "Listing and watching" type="*v1.FeatureGate" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:10.345246365+00:00 stderr F I1208 17:47:10.345137 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 
2025-12-08T17:47:11.795058104+00:00 stderr F I1208 17:47:11.794949 1 reflector.go:403] "Listing and watching" type="*v1beta1.Machine" reflector="github.com/openshift/client-go/machine/informers/externalversions/factory.go:125" 2025-12-08T17:47:11.797345166+00:00 stderr F I1208 17:47:11.797232 1 reflector.go:430] "Caches populated" type="*v1beta1.Machine" reflector="github.com/openshift/client-go/machine/informers/externalversions/factory.go:125" 2025-12-08T17:47:12.991615051+00:00 stderr F I1208 17:47:12.991535 1 reflector.go:403] "Listing and watching" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:12.993527631+00:00 stderr F I1208 17:47:12.993475 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:13.748057683+00:00 stderr F I1208 17:47:13.747853 1 reflector.go:403] "Listing and watching" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:13.750360185+00:00 stderr F I1208 17:47:13.750293 1 reflector.go:430] "Caches populated" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:24.630066388+00:00 stderr F I1208 17:47:24.629463 1 reflector.go:403] "Listing and watching" type="*v1beta1.MachineSet" reflector="github.com/openshift/client-go/machine/informers/externalversions/factory.go:125" 2025-12-08T17:47:24.632630639+00:00 stderr F I1208 17:47:24.632595 1 reflector.go:430] "Caches populated" type="*v1beta1.MachineSet" reflector="github.com/openshift/client-go/machine/informers/externalversions/factory.go:125" 2025-12-08T17:47:25.574101276+00:00 stderr F I1208 17:47:25.573508 1 reflector.go:403] "Listing and watching" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:25.575666145+00:00 stderr F I1208 17:47:25.575612 1 reflector.go:430] "Caches populated" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:44.819444260+00:00 stderr F I1208 17:47:44.818902 1 reflector.go:403] "Listing and watching" type="*v1.DaemonSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:44.824896441+00:00 stderr F I1208 17:47:44.824836 1 reflector.go:430] "Caches populated" type="*v1.DaemonSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:46.521139698+00:00 stderr F I1208 17:47:46.520624 1 reflector.go:403] "Listing and watching" type="*v1.ClusterVersion" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:46.523944196+00:00 stderr F I1208 17:47:46.523900 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:51.357575194+00:00 stderr F I1208 17:47:51.356999 1 reflector.go:403] "Listing and watching" type="*v1.ClusterOperator" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:51.368610362+00:00 stderr F I1208 17:47:51.368506 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:51.382527280+00:00 stderr F I1208 17:47:51.382430 1 status.go:70] Syncing status: re-syncing 2025-12-08T17:47:51.391207153+00:00 stderr F I1208 17:47:51.391138 1 sync.go:78] 
Provider is NoOp, skipping synchronisation 2025-12-08T17:47:51.394464675+00:00 stderr F I1208 17:47:51.394410 1 status.go:100] Syncing status: available 2025-12-08T18:01:44.663238792+00:00 stderr F I1208 18:01:44.662325 1 status.go:70] Syncing status: re-syncing 2025-12-08T18:01:44.670542127+00:00 stderr F I1208 18:01:44.670497 1 sync.go:78] Provider is NoOp, skipping synchronisation 2025-12-08T18:01:44.674635656+00:00 stderr F I1208 18:01:44.674613 1 status.go:100] Syncing status: available ././@LongLink0000644000000000000000000000024100000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-0000755000175000017500000000000015115611513033053 5ustar zuulzuul././@LongLink0000644000000000000000000000026400000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-collectd/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-0000755000175000017500000000000015115611521033052 5ustar zuulzuul././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-collectd/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-0000644000175000017500000013142515115611513033063 0ustar zuulzuul2025-12-08T17:59:24.585379459+00:00 stdout F *** [INFO] My pod is: stf-smoketest-smoke1-pbhxq 2025-12-08T17:59:24.585379459+00:00 stdout F *** [INFO] Using this collectd.conf: 2025-12-08T17:59:24.594155950+00:00 stdout F Interval 1 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F LoadPlugin "logfile" 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F LogLevel "debug" 2025-12-08T17:59:24.594155950+00:00 stdout F File stdout 2025-12-08T17:59:24.594155950+00:00 stdout F Timestamp true 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F LoadPlugin cpu 2025-12-08T17:59:24.594155950+00:00 stdout F LoadPlugin amqp1 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F Host "qdr-test" 2025-12-08T17:59:24.594155950+00:00 stdout F Port "5672" 2025-12-08T17:59:24.594155950+00:00 stdout F Address "collectd" 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F Format JSON 2025-12-08T17:59:24.594155950+00:00 stdout F PreSettle false 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F Format JSON 2025-12-08T17:59:24.594155950+00:00 stdout F PreSettle false 2025-12-08T17:59:24.594155950+00:00 stdout F Notify true 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F LoadPlugin interface 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 
stdout F IgnoreSelected true 2025-12-08T17:59:24.594155950+00:00 stdout F ReportInactive true 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F LoadPlugin threshold 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F Instance "lo" 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F FailureMax 0 2025-12-08T17:59:24.594155950+00:00 stdout F DataSource "rx" 2025-12-08T17:59:24.594155950+00:00 stdout F Persist true 2025-12-08T17:59:24.594155950+00:00 stdout F PersistOK true 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.594155950+00:00 stdout F 2025-12-08T17:59:24.602050569+00:00 stdout F *** [INFO] Sleeping for 3 seconds waiting for collectd to enter read-loop 2025-12-08T17:59:24.602094680+00:00 stderr F grep: /tmp/collectd_output: No such file or directory 2025-12-08T17:59:24.605118599+00:00 stdout F [2025-12-08 17:59:25] plugin_load: plugin "logfile" successfully loaded. 2025-12-08T17:59:24.605118599+00:00 stdout F [2025-12-08 17:59:25] logfile: invalid loglevel [debug] defaulting to 'info' 2025-12-08T17:59:24.605118599+00:00 stdout F [2025-12-08 17:59:25] plugin_load: plugin "cpu" successfully loaded. 2025-12-08T17:59:24.605118599+00:00 stdout F [2025-12-08 17:59:25] plugin_load: plugin "amqp1" successfully loaded. 2025-12-08T17:59:24.605118599+00:00 stdout F [2025-12-08 17:59:25] plugin_load: plugin "interface" successfully loaded. 2025-12-08T17:59:24.605118599+00:00 stdout F [2025-12-08 17:59:25] plugin_load: plugin "threshold" successfully loaded. 2025-12-08T17:59:24.605118599+00:00 stdout F [2025-12-08 17:59:25] Initialization complete, entering read-loop. 2025-12-08T17:59:24.606484826+00:00 stdout F [2025-12-08 17:59:25] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is nan. 2025-12-08T17:59:25.604986321+00:00 stdout F [2025-12-08 17:59:26] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:26.606177918+00:00 stdout F [2025-12-08 17:59:27] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:27.602628830+00:00 stdout F [2025-12-08 17:59:28] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:27.607604632+00:00 stdout F [2025-12-08 17:59:25] Initialization complete, entering read-loop. 
2025-12-08T17:59:27.608203097+00:00 stdout F *** [INFO] Sleeping for 30 seconds to collect 30s of metrics and events 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: log_file, value: , result: default] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: log_level, value: DEBUG, result: parsed] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: allow_exec, value: true, result: default] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: connection, value: , result: default] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: subscriptions, value: all,default, result: default] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: client_name, value: stf-smoketest-smoke1-pbhxq, result: default] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: client_address, value: 10.217.0.84, result: default] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: keepalive_interval, value: 20, result: parsed] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: tmp_base_dir, value: /var/tmp/collectd-sensubility-checks, result: parsed] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: shell_path, value: /usr/bin/sh, result: parsed] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: worker_count, value: 2, result: parsed] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: checks, value: {"check-container-health":{"command":"cat /healthcheck.log","handlers":[],"interval":3,"occurrences":3,"refresh":90,"standalone":true}}, result: parsed] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: connection, value: amqp://qdr-test:5672, result: parsed] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: client_name, value: smoketest.redhat.com, result: parsed] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [result: default, parameter: send_timeout, value: 2] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: results_channel, value: sensubility/cloud1-telemetry, result: parsed] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [parameter: results_format, value: smartgateway, result: parsed] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [result: default, parameter: listen_channels, value: ] 2025-12-08T17:59:27.621846887+00:00 stdout F [DEBUG] using configuration value. [result: default, parameter: listen_prefetch, value: -1] 2025-12-08T17:59:28.603625643+00:00 stdout F [2025-12-08 17:59:29] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 
2025-12-08T17:59:29.604293526+00:00 stdout F [2025-12-08 17:59:30] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:30.603079129+00:00 stdout F [2025-12-08 17:59:31] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:30.627458682+00:00 stdout F [DEBUG] Requesting execution of check. [check: check-container-health] 2025-12-08T17:59:30.627645206+00:00 stdout F [DEBUG] Created check script. [command: cat /healthcheck.log, path: /var/tmp/collectd-sensubility-checks/check-918454417] 2025-12-08T17:59:30.636420368+00:00 stdout F [DEBUG] Executed check script. [output: [{"service":"smoketest-svc","container":"smoketest-container","status":"unhealthy","healthy":0}] 2025-12-08T17:59:30.636420368+00:00 stdout F , command: cat /healthcheck.log, status: 0] 2025-12-08T17:59:30.637060764+00:00 stdout F [DEBUG] Sending AMQP1.0 message [address: sensubility/cloud1-telemetry, body: {"labels":{"check":"check-container-health","client":"stf-smoketest-smoke1-pbhxq","severity":"OKAY"},"annotations":{"command":"cat /healthcheck.log","duration":0.00872408,"executed":1765216770,"issued":1765216770,"output":"[{\"service\":\"smoketest-svc\",\"container\":\"smoketest-container\",\"status\":\"unhealthy\",\"healthy\":0}]\n","status":0,"ves":"{\"commonEventHeader\":{\"domain\":\"heartbeat\",\"eventType\":\"checkResult\",\"eventId\":\"stf-smoketest-smoke1-pbhxq-check-container-health\",\"priority\":\"Normal\",\"reportingEntityId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"reportingEntityName\":\"stf-smoketest-smoke1-pbhxq\",\"sourceId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"sourceName\":\"stf-smoketest-smoke1-pbhxq-collectd-sensubility\",\"startingEpochMicrosec\":1765216770,\"lastEpochMicrosec\":1765216770},\"heartbeatFields\":{\"additionalFields\":{\"check\":\"check-container-health\",\"command\":\"cat /healthcheck.log\",\"duration\":\"0.008724\",\"executed\":\"1765216770\",\"issued\":\"1765216770\",\"output\":\"[{\\\"service\\\":\\\"smoketest-svc\\\",\\\"container\\\":\\\"smoketest-container\\\",\\\"status\\\":\\\"unhealthy\\\",\\\"healthy\\\":0}]\\n\",\"status\":\"0\"}}}"},"startsAt":"2025-12-08T17:59:30Z"}] 2025-12-08T17:59:31.604570214+00:00 stdout F [2025-12-08 17:59:32] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:32.602918446+00:00 stdout F [2025-12-08 17:59:33] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 
2025-12-08T17:59:33.602992543+00:00 stdout F [2025-12-08 17:59:34] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:33.627261403+00:00 stdout F [DEBUG] Requesting execution of check. [check: check-container-health] 2025-12-08T17:59:33.635308455+00:00 stdout F [DEBUG] Executed check script. [command: cat /healthcheck.log, status: 0, output: [{"service":"smoketest-svc","container":"smoketest-container","status":"unhealthy","healthy":0}] 2025-12-08T17:59:33.635308455+00:00 stdout F ] 2025-12-08T17:59:33.635477900+00:00 stdout F [DEBUG] Sending AMQP1.0 message [address: sensubility/cloud1-telemetry, body: {"labels":{"check":"check-container-health","client":"stf-smoketest-smoke1-pbhxq","severity":"OKAY"},"annotations":{"command":"cat /healthcheck.log","duration":0.00796082,"executed":1765216773,"issued":1765216773,"output":"[{\"service\":\"smoketest-svc\",\"container\":\"smoketest-container\",\"status\":\"unhealthy\",\"healthy\":0}]\n","status":0,"ves":"{\"commonEventHeader\":{\"domain\":\"heartbeat\",\"eventType\":\"checkResult\",\"eventId\":\"stf-smoketest-smoke1-pbhxq-check-container-health\",\"priority\":\"Normal\",\"reportingEntityId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"reportingEntityName\":\"stf-smoketest-smoke1-pbhxq\",\"sourceId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"sourceName\":\"stf-smoketest-smoke1-pbhxq-collectd-sensubility\",\"startingEpochMicrosec\":1765216773,\"lastEpochMicrosec\":1765216773},\"heartbeatFields\":{\"additionalFields\":{\"check\":\"check-container-health\",\"command\":\"cat /healthcheck.log\",\"duration\":\"0.007961\",\"executed\":\"1765216773\",\"issued\":\"1765216773\",\"output\":\"[{\\\"service\\\":\\\"smoketest-svc\\\",\\\"container\\\":\\\"smoketest-container\\\",\\\"status\\\":\\\"unhealthy\\\",\\\"healthy\\\":0}]\\n\",\"status\":\"0\"}}}"},"startsAt":"2025-12-08T17:59:33Z"}] 2025-12-08T17:59:34.603766759+00:00 stdout F [2025-12-08 17:59:35] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:35.602757868+00:00 stdout F [2025-12-08 17:59:36] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:36.602849576+00:00 stdout F [2025-12-08 17:59:37] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:36.626967902+00:00 stdout F [DEBUG] Requesting execution of check. [check: check-container-health] 2025-12-08T17:59:36.636198375+00:00 stdout F [DEBUG] Executed check script. 
[output: [{"service":"smoketest-svc","container":"smoketest-container","status":"unhealthy","healthy":0}] 2025-12-08T17:59:36.636198375+00:00 stdout F , command: cat /healthcheck.log, status: 0] 2025-12-08T17:59:36.636342849+00:00 stdout F [DEBUG] Sending AMQP1.0 message [address: sensubility/cloud1-telemetry, body: {"labels":{"check":"check-container-health","client":"stf-smoketest-smoke1-pbhxq","severity":"OKAY"},"annotations":{"command":"cat /healthcheck.log","duration":0.009244453,"executed":1765216776,"issued":1765216776,"output":"[{\"service\":\"smoketest-svc\",\"container\":\"smoketest-container\",\"status\":\"unhealthy\",\"healthy\":0}]\n","status":0,"ves":"{\"commonEventHeader\":{\"domain\":\"heartbeat\",\"eventType\":\"checkResult\",\"eventId\":\"stf-smoketest-smoke1-pbhxq-check-container-health\",\"priority\":\"Normal\",\"reportingEntityId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"reportingEntityName\":\"stf-smoketest-smoke1-pbhxq\",\"sourceId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"sourceName\":\"stf-smoketest-smoke1-pbhxq-collectd-sensubility\",\"startingEpochMicrosec\":1765216776,\"lastEpochMicrosec\":1765216776},\"heartbeatFields\":{\"additionalFields\":{\"check\":\"check-container-health\",\"command\":\"cat /healthcheck.log\",\"duration\":\"0.009244\",\"executed\":\"1765216776\",\"issued\":\"1765216776\",\"output\":\"[{\\\"service\\\":\\\"smoketest-svc\\\",\\\"container\\\":\\\"smoketest-container\\\",\\\"status\\\":\\\"unhealthy\\\",\\\"healthy\\\":0}]\\n\",\"status\":\"0\"}}}"},"startsAt":"2025-12-08T17:59:36Z"}] 2025-12-08T17:59:37.609246060+00:00 stdout F [2025-12-08 17:59:38] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:38.605854607+00:00 stdout F [2025-12-08 17:59:39] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:39.605105873+00:00 stdout F [2025-12-08 17:59:40] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:39.626953298+00:00 stdout F [DEBUG] Requesting execution of check. [check: check-container-health] 2025-12-08T17:59:39.637534667+00:00 stdout F [DEBUG] Executed check script. 
[output: [{"service":"smoketest-svc","container":"smoketest-container","status":"unhealthy","healthy":0}] 2025-12-08T17:59:39.637534667+00:00 stdout F , command: cat /healthcheck.log, status: 0] 2025-12-08T17:59:39.637988999+00:00 stdout F [DEBUG] Sending AMQP1.0 message [address: sensubility/cloud1-telemetry, body: {"labels":{"check":"check-container-health","client":"stf-smoketest-smoke1-pbhxq","severity":"OKAY"},"annotations":{"command":"cat /healthcheck.log","duration":0.010554828,"executed":1765216779,"issued":1765216779,"output":"[{\"service\":\"smoketest-svc\",\"container\":\"smoketest-container\",\"status\":\"unhealthy\",\"healthy\":0}]\n","status":0,"ves":"{\"commonEventHeader\":{\"domain\":\"heartbeat\",\"eventType\":\"checkResult\",\"eventId\":\"stf-smoketest-smoke1-pbhxq-check-container-health\",\"priority\":\"Normal\",\"reportingEntityId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"reportingEntityName\":\"stf-smoketest-smoke1-pbhxq\",\"sourceId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"sourceName\":\"stf-smoketest-smoke1-pbhxq-collectd-sensubility\",\"startingEpochMicrosec\":1765216779,\"lastEpochMicrosec\":1765216779},\"heartbeatFields\":{\"additionalFields\":{\"check\":\"check-container-health\",\"command\":\"cat /healthcheck.log\",\"duration\":\"0.010555\",\"executed\":\"1765216779\",\"issued\":\"1765216779\",\"output\":\"[{\\\"service\\\":\\\"smoketest-svc\\\",\\\"container\\\":\\\"smoketest-container\\\",\\\"status\\\":\\\"unhealthy\\\",\\\"healthy\\\":0}]\\n\",\"status\":\"0\"}}}"},"startsAt":"2025-12-08T17:59:39Z"}] 2025-12-08T17:59:40.603621289+00:00 stdout F [2025-12-08 17:59:41] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:41.603383658+00:00 stdout F [2025-12-08 17:59:42] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:42.603291602+00:00 stdout F [2025-12-08 17:59:43] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:42.628939888+00:00 stdout F [DEBUG] Requesting execution of check. [check: check-container-health] 2025-12-08T17:59:42.636382614+00:00 stdout F [DEBUG] Executed check script. 
[command: cat /healthcheck.log, status: 0, output: [{"service":"smoketest-svc","container":"smoketest-container","status":"unhealthy","healthy":0}] 2025-12-08T17:59:42.636382614+00:00 stdout F ] 2025-12-08T17:59:42.636599440+00:00 stdout F [DEBUG] Sending AMQP1.0 message [address: sensubility/cloud1-telemetry, body: {"labels":{"check":"check-container-health","client":"stf-smoketest-smoke1-pbhxq","severity":"OKAY"},"annotations":{"command":"cat /healthcheck.log","duration":0.008363171,"executed":1765216782,"issued":1765216782,"output":"[{\"service\":\"smoketest-svc\",\"container\":\"smoketest-container\",\"status\":\"unhealthy\",\"healthy\":0}]\n","status":0,"ves":"{\"commonEventHeader\":{\"domain\":\"heartbeat\",\"eventType\":\"checkResult\",\"eventId\":\"stf-smoketest-smoke1-pbhxq-check-container-health\",\"priority\":\"Normal\",\"reportingEntityId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"reportingEntityName\":\"stf-smoketest-smoke1-pbhxq\",\"sourceId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"sourceName\":\"stf-smoketest-smoke1-pbhxq-collectd-sensubility\",\"startingEpochMicrosec\":1765216782,\"lastEpochMicrosec\":1765216782},\"heartbeatFields\":{\"additionalFields\":{\"check\":\"check-container-health\",\"command\":\"cat /healthcheck.log\",\"duration\":\"0.008363\",\"executed\":\"1765216782\",\"issued\":\"1765216782\",\"output\":\"[{\\\"service\\\":\\\"smoketest-svc\\\",\\\"container\\\":\\\"smoketest-container\\\",\\\"status\\\":\\\"unhealthy\\\",\\\"healthy\\\":0}]\\n\",\"status\":\"0\"}}}"},"startsAt":"2025-12-08T17:59:42Z"}] 2025-12-08T17:59:43.603806706+00:00 stdout F [2025-12-08 17:59:44] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:44.603438114+00:00 stdout F [2025-12-08 17:59:45] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:45.626694764+00:00 stdout F [2025-12-08 17:59:46] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:45.627055163+00:00 stdout F [DEBUG] Requesting execution of check. [check: check-container-health] 2025-12-08T17:59:45.632501406+00:00 stdout F [DEBUG] Executed check script. 
[command: cat /healthcheck.log, status: 0, output: [{"service":"smoketest-svc","container":"smoketest-container","status":"unhealthy","healthy":0}] 2025-12-08T17:59:45.632501406+00:00 stdout F ] 2025-12-08T17:59:45.633035990+00:00 stdout F [DEBUG] Sending AMQP1.0 message [address: sensubility/cloud1-telemetry, body: {"labels":{"check":"check-container-health","client":"stf-smoketest-smoke1-pbhxq","severity":"OKAY"},"annotations":{"command":"cat /healthcheck.log","duration":0.005409862,"executed":1765216785,"issued":1765216785,"output":"[{\"service\":\"smoketest-svc\",\"container\":\"smoketest-container\",\"status\":\"unhealthy\",\"healthy\":0}]\n","status":0,"ves":"{\"commonEventHeader\":{\"domain\":\"heartbeat\",\"eventType\":\"checkResult\",\"eventId\":\"stf-smoketest-smoke1-pbhxq-check-container-health\",\"priority\":\"Normal\",\"reportingEntityId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"reportingEntityName\":\"stf-smoketest-smoke1-pbhxq\",\"sourceId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"sourceName\":\"stf-smoketest-smoke1-pbhxq-collectd-sensubility\",\"startingEpochMicrosec\":1765216785,\"lastEpochMicrosec\":1765216785},\"heartbeatFields\":{\"additionalFields\":{\"check\":\"check-container-health\",\"command\":\"cat /healthcheck.log\",\"duration\":\"0.005410\",\"executed\":\"1765216785\",\"issued\":\"1765216785\",\"output\":\"[{\\\"service\\\":\\\"smoketest-svc\\\",\\\"container\\\":\\\"smoketest-container\\\",\\\"status\\\":\\\"unhealthy\\\",\\\"healthy\\\":0}]\\n\",\"status\":\"0\"}}}"},"startsAt":"2025-12-08T17:59:45Z"}] 2025-12-08T17:59:46.605723000+00:00 stdout F [2025-12-08 17:59:47] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:47.605334447+00:00 stdout F [2025-12-08 17:59:48] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:48.604183105+00:00 stdout F [2025-12-08 17:59:49] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:48.627327503+00:00 stdout F [DEBUG] Requesting execution of check. [check: check-container-health] 2025-12-08T17:59:48.636304440+00:00 stdout F [DEBUG] Executed check script. 
[command: cat /healthcheck.log, status: 0, output: [{"service":"smoketest-svc","container":"smoketest-container","status":"unhealthy","healthy":0}] 2025-12-08T17:59:48.636304440+00:00 stdout F ] 2025-12-08T17:59:48.636517706+00:00 stdout F [DEBUG] Sending AMQP1.0 message [address: sensubility/cloud1-telemetry, body: {"labels":{"check":"check-container-health","client":"stf-smoketest-smoke1-pbhxq","severity":"OKAY"},"annotations":{"command":"cat /healthcheck.log","duration":0.008973526,"executed":1765216788,"issued":1765216788,"output":"[{\"service\":\"smoketest-svc\",\"container\":\"smoketest-container\",\"status\":\"unhealthy\",\"healthy\":0}]\n","status":0,"ves":"{\"commonEventHeader\":{\"domain\":\"heartbeat\",\"eventType\":\"checkResult\",\"eventId\":\"stf-smoketest-smoke1-pbhxq-check-container-health\",\"priority\":\"Normal\",\"reportingEntityId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"reportingEntityName\":\"stf-smoketest-smoke1-pbhxq\",\"sourceId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"sourceName\":\"stf-smoketest-smoke1-pbhxq-collectd-sensubility\",\"startingEpochMicrosec\":1765216788,\"lastEpochMicrosec\":1765216788},\"heartbeatFields\":{\"additionalFields\":{\"check\":\"check-container-health\",\"command\":\"cat /healthcheck.log\",\"duration\":\"0.008974\",\"executed\":\"1765216788\",\"issued\":\"1765216788\",\"output\":\"[{\\\"service\\\":\\\"smoketest-svc\\\",\\\"container\\\":\\\"smoketest-container\\\",\\\"status\\\":\\\"unhealthy\\\",\\\"healthy\\\":0}]\\n\",\"status\":\"0\"}}}"},"startsAt":"2025-12-08T17:59:48Z"}] 2025-12-08T17:59:49.602663772+00:00 stdout F [2025-12-08 17:59:50] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:50.603523183+00:00 stdout F [2025-12-08 17:59:51] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:51.602794441+00:00 stdout F [2025-12-08 17:59:52] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:51.627114601+00:00 stdout F [DEBUG] Requesting execution of check. [check: check-container-health] 2025-12-08T17:59:51.637270299+00:00 stdout F [DEBUG] Executed check script. 
[output: [{"service":"smoketest-svc","container":"smoketest-container","status":"unhealthy","healthy":0}] 2025-12-08T17:59:51.637270299+00:00 stdout F , command: cat /healthcheck.log, status: 0] 2025-12-08T17:59:51.637418433+00:00 stdout F [DEBUG] Sending AMQP1.0 message [address: sensubility/cloud1-telemetry, body: {"labels":{"check":"check-container-health","client":"stf-smoketest-smoke1-pbhxq","severity":"OKAY"},"annotations":{"command":"cat /healthcheck.log","duration":0.010121737,"executed":1765216791,"issued":1765216791,"output":"[{\"service\":\"smoketest-svc\",\"container\":\"smoketest-container\",\"status\":\"unhealthy\",\"healthy\":0}]\n","status":0,"ves":"{\"commonEventHeader\":{\"domain\":\"heartbeat\",\"eventType\":\"checkResult\",\"eventId\":\"stf-smoketest-smoke1-pbhxq-check-container-health\",\"priority\":\"Normal\",\"reportingEntityId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"reportingEntityName\":\"stf-smoketest-smoke1-pbhxq\",\"sourceId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"sourceName\":\"stf-smoketest-smoke1-pbhxq-collectd-sensubility\",\"startingEpochMicrosec\":1765216791,\"lastEpochMicrosec\":1765216791},\"heartbeatFields\":{\"additionalFields\":{\"check\":\"check-container-health\",\"command\":\"cat /healthcheck.log\",\"duration\":\"0.010122\",\"executed\":\"1765216791\",\"issued\":\"1765216791\",\"output\":\"[{\\\"service\\\":\\\"smoketest-svc\\\",\\\"container\\\":\\\"smoketest-container\\\",\\\"status\\\":\\\"unhealthy\\\",\\\"healthy\\\":0}]\\n\",\"status\":\"0\"}}}"},"startsAt":"2025-12-08T17:59:51Z"}] 2025-12-08T17:59:52.602720438+00:00 stdout F [2025-12-08 17:59:53] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:53.605910219+00:00 stdout F [2025-12-08 17:59:54] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:54.609622805+00:00 stdout F [2025-12-08 17:59:55] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:54.627050693+00:00 stdout F [DEBUG] Requesting execution of check. [check: check-container-health] 2025-12-08T17:59:54.632580549+00:00 stdout F [DEBUG] Executed check script. 
[command: cat /healthcheck.log, status: 0, output: [{"service":"smoketest-svc","container":"smoketest-container","status":"unhealthy","healthy":0}] 2025-12-08T17:59:54.632580549+00:00 stdout F ] 2025-12-08T17:59:54.632820185+00:00 stdout F [DEBUG] Sending AMQP1.0 message [address: sensubility/cloud1-telemetry, body: {"labels":{"check":"check-container-health","client":"stf-smoketest-smoke1-pbhxq","severity":"OKAY"},"annotations":{"command":"cat /healthcheck.log","duration":0.005504445,"executed":1765216794,"issued":1765216794,"output":"[{\"service\":\"smoketest-svc\",\"container\":\"smoketest-container\",\"status\":\"unhealthy\",\"healthy\":0}]\n","status":0,"ves":"{\"commonEventHeader\":{\"domain\":\"heartbeat\",\"eventType\":\"checkResult\",\"eventId\":\"stf-smoketest-smoke1-pbhxq-check-container-health\",\"priority\":\"Normal\",\"reportingEntityId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"reportingEntityName\":\"stf-smoketest-smoke1-pbhxq\",\"sourceId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"sourceName\":\"stf-smoketest-smoke1-pbhxq-collectd-sensubility\",\"startingEpochMicrosec\":1765216794,\"lastEpochMicrosec\":1765216794},\"heartbeatFields\":{\"additionalFields\":{\"check\":\"check-container-health\",\"command\":\"cat /healthcheck.log\",\"duration\":\"0.005504\",\"executed\":\"1765216794\",\"issued\":\"1765216794\",\"output\":\"[{\\\"service\\\":\\\"smoketest-svc\\\",\\\"container\\\":\\\"smoketest-container\\\",\\\"status\\\":\\\"unhealthy\\\",\\\"healthy\\\":0}]\\n\",\"status\":\"0\"}}}"},"startsAt":"2025-12-08T17:59:54Z"}] 2025-12-08T17:59:55.602972298+00:00 stdout F [2025-12-08 17:59:56] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:56.606706315+00:00 stdout F [2025-12-08 17:59:57] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:57.602759058+00:00 stdout F [2025-12-08 17:59:58] Notification: severity = OKAY, host = stf-smoketest-smoke1-pbhxq, plugin = interface, plugin_instance = lo, type = if_octets, message = Host stf-smoketest-smoke1-pbhxq, plugin interface (instance lo) type if_octets: All data sources are within range again. Current value of "rx" is 0.000000. 2025-12-08T17:59:57.611409555+00:00 stdout F *** [INFO] List of metric names for debugging... 
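The check-container-health cycle repeated above amounts to running `cat /healthcheck.log` inside the smoketest pod and forwarding the JSON result over AMQP 1.0 to sensubility/cloud1-telemetry. A minimal, hedged sketch of reproducing the check by hand is below; the pod name comes from this log, while the use of oc exec and jq (and which container to exec into) are assumptions rather than part of the smoketest script.

# Hedged sketch: re-run the sensubility health check manually.
# POD comes from the log above; add "-c <container>" if the pod has several containers.
POD=stf-smoketest-smoke1-pbhxq
oc exec "$POD" -- cat /healthcheck.log \
  | jq -r '.[] | "\(.service)/\(.container): status=\(.status) healthy=\(.healthy)"'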
2025-12-08T17:59:57.617031403+00:00 stderr F   % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
2025-12-08T17:59:57.617151146+00:00 stderr F                                  Dload  Upload   Total   Spent    Left  Speed
2025-12-08T17:59:57.617202598+00:00 stderr F   0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
2025-12-08T17:59:57.626612946+00:00 stdout F [DEBUG] Requesting execution of check. [check: check-container-health]
2025-12-08T17:59:57.632390478+00:00 stdout F [DEBUG] Executed check script. [command: cat /healthcheck.log, status: 0, output: [{"service":"smoketest-svc","container":"smoketest-container","status":"unhealthy","healthy":0}]
2025-12-08T17:59:57.632390478+00:00 stdout F ]
2025-12-08T17:59:57.632708096+00:00 stdout F [DEBUG] Sending AMQP1.0 message [address: sensubility/cloud1-telemetry, body: {"labels":{"check":"check-container-health","client":"stf-smoketest-smoke1-pbhxq","severity":"OKAY"},"annotations":{"command":"cat /healthcheck.log","duration":0.005741171,"executed":1765216797,"issued":1765216797,"output":"[{\"service\":\"smoketest-svc\",\"container\":\"smoketest-container\",\"status\":\"unhealthy\",\"healthy\":0}]\n","status":0,"ves":"{\"commonEventHeader\":{\"domain\":\"heartbeat\",\"eventType\":\"checkResult\",\"eventId\":\"stf-smoketest-smoke1-pbhxq-check-container-health\",\"priority\":\"Normal\",\"reportingEntityId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"reportingEntityName\":\"stf-smoketest-smoke1-pbhxq\",\"sourceId\":\"767a2e13-cddc-445d-8e25-1495703fb724\",\"sourceName\":\"stf-smoketest-smoke1-pbhxq-collectd-sensubility\",\"startingEpochMicrosec\":1765216797,\"lastEpochMicrosec\":1765216797},\"heartbeatFields\":{\"additionalFields\":{\"check\":\"check-container-health\",\"command\":\"cat /healthcheck.log\",\"duration\":\"0.005741\",\"executed\":\"1765216797\",\"issued\":\"1765216797\",\"output\":\"[{\\\"service\\\":\\\"smoketest-svc\\\",\\\"container\\\":\\\"smoketest-container\\\",\\\"status\\\":\\\"unhealthy\\\",\\\"healthy\\\":0}]\\n\",\"status\":\"0\"}}}"},"startsAt":"2025-12-08T17:59:57Z"}]
2025-12-08T17:59:57.668407565+00:00 stderr F 100   944    0   944    0     0  18509      0 --:--:-- --:--:-- --:--:-- 18509
2025-12-08T17:59:57.668540188+00:00 stdout P
{"status":"success","data":["ceilometer_image_size","collectd_cpu_total","collectd_interface_if_dropped_rx_total","collectd_interface_if_dropped_tx_total","collectd_interface_if_errors_rx_total","collectd_interface_if_errors_tx_total","collectd_interface_if_octets_rx_total","collectd_interface_if_octets_tx_total","collectd_interface_if_packets_rx_total","collectd_interface_if_packets_tx_total","scrape_duration_seconds","scrape_samples_post_metric_relabeling","scrape_samples_scraped","scrape_series_added","sensubility_container_health_status","sg_total_ceilometer_metric_decode_count","sg_total_ceilometer_metric_decode_error_count","sg_total_ceilometer_msg_received_count","sg_total_collectd_metric_decode_count","sg_total_collectd_metric_decode_error_count","sg_total_collectd_msg_received_count","sg_total_sensubility_metric_decode_count","sg_total_sensubility_metric_decode_error_count","sg_total_sensubility_msg_received_count","up"]} 2025-12-08T17:59:57.671251960+00:00 stdout F 2025-12-08T17:59:57.671251960+00:00 stdout F 2025-12-08T17:59:57.671270860+00:00 stdout F *** [INFO] Checking for recent CPU metrics... 2025-12-08T17:59:57.676709383+00:00 stderr P % Total % Received % Xferd Average Speed 2025-12-08T17:59:57.676750914+00:00 stderr F Time Time Time Current 2025-12-08T17:59:57.676750914+00:00 stderr P 2025-12-08T17:59:57.676758565+00:00 stderr P 2025-12-08T17:59:57.676765165+00:00 stderr P D 2025-12-08T17:59:57.676771795+00:00 stderr P load Up 2025-12-08T17:59:57.676778355+00:00 stderr P load 2025-12-08T17:59:57.676784815+00:00 stderr P Total 2025-12-08T17:59:57.676791335+00:00 stderr P Spent 2025-12-08T17:59:57.676797766+00:00 stderr P Left 2025-12-08T17:59:57.676804216+00:00 stderr F Speed 2025-12-08T17:59:57.676810666+00:00 stderr P 0 2025-12-08T17:59:57.676817256+00:00 stderr P 0 2025-12-08T17:59:57.676823796+00:00 stderr P 0 0 2025-12-08T17:59:57.676830246+00:00 stderr P 0 2025-12-08T17:59:57.676836777+00:00 stderr P 0 2025-12-08T17:59:57.676843347+00:00 stderr P 0 2025-12-08T17:59:57.676849937+00:00 stderr P 0 --:-- 2025-12-08T17:59:57.676856517+00:00 stderr P :-- --: 2025-12-08T17:59:57.676863067+00:00 stderr P --:-- -- 2025-12-08T17:59:57.676869507+00:00 stderr P :--:-- 2025-12-08T17:59:57.676890978+00:00 stderr P 2025-12-08T17:59:57.676911429+00:00 stderr P 0 2025-12-08T17:59:57.709690240+00:00 stderr P 0 0 0 0 0 0 0 0 --:-- 2025-12-08T17:59:57.709731661+00:00 stderr P :-- --:--:-- --:--:-- 0 2025-12-08T17:59:57.715565145+00:00 stderr P 100 510 0 304 100 206 2025-12-08T17:59:57.715610166+00:00 stderr F 8000 5421 --:--:-- --:--:-- --:--:-- 13076 2025-12-08T17:59:57.715784491+00:00 stdout P {"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"collectd_cpu_total","container":"sg-core","endpoint":"prom-https","host":"stf-smoketest-smoke1-pbhxq","plugin_instance":"0","service":"default-cloud1-coll-meter","type_instance":"user"},"values":[[1765216783,"13988"]]}]}} 2025-12-08T17:59:57.718379499+00:00 stdout F 2025-12-08T17:59:57.718379499+00:00 stdout F 2025-12-08T17:59:57.718397320+00:00 stdout F *** [INFO] Checking for returned CPU metrics... 
2025-12-08T17:59:57.721201244+00:00 stdout F {"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"collectd_cpu_total","container":"sg-core","endpoint":"prom-https","host":"stf-smoketest-smoke1-pbhxq","plugin_instance":"0","service":"default-cloud1-coll-meter","type_instance":"user"},"values":[[1765216783,"13988"]]}]}}
2025-12-08T17:59:57.721520542+00:00 stdout F
2025-12-08T17:59:57.721529952+00:00 stdout F *** [INFO] Checking for recent healthcheck metrics...
2025-12-08T17:59:57.726545373+00:00 stderr F   % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
2025-12-08T17:59:57.726672776+00:00 stderr F                                  Dload  Upload   Total   Spent    Left  Speed
2025-12-08T17:59:57.726754869+00:00 stderr F   0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
2025-12-08T17:59:57.760331472+00:00 stderr F 100   464    0   298  100   166   9030   5030 --:--:-- --:--:-- --:--:-- 14060
2025-12-08T17:59:57.901133965+00:00 stdout P {"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"sensubility_container_health_status","container":"sg-core","endpoint":"prom-https","host":"stf-smoketest-smoke1-pbhxq","process":"smoketest-svc","service":"default-cloud1-sens-meter"},"values":[[1765216776,"0"]]}]}}
2025-12-08T17:59:57.902057219+00:00 stdout F
2025-12-08T17:59:57.902057219+00:00 stdout F *** [INFO] Checking for returned healthcheck metrics...
2025-12-08T17:59:57.905996012+00:00 stdout F {"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"sensubility_container_health_status","container":"sg-core","endpoint":"prom-https","host":"stf-smoketest-smoke1-pbhxq","process":"smoketest-svc","service":"default-cloud1-sens-meter"},"values":[[1765216776,"0"]]}]}}
2025-12-08T17:59:57.905996012+00:00 stdout F
2025-12-08T17:59:57.905996012+00:00 stdout F *** [INFO] Get documents for this test from Elasticsearch...
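The metric-name listing and the CPU/healthcheck queries above all go through the Prometheus HTTP API with a bearer token (the smoketest fetches one at startup). A rough equivalent is sketched below; the route and service-account names are assumptions based on a typical STF install and are not taken from this log.

# Hedged sketch of the Prometheus calls above (route and service-account names are assumptions).
PROM_URL="https://$(oc get route default-prometheus-proxy -o jsonpath='{.spec.host}')"
TOKEN="$(oc create token prometheus-stf)"   # hypothetical service account with query access
# Equivalent of "List of metric names for debugging...":
curl -sk -H "Authorization: Bearer $TOKEN" "$PROM_URL/api/v1/label/__name__/values"
# Equivalent of "Checking for recent CPU metrics..." (the range selector returns a matrix result):
curl -sk -H "Authorization: Bearer $TOKEN" -G "$PROM_URL/api/v1/query" \
  --data-urlencode 'query=collectd_cpu_total{service="default-cloud1-coll-meter"}[5m]'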
2025-12-08T17:59:58.166232816+00:00 stdout F *** [INFO] Found 34 documents 2025-12-08T17:59:58.166232816+00:00 stdout F 2025-12-08T17:59:58.166232816+00:00 stdout F 2025-12-08T17:59:58.166425771+00:00 stdout F [INFO] Verification exit codes (0 is passing, non-zero is a failure): events=0 metrics=0 2025-12-08T17:59:58.166425771+00:00 stdout F 2025-12-08T17:59:58.166425771+00:00 stdout F 2025-12-08T17:59:58.166425771+00:00 stdout F *** [INFO] Testing completed with success ././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-ceilometer/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-0000755000175000017500000000000015115611521033052 5ustar zuulzuul././@LongLink0000644000000000000000000000027300000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-ceilometer/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-0000644000175000017500000002535415115611513033066 0ustar zuulzuul2025-12-08T17:59:30.068237773+00:00 stdout F *** [INFO] My pod is: stf-smoketest-smoke1-pbhxq 2025-12-08T17:59:31.036156823+00:00 stdout F Sending event to event: {'message_id': 'b6f075ce-3c69-4cd1-b581-ad2c8e40a4ce', 'event_type': 'image.delete', 'generated': '2025-12-08T17:59:30.894913', 'traits': [('service', 1, 'image.localhost'), ('project_id', 1, 'f7a5d2ac23aa43bb844c6e1cd873c48c'), ('user_id', 1, 'f7a5d2ac23aa43bb844c6e1cd873c48c'), ('resource_id', 1, 'fb2bbe17-24fd-4487-99c3-d4c5ef279550'), ('name', 1, 'cirros'), ('status', 1, 'deleted'), ('created_at', 4, '2025-12-08T17:59:30.894913'), ('deleted_at', 4, '2025-12-08T17:59:30.894913'), ('size', 2, 13287936)], 'raw': {}, 'message_signature': 'e36d2f4e7a78293877fb5126fbbcbedeb3744b8629262a97823497214b7cbd88'} 2025-12-08T17:59:31.036156823+00:00 stdout F Sending event to event: {'message_id': '49d16489-411d-4b9b-9a6c-0b29c7a2416a', 'event_type': 'image.create', 'generated': '2025-12-08T17:59:30.894913', 'traits': [('service', 1, 'image.localhost'), ('project_id', 1, 'f7a5d2ac23aa43bb844c6e1cd873c48c'), ('user_id', 1, 'f7a5d2ac23aa43bb844c6e1cd873c48c'), ('resource_id', 1, '693d53eb-2b24-4761-87e4-2ffabc1cf410'), ('name', 1, 'cirros'), ('status', 1, 'queued'), ('created_at', 4, '2025-12-08T17:59:30.894913')], 'raw': {}, 'message_signature': 'e96df190f96f0d54c84e3634301b657d518346edb7efe8f200c31d66d678ccf6'} 2025-12-08T17:59:31.036156823+00:00 stdout F Sending event to event: {'message_id': 'b1570a8b-87f6-485a-afa6-16a738d56417', 'event_type': 'image.update', 'generated': '2025-12-08T17:59:30.894913', 'traits': [('service', 1, 'image.localhost'), ('project_id', 1, 'f7a5d2ac23aa43bb844c6e1cd873c48c'), ('user_id', 1, 'f7a5d2ac23aa43bb844c6e1cd873c48c'), ('resource_id', 1, '693d53eb-2b24-4761-87e4-2ffabc1cf410'), ('name', 1, 'cirros'), ('status', 1, 'saving'), ('created_at', 4, '2025-12-08T17:59:30.894913')], 'raw': {}, 'message_signature': '4fdbd18e015a684d8764bdd897cc0e2b66975e4eaf52c88c8674fd466f6784c4'} 2025-12-08T17:59:31.036156823+00:00 stdout F Sending event to event: {'message_id': 'd37fa610-49c1-47b7-a47d-4a04cede9581', 'event_type': 'image.prepare', 'generated': '2025-12-08T17:59:30.894913', 'traits': [('service', 1, 
'image.localhost')], 'raw': {}, 'message_signature': '3fe6877e5bf821ab35df06168acd320b21b4ab8074b23b5cfb23140edb950ca4'} 2025-12-08T17:59:31.036156823+00:00 stdout F Sending event to event: {'message_id': '49cb5cc8-fd50-444e-9abc-f3df6d9eb8ce', 'event_type': 'image.activate', 'generated': '2025-12-08T17:59:30.894913', 'traits': [('service', 1, 'image.localhost')], 'raw': {}, 'message_signature': '33b567f6abea644d0ace7e180451764fa89d9b86d0a428157013c441e13c240e'} 2025-12-08T17:59:31.036156823+00:00 stdout F Sending event to event: {'message_id': 'fc26a59c-7a13-45d2-bd78-ee35ecbb002a', 'event_type': 'image.upload', 'generated': '2025-12-08T17:59:30.894913', 'traits': [('service', 1, 'image.localhost'), ('project_id', 1, 'f7a5d2ac23aa43bb844c6e1cd873c48c'), ('user_id', 1, 'f7a5d2ac23aa43bb844c6e1cd873c48c'), ('resource_id', 1, '693d53eb-2b24-4761-87e4-2ffabc1cf410'), ('name', 1, 'cirros'), ('status', 1, 'active'), ('created_at', 4, '2025-12-08T17:59:30.894913'), ('size', 2, 13287936)], 'raw': {}, 'message_signature': 'dc5827a787a56c8e2b9e912e280057a59a8ec91999bd5fd06cf2559064d796f9'} 2025-12-08T17:59:31.036156823+00:00 stdout F Sending event to event: {'message_id': '634de968-2bbb-4e55-a1fd-9fa123ac3423', 'event_type': 'image.update', 'generated': '2025-12-08T17:59:30.894913', 'traits': [('service', 1, 'image.localhost'), ('project_id', 1, 'f7a5d2ac23aa43bb844c6e1cd873c48c'), ('user_id', 1, 'f7a5d2ac23aa43bb844c6e1cd873c48c'), ('resource_id', 1, '693d53eb-2b24-4761-87e4-2ffabc1cf410'), ('name', 1, 'cirros'), ('status', 1, 'active'), ('created_at', 4, '2025-12-08T17:59:30.894913'), ('size', 2, 13287936)], 'raw': {}, 'message_signature': '051cef74221eb6c904d2aacbb5b76e93fd563d5a342c619d9843d195fd4173bb'} 2025-12-08T17:59:31.036156823+00:00 stdout F Sending metric to metering: {'source': 'openstack', 'counter_name': 'image.size', 'counter_type': 'gauge', 'counter_unit': 'B', 'counter_volume': 13287936, 'user_id': None, 'user_name': None, 'project_id': 'f7a5d2ac23aa43bb844c6e1cd873c48c', 'project_name': None, 'resource_id': '693d53eb-2b24-4761-87e4-2ffabc1cf410', 'timestamp': '2025-12-08T17:59:30.894913', 'resource_metadata': {'id': '693d53eb-2b24-4761-87e4-2ffabc1cf410', 'name': 'cirros', 'status': 'deleted', 'created_at': '2025-12-08T17:59:30.894913', 'updated_at': '2025-12-08T17:59:30.894913', 'min_disk': 0, 'min_ram': 0, 'protected': False, 'checksum': 'ee1eca47dc88f4879d8a229cc70a07c6', 'owner': 'f7a5d2ac23aa43bb844c6e1cd873c48c', 'disk_format': 'qcow2', 'container_format': 'bare', 'size': 13287936, 'virtual_size': None, 'is_public': True, 'visibility': 'public', 'properties': {}, 'tags': [], 'deleted': True, 'deleted_at': '2025-12-08T17:59:30.894913', 'event_type': 'image.delete', 'host': 'image.localhost'}, 'message_id': 'a561efec-d45f-11f0-88b2-0a580ad90054', 'monotonic_time': None, 'message_signature': '4c363665b1b4cd70a5f944ba2204dab1a9a953b8be5acfeb92b234dda0a4c701'} 2025-12-08T17:59:31.036156823+00:00 stdout F Sending metric to metering: {'source': 'openstack', 'counter_name': 'image.size', 'counter_type': 'gauge', 'counter_unit': 'B', 'counter_volume': 13287936, 'user_id': None, 'user_name': None, 'project_id': 'f7a5d2ac23aa43bb844c6e1cd873c48c', 'project_name': None, 'resource_id': 'f297d2f9-168d-49d4-a33c-b68f4a4d5a48', 'timestamp': '2025-12-08T17:59:30.894913', 'resource_metadata': {'id': 'f297d2f9-168d-49d4-a33c-b68f4a4d5a48', 'name': 'cirros', 'status': 'active', 'created_at': '2025-12-08T17:59:30.894913', 'updated_at': '2025-12-08T17:59:30.894913', 'min_disk': 0, 'min_ram': 0, 
'protected': False, 'checksum': 'ee1eca47dc88f4879d8a229cc70a07c6', 'owner': 'f7a5d2ac23aa43bb844c6e1cd873c48c', 'disk_format': 'qcow2', 'container_format': 'bare', 'size': 13287936, 'virtual_size': None, 'is_public': True, 'visibility': 'public', 'properties': {}, 'tags': [], 'deleted': False, 'deleted_at': None, 'os_glance_importing_to_stores': [], 'os_glance_failed_import': [], 'event_type': 'image.upload', 'host': 'image.localhost'}, 'message_id': 'a563ef7c-d45f-11f0-88b2-0a580ad90054', 'monotonic_time': None, 'message_signature': '1e0b590394d255378b593053723c18c0b4b21319697c2425c52c77ec5c2e9d4c'} 2025-12-08T17:59:31.036156823+00:00 stdout F Sending metric to metering: {'source': 'openstack', 'counter_name': 'image.size', 'counter_type': 'gauge', 'counter_unit': 'B', 'counter_volume': 13287936, 'user_id': None, 'user_name': None, 'project_id': 'f7a5d2ac23aa43bb844c6e1cd873c48c', 'project_name': None, 'resource_id': 'f297d2f9-168d-49d4-a33c-b68f4a4d5a48', 'timestamp': '2025-12-08T17:59:30.894913', 'resource_metadata': {'id': 'f297d2f9-168d-49d4-a33c-b68f4a4d5a48', 'name': 'cirros', 'status': 'active', 'created_at': '2025-12-08T17:59:30.894913', 'updated_at': '2025-12-08T17:59:30.894913', 'min_disk': 0, 'min_ram': 0, 'protected': False, 'checksum': 'ee1eca47dc88f4879d8a229cc70a07c6', 'owner': 'f7a5d2ac23aa43bb844c6e1cd873c48c', 'disk_format': 'qcow2', 'container_format': 'bare', 'size': 13287936, 'virtual_size': None, 'is_public': True, 'visibility': 'public', 'properties': {}, 'tags': [], 'deleted': False, 'deleted_at': None, 'event_type': 'image.update', 'host': 'image.localhost'}, 'message_id': 'a564a02a-d45f-11f0-88b2-0a580ad90054', 'monotonic_time': None, 'message_signature': 'cfc07e2e1ab145caaeb26937677872843269503966392942bd647c09f565ae68'} 2025-12-08T17:59:31.158766505+00:00 stdout F *** [INFO] Sleeping for 30 seconds to produce all metrics and events 2025-12-08T18:00:01.161260419+00:00 stdout F *** [INFO] List of metric names for debugging... 2025-12-08T18:00:01.232850852+00:00 stdout P {"status":"success","data":["ceilometer_image_size","collectd_cpu_total","collectd_interface_if_dropped_rx_total","collectd_interface_if_dropped_tx_total","collectd_interface_if_errors_rx_total","collectd_interface_if_errors_tx_total","collectd_interface_if_octets_rx_total","collectd_interface_if_octets_tx_total","collectd_interface_if_packets_rx_total","collectd_interface_if_packets_tx_total","scrape_duration_seconds","scrape_samples_post_metric_relabeling","scrape_samples_scraped","scrape_series_added","sensubility_container_health_status","sg_total_ceilometer_metric_decode_count","sg_total_ceilometer_metric_decode_error_count","sg_total_ceilometer_msg_received_count","sg_total_collectd_metric_decode_count","sg_total_collectd_metric_decode_error_count","sg_total_collectd_msg_received_count","sg_total_sensubility_metric_decode_count","sg_total_sensubility_metric_decode_error_count","sg_total_sensubility_msg_received_count","up"]} 2025-12-08T18:00:01.236108927+00:00 stdout F 2025-12-08T18:00:01.236108927+00:00 stdout F 2025-12-08T18:00:01.236108927+00:00 stdout F *** [INFO] Checking for recent image metrics... 
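The Elasticsearch steps a few records further down ("List of indices for debugging", "Get documents for this test from ElasticSearch", index ceilometer_image) reduce to two calls against the Elasticsearch REST API. A hedged sketch follows; the route and credentials-secret names are assumptions based on a typical STF/ECK install, not values taken from this log.

# Hedged sketch of the Elasticsearch checks below (route and secret names are assumptions).
ES_URL="https://$(oc get route elasticsearch -o jsonpath='{.spec.host}')"
ES_PASS="$(oc get secret elasticsearch-es-elastic-user -o jsonpath='{.data.elastic}' | base64 -d)"
# Equivalent of "List of indices for debugging...":
curl -sk -u "elastic:$ES_PASS" "$ES_URL/_cat/indices?v"
# Equivalent of "Found N documents" for the index seen in this log:
curl -sk -u "elastic:$ES_PASS" "$ES_URL/ceilometer_image/_count" | jq '.count'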
2025-12-08T18:00:01.236108927+00:00 stdout F [DEBUG] Running the curl command to return a query 2025-12-08T18:00:01.289677846+00:00 stdout F {"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ceilometer_image_size","container":"sg-core","counter":"image.size","endpoint":"prom-https","image":"693d53eb-2b24-4761-87e4-2ffabc1cf410","project":"f7a5d2ac23aa43bb844c6e1cd873c48c","resource":"693d53eb-2b24-4761-87e4-2ffabc1cf410","resource_name":"cirros","service":"default-cloud1-ceil-meter","type":"size","unit":"B","vm_instance":"image.localhost"},"value":[1765216801.285,"13287936"]},{"metric":{"__name__":"ceilometer_image_size","container":"sg-core","counter":"image.size","endpoint":"prom-https","image":"f297d2f9-168d-49d4-a33c-b68f4a4d5a48","project":"f7a5d2ac23aa43bb844c6e1cd873c48c","resource":"f297d2f9-168d-49d4-a33c-b68f4a4d5a48","resource_name":"cirros","service":"default-cloud1-ceil-meter","type":"size","unit":"B","vm_instance":"image.localhost"},"value":[1765216801.285,"13287936"]}]}} 2025-12-08T18:00:01.290788596+00:00 stdout F [DEBUG] Set metrics_result to 0 2025-12-08T18:00:01.290892339+00:00 stdout F *** [INFO] Get documents for this test from ElasticSearch... 2025-12-08T18:00:01.355365334+00:00 stdout F *** [INFO] List of indices for debugging... 2025-12-08T18:00:01.409486067+00:00 stdout F yellow open ceilometer_image d9n86q5DSdeb0YeTwOYlKA 1 1 7 0 19.6kb 19.6kb 2025-12-08T18:00:01.414057388+00:00 stdout F 2025-12-08T18:00:01.414057388+00:00 stdout F *** [INFO] Get documents for this test from ElasticSearch... 2025-12-08T18:00:01.467503363+00:00 stdout F *** [INFO] Found 7 documents 2025-12-08T18:00:01.467503363+00:00 stdout F 2025-12-08T18:00:01.467503363+00:00 stdout F 2025-12-08T18:00:01.467503363+00:00 stdout F [INFO] Verification exit codes (0 is passing, non-zero is a failure): events=0 metrics=0 2025-12-08T18:00:01.467503363+00:00 stdout F 2025-12-08T18:00:01.467503363+00:00 stdout F 2025-12-08T18:00:01.467503363+00:00 stdout F *** [INFO] Testing completed with success ././@LongLink0000644000000000000000000000024000000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611514033067 5ustar zuulzuul././@LongLink0000644000000000000000000000024700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/northd/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611521033065 5ustar zuulzuul././@LongLink0000644000000000000000000000025400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/northd/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000644000175000017500000001064715115611514033101 0ustar zuulzuul2025-12-08T17:53:43.351110738+00:00 stderr F + [[ -f /env/_master ]] 2025-12-08T17:53:43.351110738+00:00 stderr F + . 
/ovnkube-lib/ovnkube-lib.sh 2025-12-08T17:53:43.351252512+00:00 stderr F ++ set -x 2025-12-08T17:53:43.351252512+00:00 stderr F ++ K8S_NODE= 2025-12-08T17:53:43.351252512+00:00 stderr F ++ [[ -n '' ]] 2025-12-08T17:53:43.351263732+00:00 stderr F ++ northd_pidfile=/var/run/ovn/ovn-northd.pid 2025-12-08T17:53:43.351272383+00:00 stderr F ++ controller_pidfile=/var/run/ovn/ovn-controller.pid 2025-12-08T17:53:43.351282763+00:00 stderr F ++ controller_logfile=/var/log/ovn/acl-audit-log.log 2025-12-08T17:53:43.351293173+00:00 stderr F ++ vswitch_dbsock=/var/run/openvswitch/db.sock 2025-12-08T17:53:43.351303734+00:00 stderr F ++ nbdb_pidfile=/var/run/ovn/ovnnb_db.pid 2025-12-08T17:53:43.351314124+00:00 stderr F ++ nbdb_sock=/var/run/ovn/ovnnb_db.sock 2025-12-08T17:53:43.351322554+00:00 stderr F ++ nbdb_ctl=/var/run/ovn/ovnnb_db.ctl 2025-12-08T17:53:43.351332884+00:00 stderr F ++ sbdb_pidfile=/var/run/ovn/ovnsb_db.pid 2025-12-08T17:53:43.351342925+00:00 stderr F ++ sbdb_sock=/var/run/ovn/ovnsb_db.sock 2025-12-08T17:53:43.351352415+00:00 stderr F ++ sbdb_ctl=/var/run/ovn/ovnsb_db.ctl 2025-12-08T17:53:43.355717664+00:00 stderr F + trap quit-ovn-northd TERM INT 2025-12-08T17:53:43.355717664+00:00 stderr F + start-ovn-northd info 2025-12-08T17:53:43.355717664+00:00 stderr F + local log_level=info 2025-12-08T17:53:43.355717664+00:00 stderr F + [[ 1 -ne 1 ]] 2025-12-08T17:53:43.355717664+00:00 stderr F ++ date -Iseconds 2025-12-08T17:53:43.357698117+00:00 stderr F + echo '2025-12-08T17:53:43+00:00 - starting ovn-northd' 2025-12-08T17:53:43.357774829+00:00 stdout F 2025-12-08T17:53:43+00:00 - starting ovn-northd 2025-12-08T17:53:43.358248252+00:00 stderr F + wait 23883 2025-12-08T17:53:43.358398037+00:00 stderr F + exec ovn-northd --no-chdir -vconsole:info -vfile:off '-vPATTERN:console:%D{%Y-%m-%dT%H:%M:%S.###Z}|%05N|%c%T|%p|%m' --pidfile /var/run/ovn/ovn-northd.pid --n-threads=1 2025-12-08T17:53:43.364382169+00:00 stderr F 2025-12-08T17:53:43.364Z|00001|ovn_northd|INFO|OVN internal version is : [25.03.1-20.41.0-78.8] 2025-12-08T17:53:43.364782340+00:00 stderr F 2025-12-08T17:53:43.364Z|00002|reconnect|INFO|unix:/var/run/ovn/ovnnb_db.sock: connecting... 2025-12-08T17:53:43.364782340+00:00 stderr F 2025-12-08T17:53:43.364Z|00003|reconnect|INFO|unix:/var/run/ovn/ovnnb_db.sock: connection attempt failed (No such file or directory) 2025-12-08T17:53:43.364782340+00:00 stderr F 2025-12-08T17:53:43.364Z|00004|ovn_northd|INFO|OVN NB IDL reconnected, force recompute. 2025-12-08T17:53:43.364850892+00:00 stderr F 2025-12-08T17:53:43.364Z|00005|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connecting... 2025-12-08T17:53:43.364860622+00:00 stderr F 2025-12-08T17:53:43.364Z|00006|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connection attempt failed (No such file or directory) 2025-12-08T17:53:43.364949285+00:00 stderr F 2025-12-08T17:53:43.364Z|00007|ovn_northd|INFO|OVN SB IDL reconnected, force recompute. 2025-12-08T17:53:44.365271893+00:00 stderr F 2025-12-08T17:53:44.365Z|00008|reconnect|INFO|unix:/var/run/ovn/ovnnb_db.sock: connecting... 2025-12-08T17:53:44.365271893+00:00 stderr F 2025-12-08T17:53:44.365Z|00009|reconnect|INFO|unix:/var/run/ovn/ovnnb_db.sock: connected 2025-12-08T17:53:44.365271893+00:00 stderr F 2025-12-08T17:53:44.365Z|00010|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connecting... 
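The trace above shows the standard ovn-northd startup: it retries the NB and SB database sockets until the ovsdb servers are up and, a few records below, acquires the northd lock and becomes the active instance. A quick, hedged way to confirm that state from inside the northd container is sketched here; the socket paths come from the trace, while the ovn-appctl invocation should be treated as an assumption that may vary by OVN version.

# Hedged sketch: confirm ovn-northd connectivity and role (paths are from the trace above).
test -S /var/run/ovn/ovnnb_db.sock && echo "NB DB socket present"
test -S /var/run/ovn/ovnsb_db.sock && echo "SB DB socket present"
# Reports whether this instance is active or standby; output format may differ between versions.
ovn-appctl -t ovn-northd status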
2025-12-08T17:53:44.365340295+00:00 stderr F 2025-12-08T17:53:44.365Z|00011|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connection attempt failed (No such file or directory) 2025-12-08T17:53:44.365340295+00:00 stderr F 2025-12-08T17:53:44.365Z|00012|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: waiting 2 seconds before reconnect 2025-12-08T17:53:46.365353396+00:00 stderr F 2025-12-08T17:53:46.365Z|00013|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connecting... 2025-12-08T17:53:46.366153527+00:00 stderr F 2025-12-08T17:53:46.366Z|00014|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connected 2025-12-08T17:53:46.366386543+00:00 stderr F 2025-12-08T17:53:46.366Z|00015|ovn_northd|INFO|ovn-northd lock acquired. This ovn-northd instance is now active. 2025-12-08T17:54:01.029958443+00:00 stderr F 2025-12-08T17:54:01.029Z|00016|memory|INFO|15360 kB peak resident set size after 17.7 seconds 2025-12-08T17:54:01.030074636+00:00 stderr F 2025-12-08T17:54:01.030Z|00017|memory|INFO|idl-cells-OVN_Northbound:3933 idl-cells-OVN_Southbound:15715 ././@LongLink0000644000000000000000000000027400000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-ovn-metrics/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611521033065 5ustar zuulzuul././@LongLink0000644000000000000000000000030100000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-ovn-metrics/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000644000175000017500000001104015115611514033065 0ustar zuulzuul2025-12-08T17:53:43.197671036+00:00 stderr F ++ K8S_NODE= 2025-12-08T17:53:43.197671036+00:00 stderr F ++ [[ -n '' ]] 2025-12-08T17:53:43.197671036+00:00 stderr F ++ northd_pidfile=/var/run/ovn/ovn-northd.pid 2025-12-08T17:53:43.197671036+00:00 stderr F ++ controller_pidfile=/var/run/ovn/ovn-controller.pid 2025-12-08T17:53:43.197671036+00:00 stderr F ++ controller_logfile=/var/log/ovn/acl-audit-log.log 2025-12-08T17:53:43.197671036+00:00 stderr F ++ vswitch_dbsock=/var/run/openvswitch/db.sock 2025-12-08T17:53:43.197671036+00:00 stderr F ++ nbdb_pidfile=/var/run/ovn/ovnnb_db.pid 2025-12-08T17:53:43.197765129+00:00 stderr F ++ nbdb_sock=/var/run/ovn/ovnnb_db.sock 2025-12-08T17:53:43.197765129+00:00 stderr F ++ nbdb_ctl=/var/run/ovn/ovnnb_db.ctl 2025-12-08T17:53:43.197765129+00:00 stderr F ++ sbdb_pidfile=/var/run/ovn/ovnsb_db.pid 2025-12-08T17:53:43.197765129+00:00 stderr F ++ sbdb_sock=/var/run/ovn/ovnsb_db.sock 2025-12-08T17:53:43.197765129+00:00 stderr F ++ sbdb_ctl=/var/run/ovn/ovnsb_db.ctl 2025-12-08T17:53:43.198758536+00:00 stderr F + start-rbac-proxy-node ovn-metrics 9105 29105 /etc/pki/tls/metrics-cert/tls.key /etc/pki/tls/metrics-cert/tls.crt 2025-12-08T17:53:43.198758536+00:00 stderr F + local detail=ovn-metrics 2025-12-08T17:53:43.198771417+00:00 stderr F + local listen_port=9105 2025-12-08T17:53:43.198771417+00:00 stderr F + local upstream_port=29105 2025-12-08T17:53:43.198771417+00:00 stderr F + local privkey=/etc/pki/tls/metrics-cert/tls.key 2025-12-08T17:53:43.198779837+00:00 stderr F + local clientcert=/etc/pki/tls/metrics-cert/tls.crt 2025-12-08T17:53:43.198779837+00:00 stderr F + [[ 5 -ne 5 
]] 2025-12-08T17:53:43.199202648+00:00 stderr F ++ date -Iseconds 2025-12-08T17:53:43.201481890+00:00 stderr F + echo '2025-12-08T17:53:43+00:00 INFO: waiting for ovn-metrics certs to be mounted' 2025-12-08T17:53:43.201499731+00:00 stdout F 2025-12-08T17:53:43+00:00 INFO: waiting for ovn-metrics certs to be mounted 2025-12-08T17:53:43.201505261+00:00 stderr F + wait-for-certs ovn-metrics /etc/pki/tls/metrics-cert/tls.key /etc/pki/tls/metrics-cert/tls.crt 2025-12-08T17:53:43.201540662+00:00 stderr F + local detail=ovn-metrics 2025-12-08T17:53:43.201540662+00:00 stderr F + local privkey=/etc/pki/tls/metrics-cert/tls.key 2025-12-08T17:53:43.201540662+00:00 stderr F + local clientcert=/etc/pki/tls/metrics-cert/tls.crt 2025-12-08T17:53:43.201540662+00:00 stderr F + [[ 3 -ne 3 ]] 2025-12-08T17:53:43.201549422+00:00 stderr F + retries=0 2025-12-08T17:53:43.202112127+00:00 stderr F ++ date +%s 2025-12-08T17:53:43.205632543+00:00 stderr F + TS=1765216423 2025-12-08T17:53:43.205632543+00:00 stderr F + WARN_TS=1765217623 2025-12-08T17:53:43.205632543+00:00 stderr F + HAS_LOGGED_INFO=0 2025-12-08T17:53:43.205653993+00:00 stderr F + [[ ! -f /etc/pki/tls/metrics-cert/tls.key ]] 2025-12-08T17:53:43.205665564+00:00 stderr F + [[ ! -f /etc/pki/tls/metrics-cert/tls.crt ]] 2025-12-08T17:53:43.206113436+00:00 stderr F ++ date -Iseconds 2025-12-08T17:53:43.209120577+00:00 stdout F 2025-12-08T17:53:43+00:00 INFO: ovn-metrics certs mounted, starting kube-rbac-proxy 2025-12-08T17:53:43.209136168+00:00 stderr F + echo '2025-12-08T17:53:43+00:00 INFO: ovn-metrics certs mounted, starting kube-rbac-proxy' 2025-12-08T17:53:43.209179749+00:00 stderr F + exec /usr/bin/kube-rbac-proxy --logtostderr --secure-listen-address=:9105 --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 --upstream=http://127.0.0.1:29105/ --tls-private-key-file=/etc/pki/tls/metrics-cert/tls.key --tls-cert-file=/etc/pki/tls/metrics-cert/tls.crt 2025-12-08T17:53:43.245712942+00:00 stderr F W1208 17:53:43.245537 23857 deprecated.go:66] 2025-12-08T17:53:43.245712942+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:53:43.245712942+00:00 stderr F 2025-12-08T17:53:43.245712942+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 
2025-12-08T17:53:43.245712942+00:00 stderr F 2025-12-08T17:53:43.245712942+00:00 stderr F =============================================== 2025-12-08T17:53:43.245712942+00:00 stderr F 2025-12-08T17:53:43.246385981+00:00 stderr F I1208 17:53:43.246332 23857 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:53:43.247815340+00:00 stderr F I1208 17:53:43.247763 23857 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:53:43.248551929+00:00 stderr F I1208 17:53:43.248204 23857 kube-rbac-proxy.go:397] Starting TCP socket on :9105 2025-12-08T17:53:43.248551929+00:00 stderr F I1208 17:53:43.248476 23857 kube-rbac-proxy.go:404] Listening securely on :9105 ././@LongLink0000644000000000000000000000026500000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-node/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611521033065 5ustar zuulzuul././@LongLink0000644000000000000000000000027200000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-node/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000644000175000017500000001111015115611514033063 0ustar zuulzuul2025-12-08T17:53:43.090608895+00:00 stderr F ++ K8S_NODE= 2025-12-08T17:53:43.090608895+00:00 stderr F ++ [[ -n '' ]] 2025-12-08T17:53:43.090608895+00:00 stderr F ++ northd_pidfile=/var/run/ovn/ovn-northd.pid 2025-12-08T17:53:43.090608895+00:00 stderr F ++ controller_pidfile=/var/run/ovn/ovn-controller.pid 2025-12-08T17:53:43.090608895+00:00 stderr F ++ controller_logfile=/var/log/ovn/acl-audit-log.log 2025-12-08T17:53:43.090608895+00:00 stderr F ++ vswitch_dbsock=/var/run/openvswitch/db.sock 2025-12-08T17:53:43.090608895+00:00 stderr F ++ nbdb_pidfile=/var/run/ovn/ovnnb_db.pid 2025-12-08T17:53:43.090780140+00:00 stderr F ++ nbdb_sock=/var/run/ovn/ovnnb_db.sock 2025-12-08T17:53:43.090780140+00:00 stderr F ++ nbdb_ctl=/var/run/ovn/ovnnb_db.ctl 2025-12-08T17:53:43.090780140+00:00 stderr F ++ sbdb_pidfile=/var/run/ovn/ovnsb_db.pid 2025-12-08T17:53:43.090780140+00:00 stderr F ++ sbdb_sock=/var/run/ovn/ovnsb_db.sock 2025-12-08T17:53:43.090780140+00:00 stderr F ++ sbdb_ctl=/var/run/ovn/ovnsb_db.ctl 2025-12-08T17:53:43.092006103+00:00 stderr F + start-rbac-proxy-node ovn-node-metrics 9103 29103 /etc/pki/tls/metrics-cert/tls.key /etc/pki/tls/metrics-cert/tls.crt 2025-12-08T17:53:43.092006103+00:00 stderr F + local detail=ovn-node-metrics 2025-12-08T17:53:43.092006103+00:00 stderr F + local listen_port=9103 2025-12-08T17:53:43.092006103+00:00 stderr F + local upstream_port=29103 2025-12-08T17:53:43.092006103+00:00 stderr F + local privkey=/etc/pki/tls/metrics-cert/tls.key 2025-12-08T17:53:43.092006103+00:00 stderr F + local clientcert=/etc/pki/tls/metrics-cert/tls.crt 2025-12-08T17:53:43.092006103+00:00 stderr F + [[ 5 -ne 5 ]] 2025-12-08T17:53:43.092493687+00:00 stderr F ++ date -Iseconds 2025-12-08T17:53:43.094794489+00:00 stdout F 2025-12-08T17:53:43+00:00 INFO: waiting for ovn-node-metrics certs to be mounted 2025-12-08T17:53:43.094838230+00:00 stderr F + echo '2025-12-08T17:53:43+00:00 INFO: waiting for ovn-node-metrics certs to be mounted' 2025-12-08T17:53:43.094838230+00:00 stderr F + 
wait-for-certs ovn-node-metrics /etc/pki/tls/metrics-cert/tls.key /etc/pki/tls/metrics-cert/tls.crt 2025-12-08T17:53:43.094838230+00:00 stderr F + local detail=ovn-node-metrics 2025-12-08T17:53:43.094853640+00:00 stderr F + local privkey=/etc/pki/tls/metrics-cert/tls.key 2025-12-08T17:53:43.094853640+00:00 stderr F + local clientcert=/etc/pki/tls/metrics-cert/tls.crt 2025-12-08T17:53:43.094863010+00:00 stderr F + [[ 3 -ne 3 ]] 2025-12-08T17:53:43.094871741+00:00 stderr F + retries=0 2025-12-08T17:53:43.095543250+00:00 stderr F ++ date +%s 2025-12-08T17:53:43.098168571+00:00 stderr F + TS=1765216423 2025-12-08T17:53:43.098207272+00:00 stderr F + WARN_TS=1765217623 2025-12-08T17:53:43.098215592+00:00 stderr F + HAS_LOGGED_INFO=0 2025-12-08T17:53:43.098237493+00:00 stderr F + [[ ! -f /etc/pki/tls/metrics-cert/tls.key ]] 2025-12-08T17:53:43.098277944+00:00 stderr F + [[ ! -f /etc/pki/tls/metrics-cert/tls.crt ]] 2025-12-08T17:53:43.098980663+00:00 stderr F ++ date -Iseconds 2025-12-08T17:53:43.101050899+00:00 stderr F + echo '2025-12-08T17:53:43+00:00 INFO: ovn-node-metrics certs mounted, starting kube-rbac-proxy' 2025-12-08T17:53:43.101067370+00:00 stdout F 2025-12-08T17:53:43+00:00 INFO: ovn-node-metrics certs mounted, starting kube-rbac-proxy 2025-12-08T17:53:43.101108221+00:00 stderr F + exec /usr/bin/kube-rbac-proxy --logtostderr --secure-listen-address=:9103 --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 --upstream=http://127.0.0.1:29103/ --tls-private-key-file=/etc/pki/tls/metrics-cert/tls.key --tls-cert-file=/etc/pki/tls/metrics-cert/tls.crt 2025-12-08T17:53:43.134463678+00:00 stderr F W1208 17:53:43.134315 23838 deprecated.go:66] 2025-12-08T17:53:43.134463678+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:53:43.134463678+00:00 stderr F 2025-12-08T17:53:43.134463678+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 
2025-12-08T17:53:43.134463678+00:00 stderr F 2025-12-08T17:53:43.134463678+00:00 stderr F =============================================== 2025-12-08T17:53:43.134463678+00:00 stderr F 2025-12-08T17:53:43.134959961+00:00 stderr F I1208 17:53:43.134927 23838 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:53:43.137794798+00:00 stderr F I1208 17:53:43.137742 23838 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:53:43.138286282+00:00 stderr F I1208 17:53:43.138202 23838 kube-rbac-proxy.go:397] Starting TCP socket on :9103 2025-12-08T17:53:43.138635901+00:00 stderr F I1208 17:53:43.138609 23838 kube-rbac-proxy.go:404] Listening securely on :9103 ././@LongLink0000644000000000000000000000026000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-acl-logging/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611521033065 5ustar zuulzuul././@LongLink0000644000000000000000000000026500000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-acl-logging/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000644000175000017500000004751515115611514033105 0ustar zuulzuul2025-12-08T17:53:42.972592736+00:00 stderr F ++ K8S_NODE= 2025-12-08T17:53:42.972592736+00:00 stderr F ++ [[ -n '' ]] 2025-12-08T17:53:42.972592736+00:00 stderr F ++ northd_pidfile=/var/run/ovn/ovn-northd.pid 2025-12-08T17:53:42.972592736+00:00 stderr F ++ controller_pidfile=/var/run/ovn/ovn-controller.pid 2025-12-08T17:53:42.972592736+00:00 stderr F ++ controller_logfile=/var/log/ovn/acl-audit-log.log 2025-12-08T17:53:42.972703799+00:00 stderr F ++ vswitch_dbsock=/var/run/openvswitch/db.sock 2025-12-08T17:53:42.972703799+00:00 stderr F ++ nbdb_pidfile=/var/run/ovn/ovnnb_db.pid 2025-12-08T17:53:42.972703799+00:00 stderr F ++ nbdb_sock=/var/run/ovn/ovnnb_db.sock 2025-12-08T17:53:42.972703799+00:00 stderr F ++ nbdb_ctl=/var/run/ovn/ovnnb_db.ctl 2025-12-08T17:53:42.972703799+00:00 stderr F ++ sbdb_pidfile=/var/run/ovn/ovnsb_db.pid 2025-12-08T17:53:42.972703799+00:00 stderr F ++ sbdb_sock=/var/run/ovn/ovnsb_db.sock 2025-12-08T17:53:42.972703799+00:00 stderr F ++ sbdb_ctl=/var/run/ovn/ovnsb_db.ctl 2025-12-08T17:53:42.973736137+00:00 stderr F + start-audit-log-rotation 2025-12-08T17:53:42.973753538+00:00 stderr F + MAXFILESIZE=50000000 2025-12-08T17:53:42.973753538+00:00 stderr F + MAXLOGFILES=5 2025-12-08T17:53:42.974169919+00:00 stderr F ++ dirname /var/log/ovn/acl-audit-log.log 2025-12-08T17:53:42.976222775+00:00 stderr F + LOGDIR=/var/log/ovn 2025-12-08T17:53:42.976222775+00:00 stderr F + local retries=0 2025-12-08T17:53:42.976243095+00:00 stderr F + [[ 30 -gt 0 ]] 2025-12-08T17:53:42.976243095+00:00 stderr F + (( retries += 1 )) 2025-12-08T17:53:42.976668227+00:00 stderr F ++ cat /var/run/ovn/ovn-controller.pid 2025-12-08T17:53:42.978843106+00:00 stderr F + CONTROLLERPID=23808 2025-12-08T17:53:42.978843106+00:00 stderr F + [[ -n 23808 ]] 2025-12-08T17:53:42.978843106+00:00 stderr F + break 2025-12-08T17:53:42.978865807+00:00 stderr F + [[ -z 23808 ]] 2025-12-08T17:53:42.979123904+00:00 stderr F + true 2025-12-08T17:53:42.979123904+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 
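Both kube-rbac-proxy container logs above follow the same pattern: wait for the metrics TLS key pair to be mounted, then exec the proxy on :9105 (ovn-metrics) or :9103 (ovn-node-metrics) in front of a localhost upstream. A hedged probe of one of those endpoints is sketched below; it has to run on the node or inside the pod network namespace, and the choice of a service account that passes the proxy's SubjectAccessReview is an assumption.

# Hedged sketch: probe the secured ovn-metrics endpoint started above (port from the log).
# Run on the node or inside the pod network namespace; the SA name is an assumption.
TOKEN="$(oc create token prometheus-k8s -n openshift-monitoring)"
curl -sk -H "Authorization: Bearer $TOKEN" https://127.0.0.1:9105/metrics | head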
2025-12-08T17:53:42.979168085+00:00 stderr F + tail -F /var/log/ovn/acl-audit-log.log 2025-12-08T17:53:42.979903955+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:53:42.980121902+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:53:42.980345918+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:53:42.982582788+00:00 stderr F + file_size=0 2025-12-08T17:53:42.982609308+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:53:42.983559715+00:00 stderr F ++ wc -l 2025-12-08T17:53:42.983631927+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:53:42.986581697+00:00 stderr F + num_files=1 2025-12-08T17:53:42.986601017+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:53:42.986601017+00:00 stderr F + sleep 30 2025-12-08T17:54:12.989457496+00:00 stderr F + true 2025-12-08T17:54:12.989457496+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:54:12.991658006+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:54:12.991946374+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:54:12.992644312+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:54:12.997209797+00:00 stderr F + file_size=0 2025-12-08T17:54:12.997254718+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:54:12.998671576+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:54:12.998671576+00:00 stderr F ++ wc -l 2025-12-08T17:54:13.003636351+00:00 stderr F + num_files=1 2025-12-08T17:54:13.003636351+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:54:13.003636351+00:00 stderr F + sleep 30 2025-12-08T17:54:43.008198580+00:00 stderr F + true 2025-12-08T17:54:43.008198580+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:54:43.010129171+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:54:43.010481321+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:54:43.010738887+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:54:43.015688231+00:00 stderr F + file_size=0 2025-12-08T17:54:43.015688231+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:54:43.017166151+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:54:43.017423288+00:00 stderr F ++ wc -l 2025-12-08T17:54:43.022133554+00:00 stderr F + num_files=1 2025-12-08T17:54:43.022156866+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:54:43.022166576+00:00 stderr F + sleep 30 2025-12-08T17:55:13.025994241+00:00 stderr F + true 2025-12-08T17:55:13.025994241+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:55:13.027153652+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:55:13.027342977+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:55:13.027616034+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:55:13.030282576+00:00 stderr F + file_size=0 2025-12-08T17:55:13.030282576+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:55:13.030959935+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:55:13.031147680+00:00 stderr F ++ wc -l 2025-12-08T17:55:13.033720049+00:00 stderr F + num_files=1 2025-12-08T17:55:13.033765960+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:55:13.033765960+00:00 stderr F + sleep 30 2025-12-08T17:55:43.036577679+00:00 stderr F + true 2025-12-08T17:55:43.036627250+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:55:43.037946726+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:55:43.038029328+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:55:43.038187192+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:55:43.044354221+00:00 stderr F + file_size=0 
2025-12-08T17:55:43.044354221+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:55:43.046546672+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:55:43.046546672+00:00 stderr F ++ wc -l 2025-12-08T17:55:43.053969006+00:00 stderr F + num_files=1 2025-12-08T17:55:43.053995216+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:55:43.053995216+00:00 stderr F + sleep 30 2025-12-08T17:56:13.060084207+00:00 stderr F + true 2025-12-08T17:56:13.060084207+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:56:13.062291807+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:56:13.062291807+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:56:13.062291807+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:56:13.067432909+00:00 stderr F + file_size=0 2025-12-08T17:56:13.067432909+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:56:13.068756124+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:56:13.068967130+00:00 stderr F ++ wc -l 2025-12-08T17:56:13.072950860+00:00 stderr F + num_files=1 2025-12-08T17:56:13.072950860+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:56:13.072980451+00:00 stderr F + sleep 30 2025-12-08T17:56:43.076270427+00:00 stderr F + true 2025-12-08T17:56:43.076270427+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:56:43.078170916+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:56:43.078170916+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:56:43.078170916+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:56:43.082311594+00:00 stderr F + file_size=0 2025-12-08T17:56:43.082311594+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:56:43.082311594+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:56:43.082311594+00:00 stderr F ++ wc -l 2025-12-08T17:56:43.084762078+00:00 stderr F + num_files=1 2025-12-08T17:56:43.084784399+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:56:43.084784399+00:00 stderr F + sleep 30 2025-12-08T17:57:13.091572745+00:00 stderr F + true 2025-12-08T17:57:13.091572745+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:57:13.092615053+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:57:13.092769237+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:57:13.092883090+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:57:13.095322554+00:00 stderr F + file_size=0 2025-12-08T17:57:13.095338874+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:57:13.096033412+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:57:13.096249968+00:00 stderr F ++ wc -l 2025-12-08T17:57:13.098830955+00:00 stderr F + num_files=1 2025-12-08T17:57:13.098857826+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:57:13.098857826+00:00 stderr F + sleep 30 2025-12-08T17:57:43.103023592+00:00 stderr F + true 2025-12-08T17:57:43.103050733+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:57:43.104160452+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:57:43.104303666+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:57:43.104489940+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:57:43.107917769+00:00 stderr F + file_size=0 2025-12-08T17:57:43.107956660+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:57:43.109100789+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:57:43.109336526+00:00 stderr F ++ wc -l 2025-12-08T17:57:43.112119698+00:00 stderr F + num_files=1 2025-12-08T17:57:43.112132108+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:57:43.112139038+00:00 
stderr F + sleep 30 2025-12-08T17:58:13.115706480+00:00 stderr F + true 2025-12-08T17:58:13.115746421+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:58:13.117262480+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:58:13.117326512+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:58:13.117503356+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:58:13.120456404+00:00 stderr F + file_size=0 2025-12-08T17:58:13.120517365+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:58:13.121424988+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:58:13.121483110+00:00 stderr F ++ wc -l 2025-12-08T17:58:13.124730694+00:00 stderr F + num_files=1 2025-12-08T17:58:13.124796195+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:58:13.124859877+00:00 stderr F + sleep 30 2025-12-08T17:58:43.128222262+00:00 stderr F + true 2025-12-08T17:58:43.128222262+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:58:43.129214319+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:58:43.129672961+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:58:43.129849665+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:58:43.134491828+00:00 stderr F + file_size=0 2025-12-08T17:58:43.134575290+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:58:43.135535135+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:58:43.135593137+00:00 stderr F ++ wc -l 2025-12-08T17:58:43.139709365+00:00 stderr F + num_files=1 2025-12-08T17:58:43.139799027+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:58:43.139838978+00:00 stderr F + sleep 30 2025-12-08T17:59:13.143891992+00:00 stderr F + true 2025-12-08T17:59:13.143891992+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:59:13.144495098+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:59:13.144831277+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:59:13.144865718+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:59:13.147869607+00:00 stderr F + file_size=0 2025-12-08T17:59:13.147980770+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:59:13.149226353+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:59:13.149273294+00:00 stderr F ++ wc -l 2025-12-08T17:59:13.152011917+00:00 stderr F + num_files=1 2025-12-08T17:59:13.152290344+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:59:13.152359866+00:00 stderr F + sleep 30 2025-12-08T17:59:43.155267311+00:00 stderr F + true 2025-12-08T17:59:43.155267311+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T17:59:43.155938438+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T17:59:43.155971789+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T17:59:43.156226936+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T17:59:43.159578714+00:00 stderr F + file_size=0 2025-12-08T17:59:43.159721558+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T17:59:43.161330360+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T17:59:43.161373381+00:00 stderr F ++ wc -l 2025-12-08T17:59:43.165747677+00:00 stderr F + num_files=1 2025-12-08T17:59:43.165860039+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T17:59:43.165972842+00:00 stderr F + sleep 30 2025-12-08T18:00:13.170062145+00:00 stderr F + true 2025-12-08T18:00:13.170174858+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T18:00:13.171895253+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T18:00:13.172497139+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T18:00:13.173030313+00:00 stderr F ++ cut '-d ' 
-f1 2025-12-08T18:00:13.177836619+00:00 stderr F + file_size=0 2025-12-08T18:00:13.177941112+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T18:00:13.179132023+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T18:00:13.179294718+00:00 stderr F ++ wc -l 2025-12-08T18:00:13.182326538+00:00 stderr F + num_files=1 2025-12-08T18:00:13.182413240+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T18:00:13.182452561+00:00 stderr F + sleep 30 2025-12-08T18:00:43.184971906+00:00 stderr F + true 2025-12-08T18:00:43.185089669+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T18:00:43.186296340+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T18:00:43.186668500+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T18:00:43.186786433+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T18:00:43.190320386+00:00 stderr F + file_size=0 2025-12-08T18:00:43.190374467+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T18:00:43.191235851+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T18:00:43.191368864+00:00 stderr F ++ wc -l 2025-12-08T18:00:43.194186098+00:00 stderr F + num_files=1 2025-12-08T18:00:43.194239040+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T18:00:43.194264200+00:00 stderr F + sleep 30 2025-12-08T18:01:13.196678154+00:00 stderr F + true 2025-12-08T18:01:13.196839058+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T18:01:13.198476731+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T18:01:13.198863343+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T18:01:13.199345465+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T18:01:13.204063410+00:00 stderr F + file_size=0 2025-12-08T18:01:13.204259155+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T18:01:13.205610202+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T18:01:13.205812687+00:00 stderr F ++ wc -l 2025-12-08T18:01:13.210439211+00:00 stderr F + num_files=1 2025-12-08T18:01:13.210551254+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T18:01:13.210606865+00:00 stderr F + sleep 30 2025-12-08T18:01:43.214543287+00:00 stderr F + true 2025-12-08T18:01:43.214747032+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T18:01:43.216451618+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T18:01:43.216822647+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T18:01:43.217131156+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T18:01:43.222151249+00:00 stderr F + file_size=0 2025-12-08T18:01:43.222266533+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T18:01:43.223730791+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T18:01:43.224107602+00:00 stderr F ++ wc -l 2025-12-08T18:01:43.229397892+00:00 stderr F + num_files=1 2025-12-08T18:01:43.229397892+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T18:01:43.229397892+00:00 stderr F + sleep 30 2025-12-08T18:02:13.233547128+00:00 stderr F + true 2025-12-08T18:02:13.233703542+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T18:02:13.235023287+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T18:02:13.235297855+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T18:02:13.235442909+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T18:02:13.240037461+00:00 stderr F + file_size=0 2025-12-08T18:02:13.240204185+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T18:02:13.241834919+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T18:02:13.241834919+00:00 stderr F ++ wc -l 2025-12-08T18:02:13.246868503+00:00 stderr F + num_files=1 2025-12-08T18:02:13.247048598+00:00 
stderr F + '[' 1 -gt 5 ']' 2025-12-08T18:02:13.247104639+00:00 stderr F + sleep 30 2025-12-08T18:02:43.251390483+00:00 stderr F + true 2025-12-08T18:02:43.251390483+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T18:02:43.252221916+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T18:02:43.252560565+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T18:02:43.252816581+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T18:02:43.256222482+00:00 stderr F + file_size=0 2025-12-08T18:02:43.256292214+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T18:02:43.257286670+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T18:02:43.257533756+00:00 stderr F ++ wc -l 2025-12-08T18:02:43.259667513+00:00 stderr F + num_files=1 2025-12-08T18:02:43.259719505+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T18:02:43.259743465+00:00 stderr F + sleep 30 2025-12-08T18:03:13.262602240+00:00 stderr F + true 2025-12-08T18:03:13.263079312+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T18:03:13.264824109+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T18:03:13.264984304+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T18:03:13.265373814+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T18:03:13.270806010+00:00 stderr F + file_size=0 2025-12-08T18:03:13.271000845+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T18:03:13.273015509+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T18:03:13.273015509+00:00 stderr F ++ wc -l 2025-12-08T18:03:13.276070171+00:00 stderr F + num_files=1 2025-12-08T18:03:13.276117962+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T18:03:13.276127102+00:00 stderr F + sleep 30 2025-12-08T18:03:43.278714170+00:00 stderr F + true 2025-12-08T18:03:43.278772431+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T18:03:43.283387616+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T18:03:43.283387616+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T18:03:43.283387616+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T18:03:43.286748725+00:00 stderr F + file_size=0 2025-12-08T18:03:43.286748725+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T18:03:43.287619348+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T18:03:43.287619348+00:00 stderr F ++ wc -l 2025-12-08T18:03:43.291083681+00:00 stderr F + num_files=1 2025-12-08T18:03:43.291083681+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T18:03:43.291083681+00:00 stderr F + sleep 30 2025-12-08T18:04:13.294366139+00:00 stderr F + true 2025-12-08T18:04:13.294366139+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T18:04:13.296405372+00:00 stderr F ++ tr -s '\t' ' ' 2025-12-08T18:04:13.296405372+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T18:04:13.298105737+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T18:04:13.301112237+00:00 stderr F + file_size=0 2025-12-08T18:04:13.301243351+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T18:04:13.302558995+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T18:04:13.302976017+00:00 stderr F ++ wc -l 2025-12-08T18:04:13.306127790+00:00 stderr F + num_files=1 2025-12-08T18:04:13.306228603+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T18:04:13.306268644+00:00 stderr F + sleep 30 2025-12-08T18:04:43.309984049+00:00 stderr F + true 2025-12-08T18:04:43.309984049+00:00 stderr F + '[' -f /var/log/ovn/acl-audit-log.log ']' 2025-12-08T18:04:43.310525503+00:00 stderr F ++ du -b /var/log/ovn/acl-audit-log.log 2025-12-08T18:04:43.310873412+00:00 stderr F ++ tr -s 
'\t' ' ' 2025-12-08T18:04:43.311033958+00:00 stderr F ++ cut '-d ' -f1 2025-12-08T18:04:43.315418543+00:00 stderr F + file_size=0 2025-12-08T18:04:43.315445784+00:00 stderr F + '[' 0 -gt 50000000 ']' 2025-12-08T18:04:43.316352268+00:00 stderr F ++ ls -1 /var/log/ovn/acl-audit-log.log 2025-12-08T18:04:43.316551453+00:00 stderr F ++ wc -l 2025-12-08T18:04:43.319146282+00:00 stderr F + num_files=1 2025-12-08T18:04:43.319238794+00:00 stderr F + '[' 1 -gt 5 ']' 2025-12-08T18:04:43.319272005+00:00 stderr F + sleep 30
[tar members: home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovnkube-controller/ (directory) and .../ovnkube-controller/0.log.gz -- gzip-compressed container log; the binary compressed payload is omitted here because it is not representable as text]
0?˜;”¡¶”d´dÁÃt›q8‡évtÔ©4é<²§x9¥½›œ—iZ°Ò:ÏÀ–š¥N[$n©RCV(óm/§²)‡x9@ÿþm<ÜåÙø<ŸïÒøT26{íÞàcò0z{ºös3vêÓåo•jTvÑíùTöäÿ³wõ;ŽãÈýU„üqH‚c7?Š_þ+‹Û8$·»ÀÝ.‚]4¢HMû¶§»ÓÝ3{³ƒy¬¼@ž,EÉmËn˦,Éc÷èØ™±e‘Eë‹U¿ÒýÀåWþÔ‘¯åËOOº,§öjÄG}±³8Ôõ%óQäGòPŸ›·T‡ošŽDTâP½‰ª¯i#éS‡‚({<¢ì±ˆ:û%ub:Y(“¤“¹è•Ò´[)­måxï¸J¯$]®Òë´6¢åò¼îÑÓÙ¡ßýÀʆì@€T:eêBöÊfÛËÉãô!Ñ0N()½‹húƹýO'¼¿ÙŸ½í‚F~Ù a…– eiBš^4»w|¤s;=;ç÷š<Ô‡ ¦Öec5ì@UØuœ“jØ‘~îåßï>޵Р&¥ ; y†pó©‘ÈvX‰¢°¯^Bª*Þ8[w{ªÏ†®îH¹ú¯º#ùDXÝËöÝ}¾³‡i¥ÈA:4¨ºZO¯1"€â<&€Ÿ\›½î&ÏuœhV2ɹd$HA ƒ“R÷n±—™5µÐ›¬íÉÚ~Ö¶¤Z§ÔZ MOM.wºCƒàJáh= †§ž¸ $ñe Ì(ãŠ`Ž+’“ïÙ¦|ŽéJuºR®T§+ÕéJuºR]Y.1T‘P’ fðÈÊܽñ´«ÏÙê«{é‚Q~aıú&¶ÄjD‡–ŸW 1×z&¸V†(j­ÍÞâ~Ë?V_7·h¿UE·–=è–Ÿn>]Ùd³ma³ª¥éMÌ9^…¸æw ­¡ÎbükàÖv>×29ÑT…XwáIÍ3%pí¹.Æë=‘ÎEV1§‰ô;Åb§XlgîׂŠî—BœáíÄÎhGû1Hâ¼f¯b%UðbÖjT Íê)QÑñX]Ų¾8ÙÀâ¸ícmûtnÛƒŸØ*I“vméSPƘ+±øœë$‰C1ÅsP5ÚØ6}êHü“4š¤ÑŠ­c×{™Ð4ƒÉ>5 =ÎÖ«2{fŸ¤¢Y”¡W•+°L&aË3?~[½§íík"•¢ …‰ úåÈn.ʨàÊHeSˆbö -¬e`³]~¥„e_‰èÂ‰Äøä¦ÛPáÏn¤ÎµŠŠ„z ¥ÅXq•ñ…|úI ùÖÓ-²„J iૼ?“úL„&÷`¬MDiP2H^[ÆVm…+­Ñ#²‘’\'ä?k1øù?޵2ÎÙ§ÆS'*PM ¾•ª‚ðàKϨVÁ‡ñ6Ípt²± ç O;ç¥k­0V'\×™t×±ý[a³†MppiÁ£Cè €"ÜàjzM(þוÀ„.z¶JٜҰâתà¸+³h@Fó#Få‚x+œµ^•Á•gíá¦8y¸£/YéQ,¨¼$Œ)ô–%J3#ð­`Б.„€ò¯ÕìµÓpq_¡Ú•Ô2Ù^µ+)˜“èz±§Ê§7™Rœ™é0ˆm3*µ0 ѯÆFÍfKŒ„ÊÎYf”½@gª½Éuªl?ªZ‘Ù[èÛx¾…@~AÑÿ±ð®?Ö ØÙ£êÑßÏã7áárù·M=$cÒ& ¯ …jÀ™¿Ë~ O÷7¸—Á_çkuš¿±*ÅoTzÿl¹¢?BC;@£e´êÇo› U¢d•ˆ?h‡¬ÒgÞ.ëåj­C|M«Õaµb*M·ÞbÕÎm½>÷,(@Ím&öÁ¾}&¶¼rûà8{k¼§H®=àÞ¡ÓË )Š’ƒE'N•.ÓÒèfŸ žQ\¹»ÏÄÝÍ»‹wù-Z¾Nx‰,[!܇‚à–\ÏòÃ}÷]ñóï?ýü=(òŸåæ'ñ÷²üžþõþ¦þç¿ÕO²îNq•ýòéY¯?^wá.þñîò«š>°ìO9ywoëê¦lË:(Ð!jˆ4ÀØÂ«P—Òø!5ÒÆ^1ô¥†ç"ßüG ‚öjÓ[1ooÑ ÙË ’õjÒ×»h®ÇlË‚ü±¹M;Z­ƒ˜Z8TË0up8Ð kÜÜfNô”?8åžNþ äT3H±BŒé…ý¹Å½íSòØù‰í'ˆÓ^iæÝÌõ¬ó¨£«>¨~WùâJíÌ—h,i͈KhM¶+_|uGU±S-šŽ Êñ­N„RçåD$3DBoÂ]щý 16±ä ­â%í%Ÿö8’¸J£O¾2l\þÞ,FnÐøæþÎ?.Aõ~YLérEceDÕHm‘?\¶}A´÷ ÿo C~+¿-®²â!ä5‚Q}»R]GgûÇx“~1¿»¼þˆÏVÿªS¦w>zúÅû‡ˆ#„Ýï[RÔÐÍ‘ð£Î#ј$K•·e$t'¬Þªú¬§¾à¦]ÅóÂLfwµ “Õ½.`C÷6!û_6ÚÆR_Ô¦9F*,jî*ŠZföŠJ‰¢ ºqWöãX‘tXÆëMê8§UH”zöã½*0ÚÎÞPDè^I‘BGÃH;Àkì‚8·î6n¨æÙ§Zëþ²Žà°jé¶p3Uíf¶<´aYàãÑjØöøUï…ã¼WVr –¬@0™Bˆ`f'w œ`±Uúñ^!ˆÖ32fW?1 ¥„0aûœ w@rà }%[_¡ïmIѬc%ºD´¤F7®_•òÙeF®‰{&GMåêS¹úùY™ŠK‘Pú„ÖN¯¨ç.Õs´ÚèÜúVÛCßkÆÝ™c°îY±*.Öiɪ|ÍKVøº¬Ùâg¸híòFSMSäM b =LÞŒs¡´e6I’ÚS3s»ÄÃ|°…·ÂÐJǶŒœX®<1@Á–’J™ÓãÚ·ÆÌ&Ãv —NáÒW.ÕLH%į½®*Z˸nÀˆ„²uMM¯Øo+1#Àð´uzEDm3}Eb Õ1¥ÊººI¥ –îEM’jWã84¥Õ—¦…+p¢ÒÇ:­Ú“d©™i« £vc¢Ñ›H¢$»¶[bÓ€]ØW&Ú*ºÛ^ì2:ÔÿîHm;þzúutQ^gˆlýÉÐø4)×:ãe^›Ä&nZÉS‹-ô1ûòàK‡¤“Òš-±Ì8´D¿È²’ ÜXá¦á„/0ù“0ù“ð…}M™Mj剷ƒšlÄÉF<ˆ³ çãµDúÛÙ<áÒâuð7ZÂQAÏ¡ü*†?4w§\üÈÝÖÒ”ž§Fžz¿‰“¿nN¶ ÝÏÉŒÛUäê¹?îÙUöKS—×óÛð’EÖ?oôNBõä½2¥&÷Æüʯ²Üûm…Pütw÷k =!¹z÷øÇìfîî>¨Ñá{j¦„œF” \Û”‚fËV@Lwîïob5òÇxr$\‡çNkÿú]üû¡ '0*1‹[£5—J´c÷Z¡’êé-_Ýa?ãÚ½lQ‰ÓTãf‡âîÁW¾ï‹æ›UG#ßKj+a O©:—rh ÷Ý7Û+×ðL®¾ºÇ‘.åF z«opš§xùyÿëvg‚keˆ¢ÖÚì-Îá·ücõuów‹^~•Zjíå§›OWaæÙ6ñö1XnbëJÍï*8zš=ÅÚä(§nŸ«A‰¸ë¯²/ص+À“ìÚ•}!ˆöq–ŒgLœ¨@Ñ €o¥ª ½#ϨVÁ‡a–l ùœuÈ'›€ãÛ@Šj½÷ºla­\¶Y-ÛoÃÐÚä\¼¼ ÓxÄ eí·añ$q2û1úž®ûÒÞ·æ„Ü—÷~`÷epç$¡ëÚˆ3•µ9ì®jßi]\EájKÎhûMTúL9CÞDí#¤mÞL0û«€k¸‰îß]þþéz‚³ü1Ò*ç HOü[ìqÓ_T»„À ŠÈUš›ÄåN"÷zgÐ×ãZÈÝ.a°¢-.¡9Ð#4;BjzïP¢¯M9m¬äeûJnWSüB.Õ¶¤ *.4ˆjŠ K'Ð!èag9¸Å©L[SPúΙÚSÏ›¢€Sp›ë7Yp“7¦—.õDZ‹Ò•B+$O¨¶†Ÿë¥^Ú­éyùô\qÉiãIýÅâè¢ÌWì têXÈ©„LØVäJ{Ü{c8/ïHߜҰ²(­±åGÒÇéÜùª#égÜì4›²yÎ?›'›Ôî€jW¡ ›PC….sçt‚hÉáäpk*C»¶ìV_K]–¾”à=‘þ­µ{‚JßVG–¬ Ž»-h8xïabõ¥·ÂYëU\¹;²”ÝY’úÀÐ’Ô;cKÂöÞ­FL¦-¶´¾ —û´íÄ Ã´|`b—›ö`M21p`€©#…ËØ±ÕÆj5ÄÄÕ(>RGºZÉPÖ&c ¹©®ò]“ d˜66²½×ÿIð öž´â-LC‘•¿YÎì!a^µì»{ØÂB‡ÏS[š4Oݘç‡Ûª·  Dý÷ÛÇÚù=Æ8b·±›jЧ—Ío³kÇT6:iª^ÚìŸe}òÌ…á;ÒÉó4b”cw8/XœSÊÄÕC´m‘ë}él)Ñľ6åMÛ´5• xÊ´õG­ÏL™°iI‰—ëÐÝÑ‚0Uвä²Èe9žc’Ê$¶Ñ"nà0îøá§´%>ÉðSûÆÁ0gðpÀÐwœØ8›fsËp- )q¯ tAÄåEAU1Ìh›¦©mö»4š²•mAYŸ1Y«M£ò*û6Ü„¸Añ-ùMD½­[i µž ÿóOŸ[IÒ\$Xàhìí½! 
RöóÛùã5’öøñ¶¨ÌÅÅ]jœFT|1`ñxŸ¯AêVßmƒÏ­²¬‘V·ú™š1žÒP3ÊöSÀ®"b´o«¹# Õ¼¶¶~|.~—ßæoñÏ”ŠmDp†F»L ¢á]|IÎjql¥Pàû~p%X?F[2·pÛF¯øí|Ç/þÚ3h  AS[ÐhX´ùŸ"tseS>Ý¡…sÐ~ÍþüÃã³8kw«‡ylÞMîÞãÞÄöõhý±ŠÉ5ŒÑ:y­D´æR îlÍëÖÚ«ó²ùQμ+JÂï»þ½• £$M¡Bæø²ÿ’Ç}_ÿÃV:­Ô6…N=\Îîû’àv˜{OÍørAÛ$Wó‘V±ep‚¶]l –™²eÁ‡:ÖÝ7f¿Ÿ½ùŠ7û7»‹Û­"0×Q hN=¿'mØ,ƒ\;¯4úoŽ:=„ÛBïŒsÿÏÞ•ö¸m¤é¿BäãLWw݇€6qv³f¼F2;ÀbÅ"ii"w÷XmžFþûV‘”DI<Š,R–:D0ÓÖŪ·Î÷| #Æ·ge1p@A± haõu;^v¸þço¯¢Â((þ?÷x<>.¼:ñKTÈÍúS’Þ–úíî§·«‡»-`%±|p~"3oußòÛMºÎÊ6°\½_ýY¯Ö:^­óÃãv®Óœy$vjÖ®ãeJ`1·zý¸ÔÇÍÚîß{í<Íêý½m=þR‘ò·`ûÅü;ÿ Ž!W ÓÝÓc;³mOíæ§ÓMª?÷neˆöx{.`¯ŸüE¯ É¿§B}.ÿJ¤)6i ’8S€Æ*3 1O¸Ö¶H=•XcžÅÅŠûÝ1Û;rû›ŸS³M'uÿΉ-a^¾ç^>Wÿ¯Ë§§ÇÍÍÎýo¯ÞÞì€È äêüå3Œ>dôûÍŽ~Þ|ûø¸û¡íËÍïö¿ŸóPkìŸjn_øíNûœr”^¿]ìpx²]r)»Ïnv°ó9'§m|ãÆr ï¾xcµ€—Û»Míµ;x]`~ÿäòHòlŸ—ÍîUIÄùöa½2_7ÿ•êõÓòÕ25¿Vãí§ØžYË7O?¥:ùòíÎíYÈu­È©ËGíõÛT¾|üŸ~ÎwÝÏOÚüº…ÍâÿªãðËöW+×ï×o?Ó_n¾];}ç)­Ê·íß&o©úÉ«µÞï¾¾¯“òOeÓ7åÛ߯Ü,ÆŸœƒ§œlÛŧO›Ý’Ë_=WÛX uñùëâöµƒ}0/Å»ÏnÙ¡IV[×êîEþQ£ÞH¡’ÊÃ+AÔ>轎ߕ;ÔiXõ±”9J·T­ùýlÄGúcºÈÏz»½îò·xηÓ>y |7º’(#Ø›É`—að7òE† H»é±ÛÕçíD5èÌGoÔ›!téÚÍz³·ä‚q„<*óá „ÁRNB´.8”Ê£èV0)‚˜~}„@úëɸÝÕôe°owõòÑa¹ãõLÜ]7·à îár®æú!¯öÙŸ=y¬›er§Ž‡?•sD-ë#ÓX,³‚+% P d™jš:ÂYtD8 û΢ á, 'œV'–>Y‹0u ¶›M\«4Xå¨[0AE˜‰[éM“™k¿Ò`ÚÒ[ˆ¿s³i+¤Ãñ£›#µÍb»÷isWü:ù°Ê=j••ßÇr‘‘ÀôÒx^‡ª,(ÅQÑÇThÌ ˆa,¶‡“5àQ µ>/ÇëjÍÕѽ×_gMwÖt¯VÓ•” éã&êzó–ý<‘W•·,$CzLœj`r‡^œ[Ðò쓞D2e0a€dÚ7Ú6²óH˜Ì QÙQQ'© .èH-héL‘Pà›¯„Ï[Z~ïBBþCƒñÛKä$ÿ¦X[(JׯpÌì&tÉâZ3 E2I°€FÑ2ϱ„VU¯¼¡áøßÒxùðð«W@ž{äO(ïКäq È.ÏÒOÀ’AÝæ¼Õj„~³iÈGú‰Õ(…´ïûHQ‰ ¥s‡±µ^e©ùbÖéÖ®¿{ÔæWûP¢ÜoÍ}!2%yœ¸\[ùÏÍ?š¤b1Èa+ý(Œ  Esá»N¥ÐDø¦çžv!ŠX&X"hênm¨ÔH¥XeÒd™NãÄÞJ°£4x{ê‹æ-iðe•!©Oï¶J°K?ùúbKÂ;8_¾ˆÒQ•¾/¹¼îLyýø¸q‰ñß§ë‡/òSH»•Ï*ÂL£Ô™Z%‚¦F:ÖB”1"Ó˜0Dåª.MÒñ êêòÖamÖzßÄx_ <߬ø™÷H~ÒFùõÜ{ßm’ìfºf«l/ˆúD}ᜩ_ŒÃœªtÛaN=ˆ¥ÖÐʯ6«à«îø ¸Ê´Æ($Ö0å(3À*ˆ È8å€eK£2‘±$ÒÓj/¶@¸åš{X¸-ñ[ —‡{cw†Ûs,õf¹Pb½üòþåÇ¥2÷?þ€þ›¾Z‹÷˧ÿÿüí»ů~|}ÿÃÓÿw]µjskÕºôÁýù`üü¶BÑ+ý¤­íWìÂèdŒv<µœ¢S(•HCì=ƒ`Še–"Hñîì'©C_Ø¿HC¯€÷MÍÊ dè¨:*/¯Ô¶©ŽÆâd8nªSå9µÇ$›OÉbæCòè´Ê’J¤5`_7S®´Õ„Yß5}D’pûŸ´úÜoÙ¿îM‡ë)¼ÂåC}ÝîŽÊ¤"¸Áå ØAþ(NÚéd³Æ9u÷ñ˜S";ý9MCyç=Æõ>f…Töð®óñ`{YaÚì+a‚a/ù8æã ºôûX)íƒ$X‡ò?v¨ú*HÒÙ•p¤k£ìLSemßD*!ÚîB€„BÔmG*&Œí2I¡òÙBLâ= ß0’)l—·‡d•½þ„,á.7‚É¡UN½Î­ ð¾Øº‡UÃNÃ]<Êkå.­õÀ’#»Á=2nx…ªu(IKødu“¶4·ñÎwAô!q‘œº÷?†cܺ6ÎXIÍ`KipaMõ© .ÑXÌÅU÷Xl*´¡"Õºð]?ùeëø]˜}³u DņeÿªQ ¤H”Õç™Àš¥”"5Ý5ì=m$°¦»cOSÍ-d¶—xŒ„Uswˆ7M·¯t¬p;ˆÛŸÒw,ž%´·³Â·ˆ[•ó[„ö %ÁGèÑð8ˆóý×~­q• ¤|xæ$W$è®ê©Ðëªu5x )QXUÛZŸªž¿¥ÍK¨=kéÞ ª:³*PY=Uj/S•iqÑ¿¡ËªÓòÞŒ4°b%€¶Æ6{§…k W,I(21——b ”K«L±È’Œbų$Œµæ¨Gãú;üØÒGÏ =æ4tðgÁÂŒ¾Rþ4C¦´5 — ³#¨Pv£8PÄX¹aöS9ÎÍÞÑ«öŽF³-;¢-+ˆàŒßînCšiS€Ç˜ñæ&ËpÛ¼‡{ Á6ÿš„ý€÷`gíÀ^¼z¤Âöñê‹TXTøG±<¡°Ï?¸Â1k>r¨€ÌçÈ!a4B­ççh€V‚1A=@º sýy Óxÿ§ÊXMTŠx.-‘½‘‡|ØRº %Ô@ˆ[]… ©}™p‘I"ŒÌâ4Ìî<îÒÈåˆÖ’°j–KãçÖ‚BÐZ±J€f†ŽuBR8¡Vä»L ìÖ7z{yZRBŠ¥GA™äzÉ2üâU‘eøOœ”A^ï®c~€ã»ñê’”@Ò3+ÓÅmõvÔc,dLb`¢% ,ã@ʼn&I¦e–fY|fdÁgþŒ)8GtæˆÎ‹ŒèøʲRr~U`4Óh2ãŒ0ƒ‘Ú>5“ÈŒ*"ˆâ ÇeÂjyO¤A.Á.í §WÐçíh2?C]“EiÁÇ?}ï¬Uô•ŽÔŽ¿ SÕyš¶0£pü3õÐÅÔ¯¡að¬&g­•hZÄÙŠH…'ï,2y7r÷}ãöxÝøÍ?½+?;ºËFQaµtÉ`g°ëÍ9¨A‹Vð¡h÷C’æœ_ùEH?¿¬øEP¸_DA¤„y˜Pyi¦è$Ò©Q˜ 8"Vy¡(n$+ƒcÃbÏkœ^¢WgŸ8ËžÃÓY·šu«Y·šu«3èV *"iwÔUY;ö‘Ÿ[üh-詞ÎÁ—›Ê`á[T¼eÅ;cG"}œí“9c¢„`é±ÒåÅ#Ï«z^ÕýVµ H Æl2ËÕ} ôãê®òïÝâ Æmª}h}¢H+d ®¸K™fèLÄ›¥©†â$bŠˆì&Š„´`7Ù÷y]w (&£u¼Z[uô-ßp-~öíÛ×Öt´[)Io?Ú…«7éá—wÊñ^ÀåƒË)vÅê¾å·v'deýt–v‹ýY¯Öe§|ãýäö¼UÎ?§k'`¹`*Ö xZo®O±¬PÝo$Gï có©d[³BX_¡:[¡.“'ËC–ú® È”•ÌC³hPçô¶­Ú–Ç; ]Üß[»ÔÅ6ÜSôÚ)ÈEDfä맆ók{ólÉ'¾ù=XXÒ‰×J¤•ù?íuµYZ¡7_îM>O¥^öÄÍ”;6ú@5o¢V+wkŒ¯a°lÝPŠD¢_œJá–\.••.ïשÁTö$+ýþþÁ*Ëfs·}ϸdRð”_‡ub…K/bI~°²|ÚÜ€N>¬ò¬ØÊ,°"9 \‘ͽkZ¥ÅO$ºUŠÁFXB¸ ô§§åá8ÉÚcr`¡ˆ÷Ž -©}h{G¬DRÀ á ƒ8 '@&’’q®·-Â$"ªSÒo¸ ¨¥pä”§û¤tàMÑ£­û!Jׯ8æ;«|êÐùµf0Šd’`¢eÍÇ+Q}!CKêªà·‹qqçÕu3UW-Ó¾ÎÕÅ8ÌÕÃN.‰qHšsÈ…˜îìÈÆrº±­¥CÛ2{Xñz™á´œ6輟¹‰ê¦k5QcˆµÔ &*RmT¤ZTD‚甄¥âz¯Ú³¥äægؘd µt »­uM ¹=´0…jaujb¨Â•WÞ66EšjlÿÇ€ ªì޵â@¤B0Û?<8bøT·Â Ô¦[9-¢±áBñªWº6Vvb– eÅX$öØá‰À$Q‰ š¥ eP&!œÅYÂ4ƒRªbs&™ÆÄ­ckõ.ÓO›üÁvWmË/O>ú¨­3¸œ¶úÁu—ÐNTÛz\ÚjyW妦–‹iåþûZåA6+§Å8ÌÊé°]¡ \Oãß§ìNŽ£ŒªŠ'ñ«ú¨Z¡²ƒ¥Ä× 
{ÔÉl‰ñÓ¾ŒC;ãyprŸ¿¦ò×cç¦ú$?L•7Õc½Sæ=a`irÁî·mc`€JÁˆ÷ÃÊÀ²¤>6Ö$åI=æ‰]Îûû¦óðvÙñ¬Øe=:Á,ÃÅ06ßóý1hm‹Ðû£°ºá)7kã"ˆÃ· ¿B”3¶ì”y;+“/…“0èéú¶NÎ:”š/P·Òñóê¢" 3š•Ú5OYŠd’ƒ$ç\‘ñˆsNOKÔÞë-Î¥½÷¦.© u¯ð6µÂ«-¿Ø™Ì_y`±­âÀ¬>%[D·¯ 7%ðM ©|Ö´ð9ˆŸ¤™Ÿ¸’Ž&±Æ©„„ÕÜJù{Sû•ìÝAáìW*Æaö+ù,ï:7 ŠÕ9–Î*BÈ#ÝÞ"¯+Vä·„=^Yˆk±—‘wè`<úÁâ¹87+)¾í¾Fo »­ÚV¾a‘Od«ûK%ƒ¶nóÄ~‰¶Òšçwk/^óâ{˜½Ð=,`¬Là ŸI¦P8hÛ¶.ð¯½yeØ„µÊö•§ ª¡¦b‡]Œ^>§»É(R bR` ŒJ,» @i­n74Q1¢EŒb^ŽpGNBC³6â…™}ðD…)±¾14bZ¬ïIò!QÃê®7b¦³[ŒÃlļq Â.k¾L'-klðʘûö’ê—éžâ«0¦*`fýÛ¹¤ æ>{íÁ: d¢ÝWÓÔ컚^½˜W8ê¦ä Üûè®0À»!L—6€¢æ+od¢’`„LÔ,ÉHD}D ›”º3ð¼Óá½(È1Ñ'Ùéëú)EA ¯n¶Î¿äˆ‚å›|qÀW=G\­·¯î«$å)瀧J:È “0nDœ)£Ï‹u5@?™á­f•tVI¯^%åÔƒmÍž¶òÞšÃDf¡oû°1}»ú‚v«+ îGh?h³Ò}#žÍ\éfõ¨=ó9¯E{ô"Œe5ç}žfâù FIáã-ØxÖ¯d)1" |è¥-rÉ.ÍD º‡¡€±ƒÄ0 (4Ä4‰A–‘ÅeR@—žÏn뮞©Y‘›¹Y‘;:À "WXa^ãYh. ìp޼„:Àܱ8(”“”úø§+ô×Pñ¥i%ýR6Hl´„,±cÀ F(g­!ÜÈÔÎñyÕÁi³Þ1gõÌY=/-«ÇW»¨„/k`Ö$fM¢m« Bï$‡ •”B„^G…Õ¿³ûnv±ëqÔ‚à-¢c ¾ÿö®l·‘$»þJ¢ žb_ø¡§=0 Ï´m ý`4 ¹J,Q"›I-Õ…ú¬ùù2Gd’br æMF2)i8èéêJn÷DÄ]ã.èî1±Èn{y½ù×:^ס} œ›°NÏÞl¾Õ³=úJÈ¿ÿ-|ohŸ-êíËé$¿Î,ÿ8f-O±„µy>¢÷¤«£-¥®Ð ‘*¬(´Þ€¥¡„òáj»}g…¡ óàúC·1ò÷à¾öàÞ?㈸‘±Ô©€í)G‚Š+«ú@:¬7༟ªRаaÁtšã¾­Ë1OäÊh%ëA•s’1OáP» ûëd‚ȰzÞös¸›-ë%Zkw¶£—Üڽ߼g£™<²x×­ú]ÓîÏDQ,µõTDŒ’„×M¡¤HâÖ*S&+H íF¨ìé=®[纴µç$“9/˜L NâD$¹PYžRªMA ­±J’<ã .$§ŒÓ‚ –$¬(x&³~ºe7Òe¹ðH§÷Ö=vN`ôºAžîÙ—É.õ2\ÊCF¿Ç&ÌÞˆ­?¼ @¶™×Á×u˜KÔúŽsA®rN炞qÎC¼Zç½ëÇîL*b‡;6Â\û„V8„`äÆîâ²)ÙpãkØûêgߊƒÜº½ûGŽ®µaL,¬åÏ^L')¡#Dëº A2IeH'Êíßù¡7p%»CÓGª­$Ç‹‰÷äEˆæ’pŸº/JEß峞rÎᥘª“Bï+¹œC™„@ÐÇ–LÀ¬«·—En•%\´/ ˜kçkÁ$¾–Æz‡‚!Jt8Í5Š5HcL­MªÁÓÅ;k…K~ÖŸïØ`̦BÖèwÆÚ9àþðÕAþü ¹YÌžÒQãìV‡¶ïD«,dE˜`Î3k䎼ÍÜÂËÙý‡;»šI ·š°Ž“-×½§ê;yøgßBžúa ?P¶ºKo¢ý£ï»ÿáŒ2†Ûõ Çaõúm<ÙWÈ„»Áà€Gˆ°Æx-xz  pÃõ¹fÇöœ8"éò †S„RÚp€vïñÂZËì7¢Ž)Üþ¶›Æ?‰miŽp:ÛJR&ˆlB±7î\Núå¤÷pÒ±o}’ï% é’„dϳ2X©öó¬˜êÛn±ÞYëɶ[´~iféŠ`z¥™+?X¿r§ÍM}}î²r>Ï].èˆQ%5’ØÝXžã¯ÕËÍÏMê*‡êP¾ºXæõñöÛ«®Ñ¾ã^Ú£`]Y6Žæx:w ¬Ö$u ¯îÜö\³© 0ÉQfbmO‘ÆÛ-§XrÁÁÈ鎑²Ž JÀò:ñ#c–øÛŽÑ}ý¦•¨¬·§vM_OTÛê~«ìFÔ§óÑø¡tc‹~‰'ùëNÛ?«e€-ð÷ïݶ-Å„$eÈØ?IŽ´Ê¬ÐBÑXäœsÂmÓDK€ÿm-·0mVoQRø\•­ö)~M«^-8ŽGBì¿3cÉQ5¸ÑòÒ†qš£q†ÜT]lÍÌÔX)+K¹¡Èd±A¤(ŠXP£e,­&ùí1·ê*³6Y\–ãr”ÎSǨNd”yúXq+¤~9™r{27Ž‹¤©5pìKDÌ‘Q¹Ö1£YÌX𑈼|ToRg>r WÂ8 °+'‰4ãLkËIYQXþ4Ž?…Dš²Â–iYO°lî0®]ïoɈL­…gÈ`Íí·2Šbˆ%™’¼š ÝÏ’]ÿûVüÑEíö¨v5Áö÷j·‘–vÎ{2YB¡éЖë„;O¯„Uàþ& äŠ)F„ö¾ÍV€Ð·5¡zì“°ê¾QÇ«áðÍçYœß[Üð ÄpëNR!5#G–wv#=xàëÞom''аIãV)•[ûÐÄ %²àˆiéLCÅpn"aøÞñ¯Öôó—…Z.B®r/myìIç·6¦ËÎæÓ—¯h1)ßóðW÷5üuyG¶§Â×X)¼UãëµÕøîW•±ûJˆ•ÑÛEÄöQPñŠ}¢½ü³¿‚XèKqµ — â-}§¹€üv#THue°¾Þª¼¬ÎÑè[ÅKÿG ½"Òº>T^RIŠÈ½kÉ÷£oOï{›×ô4FX-XV¥Ùm]B+8U¼QŒ93È&¬0±3äã‹;T¬ } bµR]jVëìÔ¬nÀ7Y¶ú‡¨§%«_§2ßêïpɾ{ŠPk·2”e¼°Æ9ê$³séÀeac’»Á="Ô¿¡c£9JÃ놰cR$I¶Õ¥¶#àó¼ŠqýkY__¤óôL´Èÿ@ÕFâuÎ^­úΧݔÅ]ê{S¹©Vƒ`ÿTHšð'Ç ûr…?ýÇÏŸ—¯må /Ô+Ÿ 1 "ŸTXZm7ùÔW’-žVG&Ù†{).åo¥ÜÒn)·¸‘rKÂSn)±ži¿¼¦¿¹‰½ë¡Œ‘›T"£„FÜ$28&(“ 'º°ë4ôLá^uÕ»+„>B~ ¯‚¾˜7óæbÞ¼;ó†hûä`ª·q¼wvx 8{jDØ…°›Wàk›¿I°çJ˜^1#5÷~‹ÃP% X´ìñJ¸Ïhýñ×þǾ:\ SÊgí·ÜÔ¾ñMœ½Ã޼µ”€Žg” ó& ÂG#V*ŒçöŽÑõ0áX^FŒcüŒH4€j;û>ÜgnÆÔZ S·Á×+Ghýh`ÐûqÁEŽÀ:D¯oÌë[½sF¬¤ó÷ùƒ³¼Õã&µsOY/ݶÉ ABhŸü´$4˜Ÿäå‡ö‰Z¥'èh¯Ìdi{\oÍòJs•œÑiw˜DOÒn›VU={¢®ˆ:È<\ Åúdž•˜(ckç%¨;'í–´ùò=˜üÌBí‹àœ€P™Á&[-¿Y×å¡çHªà" Ñ눫M‚Cf]‰+Íí–" rSÉ$ëq·Æ÷n*É<¿q 2_¯7ÿzœ 1# &l£|´zöhóíþ=âEœ’'°‚Dœ6°ã¥éIEÜ6ïF¨ƒ¢ ŠFÊÆÜ©ùâs[{Å0ò×ê ªžüsãœÔÉ\¯~Þ~€dbRWƈΞoÿ—‘›;e—Â}_ôOߪôÏ/Œ\î?¨ŽfU÷Q‡~\]XT%ŠTù›±‘D&Ø%ó¦.ÿ.ÒvqpD‰"Ã̯Qþ2Ï]5Ûs<™Œ°ËǰÿžLÓºHàû÷È~ímôŸ5{ë›kÊ~­.Bþòãh§ÞÌ·g$7Ú:+l{ ¯[×tÿl07¦kmvgƒIN®¬và r‚™àiNÄì49«;Èå¤,‹ŠbÉ8¥œh-A´ŸfRVwh½"y¬ v«ë4nVu¡¯Ï£y…#Ÿ"¹~•cÕ­$ÝõQ6Ï•+‰n¦Ö%˜o,ƒåÙ¨ÒB_«û„x<±ºhZiëÕUmüÊðR¯¡€ÛW!d§Åv)è‡û§•iVFv­÷­xÃÊò’o¤äB¾)Hêå¶2Ö\²ƒ•FTR!—’ˆ J£î€‡tNèpƒÎ%{_u$R0Èt5*e‡ ³Ž:!§™(G¥vÝr»ù0ÇÝøn¦GøB‡€¤B9ñ#’ŠvËÜé@kÍ8L¯ò ~„Eƒ4®A ŠEË6Åbµ]ˆJ‘ƪ áîÌÃ,ýJ¾. 
äÃè¡ÔÝCÐcé®ëM,ƒa~&h$ ø@(Ìf`+4[YÙM££ ‹3¨vZ+BF=”íÔ[+]‚¨—SïbÇ{T}0:ô6¬¶ÀÏÈŒ Сy3†ï¥ßåþî!cgDá´½öOå̵BCã‡b~€|ºŠW|À¥w)À©’€VžŸÁ”°^A0Ý‚žCb–vÍÝ=ÓÒC÷ã›Ã @ ¹)Srh8«*"ëñ¯ð™ˆß* 2´^g{i×Jíª íö×ZÃ|ÖÝqÇÀ”ë¡ÏÃã"~¿´0ùbx“g©0‚3€á Ÿñ|q˜xeâ÷!+ؽËÅŽDŸ)*± ìEB`…ùÍf¦§DRkä¯Ã}8Ézx÷ÑO´Á ¢É¹âX+3¨õÄHŽC&©ݸÑK$.aæ˜Èú{/R\%œ+.3ô¢žŸ^|h  ”ðw8Ô¬S¹¹(N_E÷c”ÐUöh³@»z5ßäÊÏ·ßäžõ=LÒãd¾©1˜RÀ¯ËKÜ)FÂÀ·j73 ÃézœðPÍ0fL1Õd0E²•€ºÒžDT¤eÊ…P21hþ|óeâ…êŠÚ¡Ú%Úl÷Ý#!æLvÌ:b…þSHè0Ls¸‡a¦8ãr‡YøÛi¹˜Å‹[äVyìg–ÚZbB;X>ÌWl†“D¶8K 5½Í³G'dZ| 8‡óJ«ñÕt/ÙR*"[ ~lê:™ÔJw¯§_}ìïÆ-G_ È ÕKx0câÕL˜Œ‹<ýšNò×´¬Ô½7]8áYŒ]œÆÚÇF¿Ý΋Y0Jy–»ç×L«v×VÂB€¨óJª`ú5üe~—/fûA/ñ†*ñCŸ¢­.V¨l¯?Øû¨¬Ê“tèfÉ!{f†¾t„3=ƒ:«ôòÒO°¤°f7Ö³fþ´ï‘:SVß2²ë`´ ôL9L»Ê?‹<‹{I§Ú`郧’ճƧɪ[ï¹;:T³Ë™âch'&bm»¼ŽtžRiß×É‚§Ôõ½^Äó*+b7ÞÒ<m‡ÂnŸý}7_ÛÆÉž"ÒD&ãÜ û//wO^„‚1@~„]‰N'…ºšºÿÇ‹U§=ëÊMç÷î<¸.*e´˜VÅád©á~3ýÿzëïkù¯R“˜´È$*‹…)¼¥€4¤°ªX`énÓ1†ÒÉ»3!ÕÅâ’˜3µïîLó‡iá]×ù-›PW5z}·»n”ø­z]C'¤×õ¾6×{:×Z¼?ÿ±zåÓwoÍœ“ÅpÒjÓÁ¹»\³êœZòéd(Ɇ…²J¾n}¼àF !2tó”Èß¼¨•Õ7Q3+ª»(î8/ê{8B3ð¾zr±×Û {R$Ê$ZJ$oîn~÷®„2\V"pîN÷½îköb#h\ÞÆó<û¼²Óª,'>5­¸MãíSc[ÿí“ߺþä¥Ó ¢SË#gõÓ8ÀÍ "[s‚p·9A¤1'‡Ï b3HJ¤]â–-¶ÒêÓõ¶ÚW«}ͳॄs bÙͭЮwÜ,W-¼÷ˆšqî_2É9 ñ5ÿÞÈh¥~,geb"­ÌBIŠâ"+Pbâ‘4¦öI–©$vžRïÖõ»›©t¼18VéâŒ]œ±éŒAÅ<éX&béúÒ©+ÓvžÄ©K xùj³ñôwF•1¢Â´â©âËÃY™p O çG/G:Ô ÷kU﹦°‘&K —Ú$èÉ”/w^xZk7`4¹íñºí ½(4·ÿGrróìGoc‚~¸Íݺs\ î¨zG.[[Ì&7ã$ŠB)ŋ̠‡É—[ŒÒ7œ•$z*_ž½rˆaëmƒP·—{ª\ïy†DbR£¤â ¥ÙïÜ{3âØH™Ãúvj1Ê8]ÔmWÝ‘+gå¿ùéRt© ú5AŒIJu{=Še*ò‹ê:û~& r»>çTžŒsaF —£ò5úæH¸ãüAºž8â‡ÿüÊqµ=£x2™>£y>©>–çõ¶S4©†‘øøeýÖe[á Fg#ppdµ¶Uóÿ¾Œ^×$,èƒÕˆ{¿±b›·îrgówü$ìùœkÊ»I™=«“éÍ’…ª Þh<ãW¥ÕTö5yè>·‹ôzÆãt‚ìo\tcÉï㿇U+>Oç®›6±* ªKö?çå¢ùÉåÓ‡üyã©eôê“g‹±%‡öË5išÉ\¨ 戜£8I8¢±¤¦P¹âæ„z„cÆ‚Œ þÆ®‹Î¸èŒÎ§Ÿp)‚bLæVí&{_zi.Ó¤H¹5B““/Ê‹• ©<âd¸P–«E°;8N«Ã³ìyŽT!R)L¢9eúüìÅjå•‚`¥rðÐÇz°Òlás‰bûFwÏ)¾ÆX®Ö†»½3/~¬TÖ"ˆò;}Xe«·9}÷õ›j§Ïå<®²‹_}¿6/à[ýxd™²~>Æ¥›NýK¶Ñ]x©„@Ê,$]K­yUSZ•˜šf…i…fUÏÍ–¦Ñb:½‹ì¯°"ÿ[J‰0A)ä—|¿K¾ßÛÎ÷‹.W,—+–÷Å]œÆó8@EJšw!o¤uƒ/§p*Ý[UOÿŸ½s[j$ÇÖð«Ì $è´–¤¹œØ±ßb.œ™$¸0Æ`—‹áé·dpa»œòoK2Ý;¦+: ]_þ™ÒÒ:=³î¹ rõ&ÔLŒ›4“ðWN´áë6j8Añß¶ ¥Ú2\ò¸\· Cæ£ðwj¹ ÌUZ.hõ7o¹`­ Tm°þ¦)¹¾;UþïS¶xâM/89cúR^¢Ì~y‰VUÊK½ž?íä¿¥gw\¹bk²ôÜ„! 
§Ó²ý§ÔÉÖÜðåÛk;8?bùù×í¯ï˜M °^“„žú‹—þw=ÿïz¾ûÄ:¡Rv^Ýo>vj1tRxd>Œâ[†Bj:›ÏÅß4ee“ß8í㪰ç”2‡ÝÕã?#+Z>ˆýž¨â)/Ô)—ÏœzS0¬xXÕ'e*´ˆ9VßU¦8Ú^¼0ÖWìÑ«ÝØc; {ß4웩 ¤’t#Ù&‚aRHÂgíŠäΔ•£79~ha _˜<+O±Ú¹°moâEW¨½C‚ùþ¼DNeõÎ~-]"‡Sböç•Ç*gv.1¾I]œáꀋ;ox¥ò´{ÿb î ®ÎÄ6wêtƒ7#N‹ç~3Éþ;úa»`øÄ늴7!…x÷þ8­ÜöÁº­¾—5¶5áÿÃ}óf­ß€EkI[É’Ò‹ÿã§óé2,} íÓpÏ ˜~®dæ†\"›ÂÈpÕ^p§N gé•9>CL)52®<ÜYJÌ\XPicu)¥•ôÀ0îp‡NÏ·këFo}\ï¯x<Ë0GF-H!0JœÃGÄ„H0ÂÉ™ÓocØ¿ ÁÒ¢’Fó·²$BY¨†D‡ ˜Bt#lJã¼C˜N v»\ŸÏŸá€Ð,ïVq·^樄é/C6îHÛY›`¦ÃÿÜÍîb`&þ-“Y·ò1²÷û»Õ¢;cÖfwÀÔHþƒvtä±Üޝ‹ËùÏ£òÏÈÏ^zó4úØ*!-0Òœüwá«xô™E_H©/¤’"ûN“)Q{SÛ©nËx•’J#sÞ1뵑‰ý+TV‡»ð¾ø,ൽ:Û;¶yüqÏù®Æ»¶¯úº™äþ뜖‘UWm[úÑ­tákèÜÁ ê5}Û ZüzxϦÓúº^–#'ø‘ÏSB2Ä«í=H‹D{a sņ¥›YÇ,G>ØyáøÕ7¯ýÚkC1¡*-| ¶N¸ŸZ%X‘²ø®Ù…[R²MIbÓ3p0ð¢{cý–Ïb륃…S~Îc¥AÒ_éŠe‘GV®ãSõÆ~«á8]ϵ}OÍÍÆ£Á8=UJ‹ê¡¯U2 @N(èYäë ë8;!½±}Ïáë\ókxŸwÙ´–¿sÐÊî<º·­Ç_Cç¼*Á“—7±ÉY`¼U!s¶ZŠÜ(løêÔ–ÆÒ d“¯µ ì¸ï3¤Â9®·„l]¢_„‹×çhF~ð;VêÆ8ûò– ÉçUE Ïã®RØÓgn ”zú”ÀЧÐmUûhÑ»süûA8†QܲMXøU7£QZ+0Nǰ±ß32ìùéégì)¹3dë½_¿Ýg1÷ˆºýÀ A îŒkfÃB=ò9ÉHYeKßå’9¨6f{QÏñQ\òÃÜŒ‘Ya´@È}Ó ðIø[b»šÉ¬éæÓ ÛÏûé|ÙÌ^ü|¥’Ò* ÇΊÚËIœ¶˜„³s\à7^brÂrÚlðÁÓ¼¼Î–6‡%WÞ›²—ïÓV:o‰®¸âŸêÍ£RÎ'H×Ü“ÂögO7V¤NfVY‚èª%í?=‡éyÓã'ã©‹ÃØ!Œ*I; ¨Ý¤SfS¬]F’õ­®¥Êüçj2Ÿ¾eçã$UNXû¨6>)$4Új¾œŸÙây ±p¥ú£=L}ãRz˜‰êU!M_WùÚ›š!ŽJ%H_°.É÷&ŽâEx¸VÅÞ¦‹Aü›bšf¡m†ª"Ò(¼ ¥*Ƭ“ä8k*÷´9<ç(C¹šÍmþ Bß1o’Š9+¤Ï“-pÏTìÃùŸ@ö”#'(Áܺ‚Òì^:¬%m7ÃëzEþ28¡¼ƒ®ßVªí‡—/™2œðNèÚË×§‡¿Àò…CÙªË×!üê¤L'ÙNVlÒU®ÿ‚ Wˆd0;ɕ⷗µ^07)‡ÓÂ#™èNÕ*sÝ÷ˆæ£Tè€>œ&=N‡õ aªç9ˆ‰{¢˜pm@Qæ£ýs½'©ŒCGÒJÄÔÚFÛÑe¨CUÙF‰`‹’Š9k8ªm1Æsc§­A ªZ •±ñP8.»ÚmkÔ2dñaë!äÊ .sÛËF­µäË‚زvÀ±À[†^*o _ðÐy”,›ã›d³h”Ç×:õ\A _åpƒ¿©×Â+‰1T;Ãä:žq‚:VñY.g›Ú¼"©vT©çÊQf¢z‹Áç•ÔËKdcúÂMÔ7P›Ò§ÙÝdy—¥J` »0w.~w’ZÖH³7¢Ú:öQ Õ?ë˜ìEŠ©³ íq Ý³Sµñ0Õçq>ÍßhXhQÔéOúŽo&7~ÆRÉ=ùš›ÍWW‰ÍeâzdÁ^q4‚EØüUŒƒ™œ‘Œ X]ß&ÀcÏÉFrÕ2#çMçHã%„é:Ƀ_E²kpžZºT¬fiãÈ",¾N÷åØ!–èIBQS­jŸ’Š~¯J=LÆÞ“VI*t›X|[!G!NcŠ”õMXTç©3øº³Š¼Q*œë¸:ó×N’•Ø¥HhÌ Ty¸ÐgÑ~Ž`0‹¯9Xh RFŸp•“6•§7üÙù,GªÍÆŠ`¹šãŽ0Áë¢JªâS۶ȉHKˆÂV5)p7§4Aiªe»w³çŸýî2±ß=?G)VÚAluò§N€Á.來a¥c„‘åÕRÞŠ˜0]'÷í‚P…Lêæ¼Aøl-lŒ!oŸÎ,¡P*.¾= ضH ãX…€tr,ö%1¤ÅÏ6ü·9j WïtáøÑç¥Ã$7%¯¥„(ª9!~®b—ìn²Êš…P¼wJѪ'0’­ÓL/CÞ \»,îÚ\x‘·CJí ÄÀUû²}îŒ}Ž*<$JÒøØ5—ÚØ¥²NI©VX(NËÉÒPU4Ù\=ªÛ¤š”†H\Ý„5â k„A«Š¹e¼68‹«zÖ\Ì&«ðO¹ ÈKƒUï€y@S¢”$±˜­¸ˆ•8GÂ$$k­fçŸmR&”WFòZ‘íMÿ¥QP®Cø¼~x»WÉMÆ*6 Õšž£ÌPsV:*HR Ï©Öqà°{Ž*މ W3íðìȵð)…P(§¿£lݼ&è!ü†ž°ÄIS®%„a  zU9Nº7Ù,C<œˆkFK÷q`­LR+g<@¦„¼ŠS­Ä«†3Q}ïÚv^j¥T’È lRÔuó”ÐI) é$ME?ÏùœäÚ§¼’өƇŒ¾:µºßb¿ÖSk}똾¼Ï‘X³ð¹®ì8¬j÷©­RñYeÂc‚Ü]r@ÚØ,×Í­xm'],}ûOÓ½NŸóÆ@ï$hd)ÝϦCKfRÅýg€Ú+(yðùg–ébÙÊ“¼1#U3ˆžD(•¤bDˆµäè×ÃÖOÓåf=ÚI8`ß·Þ°óm³öË·œpÌȺ˜ž–˜·MÊ#ÌÂï‰Òûqj¤Ï©_ˆwƒg¿ ÏîeÉí”ÖÐ- ¢{pY~tµ6É8œòBä^8Yðq8ÈvÝÞÿÅö£Ï>õ ƒµÖ ½o泜#;ŒÊ¥dÏà„åMßšW~OT71ß­¢å1LccNåM¸v¦f½|û•c`ia„‡P‹Öœ¨º*(ÔR8 v<Þš‚ê~¤ãK¯^G>k¨õ·lmºþݬs´•Ö9Ô—ÒöbJXYT–%•ý³ ë6õÃÆîÚ¡3äšžý°9 ‡cô(Ëb gÓ¢J‹ÔqIG©pE%•>î‘»ù¼±uLáÔoZv¿~å(­= X›XŸM ÷’H®Ö08W°´¾f/V“6îH“på±cý˜÷Y¯®aFjGu9Ï UÄ¡¡‰œ€øJ:4âdößx»ß4Ö‡'ÓC×7¾õßÒ¡h¤JIwê¾H¾t¬<ôpRañ>F¼¿Þ?î"ëG^äœ]´ æ-ÂÁ%•: o{)¯Îc‹G¶4þ˜OÅ™>^mc–eUYßþ "Ø$M9 ´3Pµv¸ Õü€ýlÝ9y)‘~Ú©:~¾8m7÷ÄYls¢ŸNîçÏË`;-o;9îºÇfùüó5œ‘¨m]0£zo›·~ýNYª9‘zUÚÀ8Ö6õ®E —Ôö0´0ú“†lø'NW÷Íšý[ÎÆ†¢QLÜ Î"{Ÿ‘ZYÙU;òÝ~tÅùú1Ë«Èô}Cý½÷9’*!šT:/Î>ᆃ)' «’E8UÉHÊ^ªôíÞw  šœá–›GÅ«[Æè`¥@pÅb$g‘Á‡ƒdy1L©ÅõƒÕýäî)˜rný6_å(Ö1Ò•ƒÕ{€° ©4c¼'„ÕÈ¢z~þÞùzg1¶oûðínÛÕ‡>ò¥¢Ã08ëë;¶wŸrß“ëÚ¶1÷–rê*ÉjdxvWörÓÂGä;m½ƒÀ­®™VxøAcÛ¾Ì@mߨáÇ[Îᑜ•¢«–axš¯ŒÙ„¢:s§÷óìig­êúÁ˜Þ³oîï(ë¨â6¨¿‚Ã;MYdïØSµ½÷pÊKwca=0:üž¨³¹ÃÝ©ÀKãDÆõ´:Ìå“2W/”JÊJz ÁéõÉEVš<„W1¥tt‚k¦t±º aSºRfi -LNë§½w£«`²†‡ôvçëÝÐ'µ­'ã\C«Õ{–Š ¡6¥mT6cRÇ6V!¤þúÇ*ëÁw}Ïn°Í¹ǜ"Çt0„ؘ+Ÿ?¸°(©5J~­<Ť{ _|¶7Cg{Ƕ1?îsrU™¥ÆH¯‘¤qv#¤RU™„ˆYNqüÈÿËÜ5cE0tõ¶dã×¥£ï'5pJ2P7ÈEøÉnP·¿ÛB9æ¡k;g|C‹½ä¨sú«tÆJ@©eç•Bx‹õw ‚Ï+Ž­±çw¿²¼[í'~õ-Ç¢¼þaxÈÉCeÏãõ5¬ŸK`a[(•“j…ÛÀ]ôà´CæíÑO›þ~ý˜£¬•BãËÃ¥ýs8ìÄI9pR{U[gÛ÷kÛ5ÈÚÁ;n{æ¦w/ËYâZiär^ƒ¼E2mìÆit[£ Çí—gË÷ýÐú¨qnÈ \Ç0 R£j•*Þh#„»íR.®tµw¬dß&qv´js !k„†;]´ÖûO¸–1•4ŒÓ”ÔdúW‰×»ûérõúŸ:önÒ,Ö«§¬·Ç„?Œ)æ‡K’”YæâìwÊUHÃÿêáqðAíïZÓ·ÝШů‡Ï›eé‘âõp`,•ln&MÕ×ïï8ÚÖ0ï`o­ ¤hز©ÜÀ1 
ˆê™ì¿j­ÕkI=?*híÜŽ|0´œ¡¾yí×>KFg,„fËùÓp®"‰0bÑÒ‹m»ÉmZ›Þ˜ƒÍ¶ëœŒ*ëµFê­+-7N£¥ÜØ.œì °¢­2S-a¡ßj8ö'pmßSóD³¬ø”±¥r \AmKóÃOC2‰Þ W´·æ¦ìØ63öƒÆö=‡?®sͯá}ž³;ÉB–ÅÜ8x‹ôÉuJy ¡sÝZÝúê·­sJQòÖ!PJV¬ù“¨H„‡+©X7û¹\mžÆ­cyñúíˆÃ2;GLqö%gÇuÚKdø¯+縎('sZa© dWAÜåäi{I~eçü œà(n>&,Fª›¿å4¹‚‘Ëy ðÂr§N8.vÿ…Ð]­âÉç§§Ÿó+øª]{ï×o÷9‚²€¸r. öê¥|FŽÙ ˆÎÕ/)ßíyÛù–;ãšÙ°POY‚åÜ –y ½”ÈpWÔÏpàÝÜÿ¶a&g{ŽëÍ’æ&CË $Ax^TòÜžbÃTTá*Sîvœ“+Žëé§±ÛdÖtóéç„‹e3{ñóœî^ ¥š&/d­)= .8¾œ&Ì•<ÆÑwßW¼Ô”Ùì¢ØÍC*–(²»Y¯ê¿zõµ×ª)’AjÈžðu7‚‚ON¶mB å°b!´©æ Ð=½ë†a’%j(=TžÝ)·CoQ’aI2#e¹áÑâXtEßÝwÎè“ú"ùÍeÃqî)-‘¸÷n0M†ƒdqG2Ÿ‘к 7ç€Ò›Aø{oJÌŦÿw9v`fì†ÍLêr¸c—¹Þ{X#Ïrÿñžt3ýº±3¦p}6Ðx· ý¡ÍS (›Ï;Ãâ"ùÑõ)削í»äÕ÷“Ùø¯íè-ØYX‹ù¯Ï_ý©'TáøÝó?{ƒü«îWoæ…a¾z–¬}:ü\×·XPøêvoBK%9åj&÷øŒi±A'‹‘̘n‡¼~®LúbÁæI7‘F1äºz¹ É·úï6©¸ 2´PžDBÔœ%=eMÈN™¾¥W’¤Œè(ËÝHÂŒ^¬”Ö‚7; Nz“äóëáx:é¾ý z:̆ý.þ˜a¬,3`æÿiWrØô¿ä³?þkÐëÿ÷íEòû°ûö¢ší´"$„µÓò`Á/’}ô׎À¼Óa2rnìÿšMÄÄM'ÉÛ‹*j$!¸HŠRúõ0wýˬß®ŸK»Hþåv:‹ñL&É;7ð¯aLvÒMÞ~þǨ{3¿èwßQ¤ûݯ’߆º^ŸØÌßÓÍÇÃQ’÷@]ÎþœQ+“¸SØ·Ùþe/Ÿt?ÿ4üÅŽf·Ý]úó< )ëš^Þ]FÝ”ºÅ-Mð LÿªYswã¤/Ÿ»|Ðnðií;¼BØ|kò…Ú+{õÀ6Ü7ý4r÷Ç›ÌÞ, ëâÔø»naûPYvš]u‡7ÓÙô½x‘ü%ö—rV[Y`æ;œ1ìU fô›vaúžñΫöÛ™®[}ôëëí×G³u/¯¼;÷†ãÞôp¸ç°™ßëÒM¦«'Ã5‹wîÃÚ»`;gw–Þ›ö`¸ ôÀ7 ÙÉÃþÍõ`öò—›éœñü¿÷®P?éä¯ÜØuß~ð¿Õø[’Ë››^îgæ3.¬L¥É±6C\¤éÔdHÄÀtYa‹ ´Â âûÝäÇ›ñŒ{çOþax} R°øëÕ0ûsñò?~þ±;ûéW¬{[)€öd@s=©JGCí  í÷‡NSíxôÝlt'$~Ë1=Hö¾þ:ùüõW2È¿=°(’“Å K¡­b(s…A`› ²Z2$RJR#™…mOVTØBS:˜>˜-,ÆÃë`Y|ÙŽ(¾†~ãß„`F3”.~uòÉôÅ‹Y "¼Q8 knf†f|p¹P š°ù\ˆ§ÀYÊ”P(S¹=N$J‹” \cÖ„ºö޹µ$šQ 9 >ÖaCØUºƒªˆsP ­C*uTk¤-”¬Ó +Í@UÀ«œn‘¥!º¾ð™–1Dà _4|'eø’hv¢Ù9„Ù1„ PºÆàR¥± ÇpžµG•³)°>KÏgRÎø®¬_›q"öýf‚Í.YAÔg8ý0v^tIÚ6¼gð*zÅFòY²Í»ä6P} ¼¿7µ¥½îz~$× º¼uo(#r–¤ÚMhGc…ªÌ° > MT¸SNTÜd>Óø ɪ†pJd5ªÍ´Û-¥Å3ÿälê@Ñk‚ÆšM»=èØMãY›; ‡¬MCŒ éy¤‹ƒdmJ¾ÈÚÔUY›psþÆy‚‡ã.¼Ÿ¬œ«w¿~eS×_þùù=goMº_&6ϬxÐD?»}6{ÔË#OÉÂt®}×oî_7°Ãð&îóí³Ûäç×ßûí„çm²6òÆÐ†Xê²ù€¡|9M~òš`f§¬J»ƒ¬LÚ5T`ÎëÕ ¥Çs~7OĽ¸uvçï¹2«±°ï,ìu¯ÿ©ûžï°å,¯W_¾zóù—WÝüt{ÿ«ïÁÛ>µi¨["¹i\븵…1 Yîq­³Sp¤U†³LqƵh׳//7º™óÝÄ͹3¯u?|BÉóòÕs«¥†¬Œïm¨TêÛäCozU·n(ÊfDà½LQx£ðî)¼¶•õ<Ìzõ5á ö¥ç"¹vŽV¿[ŠIòÍ[‚;”¨ü”ßú¥ê•ÓF3Ó&·%Ƴ`›:“}=¿¨Zêë8òóüíîJ;Ì?„©î &nI—÷ì%+wdžL®ìØ«Ð;U>c¸?™ì2É[ÍLe„ÀlÉÊ”ŒºÍûï†pá¦'Ÿ6äyÆT¦3d³Ô!ž)‹Rí4*–;N nQ‘³±Ñµ»Jž˜¿ž[e0åZKê±"ÌǼfs³WÕ7„ë0r›»(È')Èðx),-Çlû&xÛFˆÀL‰ä’è@^:õ9ÃÌÈ®›Œ#³t躈³ÙèÝh!¨XMõq6z÷5Ƭӻ)$W9JûšÒ*Ê “º¾™&\‡å¡6{µ´,7|²£¶nø‚i+Èï´áS•ß,(®ï¶åG¨Od÷ÉäTR§–8€:®s -ZFXL8W$€-C‹\Ðâ›Uû Ìur’M·¨»[r×¾¥ÓúáмíÜ?¥‡áìy¿÷²Œ¨þ¼K\ǶêÏûŸõ4"ÖUVŽ@—º<Âñ“)B]=ϻ֬©~ÒÉ•°ZÁv—óÀšaúÀÀîúé;ãŠ6’›¦ÆõИ ”ª¼@™•Ø(“jI\k˜”0®y}ï.¸“§S±:@\CJ)YVƒ+îlÔFj+4$·§™m‘Î`AãÔç¾s‚`æ"™Å2§Ê¥&kOn¹¢š³z¹åL<òÖ[øíåã±²ëeQoù6&Lc¸ ’êæ…õ1 ’JVSŒØX_ºÂ!- Aœbœ k)–¶=ñFVï "B’'T »^~ÏÛìÖJnÛV÷1f=2—ì|!vqÆ ² ƒE6…(ŒÃÊYÕž ËY©zA–ôhµ…ƒ6«*8a°æöäùIj„Ö34E‘‹ÜÀòŽkX-&‘ÑR£,Ôð"ËYQ´ÈüFhn˜_ª£1ˆ™_1˜.¹=1«ø4Y_ñܘÜ2”åÚÂZå9²ÊqD$ævGVYÖëÃ?VßÒÖ‹{”e·£G3z4êÑL¢?1ú£?ñôŽÞ¼èÍ;)o^}iÑ—}iÇ« =YÑ“u,Ö‹~¤èGjÞD5ñékýHT  ß+縅¡ïGTîRou‡ÇžR½U_auÆÐÛ”àó,ùÛ¦·›ª´ê UZ5‘B)JQRÀŸÖ<¤ªXµÇ…±œ#¼P–#ËØR¤È€ñ@*Ï%Ki³Õ¯¦`¬òœsÆú†œŠ¾!Wß„ÖwŽš£±r¼§XJ‰¤ ö@Z€ñ­jtN°Ä$Mi›š,J}K¸®T*æÀ‡½AªuÒæ³ÍzÔ•üU í×£†uÍu†‘ÄØ .`iSé›:±”!Üš0Ì|¡žàüI·Aˆ°<ÂòË÷.ƒ¶¦2d^ˆ>;.LlÌ¥düaEÁ ®üjéˆ|µ7§ßeN›hNT,C€Tdßæô»1ŒÇ ª#Hq=©¾æ8dø·Ýœ~ï¶èZ*ªx@ƒ#©°9Rsúr­ëJ*˜ $„ *Ö˜>hÜ€’iþT¥øÃ4¥/=¼!= Ó&t9r·šùX~Q3¿7*šoða{>îe_VÁÿûxx½¥ €ÔÞ'@‘Æø0]Òw4Z†M-Ć–l÷Žù“ëh A\” Ø#–2Úèh°¦¶u3¨rY(F˜1õGUŠHyÄD¥:jvy]Vµ‡öŒ¾^[ëÓŒÅ\«O$ŒÄ€;„"\s%8쾚÷Ènžè3.OÄ%6W²–9ä+‹#]8‰¬UÒXBóÖ˜°JBÑzÓª¸b1MúêàåSÑ[#³¯^nUç—f2/,ÆÈ™Ô"˜F‚4g)".Ç:Ë¡¤½V{°Uâ4 ¹˜Z+Y1H¡®ŸÂ¦óH‘á•2‚j£õˆð”™¥…AT†8Õ™‚羞H¡D–rÛb=¥„ѦÞ;B”¤!r„È"7‘“P#@õLjáa„‡§µÁ†²úóH­©9ãÄr fŠì`~d3ÕtpùѬ9÷ XµEšQ¼þD¤^GiÞWš%è|`P{”æ‡J³®”fƒ1WJHó¶dú³HÎS›‚êõîAõGÅR>0 Ö¦v½ ÆäL’ó"nЏéá¦$ÊRD-Q–A-IÄ M`m(—õQmF3s”DÆØM®’Wo^ÅY^–µ Y^pžiлwW¬Ec/Cº×£µo¹ÏÊlvå*©õm•TµBоdæÅ S24'B˰iáU‚BZÀ«"ËUªÍŒIÏKÑL8fZ×/ÁqÑN¤Ÿ¶_4CêûiÃuÛÊn=½E;fX MÇ‹ÆÍŽn€¨ w>jŠèéNÙqÕ€d Y½G¨Î°%îKP9{;d'Ð ËCG‹-ÛÉa¨!1üÀœmºÿW›Cæl¯mšÈ× S"„R®ZÌ×^#lάc¥J]Wó‡ô£ºàGÌÕ®Ïö”Z_"À_ÇÚÌÓ¾†]eoàõž/„¹|ª¶ÒÄ„ [±f³w¨ÖX‡ TÓÆ3±«Æžzí®”T!ã7tÿ,ÑÊ? 
-Ô`À@Ll‘4QHËÇÃÑ~¡îM4v-Íð*ŽºÔt~›ó¿tëAz—†µÁµ¿ƒŽ1Ó˜Pc¸R\yÿ½õæ'(Êü4[Ý«Œø‚þÂÄ)ÄS!Q –y.2®¤V­Á\‚á 0’æpý#OS—­ëOIú–czˆè}ýuò ÜŸüõW2È¿=°$žf9‘Úq© 2ŽS¯`Î,¼ÂЉÂZÀ¦=I$ F¸þ‚à’?˜%|ÈéyÓ’øòŒ-aùØš0.|¡D¦…P†´yØ_þö+{š¶±0¼H™, š#®0Fډ˕0Ø*¬q{I¥õÝpü6”Á6žHž™m¬ÆÆmãclÔ¦Rœç9ñ^^€¬œ1† Í"¦´¸ÐÒµ'›ŒQ_QdS­{q°_Á„Ûã#áÓòÖ󺔢‚kA¢EŽ8µ ¬ D‡=,R‹ì-©oìוÒþÌÞA¶yÅÞaÚâöئì‰0·IeʙÖ#®u†Rm1J%ÍT˜\XÙs ÊSÌ­qÃ5-¢/ú÷¥/‰Þµè]‹Þµ09ˆ¾­èÛz"¾­$z–¢g)z–­äý:ѯÓsE¯Jôªìãýë÷—?õ‡©íïâ=H/³âÝ*Æû?_,â»ÉŽ‘Ú8×i!E96°!IQZ䥶PY‘ڴдEwã, Ÿr² I„ºHÞLíx£Zb¬›|€®†“)êM†ýÙì-ã6“½éUBÏŠn\ƒJ$ÑXš€±™ v†¹«ß4Ò7EÏ“lœ%cgóy¼ AÒô¦=Ûïý{6Þù8'7)<(!¸C‰êà~NYÅ©–˜DQ½­Q;®çV Þ•ÕÖZ\c˜òª¾çìJÚ©c,waêÓ{ˆµ ÉL#+Äf©–Yš¶&ÎT)pÿ•ÒSà޼ötx-ÚÌ{BÆHO}‹K¸N&2ÞK¼{Yz}—¡„HSÀ\#q5þ{Sªû½èø€T÷p2wêIDæâN4êÛ[}2qÓ2ÑE‘§²Èr”_WÓ†ˆ–øÿÙ»Öß6r$ÿ¯4‡Á.n¨ðQE²‚·ØÜျ½ÅÝçAÐ2ñŽc{l'“ÙÙüïWí‡,%j5[ênI6$±eYÍb±ž¬ú•³ Ý"¼9'Úu,ëªfˆu†wA”¼ãC851ÔŠVphJp%ÍŠ†¾Ê odj4EíÒzxsâAm¤±à”Æ~Û•9Ë'Åâ$¦ƒÄðþÓCV€‹@˜Âb0YŠw•âƒÖAëYyØ£>~¤£”‹¬gHžf…4¿B‚VŘþ¢ZЦÁx¯m 1^ ?1Êò U/ùGà)§¤º;~mRUò®µqCÙð¼jçŒuÆ¡²BIÇa© $ˆ°eIecl)L—— K.E¸ƒue¦mд—¿;Ëóñj†É/еT=º(jí¼€P”:a±)ƒ1•‚0Ý})`;‘À'l23aÎV0[Áli‹lƒ² šÑ‘R˜•~%6,³vT?Nûð 2‹ƒßqòÉ‘oDß}Ò:X8ë·ä¡Ó)˜! 9F>Œ¼OA|aÂí\¹Éëм/×^›–)S“»¯žœQI«÷“f&{ ˜D)¦díi¦Ä$D©|Ê ÓìÇMLösd—¼$òÉ`·¸Ÿer^r›§×Ëž]²~¾Ÿb<ÖΔ U{eÏuÞáXžY8¦ƒÆÐјÀ~sCRT!:áTmtô­]Oæ7£fõ‘p«j|`Õä€0iƒæ ‡IóѪ…ÉÃAÃ)E¥Ð±ä0C-ªÊ¡ J¤!ã&<Ö–­bJ’?æ”d6€Ù>wXdó“ÍÏlæÇšvN¿ù±wÌFâÞ^)î"Ù¾FIF¦Ókô˜ÉÈ~òr‘` ·ä"Ñ":•D€›3¹k,€™S‘OIÕw×Wµ¸ ×làö&åäYÈa ·$¥,ÜΑ€Ü¸ö!¹Gëa =vB“Ò~“{<»HJ>Ú‡ä#J‰ê›ì㿳£¿-ÿ˜L£Iò›²SêÑèo¦FŽÅ˜zÜÉõ ×FÆ,A`w º6sûÈã­{ÁâWÖ d•²ü ÿr¥fïi´«g÷O[–=ÝXi[¤WŠ]ao¡A¡½­* Æ6vºQÁèÉ{ßߪñP Æ´ š9ÂëÜcþÉã:O:¬•pM4xQ–*i<ª1L‡;€d=ª„l9‡$3¥³qËÆ-·‰[‘MK6-Óš ¼î/I³f¥»t/4®%Kß~ÃÒÏW‘6nõÄô \ŒÓ s-¨EgÂÒUÚ'b {PEá+rÂD¡Reð¦Ž}qàÃé”ö*i`3ÓV˜ëJ%+û¦…ÜA¥®Dņ iZk Ëé˜ÖŽ^Iè‘¶[ûÍ^ÓÓTu4-ÒM”jÉŸZµ’!8_FÙ¨ ™¦ŒI`Éㇽ:aUXdEô²·ì jÀIå´î/êµ´’5ÙhËÅÁ¸çÒ”8xQßbŸð‰L÷U¬“¤|Â]²“;ÃÙ®G+#e­SÒÍqm¿ÙÃåÆÞ)´6¡ù†·ÄLpc¿™²ÇãÑ:ºZuÂ9å AÊÚÑà²~È=±S-P )NNyO¿„;?‹¡þ­>S$^Õåmɇiù–.J´Ò”ÐÑá©q¯êÇX;·&·Z¨:Ömý€å¸°w)j0 $­TäL{a.ì?0ÃÏꛡWöN“ô˜B‘óó´?ÅÛŸœ1`,&ÐBzZ¬| ¥¹¸m®|œbÓ¯¨§_ÙHýï+nÂk~½XŽ9Z¾úý•U8üö÷ÿn3öw/Ýܹ©7·môôÓý¿ÿü± ×L¸Yœ]¾âST²É+wYÒyâåüð击zóùª¥ý! 
¹ö¬ÿ ¿|<»­ƒøû—¾ÿù·?·÷/í:¿laŸjÎì•ó°Mo-‚é9ƒÖYŸB ŒP3DËíRãœåÍHpx­Ó¹ fýr¢›7¯¿æÍ »ÂŸ{¤×…ë—ƒŽý~þ§ÓR!»1ˆ»‚Ÿ ßñ¾lÊK¬£hj¯4±U›8ªZÈj6é?OÃ{ÉJN÷WÇ9"̹{(†7/I/ô€$DO<Ç1»U°®Q1zAÚHQEô¨UЧëÊõÚ“7ý±¹×Úœ°êÐÇ¢:ôËRË ¤¬;&ÑJ_o„‰ÐÞ¶P#ʤŠJ4¾²õ”ºÃ8#UFÉíUp›ä—-‡‰$©â§Ñ#I›?eÅÕ®šâHÎäuWRCT%…qÔÇó— =6ÌzšN\åÂÙq±þ%—ôæH=Gê§©9NÎqröuO/N.r”š£Ô,¹'¥9FÌ1â©ÇˆJ’î/0j§Æ/cĶfà¾(DÞ“»ZµòââòaНÒÅeD¸¨Ë+qvÅô"½PÖ/”¶ ¥¾ûiÿ¸¼âî4Ô×õOwVüûÓEIÐÔE~[NÙÁ²œp{^è¡bwvk­ŽÅZ«çk­[û|'•ÿR*6Åí@G>ZºÄC¶Ï_V¬ù3°Ý{ÙÞ=UÒGcŒ×’·˜ºµY… ×DÊeÁC°?6§-ؼþÝ›9 öx‚ r‹`“Ò h¬Ü5ðµÞ|ZãΪv+¾y±Ý™¡M;t9\•VB²­Ž~H»JÈJ7{SYéŽéMY¬²/“}™±}™"{#xŠO^#*¿Oz»GûoÛäðMgÌz?K×5ûœF÷/P[=¨É7aI½šhRBW7/?ÛHçUÊ) „{¹¬¯{z]ç¡ù„îxBÉJJH}i/íÄ©õËóO?ƺ‰Àq¥%ñîÝ{܈¡¿ÅX&¤×Ó)×S_*´Ã ù‹{”Iስk¼u~þû;{HÒ îAúÂæã˹HÞV ‹Aã¹ùû!÷ä1Þ)ÍE=hÆ[7 -•F`Y±VÕÞ üU¬W9¥k¢êé†ÛW¦ô•–¢tºjí aÉèZ!sw%ÿðCã„Ó–?[Év$rÛú‚µ5V— rµ£fФÛiX ‚½åšà@œÖ˜i™ a$9š,&ÅIâ´RçF‚áõ²ØHu7hYá ð‹¬nŸçÍ_VvÖ·ÀÁýÊ΢šýžpkè»7Ag'(1¬ë"͵6'…4:wºéD‘?¢;]µ0[îtSŠn·˜x˜¨ÔlÂcáëüVÚÚ‘Û)´Ñ¡¬#eÔI‘¶ŠR(R4·ºÒ^çOém<2l›S(ÒókõºMÕ·m‹E<;7‚—ÊdXŸn>ÿZïK’uv&mÏÊ>Ö}HÜÆ#)Õ•lyñXx„Rüòþ:^íK’ëš¼–c§¿„[~6Â?â/F9UÅe,ÚKŒ-svQ܆ëgå9¯½õDÿïO%',šû€nYZðºøõìüœwþÃå§PœÝvï IG)ûB3íË#'Od_À I6ƒþ©øÛõe[ÌÒ.kĺ‰öz¾ -Õ…*îvª¸ºln‹E'‰h ¤8¨ìà¨HôÒJ—/q+µýà ØhïG{5g[çÞŽRÈ6€j?a!Û–tï¡ *íugA[:tÈ‚¶%TÉTy©Tضæ`v’¡MJa“áf«fwá+ÙÛJØÖÖ> nÍx«RèÑ3M»c'ïa2צáp[GݵËDeM 90Ïx¸Öîî4ê®]#!‘I ç™™æìàQwν°QwÌ’.…m¤æa›ßuÔÝbHN[Öº®ßv©eÕ–¸÷»ÊLBžs—^l¸Æ˜± ó×¹}ì@¿Ýë@ú’_m 1<; D|š8D‡£cóOH_‘.ÑT ”s^@eHTAQ*ë)@cÛLv[QCáp›áÈ#ïvÕo^žŠØâ ñf«‚8Y E-X6 åI²Î@-|4h{Õk]©ìtÐ`|;)³_gãò¬»=u†~InÅÆù‡Súy) ðJÖXÕ‚ÿ%’ ¦DÑê ò¡)#Nèh€&ŸPE Õ¡†Ü%ybOÕw):xùî´Í?PûT·–8FM3}£TÉVDt°TE%e)|ݦ굚®~ ЀÕ2APÃÍä=è9@?νÈáqsx|Báq‘ƒÓœæàô”‚Ó"‡†94<ÝДÓú¯$æ¹Q{Ú¶=-Ú¶>eáp–NúÕ-Ñš_O@ñ&eó$¸ý%úyz«»Ž#ʽ³Dë-í¤–ý5WÌ<íp 6 nÑ;Ô§2ÀœQ üBGÀeÿ)ûO‡ðŸŠ,OÙ{Éò4š÷RdßaßAItFzìóø}Úbè›’N Gÿ-É©†¾¿P¦{è¯Q#é„5:¥÷ú¦eç‡úuñêãÍõ«êìâÕå§ñ馾=/„¸½ß¶í)Åÿ0áo?ý߯g¬ï‹ÅºfS"Ú–öÛðãw/Î>¿~õ©¼~uý±ýÀ»¿7ÕÛ¦Zܰh~×I :r)Ôh|Æ‘'¦8Éx‚Åéí >C±Ru€ý RšÜI@¥Sd”=H"½ÝD¼RÐÖ¦P¤wD‰ÐjïG¯tÌ€±n Ç€†`ÐiHœb²ûãáh8dÂ7{¯Íñ @x‰ÞXéH±³€@Ü,Q,î³üïÞ+ï±¶'îõb)kõz2¨‡ ËMGwhI`×"…„½èiè}p ª¢ )ÇäÛ{ßù¡ 4$-Î×û¾‰á;´»+ÅQ ôÁá÷?C_Zs}yµ_ý $#o÷2ôÝܺY–b?|Ì_ÂÅo)¹Ä› ’…«k½K[]Ô†4áz»õRƒWmöOPPï™ù[nNR•º<Ê:M]y@‚‹Z@m9^`¹•"SE¬M”zºxÁIë2aŠ½Æ¹ZBQöÊë«»[ÛÑÞãŠv’ºï¿/þÀPüóŸÅEóÇY…ð8«X]`´¬*á¡–‚Mx+Q W£4‘J]WõtBèµSýÉ,¬ÞÍoÿv¹…Wßœ¬ý[½øò Ðzìærp'­”û[¿7[ä®ÿZK©(z0JáËhÅ_UÁC±®Jå”-Õt¢H,‹Æô‹"­àÓÎgFOÊöIáÈÖp?¡Ê(4”¬và.g<ÝÁVÞcÿ$þÖÚ‘Ñ0rþ.çïž[þ®ÈÙ³œ={ÙÙ³"ç®rî*ç®v´9s”3GÏ7sTä¼MÎÛL‘·)rÖ$gMæËš§ÑA/†(+!yðÇæ¾Ì¥³ýT•¶%ú×ˋ،¹áƒÐÙ.¸ìl=X僬¥bWÅÔjoŸdËfžtÄjÙì#°j§Ý˶ ¤!%"[pÖ4èìtRî•u¾ÿ’Ù8yvÇ1óL¥|­z!žø¢´Q0Rƒ€¼ ÀòØ®› Ù€šPìÑkê¿ÆfÉV‡»ëKЋK¯5m;ç@ì“â#þɽ×Úøºv蔇³*hQ!¡(©R53‰m LwÀI¢M8à¨2~öT³§z,žj‘ýÄì'f?q^?±È^ZöÒæ÷ÒuBŽ@ÑŽðzïG¯4zŽ ?°©kuħ¥P~,ÄMÄ܃ ˜…´pB'È@úrÑÎ2Þñž¾ú$š p>”õû³‹ Ê«³W÷k~h.õùÇ›ÛN\>Ê¢JXÿØè{¬Ø§­xŒÞE@TÞ&àhL‚4°aì½nÆÞ€\N½¿ÿ19ëöÆ©ŸÐMÀÓÔv€~~ìÀgÝ‘î‡1ël¾þZ÷±V8òz#GÍÓëý´#úÓ ÐŸ~|˜ýó§ù[ýmòeª-å±Úqdµ÷nÇÀhÀžŸp‚ý=WT¢¬•à*'¼/•¨¤ 4¥›ÐŒÃ4ª¿ÈC%ÍÓ6hÊk˜¸•¬O?ù§,­)A‰Fkޏ¤jDm-¼)+ëùÜM‡¯ž5LF)8²/9%ž­W¶^'l½Šl;²íØëð¬ÁßþõÏoÿãü²*Ï£ß^Toëøn û¯?>@ߪ ¶²ñUD§E#) 0ªUlPTetu¬Ê*úéŒ&¶sÔ öŒØ{å¿]ùú úÒ!V!x/ðýíí?6zwe%€½Óé›Þ‡ßWçåEXþ„ÃôUêcl*ëF4ïãûÛÑ©÷hÑ'Poé4Àe]Õd­eÂ]%o}ðQØh¬‰! 
V+SEü?{ÏÚãÆä_‚Cæ˜*VÑ@>ìa½‹îö‚܇ûrÑÝì¶çby8v¼þïGj$MÏŒZÍ–ºõððËX–Zb‹õ~Ð@m̼®=h#á•×®ÑìÞæ¾…CZÁˆŠ´K!±SÇ?6üÉ“Ój£Èôßü`Û—Ž:d¼~ÜB‚íŒÓX£ÆwÄd5p\o8žŒ—–{ W’†Ý-§ Pšu°›hÁ½KÁÄè-³°¦siidÂÒ FÏÂö'FÉŦãhwžþÞÒâF3b –ß]í tëB›‰2²Ûf ­vFÙ$ì´yÙËßêë I«:ÈÕwïn.ί?ßå™ÅëË•Ø8¹>=C»GØÝ¹Údðdm­Ñ•J@ÉÙýdmÁ.²¶(%ªUÚvnKõçk“ñ1Ò‘¯M§É6™[ ( ÷÷6Øöúœ¹}H›=´OþÝ#‡/"á?ýtËaBáÀR*hS$ ‡cëXøPœpHò–9|è\šœ¡”¥-LÃOO&’ÍOÇ–`÷l~:rËSb‚X$z×}NâWNßMŸ×ß5·œŽ þ2ü­×+\ºÀ')SÊM#šÓ¥õ'ØÁ¤¹üu0Hà<\J'_ÄwO üj‘ÀgɃó÷DŠ“Ðáñïƒ_‡Í²‰Üwî6³uÏ×Ëò…¤§›®§{®¦'ÒÁ w û¤xÜ:‡µçu›âbvd6y*ó†ó:‚lèYKÝ#ÏîÌåÌã†>F„1hk­ÔŠ´#¹³y³O8©cdA+\9Á¦ î—©]U{šÒßr¬™úEÿ¾Æ“É×{<_>Qð%jégh¾>šÛ·GqðmïSJ±¶…ª,´^•½Àº0ú7M& X¤´:±•ÄÖâÆŸ €@w ˆðáâŸE%ˆÉ’alÉP»ÂWÌ2¶1ÊÒ ‡¶Œ¥7ª SÕt’!xÜлŒÄƒ>&ÙVw­)‚võtÚæï«Ú¥W$“8™¼®…kë –R°VF@ JbK©®cbÄÛ ƒI¨ô`%u.ŸÌ®tv¥OÊ•žeG6;²Ù\=VGv–ÝÈìFf¾<:7r–¸ìÄÇq4KBtƒŽÝœ°Ž”|éÀÇZÆë‹u  Ý ¯VHÕ¶åD‡–yå¹’…àŒ€Œ‚]S ïÊ¢Ðu¥n¶¨/=èu Ö D#™‰v$Ý 0›@4§2§KY2;’ʘ¢}üÝ ', gY=í-;¨pˆšU¿QèÚwÒLÖd&îŒI+­?b§Bš‰»¬G·ÔÝ´Ü5+)“Öµ_Ú°si2 S–¶8iÓÂZ»”N‡,S’¼a+ä˜ k1Z&2V½»Úf»Ïö„5òb” (°ÚWOÂÍE80¿‰ò&rÅÕî€ÛéºRAM?Qš´á>´éZIØ.á ÛÛ–ìcÕ]²¿¹¬~.ÊŒ[Vÿ`û·(¨Ož¤ÍEr ÷i±mÚñE<á8ç•9 ”t +ƒ`4Á)ŽÝý¶’²fª)¸5;`Ñ–Þ W¹B6MÐ;Oå ­à=×Ä¥mоRxõ88|ò4GiÊx˜@ÈÊV|œšUˆ¦© ö×Ò¨#8°v_£þ²–ÊZ*k©1´Ô,눬#ö¦#´TnÛ›ÝuéöM/SDئ1bkS8flí.·Q5«Í™Ô¶3Î:Z­öUK‰õ ž§Œ§­BÝoÏ›ºú\½×"]¯ëËç‹ðb1«dùöÎø˜‘ÃlûÇÆŸ2‰ñƒrñËz?cAìrÔ¯‹Êm 2«÷cŒÓT¦1F\Ü~pQ \üÅ•ˆwÿÜÒw_Y‹ý—VòíExöÏ¥>Z½ûýeývùß/ÿíþù[Wsãíê:zr¿Þþóå·›2˜xAÃ]¿Ìû"h¹âlnxµX*€óìë³ùO½üô!â¾°sî­õKýûMðÝ¢=òå볯³üüïÑQ‹p~lL»‡†‡Ë† #ÆZ‚Æ<‚å¾kÐM¡ë)4¨‘lð¯}gÙÂs¿×Y†–¤vJ2 )EÁîÙÎuNn0+U¼å ¤(o‚__±dáTpÐb…ëúð~bžÕ²»Ðxù„eƺhßñ‰ŒS þ¹ÂxnªJ)µ€Ò²( YQ5²ÒÈ%^ŠXy°U’˜]õð¤mç”ÁÊÝøÿ¨…Éä¡M`PÖ³ ´ŽC¬KéD¬L¤›Ë jì~™áçuÌТTŠ-<šöð”;d=èß²>Ëæq6³y|Òæñ,§Ù8=nãTÛèƒõvŠj›Æt¬2Ý›=µE3×Þš:.…¦¾u…UØüpÿ[¡Y´vZh$R_[ ïôõÛNúiGƾ1ư–FƒÙÀã6ì¼éçq»©<óøPï”t <Þ99)…Ç“•2ÃãÐÍã¤ÀHLàqâ#šøß´Nï¶ ›=~†wyÖî"ŶŸ^ín®´0_–¿ÙÆÊ6ÖD6Ö,sX¶p²…3¡…3ËöÅöbc{„çZM çÈÄjÄGµŸ÷+6»D ªÿ.î°ó, ‡Ii9äÕ™TÐÝŽ¨ŒÆQbJSÊ ¬‡­)Ô €%k0 1DvË›Wëå jÛÒ[+<ÿ~õë&Fv–»÷ÏŒÔ`rý·Ø†pæ4'Ù5M£ã|YAÚ6"ðD%ÊÒyQ`eÕ…7µ<5ác!ÎHî'šÕöø'Ù}‹¢àWºí@-íð:lüÅçWççs"i¿Îþþs^¼=ÿ3’éï·OÌþvs1·aÃûןçaùÉÏ—õ_—×pJ£àj5‚¼ÿx5¶Vþ:{ù©®~Pôã‹Ùó›«ËçåùÅóð±øxU]¿ q}K ŸÞZ‚×ïEy9+/Eý©"T*¼ß¿S(;ÕÔŽÓØ÷ <’ɨiK ‡ lŽ5žmwd–ºδéÖܱ_­JÀMo{Ë3ÈÁJ@¢ÕT7QóéX¨¬•‚“•Sö¤Voo®¢ó Ž=[Ï—~¼¯/>w‚N`ŒN}ä;ç·–Yq °D£wšn‚w@Siœ{†”‚ãî-pÞ¦Û ýF'Í{è\ñ—ï?ìVC;(²ö{å]¾èÜåU ìâÇþ7{CôîÁ×'ˆÍµážÍßl¶&*wÿê2-ÁI0ZZÒñÚmGÃtlSRª<ÎU]°.Eã à%‰‚ëÀ6Áìw>l6“Ý竃”bYò¾šÈŽ—‹Ë™CxdŒ¸„k .üþûÙáë³ýkvá<SgÑ)U\è²,… H¶‚+&¡uQ:fï•÷Ó1¥ > ÷3Ý¿~Ü&56S¾e8XgsŠí¿Êq§%_ÃX>†Ÿ¼-„Ø®ŒFšxǤd\-œÆ*œÍªB?¡ÔV ëƒ5ð'éëÕ1O“_A½=¡Cî<qj!Ä`¼ÄÉ‹ÂëZTcÑWL(Ë Ê7Á#b{.uŽæèà·œåØ\ŽÍåØÜ`–È‘±{‚‘±YŽKå¸TŽKM7R$G…rThêC–c29&³SLÆ’#éú;^,™½' ôðØÝ«‡K?|Ýß;¹¹HºF¬JC‚qQa¡pñ~¨ÒÆ ¨ª<†sH¸Kl0Ÿ¹|æ–g‰L°¿MÒ…4›izMå>øÿ]Y:U®tUã­¸j®]³+BFÑX&Û¢°èÐgš»ÛJ‚Ò!“P"Ma˶·óÒ4~[ÉÆbíQ:H¤b—‚ó®$±YeªÎf‘txMÝ,²E¿ÅþpÛ?, ISö…¼+ª7çµ(>œ?¿š±¸nJÄK€Þî }khá(­!Û‹D ðŽÞÒò€G¤1 äýÜ:f–U±L¸ÖʘÅÓ/õZû8,/Si?wÏ'ý¾]>oSž§»ç9åy^mCÊónñ¼ ¡W¸ª šSÎÛÝ›‡úÎô6ýCD ídYçÛ²^¤»¢Ç‹5ô4z¿ç·Ž}l÷–Ï‚|y…Fü^¾ˆøYdÄg‘»¾>Åo „'Œ1±»tp3äÖ×\'Òâ„“µPÆû§Dc+`Ћ‚+'œ·¤AQÉ ¦sÓÈEa×/(Ȭ6.mƒ¦½[;HLaP%X(+%\Є˜Q‚y(©@³R{ÏF‚¯`ˆ’SOùr™¬Ï²>ûæôÙ,k“¬MÆÔ&§ç$ÄÀ¢Òc:]‹’¯Z¯[!]IJt¶Aà›ëë?× £[I˜D—Œ-J56¶‹oŠo‹‹zõIðgÛ¸7/mSyáß4o®GÆÝÅvî§9…OVe嬵qª@6¾æFØÆXÓÔ5jåî ø¨¡ñtF²^yíÍÞi=|$Üa™Ù‚FÛ?œ­ÄãŸÙ÷äÉé¢ùÅýýÄ®Ÿ5—ØŽÑm!¿:ñB´&/0z¬”⎘,RJæŒIu§]8,ÆrbÈÓ,I½vbâØ²KÀĺ)H”f,ÆžÝ@”VêLn—î5²sim•IX:P}ôto_x}”Œ¯C–d†­‰[f|ûZ&}%£Uö]÷y@М²qS'}·ËDº <•JÁ'Îû^þV_!ZÕA¤¾{ws¼ì·^‰›‹8æP”7‘MºÉlmV Èt š­ÓÀ£€ï˜d øNM‚Á€$±³à¬éŽ;«a%£àz$®SrİLkÚÇiß`нßÔtÁç• „-ðIÍA$Ù"Çi¤– ×È0R){Àçq^_ܢϋ~ú ¹»xØ/ŸâÅÅŠ §ÄjøEk½udÇIöd›x¤2L ,o¤Ì=ÝÞfÒ ,WMýÙc÷ÀnÙëaaªu ‘@¹X!áP+¥FpÁÒpª¥lЦ‘¨ta‚Cc á¬4¢dÍÊú ºÉ c5¿+¸?8£ìÁ†n'mо<¾œ{œü?¹§‡°.ÈŠÒVN€u•`§¥P²4ª¨¥•h²ªºH8ÐÎ=í>Œ¬ã²Žû¦tÜ,k˜¬a¦×0ZS°†z+öÃsf¤ÞŒ_­!ê§øñr]KB¯Ÿßß`´!vý5Áá9ЧÙ` ¹¬]!²¥]c`}.V%˜¢6UYÙáê‡= `‚òê/EŠÔÊD»#7U)’–¥  ®Ð¥(Ù‚²‘ÅtDCI*!j®ƒèÉD»#š7¾¬ÛQTwPâ\…Ÿ®”¬k⢑^MH´p\jv4´ZA޶³ç„Eá, ¢§½eÁÔSDýbÀ¨©¦Kö$æ6â®Àkãõõƒ»êí¢Î."M„ÁRá¹VËÚ°´¾î\·mØà¹Áiý3””¾Ñ 
¬u²Hc¦ô{p[ ”ÔVéwW;ßJëí!›¿UÙĶR—‚Lk˜ç^¦K†¿â¶k¤ t#‚IÝñÔ£%€U,1X£ÔæJÞÁ; ?o Xî¿–ÜÄ“?~¿Ðæ 8¾Æ8™L2Õ°µ£Û¤×T<¶~ðAߥ×ß¼šÃhƒÕÿòS]ý èÇñËÑÊ™}÷ÝÎk¡z¼¯ªë` ]¼ŸGëæÚÀÏÊKQúß‹`áǰØ6,s@êOç×ágŠë›«YW±Ú²MPµØºL2·VuÍè¹;ùw%äÿ|Q¶Õb¡ýçr"ÄÝÉœvi¸S†@Y- Å1ßN£m^Öš½ü¶ŠÁëFÚz 3ï(Àha,j_£Rq~ÇT~…·ŠŒÅƒ'¤mÐ~xu0æ±0ôäé!é\cA’@D } ‰ó•`$huãËé-‡ã‰¶ÿÐ’RO¼Ý)k¦¬™Ž[3Ͳ^Èza$½àˆ]Bý¿¡ÖÄ£7õÛ`²¼z{~qó)êtÃÏ—ï?žGõu8•±6>¡&†ñ¾›{oß=[$÷‹àÏåõL™¨µºßŸ$Ù •R±ta¾äâ¬mÚÌð™’úŒÍY—·‡ÊÅ9d½¡jy{‹å^_œÏu¨¶óª?sú÷Ûw^‘èÑŠWoŠËÚ‡…¯›ùV€¦V 3ÈÆå“;/Þ è-Ÿ‡Ø·ÍW¾ ,¸Ž'÷*hž«ßo‚™;÷òænÜ̳ùWû`ÓŠëÑÄ2¸lËÅo£1í¨ïüý³æüm8³êMq~ñbö·ÿúåþòË_» ç0¡ß Ý‘ÍA½²K(µIG•訇 Fœƒû–ŸG6:O@ÝvdæAuºX'‘ Hت<õèÉi9^XÕ_`fÛwÜÓ JzÜ*€ÿgïZšÉô_QÌÉŽ0D< Àa6¼{ðÅáƒ÷´áC=H5[Å)µzÂûßTU$¥B%Y)y91-’¥æ—‰ò‰Ì_Þ4À/áœxS¿¼êˆ_ü‰â•Gý3„‰×›õ®Ö·óÕ*@õ¢T¼»êÕÚrÞùŸÉãúa².çËI«ÿئÑ6ìo¦¹bìG1ßüs/öüÇà_‘í}÷u//ÐÈá–œþ9_¨ì!t™OÝ4Uy+\s"Pbõ¹›¦â5ôÏÈô”(ð´(AølzAÄèÒhİ€Ž-ãP½_4嫵>A3Ø-Û)Iá:ãýI y¨Ów‚Ý¢¦oês½µ†BÊ:Ðuú6°ƒUTø†+{žÙŸ·åÔ¿Û‹_©(øO5ýs±µŽpÝÏxgäüÏ7ÐkoÑY‹B¬9ÑPÞ ­T‡öR0¡™‚£ã\Ρ•ÍBSãáNŽv84c<¡— Ô©Bo+’hÐW³¾Ÿü2©{kY·s—Ô‰p—L w–¹¶Æ@òA^ï¸ý…/‘VÀœø Òû¼3ÍÊ C, píLWÙ|^Rƒ$¨Æí!ñ'®E 1è<ÃÄ>Ú¨Ÿj—ç¿´ª Î*f½(1P¢dNùŸ4VŠ‹©wÊ:£ø†ÌÁNò‡Ñe<åEo]ôÖÓ[W­qÑ©µ†ÿNîÑ1«,~Ò1”'NHE’é!¿Ä Êè÷²¦@¨GsIÄ›¹;ï†Öú /±–—Q”Ÿ9„¼º}‰X)Ú0g¨ÚÊÁ¾&Nÿ°W§sõc¾ùvU<Üø]ü »¿òú‘y=ÉîVüŠ}¿ú¿üå¿þþ_^¦Öâ¼âW½øwbXܾÙô“ŒtJ¶êèΔd”×R÷'ãè”5úíl m‘êò=ýêó7¬—#Ýp§S™<…Ñ_ â„“4£'IÕ½¤~)4j™o–fCR»‚PwëÑ Qžošf<³ç”åþr*8q.õίð¼ê_ƒÐ’‚ÿ¨¦ ÇŽd!á¿Ò»SæRß@Ku …&Ìoð»åR«·\ªåVïåR¯>z¾nžàíó¯;¿9|ükÓ&e ¾~­ÎÜ:À0eÀ<Ísfn›e?&s¢V¨ {Ï\šßÓƒqo+’(Þ¬ï猀ÿ® ¿ìØýë*ìÄ?ýùíí?ýÙ†>|þí°Ó¶Þö/åïßẼ*pVèPSf±$˜¿[œ/0—®Ôj*§LT•dP©‚YYHV¢*¹›xŚϧ¶ˆ’pOËÙ­ñð'NôÒtžýGûúS ÙCöåÂüqd¼* -kV ÿrfÁ8UÎ còÕæ;/—H¸³å¬¾$z/jî¢æþÍÕÜÕEÉ\”Lb%Üy/qø®ªõ'Í wQ…áÌ(„‰onX©úçðkä…£^„| S/¶¦G^òÂÿû…6´h9Âo]ü¸ä…?ír:Tf8ö쟓ö åð  ø§Î@‰æŸ-)yÿ]ÎC3çO„Ä(±ÒG dëjÙa‰UÝóÕ’s¤|µu'L¬6VCŠÄ*™FÿÏ—XmHj¹ãVô%V=¡½ãCmÏ—X¦ù<6¯è'Ð ðTÃÀ+¯Ìç³ù´Þšl>Ø Bã=íê¡"Ã#üs‡•ƒŸv…1óÀ_j-H=V¦>´&GäDA†1ûýsÒ]r¢ýÓÖæèi©Ckþ%Ç¥ f¤ràÆ{C.Ô¡ÊãRiKð…#à ‹JÎ g¢‚ŠA=µÌF³ §T¥°´ÓlΣ Ór„%N+JcЩ¶²u?é =^-x%ÕTIV¸Â1˜jÇœCÉ”ÖQ Q¤µr_C { ~Ñs=÷o¤ç®.Zæ¢eN¡eTH€ ×OƒâV§ þѪj[ãâ£\à°ËOÈ zQ&Ñy® ûšYÁlíê¢f\Ï=Ó¬°N{aªMé·…â‡'†Î+¢R[+Ôð¢É¯Úk9Ï¢s4¸_ {¸+Ÿî«ÞËγhçœ /su‡úE³_à¦õ> ¯.Ñÿo–÷pJ#'XIÖÈ7ôçãnèˆp g Bé[þÑæ½VîhôRç˜mÞo•7ùfѳۣ°Ã7`•pdŽþjàY§›¹*IJ@©BmÚñæCĵB"¼:´wëÑð·ÚŸ`Àùq‰qwj¶:id.OhÂõtù3t]ç«GHÖŠ¬ûx¨8ÕfZh0i3ôï@“’i¼k7\0RüªáÉšï8IŒcFïË.§¢¦.8Sµ!Ô¬˜òÐUÊjSÌÌÔæ›³ìåË» š"‡8*=v‘¹‹Ìµ²ä$8E8ºí–q›(X¾õsW#¬ÁÔeí¿°¾õfùQàüUEˆ”ÓIEqÖ¼@nò¬IMÞôþVl¶h{^-gî´9ÈDÛ¶°>¯ôÓÃY¨sÉÓ ËùæçÖâýV?=ߤ$5'”Øzà׌›×¢¶÷'¸'!̬fÖͦ¬veQÈi%fvÖ]t"…×Ú§uaJo6 7¢Ê³¥3Lͤž–¢˜ZUwO£Ñ¶œº‚Í – äÔÿ³h ¦«T1UUYáÖÓh¦ÓBM™­Ë)ƒ‚WÌùó—Õ 9ƒé1¾³ê3Í•‘( ÂfÜç÷_ëó†Ù Ç´Æ­›baþëßþþßÿèû~ºi '²´UêóE€éèµ;GX]s¹MG‡öÌïÓÙ£Ñ[›…ù½6Æ+ó­æËu>ì 9ÿ±ÛÝ K‚î8?ôwfìxð"‡Ìôx¯#®mä¨vÊ€”“òóääöR‚ÐxŽE 9è¯k×*v€’)9Åé½×Þ@N}«ËÊaù?ÓóÕȹc _½5í,Gþïç—$áG'ω” ¿wÔ´ÇŠð¬éÏðQñâö]édøˆ©&äÞí!¤ipûÆqDwY½+ÒЇA £qøpFiÇ2¼ŽþëßÿÖŠbÇ9¿ëûÅ”=zÇ+´Z¢³ Wñ\pC{î­˜µ_øeqÓ›¶E¥„²À’çã¯÷ž[…ÑÔY©êŸ;L·¼ôˆÎc8¯N†·gí›ó ¯c-@PiN àŸ•Uzñ!pMÂgRJÀ¶p6œ4×ÊDYi$ b®]Õ´™ î.2`HÌÛnƼŒ1Ô ÔÊàöT±øzXØ5‰J€wt °Éµ™ë?ž¦ëùýr4P«3+ØAu΃¢ µ™¶îeDà†RHI·v×¹9¢ü÷¸ˆ|R‘O·¡Zö½aÄkÛó µ£@„äÿÀç×`’@u™Dói½ Qp6_ΊÑ0…̵ƒBY³·R¼£2íg§D®H8sIMñÇò¾ö<õŽÒf¾ù9®L¸—vÙ*QRo³¦Êe”Ä…“ OäUD½Ñ:B^Ò-ó ÇÞV×\‹èòRái•ú¸lÏsáRÄ\ |_¾au×. 
Ò´nç«Öÿ¹ÞLïFCÓ"»VïňŽÒUÃ?'’-t‡ªµŽx̪?Æ­EmCœ“@ËšëZWߦõcHw BƧ6¹dòv9;Átˆ*± ÿŽ‹­|ê¨Ùa£ÆÌʲž7Ëûõ¦¿…6¬²@ðŠI€GŸ¸[ªV+DlÚØ“½8QAá¡VùBO½àŒ@—.ºË¶VÑÆÛ£”š6D›R¯¶ù¯hÁZ„Yx"]ˆi'ëÞî­Eœhe¾<'±:Ã0- V'Ræ{ê¯UL8 G 4 óÏe®d}E]÷â^Y NºŒñ YËOÛOtœéPÚ×’Æ]Ûh*ÑH-µ¥D¤ô9·ƒ†¿ÉVŒ½|ÜËùsÆwf† À>KÕµqI°ÚÌY³ÊŸÇ½eeF k Qžž¼8ÇÄ䉲WÖuá ˜ÿd”£àÌ–‹úæ=ÔÆeR¥ææõ5†Õ¨•É® •ãù—î¤ß^×ÎüÊ¢E)Á5˜Îú \#î*º|² ;cÐ.Œ+Ö”#Rxº½±B͵•$jRè@ç›ñó±´·Éšª¦›ùl>­·F¢ûãã»si·­Ú‰™«êḭ́•µ·r,QÇÞ)<‚ÿ”<ŠF:l}:ÝÏ}ŠýG'Éž2¶·*ª[ÿÃ[FÛÔ0«LmÑ0¸ý~ƒ£‰<›ÛñŸì½Þr+ïùU³Ùz¶q½Am-ÑÒh¼aºøR4Þ ‰É•ÉOøzÛ4¡ºÚï$Ö+°l1[É»^:ZJaª–é÷ÊÎMÉÎ+†®´Ú–Èn%nz7:´]¡à‡"uâör·V±"2óEù¢¢CÚd*Ò›"o}¥&/Emœm5¨liÙúú­_o[‰H¨Íó$&X"XJ&“ ‚ßw HŠé/³™þ­²Úƒaéª겚1¹úñí·±T¤>»!ð‘IÚó„7%fà¼#ëOÖ›§Mâ }úªÐ.‚"("2ù½1«BÚEdÚòæO¦õ·bç-ö¼ÒO½4j) ³P}™ëºêyŸ™™®P»ÒÓëêÇñ4éÄÙ“>î7ú$*AÞrÔZd.&/ê»ùú%(¿¥bÐÕ¥´®dOný|;ž ™<—~¸µIÝXïóh2É èI‹ÑÌ™‰Õœ¡ñÆ6‰ˆôÑùý‰æ“ý7˜)ëj3]Ö~¾??¦!?óÕ¦(C‰Yá%„`õ÷emÆCOa0ÎòN‰Ç}d* ä âc–“ÊDÑ«‚³¤§^s<‘Âa2@ËOaCtÏ«åÌG-eÍZF67ë£!+g”"ÁËd ì±±1&uÌ6w j÷%Ã0Ø»ÆPB»ÆoKMA†t¤ÙÏ„ôÃPöe˺ÖìN/6½N§áad …J>È5°2É3 B{9$àïŸè—¬™Áþ Œ”TÜèL–ÐËXyjÔ•I±6}!IÊ%¼þŸíž‘VSˆ×Ù}¡z\ˆbÁªåœ­7óåš-~uËÞÒ¤pôý{ÏÚܶ­ì_áhÎtzf™xàÜÞ¹i’¶¹ÍÃ7Nz>d<-6’¨”ÇÇÿý.HI¦Q‚c)iz9àѲ[ŸkÃ;˜¶U¬lÌ1@qò‰]Ü—àû]âräÆ!¹×ófwcÇ"±\0‘ßlÛâ\AOˈñ{£!ÈaÒåãñlb5ïmòSrñ±ë¢×@ú˜¹¤€Þ÷™³.PÆ÷6kÊlúõ6Y¶e9ol½uÈàëÒ¯ÍÛ"Ò±½DéãØ·Y¾Í¡ØwÊØ²Ÿ|wN'º—ï¿Ë*œ²y2°‹_.ûA뜨J’4R)çHe:º/Øj/û uñá•B:œïQÁþ÷wÝÀvÔõI"àŒ%ºL?MâûcµÕƒûã±XÁûà”<ø^ŒÌÞFX#´=c´R‚b'Å>œËu°¾ÕÔ"}Ÿcê‚w+Yâ–«ì,´k—ØuÅ%ÆJÑÝ›0¤ßÊÔ3Î3:‹Gtaû§¨þvšXš4;x§¥wn&ö€Òeè½»~5 gM¡7öèBøF£iI%ŠAyÀ™ÑÂÜ€ôLªlzOfE-MËóñØŽ¢æ×ó<~?|ûöÙ“°þÛÞÚÞœvJ7¨Š`wÔ\âV´ê3éVjŽÿ.é7…6I÷R¬wQ÷ºyNíhÈLÙ|Ë‹”•)*ï=š;NL¥ÍkZ¸Qùææn¼ã1÷#É¢Q –b…t TÊPDiLÅáxG@…tà]+qUèI š£ž ¨¥±]Nj´ò«?^>h¡-t8-ä=è€}ë{S7Þí"ÉöQÑÿëäólò%ŠÁÖûN4ƒ?¬þ†Å5Ix¬b#šP˜6 Iã~IEN“P¾–‡ˆeÐ:Åý=Ú'nTþ¾t“¤Äw8ô(ƒ–³ú·³OôÐ÷¤‡¼-°g-`sâpk‡j_cý×´Pö¤ 3]!PtišÅЬ³fX©ømuÃô}5°[)­|±Cú%1¡‰JÓqª(b~ª‘Œ"‰M„e¬YNvéŲ+¿ï舕¿/½Ã ˆ ïZ9u¿½õñ cþ¦:Æ{áûá“.;ôTÐÚ¡§“ÄÊàYiåìÃ(i.šc…5CÆø±(€‰0ЫF± >rŒÍYÆi^,Mþ#`¢J(Ç‚`¦$ÿô.³jè½»?rt7r˜JÉcŒQD}ŽÆE8I‘‘DÄL¥ IrÍjè«z}0AÓ¸Qù{rM€w§»/8ä~û çßnéãAÁ=(¸oùøª0Ü0Þí2C¹Všù…ïnÕ ñ;õš1áÙˆEh}$OG€¾Wå^dÀ±ÌªL²O&é‚‚Ü!©-”kè9Œewbe$n~-Å)›žM “fÃÕåË ¨Âa> — —Ä«%êj):0ªQ6Ý·Žêû‚HÖ¾5ÃIÅbäk™Â/¥"j'2@}!\˜õ—[Ê(æÖS„ã˜w#õ÷5æQt÷>tËhºG“âadl{ëZ^F™ÜÆÁþ’\@‹øßòˆªÍHä¸ì‡ÅôdКԶœ4©û_=jÂüξ¥½+ΡïVj§-}/mÞµþqGÿ„Àtîª!÷þÉ s >È:¤ ê“`w €BÞ*º[ „ÖpÔAï[×À¤gÖÖ9k¿jÃTvB£v{s„¶D´êÂ$gÙ$Í‹qåþ©×û—ÎjAµÁêXÇCc3yåÕ$îµ€ý©÷9´½Nøˆv€²íðßã&øZe’{€¥ˆp«5”Öi!‚)¨¨v³²^|Ážuî¶±’+Ì]`jQn4ɇ¼Y„Q [ ”kˆfÓ5 ª¹ÚÈýÿå']@lïNÚ o]ß²²DðR¾‹ÙÈ´V𥛾7ËDÍúMM]œƒuÖC™î17§¾‡ÆöxžUáB‹§´ÈÇÖ>°‚9Êaг¹p<ô§÷èñã§Çoz5–Ó"¯r˜KBÏ÷îh+„µBmÂxK–„µ†Ìœï]SßI—µ/ݼÿ˜]Àn…³ìuæeØZyæ¿x¼î$qrÙuÕnÐl_ÐÞµ¶CÄE@ @¬ûƒÅÙþÕ àœ9A#;Æ>oóµ™ê¬Xü¦S¹[ô$E›»¬Ü¥º)–}ò|Z3hYº )lxo7²5ŽV@`>èö_²IVÁŒÚX³B€Ì9WîÎgL˜'ë=û”OÌܸs›÷¼ùî‰N˜”¾‹fnguX‚}$ô¸ÈcÐp¬€®Î¼uPk˺?¥â»³B¹Ï"hmœ™I2ͳIU‚—[“I÷žOò¢!SÒ8%›’L4—I,hxÛnÎ¥z_ç<]zYåe6–WyÚ{?É/'sp½9¼]„`Ä^­¾›ÌvH±ÛSÖæê¥0ìž±)ã¾à.P¼o­:G¥S™Q&|w2‹Ã¾æ Ý R =¹Îvi7c¹ˆ=ÓÍ6EqM#àʀߪ»ïŒ¶åؾÉè!KßBs«…K9 R§Ô˜ì›†;£v!Å0Bwi`"?·˜vÍÇT¡°ƒÞm§Óÿª[àö.vÙ$z°x'PJ‡mçâ:ضµ¿i¶ÆÁšý¼É²T¤Ë²$ä´eEÚÀqmIÚ`sfê=¦662ÊlšÙÐûÁ†w¯_ÕY”^˜J‡×¶†m›,Ò”"Ÿƒ˜Ò)Òi`NM¸òƒˆ+L}"=ßk°ÁÈ'ž„L…`D€ÃÛ7½†Íß–‰ô3°yšŠ‰©L9Èò#]ÄÃPÁ¼ _ÁSe“ÙG¯³Òêì©wgX\:šªw¹ƒ¶3k}­$ ;‹uC¿®ïM¯ù>XšÁ gYCcs±MóQ~~5XdjÄevT¯fä§5±àÍà éz¶4tݳ¯/²Â6>Ø” ºö µÞÍÊÆ[ÅYRÀéÝnÜ<"¬×ïÝnmýV­ß+›B×ßÙ¡Í»•í¡öÕé #ŠÎa¸_ê«ù€Ì|SK­†Ø› Z‹Š,9··,Á˨@æ#¼ƒiÝ©½ áöõ™Å¶ßëÍÕ|IuˆEHM˜ˆymƒÙ-õIÈð9¸í2ÐÈgú=»`ˆ†ùt­lë.¾­ÔĶ’ši^TàYõªbfàËH]ƒŠß»YåX])jpK(ÒÈ×QJ» „#&bO£$•‰HA…a¡?oÀLb=­Ow¼[cÙsæ7Z…äó· ûf0+Âc9‹@ YÙô‚ÔX¨áó³ÔAMã aÉŸ†G C{°šÙXW(K-­o[ûŒÞê6”mÑÌZ‹º´wº¡Z=e*/³*6ÌXïÞ.K; Èk}Û|o•AÖnAãì¼6uÓ*T]*¢y"Úx>óÕÓÓªj˜ëžc«zÞÌuCxSäÈ€Mæ«vidV0 5l0‰˜5 %\’û±~Äâ„q* p…©80¾2Ê¡°3hñà<)ìÀlçE’OÐ:AK¦@دÕÙ«¨9†sR‡B?z4™ä¦zTÇfNjƒý6Ѱâµiîõü£IRe/ŸÙÝâ´ž šKYæ.jF—¹¥˜ù8§bœ $+ßÏ'^èQ6ésmÓ0zw%jX{$6H7ÎÊz[cíOÙŒÁ^ïèBG£,ªóqLuÔÀ:ø ê9À_C>ãIÛÕÞfŠ-7·i:¤« ÚF‰±ÿ„VmŸzïæÿÝôO€¡5©ìÃõqž<~öäuØ’¿Ì«·¥y:ß Ö`ÿØÎÐ@ûüv2¿º6&kQè¿ÑÖgóµ~¸nþþÝ\…“òl 
Ôf^¿ÙiÑš¦Àˆð¸0©)^æ'óû®ûÖú­—´c´‰9©å%„·ý9¨¶?ø|Rj©¯멎íÎ…”=ò:žÎBïúƒÙwã]×Íßxð뉉A£NžÝôÍthÀñÔ#TVy$´$X°ÄžM_­éýœM`°Õ†³sðÏM‰ð¯™­â· úín‹’;ŠŽAkW¶ ¥‚)E}0¡Û¥)‘¦Cÿ÷¬K}fê¾RÖþlµ}Ó4²KÍ2æF2a 1¢-RÙ7ãä UB0Ú´_o¤Ç¡H‡éÃÀ]'ó}ö…¤;»DIJ¹”ÒÊ¡•¾å‹ëÕ_oìŽè5HÇv Ì ³Ú_êÑóTÌoFUdteG@xëqÍ=)_Ýú(uù7Mío¨ÀøV­ ¯ý÷{£}~ÓåÉÌ8·á„®þ €Ë2p® <@Ñ+—…¼† ž¾ÐY=ì p|õ¯‚áË|šMøMr¯ž¦‹B‘:~öä¯Ç5jË È.~½¶ÇGH½Ùá08ñNœÖÉJÏNóÍš =àÚ­ç‚G˘J3ææ¿¯ÛÏ5nÏ&óÙëxQ'\u!æ$Y©ôÛÜ_VS¸îö‰†0yºÎÖ“ÍÚ»ë9N‹áêw˜_‹ÊF<¸_·h[x6IófÞºcl^¿hŒ˜hÁЈYiÐþ†ÅG±JˆÔ22œhÊE¿©U‡k(‰±VA€b–ĈE,µ‡Ï”¦D“”¤¸ÿsžW¶lD |ФH0b:`(J¢aÆDjw^¥©èÿn ‹ÑÂ:ã nâxÕÁÌH‰ÁG)Îë¿:yfïMiJ¼ßtå=µ”/ ,Äçuôâq^˜W'žˆ F¾÷ãñ(¿ü§Õ¢X ¦x=›T ?‹.ã"Cyxt„”8¢ƒb¨ó’A ƒó¬J|£•äþœî‹ŠMúõ±½£oñ¡ÿª‰”NÎÂ5±•þ£ntÃo4ý“K=­ ›~š¶%œõ›ëµŸ6R¥>ÌôU6¹M­ Ál&ØÅQOÑ…¥#Œ/ûâÊ¡&\„A,µ/¥O±8QÌ'„aiM#AÆb* ŽYœ¦ŠM#Åc_ M”“(Ðô´’}2?_%±d¾¼1œö$˜;ˆú© ˜6A¢Àª× Lß”q Ï~ˈ`¸æXûð*ÖœG¾<„´Ä‚aîSÉY'‹Kx`‚õ܈·Oón/ìÈÀNÿ¸€’'±T*b:±E¤œÚ+<·oh ¨Ž“q$€ŽM*Y¥2ö#ª¹ô¾°_ðÅ ðÂ$‘2,š2€X“èBaé H?6HâG)ޏÍ]û /ìHX¥i½àòÿÌ]Ér#Irýžt:‡±/e¦C·™4šƒ¤±“tj+‹•¢nÍ6ý»ž ˆ$±$™™hª$ _Þ‹ÅCr7´N2cdH‘ÖejYÀ[™15ëà”Œ0lBŠÂq‘«-‰Aâ:e)²jI©½5N)f6O§+—4Øë|¶¯#\$ò C.ú\ sUV´Áj©Š(,Õ,£ceº%¤0\:eø‚*?ž™ÛÖ¦6ÊbÕV“ÆY‡N‹F‹+ôÉ¢Oá 2γ°6'CƒÚt.1+0곯ÿ´CpáSyZ&,ج«PGðá6&XÔ2d¢† & þÙZ¦‚2!ª/üô;efVàGP®Ë†YDÍ”¬NAšˆ´YXðIÃ|uæžûhuŽ‚Y)s5>…àËY)Ž9™Û‚ x°’BEÐ2¿¿ªW à»åÂËd„4Ù×÷É9p‹Þçbð‚{EAÏkäËè…ˆ Ž…m]1áPœ:öÓpÏjx‘Ù:ØWEÏrïâ6sÎ… E3V!Gì†]V ç—ˆ}˜/üô+àÚ9 BëFPnPI窵¦Ð´©„>U`HVA>ø…æœu‰ÜX4.„` ‹]j *Œ×Öjûõ°º}rKPæ#Œ/2Åk11‡ g¯ªf$kÑ Øk,ÂÕZ#/Üq4¦ä»è¨³¯ÿt¨,"X›ªEÖ.Ù ­*•Žâ>ê,/e,Æà)ÒŒ>® *þ…ŸÞaÌ{€NoÅÊÍ•![ó«0¯”ÈÈ0öÌàY™ì"IƒÌ¦*óYúPd(9zä€n˜J[P:EËJ¡N[^q%νT”I¬´¡5š–±’’ü>1ÉÉHÓ˜0Ý×D%e4œ¨-$zˆ¡åй5ÃR{"Å#h-Tç‹/!¨H{°sY#P¸"¦Y[dÉðÊRÝ«–&=VŒ O£@²OÈÿèPØT‚¾ÐåÑYmL¶1Ì€B˜®©!1*jKIÙ–Œ@Üq3´áË€Ì% A s<ú4"‡Z´7 ] ¡–ÅìMu.'œ)‚%ÚHè[®ãGW[é–Ñ"1ÀÞˆ/xJªÒ…` Re&jD«‘B' qDÙšyýõ§½«p×Ç]=o-¢šâ4zhs/5¸Z A†ð ô£áMH~.†”<,VDÊp´LU"3Wd GGˆ–‹J¤d&–QšK€ÓÃñEe‰(Iª`þè <lÏ$¹T ”P&§S"æðUÙò0 +¤æ†¶^ðNá+hF‰è™aœªw`NN€õ( “ÁÏ3¼L¾D/A±$8 °J΄–°)«Ω´(mJòÞÚ¡#U¦ínˆD A(rnƒ _%6.1[àÑ(ÍÐF” ȹ& öŒ ¯C@.J.„‹ºH޼d )<´z^+m¬òfèž×V¡ó#zPŽB5“!V („xk¹kq©* ©±‘–jõ:k˜ÌWé¶V!ª´zàœå ù2t0úútôb…žòcA"–t2Ô)<$l 6#¼“)›–è’21lhhLª)½£‡mÖ@j ¨ªRÂY($%sŨªÑÎÇ f’jÐÚºiËHÛ‹Á­‡–±Hç`¦0N#¢°3e0¢ÑÑ`ƒá±JCC¡ ß ®:–ƒ±ìÉð¶ŒÂ e|h3 °I&*M£²âŽFÀžtˆÙ •q¤“µ©°X«„gVøÐ–‘¡•jè¾.šÜÇ bçLÍqy¬. )°$”ç ÙCÎ T„M$7Ó@Znˈ¤+ÔàI^TQ‰È­œbÙ2q€y³Ð<ÂÐ,â=hHòª,&K¸Z[F~œ ,£fŽÏJ׊_Aзš; vœ^ˆædÒ¬æ‘ý¤B¨5ਈLmÁ±†Âp]oAÔ@ªpŠFÄJSXF e«áe–P¤ 1~CIÔr%[@¼ÚqmõÐ~mFàI²D`ÉÊ”/˜SDͪE¥sˆF âR½> çÎ(SéŒKöŒ"õÐ}­EQJÄä ƒ¼@¼©= ¤#²¡¾‘5’O¢ÐðN ªXïÉÔ¶Œ¤È Ö¨da‘¸¸Ö¥x¦ îäƒYtÁÑ\ Â馣µf:!!O¾ëkZA0?t ·6H<)K„5q›¡7æÁÇjfÁ› rß§*ciÚ¹sޏ¥øàñ ²@ZÁÊ<‹‘ž-ŠÎ[*1G"7aüÐì,¢⽩ i¹{8â¼`Þ „c¦ú‡ÙrHkâ„Î@Ò"µôðcrA€ß€Œ!†j'=L8– eÛ1œ9¯¸çCËh“²áÌ”Z³/©ŽƒFŠXip‹ÐOÊÈ€´Ÿ@JU­a;) "üDÆÞÉ(91§¡e Àc 1Ç#–œ­TÁÅPУˆÞÈyòp"Pà„•ÂFa±-ŸQ€k!Üu†•j-äfàb d|BD à¥lËd‹Uð‰ là^BD  tª5¼-ÕáA´ÿÌ@ëF}êüºK‹«æµ¬åö’ýMÌ)JHo P2‹ qYŠ Ù“íCu&âô¾_—©®ÀÉ»§—>)Bb‘ƒíÁ)sD‘‘ÌÍJ€¡–·”€ß^… ´¯a—Ðlð=Dø6 Pô}[éË9$9øX'¬„GÀËÆiÒ&{„´?æX°$ˆçQ¤R”äT™§=¸Žam™¤3·‡îáGÔÊ%ÏZêb“äH”2X‰ÈäÒ,ºÒº[µÎ²Nå³%£–fð³,©¨uÕp ðÇê `†·ßd¾h"Q&í¢ª )Æ!W¬Ú ™€Pe`Ä·¢÷ˆG0Tð‰¨‚IÙ[/’.Bæ’AÌ‹¯Ì*5ò‚ö³êÛ#ƒøŽÖë¡zNñ(E"4£’«¥‰·h¬DŽ,•ÃP«p>çLæD¢G¼¯ÂGØ*wïdäÒ:94h£š7µÈ.U׆eÐ÷E#¸JÅfÁu@$Åhˆ” 6ëiÅ/þRôÍmÁƈV ¹þ{µû/ÓÿZZ¼~ÿóréuɸôúrõÉï¯ š—+ŒÖ«žþm]Øa½ª­}õ÷]—V5¦?ýk ´tiñí>~íõ³ßÿVÒÃ|qõXhÅÝN'¿ýûÝ­cûGZþ$Ÿ/µ; ©¼^|]¤¶ë‡ÿEcÓ¨?ü¿ïž¾yÞßf³I¡Ý’aòçùÝÃl±*ý¼}ïÞio»3CXQû6IR¡‡M‰ÚÿÜ.ÎôZ{ãŒêí †$/÷ÖÛë.«…´Úñ½]+̹­ê?çÛ«éÙºö/ðÅ|¶*ay´ Ew¶N'Ù×n‹¢=ŒL^Þ‰2Ü"þ…j%÷—0²«´Ü²<<¿¥í°éHÍ’Ã÷6‹—ÙÅeÊ%•”9¨RÐÇ‹»I9Ïuròa5|øvóÃ_ÿȽnФ. 
×ÃZÃz[Ðën§·m_ÏËeÆ•ò©'5ª‚ÛöR˜eÎvQ˜Va›Õ@“«ZÒoiR^ÇáÏi§Ëäî­éM—úÂóšûZý¤Ÿò<ïPŸ•”<¨òVG|œÂ8ß÷ÁAÕ}úiÍlúòüÔKqð¶“âÌŠ{=‚ím Þ}ìøÒ)èä(xý·Sø]ÿm”šO.z5Úî;5zØÂaå>åÏ%àw4/y1ŸœD~Ø^?6£ÙI']ÒØÛ§ªû¨I2Ïl'5ÙQ’t˜]¿NŠÝ-R˜l5´Sz>òŒæzž¯O¡$ÉähJÚzýÖ²§/ïîn:kéÐCšë5Îz©‰[§T5Ù?ðmaômyçûy½¨Ìº(Ž»S¿»Éí` o׳3õ‹ÉI”&NàÏ÷\ÿJœß÷¬FÍçÑôRš¤¥r]”öFF×hìûúHªwƒÚhªÉu;›”UˆÉä Œú쨔•\¥ Τq²Zcö±Hí…ÑdS²³lÛur¦WyÒ.о_Gû,:ȲÅ&–Ï_š2TßîbY}k]ûììï¿l  üËvUÚÞ2láÖ7Œüõì¯w³‡ÉÊ~ÿ²:'i)ÖËò¸{µ ˜·Þt` ¼I`ý±J¼‹Ã%ÿ”ò!ïøï+û¦»Ç)yϲû%$Ø.ÒHýǾ¿ŒwÓ|–©ŒCXªB?½XUoŒ…*ÆSÅÞ’ÏâoË"ôéèÀ—†t(i­ô։݊åµNrù_¾ÿyrÑŸ=Äe¿§z±9½åŸþy}r ÿä,,;Z1(šÌ|mbkÖM Õ¦C¬NŒW8ÏPQÝ$†äøËrm˃ªù¡ú̯)iî.–ì(–Ú;nýA¦og‚ý‰i¾¿¸¶2Ê*ÑE<%F•=¿£÷ÍæãO ʾ¿µó8í“Ö¾  7jXÀ…wôÏ‘Á}75úÑ¿ôú1F©.##Û•øÇä5[‰ùK”fëþFÍfæ4Êñ§å³n¨¬Âj úúg¾ŠÊw>¬þæò¦—Ú¬º‹Ṵ́6•§Ÿt«]74Ö»“Ó4ßYR ªLçí·ÂËÎ;›—çë¹8‰B¬ž¦}ܯÑ}ømÿ½Íã­¾|é¥ÏD‡S.•qjx¥¬«Ð}I)ûïmÜE4®ŸR\—#íð=1ÞPävaàÍ'‹rÿ…É£j^²»ïm,-\ê¤23°Ê6›ÇwT8–¼ÞÛˆ¼¨§PŠeb4¥lï®ÿÔ0Zׇ41OúM˜Znë¤&;ô´Øûá®OŽšuzBsû|ßoÉ"D«ã/Û'~{ö^uí¤×ëÅA&¿Þ=¬™ØZ{ÍZ„ ÖEmf¼ìó±ÜÓ'RòÇ{›(~Øç^JqœwRŠ'æÒÍão²F¿©®ÃOm.]îÇ®»+rØØô:<Âç¢ôûš1‰÷½”A%»(É?b¾ùSŒ¡ã³šñô´8Ò†ú\ÏÐmV:­ßãK›T>›<\\^‘Ùù)Í­™ºÛ>ŠrL8g;(jŒDëºàÍãªëúô‰»/Qö®dxÎ'R™1ré÷þžXŸŸú©‚SÉÎãªp/z]µîíówﻥ®i²p/éj²['îØÅð}v—Wgñþzöó_9ûñPH/3ÚÅpUßÎV“¥ËÁïò·ƒ ß­²QÖšÜ<Û§Ççþ]ì†é>§ÁëõÚ‡cƒÜÛßmª»Ÿö"GNpÞÅÅ—ƒ¯+=§ÿŽ®%]~«±..¦§i¨É2¯h÷^óº³vã„íËo¾èLRZ[}3º¸žôoذò]sÚoÏPº³I÷é¾×²E'™]"øH=}hÃ=;ÝÓŽi:gѯ¨¬ùq9¯³þm2ãÅÕƒt:Q̬”on¯Ÿo÷·Ç™ç`ã{cõQçœh×UªL³¨÷¾îk‘gÈ0– 9ÏFjQ{–ëœNo ËΰV»tÜo#›áß1uô •êõVk›‹Çh~ìm£€‡±.mt£¶ñ(tïöµF­Î˜Æ\Ü\¼ôn7«o÷ñáeCç1$Š„Ïˆú󫻃VÚ¹%öÄ-yw}Í@f‹¨ôÞÆH*·Ù¡1£e¨[€Ç‡ÅùêOòíÕbij[!Ýø½2ÎÇæÑ/žoú7ÆŽé[‡v×ûµUW§ð¯1“‹§þm-mu0ѵR­¼š}3\_š½mRÚtk“þ—ŒnÏO©w›ÔXÉx×@éŽk¦ÊEÖ€£6)¿¨½€ÉÓZÞ¥Ecë£+‡>l¨^{¬à0®ùa&×¶ËÆÊÍûJì¹Þت“ÑH¿ªÑ‹ôô´·eF2Ö¥eZŽ-6Mºš-O:DÈǃ îêëi¶ý0V×´õm¿i¬GØZSn€ÏÛÛË5ïÒ†íù»¡Û°:¾äõnB>òxcf¦¿Àö$xü|^Vµ½„¦ â4:ïˆw!ªbÄåç›WŸXøvSóÜkí[W5ëÄék}žåFs¼+77ìâºo3æT°mÿö—ÎÒÛÓàÓ-Šf=e¾Ò£lHQÏ«k)z±ÂP¨X¢L©+Ò›4I‡W[Û¹dÈâ@†JHÄ®>Ow›°2UÜmH:å~BµÐ&¶0yŽ”ê‹MÒ´"ü@1x–ñ›F‚4v‰Þ¢Ú‰¬†¾0Pá4ž6n1s׆0}ê¤ÕÒ%OB³3X<\¼ŠÂÉíÕ"žMÜwkHk%üPª›üs#mŒéîsf NáYË„Òaû:æ©©Šg½ÁvÂ,»úÉÈ€þM_¸."Th·f=Ì!ÆRve·Åuóä0_%v-PSqâ¨òcìÎ$|ø,ôƒ‘ìnú¥7- {Ú¡ñCËnO |C¨?æ“-Û‘ýš²T\ƒ¢WŒ‡hÞêæq@‚ ÜjÜ[NÇô§Ÿø¬ÿ:ê&XÈX,`É2QãIÿ#±—¸s¤§?h9Øà†,»æsV&ÛNùaÈÑ5Ÿ5{¬æËy܆V1„€4Û|ýñŸÊ9:þÃsümø§…îâ/iП6>¤íÔ®n—©žªtŸñÛfôÊ¥Ú ½äÁ|ÖõÕvЦ7òÔ‡ýw@\ì^@œB).8bJÞÜ÷'Iœ˜¤Ü3Ÿê©™¶M7ÀÄåFû2p¤÷¼™6Í-N}A»!öeÕ/üÞÞON ÿ‰Dæì*mJºØ  À$ž¯E¾ 1ÅÔnȵ%Å8åÌ*+œžÌCß­n&Fļ ÒdU!G³Æýi8q€°ð‚«Þ ŸØ?té"È´¾±ª@q³˜å²¼`˜Qðb˜eÚØ²•P„§Æ|º}ù¶JOöðñªÍc#RXô욟â«;ë\Xx´$Þª4cdzhq£¿ÜôÒ«¶ƒ2ˆã½"·7Xé渂©R@“ëöST™läl )éô¨Ùº¶¬†0l«e· +s ¶9b@ñn:b®Ö`õ‡’º¦îÔ¨Ù g9íJÚõ§… úÕŠ~_©P×ÑçKœpe­†eÈŒ­LŽÞÐ1Ü ïžŽÖàO^läøOìqÒ»Ïq¯iH©ÕFÜáŒøNZƒÖÊN'ê²L°jJ¬(´ÒçýA¸Ø£e^~ µü’y³bŽ—Ø@$7âÀ™þÀ‡(=u æa3p­¬& ÇN–§C=„‰X/‘׈ØlpÙ Ϙ}Ò<03 Y¥–TØÀb­Ùå÷Æ<ˆü¨@ñ*M˜ ^5Ä|¾Í³i´ˆÚc öùéÃËݳØTýÑ6pÉÓ~$¤»Ju»íÛ]äZe׺WPÎzh”ÌîUËçŽ 4“ µÅÇNL-¶“|í¬ëÀ²ÕÇÎ:O=«ûTß÷›•vÃÄ]2Há‘M¤?`1 `K‡’jóqSˆái¿†¹ŸˆÜþËýöhÜè ×T³%aµ_ÏÓ¤(¬?ÞA"gËÂK“ûvkÁ2(ve7´ƒ–®Gµø Glæ·É¢x"ÀbˆK£0+ƒù†Í´¶F¯ íæ 6¢WÛœkâ’œ6,Ü×ýaÊa|ý& ‚¸’X¤§ wÈòãkc¯k6X£DEÛÇ&u…²Â=„/°ÚÖ3BM› ¤f¤3+¤CŒnÓ1ŠÃè(gÂXg®Èc-è¶Jµ489c—Îß“4ɧQèÜyIi t¯ÿxVÛ¢˜úóe:`$z¾—GN0õÒI”;ÅÜñ#'[Ì0æàYÿÁ¼·+k_`b’àYW¤ ÚÑÚ©Íùm^ª{éü|ÿ†Õ¿œ‹e¾¸ð“ôn£Û<(fBErÁ¾Î>*æÈ_8þE‡“î¡Òe¸s®B;Ý}D´Ž†¾GÎÙYï¾j;›¾ØN_ð0ôµ¡~ +‰.§VXý)í¥Ñß`u,%èl}°àŠeîàÞ}Öâ¬ûÜ×Ü/©•·1ŸÊ¥è ^@;¢D óµóíû¢µŠ}æë:óû÷…qó@×b g0ÈÈL»Ö‘f˜Jp7:YÏ@CÈæ‘.mF´&ý”f^LQÙ÷8X•È›D¹Æ¹o†Ö¾`¸ÇŠ5˜VVÑQ-¬À¬0‘¢7F˜q~5©‚1,0y2Å!‚v"S”¶h¾Ù†%1Ì<7v=¤½P ††<®"º¾Žó•ˆðe3gì1‰!YcjçŒ}_Êmá 9†3“µ“]¦t§$Ǡм…]d‹]¦Ì µ‹à%1ºæ ÑaFcxžælf—=&IÜv vÑe-좖ìÚðñ™ÒÎû‡ì°))K5v*cAdíËûŒ£uÆUe#cìûT´1Ô‚1GôÅi cØÑŒ±™¼-Œy"–]yWÐŽŠ6ư:cJoäË]ê6ýÆ,øbßk5¸%_’ _Êåg£à04år©p÷”Q.;4 ´m3ò¸qÔí»ã5zԹŨÑ—Ð-£.,F¡$^[¹Û(p6ƒq’Ž— }ÚÅ€tÅ÷ ]½Nø>D ŸÎbnŽh4 ňE£ ütÖÈš# hÖÂaÁû¾0nSTrÍšÜð4qɘ;'Š‹$»e¦pH|Q²à|é;o‚[ý¢Á)Ò˜ºšónT˜Ð¤Dµa@97PÎ ”óÊù#ç…Ó,,Gàᤅ#Ò†#ö}‰6L}ƒ°¼Î¢t|ûî.}íœ?Æt’0™P†¿H‰)É‘e }Òå«î]hW/Ù½Ï<µ-AÙô!Ž*¾QzŽèó¶åDuó꘾d‹óNlœ÷üxþ¼Ø³Ã$`ª™æs€vJ´qËÂÝ—˜¹ 
O“nó1-Ü}Ó—9ÐÝÝsI‹Q÷‰Å¨2RQrø@žç‹@ï]®1´ã‚5³ƒºÛÂC}×óc"A§L~£…± E¬¸ÄÂk‘©#Pµ­HÔ"8pD_õM‰}Æí@(ö8fI|‘Oçw(ÌPyÅrµÅl$óc·sZÉY¶¿{ùU–îæpÄÈùß(ÿ”¾_¦i4s²e>u²y6r^Í?¥â$\ß_,ÓÀ+¢í§^ÎæéÎ¥wåæ ,ƹ fìB³yš /¸r¢Û(-®Ãú›;Á,ò««¿{÷0\ã©`¼Ùd¾HŠéõÈ,š/MB“]Î,J'ÅtÎæm«q¯"ÿ«Û?ßgf p~Z̳í'_gðDòFæoÞÌKÁ„{ÿòs=£j$ßìÍfèK´˜;¿¾qÞ½úñýò›ßÞ9¿y>Œ¡®-«vJ·»2ÄýñÛ¯œiäGó‘Cª‹å+Â(+¦#‡~JßFA²Ö”½/c»ÑãŸ&¹äñ/s äñ¯™Á±ó¤óˆ.ÁNœb à]+· ~}s+V7ͯΫŸ`>à[”¤˜A9ë=oU‹C±KA5} ÚâdQÒ_õ΋i´WÉE£éšÇ±Ùå?Œ›2cSunÄ@;ÕfúÓ­hR#G 7ª¥°èM“–ÅZĉ$5 £`«töe[8Bmtjú n+«úßuö ÃHVL°Î$´“º%K©…9bß—js©E@H2âJ¥:3L»ÚgG’¬ðüYTuhö%ŠfQ¹¹X‚FHR§¼?râd6Æf+2Eð÷×oÿñãÛŸÐÆ‹ è³3”9E9èºú ž‰½šÕ–:è³óöçÿùùåû³ªÂb^̃9h+×i ‡K¡º3L»Úá0ä°ãÉá.‘JЮ8ªiWÛøVr^xÿæÃûÓ1‡í«U缆v5Ÿ~j¾…7Jq;¢@;©¨qÕ¥órUI[Tḭ̀ÃÊj w®g»µ"[0¨Ð—UWg«¾ÎjCzV¡9C`†Nµ¢ýàäWIéÊ9A ß,ò`ó¥æÊÇÇp˺+kºÐ«Ô d’.wv£G€£ ÙNßç‡ÇÂøf®ìÜE5í¨øVnTSªå°FëÎéëf-@ÝÈŒª«“ób%$OË íj!q÷Hh·Vþifç÷0ΧÞ" ÇIZÄå p.._ª›+öÌSçåÜØÊEÔ„Àä¤(a@>. ëD˜!—λêl¨ã¥é¼(ûÌË| +•ŸÏoÓóùbr±‚Œ®‹%Ê—™Q&£ÿJ“Ù;õV3ŠÖ WöÞŸg«‚0gð«qÎFg³yàÍÎ~8[EJ’®•& \Û„Šk—͆$ܺöSÜÊdiÃ=[Æ<œ­DÐäãUçØ%犞F/;»¬·—ì5øá,îMÀlçØ<»¾·õ$6™Je^O”šI ŠÅ2‚;·à4U¤¸g_¿n VUL|¹<É¡ÉÈ&>°ÿ‚¼Ï %Yp±&çX2"Î1 Þo-’kóeÑ$6ƒ,1ÁØC± ÕøTÈd—LJó6§©øè&Í]Ê:m]-o‹0[„üâÖ[\€"0åú.¶ó½Î U®:7OÁÅ4*=NT‰}xŸÙßé¯hÝMó?Q±x(ó׿t>˜# QåÙ;‹(ˆ’[p£Í‚ ³o¾\€/ý·øüŒäÎ|šWþù:Up÷åbûÛ|’À@ßV¤¼ßŒòæY6úó—9,£ðóÇ_E$¢\Ð…°æ!F$C>\dÊaR­=`%þzùu;Ú ÿîeåKM,¼ì,ÉF[*Ç©ÝÉ·oÁëL¸`ìWñ§Åxƒ¾ ˈë†ù²"@céXÄ(b`ap/ݯκu0à#S±˜DÈÇ.<솱}ù~IÈ<+±Nƒ7ƒå-›¨\:Žóë±·È@œn–Q^ŒJYuVZÝN¥‡ Y“`œFÉd:®fT¾zÑÌÜ ¢qžz…µê6üVn¢‚Z4b3ö&0o§ V¦ GFÔÌ!h‚•Ñ0÷òë¥a,¬`uÍgËë´üõ÷åj¹7¼¯4êjUÿ‡9Õ;úxg~€â48ãå2 /œ?˜°Ø(Òð1GHÉP#H"úü x?-åL«Þ æÌ5ÌØÕ_¿Íƒ«Õ¯%cÊÿ_™•æëeoaQÑ_ê4÷‚RP`N cž,˳f¯ÿxõ]0¾ ÆÿÁÐõOÂï®yX‚é²^ð× Ý®º*Í>Gì! Œì¬ŽKæÇ¬vDKŠU—½ôPv²Õ®ŠNËÜǵdo|‡ UTžî«Â—¥'´(æŸÑ/o£Í¥q ]Š‡Í»àFáM6òP¿üE«ùû´³Q "<&42eý@Ê”‹<¢=BÑ(þ?Ùl´æ2£¤™ËZ¯èïâòuÕè—7 îÝ?«Ë£*k¼¼1_Œ’&yáüáÍ–Ñžvµâ¯_V#”»Œª"!b\ÑJ!\fm‡!æAü`\-Õ¹Çúú]ŸJï‚ð‚€im+ò$^Ù¶p€3r½W#ÁFÌ€¹d½ú>J vÍ!s÷œ\`qXpÖÈëb9ÂÌ}rs(¦æ›­‘‡\Ÿ9DE*”1âDÒ Ö*æJ>?û8§Z€v|‡È݃Ü=ŸkÍ8AðÓ9xßeì›V–ï3üf¸`´ŽêœáÜ=u¼o{Ö7…'Ö›:ë`‡û!ÊaÂ9ÙÛŠÙ—“Ýý…âòÔ¢ABÀñd¹ÏÏc|:ÿçôÂi7ÄÿšÂ‰kežxùù.ˆÿ\kÔ¿ (ÕÝb ê¹ûÏ¥[¦e·ÃSy%e”Ñê,Æ·ÊIóüÓp„DšÅÅžŒó#´rC„©bXJ»Ì}~.SŸ—Ë{Êí0¿£û`¶ #4óÑm’åÈlD ‰·ÈF&#Ã1ÑÿÇw¯¢Ú;ñ£è/?ò5w½HD( ƒ±@KäĨ*/àsqªI‚•bŒÉ®…¥‹ÛÎ[lG^%cDaSrö1ÝrÙ’lq&ù˜¾j‡ÈVûãre­hj©'&¹–ØÕvÊ&ûdtöÂÙ B_T˜¶œÙ¥Ü*‰»4M’}AÉÚþë?]`ßJ+o¶MíÄóØõ_i_è^ãFÒCÆCZ{! 
BŸó@Ç, èó ½®%ËŸ|àûZoi_iÿÊ+­ó]³=›f#®¤ŒXh6íê'·yßÎãß|9þ[•£²a5¸òXèsÂM<™9³Õ„Ø•ç14íéQ•¡i«>P‘€²Îš1ÍŒ >­tPú¡&+³Í'¢2€?}‡„>‹žŸq˜ñ>KÒw&õ`Á òNÚµm»=Il{ÅÇò¯ Mÿtl\oÝà§ü(àQôì=ÙnI’¿ÂÇ™Ád+ïCoó°ìKo£w°û00„<*%ZE‹´$ØßH’KlÑÌbf•Tn¡Ñ¶Y¤¨ˆÈ¸¯d©n Es‡ ¶)!)¨Ëè”âï€øJ¼³¤órsXk“žiÉòH=¦¬¥”+¡LÆQ1ùüóJÊäƒOûàSƉåSÆZ¹™·Ðò;c]—yMŒ6(#Pl˜BœYƒÀ©MÌ Áñ¨|ŸÔ7˜)zœúœññ«ù¤½«^Ïþø ïd4sƒn>?^ß÷—!…xmOê¾¹KûÚÏö^ïŽ,mn2>‰–qeâ˜žØæBoÕ„¯éˆž9;ú $xäF !º¼wòKï¸*Ljຽ_ûéèn¦—|žQÏûR8eœ–ÉËëËï§ãì)Uþª pŒ®¦óf»duá;g=ZÜÝ>~Cü”LEïÚF{ÏÓº©Ô?¼t\ €T¡HÞ_—g›¿ 7Óåš[šTšà —Ú8to–×ý#EkÈÞ+¶à%þà g5‡ÿ‘œ]> ³(Ãyûùg×eû>øô-f_/§s$bTJñÞæ|öùJö+te¶Ü·»#|å`?]!¾óû0+<5»˜nõÅ+Æðo"Í¥w<5¾ÈÙg5†¦ï+ Vª­ÚÎËÙçHEá¥SÇ‘Xú‡‡þ1lm,’¼gÔžÖ´# ?›<7ñy8*^õ„ùr‡DûRDA°IË­úÇ¥uÝí©¸lÖ‰§EÖ³$C÷×r1€®ª¦A³{ûZÒ:"¦ör~»©^ž=¤tíÚl í4x0 =†ûï’‘ŒëÔTq”ùÂÛ;ø`U•–iÁܽ4²Ü«©›ÏnÓk´{[€Ä‡(xH„Kc@¨Pƒ5+ÐPÏGõâ¸tN °ŸN¢k*W¾lô0G°Í XP}ÿ8_ €Tq\xwݬ3뛳ֿۡ¾ .Î’έæý£d ÏéôÙ46þ›Ÿ5»„”]ÙÙmKI(ð¾M”€#r3MéÉØé4†‘…]a„OÒ€,8Ús{÷íìåKs?Ã/î ]šÙB}¶îÝ‚…¤ö‘s¯FwKÿ€HaÀnIÑëü`³L‰¬cÙµ«s¹¸ä×ý£@ªEsí Nq‘òÏ"8¤ùr?¼ž&óàèOã´ÙÙ›%:ÈÙh˜zhÜ5áÊ®Z8<.æñt»¯9eÇÅ®¼ŠÓüärÞ°à‚>YFÁ” Ó·½²w—Í Å«ë‚úL6>ŒU)¢íü–ýH*¯9‰aÞ,–à¤*d_IÚx.´0†›T6tW׿ô²áf V†|§³}†&íCüR‰ï`(+çkö åøQ¸€Tüüxß?N‚ôæ‚ÞÎnZüè8<Á@Œzyy%̘‰š·/h%…#c„a,‰k–=W;‡AFÔÑpg­·#áœ\k$®V«ï dQy­zQ4>©£B ­¯iï(ÚŸ~XX ÿØ ” ›TÏòcî±/àì¹ò¯¥ŒÞyÍ ‹…ø2bÃögDóæùe³z™ N¦’B¸ŠW«þä¸Žß°Žµ§!%NWßÎ^}ŠÂåýõ(ñ¾ÕÂS;ÃS…\©h´tAJô—åçp4•J¬g;·Á„‰B }¥ã¬$j”³REî)qåÅÊ  „®Úƒ¸¹NØ¢Åýêf¢KV'ó¹+ìî=@Òïxp>"ºx¸ú>Nº—Ö˜ƒ1ƒØTUš€_®^t sAsÐ]¸7Cà"ë´?%K$œG +ÙÉ‚Æãl JÔx•õ)$SU»º³B£ Ÿu]á5¹:ô8ÚAÂÚkô¿ÏàL]?QÜ.à=.ÄýÉ• ‰9ÁŒg`axî껺¸K;Ïö Á½ÔZH¸V_@ÍÔAmio©ýh—x=ðØ/‘$R?}ã(1®Yt¹½¹ù:Oîö®`ñ=Ü?^€†î¥Œ÷2n2©Á_z®Ñ,.èMÿX•vHï¹{{£&R ­‚L̶”Ws>>ªnst˜¦>;C~>ÝvÓ.Ñì‹™Çþq¡'ÍÕírµ°à%6M¹†Ô”°œ¢§7¶=Á_îfKu:RQ)2`;±¹uŸ/Ö¿,M™ü4ùÏÕz¬vBz¿ñ«åd»W» “¿¤V¾ÔE6ùÛ=ù`ük1<­E!/à¥ÿÄÔi•è†G–_=ü2~Ý@_àÓî:ü”ã´«Us³Xýeù×r|ôï~µd °JúÛ»5Km‘Üæoç¥Mp¥ˆp\.¼Ç“°åPêRiÈV ©»xfd²+Ê#G‹ái}àÚ¦+à„o¹ü¬ ÄŸÔÆ®}í®Íõ%¯ÄúâX'R­…d>YnnF­€¿:T–èÀöã°b I±ÍæV[pw¡èêV”‘ö—J®c×ÌM1 B£PZ5+ÆAšÊþðà§ y)o~ZÛ‹ÎÂ|Ü|´¾æ"O?ôcMZ­eÝ•\iv·¢ÊӤą)®¯×Ääø ÑQ†-<˜ãìûò›.:Ÿ|/Ìœ1Ä}”r¥œpœt¾é"›×ºÐN -¸8Þ«£H+¨|5Ò¯¼ø-‘ái» !夛åóVõÍ…ãðg0éÂqÂ'ÿB‰Jûîñ§ÿ{m§ý4&9 )±{N|À&¤•q¦-²F'zÐ&x&¬~×3†ü•].§Ës ]Ú…Ÿ–ò/ÿu½?¤ÊWÓDFt 5¤„§•ïÆ3ƒ˜’*Ê ”c®·½ïÙlBt#nÙ­6é¼®?]î¥8Y$î~)NnÓ ™ñ<° ÆB"MY41Ë"~ûƒƒÀ¹n¢(qòJ/LUàt]ц]$)/B¼FÙ®e†rˆ äòpÇQ¹8–'ÌÊ›­Š±Ð¬6o÷Hr]î”÷²cQ”?9¥W¨¢.­™»ràÐXfR¡+âxT9˜²Tßž¹á“µƒ½”¸ø'hÄÅø=gillµŒ¥;U#ÒB*ä •Üª†iß>ÐÔ”—ÅOl +†³ µÍ¢&ºbX¹YËëÈ,†´µ€ïäšì›Òú…ñ=½äĆÛCÐK¦QôײÜ:6W $×ÅLÒŸ_œ‹„<”dê`Œû&´Ä¬¼LÙÝÌ(T®¿í¢“Ú©B‘M!Z\M/S¬RhÃt ŒVë*=Ò^ j½^Ò!OY œ©â:Hµóg*Ù2N§ôƒ»s' …{à壃ßÅPS]­ûš“ª}ª¨b$PU¡¥µ²×• ¼`5;;Y32•§f5õÝñ] ª/ööE õTßq:kAKBXB”W‹K;LƒÍH¯­þåº1NË›ž;ŽQ—‚ü"¼î,À¶´ä´˜ó Fu{ÎÍï–ÃYœu­Ä%Æ•1Ž–Ö4G] › ¦iO±tfæÌ 㠆͆÷øR´£Ñt¹®ÿŽ‹\1ïkçÓõfðMàÃÜ*¹¼‡FŽwí‚’gt0z±qZ#oÀnò€2–`Du"ÒÆ GÊ:÷AªÜÁH‚öØbÄ #ˆ -6±AÁ8kiãIÔ±¿úH6›p\+í=góò†<|\cí~øâGº¼­$YÑDVÇ 5푪ѿÓÎñP>:¦—À¢’`iœwë›2ºÄ­éº—é8?å®1C¤íÕrÅôníè_÷x–yU÷x>¿QÑ—Ñ<…ÝÀ=Å8êãµ›…ü¸}ù…GµFKšOŪuQåï‡,†Z‡÷õ€&„pArHmFÚDÕ‹¾#.ºH°E®! 
ºpH‚ýè©dG9ÈÆ¶(Ê2oŒÓyAJ\ZjØ`Ó4¦¾TQQ¥Ëª"UÔò¥Ïžžs>WClçpÎkެ†Ðug|.<=gL!=gñDAIÎâ¹Êws]ÕV«<+ËZ uUæÅAÉ€M¥¸¶Mn›1g1ï9‹¬†œÅvPN=ëÙ˜Èj 9•„pÅo' #à:|Îs]½ÕÍ!`kØVæØç;á0â0æé£ÆB$)!«ª3[Õyæ«ÂÈOâTEƒõ<‡±ÝƒK:Œ6 ô´!GãÀMý;:ËÚuÏuõ%íl¼ýõ%ͨ~Æ\‹n'{Ëâò3Ú[tÃ|s =8ñ1Ž(Tq%˜scú>µåxã•\,g³g͇{:Ò˜9ÃÆze-ŽŠE dttxÜ”dÆÍdû0«˜HÓPOr¨äÀl k­ØÓ¤Õ°£ƒnÁ nä+t^몒K6äÄ|tMž×Êådê¦1ž¹2¶¬š¢®1ÏuA*¯òåZÖ€»“ƒë`‡zp¥#ëPÑ.d½ k£?cæX;i3fÎÔì„éÆºvPræê0±C‹ý–ƒ2YáËÂø¦2ò¼1Ó­è›n0lºÉ ’“_ym#Kè”I1Ý®öɤØm×`ržÑu˜wÈ¢^ö¢ü'FŒ¶"ÝhÒFLÂ2C±Ï2ëÈeZ‹ Ù/6ä5 T36XÞhÃMÈê’f§&7nK7cfÌ-øÉp·â¯‹–™ (¥,DVCèÔBöí­ÆËŒ}3´QÖ)dRÓŽN ;JÀÝjT%ë´G¤ÊlÎ@‰›¯nßæQNî©2Ïu¼4JÜx\ë¸I‚Õ(óÚŠ«4LåXc­×'‘¬€$Äð³°JÜ»tòeñ,-¸›E‰kyGÝzBk#²:µ,Ø’'ã¹É °¸†®Ôÿ’¬Ñ‹ò"Ú¸JÜ^T±U”WU‰PWG‰KºSd³|M”¸d±8È5C,²+£Ä%Hâ7ÁmÁá'%ié´]¹&êí¥~nË}êj«Wõç¯o/BüOyìO»Çîï¿£2êë a9àOš•;:cì«öå-´7Çд™ÔÒM8mz§÷5$òdP›% ï8£½ÞddÝs€3²JEX÷žµáK/.+Þ{0n@>tínªë?Å HÄ䬚Œ8·tØÅ0½Ù"ÿY± [+òo5ìØjtY ¡Sý¸ÜíŽÏŠÝPáøqèÿ߉OøB— ././@LongLink0000644000000000000000000000025700000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-controller/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611521033065 5ustar zuulzuul././@LongLink0000644000000000000000000000026400000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-controller/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000644000175000017500000013744215115611514033104 0ustar zuulzuul2025-12-08T17:53:42.858532015+00:00 stderr F ++ K8S_NODE=crc 2025-12-08T17:53:42.858532015+00:00 stderr F ++ [[ -n crc ]] 2025-12-08T17:53:42.858532015+00:00 stderr F ++ [[ -f /env/crc ]] 2025-12-08T17:53:42.858532015+00:00 stderr F ++ northd_pidfile=/var/run/ovn/ovn-northd.pid 2025-12-08T17:53:42.858532015+00:00 stderr F ++ controller_pidfile=/var/run/ovn/ovn-controller.pid 2025-12-08T17:53:42.858532015+00:00 stderr F ++ controller_logfile=/var/log/ovn/acl-audit-log.log 2025-12-08T17:53:42.858532015+00:00 stderr F ++ vswitch_dbsock=/var/run/openvswitch/db.sock 2025-12-08T17:53:42.858532015+00:00 stderr F ++ nbdb_pidfile=/var/run/ovn/ovnnb_db.pid 2025-12-08T17:53:42.858532015+00:00 stderr F ++ nbdb_sock=/var/run/ovn/ovnnb_db.sock 2025-12-08T17:53:42.858532015+00:00 stderr F ++ nbdb_ctl=/var/run/ovn/ovnnb_db.ctl 2025-12-08T17:53:42.858532015+00:00 stderr F ++ sbdb_pidfile=/var/run/ovn/ovnsb_db.pid 2025-12-08T17:53:42.858670129+00:00 stderr F ++ sbdb_sock=/var/run/ovn/ovnsb_db.sock 2025-12-08T17:53:42.858670129+00:00 stderr F ++ sbdb_ctl=/var/run/ovn/ovnsb_db.ctl 2025-12-08T17:53:42.859684857+00:00 stderr F + start-ovn-controller info 2025-12-08T17:53:42.859684857+00:00 stderr F + local log_level=info 2025-12-08T17:53:42.859684857+00:00 stderr F + [[ 1 -ne 1 ]] 2025-12-08T17:53:42.860326794+00:00 stderr F ++ date -Iseconds 2025-12-08T17:53:42.865701670+00:00 stderr F + echo '2025-12-08T17:53:42+00:00 - starting ovn-controller' 2025-12-08T17:53:42.865753812+00:00 stdout F 2025-12-08T17:53:42+00:00 - starting ovn-controller 2025-12-08T17:53:42.865781332+00:00 stderr F + exec ovn-controller unix:/var/run/openvswitch/db.sock -vfile:off --no-chdir --pidfile=/var/run/ovn/ovn-controller.pid --syslog-method=null --log-file=/var/log/ovn/acl-audit-log.log -vFACILITY:local0 -vconsole:info -vconsole:acl_log:off 
'-vPATTERN:console:%D{%Y-%m-%dT%H:%M:%S.###Z}|%05N|%c%T|%p|%m' -vsyslog:acl_log:info -vfile:acl_log:info 2025-12-08T17:53:42.869896614+00:00 stderr F 2025-12-08T17:53:42Z|00001|vlog|INFO|opened log file /var/log/ovn/acl-audit-log.log 2025-12-08T17:53:42.871639172+00:00 stderr F 2025-12-08T17:53:42.871Z|00002|reconnect|INFO|unix:/var/run/openvswitch/db.sock: connecting... 2025-12-08T17:53:42.871639172+00:00 stderr F 2025-12-08T17:53:42.871Z|00003|reconnect|INFO|unix:/var/run/openvswitch/db.sock: connected 2025-12-08T17:53:42.877311386+00:00 stderr F 2025-12-08T17:53:42.877Z|00004|main|INFO|OVN internal version is : [25.03.1-20.41.0-78.8] 2025-12-08T17:53:42.877311386+00:00 stderr F 2025-12-08T17:53:42.877Z|00005|main|INFO|OVS IDL reconnected, force recompute. 2025-12-08T17:53:42.877333156+00:00 stderr F 2025-12-08T17:53:42.877Z|00006|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connecting... 2025-12-08T17:53:42.877333156+00:00 stderr F 2025-12-08T17:53:42.877Z|00007|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connection attempt failed (No such file or directory) 2025-12-08T17:53:42.877343237+00:00 stderr F 2025-12-08T17:53:42.877Z|00008|main|INFO|OVNSB IDL reconnected, force recompute. 2025-12-08T17:53:42.877343237+00:00 stderr F 2025-12-08T17:53:42.877Z|00009|ovn_util|INFO|statctrl: connecting to switch: "unix:/var/run/openvswitch/br-int.mgmt" 2025-12-08T17:53:42.877352517+00:00 stderr F 2025-12-08T17:53:42.877Z|00010|rconn|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting... 2025-12-08T17:53:42.877413119+00:00 stderr F 2025-12-08T17:53:42.877Z|00011|ovn_util|INFO|pinctrl: connecting to switch: "unix:/var/run/openvswitch/br-int.mgmt" 2025-12-08T17:53:42.877413119+00:00 stderr F 2025-12-08T17:53:42.877Z|00012|rconn|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting... 2025-12-08T17:53:42.878279452+00:00 stderr F 2025-12-08T17:53:42.878Z|00001|rconn(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected 2025-12-08T17:53:42.878488067+00:00 stderr F 2025-12-08T17:53:42.878Z|00001|rconn(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected 2025-12-08T17:53:43.878716654+00:00 stderr F 2025-12-08T17:53:43.878Z|00013|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connecting... 2025-12-08T17:53:43.878716654+00:00 stderr F 2025-12-08T17:53:43.878Z|00014|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connection attempt failed (No such file or directory) 2025-12-08T17:53:43.878716654+00:00 stderr F 2025-12-08T17:53:43.878Z|00015|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: waiting 2 seconds before reconnect 2025-12-08T17:53:45.880852812+00:00 stderr F 2025-12-08T17:53:45.880Z|00016|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connecting... 2025-12-08T17:53:45.880852812+00:00 stderr F 2025-12-08T17:53:45.880Z|00017|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connection attempt failed (No such file or directory) 2025-12-08T17:53:45.880852812+00:00 stderr F 2025-12-08T17:53:45.880Z|00018|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: waiting 4 seconds before reconnect 2025-12-08T17:53:49.887795111+00:00 stderr F 2025-12-08T17:53:49.886Z|00019|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connecting... 
2025-12-08T17:53:49.887795111+00:00 stderr F 2025-12-08T17:53:49.886Z|00020|reconnect|INFO|unix:/var/run/ovn/ovnsb_db.sock: connected 2025-12-08T17:53:49.914985790+00:00 stderr F 2025-12-08T17:53:49.914Z|00021|ovn_util|INFO|features: connecting to switch: "unix:/var/run/openvswitch/br-int.mgmt" 2025-12-08T17:53:49.914985790+00:00 stderr F 2025-12-08T17:53:49.914Z|00022|rconn|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting... 2025-12-08T17:53:49.915764161+00:00 stderr F 2025-12-08T17:53:49.915Z|00023|rconn|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected 2025-12-08T17:53:49.915820473+00:00 stderr F 2025-12-08T17:53:49.915Z|00024|features|INFO|OVS Feature: ct_zero_snat, state: supported 2025-12-08T17:53:49.915820473+00:00 stderr F 2025-12-08T17:53:49.915Z|00025|features|INFO|OVS Feature: ct_flush, state: supported 2025-12-08T17:53:49.915820473+00:00 stderr F 2025-12-08T17:53:49.915Z|00026|features|INFO|OVS Feature: dp_hash_l4_sym_support, state: supported 2025-12-08T17:53:49.915871524+00:00 stderr F 2025-12-08T17:53:49.915Z|00027|reconnect|INFO|unix:/var/run/openvswitch/db.sock: connecting... 2025-12-08T17:53:49.915871524+00:00 stderr F 2025-12-08T17:53:49.915Z|00028|main|INFO|OVS feature set changed, force recompute. 2025-12-08T17:53:49.915871524+00:00 stderr F 2025-12-08T17:53:49.915Z|00029|ovn_util|INFO|ofctrl: connecting to switch: "unix:/var/run/openvswitch/br-int.mgmt" 2025-12-08T17:53:49.915895945+00:00 stderr F 2025-12-08T17:53:49.915Z|00030|rconn|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting... 2025-12-08T17:53:49.916430610+00:00 stderr F 2025-12-08T17:53:49.916Z|00031|features|INFO|OVS Feature: meter_support, state: supported 2025-12-08T17:53:49.916430610+00:00 stderr F 2025-12-08T17:53:49.916Z|00032|features|INFO|OVS Feature: group_support, state: supported 2025-12-08T17:53:49.916461201+00:00 stderr F 2025-12-08T17:53:49.916Z|00033|features|INFO|OVS Feature: sample_action_with_registers, state: supported 2025-12-08T17:53:49.916461201+00:00 stderr F 2025-12-08T17:53:49.916Z|00034|reconnect|INFO|unix:/var/run/openvswitch/db.sock: connected 2025-12-08T17:53:49.916480301+00:00 stderr F 2025-12-08T17:53:49.916Z|00035|main|INFO|OVS feature set changed, force recompute. 2025-12-08T17:53:49.917347985+00:00 stderr F 2025-12-08T17:53:49.917Z|00036|features|INFO|OVS DB schema supports 4 flow table prefixes, our IDL supports: 4 2025-12-08T17:53:49.932216389+00:00 stderr F 2025-12-08T17:53:49.932Z|00037|features|INFO|OVS Feature: ct_label_flush, state: supported 2025-12-08T17:53:49.932216389+00:00 stderr F 2025-12-08T17:53:49.932Z|00038|main|INFO|OVS feature set changed, force recompute. 2025-12-08T17:53:49.932386913+00:00 stderr F 2025-12-08T17:53:49.932Z|00039|rconn|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected 2025-12-08T17:53:49.932394683+00:00 stderr F 2025-12-08T17:53:49.932Z|00040|main|INFO|OVS OpenFlow connection reconnected,force recompute. 
2025-12-08T17:54:20.167014790+00:00 stderr F 2025-12-08T17:54:20.166Z|00041|memory|INFO|22920 kB peak resident set size after 37.3 seconds 2025-12-08T17:54:20.167014790+00:00 stderr F 2025-12-08T17:54:20.166Z|00042|memory|INFO|idl-cells-OVN_Southbound:15715 idl-cells-Open_vSwitch:3036 lflow-cache-entries-cache-expr:290 lflow-cache-entries-cache-matches:625 lflow-cache-size-KB:758 local_datapath_usage-KB:1 ofctrl_desired_flow_usage-KB:800 ofctrl_installed_flow_usage-KB:590 ofctrl_sb_flow_ref_usage-KB:311 2025-12-08T17:54:20.461281089+00:00 stderr F 2025-12-08T17:54:20.461Z|00043|memory_trim|INFO|Detected inactivity (last active 30002 ms ago): trimming memory 2025-12-08T17:54:28.911990658+00:00 stderr F 2025-12-08T17:54:28.911Z|00044|binding|INFO|Claiming lport openshift-marketplace_certified-operators-tkpnz for this chassis. 2025-12-08T17:54:28.911990658+00:00 stderr F 2025-12-08T17:54:28.911Z|00045|binding|INFO|openshift-marketplace_certified-operators-tkpnz: Claiming 0a:58:0a:d9:00:07 10.217.0.7 2025-12-08T17:54:28.936568739+00:00 stderr F 2025-12-08T17:54:28.936Z|00046|binding|INFO|Setting lport openshift-marketplace_certified-operators-tkpnz ovn-installed in OVS 2025-12-08T17:54:28.936568739+00:00 stderr F 2025-12-08T17:54:28.936Z|00047|binding|INFO|Setting lport openshift-marketplace_certified-operators-tkpnz up in Southbound 2025-12-08T17:54:41.366171380+00:00 stderr F 2025-12-08T17:54:41.365Z|00048|binding|INFO|Releasing lport openshift-marketplace_certified-operators-tkpnz from this chassis (sb_readonly=0) 2025-12-08T17:54:41.366171380+00:00 stderr F 2025-12-08T17:54:41.365Z|00049|binding|INFO|Setting lport openshift-marketplace_certified-operators-tkpnz down in Southbound 2025-12-08T17:54:42.127155909+00:00 stderr F 2025-12-08T17:54:42.122Z|00050|binding|INFO|Claiming lport openshift-marketplace_redhat-operators-hl4hq for this chassis. 2025-12-08T17:54:42.127155909+00:00 stderr F 2025-12-08T17:54:42.122Z|00051|binding|INFO|openshift-marketplace_redhat-operators-hl4hq: Claiming 0a:58:0a:d9:00:0a 10.217.0.10 2025-12-08T17:54:42.166942970+00:00 stderr F 2025-12-08T17:54:42.165Z|00052|binding|INFO|Setting lport openshift-marketplace_redhat-operators-hl4hq ovn-installed in OVS 2025-12-08T17:54:42.166942970+00:00 stderr F 2025-12-08T17:54:42.166Z|00053|binding|INFO|Setting lport openshift-marketplace_redhat-operators-hl4hq up in Southbound 2025-12-08T17:54:50.244027483+00:00 stderr F 2025-12-08T17:54:50.243Z|00054|binding|INFO|Releasing lport openshift-marketplace_redhat-marketplace-xp5vr from this chassis (sb_readonly=0) 2025-12-08T17:54:50.244027483+00:00 stderr F 2025-12-08T17:54:50.243Z|00055|if_status|WARN|Trying to release unknown interface openshift-marketplace_redhat-marketplace-xp5vr 2025-12-08T17:54:50.244027483+00:00 stderr F 2025-12-08T17:54:50.243Z|00056|binding|INFO|Setting lport openshift-marketplace_redhat-marketplace-xp5vr down in Southbound 2025-12-08T17:54:51.627674209+00:00 stderr F 2025-12-08T17:54:51.627Z|00057|binding|INFO|Claiming lport openshift-image-registry_image-registry-5d9d95bf5b-cmjbz for this chassis. 
2025-12-08T17:54:51.627674209+00:00 stderr F 2025-12-08T17:54:51.627Z|00058|binding|INFO|openshift-image-registry_image-registry-5d9d95bf5b-cmjbz: Claiming 0a:58:0a:d9:00:0b 10.217.0.11 2025-12-08T17:54:51.652810355+00:00 stderr F 2025-12-08T17:54:51.652Z|00059|binding|INFO|Setting lport openshift-image-registry_image-registry-5d9d95bf5b-cmjbz ovn-installed in OVS 2025-12-08T17:54:51.652810355+00:00 stderr F 2025-12-08T17:54:51.652Z|00060|binding|INFO|Setting lport openshift-image-registry_image-registry-5d9d95bf5b-cmjbz up in Southbound 2025-12-08T17:54:57.989450392+00:00 stderr F 2025-12-08T17:54:57.989Z|00061|binding|INFO|Claiming lport openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5 for this chassis. 2025-12-08T17:54:57.989450392+00:00 stderr F 2025-12-08T17:54:57.989Z|00062|binding|INFO|openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5: Claiming 0a:58:0a:d9:00:17 10.217.0.23 2025-12-08T17:54:58.021281339+00:00 stderr F 2025-12-08T17:54:58.021Z|00063|binding|INFO|Setting lport openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5 ovn-installed in OVS 2025-12-08T17:54:58.021281339+00:00 stderr F 2025-12-08T17:54:58.021Z|00064|binding|INFO|Setting lport openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5 up in Southbound 2025-12-08T17:54:59.112841984+00:00 stderr F 2025-12-08T17:54:59.112Z|00065|binding|INFO|Releasing lport openshift-marketplace_redhat-operators-hl4hq from this chassis (sb_readonly=0) 2025-12-08T17:54:59.112841984+00:00 stderr F 2025-12-08T17:54:59.112Z|00066|binding|INFO|Setting lport openshift-marketplace_redhat-operators-hl4hq down in Southbound 2025-12-08T17:55:04.456512609+00:00 stderr F 2025-12-08T17:55:04.456Z|00067|binding|INFO|Releasing lport openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5 from this chassis (sb_readonly=0) 2025-12-08T17:55:04.456512609+00:00 stderr F 2025-12-08T17:55:04.456Z|00068|binding|INFO|Setting lport openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5 down in Southbound 2025-12-08T17:55:04.673452096+00:00 stderr F 2025-12-08T17:55:04.672Z|00069|binding|INFO|Claiming lport openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f for this chassis. 2025-12-08T17:55:04.673452096+00:00 stderr F 2025-12-08T17:55:04.672Z|00070|binding|INFO|openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f: Claiming 0a:58:0a:d9:00:2a 10.217.0.42 2025-12-08T17:55:04.716602668+00:00 stderr F 2025-12-08T17:55:04.716Z|00071|binding|INFO|Setting lport openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f ovn-installed in OVS 2025-12-08T17:55:04.716602668+00:00 stderr F 2025-12-08T17:55:04.716Z|00072|binding|INFO|Setting lport openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f up in Southbound 2025-12-08T17:55:05.112348748+00:00 stderr F 2025-12-08T17:55:05.112Z|00073|binding|INFO|Claiming lport openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj for this chassis. 
2025-12-08T17:55:05.112348748+00:00 stderr F 2025-12-08T17:55:05.112Z|00074|binding|INFO|openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj: Claiming 0a:58:0a:d9:00:2c 10.217.0.44 2025-12-08T17:55:05.148015078+00:00 stderr F 2025-12-08T17:55:05.147Z|00075|binding|INFO|Setting lport openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj up in Southbound 2025-12-08T17:55:05.148729817+00:00 stderr F 2025-12-08T17:55:05.148Z|00076|binding|INFO|Setting lport openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj ovn-installed in OVS 2025-12-08T17:55:08.519026575+00:00 stderr F 2025-12-08T17:55:08.518Z|00077|binding|INFO|Releasing lport openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f from this chassis (sb_readonly=0) 2025-12-08T17:55:08.519026575+00:00 stderr F 2025-12-08T17:55:08.518Z|00078|binding|INFO|Setting lport openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f down in Southbound 2025-12-08T17:55:14.565383876+00:00 stderr F 2025-12-08T17:55:14.565Z|00079|binding|INFO|Releasing lport openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj from this chassis (sb_readonly=0) 2025-12-08T17:55:14.565383876+00:00 stderr F 2025-12-08T17:55:14.565Z|00080|binding|INFO|Setting lport openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj down in Southbound 2025-12-08T17:55:16.221147655+00:00 stderr F 2025-12-08T17:55:16.218Z|00081|binding|INFO|Claiming lport openshift-operators_obo-prometheus-operator-86648f486b-4j9kn for this chassis. 2025-12-08T17:55:16.221147655+00:00 stderr F 2025-12-08T17:55:16.218Z|00082|binding|INFO|openshift-operators_obo-prometheus-operator-86648f486b-4j9kn: Claiming 0a:58:0a:d9:00:2d 10.217.0.45 2025-12-08T17:55:16.255488870+00:00 stderr F 2025-12-08T17:55:16.255Z|00083|binding|INFO|Setting lport openshift-operators_obo-prometheus-operator-86648f486b-4j9kn ovn-installed in OVS 2025-12-08T17:55:16.255488870+00:00 stderr F 2025-12-08T17:55:16.255Z|00084|binding|INFO|Setting lport openshift-operators_obo-prometheus-operator-86648f486b-4j9kn up in Southbound 2025-12-08T17:55:16.397187203+00:00 stderr F 2025-12-08T17:55:16.397Z|00085|binding|INFO|Claiming lport openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm for this chassis. 2025-12-08T17:55:16.397187203+00:00 stderr F 2025-12-08T17:55:16.397Z|00086|binding|INFO|openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm: Claiming 0a:58:0a:d9:00:2f 10.217.0.47 2025-12-08T17:55:16.451249858+00:00 stderr F 2025-12-08T17:55:16.451Z|00087|binding|INFO|Setting lport openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm ovn-installed in OVS 2025-12-08T17:55:16.451249858+00:00 stderr F 2025-12-08T17:55:16.451Z|00088|binding|INFO|Setting lport openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm up in Southbound 2025-12-08T17:55:16.474747120+00:00 stderr F 2025-12-08T17:55:16.474Z|00089|binding|INFO|Claiming lport openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t for this chassis. 
2025-12-08T17:55:16.474747120+00:00 stderr F 2025-12-08T17:55:16.474Z|00090|binding|INFO|openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t: Claiming 0a:58:0a:d9:00:2e 10.217.0.46 2025-12-08T17:55:16.529012090+00:00 stderr F 2025-12-08T17:55:16.528Z|00091|binding|INFO|Setting lport openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t ovn-installed in OVS 2025-12-08T17:55:16.529012090+00:00 stderr F 2025-12-08T17:55:16.528Z|00092|binding|INFO|Setting lport openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t up in Southbound 2025-12-08T17:55:16.539830151+00:00 stderr F 2025-12-08T17:55:16.539Z|00093|binding|INFO|Claiming lport openshift-operators_observability-operator-78c97476f4-mg4b2 for this chassis. 2025-12-08T17:55:16.539830151+00:00 stderr F 2025-12-08T17:55:16.539Z|00094|binding|INFO|openshift-operators_observability-operator-78c97476f4-mg4b2: Claiming 0a:58:0a:d9:00:30 10.217.0.48 2025-12-08T17:55:16.596344302+00:00 stderr F 2025-12-08T17:55:16.594Z|00095|binding|INFO|Setting lport openshift-operators_observability-operator-78c97476f4-mg4b2 ovn-installed in OVS 2025-12-08T17:55:16.596344302+00:00 stderr F 2025-12-08T17:55:16.594Z|00096|binding|INFO|Setting lport openshift-operators_observability-operator-78c97476f4-mg4b2 up in Southbound 2025-12-08T17:55:16.739201616+00:00 stderr F 2025-12-08T17:55:16.738Z|00097|binding|INFO|Claiming lport openshift-operators_perses-operator-68bdb49cbf-m2cdr for this chassis. 2025-12-08T17:55:16.739201616+00:00 stderr F 2025-12-08T17:55:16.739Z|00098|binding|INFO|openshift-operators_perses-operator-68bdb49cbf-m2cdr: Claiming 0a:58:0a:d9:00:31 10.217.0.49 2025-12-08T17:55:16.788023951+00:00 stderr F 2025-12-08T17:55:16.787Z|00099|binding|INFO|Setting lport openshift-operators_perses-operator-68bdb49cbf-m2cdr ovn-installed in OVS 2025-12-08T17:55:16.788023951+00:00 stderr F 2025-12-08T17:55:16.788Z|00100|binding|INFO|Setting lport openshift-operators_perses-operator-68bdb49cbf-m2cdr up in Southbound 2025-12-08T17:55:17.260993879+00:00 stderr F 2025-12-08T17:55:17.259Z|00101|binding|INFO|Claiming lport service-telemetry_elastic-operator-c9c86658-4qchz for this chassis. 2025-12-08T17:55:17.260993879+00:00 stderr F 2025-12-08T17:55:17.259Z|00102|binding|INFO|service-telemetry_elastic-operator-c9c86658-4qchz: Claiming 0a:58:0a:d9:00:32 10.217.0.50 2025-12-08T17:55:17.292990460+00:00 stderr F 2025-12-08T17:55:17.292Z|00103|binding|INFO|Setting lport service-telemetry_elastic-operator-c9c86658-4qchz ovn-installed in OVS 2025-12-08T17:55:17.292990460+00:00 stderr F 2025-12-08T17:55:17.292Z|00104|binding|INFO|Setting lport service-telemetry_elastic-operator-c9c86658-4qchz up in Southbound 2025-12-08T17:55:34.232867497+00:00 stderr F 2025-12-08T17:55:34.232Z|00105|binding|INFO|Claiming lport cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9 for this chassis. 
2025-12-08T17:55:34.232959419+00:00 stderr F 2025-12-08T17:55:34.232Z|00106|binding|INFO|cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9: Claiming 0a:58:0a:d9:00:33 10.217.0.51 2025-12-08T17:55:34.268427313+00:00 stderr F 2025-12-08T17:55:34.268Z|00107|binding|INFO|Setting lport cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9 ovn-installed in OVS 2025-12-08T17:55:34.268493615+00:00 stderr F 2025-12-08T17:55:34.268Z|00108|binding|INFO|Setting lport cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9 up in Southbound 2025-12-08T17:55:39.087723263+00:00 stderr F 2025-12-08T17:55:39.087Z|00109|binding|INFO|Releasing lport openshift-image-registry_image-registry-66587d64c8-s6hn4 from this chassis (sb_readonly=0) 2025-12-08T17:55:39.087723263+00:00 stderr F 2025-12-08T17:55:39.087Z|00110|if_status|WARN|Trying to release unknown interface openshift-image-registry_image-registry-66587d64c8-s6hn4 2025-12-08T17:55:39.087723263+00:00 stderr F 2025-12-08T17:55:39.087Z|00111|binding|INFO|Setting lport openshift-image-registry_image-registry-66587d64c8-s6hn4 down in Southbound 2025-12-08T17:55:41.144802758+00:00 stderr F 2025-12-08T17:55:41.144Z|00112|binding|INFO|Claiming lport cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b for this chassis. 2025-12-08T17:55:41.144802758+00:00 stderr F 2025-12-08T17:55:41.144Z|00113|binding|INFO|cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b: Claiming 0a:58:0a:d9:00:34 10.217.0.52 2025-12-08T17:55:41.176116588+00:00 stderr F 2025-12-08T17:55:41.176Z|00114|binding|INFO|Setting lport cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b ovn-installed in OVS 2025-12-08T17:55:41.176116588+00:00 stderr F 2025-12-08T17:55:41.176Z|00115|binding|INFO|Setting lport cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b up in Southbound 2025-12-08T17:55:42.568183415+00:00 stderr F 2025-12-08T17:55:42.568Z|00116|binding|INFO|Claiming lport service-telemetry_elasticsearch-es-default-0 for this chassis. 2025-12-08T17:55:42.568242317+00:00 stderr F 2025-12-08T17:55:42.568Z|00117|binding|INFO|service-telemetry_elasticsearch-es-default-0: Claiming 0a:58:0a:d9:00:35 10.217.0.53 2025-12-08T17:55:42.631089402+00:00 stderr F 2025-12-08T17:55:42.631Z|00118|binding|INFO|Setting lport service-telemetry_elasticsearch-es-default-0 ovn-installed in OVS 2025-12-08T17:55:42.631147074+00:00 stderr F 2025-12-08T17:55:42.631Z|00119|binding|INFO|Setting lport service-telemetry_elasticsearch-es-default-0 up in Southbound 2025-12-08T17:55:43.675691566+00:00 stderr F 2025-12-08T17:55:43.675Z|00120|binding|INFO|Claiming lport cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q for this chassis. 2025-12-08T17:55:43.675691566+00:00 stderr F 2025-12-08T17:55:43.675Z|00121|binding|INFO|cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q: Claiming 0a:58:0a:d9:00:36 10.217.0.54 2025-12-08T17:55:43.714810299+00:00 stderr F 2025-12-08T17:55:43.714Z|00122|binding|INFO|Setting lport cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q ovn-installed in OVS 2025-12-08T17:55:43.714810299+00:00 stderr F 2025-12-08T17:55:43.714Z|00123|binding|INFO|Setting lport cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q up in Southbound 2025-12-08T17:56:00.556548913+00:00 stderr F 2025-12-08T17:56:00.556Z|00124|binding|INFO|Claiming lport cert-manager_cert-manager-858d87f86b-7q2ss for this chassis. 
2025-12-08T17:56:00.556548913+00:00 stderr F 2025-12-08T17:56:00.556Z|00125|binding|INFO|cert-manager_cert-manager-858d87f86b-7q2ss: Claiming 0a:58:0a:d9:00:37 10.217.0.55 2025-12-08T17:56:00.597284820+00:00 stderr F 2025-12-08T17:56:00.597Z|00126|binding|INFO|Setting lport cert-manager_cert-manager-858d87f86b-7q2ss ovn-installed in OVS 2025-12-08T17:56:00.597284820+00:00 stderr F 2025-12-08T17:56:00.597Z|00127|binding|INFO|Setting lport cert-manager_cert-manager-858d87f86b-7q2ss up in Southbound 2025-12-08T17:56:09.872951002+00:00 stderr F 2025-12-08T17:56:09.872Z|00128|binding|INFO|Claiming lport service-telemetry_infrawatch-operators-xmhcm for this chassis. 2025-12-08T17:56:09.873022544+00:00 stderr F 2025-12-08T17:56:09.873Z|00129|binding|INFO|service-telemetry_infrawatch-operators-xmhcm: Claiming 0a:58:0a:d9:00:38 10.217.0.56 2025-12-08T17:56:09.908614871+00:00 stderr F 2025-12-08T17:56:09.908Z|00130|binding|INFO|Setting lport service-telemetry_infrawatch-operators-xmhcm up in Southbound 2025-12-08T17:56:09.909184706+00:00 stderr F 2025-12-08T17:56:09.909Z|00131|binding|INFO|Setting lport service-telemetry_infrawatch-operators-xmhcm ovn-installed in OVS 2025-12-08T17:56:13.328834181+00:00 stderr F 2025-12-08T17:56:13.328Z|00132|binding|INFO|Releasing lport service-telemetry_infrawatch-operators-xmhcm from this chassis (sb_readonly=0) 2025-12-08T17:56:13.328899523+00:00 stderr F 2025-12-08T17:56:13.328Z|00133|binding|INFO|Setting lport service-telemetry_infrawatch-operators-xmhcm down in Southbound 2025-12-08T17:56:13.504338917+00:00 stderr F 2025-12-08T17:56:13.504Z|00134|binding|INFO|Claiming lport service-telemetry_infrawatch-operators-tv99j for this chassis. 2025-12-08T17:56:13.504407479+00:00 stderr F 2025-12-08T17:56:13.504Z|00135|binding|INFO|service-telemetry_infrawatch-operators-tv99j: Claiming 0a:58:0a:d9:00:3c 10.217.0.60 2025-12-08T17:56:13.560816847+00:00 stderr F 2025-12-08T17:56:13.560Z|00136|binding|INFO|Setting lport service-telemetry_infrawatch-operators-tv99j ovn-installed in OVS 2025-12-08T17:56:13.560875308+00:00 stderr F 2025-12-08T17:56:13.560Z|00137|binding|INFO|Setting lport service-telemetry_infrawatch-operators-tv99j up in Southbound 2025-12-08T17:56:28.948102877+00:00 stderr F 2025-12-08T17:56:28.935Z|00138|binding|INFO|Claiming lport service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq for this chassis. 2025-12-08T17:56:28.948102877+00:00 stderr F 2025-12-08T17:56:28.935Z|00139|binding|INFO|service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq: Claiming 0a:58:0a:d9:00:3d 10.217.0.61 2025-12-08T17:56:28.982500034+00:00 stderr F 2025-12-08T17:56:28.981Z|00140|binding|INFO|Setting lport service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq ovn-installed in OVS 2025-12-08T17:56:28.982500034+00:00 stderr F 2025-12-08T17:56:28.981Z|00141|binding|INFO|Setting lport service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq up in Southbound 2025-12-08T17:56:29.726271813+00:00 stderr F 2025-12-08T17:56:29.721Z|00142|binding|INFO|Claiming lport service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx for this chassis. 
2025-12-08T17:56:29.726271813+00:00 stderr F 2025-12-08T17:56:29.721Z|00143|binding|INFO|service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx: Claiming 0a:58:0a:d9:00:42 10.217.0.66 2025-12-08T17:56:29.752657052+00:00 stderr F 2025-12-08T17:56:29.752Z|00144|binding|INFO|Setting lport service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx ovn-installed in OVS 2025-12-08T17:56:29.752657052+00:00 stderr F 2025-12-08T17:56:29.752Z|00145|binding|INFO|Setting lport service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx up in Southbound 2025-12-08T17:56:30.620946369+00:00 stderr F 2025-12-08T17:56:30.620Z|00146|binding|INFO|Claiming lport openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj for this chassis. 2025-12-08T17:56:30.620946369+00:00 stderr F 2025-12-08T17:56:30.620Z|00147|binding|INFO|openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj: Claiming 0a:58:0a:d9:00:45 10.217.0.69 2025-12-08T17:56:30.655129890+00:00 stderr F 2025-12-08T17:56:30.654Z|00148|binding|INFO|Setting lport openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj ovn-installed in OVS 2025-12-08T17:56:30.655129890+00:00 stderr F 2025-12-08T17:56:30.655Z|00149|binding|INFO|Setting lport openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj up in Southbound 2025-12-08T17:56:35.380248340+00:00 stderr F 2025-12-08T17:56:35.380Z|00150|binding|INFO|Releasing lport openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj from this chassis (sb_readonly=0) 2025-12-08T17:56:35.380248340+00:00 stderr F 2025-12-08T17:56:35.380Z|00151|binding|INFO|Setting lport openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj down in Southbound 2025-12-08T17:56:35.396524834+00:00 stderr F 2025-12-08T17:56:35.395Z|00152|binding|INFO|Releasing lport service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq from this chassis (sb_readonly=0) 2025-12-08T17:56:35.396524834+00:00 stderr F 2025-12-08T17:56:35.395Z|00153|binding|INFO|Setting lport service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq down in Southbound 2025-12-08T17:56:35.403237320+00:00 stderr F 2025-12-08T17:56:35.403Z|00154|binding|INFO|Releasing lport service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx from this chassis (sb_readonly=0) 2025-12-08T17:56:35.403237320+00:00 stderr F 2025-12-08T17:56:35.403Z|00155|binding|INFO|Setting lport service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx down in Southbound 2025-12-08T17:56:40.105459892+00:00 stderr F 2025-12-08T17:56:40.105Z|00156|binding|INFO|Claiming lport service-telemetry_interconnect-operator-78b9bd8798-456sz for this chassis. 
2025-12-08T17:56:40.105459892+00:00 stderr F 2025-12-08T17:56:40.105Z|00157|binding|INFO|service-telemetry_interconnect-operator-78b9bd8798-456sz: Claiming 0a:58:0a:d9:00:46 10.217.0.70 2025-12-08T17:56:40.139502300+00:00 stderr F 2025-12-08T17:56:40.138Z|00158|binding|INFO|Setting lport service-telemetry_interconnect-operator-78b9bd8798-456sz ovn-installed in OVS 2025-12-08T17:56:40.139502300+00:00 stderr F 2025-12-08T17:56:40.138Z|00159|binding|INFO|Setting lport service-telemetry_interconnect-operator-78b9bd8798-456sz up in Southbound 2025-12-08T17:56:41.510959280+00:00 stderr F 2025-12-08T17:56:41.510Z|00160|binding|INFO|Claiming lport service-telemetry_service-telemetry-operator-79647f8775-zs8hl for this chassis. 2025-12-08T17:56:41.510959280+00:00 stderr F 2025-12-08T17:56:41.510Z|00161|binding|INFO|service-telemetry_service-telemetry-operator-79647f8775-zs8hl: Claiming 0a:58:0a:d9:00:47 10.217.0.71 2025-12-08T17:56:41.555492392+00:00 stderr F 2025-12-08T17:56:41.555Z|00162|binding|INFO|Setting lport service-telemetry_service-telemetry-operator-79647f8775-zs8hl ovn-installed in OVS 2025-12-08T17:56:41.555492392+00:00 stderr F 2025-12-08T17:56:41.555Z|00163|binding|INFO|Setting lport service-telemetry_service-telemetry-operator-79647f8775-zs8hl up in Southbound 2025-12-08T17:56:42.988575169+00:00 stderr F 2025-12-08T17:56:42.988Z|00164|binding|INFO|Claiming lport service-telemetry_smart-gateway-operator-5cd794ff55-w8r45 for this chassis. 2025-12-08T17:56:42.988575169+00:00 stderr F 2025-12-08T17:56:42.988Z|00165|binding|INFO|service-telemetry_smart-gateway-operator-5cd794ff55-w8r45: Claiming 0a:58:0a:d9:00:48 10.217.0.72 2025-12-08T17:56:43.036475229+00:00 stderr F 2025-12-08T17:56:43.034Z|00166|binding|INFO|Setting lport service-telemetry_smart-gateway-operator-5cd794ff55-w8r45 ovn-installed in OVS 2025-12-08T17:56:43.036475229+00:00 stderr F 2025-12-08T17:56:43.034Z|00167|binding|INFO|Setting lport service-telemetry_smart-gateway-operator-5cd794ff55-w8r45 up in Southbound 2025-12-08T17:57:12.989664328+00:00 stderr F 2025-12-08T17:57:12.989Z|00168|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory 2025-12-08T17:57:29.063019413+00:00 stderr F 2025-12-08T17:57:29.062Z|00169|binding|INFO|Claiming lport service-telemetry_default-interconnect-55bf8d5cb-76n5w for this chassis. 2025-12-08T17:57:29.063019413+00:00 stderr F 2025-12-08T17:57:29.062Z|00170|binding|INFO|service-telemetry_default-interconnect-55bf8d5cb-76n5w: Claiming 0a:58:0a:d9:00:49 10.217.0.73 2025-12-08T17:57:29.106604299+00:00 stderr F 2025-12-08T17:57:29.106Z|00171|binding|INFO|Setting lport service-telemetry_default-interconnect-55bf8d5cb-76n5w ovn-installed in OVS 2025-12-08T17:57:29.106604299+00:00 stderr F 2025-12-08T17:57:29.106Z|00172|binding|INFO|Setting lport service-telemetry_default-interconnect-55bf8d5cb-76n5w up in Southbound 2025-12-08T17:57:40.589109489+00:00 stderr F 2025-12-08T17:57:40.589Z|00173|binding|INFO|Claiming lport service-telemetry_prometheus-default-0 for this chassis. 
2025-12-08T17:57:40.589109489+00:00 stderr F 2025-12-08T17:57:40.589Z|00174|binding|INFO|service-telemetry_prometheus-default-0: Claiming 0a:58:0a:d9:00:4a 10.217.0.74 2025-12-08T17:57:40.622982505+00:00 stderr F 2025-12-08T17:57:40.622Z|00175|binding|INFO|Setting lport service-telemetry_prometheus-default-0 ovn-installed in OVS 2025-12-08T17:57:40.622982505+00:00 stderr F 2025-12-08T17:57:40.622Z|00176|binding|INFO|Setting lport service-telemetry_prometheus-default-0 up in Southbound 2025-12-08T17:57:49.058101145+00:00 stderr F 2025-12-08T17:57:49.058Z|00177|binding|INFO|Claiming lport service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn for this chassis. 2025-12-08T17:57:49.058101145+00:00 stderr F 2025-12-08T17:57:49.058Z|00178|binding|INFO|service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn: Claiming 0a:58:0a:d9:00:4b 10.217.0.75 2025-12-08T17:57:49.093532371+00:00 stderr F 2025-12-08T17:57:49.093Z|00179|binding|INFO|Setting lport service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn ovn-installed in OVS 2025-12-08T17:57:49.093532371+00:00 stderr F 2025-12-08T17:57:49.093Z|00180|binding|INFO|Setting lport service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn up in Southbound 2025-12-08T17:57:57.709152177+00:00 stderr F 2025-12-08T17:57:57.704Z|00181|binding|INFO|Claiming lport service-telemetry_alertmanager-default-0 for this chassis. 2025-12-08T17:57:57.709152177+00:00 stderr F 2025-12-08T17:57:57.704Z|00182|binding|INFO|service-telemetry_alertmanager-default-0: Claiming 0a:58:0a:d9:00:4c 10.217.0.76 2025-12-08T17:57:57.757727843+00:00 stderr F 2025-12-08T17:57:57.757Z|00183|binding|INFO|Setting lport service-telemetry_alertmanager-default-0 up in Southbound 2025-12-08T17:57:57.758280028+00:00 stderr F 2025-12-08T17:57:57.758Z|00184|binding|INFO|Setting lport service-telemetry_alertmanager-default-0 ovn-installed in OVS 2025-12-08T17:58:10.354185937+00:00 stderr F 2025-12-08T17:58:10.353Z|00185|binding|INFO|Claiming lport service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx for this chassis. 2025-12-08T17:58:10.354185937+00:00 stderr F 2025-12-08T17:58:10.354Z|00186|binding|INFO|service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx: Claiming 0a:58:0a:d9:00:4d 10.217.0.77 2025-12-08T17:58:10.412403492+00:00 stderr F 2025-12-08T17:58:10.412Z|00187|binding|INFO|Setting lport service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx ovn-installed in OVS 2025-12-08T17:58:10.412403492+00:00 stderr F 2025-12-08T17:58:10.412Z|00188|binding|INFO|Setting lport service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx up in Southbound 2025-12-08T17:58:11.508544352+00:00 stderr F 2025-12-08T17:58:11.507Z|00189|binding|INFO|Claiming lport service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v for this chassis. 
2025-12-08T17:58:11.508544352+00:00 stderr F 2025-12-08T17:58:11.508Z|00190|binding|INFO|service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v: Claiming 0a:58:0a:d9:00:4e 10.217.0.78 2025-12-08T17:58:11.552599421+00:00 stderr F 2025-12-08T17:58:11.552Z|00191|binding|INFO|Setting lport service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v up in Southbound 2025-12-08T17:58:11.553192326+00:00 stderr F 2025-12-08T17:58:11.553Z|00192|binding|INFO|Setting lport service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v ovn-installed in OVS 2025-12-08T17:58:19.146593493+00:00 stderr F 2025-12-08T17:58:19.146Z|00193|binding|INFO|Claiming lport service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp for this chassis. 2025-12-08T17:58:19.146593493+00:00 stderr F 2025-12-08T17:58:19.146Z|00194|binding|INFO|service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp: Claiming 0a:58:0a:d9:00:4f 10.217.0.79 2025-12-08T17:58:19.217285610+00:00 stderr F 2025-12-08T17:58:19.216Z|00195|binding|INFO|Setting lport service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp up in Southbound 2025-12-08T17:58:19.231688892+00:00 stderr F 2025-12-08T17:58:19.223Z|00196|binding|INFO|Setting lport service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp ovn-installed in OVS 2025-12-08T17:58:20.619144890+00:00 stderr F 2025-12-08T17:58:20.619Z|00197|binding|INFO|Claiming lport service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn for this chassis. 2025-12-08T17:58:20.619144890+00:00 stderr F 2025-12-08T17:58:20.619Z|00198|binding|INFO|service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn: Claiming 0a:58:0a:d9:00:50 10.217.0.80 2025-12-08T17:58:20.663713452+00:00 stderr F 2025-12-08T17:58:20.663Z|00199|binding|INFO|Setting lport service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn ovn-installed in OVS 2025-12-08T17:58:20.663713452+00:00 stderr F 2025-12-08T17:58:20.663Z|00200|binding|INFO|Setting lport service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn up in Southbound 2025-12-08T17:58:34.215429096+00:00 stderr F 2025-12-08T17:58:34.215Z|00201|binding|INFO|Claiming lport service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk for this chassis. 2025-12-08T17:58:34.215429096+00:00 stderr F 2025-12-08T17:58:34.215Z|00202|binding|INFO|service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk: Claiming 0a:58:0a:d9:00:51 10.217.0.81 2025-12-08T17:58:34.246198301+00:00 stderr F 2025-12-08T17:58:34.246Z|00203|binding|INFO|Setting lport service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk ovn-installed in OVS 2025-12-08T17:58:34.246198301+00:00 stderr F 2025-12-08T17:58:34.246Z|00204|binding|INFO|Setting lport service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk up in Southbound 2025-12-08T17:58:34.593556819+00:00 stderr F 2025-12-08T17:58:34.593Z|00205|binding|INFO|Releasing lport service-telemetry_default-interconnect-55bf8d5cb-76n5w from this chassis (sb_readonly=0) 2025-12-08T17:58:34.593556819+00:00 stderr F 2025-12-08T17:58:34.593Z|00206|binding|INFO|Setting lport service-telemetry_default-interconnect-55bf8d5cb-76n5w down in Southbound 2025-12-08T17:58:35.246398512+00:00 stderr F 2025-12-08T17:58:35.246Z|00207|binding|INFO|Claiming lport service-telemetry_default-interconnect-55bf8d5cb-rwr2k for this chassis. 
2025-12-08T17:58:35.246398512+00:00 stderr F 2025-12-08T17:58:35.246Z|00208|binding|INFO|service-telemetry_default-interconnect-55bf8d5cb-rwr2k: Claiming 0a:58:0a:d9:00:52 10.217.0.82 2025-12-08T17:58:35.292385490+00:00 stderr F 2025-12-08T17:58:35.292Z|00209|binding|INFO|Setting lport service-telemetry_default-interconnect-55bf8d5cb-rwr2k ovn-installed in OVS 2025-12-08T17:58:35.292385490+00:00 stderr F 2025-12-08T17:58:35.292Z|00210|binding|INFO|Setting lport service-telemetry_default-interconnect-55bf8d5cb-rwr2k up in Southbound 2025-12-08T17:59:06.551730713+00:00 stderr F 2025-12-08T17:59:06.551Z|00211|binding|INFO|Claiming lport service-telemetry_qdr-test for this chassis. 2025-12-08T17:59:06.551730713+00:00 stderr F 2025-12-08T17:59:06.551Z|00212|binding|INFO|service-telemetry_qdr-test: Claiming 0a:58:0a:d9:00:53 10.217.0.83 2025-12-08T17:59:06.591962903+00:00 stderr F 2025-12-08T17:59:06.591Z|00213|binding|INFO|Setting lport service-telemetry_qdr-test up in Southbound 2025-12-08T17:59:06.593009290+00:00 stderr F 2025-12-08T17:59:06.592Z|00214|binding|INFO|Setting lport service-telemetry_qdr-test ovn-installed in OVS 2025-12-08T17:59:14.684271440+00:00 stderr F 2025-12-08T17:59:14.684Z|00215|binding|INFO|Claiming lport service-telemetry_stf-smoketest-smoke1-pbhxq for this chassis. 2025-12-08T17:59:14.684271440+00:00 stderr F 2025-12-08T17:59:14.684Z|00216|binding|INFO|service-telemetry_stf-smoketest-smoke1-pbhxq: Claiming 0a:58:0a:d9:00:54 10.217.0.84 2025-12-08T17:59:14.722508157+00:00 stderr F 2025-12-08T17:59:14.722Z|00217|binding|INFO|Setting lport service-telemetry_stf-smoketest-smoke1-pbhxq ovn-installed in OVS 2025-12-08T17:59:14.722508157+00:00 stderr F 2025-12-08T17:59:14.722Z|00218|binding|INFO|Setting lport service-telemetry_stf-smoketest-smoke1-pbhxq up in Southbound 2025-12-08T17:59:15.198302157+00:00 stderr F 2025-12-08T17:59:15.198Z|00219|binding|INFO|Claiming lport service-telemetry_curl for this chassis. 2025-12-08T17:59:15.198302157+00:00 stderr F 2025-12-08T17:59:15.198Z|00220|binding|INFO|service-telemetry_curl: Claiming 0a:58:0a:d9:00:55 10.217.0.85 2025-12-08T17:59:15.231651806+00:00 stderr F 2025-12-08T17:59:15.231Z|00221|binding|INFO|Setting lport service-telemetry_curl ovn-installed in OVS 2025-12-08T17:59:15.231651806+00:00 stderr F 2025-12-08T17:59:15.231Z|00222|binding|INFO|Setting lport service-telemetry_curl up in Southbound 2025-12-08T17:59:18.947409298+00:00 stderr F 2025-12-08T17:59:18.947Z|00223|binding|INFO|Releasing lport service-telemetry_curl from this chassis (sb_readonly=0) 2025-12-08T17:59:18.947409298+00:00 stderr F 2025-12-08T17:59:18.947Z|00224|binding|INFO|Setting lport service-telemetry_curl down in Southbound 2025-12-08T17:59:52.203322785+00:00 stderr F 2025-12-08T17:59:52.203Z|00225|binding|INFO|Claiming lport openshift-marketplace_community-operators-jlbqc for this chassis. 
2025-12-08T17:59:52.203322785+00:00 stderr F 2025-12-08T17:59:52.203Z|00226|binding|INFO|openshift-marketplace_community-operators-jlbqc: Claiming 0a:58:0a:d9:00:56 10.217.0.86 2025-12-08T17:59:52.245274628+00:00 stderr F 2025-12-08T17:59:52.244Z|00227|binding|INFO|Setting lport openshift-marketplace_community-operators-jlbqc ovn-installed in OVS 2025-12-08T17:59:52.245274628+00:00 stderr F 2025-12-08T17:59:52.244Z|00228|binding|INFO|Setting lport openshift-marketplace_community-operators-jlbqc up in Southbound 2025-12-08T18:00:00.780636920+00:00 stderr F 2025-12-08T18:00:00.780Z|00229|binding|INFO|Claiming lport openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb for this chassis. 2025-12-08T18:00:00.780636920+00:00 stderr F 2025-12-08T18:00:00.780Z|00230|binding|INFO|openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb: Claiming 0a:58:0a:d9:00:57 10.217.0.87 2025-12-08T18:00:00.812239111+00:00 stderr F 2025-12-08T18:00:00.811Z|00231|binding|INFO|Setting lport openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb ovn-installed in OVS 2025-12-08T18:00:00.812239111+00:00 stderr F 2025-12-08T18:00:00.811Z|00232|binding|INFO|Setting lport openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb up in Southbound 2025-12-08T18:00:02.373521589+00:00 stderr F 2025-12-08T18:00:02.373Z|00233|binding|INFO|Releasing lport openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb from this chassis (sb_readonly=0) 2025-12-08T18:00:02.373521589+00:00 stderr F 2025-12-08T18:00:02.373Z|00234|binding|INFO|Setting lport openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb down in Southbound 2025-12-08T18:00:03.317631927+00:00 stderr F 2025-12-08T18:00:03.317Z|00235|binding|INFO|Releasing lport service-telemetry_stf-smoketest-smoke1-pbhxq from this chassis (sb_readonly=0) 2025-12-08T18:00:03.317631927+00:00 stderr F 2025-12-08T18:00:03.317Z|00236|binding|INFO|Setting lport service-telemetry_stf-smoketest-smoke1-pbhxq down in Southbound 2025-12-08T18:00:04.492606007+00:00 stderr F 2025-12-08T18:00:04.492Z|00237|binding|INFO|Releasing lport openshift-marketplace_community-operators-jlbqc from this chassis (sb_readonly=0) 2025-12-08T18:00:04.492606007+00:00 stderr F 2025-12-08T18:00:04.492Z|00238|binding|INFO|Setting lport openshift-marketplace_community-operators-jlbqc down in Southbound 2025-12-08T18:00:34.703927234+00:00 stderr F 2025-12-08T18:00:34.703Z|00239|memory_trim|INFO|Detected inactivity (last active 30012 ms ago): trimming memory 2025-12-08T18:01:18.772464401+00:00 stderr F 2025-12-08T18:01:18.772Z|00240|binding|INFO|Claiming lport service-telemetry_infrawatch-operators-b88kp for this chassis. 
2025-12-08T18:01:18.772464401+00:00 stderr F 2025-12-08T18:01:18.772Z|00241|binding|INFO|service-telemetry_infrawatch-operators-b88kp: Claiming 0a:58:0a:d9:00:58 10.217.0.88 2025-12-08T18:01:18.802229945+00:00 stderr F 2025-12-08T18:01:18.802Z|00242|binding|INFO|Setting lport service-telemetry_infrawatch-operators-b88kp ovn-installed in OVS 2025-12-08T18:01:18.802229945+00:00 stderr F 2025-12-08T18:01:18.802Z|00243|binding|INFO|Setting lport service-telemetry_infrawatch-operators-b88kp up in Southbound 2025-12-08T18:01:31.302040069+00:00 stderr F 2025-12-08T18:01:31.301Z|00244|binding|INFO|Releasing lport service-telemetry_infrawatch-operators-b88kp from this chassis (sb_readonly=0) 2025-12-08T18:01:31.302040069+00:00 stderr F 2025-12-08T18:01:31.301Z|00245|binding|INFO|Setting lport service-telemetry_infrawatch-operators-b88kp down in Southbound 2025-12-08T18:02:01.539125533+00:00 stderr F 2025-12-08T18:02:01.539Z|00246|memory_trim|INFO|Detected inactivity (last active 30007 ms ago): trimming memory 2025-12-08T18:02:41.040169288+00:00 stderr F 2025-12-08T18:02:41.039Z|00247|binding|INFO|Claiming lport openshift-must-gather-gctth_must-gather-5cz8j for this chassis. 2025-12-08T18:02:41.040169288+00:00 stderr F 2025-12-08T18:02:41.039Z|00248|binding|INFO|openshift-must-gather-gctth_must-gather-5cz8j: Claiming 0a:58:0a:d9:00:59 10.217.0.89 2025-12-08T18:02:41.081654143+00:00 stderr F 2025-12-08T18:02:41.081Z|00249|binding|INFO|Setting lport openshift-must-gather-gctth_must-gather-5cz8j ovn-installed in OVS 2025-12-08T18:02:41.081654143+00:00 stderr F 2025-12-08T18:02:41.081Z|00250|binding|INFO|Setting lport openshift-must-gather-gctth_must-gather-5cz8j up in Southbound 2025-12-08T18:03:11.060485721+00:00 stderr F 2025-12-08T18:03:11.060Z|00251|memory_trim|INFO|Detected inactivity (last active 30021 ms ago): trimming memory 2025-12-08T18:04:32.256432346+00:00 stderr F 2025-12-08T18:04:32.256Z|00252|binding|INFO|Claiming lport openshift-marketplace_certified-operators-p8pz8 for this chassis. 2025-12-08T18:04:32.256432346+00:00 stderr F 2025-12-08T18:04:32.256Z|00253|binding|INFO|openshift-marketplace_certified-operators-p8pz8: Claiming 0a:58:0a:d9:00:5a 10.217.0.90 2025-12-08T18:04:32.291279441+00:00 stderr F 2025-12-08T18:04:32.291Z|00254|binding|INFO|Setting lport openshift-marketplace_certified-operators-p8pz8 up in Southbound 2025-12-08T18:04:32.293978893+00:00 stderr F 2025-12-08T18:04:32.293Z|00255|binding|INFO|Setting lport openshift-marketplace_certified-operators-p8pz8 ovn-installed in OVS 2025-12-08T18:04:43.453019552+00:00 stderr F 2025-12-08T18:04:43.452Z|00256|binding|INFO|Claiming lport openshift-marketplace_redhat-operators-5gtms for this chassis. 
2025-12-08T18:04:43.453019552+00:00 stderr F 2025-12-08T18:04:43.452Z|00257|binding|INFO|openshift-marketplace_redhat-operators-5gtms: Claiming 0a:58:0a:d9:00:5b 10.217.0.91 2025-12-08T18:04:43.489312795+00:00 stderr F 2025-12-08T18:04:43.489Z|00258|binding|INFO|Setting lport openshift-marketplace_redhat-operators-5gtms ovn-installed in OVS 2025-12-08T18:04:43.489312795+00:00 stderr F 2025-12-08T18:04:43.489Z|00259|binding|INFO|Setting lport openshift-marketplace_redhat-operators-5gtms up in Southbound 2025-12-08T18:04:45.795156707+00:00 stderr F 2025-12-08T18:04:45.795Z|00260|binding|INFO|Releasing lport openshift-marketplace_certified-operators-p8pz8 from this chassis (sb_readonly=0) 2025-12-08T18:04:45.795156707+00:00 stderr F 2025-12-08T18:04:45.795Z|00261|binding|INFO|Setting lport openshift-marketplace_certified-operators-p8pz8 down in Southbound 2025-12-08T18:04:56.124379379+00:00 stderr F 2025-12-08T18:04:56.123Z|00262|binding|INFO|Removing iface 8c2078259dc7954 ovn-installed in OVS 2025-12-08T18:04:56.129994198+00:00 stderr F 2025-12-08T18:04:56.124Z|00263|binding|INFO|Removing lport openshift-must-gather-gctth_must-gather-5cz8j ovn-installed in OVS ././@LongLink0000644000000000000000000000024500000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/sbdb/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611521033065 5ustar zuulzuul././@LongLink0000644000000000000000000000025200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/sbdb/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000644000175000017500000000446515115611514033102 0ustar zuulzuul2025-12-08T17:53:45.772284220+00:00 stderr F + [[ -f /env/_master ]] 2025-12-08T17:53:45.772284220+00:00 stderr F + . 
/ovnkube-lib/ovnkube-lib.sh 2025-12-08T17:53:45.772508926+00:00 stderr F ++ set -x 2025-12-08T17:53:45.772508926+00:00 stderr F ++ K8S_NODE= 2025-12-08T17:53:45.772508926+00:00 stderr F ++ [[ -n '' ]] 2025-12-08T17:53:45.772508926+00:00 stderr F ++ northd_pidfile=/var/run/ovn/ovn-northd.pid 2025-12-08T17:53:45.772508926+00:00 stderr F ++ controller_pidfile=/var/run/ovn/ovn-controller.pid 2025-12-08T17:53:45.772508926+00:00 stderr F ++ controller_logfile=/var/log/ovn/acl-audit-log.log 2025-12-08T17:53:45.772508926+00:00 stderr F ++ vswitch_dbsock=/var/run/openvswitch/db.sock 2025-12-08T17:53:45.772508926+00:00 stderr F ++ nbdb_pidfile=/var/run/ovn/ovnnb_db.pid 2025-12-08T17:53:45.772508926+00:00 stderr F ++ nbdb_sock=/var/run/ovn/ovnnb_db.sock 2025-12-08T17:53:45.772508926+00:00 stderr F ++ nbdb_ctl=/var/run/ovn/ovnnb_db.ctl 2025-12-08T17:53:45.772508926+00:00 stderr F ++ sbdb_pidfile=/var/run/ovn/ovnsb_db.pid 2025-12-08T17:53:45.772508926+00:00 stderr F ++ sbdb_sock=/var/run/ovn/ovnsb_db.sock 2025-12-08T17:53:45.772508926+00:00 stderr F ++ sbdb_ctl=/var/run/ovn/ovnsb_db.ctl 2025-12-08T17:53:45.775715883+00:00 stderr F + trap quit-sbdb TERM INT 2025-12-08T17:53:45.775715883+00:00 stderr F + start-sbdb info 2025-12-08T17:53:45.775715883+00:00 stderr F + local log_level=info 2025-12-08T17:53:45.775715883+00:00 stderr F + [[ 1 -ne 1 ]] 2025-12-08T17:53:45.775715883+00:00 stderr F + wait 24000 2025-12-08T17:53:45.775715883+00:00 stderr F + exec /usr/share/ovn/scripts/ovn-ctl --no-monitor --db-sb-sock=/var/run/ovn/ovnsb_db.sock '--ovn-sb-log=-vconsole:info -vfile:off -vPATTERN:console:%D{%Y-%m-%dT%H:%M:%S.###Z}|%05N|%c%T|%p|%m' run_sb_ovsdb 2025-12-08T17:53:45.932131756+00:00 stderr F 2025-12-08T17:53:45.932Z|00001|vlog|INFO|opened log file /var/log/ovn/ovsdb-server-sb.log 2025-12-08T17:53:46.026805500+00:00 stderr F 2025-12-08T17:53:46.026Z|00002|ovsdb_server|INFO|ovsdb-server (Open vSwitch) 3.5.2-33.el9fdp 2025-12-08T17:53:56.033554307+00:00 stderr F 2025-12-08T17:53:56.032Z|00003|memory|INFO|18048 kB peak resident set size after 10.1 seconds 2025-12-08T17:53:56.033829575+00:00 stderr F 2025-12-08T17:53:56.033Z|00004|memory|INFO|atoms:18856 cells:17964 json-caches:2 monitors:5 n-weak-refs:263 sessions:3 ././@LongLink0000644000000000000000000000025600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kubecfg-setup/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611521033065 5ustar zuulzuul././@LongLink0000644000000000000000000000026300000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kubecfg-setup/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000644000175000017500000000000015115611514033057 0ustar zuulzuul././@LongLink0000644000000000000000000000024500000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/nbdb/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611521033065 5ustar zuulzuul././@LongLink0000644000000000000000000000025200000000000011602 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/nbdb/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000644000175000017500000000457115115611514033100 0ustar zuulzuul2025-12-08T17:53:43.508536359+00:00 stderr F + [[ -f /env/_master ]] 2025-12-08T17:53:43.508536359+00:00 stderr F + . /ovnkube-lib/ovnkube-lib.sh 2025-12-08T17:53:43.508536359+00:00 stderr F ++ set -x 2025-12-08T17:53:43.508536359+00:00 stderr F ++ K8S_NODE=crc 2025-12-08T17:53:43.508536359+00:00 stderr F ++ [[ -n crc ]] 2025-12-08T17:53:43.508536359+00:00 stderr F ++ [[ -f /env/crc ]] 2025-12-08T17:53:43.508536359+00:00 stderr F ++ northd_pidfile=/var/run/ovn/ovn-northd.pid 2025-12-08T17:53:43.508536359+00:00 stderr F ++ controller_pidfile=/var/run/ovn/ovn-controller.pid 2025-12-08T17:53:43.508536359+00:00 stderr F ++ controller_logfile=/var/log/ovn/acl-audit-log.log 2025-12-08T17:53:43.508536359+00:00 stderr F ++ vswitch_dbsock=/var/run/openvswitch/db.sock 2025-12-08T17:53:43.508536359+00:00 stderr F ++ nbdb_pidfile=/var/run/ovn/ovnnb_db.pid 2025-12-08T17:53:43.508536359+00:00 stderr F ++ nbdb_sock=/var/run/ovn/ovnnb_db.sock 2025-12-08T17:53:43.508536359+00:00 stderr F ++ nbdb_ctl=/var/run/ovn/ovnnb_db.ctl 2025-12-08T17:53:43.508536359+00:00 stderr F ++ sbdb_pidfile=/var/run/ovn/ovnsb_db.pid 2025-12-08T17:53:43.508536359+00:00 stderr F ++ sbdb_sock=/var/run/ovn/ovnsb_db.sock 2025-12-08T17:53:43.508536359+00:00 stderr F ++ sbdb_ctl=/var/run/ovn/ovnsb_db.ctl 2025-12-08T17:53:43.510071511+00:00 stderr F + trap quit-nbdb TERM INT 2025-12-08T17:53:43.510071511+00:00 stderr F + start-nbdb info 2025-12-08T17:53:43.510071511+00:00 stderr F + local log_level=info 2025-12-08T17:53:43.510107762+00:00 stderr F + [[ 1 -ne 1 ]] 2025-12-08T17:53:43.510398959+00:00 stderr F + wait 23893 2025-12-08T17:53:43.510609465+00:00 stderr F + exec /usr/share/ovn/scripts/ovn-ctl --no-monitor --db-nb-sock=/var/run/ovn/ovnnb_db.sock '--ovn-nb-log=-vconsole:info -vfile:off -vPATTERN:console:%D{%Y-%m-%dT%H:%M:%S.###Z}|%05N|%c%T|%p|%m' run_nb_ovsdb 2025-12-08T17:53:43.652801392+00:00 stderr F 2025-12-08T17:53:43.652Z|00001|vlog|INFO|opened log file /var/log/ovn/ovsdb-server-nb.log 2025-12-08T17:53:43.718650752+00:00 stderr F 2025-12-08T17:53:43.718Z|00002|ovsdb_server|INFO|ovsdb-server (Open vSwitch) 3.5.2-33.el9fdp 2025-12-08T17:53:53.725395736+00:00 stderr F 2025-12-08T17:53:53.725Z|00003|memory|INFO|14080 kB peak resident set size after 10.1 seconds 2025-12-08T17:53:53.726188808+00:00 stderr F 2025-12-08T17:53:53.726Z|00004|memory|INFO|atoms:7148 cells:4847 json-caches:2 monitors:4 n-weak-refs:165 sessions:2 ././@LongLink0000644000000000000000000000021000000000000011574 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-cr0000755000175000017500000000000015115611514032770 5ustar zuulzuul././@LongLink0000644000000000000000000000022400000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-readyz/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-cr0000755000175000017500000000000015115611521032766 5ustar 
zuulzuul././@LongLink0000644000000000000000000000023100000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-readyz/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-cr0000644000175000017500000000036015115611514032771 0ustar zuulzuul2025-12-08T17:42:27.680782595+00:00 stderr F I1208 17:42:27.680202 1 readyz.go:175] Listening on 0.0.0.0:9980 2025-12-08T17:42:37.770945111+00:00 stderr F I1208 17:42:37.770248 1 etcdcli_pool.go:70] creating a new cached client ././@LongLink0000644000000000000000000000023500000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-ensure-env-vars/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-cr0000755000175000017500000000000015115611521032766 5ustar zuulzuul././@LongLink0000644000000000000000000000024200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-ensure-env-vars/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-cr0000644000175000017500000000000015115611514032760 0ustar zuulzuul././@LongLink0000644000000000000000000000022000000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcdctl/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-cr0000755000175000017500000000000015115611521032766 5ustar zuulzuul././@LongLink0000644000000000000000000000022500000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcdctl/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-cr0000644000175000017500000000000015115611514032760 0ustar zuulzuul././@LongLink0000644000000000000000000000022500000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-metrics/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-cr0000755000175000017500000000000015115611521032766 5ustar zuulzuul././@LongLink0000644000000000000000000000023200000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-metrics/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-cr0000644000175000017500000004305415115611514033000 0ustar zuulzuul2025-12-08T17:42:27.275458959+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.275068Z","caller":"etcdmain/grpc_proxy.go:237","msg":"gRPC proxy server TLS","tls-info":"cert = /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-serving-metrics-crc.crt, key = /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-serving-metrics-crc.key, client-cert=, client-key=, trusted-ca = /etc/kubernetes/static-pod-certs/configmaps/etcd-all-bundles/metrics-ca-bundle.crt, client-cert-auth = false, crl-file = "} 
2025-12-08T17:42:27.276539725+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.276487Z","caller":"etcdmain/grpc_proxy.go:460","msg":"listening for gRPC proxy client requests","address":"127.0.0.1:9977"} 2025-12-08T17:42:27.276977604+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.276929Z","caller":"etcdmain/grpc_proxy.go:430","msg":"gRPC proxy client TLS","tls-info":"cert = /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-crc.crt, key = /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-crc.key, client-cert=, client-key=, trusted-ca = /etc/kubernetes/static-pod-certs/configmaps/etcd-all-bundles/server-ca-bundle.crt, client-cert-auth = false, crl-file = "} 2025-12-08T17:42:27.277511423+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.277041Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1] Channel created"} 2025-12-08T17:42:27.277741022+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.277701Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1] original dial target is: \"etcd-endpoints://0xc0002225a0/192.168.126.11:9978\""} 2025-12-08T17:42:27.277777222+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.277751Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1] parsed dial target is: {URL:{Scheme:etcd-endpoints Opaque: User: Host:0xc0002225a0 Path:/192.168.126.11:9978 RawPath: OmitHost:false ForceQuery:false RawQuery: Fragment: RawFragment:}}"} 2025-12-08T17:42:27.277784692+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.277773Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1] Channel authority set to \"192.168.126.11:9978\""} 2025-12-08T17:42:27.278499579+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.278445Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1] Resolver state updated: {\n \"Addresses\": [\n {\n \"Addr\": \"192.168.126.11:9978\",\n \"ServerName\": \"192.168.126.11:9978\",\n \"Attributes\": null,\n \"BalancerAttributes\": null,\n \"Metadata\": null\n }\n ],\n \"Endpoints\": [\n {\n \"Addresses\": [\n {\n \"Addr\": \"192.168.126.11:9978\",\n \"ServerName\": \"192.168.126.11:9978\",\n \"Attributes\": null,\n \"BalancerAttributes\": null,\n \"Metadata\": null\n }\n ],\n \"Attributes\": null\n }\n ],\n \"ServiceConfig\": {\n \"Config\": {\n \"Config\": null,\n \"LB\": \"round_robin\",\n \"Methods\": {}\n },\n \"Err\": null\n },\n \"Attributes\": null\n} (service config updated; resolver returned new addresses)"} 2025-12-08T17:42:27.278564119+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.278509Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1] Channel switches to new LB policy \"round_robin\""} 2025-12-08T17:42:27.278958688+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.278921Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: got new ClientConn state: {{[{Addr: \"192.168.126.11:9978\", ServerName: \"192.168.126.11:9978\", }] [{[{Addr: \"192.168.126.11:9978\", ServerName: \"192.168.126.11:9978\", }] }] 0xc0000b5200 } }"} 2025-12-08T17:42:27.278972528+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.278953Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel created"} 2025-12-08T17:42:27.279002938+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.278982Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[roundrobin] roundrobinPicker: Build called with info: {map[]}"} 2025-12-08T17:42:27.279023748+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:42:27.278999Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1] Channel Connectivity change to CONNECTING"} 2025-12-08T17:42:27.279176847+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.279073Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to CONNECTING"} 2025-12-08T17:42:27.279213117+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.279173Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel picks a new address \"192.168.126.11:9978\" to connect"} 2025-12-08T17:42:27.279365086+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.279281Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, CONNECTING"} 2025-12-08T17:42:27.279907785+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.279847Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] Creating new client transport to \"{Addr: \\\"192.168.126.11:9978\\\", ServerName: \\\"192.168.126.11:9978\\\", }\": connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:27.279951375+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:27.279919Z","caller":"zapgrpc/zapgrpc.go:191","msg":"[core] [Channel #1 SubChannel #2] grpc: addrConn.createTransport failed to connect to {Addr: \"192.168.126.11:9978\", ServerName: \"192.168.126.11:9978\", }. Err: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:27.279993125+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.279963Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to TRANSIENT_FAILURE, last error: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:27.280043595+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.280015Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, TRANSIENT_FAILURE"} 2025-12-08T17:42:27.280100665+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.280063Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1] Channel Connectivity change to TRANSIENT_FAILURE"} 2025-12-08T17:42:27.280536583+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.280447Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Server #3] Server created"} 2025-12-08T17:42:27.281672660+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.281622Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Server #3 ListenSocket #4] ListenSocket created"} 2025-12-08T17:42:27.282374227+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.282267Z","caller":"etcdmain/grpc_proxy.go:614","msg":"gRPC proxy listening for metrics","address":"https://0.0.0.0:9979"} 2025-12-08T17:42:27.282374227+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.282360Z","caller":"etcdmain/grpc_proxy.go:287","msg":"started gRPC proxy","address":"127.0.0.1:9977"} 2025-12-08T17:42:27.282413217+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.282371Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"} 2025-12-08T17:42:27.282421657+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.282402Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"} 
2025-12-08T17:42:27.282699016+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:27.282669Z","caller":"etcdmain/grpc_proxy.go:277","msg":"gRPC proxy server metrics URL serving"} 2025-12-08T17:42:28.280544433+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:28.280273Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to IDLE, last error: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:28.280544433+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:28.280398Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, IDLE"} 2025-12-08T17:42:28.280636694+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:28.280553Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to CONNECTING"} 2025-12-08T17:42:28.280636694+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:28.280581Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel picks a new address \"192.168.126.11:9978\" to connect"} 2025-12-08T17:42:28.280856896+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:28.280746Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, CONNECTING"} 2025-12-08T17:42:28.281191930+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:28.281067Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] Creating new client transport to \"{Addr: \\\"192.168.126.11:9978\\\", ServerName: \\\"192.168.126.11:9978\\\", }\": connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:28.281191930+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:28.281167Z","caller":"zapgrpc/zapgrpc.go:191","msg":"[core] [Channel #1 SubChannel #2] grpc: addrConn.createTransport failed to connect to {Addr: \"192.168.126.11:9978\", ServerName: \"192.168.126.11:9978\", }. 
Err: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:28.281286431+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:28.281231Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to TRANSIENT_FAILURE, last error: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:28.281317912+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:28.281295Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, TRANSIENT_FAILURE"} 2025-12-08T17:42:29.751141494+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:29.750971Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to IDLE, last error: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:29.751141494+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:29.751065Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, IDLE"} 2025-12-08T17:42:29.751141494+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:29.751107Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to CONNECTING"} 2025-12-08T17:42:29.751218634+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:29.751137Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel picks a new address \"192.168.126.11:9978\" to connect"} 2025-12-08T17:42:29.751382916+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:29.751227Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, CONNECTING"} 2025-12-08T17:42:29.751671540+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:29.751586Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] Creating new client transport to \"{Addr: \\\"192.168.126.11:9978\\\", ServerName: \\\"192.168.126.11:9978\\\", }\": connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:29.751671540+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:29.751626Z","caller":"zapgrpc/zapgrpc.go:191","msg":"[core] [Channel #1 SubChannel #2] grpc: addrConn.createTransport failed to connect to {Addr: \"192.168.126.11:9978\", ServerName: \"192.168.126.11:9978\", }. 
Err: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:29.751671540+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:29.751653Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to TRANSIENT_FAILURE, last error: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:29.751721700+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:29.751681Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, TRANSIENT_FAILURE"} 2025-12-08T17:42:32.745236276+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:32.745039Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to IDLE, last error: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:32.745236276+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:32.745155Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, IDLE"} 2025-12-08T17:42:32.745343409+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:32.745216Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to CONNECTING"} 2025-12-08T17:42:32.745343409+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:32.745282Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel picks a new address \"192.168.126.11:9978\" to connect"} 2025-12-08T17:42:32.745538605+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:32.745435Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, CONNECTING"} 2025-12-08T17:42:32.745759631+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:32.745684Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] Creating new client transport to \"{Addr: \\\"192.168.126.11:9978\\\", ServerName: \\\"192.168.126.11:9978\\\", }\": connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:32.745781722+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:32.745755Z","caller":"zapgrpc/zapgrpc.go:191","msg":"[core] [Channel #1 SubChannel #2] grpc: addrConn.createTransport failed to connect to {Addr: \"192.168.126.11:9978\", ServerName: \"192.168.126.11:9978\", }. 
Err: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:32.745912875+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:32.745822Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to TRANSIENT_FAILURE, last error: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:32.745983627+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:32.745938Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, TRANSIENT_FAILURE"} 2025-12-08T17:42:37.036588730+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:37.035930Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to IDLE, last error: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:9978: connect: connection refused\""} 2025-12-08T17:42:37.036588730+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:37.036507Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, IDLE"} 2025-12-08T17:42:37.036588730+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:37.036548Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to CONNECTING"} 2025-12-08T17:42:37.036659922+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:37.036593Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel picks a new address \"192.168.126.11:9978\" to connect"} 2025-12-08T17:42:37.036829697+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:37.036758Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, CONNECTING"} 2025-12-08T17:42:37.045832863+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:37.045785Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to READY"} 2025-12-08T17:42:37.045832863+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:37.045824Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[balancer] base.baseBalancer: handle SubConn state change: 0xc000264540, READY"} 2025-12-08T17:42:37.045926266+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:37.045895Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[roundrobin] roundrobinPicker: Build called with info: {map[SubConn(id:2):{{Addr: \"192.168.126.11:9978\", ServerName: \"192.168.126.11:9978\", }}]}"} 2025-12-08T17:42:37.045939446+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:37.045920Z","caller":"zapgrpc/zapgrpc.go:174","msg":"[core] [Channel #1] Channel Connectivity change to READY"} ././@LongLink0000644000000000000000000000021600000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/setup/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-cr0000755000175000017500000000000015115611521032766 5ustar zuulzuul././@LongLink0000644000000000000000000000022300000000000011600 Lustar 
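The repeated "Subchannel Connectivity change" entries above (IDLE -> CONNECTING -> TRANSIENT_FAILURE while 192.168.126.11:9978 refuses connections, then READY at 17:42:37 once etcd starts serving its metrics listener) are grpc-go's normal reconnect cycle. A minimal sketch of watching those same channel states with grpc-go, assuming grpc-go v1.63+ and an insecure dial for brevity; this is an illustration only, not code taken from this job:

// Hypothetical illustration: watch grpc-go channel state transitions against
// the same endpoint the log above was dialing. Requires grpc-go v1.63+ for
// grpc.NewClient; TLS is omitted for brevity (the real etcd metrics listener
// serves TLS).
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Target address taken from the log lines above.
	conn, err := grpc.NewClient("192.168.126.11:9978",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Kick the channel out of IDLE, then print every state change until it
	// reaches READY or the timeout expires, mirroring the zapgrpc lines above.
	conn.Connect()
	for {
		state := conn.GetState()
		log.Printf("channel state: %v", state)
		if state == connectivity.Ready {
			return
		}
		if !conn.WaitForStateChange(ctx, state) {
			log.Printf("timed out; last state: %v", conn.GetState())
			return
		}
	}
}

Each state printed by such a loop corresponds to one of the zapgrpc "Connectivity change" lines in the log above.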
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/setup/0.log
2025-12-08T17:42:24.372454708+00:00 stdout P Fixing etcd log permissions.
2025-12-08T17:42:24.383120496+00:00 stdout P Fixing etcd auto backup permissions.
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-rev/0.log
2025-12-08T17:42:37.691975297+00:00 stderr F I1208 17:42:37.689919 1 etcdcli_pool.go:70] creating a new cached client
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-resources-copy/0.log (empty)
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd/0.log
2025-12-08T17:42:32.001263494+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:32.000869Z","logger":"etcd-client","caller":"v3@v3.5.21/retry_interceptor.go:63","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc0000263c0/192.168.126.11:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\""}
2025-12-08T17:42:32.001263494+00:00 stderr F Error: context deadline exceeded
2025-12-08T17:42:32.060203412+00:00
stderr F dataDir is present on crc 2025-12-08T17:42:34.063451273+00:00 stderr P failed to create etcd client, but the server is already initialized as member "crc" before, starting as etcd member: context deadline exceeded 2025-12-08T17:42:34.065355395+00:00 stdout P Waiting for ports 2379, 2380 and 9978 to be released. 2025-12-08T17:42:34.078432341+00:00 stderr F 2025-12-08T17:42:34.078432341+00:00 stderr F real 0m0.013s 2025-12-08T17:42:34.078432341+00:00 stderr F user 0m0.000s 2025-12-08T17:42:34.078432341+00:00 stderr F sys 0m0.013s 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_QUOTA_BACKEND_BYTES=8589934592 2025-12-08T17:42:34.082268207+00:00 stdout F ALL_ETCD_ENDPOINTS=https://192.168.126.11:2379 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_TLS_MIN_VERSION=TLS1.2 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_IMAGE=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_STATIC_POD_VERSION=2 2025-12-08T17:42:34.082268207+00:00 stdout F ETCDCTL_ENDPOINTS=https://192.168.126.11:2379 2025-12-08T17:42:34.082268207+00:00 stdout F ETCDCTL_KEY=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-crc.key 2025-12-08T17:42:34.082268207+00:00 stdout F ETCDCTL_API=3 2025-12-08T17:42:34.082268207+00:00 stdout F ETCDCTL_CACERT=/etc/kubernetes/static-pod-certs/configmaps/etcd-all-bundles/server-ca-bundle.crt 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_HEARTBEAT_INTERVAL=100 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_NAME=crc 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_SOCKET_REUSE_ADDRESS=true 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_EXPERIMENTAL_WARNING_APPLY_DURATION=200ms 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_EXPERIMENTAL_MAX_LEARNERS=1 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_DATA_DIR=/var/lib/etcd 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_ELECTION_TIMEOUT=1000 2025-12-08T17:42:34.082268207+00:00 stdout F ETCDCTL_CERT=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-crc.crt 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_INITIAL_CLUSTER_STATE=existing 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_INITIAL_CLUSTER= 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_CIPHER_SUITES=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_EXPERIMENTAL_WATCH_PROGRESS_NOTIFY_INTERVAL=5s 2025-12-08T17:42:34.082268207+00:00 stdout F ETCD_ENABLE_PPROF=true 2025-12-08T17:42:34.083084569+00:00 stderr F + exec nice -n -19 ionice -c2 -n0 etcd --logger=zap --log-level=info --experimental-initial-corrupt-check=true --snapshot-count=10000 --initial-advertise-peer-urls=https://192.168.126.11:2380 --cert-file=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-serving-crc.crt --key-file=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-serving-crc.key --trusted-ca-file=/etc/kubernetes/static-pod-certs/configmaps/etcd-all-bundles/server-ca-bundle.crt --client-cert-auth=true --peer-cert-file=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-crc.crt --peer-key-file=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-crc.key 
--peer-trusted-ca-file=/etc/kubernetes/static-pod-certs/configmaps/etcd-all-bundles/server-ca-bundle.crt --peer-client-cert-auth=true --advertise-client-urls=https://192.168.126.11:2379 --listen-client-urls=https://0.0.0.0:2379 --listen-peer-urls=https://0.0.0.0:2380 --metrics=extensive --listen-metrics-urls=https://0.0.0.0:9978 2025-12-08T17:42:34.117420795+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.116816Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_CIPHER_SUITES","variable-value":"TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"} 2025-12-08T17:42:34.117420795+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117376Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_DATA_DIR","variable-value":"/var/lib/etcd"} 2025-12-08T17:42:34.117491517+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117403Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_ELECTION_TIMEOUT","variable-value":"1000"} 2025-12-08T17:42:34.117491517+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117424Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_ENABLE_PPROF","variable-value":"true"} 2025-12-08T17:42:34.117491517+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117465Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_EXPERIMENTAL_MAX_LEARNERS","variable-value":"1"} 2025-12-08T17:42:34.117517797+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117490Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_EXPERIMENTAL_WARNING_APPLY_DURATION","variable-value":"200ms"} 2025-12-08T17:42:34.117517797+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117506Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_EXPERIMENTAL_WATCH_PROGRESS_NOTIFY_INTERVAL","variable-value":"5s"} 2025-12-08T17:42:34.117545378+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117529Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_HEARTBEAT_INTERVAL","variable-value":"100"} 2025-12-08T17:42:34.117571719+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117550Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_INITIAL_CLUSTER_STATE","variable-value":"existing"} 2025-12-08T17:42:34.117640971+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117587Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_NAME","variable-value":"crc"} 2025-12-08T17:42:34.117664041+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117641Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_QUOTA_BACKEND_BYTES","variable-value":"8589934592"} 2025-12-08T17:42:34.117685102+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117664Z","caller":"flags/flag.go:113","msg":"recognized and used environment 
variable","variable-name":"ETCD_SOCKET_REUSE_ADDRESS","variable-value":"true"} 2025-12-08T17:42:34.117705833+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.117684Z","caller":"flags/flag.go:113","msg":"recognized and used environment variable","variable-name":"ETCD_TLS_MIN_VERSION","variable-value":"TLS1.2"} 2025-12-08T17:42:34.117760324+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:34.117713Z","caller":"flags/flag.go:93","msg":"unrecognized environment variable","environment-variable":"ETCD_IMAGE=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612"} 2025-12-08T17:42:34.117760324+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:34.117739Z","caller":"flags/flag.go:93","msg":"unrecognized environment variable","environment-variable":"ETCD_STATIC_POD_VERSION=2"} 2025-12-08T17:42:34.117803385+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:34.117769Z","caller":"flags/flag.go:93","msg":"unrecognized environment variable","environment-variable":"ETCD_INITIAL_CLUSTER="} 2025-12-08T17:42:34.118368011+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:34.118255Z","caller":"embed/config.go:694","msg":"Running http and grpc server on single port. This is not recommended for production."} 2025-12-08T17:42:34.118772912+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.118376Z","caller":"etcdmain/etcd.go:73","msg":"Running: ","args":["etcd","--logger=zap","--log-level=info","--experimental-initial-corrupt-check=true","--snapshot-count=10000","--initial-advertise-peer-urls=https://192.168.126.11:2380","--cert-file=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-serving-crc.crt","--key-file=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-serving-crc.key","--trusted-ca-file=/etc/kubernetes/static-pod-certs/configmaps/etcd-all-bundles/server-ca-bundle.crt","--client-cert-auth=true","--peer-cert-file=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-crc.crt","--peer-key-file=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-crc.key","--peer-trusted-ca-file=/etc/kubernetes/static-pod-certs/configmaps/etcd-all-bundles/server-ca-bundle.crt","--peer-client-cert-auth=true","--advertise-client-urls=https://192.168.126.11:2379","--listen-client-urls=https://0.0.0.0:2379","--listen-peer-urls=https://0.0.0.0:2380","--metrics=extensive","--listen-metrics-urls=https://0.0.0.0:9978"]} 2025-12-08T17:42:34.119774079+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:34.119671Z","caller":"etcdmain/etcd.go:446","msg":"found invalid file under data directory","filename":"revision.json","data-dir":"/var/lib/etcd"} 2025-12-08T17:42:34.119774079+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.119735Z","caller":"etcdmain/etcd.go:116","msg":"server has been already initialized","data-dir":"/var/lib/etcd","dir-type":"member"} 2025-12-08T17:42:34.119816620+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:34.119779Z","caller":"embed/config.go:694","msg":"Running http and grpc server on single port. 
This is not recommended for production."} 2025-12-08T17:42:34.119837631+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.119811Z","caller":"embed/etcd.go:134","msg":"configuring socket options","reuse-address":true,"reuse-port":false} 2025-12-08T17:42:34.119862492+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.119841Z","caller":"embed/etcd.go:140","msg":"configuring peer listeners","listen-peer-urls":["https://0.0.0.0:2380"]} 2025-12-08T17:42:34.120071007+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.120001Z","caller":"embed/etcd.go:531","msg":"starting with peer TLS","tls-info":"cert = /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-crc.crt, key = /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-peer-crc.key, client-cert=, client-key=, trusted-ca = /etc/kubernetes/static-pod-certs/configmaps/etcd-all-bundles/server-ca-bundle.crt, client-cert-auth = true, crl-file = ","cipher-suites":["TLS_AES_128_GCM_SHA256","TLS_AES_256_GCM_SHA384","TLS_CHACHA20_POLY1305_SHA256","TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384","TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384","TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256","TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"]} 2025-12-08T17:42:34.121927518+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.121796Z","caller":"embed/etcd.go:148","msg":"configuring client listeners","listen-client-urls":["https://0.0.0.0:2379"]} 2025-12-08T17:42:34.121927518+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.121847Z","caller":"embed/etcd.go:657","msg":"pprof is enabled","path":"/debug/pprof"} 2025-12-08T17:42:34.122208016+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.122095Z","caller":"embed/etcd.go:325","msg":"starting an etcd server","etcd-version":"3.5.21","git-sha":"df6e08fa","go-version":"go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime","go-os":"linux","go-arch":"amd64","max-cpu-set":12,"max-cpu-available":12,"member-initialized":true,"name":"crc","data-dir":"/var/lib/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.126.11:2380"],"listen-peer-urls":["https://0.0.0.0:2380"],"advertise-client-urls":["https://192.168.126.11:2379"],"listen-client-urls":["https://0.0.0.0:2379"],"listen-metrics-urls":["https://0.0.0.0:9978"],"cors":["*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"existing","initial-cluster-token":"","quota-backend-bytes":8589934592,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"initial-corrupt-check":true,"corrupt-check-time-interval":"0s","compact-check-time-enabled":false,"compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","downgrade-check-interval":"5s","max-learners":1} 2025-12-08T17:42:34.123379507+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:34.122675Z","caller":"fileutil/fileutil.go:53","msg":"check file permission","error":"directory \"/var/lib/etcd/member/snap\" exist, but the permission is \"drwxr-xr-x\". 
The recommended permission is \"-rwx------\" to prevent possible unprivileged access to the data"} 2025-12-08T17:42:34.160039487+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.159823Z","caller":"etcdserver/backend.go:81","msg":"opened backend db","path":"/var/lib/etcd/member/snap/db","took":"36.31865ms"} 2025-12-08T17:42:34.604496941+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.604343Z","caller":"etcdserver/server.go:516","msg":"recovered v2 store from snapshot","snapshot-index":30003,"snapshot-size":"8.9 kB"} 2025-12-08T17:42:34.604496941+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.604450Z","caller":"etcdserver/server.go:529","msg":"recovered v3 backend from snapshot","backend-size-bytes":63799296,"backend-size":"64 MB","backend-size-in-use-bytes":60313600,"backend-size-in-use":"60 MB"} 2025-12-08T17:42:34.913525220+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.913343Z","caller":"etcdserver/raft.go:542","msg":"restarting local member","cluster-id":"37a6ceb54a88a89a","local-member-id":"d44fc94b15474c4c","commit-index":39602} 2025-12-08T17:42:34.914309532+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.913969Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"d44fc94b15474c4c switched to configuration voters=(15298667783517588556)"} 2025-12-08T17:42:34.914309532+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.914117Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"d44fc94b15474c4c became follower at term 9"} 2025-12-08T17:42:34.914309532+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.914136Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"newRaft d44fc94b15474c4c [peers: [d44fc94b15474c4c], term: 9, commit: 39602, applied: 30003, lastindex: 39602, lastterm: 9]"} 2025-12-08T17:42:34.914380134+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.914336Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"} 2025-12-08T17:42:34.914380134+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.914362Z","caller":"membership/cluster.go:280","msg":"recovered/added member from store","cluster-id":"37a6ceb54a88a89a","local-member-id":"d44fc94b15474c4c","recovered-remote-peer-id":"d44fc94b15474c4c","recovered-remote-peer-urls":["https://192.168.126.11:2380"],"recovered-remote-peer-is-learner":false} 2025-12-08T17:42:34.914420265+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.914379Z","caller":"membership/cluster.go:290","msg":"set cluster version from store","cluster-version":"3.5"} 2025-12-08T17:42:34.914855457+00:00 stderr F {"level":"warn","ts":"2025-12-08T17:42:34.914739Z","caller":"auth/store.go:1241","msg":"simple token is not cryptographically signed"} 2025-12-08T17:42:34.914950249+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.914867Z","caller":"mvcc/kvstore.go:348","msg":"restored last compact revision","meta-bucket-name":"meta","meta-bucket-name-key":"finishedCompactRev","restored-compact-revision":33058} 2025-12-08T17:42:34.955950288+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.955754Z","caller":"mvcc/kvstore.go:425","msg":"kvstore restored","current-rev":35816} 2025-12-08T17:42:34.955950288+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.955874Z","caller":"etcdserver/server.go:637","msg":"restore consistentIndex","index":39602} 2025-12-08T17:42:34.956084252+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.956030Z","caller":"etcdserver/quota.go:117","msg":"enabled backend 
quota","quota-name":"v3-applier","quota-size-bytes":8589934592,"quota-size":"8.6 GB"} 2025-12-08T17:42:34.956627136+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.956564Z","caller":"etcdserver/corrupt.go:96","msg":"starting initial corruption check","local-member-id":"d44fc94b15474c4c","timeout":"27s"} 2025-12-08T17:42:34.969700563+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.969595Z","caller":"etcdserver/corrupt.go:177","msg":"initial corruption checking passed; no corruption","local-member-id":"d44fc94b15474c4c"} 2025-12-08T17:42:34.969700563+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.969669Z","caller":"etcdserver/server.go:875","msg":"starting etcd server","local-member-id":"d44fc94b15474c4c","local-server-version":"3.5.21","cluster-id":"37a6ceb54a88a89a","cluster-version":"3.5"} 2025-12-08T17:42:34.970491204+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.970316Z","caller":"fileutil/purge.go:50","msg":"started to purge file","dir":"/var/lib/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"} 2025-12-08T17:42:34.970645569+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.970581Z","caller":"fileutil/purge.go:50","msg":"started to purge file","dir":"/var/lib/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"} 2025-12-08T17:42:34.970732951+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.970582Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"} 2025-12-08T17:42:34.970732951+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.970464Z","caller":"etcdserver/server.go:768","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"d44fc94b15474c4c","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"} 2025-12-08T17:42:34.970812203+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.970694Z","caller":"fileutil/purge.go:50","msg":"started to purge file","dir":"/var/lib/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"} 2025-12-08T17:42:34.972798188+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.972697Z","caller":"embed/etcd.go:765","msg":"starting with client TLS","tls-info":"cert = /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-serving-crc.crt, key = /etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-serving-crc.key, client-cert=, client-key=, trusted-ca = /etc/kubernetes/static-pod-certs/configmaps/etcd-all-bundles/server-ca-bundle.crt, client-cert-auth = true, crl-file = ","cipher-suites":["TLS_AES_128_GCM_SHA256","TLS_AES_256_GCM_SHA384","TLS_CHACHA20_POLY1305_SHA256","TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384","TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384","TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256","TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"]} 2025-12-08T17:42:34.972941431+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.972795Z","caller":"embed/etcd.go:636","msg":"serving peer traffic","address":"[::]:2380"} 2025-12-08T17:42:34.972974402+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.972927Z","caller":"embed/etcd.go:606","msg":"cmux::serve","address":"[::]:2380"} 2025-12-08T17:42:34.973533397+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.973449Z","caller":"embed/etcd.go:294","msg":"now serving 
peer/client/metrics","local-member-id":"d44fc94b15474c4c","initial-advertise-peer-urls":["https://192.168.126.11:2380"],"listen-peer-urls":["https://0.0.0.0:2380"],"advertise-client-urls":["https://192.168.126.11:2379"],"listen-client-urls":["https://0.0.0.0:2379"],"listen-metrics-urls":["https://0.0.0.0:9978"]} 2025-12-08T17:42:34.973564378+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:34.973523Z","caller":"embed/etcd.go:911","msg":"serving metrics","address":"https://0.0.0.0:9978"} 2025-12-08T17:42:35.615778105+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.615604Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"d44fc94b15474c4c is starting a new election at term 9"} 2025-12-08T17:42:35.615778105+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.615707Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"d44fc94b15474c4c became pre-candidate at term 9"} 2025-12-08T17:42:35.616381121+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.615796Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"d44fc94b15474c4c received MsgPreVoteResp from d44fc94b15474c4c at term 9"} 2025-12-08T17:42:35.616381121+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.615818Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"d44fc94b15474c4c became candidate at term 10"} 2025-12-08T17:42:35.616381121+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.615826Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"d44fc94b15474c4c received MsgVoteResp from d44fc94b15474c4c at term 10"} 2025-12-08T17:42:35.616381121+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.615838Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"d44fc94b15474c4c became leader at term 10"} 2025-12-08T17:42:35.616381121+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.615848Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: d44fc94b15474c4c elected leader d44fc94b15474c4c at term 10"} 2025-12-08T17:42:35.616381121+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.616317Z","caller":"etcdserver/server.go:2153","msg":"published local member to cluster through raft","local-member-id":"d44fc94b15474c4c","local-member-attributes":"{Name:crc ClientURLs:[https://192.168.126.11:2379]}","request-path":"/0/members/d44fc94b15474c4c/attributes","cluster-id":"37a6ceb54a88a89a","publish-timeout":"27s"} 2025-12-08T17:42:35.616381121+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.616338Z","caller":"embed/serve.go:124","msg":"ready to serve client requests"} 2025-12-08T17:42:35.616731791+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.616517Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"} 2025-12-08T17:42:35.616731791+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.616688Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"} 2025-12-08T17:42:35.617790050+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.617717Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"} 2025-12-08T17:42:35.618698695+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:35.618641Z","caller":"embed/serve.go:275","msg":"serving client traffic securely","traffic":"grpc+http","address":"[::]:2379"} 2025-12-08T17:42:43.991923141+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:43.991094Z","caller":"etcdserver/server.go:1485","msg":"triggering 
snapshot","local-member-id":"d44fc94b15474c4c","local-member-applied-index":40004,"local-member-snapshot-index":30003,"local-member-snapshot-count":10000} 2025-12-08T17:42:43.991923141+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:43.991706Z","caller":"etcdserver/server.go:2548","msg":"saved snapshot","snapshot-index":40004} 2025-12-08T17:42:43.991923141+00:00 stderr F {"level":"info","ts":"2025-12-08T17:42:43.991748Z","caller":"etcdserver/server.go:2578","msg":"compacted Raft logs","compact-index":35004} 2025-12-08T17:54:23.275765880+00:00 stderr F {"level":"info","ts":"2025-12-08T17:54:23.275624Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":39774} 2025-12-08T17:54:23.494850416+00:00 stderr F {"level":"info","ts":"2025-12-08T17:54:23.494747Z","caller":"mvcc/kvstore_compaction.go:71","msg":"finished scheduled compaction","compact-revision":39774,"took":"213.543457ms","hash":3921327326,"current-db-size-bytes":84406272,"current-db-size":"84 MB","current-db-size-in-use-bytes":42967040,"current-db-size-in-use":"43 MB"} 2025-12-08T17:54:23.494850416+00:00 stderr F {"level":"info","ts":"2025-12-08T17:54:23.494800Z","caller":"mvcc/hash.go:151","msg":"storing new hash","hash":3921327326,"revision":39774,"compact-revision":33058} 2025-12-08T17:55:11.392318407+00:00 stderr F {"level":"info","ts":"2025-12-08T17:55:11.392234Z","caller":"wal/wal.go:788","msg":"created a new WAL segment","path":"/var/lib/etcd/member/wal/0000000000000003-000000000000b1cc.wal"} 2025-12-08T17:58:12.238597161+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:12.238330Z","caller":"etcdserver/server.go:1485","msg":"triggering snapshot","local-member-id":"d44fc94b15474c4c","local-member-applied-index":50005,"local-member-snapshot-index":40004,"local-member-snapshot-count":10000} 2025-12-08T17:58:12.239949646+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:12.239718Z","caller":"etcdserver/server.go:2548","msg":"saved snapshot","snapshot-index":50005} 2025-12-08T17:58:12.239949646+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:12.239817Z","caller":"etcdserver/server.go:2578","msg":"compacted Raft logs","compact-index":45005} 2025-12-08T17:58:44.867937863+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:44.867809Z","caller":"wal/wal.go:788","msg":"created a new WAL segment","path":"/var/lib/etcd/member/wal/0000000000000004-000000000000c4cf.wal"} 2025-12-08T17:59:23.278678811+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:23.278556Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":40593} 2025-12-08T17:59:23.414085189+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:23.413977Z","caller":"mvcc/kvstore_compaction.go:71","msg":"finished scheduled compaction","compact-revision":40593,"took":"131.701551ms","hash":2730509047,"current-db-size-bytes":119218176,"current-db-size":"119 MB","current-db-size-in-use-bytes":118484992,"current-db-size-in-use":"118 MB"} 2025-12-08T17:59:23.414085189+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:23.414023Z","caller":"mvcc/hash.go:151","msg":"storing new hash","hash":2730509047,"revision":40593,"compact-revision":39774} 2025-12-08T18:04:23.281626713+00:00 stderr F {"level":"info","ts":"2025-12-08T18:04:23.281156Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":45968} 2025-12-08T18:04:23.512391753+00:00 stderr F {"level":"info","ts":"2025-12-08T18:04:23.512298Z","caller":"mvcc/kvstore_compaction.go:71","msg":"finished scheduled 
compaction","compact-revision":45968,"took":"227.101173ms","hash":2266231461,"current-db-size-bytes":120258560,"current-db-size":"120 MB","current-db-size-in-use-bytes":55484416,"current-db-size-in-use":"56 MB"} 2025-12-08T18:04:23.512391753+00:00 stderr F {"level":"info","ts":"2025-12-08T18:04:23.512356Z","caller":"mvcc/hash.go:151","msg":"storing new hash","hash":2266231461,"revision":45968,"compact-revision":40593} ././@LongLink0000644000000000000000000000026200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000755000175000017500000000000015115611513032776 5ustar zuulzuul././@LongLink0000644000000000000000000000030200000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000755000175000017500000000000015115611520032774 5ustar zuulzuul././@LongLink0000644000000000000000000000030700000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000644000175000017500000000227415115611513033005 0ustar zuulzuul2025-12-08T17:44:02.352107643+00:00 stderr F W1208 17:44:02.351197 6684 deprecated.go:66] 2025-12-08T17:44:02.352107643+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:02.352107643+00:00 stderr F 2025-12-08T17:44:02.352107643+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 
2025-12-08T17:44:02.352107643+00:00 stderr F 2025-12-08T17:44:02.352107643+00:00 stderr F =============================================== 2025-12-08T17:44:02.352107643+00:00 stderr F 2025-12-08T17:44:02.352107643+00:00 stderr F I1208 17:44:02.351350 6684 kube-rbac-proxy.go:532] Reading config file: /etc/kube-rbac-proxy/config-file.yaml 2025-12-08T17:44:02.352107643+00:00 stderr F I1208 17:44:02.352075 6684 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:02.353079179+00:00 stderr F I1208 17:44:02.353052 6684 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:02.354727994+00:00 stderr F I1208 17:44:02.354700 6684 kube-rbac-proxy.go:397] Starting TCP socket on 0.0.0.0:9001 2025-12-08T17:44:02.355096624+00:00 stderr F I1208 17:44:02.355079 6684 kube-rbac-proxy.go:404] Listening securely on 0.0.0.0:9001 ././@LongLink0000644000000000000000000000031000000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/machine-config-daemon/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000755000175000017500000000000015115611520032774 5ustar zuulzuul././@LongLink0000644000000000000000000000031500000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/machine-config-daemon/5.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000644000175000017500000020415715115611513033011 0ustar zuulzuul2025-12-08T18:01:32.268674171+00:00 stderr F I1208 18:01:32.268583 44570 start.go:70] Version: 89b561f0 (f587a1bfbaba518cc1d49ad6300e29eeb9c38cec) 2025-12-08T18:01:32.268852676+00:00 stderr F I1208 18:01:32.268807 44570 update.go:2651] Running: mount --rbind /run/secrets /rootfs/run/secrets 2025-12-08T18:01:32.272310008+00:00 stderr F I1208 18:01:32.272246 44570 update.go:2651] Running: mount --rbind /usr/bin /rootfs/run/machine-config-daemon-bin 2025-12-08T18:01:32.274767273+00:00 stderr F I1208 18:01:32.274679 44570 daemon.go:555] using appropriate binary for source=rhel-9 target=rhel-9 2025-12-08T18:01:32.372527028+00:00 stderr F I1208 18:01:32.372486 44570 daemon.go:608] Invoking re-exec /run/bin/machine-config-daemon 2025-12-08T18:01:32.411133187+00:00 stderr F I1208 18:01:32.411084 44570 start.go:70] Version: 89b561f0 (f587a1bfbaba518cc1d49ad6300e29eeb9c38cec) 2025-12-08T18:01:32.411673841+00:00 stderr F I1208 18:01:32.411656 44570 image_manager_helper.go:194] Linking rpm-ostree authfile to /etc/mco/internal-registry-pull-secret.json 2025-12-08T18:01:32.477351100+00:00 stderr F I1208 18:01:32.476860 44570 daemon.go:345] Booted osImageURL: image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest (9.6.20251021-0) 765a8d9fdcb7d177cbf4fd31343316543b668c78028d2ab915d810e45d5d583b 2025-12-08T18:01:32.478222914+00:00 stderr F I1208 18:01:32.478205 44570 start.go:136] overriding kubernetes api to https://api-int.crc.testing:6443 2025-12-08T18:01:32.479421985+00:00 stderr F I1208 18:01:32.479351 44570 metrics.go:92] Registering Prometheus metrics 2025-12-08T18:01:32.479502227+00:00 stderr F I1208 18:01:32.479476 44570 metrics.go:99] Starting metrics listener on 127.0.0.1:8797 2025-12-08T18:01:32.492789402+00:00 stderr F I1208 18:01:32.492764 44570 
simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T18:01:32.498398511+00:00 stderr F I1208 18:01:32.498244 44570 event.go:377] Event(v1.ObjectReference{Kind:"Node", Namespace:"openshift-machine-config-operator", Name:"crc", UID:"23216ff3-032e-49af-af7e-1d23d5907b59", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T18:01:32.499190492+00:00 stderr F I1208 18:01:32.499079 44570 featuregates.go:112] FeatureGates initialized: enabled=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages 
ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks], disabled=[AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T18:01:32.501754140+00:00 stderr F I1208 18:01:32.501696 44570 writer.go:87] NodeWriter initialized with credentials from /var/lib/kubelet/kubeconfig 2025-12-08T18:01:32.501909785+00:00 stderr F I1208 18:01:32.501835 44570 start.go:221] Feature enabled: PinnedImages 2025-12-08T18:01:32.502144021+00:00 stderr F I1208 18:01:32.502111 44570 update.go:2696] "Starting to manage node: crc" 2025-12-08T18:01:32.507368780+00:00 stderr F I1208 18:01:32.507311 44570 image_manager_helper.go:92] Running captured: rpm-ostree status 2025-12-08T18:01:32.602685019+00:00 stderr F I1208 18:01:32.602562 44570 pinned_image_set.go:819] Starting PinnedImageSet Manager 2025-12-08T18:01:32.674185874+00:00 stderr F I1208 18:01:32.674110 44570 daemon.go:1827] State: idle 2025-12-08T18:01:32.674185874+00:00 stderr F Deployments: 2025-12-08T18:01:32.674185874+00:00 stderr F * ostree-unverified-registry:image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest 2025-12-08T18:01:32.674185874+00:00 stderr F Digest: sha256:97576b6e5dcea61323cc5edae1d4c603ef3869df0ea140c0ada45fa333ff09d8 2025-12-08T18:01:32.674185874+00:00 stderr F Version: 9.6.20251021-0 (2025-11-03T09:00:13Z) 2025-12-08T18:01:32.674185874+00:00 stderr F LayeredPackages: cloud-init gvisor-tap-vsock-gvforwarder hyperv-daemons 2025-12-08T18:01:32.674626326+00:00 stderr F I1208 18:01:32.674578 44570 coreos.go:53] CoreOS aleph version: mtime=2022-08-01 23:42:11 +0000 UTC 2025-12-08T18:01:32.674626326+00:00 stderr F { 
2025-12-08T18:01:32.674626326+00:00 stderr F "container-image": { 2025-12-08T18:01:32.674626326+00:00 stderr F "image-digest": "sha256:346eadc1d679be03d2b6a0dc447edded7077483224443f2a27652056e5e51ed8", 2025-12-08T18:01:32.674626326+00:00 stderr F "image-labels": { 2025-12-08T18:01:32.674626326+00:00 stderr F "com.coreos.osname": "rhcos", 2025-12-08T18:01:32.674626326+00:00 stderr F "containers.bootc": "1", 2025-12-08T18:01:32.674626326+00:00 stderr F "coreos-assembler.image-config-checksum": "b444a32e2801642f1e41777fd51fa53304496c58a3a6b15e5964a1f86f866507", 2025-12-08T18:01:32.674626326+00:00 stderr F "io.openshift.build.version-display-names": "machine-os=Red Hat Enterprise Linux CoreOS", 2025-12-08T18:01:32.674626326+00:00 stderr F "io.openshift.build.versions": "machine-os=9.6.20251015-1", 2025-12-08T18:01:32.674626326+00:00 stderr F "org.opencontainers.image.revision": "7b9eaa1ba9269e6287cb00f7044614b0e9da747e", 2025-12-08T18:01:32.674626326+00:00 stderr F "org.opencontainers.image.source": "https://github.com/coreos/rhel-coreos-config", 2025-12-08T18:01:32.674626326+00:00 stderr F "org.opencontainers.image.version": "9.6.20251015-1", 2025-12-08T18:01:32.674626326+00:00 stderr F "ostree.bootable": "true", 2025-12-08T18:01:32.674626326+00:00 stderr F "ostree.commit": "8df94c06f4995c7f493360f258ee92a068ab3280ea64919ec2bf9945a8648a4d", 2025-12-08T18:01:32.674626326+00:00 stderr F "ostree.final-diffid": "sha256:12787d84fa137cd5649a9005efe98ec9d05ea46245fdc50aecb7dd007f2035b1", 2025-12-08T18:01:32.674626326+00:00 stderr F "ostree.linux": "5.14.0-570.55.1.el9_6.x86_64", 2025-12-08T18:01:32.674626326+00:00 stderr F "rpmostree.inputhash": "b2542ee90d9bfa3873e873c3ad0e6550db088c732dbef4033568bbbd6dc58a81" 2025-12-08T18:01:32.674626326+00:00 stderr F }, 2025-12-08T18:01:32.674626326+00:00 stderr F "image-name": "oci-archive:/rhcos-9.6.20251015-1-ostree.x86_64.ociarchive" 2025-12-08T18:01:32.674626326+00:00 stderr F }, 2025-12-08T18:01:32.674626326+00:00 stderr F "osbuild-version": "161", 2025-12-08T18:01:32.674626326+00:00 stderr F "ostree-commit": "8df94c06f4995c7f493360f258ee92a068ab3280ea64919ec2bf9945a8648a4d", 2025-12-08T18:01:32.674626326+00:00 stderr F "ref": "docker://ostree-image-signed:oci-archive:/rhcos-9.6.20251015-1-ostree.x86_64.ociarchive", 2025-12-08T18:01:32.674626326+00:00 stderr F "version": "9.6.20251015-1" 2025-12-08T18:01:32.674626326+00:00 stderr F } 2025-12-08T18:01:32.674698068+00:00 stderr F I1208 18:01:32.674669 44570 coreos.go:70] Ignition provisioning: time=2025-11-02T07:44:17Z 2025-12-08T18:01:32.674698068+00:00 stderr F I1208 18:01:32.674683 44570 image_manager_helper.go:92] Running captured: journalctl --list-boots 2025-12-08T18:01:32.690223701+00:00 stderr F I1208 18:01:32.690095 44570 daemon.go:1836] journalctl --list-boots: 2025-12-08T18:01:32.690223701+00:00 stderr F IDX BOOT ID FIRST ENTRY LAST ENTRY 2025-12-08T18:01:32.690223701+00:00 stderr F -3 5cc629ac7367418d888178e530691988 Mon 2025-11-03 09:44:05 UTC Mon 2025-11-03 09:44:09 UTC 2025-12-08T18:01:32.690223701+00:00 stderr F -2 9ce94f2d4be449f9a71ac96c59658a3d Mon 2025-11-03 09:44:31 UTC Mon 2025-11-03 09:45:03 UTC 2025-12-08T18:01:32.690223701+00:00 stderr F -1 fe16203d7f904480a7094aaec9c44109 Mon 2025-12-08 17:36:32 UTC Mon 2025-12-08 17:40:38 UTC 2025-12-08T18:01:32.690223701+00:00 stderr F 0 3b24470386d14a74bdbb1446f2890ff6 Mon 2025-12-08 17:40:45 UTC Mon 2025-12-08 18:01:32 UTC 2025-12-08T18:01:32.690223701+00:00 stderr F I1208 18:01:32.690139 44570 image_manager_helper.go:92] Running captured: 
systemctl list-units --state=failed --no-legend 2025-12-08T18:01:32.703938516+00:00 stderr F I1208 18:01:32.703841 44570 daemon.go:1852] systemd service state: OK 2025-12-08T18:01:32.703938516+00:00 stderr F I1208 18:01:32.703909 44570 daemon.go:1405] Starting MachineConfigDaemon 2025-12-08T18:01:32.704056159+00:00 stderr F I1208 18:01:32.703999 44570 daemon.go:1412] Enabling Kubelet Healthz Monitor 2025-12-08T18:01:32.765138586+00:00 stderr F I1208 18:01:32.765059 44570 daemon.go:3034] Found 3 requested local packages in the booted deployment 2025-12-08T18:01:32.765138586+00:00 stderr F I1208 18:01:32.765106 44570 daemon.go:3043] Unsupported package cloud-init 2025-12-08T18:01:32.765138586+00:00 stderr F I1208 18:01:32.765119 44570 daemon.go:3043] Unsupported package gvisor-tap-vsock-gvforwarder 2025-12-08T18:01:32.765138586+00:00 stderr F I1208 18:01:32.765124 44570 daemon.go:3043] Unsupported package hyperv-daemons 2025-12-08T18:01:33.518871977+00:00 stderr F I1208 18:01:33.518750 44570 daemon.go:689] Node crc is part of the control plane 2025-12-08T18:01:33.835949665+00:00 stderr F I1208 18:01:33.835837 44570 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs1433780695 --cleanup 2025-12-08T18:01:33.838543014+00:00 stderr F [2025-12-08T18:01:33Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T18:01:33.838630236+00:00 stdout F 2025-12-08T18:01:33.838652227+00:00 stderr F [2025-12-08T18:01:33Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T18:01:33.849364652+00:00 stderr F I1208 18:01:33.849273 44570 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T18:01:33.849364652+00:00 stderr F I1208 18:01:33.849315 44570 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:01:33.849364652+00:00 stderr F I1208 18:01:33.849326 44570 daemon.go:1795] state: Degraded 2025-12-08T18:01:33.849415484+00:00 stderr F I1208 18:01:33.849359 44570 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T18:01:33.912856294+00:00 stdout F Deployments unchanged. 2025-12-08T18:01:33.921961236+00:00 stderr F I1208 18:01:33.921909 44570 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:01:33.922461420+00:00 stderr F I1208 18:01:33.922433 44570 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
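The machine-config-daemon entries of the form "image_manager_helper.go:92] Running captured: rpm-ostree status" and "Running captured: journalctl --list-boots" above come from a helper that shells out and records the command's output. A small sketch of that capture pattern in Go, assuming plain os/exec; this is an illustration only, not the actual MCO helper:

// Hypothetical illustration of the "Running captured: <command>" pattern seen
// in the machine-config-daemon log above; not the MCO source code.
package main

import (
	"fmt"
	"log"
	"os/exec"
)

// runCaptured logs the command the way the daemon does, runs it, and returns
// the combined stdout/stderr so the caller can log or parse it.
func runCaptured(name string, args ...string) (string, error) {
	log.Printf("Running captured: %s %v", name, args)
	out, err := exec.Command(name, args...).CombinedOutput()
	return string(out), err
}

func main() {
	// Same command captured at image_manager_helper.go:92 in the log above.
	out, err := runCaptured("rpm-ostree", "status")
	if err != nil {
		log.Fatalf("rpm-ostree status failed: %v", err)
	}
	fmt.Println(out)
}

Capturing stdout and stderr together is consistent with the way the rpm-ostree status text (State: idle, Deployments, LayeredPackages) shows up inline in the daemon log above.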
2025-12-08T18:01:33.922461420+00:00 stderr F W1208 18:01:33.922448 44570 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T18:01:33.922461420+00:00 stderr F I1208 18:01:33.922457 44570 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T18:01:33.982439918+00:00 stderr F E1208 18:01:33.982372 44570 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T18:01:33.982439918+00:00 stderr F   bytes.Join({ 2025-12-08T18:01:33.982439918+00:00 stderr F    "{", 2025-12-08T18:01:33.982439918+00:00 stderr F +  `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T18:01:33.982439918+00:00 stderr F +  "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T18:01:33.982439918+00:00 stderr F +  `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T18:01:33.982439918+00:00 stderr F +  `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T18:01:33.982439918+00:00 stderr F +  "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T18:01:33.982439918+00:00 stderr F +  `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T18:01:33.982439918+00:00 stderr F +  `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T18:01:33.982439918+00:00 stderr F +  `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T18:01:33.982439918+00:00 stderr F +  "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T18:01:33.982439918+00:00 stderr F +  
`U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 2025-12-08T18:01:33.982439918+00:00 stderr F +  `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T18:01:33.982439918+00:00 stderr F +  "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T18:01:33.982439918+00:00 stderr F +  "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T18:01:33.982439918+00:00 stderr F +  `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T18:01:33.982439918+00:00 stderr F +  `"pablintino@gmail.com"}}`, 2025-12-08T18:01:33.982439918+00:00 stderr F    ... 
// 2 identical bytes 2025-12-08T18:01:33.982439918+00:00 stderr F   }, "") 2025-12-08T18:01:33.982537291+00:00 stderr F E1208 18:01:33.982447 44570 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T18:01:36.385187320+00:00 stderr F I1208 18:01:36.385093 44570 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs2323271743 --cleanup 2025-12-08T18:01:36.389031273+00:00 stderr F [2025-12-08T18:01:36Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T18:01:36.389147066+00:00 stdout F 2025-12-08T18:01:36.389158976+00:00 stderr F [2025-12-08T18:01:36Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T18:01:36.398267469+00:00 stderr F I1208 18:01:36.398197 44570 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T18:01:36.398267469+00:00 stderr F I1208 18:01:36.398241 44570 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:01:36.398267469+00:00 stderr F I1208 18:01:36.398256 44570 daemon.go:1795] state: Degraded 2025-12-08T18:01:36.398296479+00:00 stderr F I1208 18:01:36.398283 44570 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T18:01:36.451931748+00:00 stdout F Deployments unchanged. 2025-12-08T18:01:36.459647044+00:00 stderr F I1208 18:01:36.459497 44570 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:01:36.459947532+00:00 stderr F I1208 18:01:36.459906 44570 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T18:01:36.459947532+00:00 stderr F W1208 18:01:36.459920 44570 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T18:01:36.459947532+00:00 stderr F I1208 18:01:36.459933 44570 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T18:01:36.532690490+00:00 stderr F E1208 18:01:36.532626 44570 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T18:01:36.532690490+00:00 stderr F   bytes.Join({ 2025-12-08T18:01:36.532690490+00:00 stderr F    "{", 2025-12-08T18:01:36.532690490+00:00 stderr F +  `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T18:01:36.532690490+00:00 stderr F +  "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T18:01:36.532690490+00:00 stderr F +  `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T18:01:36.532690490+00:00 stderr F +  `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T18:01:36.532690490+00:00 stderr F +  "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T18:01:36.532690490+00:00 stderr F +  `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T18:01:36.532690490+00:00 stderr F +  `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T18:01:36.532690490+00:00 stderr F +  `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T18:01:36.532690490+00:00 stderr F +  "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T18:01:36.532690490+00:00 stderr F +  
`U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 2025-12-08T18:01:36.532690490+00:00 stderr F +  `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T18:01:36.532690490+00:00 stderr F +  "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T18:01:36.532690490+00:00 stderr F +  "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T18:01:36.532690490+00:00 stderr F +  `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T18:01:36.532690490+00:00 stderr F +  `"pablintino@gmail.com"}}`, 2025-12-08T18:01:36.532690490+00:00 stderr F    ... 
// 2 identical bytes 2025-12-08T18:01:36.532690490+00:00 stderr F   }, "") 2025-12-08T18:01:36.532774822+00:00 stderr F E1208 18:01:36.532697 44570 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T18:01:40.545175540+00:00 stderr F I1208 18:01:40.545051 44570 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs429940331 --cleanup 2025-12-08T18:01:40.549633129+00:00 stderr F [2025-12-08T18:01:40Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T18:01:40.549762463+00:00 stdout F 2025-12-08T18:01:40.549778443+00:00 stderr F [2025-12-08T18:01:40Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T18:01:40.565379798+00:00 stderr F I1208 18:01:40.565281 44570 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T18:01:40.565379798+00:00 stderr F I1208 18:01:40.565324 44570 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:01:40.565379798+00:00 stderr F I1208 18:01:40.565338 44570 daemon.go:1795] state: Degraded 2025-12-08T18:01:40.565379798+00:00 stderr F I1208 18:01:40.565367 44570 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T18:01:40.644543038+00:00 stdout F Deployments unchanged. 2025-12-08T18:01:40.653296061+00:00 stderr F I1208 18:01:40.653217 44570 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:01:40.653923647+00:00 stderr F I1208 18:01:40.653836 44570 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T18:01:40.653923647+00:00 stderr F W1208 18:01:40.653854 44570 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T18:01:40.653923647+00:00 stderr F I1208 18:01:40.653864 44570 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T18:01:40.736487668+00:00 stderr F E1208 18:01:40.736415 44570 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T18:01:40.736487668+00:00 stderr F   bytes.Join({ 2025-12-08T18:01:40.736487668+00:00 stderr F    "{", 2025-12-08T18:01:40.736487668+00:00 stderr F +  `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T18:01:40.736487668+00:00 stderr F +  "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T18:01:40.736487668+00:00 stderr F +  `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T18:01:40.736487668+00:00 stderr F +  `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T18:01:40.736487668+00:00 stderr F +  "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T18:01:40.736487668+00:00 stderr F +  `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T18:01:40.736487668+00:00 stderr F +  `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T18:01:40.736487668+00:00 stderr F +  `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T18:01:40.736487668+00:00 stderr F +  "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T18:01:40.736487668+00:00 stderr F +  
`U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 2025-12-08T18:01:40.736487668+00:00 stderr F +  `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T18:01:40.736487668+00:00 stderr F +  "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T18:01:40.736487668+00:00 stderr F +  "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T18:01:40.736487668+00:00 stderr F +  `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T18:01:40.736487668+00:00 stderr F +  `"pablintino@gmail.com"}}`, 2025-12-08T18:01:40.736487668+00:00 stderr F    ... 
// 2 identical bytes 2025-12-08T18:01:40.736487668+00:00 stderr F   }, "") 2025-12-08T18:01:40.736579040+00:00 stderr F E1208 18:01:40.736496 44570 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T18:01:48.750406231+00:00 stderr F I1208 18:01:48.749731 44570 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs16123330 --cleanup 2025-12-08T18:01:48.755756693+00:00 stderr F [2025-12-08T18:01:48Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T18:01:48.755942018+00:00 stdout F 2025-12-08T18:01:48.755955549+00:00 stderr F [2025-12-08T18:01:48Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T18:01:48.771269397+00:00 stderr F I1208 18:01:48.771134 44570 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T18:01:48.771269397+00:00 stderr F I1208 18:01:48.771206 44570 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:01:48.771269397+00:00 stderr F I1208 18:01:48.771217 44570 daemon.go:1795] state: Degraded 2025-12-08T18:01:48.771269397+00:00 stderr F I1208 18:01:48.771253 44570 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T18:01:48.857441382+00:00 stdout F Deployments unchanged. 2025-12-08T18:01:48.870383507+00:00 stderr F I1208 18:01:48.870307 44570 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:01:48.870863530+00:00 stderr F I1208 18:01:48.870822 44570 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T18:01:48.870863530+00:00 stderr F W1208 18:01:48.870843 44570 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T18:01:48.870863530+00:00 stderr F I1208 18:01:48.870857 44570 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T18:01:48.940123465+00:00 stderr F E1208 18:01:48.940009 44570 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T18:01:48.940123465+00:00 stderr F   bytes.Join({ 2025-12-08T18:01:48.940123465+00:00 stderr F    "{", 2025-12-08T18:01:48.940123465+00:00 stderr F +  `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T18:01:48.940123465+00:00 stderr F +  "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T18:01:48.940123465+00:00 stderr F +  `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T18:01:48.940123465+00:00 stderr F +  `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T18:01:48.940123465+00:00 stderr F +  "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T18:01:48.940123465+00:00 stderr F +  `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T18:01:48.940123465+00:00 stderr F +  `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T18:01:48.940123465+00:00 stderr F +  `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T18:01:48.940123465+00:00 stderr F +  "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T18:01:48.940123465+00:00 stderr F +  
`U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 2025-12-08T18:01:48.940123465+00:00 stderr F +  `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T18:01:48.940123465+00:00 stderr F +  "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T18:01:48.940123465+00:00 stderr F +  "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T18:01:48.940123465+00:00 stderr F +  `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T18:01:48.940123465+00:00 stderr F +  `"pablintino@gmail.com"}}`, 2025-12-08T18:01:48.940123465+00:00 stderr F    ... 
// 2 identical bytes 2025-12-08T18:01:48.940123465+00:00 stderr F   }, "") 2025-12-08T18:01:48.940123465+00:00 stderr F E1208 18:01:48.940100 44570 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T18:02:21.090513870+00:00 stderr F I1208 18:02:21.089686 44570 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs3345479097 --cleanup 2025-12-08T18:02:21.093628133+00:00 stderr F [2025-12-08T18:02:21Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T18:02:21.093759077+00:00 stdout F 2025-12-08T18:02:21.093771547+00:00 stderr F [2025-12-08T18:02:21Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T18:02:21.106462534+00:00 stderr F I1208 18:02:21.106366 44570 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T18:02:21.106462534+00:00 stderr F I1208 18:02:21.106400 44570 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:02:21.106462534+00:00 stderr F I1208 18:02:21.106408 44570 daemon.go:1795] state: Degraded 2025-12-08T18:02:21.106462534+00:00 stderr F I1208 18:02:21.106439 44570 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T18:02:21.166411261+00:00 stdout F Deployments unchanged. 2025-12-08T18:02:21.175547874+00:00 stderr F I1208 18:02:21.175502 44570 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:02:21.176118979+00:00 stderr F I1208 18:02:21.176085 44570 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T18:02:21.176118979+00:00 stderr F W1208 18:02:21.176101 44570 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T18:02:21.176118979+00:00 stderr F I1208 18:02:21.176110 44570 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T18:02:21.240000980+00:00 stderr F E1208 18:02:21.239919 44570 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T18:02:21.240000980+00:00 stderr F   bytes.Join({ 2025-12-08T18:02:21.240000980+00:00 stderr F    "{", 2025-12-08T18:02:21.240000980+00:00 stderr F +  `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T18:02:21.240000980+00:00 stderr F +  "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T18:02:21.240000980+00:00 stderr F +  `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T18:02:21.240000980+00:00 stderr F +  `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T18:02:21.240000980+00:00 stderr F +  "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T18:02:21.240000980+00:00 stderr F +  `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T18:02:21.240000980+00:00 stderr F +  `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T18:02:21.240000980+00:00 stderr F +  `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T18:02:21.240000980+00:00 stderr F +  "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T18:02:21.240000980+00:00 stderr F +  
`U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 2025-12-08T18:02:21.240000980+00:00 stderr F +  `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T18:02:21.240000980+00:00 stderr F +  "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T18:02:21.240000980+00:00 stderr F +  "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T18:02:21.240000980+00:00 stderr F +  `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T18:02:21.240000980+00:00 stderr F +  `"pablintino@gmail.com"}}`, 2025-12-08T18:02:21.240000980+00:00 stderr F    ... 
// 2 identical bytes 2025-12-08T18:02:21.240000980+00:00 stderr F   }, "") 2025-12-08T18:02:21.240000980+00:00 stderr F E1208 18:02:21.239988 44570 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T18:02:32.766466162+00:00 stderr F I1208 18:02:32.765986 44570 certificate_writer.go:294] Certificate was synced from controllerconfig resourceVersion 39347 2025-12-08T18:03:22.789087830+00:00 stderr F I1208 18:03:22.789017 44570 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs3644582277 --cleanup 2025-12-08T18:03:22.791670120+00:00 stderr F [2025-12-08T18:03:22Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T18:03:22.791694500+00:00 stderr F [2025-12-08T18:03:22Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T18:03:22.791705480+00:00 stdout F 2025-12-08T18:03:22.799089698+00:00 stderr F I1208 18:03:22.799047 44570 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T18:03:22.799089698+00:00 stderr F I1208 18:03:22.799068 44570 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:03:22.799089698+00:00 stderr F I1208 18:03:22.799075 44570 daemon.go:1795] state: Degraded 2025-12-08T18:03:22.799121879+00:00 stderr F I1208 18:03:22.799110 44570 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T18:03:23.188267170+00:00 stdout F Deployments unchanged. 2025-12-08T18:03:23.196080749+00:00 stderr F I1208 18:03:23.196019 44570 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:03:23.196625314+00:00 stderr F I1208 18:03:23.196593 44570 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T18:03:23.196625314+00:00 stderr F W1208 18:03:23.196606 44570 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T18:03:23.196625314+00:00 stderr F I1208 18:03:23.196614 44570 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T18:03:23.271978074+00:00 stderr F E1208 18:03:23.271720 44570 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T18:03:23.271978074+00:00 stderr F   bytes.Join({ 2025-12-08T18:03:23.271978074+00:00 stderr F    "{", 2025-12-08T18:03:23.271978074+00:00 stderr F +  `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T18:03:23.271978074+00:00 stderr F +  "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T18:03:23.271978074+00:00 stderr F +  `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T18:03:23.271978074+00:00 stderr F +  `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T18:03:23.271978074+00:00 stderr F +  "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T18:03:23.271978074+00:00 stderr F +  `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T18:03:23.271978074+00:00 stderr F +  `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T18:03:23.271978074+00:00 stderr F +  `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T18:03:23.271978074+00:00 stderr F +  "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T18:03:23.271978074+00:00 stderr F +  
`U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 2025-12-08T18:03:23.271978074+00:00 stderr F +  `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T18:03:23.271978074+00:00 stderr F +  "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T18:03:23.271978074+00:00 stderr F +  "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T18:03:23.271978074+00:00 stderr F +  `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T18:03:23.271978074+00:00 stderr F +  `"pablintino@gmail.com"}}`, 2025-12-08T18:03:23.271978074+00:00 stderr F    ... 
// 2 identical bytes 2025-12-08T18:03:23.271978074+00:00 stderr F   }, "") 2025-12-08T18:03:23.271978074+00:00 stderr F E1208 18:03:23.271833 44570 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T18:04:23.280988045+00:00 stderr F I1208 18:04:23.280218 44570 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs3399131193 --cleanup 2025-12-08T18:04:23.284656113+00:00 stderr F [2025-12-08T18:04:23Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T18:04:23.284777746+00:00 stdout F 2025-12-08T18:04:23.284786117+00:00 stderr F [2025-12-08T18:04:23Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T18:04:23.297461862+00:00 stderr F I1208 18:04:23.297374 44570 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T18:04:23.297461862+00:00 stderr F I1208 18:04:23.297409 44570 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:04:23.297461862+00:00 stderr F I1208 18:04:23.297416 44570 daemon.go:1795] state: Degraded 2025-12-08T18:04:23.297510894+00:00 stderr F I1208 18:04:23.297494 44570 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T18:04:23.352422130+00:00 stdout F Deployments unchanged. 2025-12-08T18:04:23.363968236+00:00 stderr F I1208 18:04:23.362641 44570 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:04:23.363968236+00:00 stderr F I1208 18:04:23.363207 44570 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T18:04:23.363968236+00:00 stderr F W1208 18:04:23.363220 44570 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T18:04:23.363968236+00:00 stderr F I1208 18:04:23.363229 44570 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T18:04:23.442413807+00:00 stderr F E1208 18:04:23.442348 44570 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T18:04:23.442413807+00:00 stderr F   bytes.Join({ 2025-12-08T18:04:23.442413807+00:00 stderr F    "{", 2025-12-08T18:04:23.442413807+00:00 stderr F +  `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T18:04:23.442413807+00:00 stderr F +  "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T18:04:23.442413807+00:00 stderr F +  `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T18:04:23.442413807+00:00 stderr F +  `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T18:04:23.442413807+00:00 stderr F +  "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T18:04:23.442413807+00:00 stderr F +  `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T18:04:23.442413807+00:00 stderr F +  `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T18:04:23.442413807+00:00 stderr F +  `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T18:04:23.442413807+00:00 stderr F +  "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T18:04:23.442413807+00:00 stderr F +  
`U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 2025-12-08T18:04:23.442413807+00:00 stderr F +  `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T18:04:23.442413807+00:00 stderr F +  "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T18:04:23.442413807+00:00 stderr F +  "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T18:04:23.442413807+00:00 stderr F +  `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T18:04:23.442413807+00:00 stderr F +  `"pablintino@gmail.com"}}`, 2025-12-08T18:04:23.442413807+00:00 stderr F    ... 
// 2 identical bytes 2025-12-08T18:04:23.442413807+00:00 stderr F   }, "") 2025-12-08T18:04:23.442493149+00:00 stderr F E1208 18:04:23.442435 44570 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" ././@LongLink0000644000000000000000000000031500000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/machine-config-daemon/4.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000644000175000017500000016765515115611513033024 0ustar zuulzuul2025-12-08T17:58:02.980587111+00:00 stderr F I1208 17:58:02.980502 33375 start.go:70] Version: 89b561f0 (f587a1bfbaba518cc1d49ad6300e29eeb9c38cec) 2025-12-08T17:58:02.980748966+00:00 stderr F I1208 17:58:02.980701 33375 update.go:2651] Running: mount --rbind /run/secrets /rootfs/run/secrets 2025-12-08T17:58:02.983833335+00:00 stderr F I1208 17:58:02.983787 33375 update.go:2651] Running: mount --rbind /usr/bin /rootfs/run/machine-config-daemon-bin 2025-12-08T17:58:02.986316179+00:00 stderr F I1208 17:58:02.986281 33375 daemon.go:555] using appropriate binary for source=rhel-9 target=rhel-9 2025-12-08T17:58:03.092640527+00:00 stderr F I1208 17:58:03.092600 33375 daemon.go:608] Invoking re-exec /run/bin/machine-config-daemon 2025-12-08T17:58:03.131441270+00:00 stderr F I1208 17:58:03.131384 33375 start.go:70] Version: 89b561f0 (f587a1bfbaba518cc1d49ad6300e29eeb9c38cec) 2025-12-08T17:58:03.131863681+00:00 stderr F I1208 17:58:03.131831 33375 image_manager_helper.go:194] Linking rpm-ostree authfile to /etc/mco/internal-registry-pull-secret.json 2025-12-08T17:58:03.179205325+00:00 stderr F I1208 17:58:03.179138 33375 daemon.go:345] Booted osImageURL: image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest (9.6.20251021-0) 765a8d9fdcb7d177cbf4fd31343316543b668c78028d2ab915d810e45d5d583b 2025-12-08T17:58:03.180140739+00:00 stderr F I1208 17:58:03.180110 33375 start.go:136] overriding kubernetes api to https://api-int.crc.testing:6443 2025-12-08T17:58:03.184278466+00:00 stderr F I1208 17:58:03.184172 33375 metrics.go:92] Registering Prometheus metrics 2025-12-08T17:58:03.184367188+00:00 stderr F I1208 17:58:03.184342 33375 metrics.go:99] Starting metrics listener on 127.0.0.1:8797 2025-12-08T17:58:03.191522043+00:00 stderr F I1208 17:58:03.191471 33375 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:58:03.193775411+00:00 stderr F I1208 17:58:03.193698 33375 featuregates.go:112] FeatureGates initialized: enabled=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks], disabled=[AWSClusterHostedDNS 
AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:58:03.195446105+00:00 stderr F I1208 17:58:03.193922 33375 event.go:377] Event(v1.ObjectReference{Kind:"Node", Namespace:"openshift-machine-config-operator", Name:"crc", UID:"23216ff3-032e-49af-af7e-1d23d5907b59", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", 
"GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:58:03.195446105+00:00 stderr F I1208 17:58:03.195342 33375 writer.go:87] NodeWriter initialized with credentials from /var/lib/kubelet/kubeconfig 2025-12-08T17:58:03.195446105+00:00 stderr F I1208 17:58:03.195432 33375 start.go:221] Feature enabled: PinnedImages 2025-12-08T17:58:03.196203484+00:00 stderr F I1208 17:58:03.195607 33375 update.go:2696] "Starting to manage node: crc" 2025-12-08T17:58:03.213765548+00:00 stderr F I1208 17:58:03.213691 33375 image_manager_helper.go:92] Running captured: rpm-ostree status 2025-12-08T17:58:03.296652701+00:00 stderr F I1208 17:58:03.296583 33375 pinned_image_set.go:819] Starting PinnedImageSet Manager 2025-12-08T17:58:03.347246998+00:00 stderr F I1208 17:58:03.347181 33375 daemon.go:1827] State: idle 2025-12-08T17:58:03.347246998+00:00 stderr F Deployments: 2025-12-08T17:58:03.347246998+00:00 stderr F * ostree-unverified-registry:image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest 2025-12-08T17:58:03.347246998+00:00 stderr F Digest: sha256:97576b6e5dcea61323cc5edae1d4c603ef3869df0ea140c0ada45fa333ff09d8 2025-12-08T17:58:03.347246998+00:00 stderr F Version: 9.6.20251021-0 (2025-11-03T09:00:13Z) 2025-12-08T17:58:03.347246998+00:00 stderr F LayeredPackages: cloud-init gvisor-tap-vsock-gvforwarder hyperv-daemons 2025-12-08T17:58:03.347601907+00:00 stderr F I1208 17:58:03.347564 33375 coreos.go:53] CoreOS aleph version: mtime=2022-08-01 23:42:11 +0000 UTC 2025-12-08T17:58:03.347601907+00:00 stderr F { 2025-12-08T17:58:03.347601907+00:00 stderr F "container-image": { 2025-12-08T17:58:03.347601907+00:00 stderr F "image-digest": "sha256:346eadc1d679be03d2b6a0dc447edded7077483224443f2a27652056e5e51ed8", 2025-12-08T17:58:03.347601907+00:00 stderr F "image-labels": { 2025-12-08T17:58:03.347601907+00:00 stderr F "com.coreos.osname": "rhcos", 2025-12-08T17:58:03.347601907+00:00 stderr F "containers.bootc": "1", 2025-12-08T17:58:03.347601907+00:00 stderr F "coreos-assembler.image-config-checksum": "b444a32e2801642f1e41777fd51fa53304496c58a3a6b15e5964a1f86f866507", 2025-12-08T17:58:03.347601907+00:00 stderr F "io.openshift.build.version-display-names": "machine-os=Red Hat Enterprise Linux CoreOS", 2025-12-08T17:58:03.347601907+00:00 stderr F "io.openshift.build.versions": "machine-os=9.6.20251015-1", 2025-12-08T17:58:03.347601907+00:00 stderr F "org.opencontainers.image.revision": "7b9eaa1ba9269e6287cb00f7044614b0e9da747e", 
2025-12-08T17:58:03.347601907+00:00 stderr F "org.opencontainers.image.source": "https://github.com/coreos/rhel-coreos-config", 2025-12-08T17:58:03.347601907+00:00 stderr F "org.opencontainers.image.version": "9.6.20251015-1", 2025-12-08T17:58:03.347601907+00:00 stderr F "ostree.bootable": "true", 2025-12-08T17:58:03.347601907+00:00 stderr F "ostree.commit": "8df94c06f4995c7f493360f258ee92a068ab3280ea64919ec2bf9945a8648a4d", 2025-12-08T17:58:03.347601907+00:00 stderr F "ostree.final-diffid": "sha256:12787d84fa137cd5649a9005efe98ec9d05ea46245fdc50aecb7dd007f2035b1", 2025-12-08T17:58:03.347601907+00:00 stderr F "ostree.linux": "5.14.0-570.55.1.el9_6.x86_64", 2025-12-08T17:58:03.347601907+00:00 stderr F "rpmostree.inputhash": "b2542ee90d9bfa3873e873c3ad0e6550db088c732dbef4033568bbbd6dc58a81" 2025-12-08T17:58:03.347601907+00:00 stderr F }, 2025-12-08T17:58:03.347601907+00:00 stderr F "image-name": "oci-archive:/rhcos-9.6.20251015-1-ostree.x86_64.ociarchive" 2025-12-08T17:58:03.347601907+00:00 stderr F }, 2025-12-08T17:58:03.347601907+00:00 stderr F "osbuild-version": "161", 2025-12-08T17:58:03.347601907+00:00 stderr F "ostree-commit": "8df94c06f4995c7f493360f258ee92a068ab3280ea64919ec2bf9945a8648a4d", 2025-12-08T17:58:03.347601907+00:00 stderr F "ref": "docker://ostree-image-signed:oci-archive:/rhcos-9.6.20251015-1-ostree.x86_64.ociarchive", 2025-12-08T17:58:03.347601907+00:00 stderr F "version": "9.6.20251015-1" 2025-12-08T17:58:03.347601907+00:00 stderr F } 2025-12-08T17:58:03.347699290+00:00 stderr F I1208 17:58:03.347672 33375 coreos.go:70] Ignition provisioning: time=2025-11-02T07:44:17Z 2025-12-08T17:58:03.347699290+00:00 stderr F I1208 17:58:03.347684 33375 image_manager_helper.go:92] Running captured: journalctl --list-boots 2025-12-08T17:58:03.355720027+00:00 stderr F I1208 17:58:03.355671 33375 daemon.go:1836] journalctl --list-boots: 2025-12-08T17:58:03.355720027+00:00 stderr F IDX BOOT ID FIRST ENTRY LAST ENTRY 2025-12-08T17:58:03.355720027+00:00 stderr F -3 5cc629ac7367418d888178e530691988 Mon 2025-11-03 09:44:05 UTC Mon 2025-11-03 09:44:09 UTC 2025-12-08T17:58:03.355720027+00:00 stderr F -2 9ce94f2d4be449f9a71ac96c59658a3d Mon 2025-11-03 09:44:31 UTC Mon 2025-11-03 09:45:03 UTC 2025-12-08T17:58:03.355720027+00:00 stderr F -1 fe16203d7f904480a7094aaec9c44109 Mon 2025-12-08 17:36:32 UTC Mon 2025-12-08 17:40:38 UTC 2025-12-08T17:58:03.355720027+00:00 stderr F 0 3b24470386d14a74bdbb1446f2890ff6 Mon 2025-12-08 17:40:45 UTC Mon 2025-12-08 17:58:03 UTC 2025-12-08T17:58:03.355720027+00:00 stderr F I1208 17:58:03.355695 33375 image_manager_helper.go:92] Running captured: systemctl list-units --state=failed --no-legend 2025-12-08T17:58:03.366004303+00:00 stderr F I1208 17:58:03.365954 33375 daemon.go:1852] systemd service state: OK 2025-12-08T17:58:03.366004303+00:00 stderr F I1208 17:58:03.365982 33375 daemon.go:1405] Starting MachineConfigDaemon 2025-12-08T17:58:03.366164557+00:00 stderr F I1208 17:58:03.366122 33375 daemon.go:1412] Enabling Kubelet Healthz Monitor 2025-12-08T17:58:03.409518277+00:00 stderr F I1208 17:58:03.409248 33375 daemon.go:3034] Found 3 requested local packages in the booted deployment 2025-12-08T17:58:03.409518277+00:00 stderr F I1208 17:58:03.409272 33375 daemon.go:3043] Unsupported package cloud-init 2025-12-08T17:58:03.409518277+00:00 stderr F I1208 17:58:03.409277 33375 daemon.go:3043] Unsupported package gvisor-tap-vsock-gvforwarder 2025-12-08T17:58:03.409518277+00:00 stderr F I1208 17:58:03.409281 33375 daemon.go:3043] Unsupported package hyperv-daemons 
2025-12-08T17:58:04.212805959+00:00 stderr F I1208 17:58:04.212763 33375 daemon.go:689] Node crc is part of the control plane 2025-12-08T17:58:04.403447066+00:00 stderr F I1208 17:58:04.403410 33375 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs3042228587 --cleanup 2025-12-08T17:58:04.405743386+00:00 stderr F [2025-12-08T17:58:04Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T17:58:04.405842128+00:00 stderr F [2025-12-08T17:58:04Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T17:58:04.405853239+00:00 stdout F 2025-12-08T17:58:04.414819210+00:00 stderr F I1208 17:58:04.414780 33375 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T17:58:04.414819210+00:00 stderr F I1208 17:58:04.414807 33375 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T17:58:04.414819210+00:00 stderr F I1208 17:58:04.414815 33375 daemon.go:1795] state: Degraded 2025-12-08T17:58:04.414844481+00:00 stderr F I1208 17:58:04.414838 33375 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T17:58:04.474252816+00:00 stdout F Deployments unchanged. 2025-12-08T17:58:04.484121862+00:00 stderr F I1208 17:58:04.484071 33375 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T17:58:04.484549572+00:00 stderr F I1208 17:58:04.484528 33375 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 2025-12-08T17:58:04.484549572+00:00 stderr F W1208 17:58:04.484540 33375 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T17:58:04.484567663+00:00 stderr F I1208 17:58:04.484548 33375 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T17:58:04.550460275+00:00 stderr F E1208 17:58:04.550404 33375 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T17:58:04.550460275+00:00 stderr F bytes.Join({ 2025-12-08T17:58:04.550460275+00:00 stderr F "{", 2025-12-08T17:58:04.550460275+00:00 stderr F + `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T17:58:04.550460275+00:00 stderr F + "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T17:58:04.550460275+00:00 stderr F + "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T17:58:04.550460275+00:00 stderr F + `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T17:58:04.550460275+00:00 stderr F + `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T17:58:04.550460275+00:00 stderr F + "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T17:58:04.550460275+00:00 stderr F + `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T17:58:04.550460275+00:00 stderr F + `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T17:58:04.550460275+00:00 stderr F + `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T17:58:04.550460275+00:00 stderr F + "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T17:58:04.550460275+00:00 stderr F + "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 
2025-12-08T17:58:04.550460275+00:00 stderr F + "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T17:58:04.550460275+00:00 stderr F + "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T17:58:04.550460275+00:00 stderr F + "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T17:58:04.550460275+00:00 stderr F + "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T17:58:04.550460275+00:00 stderr F + "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T17:58:04.550460275+00:00 stderr F + "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T17:58:04.550460275+00:00 stderr F + "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T17:58:04.550460275+00:00 stderr F + "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T17:58:04.550460275+00:00 stderr F + "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T17:58:04.550460275+00:00 stderr F + "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T17:58:04.550460275+00:00 stderr F + "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T17:58:04.550460275+00:00 stderr F + "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T17:58:04.550460275+00:00 stderr F + "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T17:58:04.550460275+00:00 stderr F + `U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 2025-12-08T17:58:04.550460275+00:00 stderr F + `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T17:58:04.550460275+00:00 stderr F + "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T17:58:04.550460275+00:00 stderr F + "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T17:58:04.550460275+00:00 stderr F + "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T17:58:04.550460275+00:00 stderr F + "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T17:58:04.550460275+00:00 stderr F + "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T17:58:04.550460275+00:00 stderr F + "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T17:58:04.550460275+00:00 stderr F + "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T17:58:04.550460275+00:00 stderr F + "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T17:58:04.550460275+00:00 stderr F + "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T17:58:04.550460275+00:00 stderr F + "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T17:58:04.550460275+00:00 stderr F + "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T17:58:04.550460275+00:00 stderr F + "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T17:58:04.550460275+00:00 stderr F + "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T17:58:04.550460275+00:00 stderr F + "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T17:58:04.550460275+00:00 stderr F + "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T17:58:04.550460275+00:00 stderr F + 
"FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T17:58:04.550460275+00:00 stderr F + `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T17:58:04.550460275+00:00 stderr F + `"pablintino@gmail.com"}}`, 2025-12-08T17:58:04.550460275+00:00 stderr F ... // 2 identical bytes 2025-12-08T17:58:04.550460275+00:00 stderr F }, "") 2025-12-08T17:58:04.550535017+00:00 stderr F E1208 17:58:04.550479 33375 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T17:58:06.564935461+00:00 stderr F I1208 17:58:06.564144 33375 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs655605039 --cleanup 2025-12-08T17:58:06.568442492+00:00 stdout F 2025-12-08T17:58:06.568462243+00:00 stderr F [2025-12-08T17:58:06Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T17:58:06.568462243+00:00 stderr F [2025-12-08T17:58:06Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T17:58:06.575851333+00:00 stderr F I1208 17:58:06.574929 33375 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T17:58:06.575851333+00:00 stderr F I1208 17:58:06.574954 33375 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T17:58:06.575851333+00:00 stderr F I1208 17:58:06.574963 33375 daemon.go:1795] state: Degraded 2025-12-08T17:58:06.575851333+00:00 stderr F I1208 17:58:06.575004 33375 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T17:58:06.629686065+00:00 stdout F Deployments unchanged. 2025-12-08T17:58:06.642550857+00:00 stderr F I1208 17:58:06.642092 33375 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T17:58:06.642550857+00:00 stderr F I1208 17:58:06.642463 33375 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T17:58:06.642550857+00:00 stderr F W1208 17:58:06.642471 33375 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T17:58:06.642550857+00:00 stderr F I1208 17:58:06.642477 33375 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T17:58:06.713514741+00:00 stderr F E1208 17:58:06.713109 33375 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T17:58:06.713514741+00:00 stderr F bytes.Join({ 2025-12-08T17:58:06.713514741+00:00 stderr F "{", 2025-12-08T17:58:06.713514741+00:00 stderr F + `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T17:58:06.713514741+00:00 stderr F + "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T17:58:06.713514741+00:00 stderr F + "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T17:58:06.713514741+00:00 stderr F + `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T17:58:06.713514741+00:00 stderr F + `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T17:58:06.713514741+00:00 stderr F + "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T17:58:06.713514741+00:00 stderr F + `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T17:58:06.713514741+00:00 stderr F + `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T17:58:06.713514741+00:00 stderr F + `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T17:58:06.713514741+00:00 stderr F + "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T17:58:06.713514741+00:00 stderr F + "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T17:58:06.713514741+00:00 stderr F + "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T17:58:06.713514741+00:00 stderr F + "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T17:58:06.713514741+00:00 stderr F + "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T17:58:06.713514741+00:00 stderr F + "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T17:58:06.713514741+00:00 stderr F + "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T17:58:06.713514741+00:00 stderr F + "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T17:58:06.713514741+00:00 stderr F + "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T17:58:06.713514741+00:00 stderr F + "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T17:58:06.713514741+00:00 stderr F + "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T17:58:06.713514741+00:00 stderr F + "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T17:58:06.713514741+00:00 stderr F + "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T17:58:06.713514741+00:00 stderr F + "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T17:58:06.713514741+00:00 stderr F + "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T17:58:06.713514741+00:00 stderr F + `U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 
2025-12-08T17:58:06.713514741+00:00 stderr F + `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T17:58:06.713514741+00:00 stderr F + "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T17:58:06.713514741+00:00 stderr F + "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T17:58:06.713514741+00:00 stderr F + "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T17:58:06.713514741+00:00 stderr F + "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T17:58:06.713514741+00:00 stderr F + "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T17:58:06.713514741+00:00 stderr F + "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T17:58:06.713514741+00:00 stderr F + "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T17:58:06.713514741+00:00 stderr F + "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T17:58:06.713514741+00:00 stderr F + "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T17:58:06.713514741+00:00 stderr F + "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T17:58:06.713514741+00:00 stderr F + "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T17:58:06.713514741+00:00 stderr F + "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T17:58:06.713514741+00:00 stderr F + "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T17:58:06.713514741+00:00 stderr F + "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T17:58:06.713514741+00:00 stderr F + "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T17:58:06.713514741+00:00 stderr F + "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T17:58:06.713514741+00:00 stderr F + `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T17:58:06.713514741+00:00 stderr F + `"pablintino@gmail.com"}}`, 2025-12-08T17:58:06.713514741+00:00 stderr F ... 
// 2 identical bytes 2025-12-08T17:58:06.713514741+00:00 stderr F }, "") 2025-12-08T17:58:06.713514741+00:00 stderr F E1208 17:58:06.713168 33375 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T17:58:14.782112300+00:00 stdout F 2025-12-08T17:58:14.782151731+00:00 stderr F I1208 17:58:14.724663 33375 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs3817601763 --cleanup 2025-12-08T17:58:14.782151731+00:00 stderr F [2025-12-08T17:58:14Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T17:58:14.782151731+00:00 stderr F [2025-12-08T17:58:14Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T17:58:14.782151731+00:00 stderr F I1208 17:58:14.740168 33375 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T17:58:14.782151731+00:00 stderr F I1208 17:58:14.740223 33375 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T17:58:14.782151731+00:00 stderr F I1208 17:58:14.740233 33375 daemon.go:1795] state: Degraded 2025-12-08T17:58:14.782151731+00:00 stderr F I1208 17:58:14.740291 33375 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T17:58:14.899906944+00:00 stdout F Deployments unchanged. 2025-12-08T17:58:14.909492142+00:00 stderr F I1208 17:58:14.908640 33375 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T17:58:14.909993245+00:00 stderr F I1208 17:58:14.909944 33375 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T17:58:14.909993245+00:00 stderr F W1208 17:58:14.909961 33375 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T17:58:14.909993245+00:00 stderr F I1208 17:58:14.909975 33375 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T17:58:15.012359141+00:00 stderr F E1208 17:58:15.012297 33375 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T17:58:15.012359141+00:00 stderr F bytes.Join({ 2025-12-08T17:58:15.012359141+00:00 stderr F "{", 2025-12-08T17:58:15.012359141+00:00 stderr F + `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T17:58:15.012359141+00:00 stderr F + "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T17:58:15.012359141+00:00 stderr F + "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T17:58:15.012359141+00:00 stderr F + `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T17:58:15.012359141+00:00 stderr F + `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T17:58:15.012359141+00:00 stderr F + "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T17:58:15.012359141+00:00 stderr F + `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T17:58:15.012359141+00:00 stderr F + `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T17:58:15.012359141+00:00 stderr F + `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T17:58:15.012359141+00:00 stderr F + "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T17:58:15.012359141+00:00 stderr F + "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T17:58:15.012359141+00:00 stderr F + "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T17:58:15.012359141+00:00 stderr F + "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T17:58:15.012359141+00:00 stderr F + "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T17:58:15.012359141+00:00 stderr F + "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T17:58:15.012359141+00:00 stderr F + "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T17:58:15.012359141+00:00 stderr F + "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T17:58:15.012359141+00:00 stderr F + "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T17:58:15.012359141+00:00 stderr F + "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T17:58:15.012359141+00:00 stderr F + "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T17:58:15.012359141+00:00 stderr F + "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T17:58:15.012359141+00:00 stderr F + "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T17:58:15.012359141+00:00 stderr F + "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T17:58:15.012359141+00:00 stderr F + "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T17:58:15.012359141+00:00 stderr F + `U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 
2025-12-08T17:58:15.012359141+00:00 stderr F + `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T17:58:15.012359141+00:00 stderr F + "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T17:58:15.012359141+00:00 stderr F + "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T17:58:15.012359141+00:00 stderr F + "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T17:58:15.012359141+00:00 stderr F + "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T17:58:15.012359141+00:00 stderr F + "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T17:58:15.012359141+00:00 stderr F + "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T17:58:15.012359141+00:00 stderr F + "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T17:58:15.012359141+00:00 stderr F + "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T17:58:15.012359141+00:00 stderr F + "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T17:58:15.012359141+00:00 stderr F + "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T17:58:15.012359141+00:00 stderr F + "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T17:58:15.012359141+00:00 stderr F + "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T17:58:15.012359141+00:00 stderr F + "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T17:58:15.012359141+00:00 stderr F + "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T17:58:15.012359141+00:00 stderr F + "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T17:58:15.012359141+00:00 stderr F + "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T17:58:15.012359141+00:00 stderr F + `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T17:58:15.012359141+00:00 stderr F + `"pablintino@gmail.com"}}`, 2025-12-08T17:58:15.012359141+00:00 stderr F ... 
// 2 identical bytes 2025-12-08T17:58:15.012359141+00:00 stderr F }, "") 2025-12-08T17:58:15.012746041+00:00 stderr F E1208 17:58:15.012723 33375 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T17:58:31.236242456+00:00 stderr F I1208 17:58:31.236147 33375 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs2590445935 --cleanup 2025-12-08T17:58:31.241187164+00:00 stderr F [2025-12-08T17:58:31Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T17:58:31.241316038+00:00 stdout F 2025-12-08T17:58:31.241323228+00:00 stderr F [2025-12-08T17:58:31Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T17:58:31.253482712+00:00 stderr F I1208 17:58:31.253419 33375 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T17:58:31.253482712+00:00 stderr F I1208 17:58:31.253458 33375 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T17:58:31.253482712+00:00 stderr F I1208 17:58:31.253469 33375 daemon.go:1795] state: Degraded 2025-12-08T17:58:31.253519673+00:00 stderr F I1208 17:58:31.253503 33375 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T17:58:31.345953102+00:00 stdout F Deployments unchanged. 2025-12-08T17:58:31.360669903+00:00 stderr F I1208 17:58:31.360567 33375 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T17:58:31.361166785+00:00 stderr F I1208 17:58:31.361128 33375 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T17:58:31.361166785+00:00 stderr F W1208 17:58:31.361148 33375 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T17:58:31.361166785+00:00 stderr F I1208 17:58:31.361161 33375 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T17:58:31.456978592+00:00 stderr F E1208 17:58:31.456864 33375 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T17:58:31.456978592+00:00 stderr F bytes.Join({ 2025-12-08T17:58:31.456978592+00:00 stderr F "{", 2025-12-08T17:58:31.456978592+00:00 stderr F + `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T17:58:31.456978592+00:00 stderr F + "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T17:58:31.456978592+00:00 stderr F + "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T17:58:31.456978592+00:00 stderr F + `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T17:58:31.456978592+00:00 stderr F + `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T17:58:31.456978592+00:00 stderr F + "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T17:58:31.456978592+00:00 stderr F + `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T17:58:31.456978592+00:00 stderr F + `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T17:58:31.456978592+00:00 stderr F + `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T17:58:31.456978592+00:00 stderr F + "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T17:58:31.456978592+00:00 stderr F + "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T17:58:31.456978592+00:00 stderr F + "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T17:58:31.456978592+00:00 stderr F + "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T17:58:31.456978592+00:00 stderr F + "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T17:58:31.456978592+00:00 stderr F + "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T17:58:31.456978592+00:00 stderr F + "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T17:58:31.456978592+00:00 stderr F + "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T17:58:31.456978592+00:00 stderr F + "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T17:58:31.456978592+00:00 stderr F + "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T17:58:31.456978592+00:00 stderr F + "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T17:58:31.456978592+00:00 stderr F + "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T17:58:31.456978592+00:00 stderr F + "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T17:58:31.456978592+00:00 stderr F + "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T17:58:31.456978592+00:00 stderr F + "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T17:58:31.456978592+00:00 stderr F + `U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 
2025-12-08T17:58:31.456978592+00:00 stderr F + `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T17:58:31.456978592+00:00 stderr F + "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T17:58:31.456978592+00:00 stderr F + "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T17:58:31.456978592+00:00 stderr F + "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T17:58:31.456978592+00:00 stderr F + "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T17:58:31.456978592+00:00 stderr F + "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T17:58:31.456978592+00:00 stderr F + "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T17:58:31.456978592+00:00 stderr F + "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T17:58:31.456978592+00:00 stderr F + "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T17:58:31.456978592+00:00 stderr F + "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T17:58:31.456978592+00:00 stderr F + "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T17:58:31.456978592+00:00 stderr F + "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T17:58:31.456978592+00:00 stderr F + "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T17:58:31.456978592+00:00 stderr F + "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T17:58:31.456978592+00:00 stderr F + "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T17:58:31.456978592+00:00 stderr F + "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T17:58:31.456978592+00:00 stderr F + "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T17:58:31.456978592+00:00 stderr F + `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T17:58:31.456978592+00:00 stderr F + `"pablintino@gmail.com"}}`, 2025-12-08T17:58:31.456978592+00:00 stderr F ... 
// 2 identical bytes 2025-12-08T17:58:31.456978592+00:00 stderr F }, "") 2025-12-08T17:58:31.456978592+00:00 stderr F E1208 17:58:31.456947 33375 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T17:59:03.410702828+00:00 stderr F I1208 17:59:03.410487 33375 certificate_writer.go:294] Certificate was synced from controllerconfig resourceVersion 39347 2025-12-08T17:59:31.463468475+00:00 stderr F I1208 17:59:31.463327 33375 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs88429207 --cleanup 2025-12-08T17:59:31.466475574+00:00 stderr F [2025-12-08T17:59:31Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T17:59:31.466550776+00:00 stdout F 2025-12-08T17:59:31.466559676+00:00 stderr F [2025-12-08T17:59:31Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T17:59:31.477436893+00:00 stderr F I1208 17:59:31.477358 33375 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T17:59:31.477436893+00:00 stderr F I1208 17:59:31.477394 33375 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T17:59:31.477436893+00:00 stderr F I1208 17:59:31.477408 33375 daemon.go:1795] state: Degraded 2025-12-08T17:59:31.477494084+00:00 stderr F I1208 17:59:31.477470 33375 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T17:59:31.539526200+00:00 stdout F Deployments unchanged. 2025-12-08T17:59:31.552621665+00:00 stderr F I1208 17:59:31.552541 33375 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T17:59:31.553658533+00:00 stderr F I1208 17:59:31.553604 33375 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T17:59:31.553658533+00:00 stderr F W1208 17:59:31.553636 33375 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T17:59:31.553658533+00:00 stderr F I1208 17:59:31.553648 33375 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T17:59:31.641902568+00:00 stderr F E1208 17:59:31.641806 33375 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T17:59:31.641902568+00:00 stderr F bytes.Join({ 2025-12-08T17:59:31.641902568+00:00 stderr F "{", 2025-12-08T17:59:31.641902568+00:00 stderr F + `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T17:59:31.641902568+00:00 stderr F + "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T17:59:31.641902568+00:00 stderr F + "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T17:59:31.641902568+00:00 stderr F + `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T17:59:31.641902568+00:00 stderr F + `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T17:59:31.641902568+00:00 stderr F + "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T17:59:31.641902568+00:00 stderr F + `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T17:59:31.641902568+00:00 stderr F + `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T17:59:31.641902568+00:00 stderr F + `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T17:59:31.641902568+00:00 stderr F + "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T17:59:31.641902568+00:00 stderr F + "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T17:59:31.641902568+00:00 stderr F + "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T17:59:31.641902568+00:00 stderr F + "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T17:59:31.641902568+00:00 stderr F + "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T17:59:31.641902568+00:00 stderr F + "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T17:59:31.641902568+00:00 stderr F + "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T17:59:31.641902568+00:00 stderr F + "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T17:59:31.641902568+00:00 stderr F + "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T17:59:31.641902568+00:00 stderr F + "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T17:59:31.641902568+00:00 stderr F + "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T17:59:31.641902568+00:00 stderr F + "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T17:59:31.641902568+00:00 stderr F + "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T17:59:31.641902568+00:00 stderr F + "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T17:59:31.641902568+00:00 stderr F + "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T17:59:31.641902568+00:00 stderr F + `U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 
2025-12-08T17:59:31.641902568+00:00 stderr F + `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T17:59:31.641902568+00:00 stderr F + "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T17:59:31.641902568+00:00 stderr F + "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T17:59:31.641902568+00:00 stderr F + "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T17:59:31.641902568+00:00 stderr F + "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T17:59:31.641902568+00:00 stderr F + "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T17:59:31.641902568+00:00 stderr F + "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T17:59:31.641902568+00:00 stderr F + "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T17:59:31.641902568+00:00 stderr F + "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T17:59:31.641902568+00:00 stderr F + "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T17:59:31.641902568+00:00 stderr F + "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T17:59:31.641902568+00:00 stderr F + "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T17:59:31.641902568+00:00 stderr F + "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T17:59:31.641902568+00:00 stderr F + "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T17:59:31.641902568+00:00 stderr F + "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T17:59:31.641902568+00:00 stderr F + "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T17:59:31.641902568+00:00 stderr F + "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T17:59:31.641902568+00:00 stderr F + `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T17:59:31.641902568+00:00 stderr F + `"pablintino@gmail.com"}}`, 2025-12-08T17:59:31.641902568+00:00 stderr F ... 
// 2 identical bytes 2025-12-08T17:59:31.641902568+00:00 stderr F }, "") 2025-12-08T17:59:31.641978800+00:00 stderr F E1208 17:59:31.641918 33375 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T18:00:31.654445590+00:00 stderr F I1208 18:00:31.654281 33375 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs1935102236 --cleanup 2025-12-08T18:00:31.658761483+00:00 stderr F [2025-12-08T18:00:31Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T18:00:31.658858665+00:00 stdout F 2025-12-08T18:00:31.658872475+00:00 stderr F [2025-12-08T18:00:31Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T18:00:31.675126533+00:00 stderr F I1208 18:00:31.675021 33375 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T18:00:31.675126533+00:00 stderr F I1208 18:00:31.675059 33375 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:00:31.675126533+00:00 stderr F I1208 18:00:31.675073 33375 daemon.go:1795] state: Degraded 2025-12-08T18:00:31.675247136+00:00 stderr F I1208 18:00:31.675193 33375 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T18:00:31.755809635+00:00 stdout F Deployments unchanged. 2025-12-08T18:00:31.769457914+00:00 stderr F I1208 18:00:31.769389 33375 daemon.go:2255] Validating against current config rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:00:31.770653665+00:00 stderr F I1208 18:00:31.770601 33375 daemon.go:2167] SSH key location ("/home/core/.ssh/authorized_keys.d/ignition") up-to-date! 
2025-12-08T18:00:31.770653665+00:00 stderr F W1208 18:00:31.770628 33375 daemon.go:2791] osImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" is not a digest; using a digest is recommended 2025-12-08T18:00:31.770653665+00:00 stderr F I1208 18:00:31.770646 33375 image_manager_helper.go:92] Running captured: rpm-ostree kargs 2025-12-08T18:00:31.887465217+00:00 stderr F E1208 18:00:31.887380 33375 on_disk_validation.go:251] content mismatch for file "/var/lib/kubelet/config.json" (-want +got): 2025-12-08T18:00:31.887465217+00:00 stderr F bytes.Join({ 2025-12-08T18:00:31.887465217+00:00 stderr F "{", 2025-12-08T18:00:31.887465217+00:00 stderr F + `"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2Ut`, 2025-12-08T18:00:31.887465217+00:00 stderr F + "ZGV2K29jbV9hY2Nlc3NfMWI4OTIxNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6", 2025-12-08T18:00:31.887465217+00:00 stderr F + "Rk03WUxFT1hIWDQ0VVpEM1lZME9QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdT", 2025-12-08T18:00:31.887465217+00:00 stderr F + `RE1KWTZEMFBBMTEzUzU4Vg==","email":"pablintino@gmail.com"},"quay.`, 2025-12-08T18:00:31.887465217+00:00 stderr F + `io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfMWI4OTI`, 2025-12-08T18:00:31.887465217+00:00 stderr F + "xNzU1MmJjNDJkMWJlM2ZiMDZhMWFlZDAwMWE6Rk03WUxFT1hIWDQ0VVpEM1lZME9", 2025-12-08T18:00:31.887465217+00:00 stderr F + `QTDlXVVNMMVFKTzUxOUYzQUEzNVhGT1BWRUdTRE1KWTZEMFBBMTEzUzU4Vg==","`, 2025-12-08T18:00:31.887465217+00:00 stderr F + `email":"pablintino@gmail.com"},"registry.connect.redhat.com":{"a`, 2025-12-08T18:00:31.887465217+00:00 stderr F + `uth":"fHVoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0OD`, 2025-12-08T18:00:31.887465217+00:00 stderr F + "g3MTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTm", 2025-12-08T18:00:31.887465217+00:00 stderr F + "haakEwT0RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZG", 2025-12-08T18:00:31.887465217+00:00 stderr F + "xYY3oxTUwzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaV", 2025-12-08T18:00:31.887465217+00:00 stderr F + "p2TkFremVTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRV", 2025-12-08T18:00:31.887465217+00:00 stderr F + "lheEkxSmFjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZD", 2025-12-08T18:00:31.887465217+00:00 stderr F + "RBNGw0MVVjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMX", 2025-12-08T18:00:31.887465217+00:00 stderr F + "NZWnFwSFZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2", 2025-12-08T18:00:31.887465217+00:00 stderr F + "tsQ1lrM05UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMT", 2025-12-08T18:00:31.887465217+00:00 stderr F + "RnNFE4TmF6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVj", 2025-12-08T18:00:31.887465217+00:00 stderr F + "VxS1hNY0g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYj", 2025-12-08T18:00:31.887465217+00:00 stderr F + "F6cmRSbC1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ2", 2025-12-08T18:00:31.887465217+00:00 stderr F + "9HNlFVclE5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTm", 2025-12-08T18:00:31.887465217+00:00 stderr F + "M4UWZnblBFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk", 2025-12-08T18:00:31.887465217+00:00 stderr F + "9pWGwtd1pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNl", 2025-12-08T18:00:31.887465217+00:00 stderr F + "MzYmR2TEFjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTW", 2025-12-08T18:00:31.887465217+00:00 stderr F + `U1bWUtWjRFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==",`, 
2025-12-08T18:00:31.887465217+00:00 stderr F + `"email":"pablintino@gmail.com"},"registry.redhat.io":{"auth":"fH`, 2025-12-08T18:00:31.887465217+00:00 stderr F + "VoYy1wb29sLWU0YTY0ZjFlLTc1MmUtNGVhNi1hNWM5LTIwMDY2YzE0ODg3MTpleU", 2025-12-08T18:00:31.887465217+00:00 stderr F + "poYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSmpaVE5tWWpsaU9HTmhaakEwT0", 2025-12-08T18:00:31.887465217+00:00 stderr F + "RreVlqZGtOV1k0TUdabVptRTFZbVpqTUNKOS51b0pIaGg4RkY4ZWI4ZGxYY3oxTU", 2025-12-08T18:00:31.887465217+00:00 stderr F + "wzUURzLUQ2Qk5ZRTFncDZOQmRmbUl5bFRKTW9hdzg5V2VQNGt5ZU1WaVp2TkFrem", 2025-12-08T18:00:31.887465217+00:00 stderr F + "VTYWQ3emtQcDZCcXU2NlFjazNCVEpHckt4cHJYVHBxRVVyYXVKazljRVlheEkxSm", 2025-12-08T18:00:31.887465217+00:00 stderr F + "FjTWV4TkxZdURZMHZJSEdNRHBqUS1fSldyZmxJNldWMXVkUlF0ZjhuZDRBNGw0MV", 2025-12-08T18:00:31.887465217+00:00 stderr F + "VjVXlIVjRDQmZ3b1dGRWdmX2ZwZ08zaUhlSjZuM2lHOURPa3dKYVcyMXNZWnFwSF", 2025-12-08T18:00:31.887465217+00:00 stderr F + "ZpbkN3Q2hwOTNabHlEcFhReUFkSzFDay12YmtHeUlaSjJQUTZPS0RIb2tsQ1lrM0", 2025-12-08T18:00:31.887465217+00:00 stderr F + "5UdDVJbDVyUU5ZY3Fxc0dGRjhEcTJpdVUzdmFQaDR4aV9zV2I1R1dtMTRnNFE4Tm", 2025-12-08T18:00:31.887465217+00:00 stderr F + "F6SVB0QmloUGVHRW03NERtWE5JRVpXR3UyQjB0b0lYZjlFV1hISjhtVjVxS1hNY0", 2025-12-08T18:00:31.887465217+00:00 stderr F + "g5cURacDQ4djFnUUhkZWdvNVJPU2t1ZmE5Mm96LWdoMXdsaWQ1VDRNYjF6cmRSbC", 2025-12-08T18:00:31.887465217+00:00 stderr F + "1XSWYyZm9oNjBYR3UtR1VRT0JNaW9pNGV5MlZpRFJkMVFmTVJhbnhnZ29HNlFVcl", 2025-12-08T18:00:31.887465217+00:00 stderr F + "E5Y0JNQS1DWE9VWnZFWHFKVmptV0N0MkM4UEFNUDZaOUN6QkwxVldiTmM4UWZnbl", 2025-12-08T18:00:31.887465217+00:00 stderr F + "BFRWxKMUw2T1p5S295Z3V3YXhrZFhKaEN6Rk10bFZuckRHUGpFaFJGYk9pWGwtd1", 2025-12-08T18:00:31.887465217+00:00 stderr F + "pYb25OS2w2S1NMUFpJdWZuV1gtMnlVdUV4ZEVRejlMYXFIekxETERDNlMzYmR2TE", 2025-12-08T18:00:31.887465217+00:00 stderr F + "FjaDlJQk1uT0xOYy1ZRHh5RFdVaTFMeW9WV3MtZzdiaWItV3hSUTZHTWU1bWUtWj", 2025-12-08T18:00:31.887465217+00:00 stderr F + `RFWTdWUDctQml6U1cxcTVzRTI2TkQ0dkExVG5vYUJlZEpqMzlCSQ==","email":`, 2025-12-08T18:00:31.887465217+00:00 stderr F + `"pablintino@gmail.com"}}`, 2025-12-08T18:00:31.887465217+00:00 stderr F ... 
// 2 identical bytes 2025-12-08T18:00:31.887465217+00:00 stderr F }, "") 2025-12-08T18:00:31.887573330+00:00 stderr F E1208 18:00:31.887490 33375 writer.go:231] Marking Degraded due to: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"" 2025-12-08T18:01:31.891344358+00:00 stderr F I1208 18:01:31.891233 33375 daemon.go:2057] Running: /run/machine-config-daemon-bin/nmstatectl persist-nic-names --root / --kargs-out /tmp/nmstate-kargs3912311835 --cleanup 2025-12-08T18:01:31.893631779+00:00 stderr F [2025-12-08T18:01:31Z INFO nmstatectl] Nmstate version: 2.2.54 2025-12-08T18:01:31.893715902+00:00 stdout F 2025-12-08T18:01:31.893724032+00:00 stderr F [2025-12-08T18:01:31Z INFO nmstatectl::persist_nic] /etc/systemd/network does not exist, no need to clean up 2025-12-08T18:01:31.904696974+00:00 stderr F I1208 18:01:31.904625 33375 daemon.go:1645] Previous boot ostree-finalize-staged.service appears successful 2025-12-08T18:01:31.904696974+00:00 stderr F I1208 18:01:31.904653 33375 daemon.go:1780] Current+desired config: rendered-master-d582710c680b4cd4536e11249c7e09e9 2025-12-08T18:01:31.904696974+00:00 stderr F I1208 18:01:31.904661 33375 daemon.go:1795] state: Degraded 2025-12-08T18:01:31.904696974+00:00 stderr F I1208 18:01:31.904681 33375 update.go:2651] Running: rpm-ostree cleanup -r 2025-12-08T18:01:31.975227743+00:00 stdout F Deployments unchanged. 2025-12-08T18:01:31.982858167+00:00 stderr F I1208 18:01:31.982765 33375 daemon.go:3092] Daemon logs from /var/log/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71 preserved at /etc/machine-config-daemon/previous-logs/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71 2025-12-08T18:01:31.982858167+00:00 stderr F I1208 18:01:31.982821 33375 daemon.go:1445] Shutting down MachineConfigDaemon ././@LongLink0000644000000000000000000000021700000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_qdr-0000755000175000017500000000000015115611513033045 5ustar zuulzuul././@LongLink0000644000000000000000000000022300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3/qdr/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_qdr-0000755000175000017500000000000015115611520033043 5ustar zuulzuul././@LongLink0000644000000000000000000000023000000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3/qdr/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_qdr-0000644000175000017500000004014315115611513033051 0ustar zuulzuul2025-12-08T17:59:13.179347207+00:00 stderr F 2025-12-08 17:59:13.179249 +0000 SERVER (info) Container Name: qdr-test.smoketest 2025-12-08T17:59:13.179523002+00:00 stderr F 2025-12-08 17:59:13.179330 +0000 ROUTER (info) Router started in Edge mode 2025-12-08T17:59:13.179523002+00:00 stderr F 2025-12-08 17:59:13.179345 +0000 ROUTER (info) Version: 1.19.0 2025-12-08T17:59:13.179523002+00:00 stderr F 2025-12-08 
17:59:13.179449 +0000 ROUTER_CORE (info) Streaming link scrubber: Scan interval: 30 seconds, max free pool: 128 links 2025-12-08T17:59:13.179523002+00:00 stderr F 2025-12-08 17:59:13.179461 +0000 ROUTER_CORE (info) Core module enabled: streaming_link_scrubber 2025-12-08T17:59:13.179523002+00:00 stderr F 2025-12-08 17:59:13.179471 +0000 ROUTER_CORE (info) Core module present but disabled: mobile_sync 2025-12-08T17:59:13.179523002+00:00 stderr F 2025-12-08 17:59:13.179483 +0000 ROUTER_CORE (info) Stuck delivery detection: Scan interval: 30 seconds, Delivery age threshold: 10 seconds 2025-12-08T17:59:13.179523002+00:00 stderr F 2025-12-08 17:59:13.179493 +0000 ROUTER_CORE (info) Core module enabled: stuck_delivery_detection 2025-12-08T17:59:13.179559843+00:00 stderr F 2025-12-08 17:59:13.179527 +0000 ROUTER_CORE (info) Core module enabled: heartbeat_server 2025-12-08T17:59:13.179559843+00:00 stderr F 2025-12-08 17:59:13.179544 +0000 ROUTER_CORE (info) Core module enabled: heartbeat_edge 2025-12-08T17:59:13.179569103+00:00 stderr F 2025-12-08 17:59:13.179555 +0000 ROUTER_CORE (info) Core module present but disabled: address_lookup_server 2025-12-08T17:59:13.179578473+00:00 stderr F 2025-12-08 17:59:13.179565 +0000 ROUTER_CORE (info) Core module enabled: address_lookup_client 2025-12-08T17:59:13.179598664+00:00 stderr F 2025-12-08 17:59:13.179576 +0000 ROUTER_CORE (info) Core module present but disabled: edge_addr_tracking 2025-12-08T17:59:13.179605664+00:00 stderr F 2025-12-08 17:59:13.179590 +0000 ROUTER_CORE (info) Core module present but disabled: core_test_hooks 2025-12-08T17:59:13.179614314+00:00 stderr F 2025-12-08 17:59:13.179602 +0000 ROUTER_CORE (info) Core module enabled: edge_router 2025-12-08T17:59:13.179743947+00:00 stderr F 2025-12-08 17:59:13.179710 +0000 ROUTER_CORE (info) Protocol adaptor registered: amqp 2025-12-08T17:59:13.179943423+00:00 stderr F 2025-12-08 17:59:13.179835 +0000 ROUTER_CORE (info) Router Core thread running. 
0/qdr-test.smoketest 2025-12-08T17:59:13.179967163+00:00 stderr F 2025-12-08 17:59:13.179941 +0000 ROUTER_CORE (info) In-process subscription M/$management 2025-12-08T17:59:13.180078576+00:00 stderr F 2025-12-08 17:59:13.180044 +0000 ROUTER_CORE (info) In-process subscription L/$management 2025-12-08T17:59:13.185858679+00:00 stderr F 2025-12-08 17:59:13.185811 +0000 AGENT (info) Activating management agent on $_management_internal 2025-12-08T17:59:13.185930690+00:00 stderr F 2025-12-08 17:59:13.185886 +0000 ROUTER_CORE (info) In-process subscription L/$_management_internal 2025-12-08T17:59:13.187300086+00:00 stderr F 2025-12-08 17:59:13.187250 +0000 POLICY (info) Policy configured maxConnections: 65535, policyDir: '',access rules enabled: 'false', use hostname patterns: 'false' 2025-12-08T17:59:13.187824811+00:00 stderr F 2025-12-08 17:59:13.187788 +0000 POLICY (info) Policy fallback defaultVhost is defined: '$default' 2025-12-08T17:59:13.188037296+00:00 stderr F 2025-12-08 17:59:13.187996 +0000 CONN_MGR (info) Created SSL Profile with name sslProfile 2025-12-08T17:59:13.190266134+00:00 stderr F 2025-12-08 17:59:13.190215 +0000 CONN_MGR (warning) It is unsafe to provide plain text passwords in the config file 2025-12-08T17:59:13.190266134+00:00 stderr F 2025-12-08 17:59:13.190246 +0000 CONN_MGR (info) Configured Connector: default-interconnect:5671 proto=any, role=edge, sslProfile=sslProfile 2025-12-08T17:59:13.190871221+00:00 stderr F 2025-12-08 17:59:13.190807 +0000 CONN_MGR (info) Configured Listener: 0.0.0.0:5672 proto=any, role=normal 2025-12-08T17:59:13.191347563+00:00 stderr F 2025-12-08 17:59:13.191281 +0000 SERVER (notice) Operational, 2 Threads Running (process ID 1) 2025-12-08T17:59:13.191458776+00:00 stderr F 2025-12-08 17:59:13.191418 +0000 SERVER (notice) Process VmSize 115.54 MiB (31.34 GiB available memory) 2025-12-08T17:59:13.191767214+00:00 stderr F 2025-12-08 17:59:13.191677 +0000 SERVER (notice) Listening on 0.0.0.0:5672 2025-12-08T17:59:13.280856162+00:00 stderr F 2025-12-08 17:59:13.280773 +0000 ROUTER_CORE (info) [C1] Connection Opened: dir=out host=default-interconnect:5671 vhost= encrypted=TLSv1.2 auth=PLAIN user=guest@default-interconnect container_id=default-interconnect-55bf8d5cb-rwr2k props={:product="qpid-dispatch-router", :version="Red Hat AMQ Interconnect 1.10.9 (qpid-dispatch 1.14.0)", :"qd.conn-id"=6} 2025-12-08T17:59:13.280982296+00:00 stderr F 2025-12-08 17:59:13.280861 +0000 ROUTER_CORE (info) Edge connection (id=1) to interior established 2025-12-08T17:59:13.281374046+00:00 stderr F 2025-12-08 17:59:13.281308 +0000 ROUTER_CORE (info) [C1][L6] Link attached: dir=out source={} target={_$qd.edge_heartbeat expire:link} 2025-12-08T17:59:13.281465348+00:00 stderr F 2025-12-08 17:59:13.281426 +0000 ROUTER_CORE (info) [C1][L7] Link attached: dir=out source={} target={_$qd.addr_lookup expire:link} 2025-12-08T17:59:13.281497919+00:00 stderr F 2025-12-08 17:59:13.281461 +0000 ROUTER_CORE (info) [C1][L8] Link attached: dir=in source={(dyn) expire:link} target={} 2025-12-08T17:59:13.281550890+00:00 stderr F 2025-12-08 17:59:13.281512 +0000 ROUTER_CORE (info) [C1][L9] Link attached: dir=out source={ expire:link} target={ expire:link} 2025-12-08T17:59:13.281615652+00:00 stderr F 2025-12-08 17:59:13.281564 +0000 ROUTER_CORE (info) [C1][L10] Link attached: dir=in source={qdr-test.smoketest expire:link caps::"qd.router-edge-downlink"} target={ expire:link caps::"qd.router-edge-downlink"} 2025-12-08T17:59:13.281637203+00:00 stderr F 2025-12-08 17:59:13.281600 +0000 
ROUTER_CORE (info) [C1][L11] Link attached: dir=in source={_$qd.edge_addr_tracking expire:link} target={ expire:link} 2025-12-08T17:59:13.281704564+00:00 stderr F 2025-12-08 17:59:13.281648 +0000 ROUTER_CORE (info) [C1][L12] Link attached: dir=out source={} target={$management expire:link} 2025-12-08T17:59:13.281712215+00:00 stderr F 2025-12-08 17:59:13.281682 +0000 ROUTER_CORE (info) [C1][L13] Link attached: dir=in source={(dyn) expire:link} target={} 2025-12-08T17:59:24.608030546+00:00 stderr F 2025-12-08 17:59:24.607871 +0000 SERVER (info) [C2] Accepted connection to 0.0.0.0:5672 from 10.217.0.84:47746 2025-12-08T17:59:24.608097458+00:00 stderr F 2025-12-08 17:59:24.608069 +0000 ROUTER_CORE (info) [C2] Connection Opened: dir=in host=10.217.0.84:47746 vhost= encrypted=no auth=no user=anonymous container_id=name props= 2025-12-08T17:59:24.608231941+00:00 stderr F 2025-12-08 17:59:24.608178 +0000 ROUTER_CORE (info) [C2][L14] Link attached: dir=in source={ expire:sess} target={ expire:sess} 2025-12-08T17:59:27.625849052+00:00 stderr F 2025-12-08 17:59:27.623245 +0000 SERVER (info) [C3] Accepted connection to 0.0.0.0:5672 from 10.217.0.84:48080 2025-12-08T17:59:27.625849052+00:00 stderr F 2025-12-08 17:59:27.624639 +0000 SERVER (info) [C4] Accepted connection to 0.0.0.0:5672 from 10.217.0.84:48096 2025-12-08T17:59:27.625849052+00:00 stderr F 2025-12-08 17:59:27.625500 +0000 ROUTER_CORE (info) [C4] Connection Opened: dir=in host=10.217.0.84:48096 vhost= encrypted=no auth=no user=anonymous container_id=smoketest.redhat.com-infrawatch-out-1765216767 props= 2025-12-08T17:59:27.625849052+00:00 stderr F 2025-12-08 17:59:27.625637 +0000 ROUTER_CORE (info) [C3] Connection Opened: dir=in host=10.217.0.84:48080 vhost= encrypted=no auth=ANONYMOUS user=anonymous container_id=smoketest.redhat.com-infrawatch-in-1765216767 props= 2025-12-08T17:59:30.637551748+00:00 stderr F 2025-12-08 17:59:30.637454 +0000 ROUTER_CORE (info) [C4][L15] Link attached: dir=in source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:30.649444341+00:00 stderr F 2025-12-08 17:59:30.649316 +0000 ROUTER_CORE (info) [C1][L16] Link attached: dir=out source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:30.940401100+00:00 stderr F 2025-12-08 17:59:30.940264 +0000 SERVER (info) [C5] Accepted connection to 0.0.0.0:5672 from 10.217.0.84:48100 2025-12-08T17:59:30.941673593+00:00 stderr F 2025-12-08 17:59:30.941487 +0000 ROUTER_CORE (info) [C5] Connection Opened: dir=in host=10.217.0.84:48100 vhost= encrypted=no auth=no user=anonymous container_id=openstack.org/om/container/stf-smoketest-smoke1-pbhxq/ceilometer_publish.py/3/c3a785e50b8945bcb000c68825ad2749 props={:process="ceilometer_publish.py", :pid=3, :node="stf-smoketest-smoke1-pbhxq"} 2025-12-08T17:59:30.945157774+00:00 stderr F 2025-12-08 17:59:30.944940 +0000 ROUTER_CORE (info) [C5][L17] Link attached: dir=out source={(dyn) expire:sess} target={rpc-response expire:sess} 2025-12-08T17:59:30.946994653+00:00 stderr F 2025-12-08 17:59:30.946805 +0000 ROUTER_CORE (info) [C5][L18] Link attached: dir=in source={/anycast/ceilometer/cloud1-event.sample expire:sess} target={/anycast/ceilometer/cloud1-event.sample expire:sess} 2025-12-08T17:59:30.948011040+00:00 stderr F 2025-12-08 17:59:30.947923 +0000 ROUTER_CORE (info) [C1][L19] Link attached: dir=out source={ expire:link} target={anycast/ceilometer/cloud1-event.sample expire:link} 2025-12-08T17:59:31.018349753+00:00 stderr F 2025-12-08 17:59:31.018163 +0000 ROUTER_CORE 
(info) [C5][L20] Link attached: dir=in source={/anycast/ceilometer/cloud1-metering.sample expire:sess} target={/anycast/ceilometer/cloud1-metering.sample expire:sess} 2025-12-08T17:59:31.024082805+00:00 stderr F 2025-12-08 17:59:31.019860 +0000 ROUTER_CORE (info) [C1][L21] Link attached: dir=out source={ expire:link} target={anycast/ceilometer/cloud1-metering.sample expire:link} 2025-12-08T17:59:31.160938771+00:00 stderr F 2025-12-08 17:59:31.156210 +0000 SERVER (info) [C5] Connection from 10.217.0.84:48100 (to 0.0.0.0:5672) failed: amqp:connection:framing-error connection aborted 2025-12-08T17:59:31.160938771+00:00 stderr F 2025-12-08 17:59:31.156413 +0000 ROUTER_CORE (info) [C5][L17] Link lost: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:31.160938771+00:00 stderr F 2025-12-08 17:59:31.156441 +0000 ROUTER_CORE (info) [C5][L18] Link lost: del=7 presett=0 psdrop=0 acc=7 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:31.160938771+00:00 stderr F 2025-12-08 17:59:31.156495 +0000 ROUTER_CORE (info) [C5][L20] Link lost: del=3 presett=0 psdrop=0 acc=3 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:31.160938771+00:00 stderr F 2025-12-08 17:59:31.156508 +0000 ROUTER_CORE (info) [C5] Connection Closed 2025-12-08T17:59:31.160938771+00:00 stderr F 2025-12-08 17:59:31.157268 +0000 ROUTER_CORE (info) [C1][L19] Link detached: del=7 presett=0 psdrop=0 acc=7 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:31.160938771+00:00 stderr F 2025-12-08 17:59:31.157293 +0000 ROUTER_CORE (info) [C1][L21] Link detached: del=3 presett=0 psdrop=0 acc=3 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:33.635932842+00:00 stderr F 2025-12-08 17:59:33.635773 +0000 ROUTER_CORE (info) [C4][L22] Link attached: dir=in source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:36.636614386+00:00 stderr F 2025-12-08 17:59:36.636522 +0000 ROUTER_CORE (info) [C4][L23] Link attached: dir=in source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:39.638274697+00:00 stderr F 2025-12-08 17:59:39.638127 +0000 ROUTER_CORE (info) [C4][L24] Link attached: dir=in source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:42.636762174+00:00 stderr F 2025-12-08 17:59:42.636626 +0000 ROUTER_CORE (info) [C4][L25] Link attached: dir=in source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:45.639160442+00:00 stderr F 2025-12-08 17:59:45.638943 +0000 ROUTER_CORE (info) [C4][L26] Link attached: dir=in source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:48.636929436+00:00 stderr F 2025-12-08 17:59:48.636790 +0000 ROUTER_CORE (info) [C4][L27] Link attached: dir=in source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:51.637758951+00:00 stderr F 2025-12-08 17:59:51.637600 +0000 ROUTER_CORE (info) [C4][L28] Link attached: dir=in source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:54.633253036+00:00 stderr F 2025-12-08 17:59:54.633140 +0000 ROUTER_CORE (info) [C4][L29] Link attached: dir=in source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:57.637130883+00:00 stderr F 2025-12-08 17:59:57.632825 +0000 ROUTER_CORE (info) [C4][L30] Link attached: dir=in source={ expire:link} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:58.168413254+00:00 stderr F 
2025-12-08 17:59:58.168305 +0000 SERVER (info) [C2] Connection from 10.217.0.84:47746 (to 0.0.0.0:5672) failed: amqp:connection:framing-error connection aborted 2025-12-08T17:59:58.169654187+00:00 stderr F 2025-12-08 17:59:58.169465 +0000 ROUTER_CORE (info) [C2][L14] Link lost: del=3474 presett=0 psdrop=0 acc=3474 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.169709698+00:00 stderr F 2025-12-08 17:59:58.169687 +0000 ROUTER_CORE (info) [C2] Connection Closed 2025-12-08T17:59:58.170519099+00:00 stderr F 2025-12-08 17:59:58.170474 +0000 SERVER (info) [C4] Connection from 10.217.0.84:48096 (to 0.0.0.0:5672) failed: amqp:connection:framing-error connection aborted 2025-12-08T17:59:58.170668073+00:00 stderr F 2025-12-08 17:59:58.170570 +0000 SERVER (info) [C3] Connection from 10.217.0.84:48080 (to 0.0.0.0:5672) failed: amqp:connection:framing-error connection aborted 2025-12-08T17:59:58.172409509+00:00 stderr F 2025-12-08 17:59:58.172332 +0000 ROUTER_CORE (info) [C3] Connection Closed 2025-12-08T17:59:58.172442720+00:00 stderr F 2025-12-08 17:59:58.172401 +0000 ROUTER_CORE (info) [C4][L15] Link lost: del=1 presett=0 psdrop=0 acc=1 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.172442720+00:00 stderr F 2025-12-08 17:59:58.172421 +0000 ROUTER_CORE (info) [C4][L22] Link lost: del=1 presett=0 psdrop=0 acc=1 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.172453060+00:00 stderr F 2025-12-08 17:59:58.172436 +0000 ROUTER_CORE (info) [C4][L23] Link lost: del=1 presett=0 psdrop=0 acc=1 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.172485351+00:00 stderr F 2025-12-08 17:59:58.172452 +0000 ROUTER_CORE (info) [C4][L24] Link lost: del=1 presett=0 psdrop=0 acc=1 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.172485351+00:00 stderr F 2025-12-08 17:59:58.172470 +0000 ROUTER_CORE (info) [C4][L25] Link lost: del=1 presett=0 psdrop=0 acc=1 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.172510392+00:00 stderr F 2025-12-08 17:59:58.172486 +0000 ROUTER_CORE (info) [C4][L26] Link lost: del=1 presett=0 psdrop=0 acc=1 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.172521842+00:00 stderr F 2025-12-08 17:59:58.172505 +0000 ROUTER_CORE (info) [C4][L27] Link lost: del=1 presett=0 psdrop=0 acc=1 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.172554323+00:00 stderr F 2025-12-08 17:59:58.172520 +0000 ROUTER_CORE (info) [C4][L28] Link lost: del=1 presett=0 psdrop=0 acc=1 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.172554323+00:00 stderr F 2025-12-08 17:59:58.172538 +0000 ROUTER_CORE (info) [C4][L29] Link lost: del=1 presett=0 psdrop=0 acc=1 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.172668326+00:00 stderr F 2025-12-08 17:59:58.172609 +0000 ROUTER_CORE (info) [C4][L30] Link lost: del=1 presett=0 psdrop=0 acc=1 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:58.172680826+00:00 stderr F 2025-12-08 17:59:58.172629 +0000 ROUTER_CORE (info) [C4] Connection Closed 2025-12-08T17:59:58.173500708+00:00 stderr F 2025-12-08 17:59:58.173461 +0000 ROUTER_CORE (info) [C1][L16] Link detached: del=10 presett=0 psdrop=0 acc=10 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no ././@LongLink0000644000000000000000000000024100000000000011600 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611513033076 5ustar zuulzuul././@LongLink0000644000000000000000000000026100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/registry-server/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611546033104 5ustar zuulzuul././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/registry-server/0.log.gzhome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000021230215115611513033100 0ustar zuulzuul
././@LongLink0000644000000000000000000000026100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-content/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-content/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000000015115611513033066 0ustar zuulzuul././@LongLink0000644000000000000000000000026300000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-utilities/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000027000000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-utilities/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000000015115611513033066 0ustar zuulzuul././@LongLink0000644000000000000000000000027000000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diag0000755000175000017500000000000015115611513033042 5ustar zuulzuul././@LongLink0000644000000000000000000000031000000000000011595 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440/check-endpoints/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diag0000755000175000017500000000000015115611521033041 5ustar zuulzuul././@LongLink0000644000000000000000000000031500000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440/check-endpoints/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diag0000644000175000017500000001332515115611513033050 0ustar zuulzuul2025-12-08T17:44:43.770585285+00:00 stderr F W1208 17:44:43.769558 1 cmd.go:257] Using insecure, self-signed certificates 2025-12-08T17:44:43.770769450+00:00 stderr F Validity period of the certificate for "check-endpoints-signer@1765215883" is unset, resetting to 43800h0m0s!
2025-12-08T17:44:44.328017746+00:00 stderr F I1208 17:44:44.327288 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:44.638919527+00:00 stderr F I1208 17:44:44.638859 1 builder.go:304] check-endpoints version 4.20.0-202510211040.p2.gb0393aa.assembly.stream.el9-b0393aa-b0393aa3e67302d89e91b8f7b1013b6d2e317f04 2025-12-08T17:44:45.432709454+00:00 stderr F I1208 17:44:45.427312 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:45.432709454+00:00 stderr F W1208 17:44:45.427340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:45.432709454+00:00 stderr F W1208 17:44:45.427344 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:45.432709454+00:00 stderr F W1208 17:44:45.427349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:45.432709454+00:00 stderr F W1208 17:44:45.427352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:45.432709454+00:00 stderr F W1208 17:44:45.427355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:45.432709454+00:00 stderr F W1208 17:44:45.427358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:44:45.436692865+00:00 stderr F I1208 17:44:45.436617 1 secure_serving.go:211] Serving securely on [::]:17698 2025-12-08T17:44:45.443014711+00:00 stderr F I1208 17:44:45.442971 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:45.447570668+00:00 stderr F I1208 17:44:45.447268 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/tmp/serving-cert-720727654/tls.crt::/tmp/serving-cert-720727654/tls.key" 2025-12-08T17:44:45.447570668+00:00 stderr F I1208 17:44:45.447398 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:45.447570668+00:00 stderr F I1208 17:44:45.447432 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:45.447674551+00:00 stderr F I1208 17:44:45.447656 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:45.518896722+00:00 stderr F I1208 17:44:45.518824 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:45.518896722+00:00 stderr F I1208 17:44:45.518839 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:45.520994521+00:00 stderr F I1208 17:44:45.520935 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:45.531870473+00:00 stderr F I1208 17:44:45.531827 1 base_controller.go:76] Waiting for caches to sync for CheckEndpointsTimeToStart 2025-12-08T17:44:45.619843601+00:00 stderr F I1208 17:44:45.619797 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:45.622412752+00:00 stderr F I1208 17:44:45.622393 1 shared_informer.go:357] "Caches are synced" 
controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:45.622540036+00:00 stderr F I1208 17:44:45.622526 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:45.748865661+00:00 stderr F I1208 17:44:45.748792 1 base_controller.go:82] Caches are synced for CheckEndpointsTimeToStart 2025-12-08T17:44:45.748975794+00:00 stderr F I1208 17:44:45.748960 1 base_controller.go:119] Starting #1 worker of CheckEndpointsTimeToStart controller ... 2025-12-08T17:44:45.749092767+00:00 stderr F I1208 17:44:45.749079 1 base_controller.go:76] Waiting for caches to sync for CheckEndpointsStop 2025-12-08T17:44:45.749122988+00:00 stderr F I1208 17:44:45.749113 1 base_controller.go:82] Caches are synced for CheckEndpointsStop 2025-12-08T17:44:45.749149979+00:00 stderr F I1208 17:44:45.749139 1 base_controller.go:119] Starting #1 worker of CheckEndpointsStop controller ... 2025-12-08T17:44:45.749195720+00:00 stderr F I1208 17:44:45.749185 1 base_controller.go:181] Shutting down CheckEndpointsTimeToStart ... 2025-12-08T17:44:45.749744846+00:00 stderr F I1208 17:44:45.749717 1 base_controller.go:76] Waiting for caches to sync for check-endpoints 2025-12-08T17:44:45.749790197+00:00 stderr F I1208 17:44:45.749780 1 base_controller.go:123] Shutting down worker of CheckEndpointsTimeToStart controller ... 2025-12-08T17:44:45.749825788+00:00 stderr F I1208 17:44:45.749817 1 base_controller.go:113] All CheckEndpointsTimeToStart workers have been terminated 2025-12-08T17:44:45.853287577+00:00 stderr F I1208 17:44:45.852954 1 base_controller.go:82] Caches are synced for check-endpoints 2025-12-08T17:44:45.853358019+00:00 stderr F I1208 17:44:45.853344 1 base_controller.go:119] Starting #1 worker of check-endpoints controller ... 
././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611514033077 5ustar zuulzuul././@LongLink0000644000000000000000000000032200000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/extract/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000032700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/extract/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000001224515115611514033105 0ustar zuulzuul2025-12-08T17:55:02.557464273+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Using in-cluster kube client config" 2025-12-08T17:55:02.585047165+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/cluster-observability-operator.clusterserviceversion.yaml 2025-12-08T17:55:02.587099741+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_alertmanagerconfigs.yaml 2025-12-08T17:55:02.606350868+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_alertmanagers.yaml 2025-12-08T17:55:02.623517211+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_monitoringstacks.yaml 2025-12-08T17:55:02.625434892+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_podmonitors.yaml 2025-12-08T17:55:02.626746658+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_probes.yaml 2025-12-08T17:55:02.628044542+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_prometheusagents.yaml 2025-12-08T17:55:02.641716940+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_prometheuses.yaml 2025-12-08T17:55:02.673200037+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_prometheusrules.yaml 2025-12-08T17:55:02.674075431+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_scrapeconfigs.yaml 2025-12-08T17:55:02.686108265+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_servicemonitors.yaml 2025-12-08T17:55:02.687995716+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/monitoring.rhobs_thanosqueriers.yaml 2025-12-08T17:55:02.690192594+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" 
file=/bundle/manifests/monitoring.rhobs_thanosrulers.yaml 2025-12-08T17:55:02.711839587+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/obo-prometheus-operator-admission-webhook_policy_v1_poddisruptionbudget.yaml 2025-12-08T17:55:02.712135345+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/obo-prometheus-operator-admission-webhook_v1_service.yaml 2025-12-08T17:55:02.712691720+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/obo-prometheus-operator_v1_service.yaml 2025-12-08T17:55:02.713012958+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/observability-operator_monitoring.coreos.com_v1_prometheusrule.yaml 2025-12-08T17:55:02.713595305+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/observability-operator_rbac.authorization.k8s.io_v1_rolebinding.yaml 2025-12-08T17:55:02.715100805+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/observability-operator_v1_service.yaml 2025-12-08T17:55:02.715409833+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/observability.openshift.io_observabilityinstallers.yaml 2025-12-08T17:55:02.716489953+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/observability.openshift.io_uiplugins.yaml 2025-12-08T17:55:02.717410577+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/perses.dev_perses.yaml 2025-12-08T17:55:02.722907555+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/perses.dev_persesdashboards.yaml 2025-12-08T17:55:02.723751237+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/perses.dev_persesdatasources.yaml 2025-12-08T17:55:02.725730271+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/perses_v1_serviceaccount.yaml 2025-12-08T17:55:02.725934887+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/persesdashboard-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:02.726184923+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/persesdashboard-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:02.726368068+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/persesdatasource-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:02.726656796+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/manifests/persesdatasource-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml 2025-12-08T17:55:02.726926563+00:00 stderr F time="2025-12-08T17:55:02Z" level=info msg="Reading file" file=/bundle/metadata/annotations.yaml ././@LongLink0000644000000000000000000000031700000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/pull/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar 
zuulzuul././@LongLink0000644000000000000000000000032400000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/pull/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000502215115611514033100 0ustar zuulzuul2025-12-08T17:55:01.539348675+00:00 stdout F skipping a dir without errors: / 2025-12-08T17:55:01.539348675+00:00 stdout F skipping a dir without errors: /bundle 2025-12-08T17:55:01.539547230+00:00 stdout F skipping all files in the dir: /dev 2025-12-08T17:55:01.539547230+00:00 stdout F skipping a dir without errors: /etc 2025-12-08T17:55:01.539798467+00:00 stdout F skipping a dir without errors: /manifests 2025-12-08T17:55:01.540457594+00:00 stdout F skipping a dir without errors: /metadata 2025-12-08T17:55:01.540752622+00:00 stdout F skipping all files in the dir: /proc 2025-12-08T17:55:01.540818964+00:00 stdout F skipping a dir without errors: /root 2025-12-08T17:55:01.540868565+00:00 stdout F skipping a dir without errors: /root/buildinfo 2025-12-08T17:55:01.541000488+00:00 stdout F skipping a dir without errors: /run 2025-12-08T17:55:01.541052220+00:00 stdout F skipping a dir without errors: /run/secrets 2025-12-08T17:55:01.541108181+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm 2025-12-08T17:55:01.541154313+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/ca 2025-12-08T17:55:01.541235945+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/syspurpose 2025-12-08T17:55:01.541283656+00:00 stdout F skipping all files in the dir: /sys 2025-12-08T17:55:01.541350298+00:00 stdout F skipping a dir without errors: /tests 2025-12-08T17:55:01.541407749+00:00 stdout F skipping a dir without errors: /tests/scorecard 2025-12-08T17:55:01.541472071+00:00 stdout F skipping a dir without errors: /usr 2025-12-08T17:55:01.541529863+00:00 stdout F skipping a dir without errors: /usr/share 2025-12-08T17:55:01.541587244+00:00 stdout F skipping a dir without errors: /usr/share/buildinfo 2025-12-08T17:55:01.541652986+00:00 stdout F skipping a dir without errors: /util 2025-12-08T17:55:01.541708117+00:00 stdout F skipping a dir without errors: /var 2025-12-08T17:55:01.541753648+00:00 stdout F skipping a dir without errors: /var/run 2025-12-08T17:55:01.541802480+00:00 stdout F skipping a dir without errors: /var/run/secrets 2025-12-08T17:55:01.541851941+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io 2025-12-08T17:55:01.541976414+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io/serviceaccount 2025-12-08T17:55:01.542001985+00:00 stdout F skipping a dir without errors: /var/run/secrets/kubernetes.io/serviceaccount/..2025_12_08_17_54_57.3866371985 2025-12-08T17:55:01.542139990+00:00 stdout F &{metadata/annotations.yaml manifests/} ././@LongLink0000644000000000000000000000031700000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/util/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000032400000000000011602 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/util/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000010715115611514033077 0ustar zuulzuul2025-12-08T17:54:59.310187945+00:00 stdout F '/bin/cpb' -> '/util/cpb' ././@LongLink0000644000000000000000000000027700000000000011611 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000755000175000017500000000000015115611513032776 5ustar zuulzuul././@LongLink0000644000000000000000000000032700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/machine-config-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000755000175000017500000000000015115611521032775 5ustar zuulzuul././@LongLink0000644000000000000000000000033400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/machine-config-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000644000175000017500000207457715115611521033025 0ustar zuulzuul2025-12-08T17:44:19.648393544+00:00 stderr F I1208 17:44:19.647418 1 start.go:52] Version: 4.20.1 (Raw: 89b561f0, Hash: f587a1bfbaba518cc1d49ad6300e29eeb9c38cec) 2025-12-08T17:44:19.650310246+00:00 stderr F I1208 17:44:19.649276 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:19.651413806+00:00 stderr F I1208 17:44:19.651361 1 metrics.go:92] Registering Prometheus metrics 2025-12-08T17:44:19.651522339+00:00 stderr F I1208 17:44:19.651433 1 metrics.go:99] Starting metrics listener on 127.0.0.1:8797 2025-12-08T17:44:19.715780352+00:00 stderr F I1208 17:44:19.715724 1 leaderelection.go:257] attempting to acquire leader lease openshift-machine-config-operator/machine-config... 
2025-12-08T17:44:19.725807186+00:00 stderr F I1208 17:44:19.725344 1 leaderelection.go:271] successfully acquired lease openshift-machine-config-operator/machine-config 2025-12-08T17:44:19.749161792+00:00 stderr F I1208 17:44:19.749106 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:19.838014546+00:00 stderr F I1208 17:44:19.836366 1 event.go:377] Event(v1.ObjectReference{Kind:"Node", Namespace:"openshift-machine-config-operator", Name:"crc", UID:"23216ff3-032e-49af-af7e-1d23d5907b59", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:19.838014546+00:00 stderr F I1208 17:44:19.836355 1 featuregates.go:112] FeatureGates initialized: enabled=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform 
AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks], disabled=[AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:20.105368008+00:00 stderr F I1208 17:44:20.105294 1 operator.go:415] Change observed to kube-apiserver-server-ca 2025-12-08T17:44:20.117219242+00:00 stderr F I1208 17:44:20.117152 1 operator.go:395] Starting MachineConfigOperator 2025-12-08T17:44:20.118280601+00:00 stderr F E1208 17:44:20.118249 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:20.127081981+00:00 stderr F E1208 17:44:20.124974 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:20.138900833+00:00 stderr F E1208 17:44:20.138847 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:20.164006518+00:00 stderr F E1208 17:44:20.163318 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:20.208476861+00:00 stderr F E1208 17:44:20.207554 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:20.289193223+00:00 stderr F E1208 17:44:20.288923 1 
sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:20.452499217+00:00 stderr F E1208 17:44:20.452439 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:20.774376067+00:00 stderr F E1208 17:44:20.773770 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:21.417284414+00:00 stderr F E1208 17:44:21.416049 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:21.626201532+00:00 stderr F E1208 17:44:21.621086 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:21.697347073+00:00 stderr F E1208 17:44:21.693098 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:21.834765662+00:00 stderr F E1208 17:44:21.834720 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.537596153+00:00 stderr F E1208 17:44:22.534203 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.644915380+00:00 stderr F E1208 17:44:22.644774 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.662143980+00:00 stderr F E1208 17:44:22.661745 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.708997708+00:00 stderr F E1208 17:44:22.698898 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.708997708+00:00 stderr F E1208 17:44:22.699094 1 operator.go:467] "Unhandled Error" err="failed to merge global pull secret: do**********ig bytes contain JSON null" 2025-12-08T17:44:22.708997708+00:00 stderr F E1208 17:44:22.701397 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.708997708+00:00 stderr F E1208 17:44:22.707369 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.718597099+00:00 stderr F E1208 17:44:22.718505 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.744365832+00:00 stderr F E1208 17:44:22.739595 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.783253063+00:00 stderr F E1208 17:44:22.781413 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.869400153+00:00 stderr F E1208 17:44:22.863214 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:22.976596327+00:00 stderr F E1208 17:44:22.971973 1 sync.go:528] Merging registry secrets failed with: 
failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:23.024624637+00:00 stderr F E1208 17:44:23.024388 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:23.118110377+00:00 stderr F E1208 17:44:23.116286 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:23.527116113+00:00 stderr F E1208 17:44:23.527067 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:23.666720562+00:00 stderr F E1208 17:44:23.666681 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:23.698278142+00:00 stderr F E1208 17:44:23.698232 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:23.844591493+00:00 stderr F E1208 17:44:23.844445 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:23.888360317+00:00 stderr F E1208 17:44:23.888314 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:23.980400548+00:00 stderr F E1208 17:44:23.977817 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:24.019859214+00:00 stderr F E1208 17:44:24.019768 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:24.019988287+00:00 stderr F E1208 17:44:24.019931 1 operator.go:467] "Unhandled Error" err="failed to merge global pull secret: do**********ig bytes contain JSON null" 2025-12-08T17:44:24.118153765+00:00 stderr F E1208 17:44:24.118076 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:24.124203190+00:00 stderr F E1208 17:44:24.123977 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:24.135428386+00:00 stderr F E1208 17:44:24.135375 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:24.156933203+00:00 stderr F E1208 17:44:24.156869 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:24.199504874+00:00 stderr F E1208 17:44:24.199448 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:24.283025842+00:00 stderr F E1208 17:44:24.282421 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:24.448589908+00:00 stderr F E1208 17:44:24.446915 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:24.513297183+00:00 stderr F E1208 17:44:24.513249 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig 
bytes contain JSON null 2025-12-08T17:44:24.769536622+00:00 stderr F E1208 17:44:24.768382 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:24.822713753+00:00 stderr F E1208 17:44:24.822243 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.099764340+00:00 stderr F E1208 17:44:25.099714 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.192147360+00:00 stderr F E1208 17:44:25.191482 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.225092209+00:00 stderr F E1208 17:44:25.224203 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.321569920+00:00 stderr F E1208 17:44:25.320751 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.424551109+00:00 stderr F E1208 17:44:25.423418 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.705229325+00:00 stderr F E1208 17:44:25.705174 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.707918169+00:00 stderr F E1208 17:44:25.705316 1 operator.go:467] "Unhandled Error" err="failed to merge global pull secret: do**********ig bytes contain JSON null" 2025-12-08T17:44:25.749210375+00:00 stderr F E1208 17:44:25.748748 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.754918231+00:00 stderr F E1208 17:44:25.754683 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.767914376+00:00 stderr F E1208 17:44:25.765627 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.787669024+00:00 stderr F E1208 17:44:25.786600 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.829227998+00:00 stderr F E1208 17:44:25.827689 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.903375060+00:00 stderr F E1208 17:44:25.899809 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.907938815+00:00 stderr F E1208 17:44:25.905500 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:25.911090001+00:00 stderr F E1208 17:44:25.910108 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:26.011856060+00:00 stderr F E1208 17:44:26.011798 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 
2025-12-08T17:44:26.038608289+00:00 stderr F E1208 17:44:26.038179 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:26.112053233+00:00 stderr F E1208 17:44:26.111975 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:26.551662843+00:00 stderr F E1208 17:44:26.551617 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:26.606931551+00:00 stderr F E1208 17:44:26.603373 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:26.606931551+00:00 stderr F E1208 17:44:26.603858 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:26.766585416+00:00 stderr F E1208 17:44:26.765293 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.120431348+00:00 stderr F E1208 17:44:27.119084 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.120431348+00:00 stderr F E1208 17:44:27.119215 1 operator.go:467] "Unhandled Error" err="failed to merge global pull secret: do**********ig bytes contain JSON null" 2025-12-08T17:44:27.178121771+00:00 stderr F E1208 17:44:27.178054 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.184595088+00:00 stderr F E1208 17:44:27.184424 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.195720021+00:00 stderr F E1208 17:44:27.195644 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.196201135+00:00 stderr F E1208 17:44:27.196178 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.201912201+00:00 stderr F E1208 17:44:27.201051 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.220076796+00:00 stderr F E1208 17:44:27.219285 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.237464090+00:00 stderr F E1208 17:44:27.237391 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.253988251+00:00 stderr F E1208 17:44:27.253261 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.254208787+00:00 stderr F E1208 17:44:27.254169 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.280562556+00:00 stderr F E1208 17:44:27.280471 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.381424147+00:00 stderr F E1208 
17:44:27.380814 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.552655948+00:00 stderr F E1208 17:44:27.551217 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.560412900+00:00 stderr F E1208 17:44:27.560295 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.918615979+00:00 stderr F E1208 17:44:27.916638 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:27.933783573+00:00 stderr F E1208 17:44:27.933528 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:28.104548681+00:00 stderr F E1208 17:44:28.104299 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:28.104548681+00:00 stderr F E1208 17:44:28.104451 1 operator.go:467] "Unhandled Error" err="failed to merge global pull secret: do**********ig bytes contain JSON null" 2025-12-08T17:44:28.707143598+00:00 stderr F E1208 17:44:28.706615 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:28.717352117+00:00 stderr F E1208 17:44:28.717310 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:28.731121072+00:00 stderr F E1208 17:44:28.730636 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:28.751994612+00:00 stderr F E1208 17:44:28.751944 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:28.775172664+00:00 stderr F E1208 17:44:28.773260 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:28.793472063+00:00 stderr F E1208 17:44:28.793343 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:28.844286649+00:00 stderr F E1208 17:44:28.844198 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:28.958382482+00:00 stderr F E1208 17:44:28.956574 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.075491445+00:00 stderr F E1208 17:44:29.073304 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.089561639+00:00 stderr F E1208 17:44:29.089469 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.114654003+00:00 stderr F E1208 17:44:29.114535 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.122261931+00:00 stderr F E1208 17:44:29.122153 1 sync.go:528] Merging registry 
secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.138010731+00:00 stderr F E1208 17:44:29.135718 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.145034212+00:00 stderr F E1208 17:44:29.144835 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.166961111+00:00 stderr F E1208 17:44:29.166859 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.220944843+00:00 stderr F E1208 17:44:29.216824 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.220944843+00:00 stderr F E1208 17:44:29.216989 1 operator.go:467] "Unhandled Error" err="failed to merge global pull secret: do**********ig bytes contain JSON null" 2025-12-08T17:44:29.232939001+00:00 stderr F E1208 17:44:29.232753 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.240024804+00:00 stderr F E1208 17:44:29.239187 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.240024804+00:00 stderr F E1208 17:44:29.239917 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.250966902+00:00 stderr F E1208 17:44:29.250673 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.258557449+00:00 stderr F E1208 17:44:29.258492 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.293895403+00:00 stderr F E1208 17:44:29.291124 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.295933309+00:00 stderr F E1208 17:44:29.294251 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.303971748+00:00 stderr F E1208 17:44:29.303671 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.319076430+00:00 stderr F E1208 17:44:29.315504 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.331934261+00:00 stderr F E1208 17:44:29.331122 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.355920085+00:00 stderr F E1208 17:44:29.355313 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.385420230+00:00 stderr F E1208 17:44:29.384285 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.409244630+00:00 stderr F E1208 17:44:29.409185 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull 
secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.417984388+00:00 stderr F E1208 17:44:29.417862 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.435253659+00:00 stderr F E1208 17:44:29.435194 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.444544912+00:00 stderr F E1208 17:44:29.444502 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.444777318+00:00 stderr F E1208 17:44:29.444647 1 operator.go:467] "Unhandled Error" err="failed to merge global pull secret: do**********ig bytes contain JSON null" 2025-12-08T17:44:29.454955656+00:00 stderr F E1208 17:44:29.454709 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.466119201+00:00 stderr F E1208 17:44:29.466067 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.477728548+00:00 stderr F E1208 17:44:29.477463 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.499154442+00:00 stderr F E1208 17:44:29.499083 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.541352223+00:00 stderr F E1208 17:44:29.541304 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.622851386+00:00 stderr F E1208 17:44:29.622747 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:29.784787784+00:00 stderr F E1208 17:44:29.784411 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:30.105808559+00:00 stderr F E1208 17:44:30.105758 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:30.364509477+00:00 stderr F E1208 17:44:30.364174 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:30.593037449+00:00 stderr F E1208 17:44:30.592758 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:30.608272816+00:00 stderr F E1208 17:44:30.605943 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:30.747394060+00:00 stderr F E1208 17:44:30.747047 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:31.593962091+00:00 stderr F E1208 17:44:31.592295 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:32.503211423+00:00 stderr F E1208 17:44:32.503147 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 
2025-12-08T17:44:32.518559692+00:00 stderr F E1208 17:44:32.517069 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:32.596122928+00:00 stderr F E1208 17:44:32.592969 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:32.596122928+00:00 stderr F E1208 17:44:32.593132 1 operator.go:467] "Unhandled Error" err="failed to merge global pull secret: do**********ig bytes contain JSON null" 2025-12-08T17:44:33.622789212+00:00 stderr F E1208 17:44:33.621825 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:33.628517239+00:00 stderr F E1208 17:44:33.628461 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:33.639309573+00:00 stderr F E1208 17:44:33.639240 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:33.660180212+00:00 stderr F E1208 17:44:33.659992 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:33.701013296+00:00 stderr F E1208 17:44:33.700958 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:33.782248752+00:00 stderr F E1208 17:44:33.782201 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:33.944943350+00:00 stderr F E1208 17:44:33.944590 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:34.033126835+00:00 stderr F E1208 17:44:34.033068 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:34.266231103+00:00 stderr F E1208 17:44:34.266183 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:34.658304018+00:00 stderr F E1208 17:44:34.658160 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:34.690655940+00:00 stderr F E1208 17:44:34.690246 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:34.759264511+00:00 stderr F E1208 17:44:34.758814 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:35.223460904+00:00 stderr F E1208 17:44:35.223393 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:35.547103792+00:00 stderr F E1208 17:44:35.547047 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:35.618003576+00:00 stderr F E1208 17:44:35.617936 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null 2025-12-08T17:44:37.368288602+00:00 stderr F E1208 
17:44:37.367272 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null
2025-12-08T17:44:37.368288602+00:00 stderr F E1208 17:44:37.368010 1 operator.go:467] "Unhandled Error" err="failed to merge global pull secret: do**********ig bytes contain JSON null"
[... the sync.go:528 and operator.go:467 pull-secret merge errors above repeat several times per second ...]
2025-12-08T17:45:49.570616761+00:00 stderr F E1208 17:45:49.570554 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null
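The flood of "failed to merge global pull secret: ... bytes contain JSON null" entries above points at a malformed cluster-wide pull secret. A minimal shell sketch for inspecting it, assuming the OpenShift default location (secret/pull-secret in the openshift-config namespace, with the standard .dockerconfigjson data key) and that oc, jq, and base64 are available; this is an illustrative check, not part of the original CI job:

  # Decode the global pull secret and let jq surface invalid JSON or literal null entries.
  oc get secret pull-secret -n openshift-config -o json \
    | jq -r '.data[".dockerconfigjson"]' \
    | base64 -d \
    | jq .

If jq reports a parse error or prints null auth entries, the secret content itself is the problem rather than the operator.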
2025-12-08T17:45:49.596791217+00:00 stderr F E1208 17:45:49.595283 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null
[... the same pull-secret merge errors continue repeating ...]
2025-12-08T17:46:19.737620653+00:00 stderr F E1208 17:46:19.736977 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-machine-config-operator/leases/machine-config": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path
2025-12-08T17:46:19.739002355+00:00 stderr F E1208 17:46:19.738857 1 leaderelection.go:436] error retrieving resource lock openshift-machine-config-operator/machine-config: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-machine-config-operator/leases/machine-config": dial tcp 10.217.4.1:443: connect: connection refused
2025-12-08T17:46:20.440439278+00:00 stderr F E1208 17:46:20.440322 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null
[... the same pull-secret merge errors continue repeating ...]
2025-12-08T17:47:06.702010909+00:00 stderr F I1208 17:47:06.701937 1 operator.go:415] Change observed to kube-apiserver-server-ca
[... the same pull-secret merge errors continue repeating ...]
2025-12-08T17:47:19.050924412+00:00 stderr F E1208 17:47:19.050831 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null
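Shortly after this point the operator switches to "Error syncing Required MachineConfigPools" entries (see below): the master pool is degraded because node crc reports a content mismatch for /var/lib/kubelet/config.json, which on OpenShift nodes typically holds the node-level registry pull credentials managed by the machine-config operator. A minimal sketch for confirming the pool and on-disk state, assuming cluster access with oc and that the node name is crc as reported in the log; commands are illustrative only:

  # Pool status and the degraded condition message.
  oc get machineconfigpool master
  oc describe machineconfigpool master

  # Inspect what is actually on disk on the node.
  oc debug node/crc -- chroot /host cat /var/lib/kubelet/config.json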
2025-12-08T17:47:20.333233249+00:00 stderr F E1208 17:47:20.333132 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null
2025-12-08T17:47:22.895808216+00:00 stderr F E1208 17:47:22.895394 1 sync.go:528] Merging registry secrets failed with: failed to merge global pull secret: do**********ig bytes contain JSON null
2025-12-08T17:47:43.918263052+00:00 stderr F E1208 17:47:43.918147 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")"
[... the same sync.go:1689 error repeats roughly once per second through 17:48:44 ...]
2025-12-08T17:48:45.918136418+00:00 stderr F E1208 17:48:45.918082 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:46.918413462+00:00 stderr F E1208 17:48:46.918345 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:47.918673884+00:00 stderr F E1208 17:48:47.918582 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:48.918595017+00:00 stderr F E1208 17:48:48.918523 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:49.918260942+00:00 stderr F E1208 17:48:49.918199 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:50.918411681+00:00 stderr F E1208 17:48:50.918337 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:51.918825289+00:00 stderr F E1208 17:48:51.918523 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:52.918764451+00:00 stderr F E1208 17:48:52.918399 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:53.918122128+00:00 stderr F E1208 17:48:53.918006 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:54.918602477+00:00 stderr F E1208 17:48:54.918521 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:55.918419307+00:00 stderr F E1208 17:48:55.918040 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:56.917909873+00:00 stderr F E1208 17:48:56.917794 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:57.918037348+00:00 stderr F E1208 17:48:57.917967 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:58.917843843+00:00 stderr F E1208 17:48:58.917758 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:48:59.918743027+00:00 stderr F E1208 17:48:59.918301 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:00.918860082+00:00 stderr F E1208 17:49:00.917735 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:01.918040633+00:00 stderr F E1208 17:49:01.917971 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:02.918237391+00:00 stderr F E1208 17:49:02.918140 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:03.917981073+00:00 stderr F E1208 17:49:03.917818 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:04.917679702+00:00 stderr F E1208 17:49:04.917621 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:05.918061988+00:00 stderr F E1208 17:49:05.917986 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:06.918456223+00:00 stderr F E1208 17:49:06.918342 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:07.918436743+00:00 stderr F E1208 17:49:07.918330 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:08.918091750+00:00 stderr F E1208 17:49:08.917993 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:09.918332881+00:00 stderr F E1208 17:49:09.917864 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:10.919057258+00:00 stderr F E1208 17:49:10.918566 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:11.918655604+00:00 stderr F E1208 17:49:11.918589 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:12.917596925+00:00 stderr F E1208 17:49:12.917529 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:13.918350604+00:00 stderr F E1208 17:49:13.918240 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:14.917603987+00:00 stderr F E1208 17:49:14.917533 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:15.918246113+00:00 stderr F E1208 17:49:15.918187 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:16.917733726+00:00 stderr F E1208 17:49:16.917606 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:17.918222566+00:00 stderr F E1208 17:49:17.918132 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:18.918358172+00:00 stderr F E1208 17:49:18.918260 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:19.917697610+00:00 stderr F E1208 17:49:19.917629 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:20.918705447+00:00 stderr F E1208 17:49:20.918350 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:21.918719668+00:00 stderr F E1208 17:49:21.918211 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:22.918507692+00:00 stderr F E1208 17:49:22.918408 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:23.918335996+00:00 stderr F E1208 17:49:23.918227 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:24.917977324+00:00 stderr F E1208 17:49:24.917869 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:25.918539956+00:00 stderr F E1208 17:49:25.918473 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:26.918213245+00:00 stderr F E1208 17:49:26.918156 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:27.918722655+00:00 stderr F E1208 17:49:27.918360 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:28.919496054+00:00 stderr F E1208 17:49:28.919203 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:29.917723930+00:00 stderr F E1208 17:49:29.917577 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:30.917702550+00:00 stderr F E1208 17:49:30.917598 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:31.917931230+00:00 stderr F E1208 17:49:31.917839 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:32.918121558+00:00 stderr F E1208 17:49:32.918028 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:33.917693042+00:00 stderr F E1208 17:49:33.917592 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:34.917803727+00:00 stderr F E1208 17:49:34.917699 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:35.917793417+00:00 stderr F E1208 17:49:35.917652 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:36.917947794+00:00 stderr F E1208 17:49:36.917801 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:37.917994536+00:00 stderr F E1208 17:49:37.917657 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:38.919198402+00:00 stderr F E1208 17:49:38.918313 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:39.918394942+00:00 stderr F E1208 17:49:39.918327 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:40.918295539+00:00 stderr F E1208 17:49:40.918234 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:41.918487787+00:00 stderr F E1208 17:49:41.918378 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:42.918110254+00:00 stderr F E1208 17:49:42.917515 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:43.917619007+00:00 stderr F E1208 17:49:43.917540 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:44.918589944+00:00 stderr F E1208 17:49:44.918521 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:45.918243412+00:00 stderr F E1208 17:49:45.918175 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:46.918277965+00:00 stderr F E1208 17:49:46.918211 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:47.918265546+00:00 stderr F E1208 17:49:47.917560 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:48.917930915+00:00 stderr F E1208 17:49:48.917792 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:49.917641225+00:00 stderr F E1208 17:49:49.917502 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:50.918319331+00:00 stderr F E1208 17:49:50.918266 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:51.918384201+00:00 stderr F E1208 17:49:51.918302 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:52.918380814+00:00 stderr F E1208 17:49:52.917957 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:53.918820751+00:00 stderr F E1208 17:49:53.918375 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:54.917708182+00:00 stderr F E1208 17:49:54.917651 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:55.917863550+00:00 stderr F E1208 17:49:55.917788 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:56.917930326+00:00 stderr F E1208 17:49:56.917844 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:57.918257749+00:00 stderr F E1208 17:49:57.918152 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:58.918306823+00:00 stderr F E1208 17:49:58.918228 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:49:59.917851034+00:00 stderr F E1208 17:49:59.917767 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:00.918173787+00:00 stderr F E1208 17:50:00.917850 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:01.918228068+00:00 stderr F E1208 17:50:01.918162 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:02.918399229+00:00 stderr F E1208 17:50:02.918314 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:03.918418377+00:00 stderr F E1208 17:50:03.918337 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:04.919166920+00:00 stderr F E1208 17:50:04.918583 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:05.917835438+00:00 stderr F E1208 17:50:05.917757 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:06.917699762+00:00 stderr F E1208 17:50:06.917627 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:07.917720189+00:00 stderr F E1208 17:50:07.917634 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:08.917843369+00:00 stderr F E1208 17:50:08.917756 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:09.919462991+00:00 stderr F E1208 17:50:09.918594 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:10.918422085+00:00 stderr F E1208 17:50:10.918356 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:11.917956043+00:00 stderr F E1208 17:50:11.917824 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:12.918375709+00:00 stderr F E1208 17:50:12.918311 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:13.918300495+00:00 stderr F E1208 17:50:13.918183 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:14.917643288+00:00 stderr F E1208 17:50:14.917558 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:15.918262208+00:00 stderr F E1208 17:50:15.917696 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:16.918944091+00:00 stderr F E1208 17:50:16.918516 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:17.918326635+00:00 stderr F E1208 17:50:17.918267 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:18.918558668+00:00 stderr F E1208 17:50:18.918455 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:19.918130826+00:00 stderr F E1208 17:50:19.918049 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:20.918557643+00:00 stderr F E1208 17:50:20.918442 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:21.918285584+00:00 stderr F E1208 17:50:21.918221 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:22.918541827+00:00 stderr F E1208 17:50:22.918461 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:23.919112726+00:00 stderr F E1208 17:50:23.918399 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:24.917592450+00:00 stderr F E1208 17:50:24.917523 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:25.918521296+00:00 stderr F E1208 17:50:25.918213 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:26.918227927+00:00 stderr F E1208 17:50:26.918157 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:27.919119313+00:00 stderr F E1208 17:50:27.918387 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:28.918299313+00:00 stderr F E1208 17:50:28.918218 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:29.918986175+00:00 stderr F E1208 17:50:29.918173 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:50:30.918485461+00:00 stderr F E1208 17:50:30.918042 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:52:51.918364908+00:00 stderr F E1208 17:52:51.918215 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:52:52.917615826+00:00 stderr F E1208 17:52:52.917520 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:52:53.918085055+00:00 stderr F E1208 17:52:53.917997 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:52:54.917791654+00:00 stderr F E1208 17:52:54.917708 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:52:55.918132650+00:00 stderr F E1208 17:52:55.917805 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:52:56.918446085+00:00 stderr F E1208 17:52:56.918120 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:52:57.919013177+00:00 stderr F E1208 17:52:57.918558 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:52:58.918185422+00:00 stderr F E1208 17:52:58.918088 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:52:59.918719873+00:00 stderr F E1208 17:52:59.918590 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:00.917661602+00:00 stderr F E1208 17:53:00.917570 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:01.917763642+00:00 stderr F E1208 17:53:01.917659 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:02.917810480+00:00 stderr F E1208 17:53:02.917666 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:03.918438464+00:00 stderr F E1208 17:53:03.918314 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:04.918119683+00:00 stderr F E1208 17:53:04.918014 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:05.918393208+00:00 stderr F E1208 17:53:05.917954 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:06.918177299+00:00 stderr F E1208 17:53:06.918083 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:07.919013769+00:00 stderr F E1208 17:53:07.918416 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:08.918613075+00:00 stderr F E1208 17:53:08.918483 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:09.917753979+00:00 stderr F E1208 17:53:09.917641 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:10.917964782+00:00 stderr F E1208 17:53:10.917785 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:11.917720523+00:00 stderr F E1208 17:53:11.917592 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:12.918699887+00:00 stderr F E1208 17:53:12.918567 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:13.918746226+00:00 stderr F E1208 17:53:13.918294 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:14.919055542+00:00 stderr F E1208 17:53:14.918565 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:15.917781277+00:00 stderr F E1208 17:53:15.917675 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:16.918113516+00:00 stderr F E1208 17:53:16.917989 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:17.918448106+00:00 stderr F E1208 17:53:17.918348 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:18.917720076+00:00 stderr F E1208 17:53:18.917639 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:19.918796205+00:00 stderr F E1208 17:53:19.917957 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:20.918235641+00:00 stderr F E1208 17:53:20.918156 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:21.918542530+00:00 stderr F E1208 17:53:21.918366 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:22.917792331+00:00 stderr F E1208 17:53:22.917716 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:23.918762657+00:00 stderr F E1208 17:53:23.917983 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:24.917857043+00:00 stderr F E1208 17:53:24.917760 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:25.917647918+00:00 stderr F E1208 17:53:25.917577 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:26.918383088+00:00 stderr F E1208 17:53:26.918312 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:27.918536293+00:00 stderr F E1208 17:53:27.918460 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:28.917945827+00:00 stderr F E1208 17:53:28.917858 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:29.917608388+00:00 stderr F E1208 17:53:29.917527 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:30.918336028+00:00 stderr F E1208 17:53:30.918281 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:31.918677287+00:00 stderr F E1208 17:53:31.918194 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:32.918727709+00:00 stderr F E1208 17:53:32.918299 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:33.918474313+00:00 stderr F E1208 17:53:33.918378 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:34.917480955+00:00 stderr F E1208 17:53:34.917413 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:35.918285518+00:00 stderr F E1208 17:53:35.918194 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:36.918728551+00:00 stderr F E1208 17:53:36.918261 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:37.918862884+00:00 stderr F E1208 17:53:37.918097 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:38.918572887+00:00 stderr F E1208 17:53:38.918196 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:39.918227758+00:00 stderr F E1208 17:53:39.918084 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:40.918727342+00:00 stderr F E1208 17:53:40.918619 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:41.918047943+00:00 stderr F E1208 17:53:41.917516 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:42.918966749+00:00 stderr F E1208 17:53:42.918676 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:43.919155164+00:00 stderr F E1208 17:53:43.918477 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:44.918381902+00:00 stderr F E1208 17:53:44.918268 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:45.918259189+00:00 stderr F E1208 17:53:45.918170 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:46.917928110+00:00 stderr F E1208 17:53:46.917760 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:47.917901439+00:00 stderr F E1208 17:53:47.917557 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:48.918641259+00:00 stderr F E1208 17:53:48.918253 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:49.930279426+00:00 stderr F E1208 17:53:49.918939 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:50.917866969+00:00 stderr F E1208 17:53:50.917561 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:51.918291220+00:00 stderr F E1208 17:53:51.917858 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:52.918120216+00:00 stderr F E1208 17:53:52.918013 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:53.918426694+00:00 stderr F E1208 17:53:53.918339 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:54.917620194+00:00 stderr F E1208 17:53:54.917542 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:55.918665043+00:00 stderr F E1208 17:53:55.918381 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:56.918570012+00:00 stderr F E1208 17:53:56.918435 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:57.918489720+00:00 stderr F E1208 17:53:57.918054 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:58.918484500+00:00 stderr F E1208 17:53:58.918374 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:53:59.917843844+00:00 stderr F E1208 17:53:59.917743 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:00.917812124+00:00 stderr F E1208 17:54:00.917610 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:01.918339299+00:00 stderr F E1208 17:54:01.918253 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:02.918079892+00:00 stderr F E1208 17:54:02.917975 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:03.918309789+00:00 stderr F E1208 17:54:03.918208 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:04.918938197+00:00 stderr F E1208 17:54:04.918544 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:05.918823774+00:00 stderr F E1208 17:54:05.918448 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:06.918552417+00:00 stderr F E1208 17:54:06.918386 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:07.918337782+00:00 stderr F E1208 17:54:07.917845 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:08.918493536+00:00 stderr F E1208 17:54:08.918381 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:09.918711322+00:00 stderr F E1208 17:54:09.918609 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:10.918415234+00:00 stderr F E1208 17:54:10.918298 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:11.918227578+00:00 stderr F E1208 17:54:11.918101 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:12.918910118+00:00 stderr F E1208 17:54:12.918725 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:13.918378503+00:00 stderr F E1208 17:54:13.918285 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:14.918973880+00:00 stderr F E1208 17:54:14.918367 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:15.919186756+00:00 stderr F E1208 17:54:15.918148 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:16.918396224+00:00 stderr F E1208 17:54:16.918314 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:17.918620410+00:00 stderr F E1208 17:54:17.918562 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:18.918716003+00:00 stderr F E1208 17:54:18.918583 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:19.918245255+00:00 stderr F E1208 17:54:19.917587 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:20.918572315+00:00 stderr F E1208 17:54:20.918464 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:21.920165910+00:00 stderr F E1208 17:54:21.918236 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:22.918204298+00:00 stderr F E1208 17:54:22.918095 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:23.919414461+00:00 stderr F E1208 17:54:23.918543 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:24.917793999+00:00 stderr F E1208 17:54:24.917736 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:25.917698098+00:00 stderr F E1208 17:54:25.917619 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:26.917734410+00:00 stderr F E1208 17:54:26.917640 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:27.918232205+00:00 stderr F E1208 17:54:27.918136 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:28.918615536+00:00 stderr F E1208 17:54:28.918088 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:29.918675929+00:00 stderr F E1208 17:54:29.918572 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:30.918662840+00:00 stderr F E1208 17:54:30.918179 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:31.918147598+00:00 stderr F E1208 17:54:31.918093 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:32.919178826+00:00 stderr F E1208 17:54:32.918256 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:33.918012646+00:00 stderr F E1208 17:54:33.917951 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:34.918163751+00:00 stderr F E1208 17:54:34.918072 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:35.918845930+00:00 stderr F E1208 17:54:35.918290 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:36.917572756+00:00 stderr F E1208 17:54:36.917515 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:37.918293657+00:00 stderr F E1208 17:54:37.918193 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:38.918407340+00:00 stderr F E1208 17:54:38.918334 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:39.917546327+00:00 stderr F E1208 17:54:39.917481 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:40.917693801+00:00 stderr F E1208 17:54:40.917594 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:41.918604917+00:00 stderr F E1208 17:54:41.917641 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:42.918106235+00:00 stderr F E1208 17:54:42.917472 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:43.918050924+00:00 stderr F E1208 17:54:43.918010 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:44.917976493+00:00 stderr F E1208 17:54:44.917867 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:45.919160097+00:00 stderr F E1208 17:54:45.918197 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:46.919405974+00:00 stderr F E1208 17:54:46.919298 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:47.918313126+00:00 stderr F E1208 17:54:47.918219 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:48.918781389+00:00 stderr F E1208 17:54:48.918666 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:49.918067462+00:00 stderr F E1208 17:54:49.918008 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:50.918038472+00:00 stderr F E1208 17:54:50.917576 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:51.918354622+00:00 stderr F E1208 17:54:51.917687 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:52.918356613+00:00 stderr F E1208 17:54:52.918235 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:53.918231031+00:00 stderr F E1208 17:54:53.918184 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:54.918479618+00:00 stderr F E1208 17:54:54.917733 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:55.918578882+00:00 stderr F E1208 17:54:55.918240 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:56.917923466+00:00 stderr F E1208 17:54:56.917809 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:57.918316788+00:00 stderr F E1208 17:54:57.918219 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:58.922289006+00:00 stderr F E1208 17:54:58.918678 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:54:59.918796423+00:00 stderr F E1208 17:54:59.918344 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:00.918610209+00:00 stderr F E1208 17:55:00.918543 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:01.919426202+00:00 stderr F E1208 17:55:01.918456 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:02.923346260+00:00 stderr F E1208 17:55:02.923282 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:03.918567402+00:00 stderr F E1208 17:55:03.918491 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:04.918361458+00:00 stderr F E1208 17:55:04.917829 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:05.918741799+00:00 stderr F E1208 17:55:05.918175 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:06.919035198+00:00 stderr F E1208 17:55:06.918123 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:07.918265329+00:00 stderr F E1208 17:55:07.918159 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:08.917500519+00:00 stderr F E1208 17:55:08.917451 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:09.919388180+00:00 stderr F E1208 17:55:09.919342 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:10.920540511+00:00 stderr F E1208 17:55:10.920210 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:11.918434125+00:00 stderr F E1208 17:55:11.918130 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:12.918444306+00:00 stderr F E1208 17:55:12.918372 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:13.921964242+00:00 stderr F E1208 17:55:13.920870 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:14.918592562+00:00 stderr F E1208 17:55:14.918231 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:15.923055293+00:00 stderr F E1208 17:55:15.919576 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:16.919792327+00:00 stderr F E1208 17:55:16.919358 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:17.919913721+00:00 stderr F E1208 17:55:17.917660 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:18.918027881+00:00 stderr F E1208 17:55:18.917765 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:19.920363526+00:00 stderr F E1208 17:55:19.920306 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:20.920238503+00:00 stderr F E1208 17:55:20.920151 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:21.918444296+00:00 stderr F E1208 17:55:21.918154 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:22.918430087+00:00 stderr F E1208 17:55:22.918162 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:23.918126083+00:00 stderr F E1208 17:55:23.918062 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:24.924591670+00:00 stderr F E1208 17:55:24.923865 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:25.918502973+00:00 stderr F E1208 17:55:25.918186 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:26.918392859+00:00 stderr F E1208 17:55:26.918111 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:27.918177633+00:00 stderr F E1208 17:55:27.918112 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:28.918309747+00:00 stderr F E1208 17:55:28.918263 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:29.919184660+00:00 stderr F E1208 17:55:29.918246 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:30.918796940+00:00 stderr F E1208 17:55:30.918725 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:31.917840083+00:00 stderr F E1208 17:55:31.917766 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:32.918779409+00:00 stderr F E1208 17:55:32.918453 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:33.917969326+00:00 stderr F E1208 17:55:33.917638 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:34.920656410+00:00 stderr F E1208 17:55:34.917919 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:35.918582433+00:00 stderr F E1208 17:55:35.918349 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:36.917782160+00:00 stderr F E1208 17:55:36.917725 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:37.918786048+00:00 stderr F E1208 17:55:37.918413 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:38.919241060+00:00 stderr F E1208 17:55:38.918467 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:39.920947656+00:00 stderr F E1208 17:55:39.919133 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:40.918616572+00:00 stderr F E1208 17:55:40.918550 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:41.918635543+00:00 stderr F E1208 17:55:41.918045 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:42.918685024+00:00 stderr F E1208 17:55:42.918253 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:43.918237140+00:00 stderr F E1208 17:55:43.917718 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:44.917757667+00:00 stderr F E1208 17:55:44.917706 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:45.918941620+00:00 stderr F E1208 17:55:45.918329 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:46.919924127+00:00 stderr F E1208 17:55:46.917925 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:47.919983078+00:00 stderr F E1208 17:55:47.918393 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:48.919982348+00:00 stderr F E1208 17:55:48.918168 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:49.917617103+00:00 stderr F E1208 17:55:49.917558 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:50.918635990+00:00 stderr F E1208 17:55:50.918512 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:51.918473595+00:00 stderr F E1208 17:55:51.918409 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:52.918614219+00:00 stderr F E1208 17:55:52.918240 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:53.918618419+00:00 stderr F E1208 17:55:53.918328 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:54.917601861+00:00 stderr F E1208 17:55:54.917545 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:55.918316500+00:00 stderr F E1208 17:55:55.918266 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:56.917938040+00:00 stderr F E1208 17:55:56.917807 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:57.922921007+00:00 stderr F E1208 17:55:57.918394 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:58.917578020+00:00 stderr F E1208 17:55:58.917517 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:55:59.918344201+00:00 stderr F E1208 17:55:59.918253 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:00.918437513+00:00 stderr F E1208 17:56:00.917552 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:01.918284159+00:00 stderr F E1208 17:56:01.917546 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:02.918186496+00:00 stderr F E1208 17:56:02.917736 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:03.919173132+00:00 stderr F E1208 17:56:03.917826 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:04.919926412+00:00 stderr F E1208 17:56:04.917563 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:05.918080672+00:00 stderr F E1208 17:56:05.917595 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:06.917699471+00:00 stderr F E1208 17:56:06.917598 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:07.918124592+00:00 stderr F E1208 17:56:07.917643 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:08.918960055+00:00 stderr F E1208 17:56:08.918509 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:09.918547434+00:00 stderr F E1208 17:56:09.918491 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:10.917932176+00:00 stderr F E1208 17:56:10.917516 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:11.918471151+00:00 stderr F E1208 17:56:11.918417 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:12.917924155+00:00 stderr F E1208 17:56:12.917771 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:13.922530342+00:00 stderr F E1208 17:56:13.921732 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:14.920434484+00:00 stderr F E1208 17:56:14.919783 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:15.917657418+00:00 stderr F E1208 17:56:15.917571 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:16.917680358+00:00 stderr F E1208 17:56:16.917599 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:17.918405878+00:00 stderr F E1208 17:56:17.918330 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:18.918321055+00:00 stderr F E1208 17:56:18.918201 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:19.919021214+00:00 stderr F E1208 17:56:19.918470 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:20.918064718+00:00 stderr F E1208 17:56:20.917924 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:21.921154672+00:00 stderr F E1208 17:56:21.921103 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:22.918404276+00:00 stderr F E1208 17:56:22.918327 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:23.918750286+00:00 stderr F E1208 17:56:23.918652 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:24.919062874+00:00 stderr F E1208 17:56:24.918461 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:25.918267653+00:00 stderr F E1208 17:56:25.918168 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:26.919021004+00:00 stderr F E1208 17:56:26.918378 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:27.917926214+00:00 stderr F E1208 17:56:27.917822 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:28.918830894+00:00 stderr F E1208 17:56:28.918001 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:29.918708472+00:00 stderr F E1208 17:56:29.918045 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:30.917794231+00:00 stderr F E1208 17:56:30.917731 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:31.918582213+00:00 stderr F E1208 17:56:31.917779 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:32.918950985+00:00 stderr F E1208 17:56:32.918314 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:33.920907568+00:00 stderr F E1208 17:56:33.918213 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:34.917542961+00:00 stderr F E1208 17:56:34.917467 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:35.918481278+00:00 stderr F E1208 17:56:35.917687 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:36.918458699+00:00 stderr F E1208 17:56:36.918363 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:37.917606338+00:00 stderr F E1208 17:56:37.917502 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:38.917618861+00:00 stderr F E1208 17:56:38.917550 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:39.918111745+00:00 stderr F E1208 17:56:39.917541 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:40.917836640+00:00 stderr F E1208 17:56:40.917688 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:41.917981436+00:00 stderr F E1208 17:56:41.917439 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:42.919073747+00:00 stderr F E1208 17:56:42.918074 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:43.917866317+00:00 stderr F E1208 17:56:43.917811 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:44.918779613+00:00 stderr F E1208 17:56:44.918236 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:45.917543623+00:00 stderr F E1208 17:56:45.917480 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:46.918245143+00:00 stderr F E1208 17:56:46.918051 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:47.918266115+00:00 stderr F E1208 17:56:47.918203 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:56:48.918170495+00:00 stderr F E1208 17:56:48.918112 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:38.919042715+00:00 stderr F E1208 17:57:38.918351 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:39.917920081+00:00 stderr F E1208 17:57:39.917756 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:40.918736558+00:00 stderr F E1208 17:57:40.918258 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:41.919425102+00:00 stderr F E1208 17:57:41.918276 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:42.918467912+00:00 stderr F E1208 17:57:42.918399 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:42.918518813+00:00 stderr F E1208 17:57:42.918482 1 sync.go:1753] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:42.918703849+00:00 stderr F I1208 17:57:42.918675 1 event.go:377] Event(v1.ObjectReference{Kind:"", Namespace:"openshift-machine-config-operator", Name:"machine-config", UID:"7f2fd96d-8d64-472c-934f-96c0625ce7a9", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'OperatorDegraded: RequiredPoolsFailed' Failed to resync 4.20.1 because: error during syncRequiredMachineConfigPools: [context deadline exceeded, error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"")] 2025-12-08T17:57:48.496757897+00:00 stderr F E1208 17:57:48.496249 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:49.495046518+00:00 stderr F E1208 17:57:49.494682 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:50.494480410+00:00 stderr F E1208 17:57:50.494098 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:51.495266755+00:00 stderr F E1208 17:57:51.495167 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:52.495051126+00:00 stderr F E1208 17:57:52.494976 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:53.494602740+00:00 stderr F E1208 17:57:53.494101 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:57:54.497312966+00:00 stderr F E1208 17:57:54.497202 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:12.495372540+00:00 stderr F E1208 17:59:12.495058 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:13.494720288+00:00 stderr F E1208 17:59:13.494613 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:14.495294859+00:00 stderr F E1208 17:59:14.494804 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:15.494924265+00:00 stderr F E1208 17:59:15.494319 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:16.494415997+00:00 stderr F E1208 17:59:16.494347 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:17.494495185+00:00 stderr F E1208 17:59:17.494399 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:18.494694716+00:00 stderr F E1208 17:59:18.494135 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:19.494227139+00:00 stderr F E1208 17:59:19.494150 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:20.494190744+00:00 stderr F E1208 17:59:20.494137 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:21.495165035+00:00 stderr F E1208 17:59:21.495046 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:22.494907164+00:00 stderr F E1208 17:59:22.494800 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:23.495386412+00:00 stderr F E1208 17:59:23.494737 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:24.494290479+00:00 stderr F E1208 17:59:24.494202 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:25.495005243+00:00 stderr F E1208 17:59:25.494439 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:26.494257849+00:00 stderr F E1208 17:59:26.494184 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:27.494531282+00:00 stderr F E1208 17:59:27.494164 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:28.494671931+00:00 stderr F E1208 17:59:28.494304 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:29.494329957+00:00 stderr F E1208 17:59:29.494260 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:30.494295812+00:00 stderr F E1208 17:59:30.494223 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:31.494910164+00:00 stderr F E1208 17:59:31.494419 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:32.494836648+00:00 stderr F E1208 17:59:32.494247 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:33.494490273+00:00 stderr F E1208 17:59:33.494382 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:34.494919461+00:00 stderr F E1208 17:59:34.494821 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:35.495007239+00:00 stderr F E1208 17:59:35.494916 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:36.494998313+00:00 stderr F E1208 17:59:36.494603 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:37.494676141+00:00 stderr F E1208 17:59:37.494327 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:38.494926543+00:00 stderr F E1208 17:59:38.494779 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:39.494252831+00:00 stderr F E1208 17:59:39.494151 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:40.494372940+00:00 stderr F E1208 17:59:40.494296 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:41.494565330+00:00 stderr F E1208 17:59:41.494444 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:42.494564026+00:00 stderr F E1208 17:59:42.494280 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:43.494605634+00:00 stderr F E1208 17:59:43.494202 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:44.494379986+00:00 stderr F E1208 17:59:44.494313 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:45.494233551+00:00 stderr F E1208 17:59:45.494176 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:46.494516586+00:00 stderr F E1208 17:59:46.494419 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:47.494455772+00:00 stderr F E1208 17:59:47.494401 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:48.494725536+00:00 stderr F E1208 17:59:48.494176 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:49.494256082+00:00 stderr F E1208 17:59:49.494170 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:50.494912277+00:00 stderr F E1208 17:59:50.494828 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:51.495112910+00:00 stderr F E1208 17:59:51.495006 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:52.495591700+00:00 stderr F E1208 17:59:52.495033 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:53.494425568+00:00 stderr F E1208 17:59:53.494094 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:54.495019781+00:00 stderr F E1208 17:59:54.494725 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:55.495249195+00:00 stderr F E1208 17:59:55.495169 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:56.494520874+00:00 stderr F E1208 17:59:56.494460 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:57.494924123+00:00 stderr F E1208 17:59:57.494577 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:58.494898620+00:00 stderr F E1208 17:59:58.494602 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T17:59:59.494199039+00:00 stderr F E1208 17:59:59.494130 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:00.494602218+00:00 stderr F E1208 18:00:00.494545 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:01.494532783+00:00 stderr F E1208 18:00:01.494205 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:02.494957993+00:00 stderr F E1208 18:00:02.494537 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:03.504366168+00:00 stderr F E1208 18:00:03.504023 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:04.494316761+00:00 stderr F E1208 18:00:04.494248 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:05.495029309+00:00 stderr F E1208 18:00:05.494390 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:06.495127329+00:00 stderr F E1208 18:00:06.495020 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:07.494594732+00:00 stderr F E1208 18:00:07.494490 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:08.494766414+00:00 stderr F E1208 18:00:08.494677 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:09.495001188+00:00 stderr F E1208 18:00:09.494713 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:10.494438112+00:00 stderr F E1208 18:00:10.494100 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:11.495066456+00:00 stderr F E1208 18:00:11.494928 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:12.494855148+00:00 stderr F E1208 18:00:12.494779 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:13.494322472+00:00 stderr F E1208 18:00:13.494216 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:14.494427213+00:00 stderr F E1208 18:00:14.494365 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:15.495067577+00:00 stderr F E1208 18:00:15.494747 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:16.495259709+00:00 stderr F E1208 18:00:16.495186 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:17.495366719+00:00 stderr F E1208 18:00:17.495036 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:18.494368351+00:00 stderr F E1208 18:00:18.494242 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:19.494431770+00:00 stderr F E1208 18:00:19.494330 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:20.494845879+00:00 stderr F E1208 18:00:20.494752 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:21.494331033+00:00 stderr F E1208 18:00:21.494250 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:22.494945547+00:00 stderr F E1208 18:00:22.494799 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:23.495321875+00:00 stderr F E1208 18:00:23.494592 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:24.494866360+00:00 stderr F E1208 18:00:24.494794 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:25.494370934+00:00 stderr F E1208 18:00:25.494267 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:26.494597218+00:00 stderr F E1208 18:00:26.494505 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:27.495134549+00:00 stderr F E1208 18:00:27.495001 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:28.495281001+00:00 stderr F E1208 18:00:28.494915 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:29.494444386+00:00 stderr F E1208 18:00:29.494168 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:30.494287790+00:00 stderr F E1208 18:00:30.494211 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:31.494489522+00:00 stderr F E1208 18:00:31.494414 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:32.495011894+00:00 stderr F E1208 18:00:32.494898 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:33.495980797+00:00 stderr F E1208 18:00:33.495134 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:34.494762603+00:00 stderr F E1208 18:00:34.494192 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:35.494463793+00:00 stderr F E1208 18:00:35.494186 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:36.494436700+00:00 stderr F E1208 18:00:36.494333 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:37.494247253+00:00 stderr F E1208 18:00:37.494117 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:38.494490337+00:00 stderr F E1208 18:00:38.494381 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:39.494658319+00:00 stderr F E1208 18:00:39.494557 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:40.495021246+00:00 stderr F E1208 18:00:40.494911 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:41.495217119+00:00 stderr F E1208 18:00:41.495117 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:42.494720254+00:00 stderr F E1208 18:00:42.494335 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:43.494670900+00:00 stderr F E1208 18:00:43.494342 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:44.494252208+00:00 stderr F E1208 18:00:44.494152 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:45.494324407+00:00 stderr F E1208 18:00:45.494262 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:46.494546140+00:00 stderr F E1208 18:00:46.494458 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:47.494569668+00:00 stderr F E1208 18:00:47.494493 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:48.494504248+00:00 stderr F E1208 18:00:48.494426 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:49.494705175+00:00 stderr F E1208 18:00:49.494283 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:50.494636505+00:00 stderr F E1208 18:00:50.494241 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:51.495305254+00:00 stderr F E1208 18:00:51.495165 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:52.494420313+00:00 stderr F E1208 18:00:52.494289 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:53.494495116+00:00 stderr F E1208 18:00:53.494218 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:54.494946059+00:00 stderr F E1208 18:00:54.494835 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:55.494960482+00:00 stderr F E1208 18:00:55.494682 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:00:56.494409799+00:00 stderr F E1208 18:00:56.494266 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")"
2025-12-08T18:00:57.494495793+00:00 stderr F E1208 18:00:57.494168 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")"
[... identical "Error syncing Required MachineConfigPools" entries repeated approximately once per second from 2025-12-08T18:00:58 through 2025-12-08T18:03:29, status unchanged ...]
2025-12-08T18:03:30.495840312+00:00 stderr F E1208 18:03:30.495281 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:31.494157561+00:00 stderr F E1208 18:03:31.494090 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:32.494389953+00:00 stderr F E1208 18:03:32.494314 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:33.494312056+00:00 stderr F E1208 18:03:33.494216 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:34.494404174+00:00 stderr F E1208 18:03:34.494313 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:35.494683226+00:00 stderr F E1208 18:03:35.494125 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:36.494255020+00:00 stderr F E1208 18:03:36.494175 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:37.495901270+00:00 stderr F E1208 18:03:37.494215 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:38.494202900+00:00 stderr F E1208 18:03:38.494131 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:39.495088388+00:00 stderr F E1208 18:03:39.494995 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:40.495258228+00:00 stderr F E1208 18:03:40.495125 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:41.495143380+00:00 stderr F E1208 18:03:41.495069 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:42.495171897+00:00 stderr F E1208 18:03:42.494641 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:43.495333226+00:00 stderr F E1208 18:03:43.495064 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:44.494572091+00:00 stderr F E1208 18:03:44.494212 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:45.494588077+00:00 stderr F E1208 18:03:45.494519 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:46.494962492+00:00 stderr F E1208 18:03:46.494625 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:47.494736272+00:00 stderr F E1208 18:03:47.494331 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:48.494229453+00:00 stderr F E1208 18:03:48.494148 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:49.494269180+00:00 stderr F E1208 18:03:49.494178 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:50.495930569+00:00 stderr F E1208 18:03:50.494138 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:51.495065632+00:00 stderr F E1208 18:03:51.494963 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:52.494126842+00:00 stderr F E1208 18:03:52.494047 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:53.495622387+00:00 stderr F E1208 18:03:53.494977 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:54.494465732+00:00 stderr F E1208 18:03:54.494170 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:55.494726414+00:00 stderr F E1208 18:03:55.494655 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:56.495201822+00:00 stderr F E1208 18:03:56.495099 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:57.494573791+00:00 stderr F E1208 18:03:57.494457 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:58.494849083+00:00 stderr F E1208 18:03:58.494680 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:03:59.494179510+00:00 stderr F E1208 18:03:59.494102 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:00.495680574+00:00 stderr F E1208 18:04:00.495150 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:01.494917914+00:00 stderr F E1208 18:04:01.494219 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:02.495148961+00:00 stderr F E1208 18:04:02.495042 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:03.494557224+00:00 stderr F E1208 18:04:03.494466 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:04.495397618+00:00 stderr F E1208 18:04:04.495058 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:05.494559116+00:00 stderr F E1208 18:04:05.494187 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:06.495284686+00:00 stderr F E1208 18:04:06.495201 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:07.494185156+00:00 stderr F E1208 18:04:07.494116 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:08.494188146+00:00 stderr F E1208 18:04:08.494120 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:09.494779462+00:00 stderr F E1208 18:04:09.494693 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:10.494228568+00:00 stderr F E1208 18:04:10.494143 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:11.494422133+00:00 stderr F E1208 18:04:11.494320 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:12.495503872+00:00 stderr F E1208 18:04:12.494796 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:13.495230485+00:00 stderr F E1208 18:04:13.495126 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:14.494594259+00:00 stderr F E1208 18:04:14.494449 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:15.494686481+00:00 stderr F E1208 18:04:15.494565 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:16.494315102+00:00 stderr F E1208 18:04:16.494230 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:17.494847646+00:00 stderr F E1208 18:04:17.494763 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:18.494316432+00:00 stderr F E1208 18:04:18.494217 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:19.495213576+00:00 stderr F E1208 18:04:19.494714 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:20.495141284+00:00 stderr F E1208 18:04:20.494802 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:21.495952296+00:00 stderr F E1208 18:04:21.495844 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:22.495174556+00:00 stderr F E1208 18:04:22.494367 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:23.494767275+00:00 stderr F E1208 18:04:23.494134 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:24.495255849+00:00 stderr F E1208 18:04:24.495134 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:25.494275543+00:00 stderr F E1208 18:04:25.494191 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:26.495065524+00:00 stderr F E1208 18:04:26.494125 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:27.494231692+00:00 stderr F E1208 18:04:27.494151 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:28.494517390+00:00 stderr F E1208 18:04:28.494199 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:29.494518170+00:00 stderr F E1208 18:04:29.494093 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:30.494277404+00:00 stderr F E1208 18:04:30.494153 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:31.494532130+00:00 stderr F E1208 18:04:31.494453 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:32.494700905+00:00 stderr F E1208 18:04:32.494173 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:33.494849609+00:00 stderr F E1208 18:04:33.494762 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:34.494969083+00:00 stderr F E1208 18:04:34.494816 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:35.494722506+00:00 stderr F E1208 18:04:35.494123 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:36.495957650+00:00 stderr F E1208 18:04:36.494354 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:37.494076880+00:00 stderr F E1208 18:04:37.494012 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:38.494428550+00:00 stderr F E1208 18:04:38.494335 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:39.494784909+00:00 stderr F E1208 18:04:39.494665 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:40.494829941+00:00 stderr F E1208 18:04:40.494351 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:41.495138429+00:00 stderr F E1208 18:04:41.495016 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:42.494496412+00:00 stderr F E1208 18:04:42.494425 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:43.495352355+00:00 stderr F E1208 18:04:43.494373 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:44.494960855+00:00 stderr F E1208 18:04:44.494795 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:45.495108019+00:00 stderr F E1208 18:04:45.494517 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:46.495096709+00:00 stderr F E1208 18:04:46.494989 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:47.494913394+00:00 stderr F E1208 18:04:47.494824 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:48.494266028+00:00 stderr F E1208 18:04:48.494159 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:49.495572182+00:00 stderr F E1208 18:04:49.494941 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:50.494769671+00:00 stderr F E1208 18:04:50.494456 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:51.494638788+00:00 stderr F E1208 18:04:51.494521 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:52.495155142+00:00 stderr F E1208 18:04:52.494775 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:53.494541456+00:00 stderr F E1208 18:04:53.494149 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: \"unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \\\"/var/lib/kubelet/config.json\\\"\")" 2025-12-08T18:04:54.494855734+00:00 stderr F E1208 18:04:54.494442 1 sync.go:1689] Error syncing Required MachineConfigPools: "error MachineConfigPool master is not ready, retrying. 
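The repeated sync error above means the master MachineConfigPool is degraded because the on-disk /var/lib/kubelet/config.json on node crc no longer matches what the rendered-master-d582710c680b4cd4536e11249c7e09e9 MachineConfig expects. A minimal diagnosis sketch, assuming oc access to the same CRC cluster this log was collected from; the node and rendered-config names are taken from the log lines above and are not part of the CI job itself:

# Show the degraded pool condition reported in the log
oc get machineconfigpool master
# The machine-config-daemon records its state and reason as node annotations
oc describe node crc | grep machineconfiguration.openshift.io
# The rendered config should list /var/lib/kubelet/config.json among its managed files;
# compare that expectation with what is actually on the node's disk
oc get machineconfig rendered-master-d582710c680b4cd4536e11249c7e09e9 -o yaml | grep -n config.json
oc debug node/crc -- chroot /host cat /var/lib/kubelet/config.json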
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/kube-rbac-proxy/0.log
2025-12-08T17:44:20.561922032+00:00 stderr F W1208 17:44:20.560989 1 deprecated.go:66] 2025-12-08T17:44:20.561922032+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:20.561922032+00:00 stderr F 2025-12-08T17:44:20.561922032+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more.
2025-12-08T17:44:20.561922032+00:00 stderr F 2025-12-08T17:44:20.561922032+00:00 stderr F =============================================== 2025-12-08T17:44:20.561922032+00:00 stderr F 2025-12-08T17:44:20.561922032+00:00 stderr F I1208 17:44:20.561288 1 kube-rbac-proxy.go:532] Reading config file: /etc/kube-rbac-proxy/config-file.yaml 2025-12-08T17:44:20.565909641+00:00 stderr F I1208 17:44:20.563073 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:20.565909641+00:00 stderr F I1208 17:44:20.564467 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:20.565909641+00:00 stderr F I1208 17:44:20.564980 1 kube-rbac-proxy.go:397] Starting TCP socket on 0.0.0.0:9001 2025-12-08T17:44:20.565909641+00:00 stderr F I1208 17:44:20.565530 1 kube-rbac-proxy.go:404] Listening securely on 0.0.0.0:9001
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca_service-ca-74545575db-d69qv_ada44265-dcab-408c-843e-e5c5a45aa138/service-ca-controller/0.log
2025-12-08T17:44:21.753561747+00:00 stderr F W1208 17:44:21.752960 1 cmd.go:257] Using insecure, self-signed certificates 2025-12-08T17:44:21.760355772+00:00 stderr F I1208 17:44:21.760200 1 crypto.go:601] Generating new CA for service-ca-controller-signer@1765215861 cert, and key in /tmp/serving-cert-947172199/serving-signer.crt, /tmp/serving-cert-947172199/serving-signer.key 2025-12-08T17:44:21.760355772+00:00 stderr F Validity period of the certificate for "service-ca-controller-signer@1765215861" is unset, resetting to 157680000000000000 years! 2025-12-08T17:44:23.250951510+00:00 stderr F I1208 17:44:23.247569 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.
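The leader-election record above can be cross-checked from outside the pod. A small sketch, assuming oc access to the cluster; the lease name and pod name are the ones that appear in this same log and will differ on other clusters:

# holderIdentity in the Lease should match the service-ca pod that became leader
oc -n openshift-service-ca get lease service-ca-controller-lock -o yaml
oc -n openshift-service-ca get pods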
2025-12-08T17:44:23.250951510+00:00 stderr F I1208 17:44:23.248464 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:23.250951510+00:00 stderr F I1208 17:44:23.248476 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:23.250951510+00:00 stderr F I1208 17:44:23.248480 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:23.250951510+00:00 stderr F I1208 17:44:23.248484 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:23.250951510+00:00 stderr F I1208 17:44:23.249106 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:23.288525056+00:00 stderr F I1208 17:44:23.288476 1 builder.go:304] service-ca-controller version - 2025-12-08T17:44:23.290792197+00:00 stderr F I1208 17:44:23.290761 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/tmp/serving-cert-947172199/tls.crt::/tmp/serving-cert-947172199/tls.key" 2025-12-08T17:44:24.288024908+00:00 stderr F I1208 17:44:24.286957 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:44:24.295012239+00:00 stderr F I1208 17:44:24.294823 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:44:24.295012239+00:00 stderr F I1208 17:44:24.294844 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:44:24.295012239+00:00 stderr F I1208 17:44:24.294897 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400 2025-12-08T17:44:24.295012239+00:00 stderr F I1208 17:44:24.294906 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200 2025-12-08T17:44:24.301989739+00:00 stderr F I1208 17:44:24.299593 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:24.301989739+00:00 stderr F W1208 17:44:24.299628 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:24.301989739+00:00 stderr F W1208 17:44:24.299634 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:24.301989739+00:00 stderr F W1208 17:44:24.299645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:24.301989739+00:00 stderr F W1208 17:44:24.299650 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:24.301989739+00:00 stderr F W1208 17:44:24.299654 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:24.301989739+00:00 stderr F W1208 17:44:24.299659 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:44:24.301989739+00:00 stderr F I1208 17:44:24.300020 1 genericapiserver.go:535] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:44:24.310641426+00:00 stderr F I1208 17:44:24.308885 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:24.310641426+00:00 stderr F I1208 17:44:24.309235 1 leaderelection.go:257] attempting to acquire leader lease openshift-service-ca/service-ca-controller-lock... 
2025-12-08T17:44:24.310641426+00:00 stderr F I1208 17:44:24.310105 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:24.310641426+00:00 stderr F I1208 17:44:24.310139 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController 2025-12-08T17:44:24.310641426+00:00 stderr F I1208 17:44:24.310184 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:24.310641426+00:00 stderr F I1208 17:44:24.310192 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 2025-12-08T17:44:24.310641426+00:00 stderr F I1208 17:44:24.310206 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:24.310641426+00:00 stderr F I1208 17:44:24.310210 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file 2025-12-08T17:44:24.310729208+00:00 stderr F I1208 17:44:24.310716 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/tmp/serving-cert-947172199/tls.crt::/tmp/serving-cert-947172199/tls.key" 2025-12-08T17:44:24.311493989+00:00 stderr F I1208 17:44:24.311478 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:24.314514671+00:00 stderr F I1208 17:44:24.312151 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-947172199/tls.crt::/tmp/serving-cert-947172199/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"service-ca-controller-signer@1765215861\" (2025-12-08 17:44:22 +0000 UTC to 2025-12-08 17:44:23 +0000 UTC (now=2025-12-08 17:44:24.312111545 +0000 UTC))" 2025-12-08T17:44:24.319752944+00:00 stderr F I1208 17:44:24.319734 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215864\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215864\" (2025-12-08 16:44:23 +0000 UTC to 2026-12-08 16:44:23 +0000 UTC (now=2025-12-08 17:44:24.319705243 +0000 UTC))" 2025-12-08T17:44:24.320637348+00:00 stderr F I1208 17:44:24.320596 1 secure_serving.go:213] Serving securely on [::]:8443 2025-12-08T17:44:24.321543432+00:00 stderr F I1208 17:44:24.320121 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:24.321568203+00:00 stderr F I1208 17:44:24.320523 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:24.321604534+00:00 stderr F I1208 17:44:24.321570 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:24.321779250+00:00 stderr F I1208 17:44:24.321759 1 genericapiserver.go:685] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:44:24.326845407+00:00 stderr F I1208 17:44:24.325626 1 leaderelection.go:271] successfully acquired lease openshift-service-ca/service-ca-controller-lock 2025-12-08T17:44:24.327022342+00:00 stderr F I1208 17:44:24.326995 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-service-ca", Name:"service-ca-controller-lock", 
UID:"45afceeb-6ad8-4848-9b59-df6a62d733f6", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37488", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' service-ca-74545575db-d69qv_4594cad7-584e-444d-aa1e-5187ed4cda81 became leader 2025-12-08T17:44:24.328409570+00:00 stderr F I1208 17:44:24.328372 1 starter.go:62] Setting certificate lifetime to 17520h0m0s, refresh certificate at 1h0m0s 2025-12-08T17:44:24.328581565+00:00 stderr F I1208 17:44:24.328557 1 base_controller.go:76] Waiting for caches to sync for ServiceServingCertUpdateController 2025-12-08T17:44:24.328629836+00:00 stderr F I1208 17:44:24.328617 1 base_controller.go:76] Waiting for caches to sync for LegacyVulnerableConfigMapCABundleInjector 2025-12-08T17:44:24.328996916+00:00 stderr F I1208 17:44:24.328981 1 base_controller.go:76] Waiting for caches to sync for APIServiceCABundleInjector 2025-12-08T17:44:24.329273353+00:00 stderr F I1208 17:44:24.329140 1 base_controller.go:76] Waiting for caches to sync for ServiceServingCertController 2025-12-08T17:44:24.329299974+00:00 stderr F I1208 17:44:24.329191 1 base_controller.go:76] Waiting for caches to sync for ConfigMapCABundleInjector 2025-12-08T17:44:24.329320585+00:00 stderr F I1208 17:44:24.329199 1 base_controller.go:76] Waiting for caches to sync for CRDCABundleInjector 2025-12-08T17:44:24.329341105+00:00 stderr F I1208 17:44:24.329207 1 base_controller.go:76] Waiting for caches to sync for MutatingWebhookCABundleInjector 2025-12-08T17:44:24.329377836+00:00 stderr F I1208 17:44:24.329215 1 base_controller.go:76] Waiting for caches to sync for ValidatingWebhookCABundleInjector 2025-12-08T17:44:24.334763133+00:00 stderr F I1208 17:44:24.334729 1 reflector.go:376] Caches populated for *v1.MutatingWebhookConfiguration from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:24.335849343+00:00 stderr F I1208 17:44:24.335831 1 reflector.go:376] Caches populated for *v1.APIService from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:24.336157582+00:00 stderr F I1208 17:44:24.336141 1 reflector.go:376] Caches populated for *v1.ValidatingWebhookConfiguration from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:24.357151774+00:00 stderr F I1208 17:44:24.357108 1 reflector.go:376] Caches populated for *v1.Service from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:24.423528605+00:00 stderr F I1208 17:44:24.419183 1 shared_informer.go:320] Caches are synced for RequestHeaderAuthRequestController 2025-12-08T17:44:24.423528605+00:00 stderr F I1208 17:44:24.419337 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 2025-12-08T17:44:24.423528605+00:00 stderr F I1208 17:44:24.419462 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file 2025-12-08T17:44:24.444935638+00:00 stderr F I1208 17:44:24.444826 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:24.444774554 +0000 UTC))" 2025-12-08T17:44:24.444935638+00:00 stderr F I1208 17:44:24.444901 1 tlsconfig.go:181] "Loaded client CA" index=1 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:24.444864686 +0000 UTC))" 2025-12-08T17:44:24.444935638+00:00 stderr F I1208 17:44:24.444929 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:24.444916518 +0000 UTC))" 2025-12-08T17:44:24.445013400+00:00 stderr F I1208 17:44:24.444970 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:24.444937358 +0000 UTC))" 2025-12-08T17:44:24.445013400+00:00 stderr F I1208 17:44:24.445002 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:24.444984649 +0000 UTC))" 2025-12-08T17:44:24.445050672+00:00 stderr F I1208 17:44:24.445025 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:24.44501168 +0000 UTC))" 2025-12-08T17:44:24.445075883+00:00 stderr F I1208 17:44:24.445055 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:24.445039522 +0000 UTC))" 2025-12-08T17:44:24.445357800+00:00 stderr F I1208 17:44:24.445329 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-947172199/tls.crt::/tmp/serving-cert-947172199/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"service-ca-controller-signer@1765215861\" (2025-12-08 17:44:22 +0000 UTC to 2025-12-08 17:44:23 +0000 UTC (now=2025-12-08 17:44:24.445305609 +0000 UTC))" 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466760 1 base_controller.go:82] Caches are synced for MutatingWebhookCABundleInjector 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466795 1 base_controller.go:119] Starting #1 worker of MutatingWebhookCABundleInjector controller ... 
2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466805 1 base_controller.go:119] Starting #2 worker of MutatingWebhookCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466809 1 base_controller.go:119] Starting #3 worker of MutatingWebhookCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466814 1 base_controller.go:119] Starting #4 worker of MutatingWebhookCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466817 1 base_controller.go:119] Starting #5 worker of MutatingWebhookCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466948 1 base_controller.go:82] Caches are synced for ValidatingWebhookCABundleInjector 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466952 1 base_controller.go:119] Starting #1 worker of ValidatingWebhookCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466956 1 base_controller.go:119] Starting #2 worker of ValidatingWebhookCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466959 1 base_controller.go:119] Starting #3 worker of ValidatingWebhookCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466962 1 base_controller.go:119] Starting #4 worker of ValidatingWebhookCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466965 1 base_controller.go:119] Starting #5 worker of ValidatingWebhookCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.466999 1 base_controller.go:82] Caches are synced for APIServiceCABundleInjector 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.467003 1 base_controller.go:119] Starting #1 worker of APIServiceCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.467006 1 base_controller.go:119] Starting #2 worker of APIServiceCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.467012 1 base_controller.go:119] Starting #3 worker of APIServiceCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.467016 1 base_controller.go:119] Starting #4 worker of APIServiceCABundleInjector controller ... 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.467020 1 base_controller.go:119] Starting #5 worker of APIServiceCABundleInjector controller ... 
2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.468042 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215864\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215864\" (2025-12-08 16:44:23 +0000 UTC to 2026-12-08 16:44:23 +0000 UTC (now=2025-12-08 17:44:24.467974027 +0000 UTC))" 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.468202 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:24.468186243 +0000 UTC))" 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.468222 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:24.468213964 +0000 UTC))" 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.468234 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:24.468226694 +0000 UTC))" 2025-12-08T17:44:24.468291996+00:00 stderr F I1208 17:44:24.468268 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:24.468257325 +0000 UTC))" 2025-12-08T17:44:24.468364478+00:00 stderr F I1208 17:44:24.468283 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:24.468274765 +0000 UTC))" 2025-12-08T17:44:24.468364478+00:00 stderr F I1208 17:44:24.468302 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:24.468293246 +0000 UTC))" 2025-12-08T17:44:24.468364478+00:00 stderr F I1208 17:44:24.468313 1 tlsconfig.go:181] "Loaded client CA" index=6 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:24.468306086 +0000 UTC))" 2025-12-08T17:44:24.468364478+00:00 stderr F I1208 17:44:24.468335 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:24.468318817 +0000 UTC))" 2025-12-08T17:44:24.468523982+00:00 stderr F I1208 17:44:24.468503 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-947172199/tls.crt::/tmp/serving-cert-947172199/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"service-ca-controller-signer@1765215861\" (2025-12-08 17:44:22 +0000 UTC to 2025-12-08 17:44:23 +0000 UTC (now=2025-12-08 17:44:24.468492411 +0000 UTC))" 2025-12-08T17:44:24.468683126+00:00 stderr F I1208 17:44:24.468663 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215864\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215864\" (2025-12-08 16:44:23 +0000 UTC to 2026-12-08 16:44:23 +0000 UTC (now=2025-12-08 17:44:24.468654016 +0000 UTC))" 2025-12-08T17:44:24.782054994+00:00 stderr F I1208 17:44:24.781667 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:24.812187466+00:00 stderr F I1208 17:44:24.812124 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:24.829052477+00:00 stderr F I1208 17:44:24.829000 1 base_controller.go:82] Caches are synced for LegacyVulnerableConfigMapCABundleInjector 2025-12-08T17:44:24.829052477+00:00 stderr F I1208 17:44:24.829030 1 base_controller.go:119] Starting #1 worker of LegacyVulnerableConfigMapCABundleInjector controller ... 2025-12-08T17:44:24.829052477+00:00 stderr F I1208 17:44:24.829040 1 base_controller.go:119] Starting #2 worker of LegacyVulnerableConfigMapCABundleInjector controller ... 2025-12-08T17:44:24.829052477+00:00 stderr F I1208 17:44:24.829044 1 base_controller.go:119] Starting #3 worker of LegacyVulnerableConfigMapCABundleInjector controller ... 2025-12-08T17:44:24.829052477+00:00 stderr F I1208 17:44:24.829048 1 base_controller.go:119] Starting #4 worker of LegacyVulnerableConfigMapCABundleInjector controller ... 2025-12-08T17:44:24.829084267+00:00 stderr F I1208 17:44:24.829051 1 base_controller.go:119] Starting #5 worker of LegacyVulnerableConfigMapCABundleInjector controller ... 2025-12-08T17:44:24.829105148+00:00 stderr F I1208 17:44:24.829099 1 base_controller.go:82] Caches are synced for ServiceServingCertUpdateController 2025-12-08T17:44:24.829112398+00:00 stderr F I1208 17:44:24.829104 1 base_controller.go:119] Starting #1 worker of ServiceServingCertUpdateController controller ... 
2025-12-08T17:44:24.829112398+00:00 stderr F I1208 17:44:24.829108 1 base_controller.go:119] Starting #2 worker of ServiceServingCertUpdateController controller ... 2025-12-08T17:44:24.829119668+00:00 stderr F I1208 17:44:24.829111 1 base_controller.go:119] Starting #3 worker of ServiceServingCertUpdateController controller ... 2025-12-08T17:44:24.829119668+00:00 stderr F I1208 17:44:24.829115 1 base_controller.go:119] Starting #4 worker of ServiceServingCertUpdateController controller ... 2025-12-08T17:44:24.829126649+00:00 stderr F I1208 17:44:24.829118 1 base_controller.go:119] Starting #5 worker of ServiceServingCertUpdateController controller ... 2025-12-08T17:44:24.829548470+00:00 stderr F I1208 17:44:24.829517 1 base_controller.go:82] Caches are synced for ServiceServingCertController 2025-12-08T17:44:24.829548470+00:00 stderr F I1208 17:44:24.829529 1 base_controller.go:119] Starting #1 worker of ServiceServingCertController controller ... 2025-12-08T17:44:24.829548470+00:00 stderr F I1208 17:44:24.829533 1 base_controller.go:119] Starting #2 worker of ServiceServingCertController controller ... 2025-12-08T17:44:24.829548470+00:00 stderr F I1208 17:44:24.829536 1 base_controller.go:119] Starting #3 worker of ServiceServingCertController controller ... 2025-12-08T17:44:24.829548470+00:00 stderr F I1208 17:44:24.829539 1 base_controller.go:119] Starting #4 worker of ServiceServingCertController controller ... 2025-12-08T17:44:24.829548470+00:00 stderr F I1208 17:44:24.829543 1 base_controller.go:119] Starting #5 worker of ServiceServingCertController controller ... 2025-12-08T17:44:24.830298610+00:00 stderr F I1208 17:44:24.830262 1 base_controller.go:82] Caches are synced for ConfigMapCABundleInjector 2025-12-08T17:44:24.830298610+00:00 stderr F I1208 17:44:24.830288 1 base_controller.go:119] Starting #1 worker of ConfigMapCABundleInjector controller ... 2025-12-08T17:44:24.830298610+00:00 stderr F I1208 17:44:24.830295 1 base_controller.go:119] Starting #2 worker of ConfigMapCABundleInjector controller ... 2025-12-08T17:44:24.830316521+00:00 stderr F I1208 17:44:24.830299 1 base_controller.go:119] Starting #3 worker of ConfigMapCABundleInjector controller ... 2025-12-08T17:44:24.830316521+00:00 stderr F I1208 17:44:24.830302 1 base_controller.go:119] Starting #4 worker of ConfigMapCABundleInjector controller ... 2025-12-08T17:44:24.830316521+00:00 stderr F I1208 17:44:24.830305 1 base_controller.go:119] Starting #5 worker of ConfigMapCABundleInjector controller ... 2025-12-08T17:44:24.845814574+00:00 stderr F I1208 17:44:24.842390 1 reflector.go:376] Caches populated for *v1.CustomResourceDefinition from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:24.932707314+00:00 stderr F I1208 17:44:24.932574 1 base_controller.go:82] Caches are synced for CRDCABundleInjector 2025-12-08T17:44:24.932707314+00:00 stderr F I1208 17:44:24.932600 1 base_controller.go:119] Starting #1 worker of CRDCABundleInjector controller ... 2025-12-08T17:44:24.932707314+00:00 stderr F I1208 17:44:24.932608 1 base_controller.go:119] Starting #2 worker of CRDCABundleInjector controller ... 2025-12-08T17:44:24.932707314+00:00 stderr F I1208 17:44:24.932612 1 base_controller.go:119] Starting #3 worker of CRDCABundleInjector controller ... 2025-12-08T17:44:24.932707314+00:00 stderr F I1208 17:44:24.932614 1 base_controller.go:119] Starting #4 worker of CRDCABundleInjector controller ... 
2025-12-08T17:44:24.932707314+00:00 stderr F I1208 17:44:24.932617 1 base_controller.go:119] Starting #5 worker of CRDCABundleInjector controller ... 2025-12-08T17:44:24.932707314+00:00 stderr F W1208 17:44:24.932663 1 crd.go:61] customresourcedefinition consoleplugins.console.openshift.io is annotated for ca bundle injection but does not use strategy "Webhook" 2025-12-08T17:44:30.610059524+00:00 stderr F I1208 17:44:30.609414 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.609373965 +0000 UTC))" 2025-12-08T17:44:30.610059524+00:00 stderr F I1208 17:44:30.610045 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.610024883 +0000 UTC))" 2025-12-08T17:44:30.610087335+00:00 stderr F I1208 17:44:30.610066 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.610053864 +0000 UTC))" 2025-12-08T17:44:30.610094695+00:00 stderr F I1208 17:44:30.610085 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.610074314 +0000 UTC))" 2025-12-08T17:44:30.610112365+00:00 stderr F I1208 17:44:30.610104 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.610092075 +0000 UTC))" 2025-12-08T17:44:30.610142906+00:00 stderr F I1208 17:44:30.610122 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.610109725 +0000 UTC))" 2025-12-08T17:44:30.610165407+00:00 stderr F I1208 17:44:30.610147 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] 
issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.610134086 +0000 UTC))" 2025-12-08T17:44:30.610189987+00:00 stderr F I1208 17:44:30.610174 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.610160557 +0000 UTC))" 2025-12-08T17:44:30.610219288+00:00 stderr F I1208 17:44:30.610197 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.610183707 +0000 UTC))" 2025-12-08T17:44:30.610250399+00:00 stderr F I1208 17:44:30.610228 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.610212628 +0000 UTC))" 2025-12-08T17:44:30.610489555+00:00 stderr F I1208 17:44:30.610471 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-947172199/tls.crt::/tmp/serving-cert-947172199/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"service-ca-controller-signer@1765215861\" (2025-12-08 17:44:22 +0000 UTC to 2025-12-08 17:44:23 +0000 UTC (now=2025-12-08 17:44:30.610454484 +0000 UTC))" 2025-12-08T17:44:30.610718892+00:00 stderr F I1208 17:44:30.610675 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215864\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215864\" (2025-12-08 16:44:23 +0000 UTC to 2026-12-08 16:44:23 +0000 UTC (now=2025-12-08 17:44:30.61065932 +0000 UTC))" 2025-12-08T17:45:16.041350299+00:00 stderr F I1208 17:45:16.041141 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.041102682 +0000 UTC))" 2025-12-08T17:45:16.041350299+00:00 stderr F I1208 17:45:16.041329 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.041314868 +0000 UTC))" 2025-12-08T17:45:16.041350299+00:00 stderr F I1208 17:45:16.041345 1 
tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.041335298 +0000 UTC))" 2025-12-08T17:45:16.041389820+00:00 stderr F I1208 17:45:16.041361 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.041350339 +0000 UTC))" 2025-12-08T17:45:16.041389820+00:00 stderr F I1208 17:45:16.041377 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.041367179 +0000 UTC))" 2025-12-08T17:45:16.041476992+00:00 stderr F I1208 17:45:16.041407 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.04138219 +0000 UTC))" 2025-12-08T17:45:16.041476992+00:00 stderr F I1208 17:45:16.041434 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.041420961 +0000 UTC))" 2025-12-08T17:45:16.041476992+00:00 stderr F I1208 17:45:16.041451 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.041440001 +0000 UTC))" 2025-12-08T17:45:16.041476992+00:00 stderr F I1208 17:45:16.041468 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.041456882 +0000 UTC))" 2025-12-08T17:45:16.041501473+00:00 stderr F I1208 17:45:16.041485 1 tlsconfig.go:181] "Loaded client CA" index=9 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.041475872 +0000 UTC))" 2025-12-08T17:45:16.041508513+00:00 stderr F I1208 17:45:16.041502 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.041491273 +0000 UTC))" 2025-12-08T17:45:16.041729559+00:00 stderr F I1208 17:45:16.041699 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-947172199/tls.crt::/tmp/serving-cert-947172199/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"service-ca-controller-signer@1765215861\" (2025-12-08 17:44:22 +0000 UTC to 2025-12-08 17:44:23 +0000 UTC (now=2025-12-08 17:45:16.041685078 +0000 UTC))" 2025-12-08T17:45:16.041912634+00:00 stderr F I1208 17:45:16.041893 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215864\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215864\" (2025-12-08 16:44:23 +0000 UTC to 2026-12-08 16:44:23 +0000 UTC (now=2025-12-08 17:45:16.041857703 +0000 UTC))" 2025-12-08T17:46:24.342099420+00:00 stderr F E1208 17:46:24.341440 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-service-ca/leases/service-ca-controller-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:24.345363197+00:00 stderr F E1208 17:46:24.342904 1 leaderelection.go:436] error retrieving resource lock openshift-service-ca/service-ca-controller-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-service-ca/leases/service-ca-controller-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:47:06.327270532+00:00 stderr F I1208 17:47:06.326610 1 reflector.go:376] Caches populated for *v1.MutatingWebhookConfiguration from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:14.088449788+00:00 stderr F I1208 17:47:14.087860 1 reflector.go:376] Caches populated for *v1.ValidatingWebhookConfiguration from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:14.558923838+00:00 stderr F I1208 17:47:14.558457 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:23.893842583+00:00 stderr F I1208 17:47:23.893244 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:28.014494657+00:00 stderr F I1208 17:47:28.013635 1 reflector.go:376] Caches populated for *v1.CustomResourceDefinition from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:28.014865909+00:00 stderr F W1208 17:47:28.014799 1 crd.go:61] customresourcedefinition consoleplugins.console.openshift.io is annotated for ca bundle injection 
but does not use strategy "Webhook" 2025-12-08T17:47:34.579718194+00:00 stderr F I1208 17:47:34.579263 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:39.790022309+00:00 stderr F I1208 17:47:39.789440 1 reflector.go:376] Caches populated for *v1.Service from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:50.963780138+00:00 stderr F I1208 17:47:50.963462 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:52.364366030+00:00 stderr F I1208 17:47:52.363436 1 reflector.go:376] Caches populated for *v1.APIService from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:48:05.052946979+00:00 stderr F I1208 17:48:05.052132 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:53:18.704465288+00:00 stderr F I1208 17:53:18.702064 1 configmap.go:109] updating configmap openstack/openshift-service-ca.crt with the service signing CA bundle 2025-12-08T17:53:19.355052268+00:00 stderr F I1208 17:53:19.354729 1 configmap.go:109] updating configmap openstack-operators/openshift-service-ca.crt with the service signing CA bundle 2025-12-08T17:54:28.311189759+00:00 stderr F I1208 17:54:28.310439 1 configmap.go:109] updating configmap service-telemetry/openshift-service-ca.crt with the service signing CA bundle 2025-12-08T17:54:53.158755402+00:00 stderr F I1208 17:54:53.158131 1 configmap.go:109] updating configmap cert-manager-operator/openshift-service-ca.crt with the service signing CA bundle 2025-12-08T17:55:12.183022626+00:00 stderr F E1208 17:55:12.182475 1 base_controller.go:277] "Unhandled Error" err="\"ServiceServingCertController\" controller failed to sync \"openshift-operators/observability-operator\", err: Operation cannot be fulfilled on services \"observability-operator\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:39.130589449+00:00 stderr F I1208 17:55:39.127980 1 configmap.go:109] updating configmap cert-manager/openshift-service-ca.crt with the service signing CA bundle 2025-12-08T17:57:29.166358308+00:00 stderr F I1208 17:57:29.165652 1 configmap.go:109] updating configmap service-telemetry/serving-certs-ca-bundle with the service signing CA bundle 2025-12-08T18:02:40.521081177+00:00 stderr F I1208 18:02:40.520469 1 configmap.go:109] updating configmap openshift-must-gather-gctth/openshift-service-ca.crt with the service signing CA bundle
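The configmap.go:109 entries above are the service-ca controller injecting its service signing CA bundle into the openshift-service-ca.crt ConfigMap of each namespace, plus the service-telemetry/serving-certs-ca-bundle ConfigMap updated at 17:57:29. A minimal way to confirm the injected bundle from the same oc session the smoketest uses, as a sketch only (it assumes the standard service-ca.crt data key and that the CRC cluster from this job is still reachable):

  # print the first certificate of the injected bundle and its validity window
  oc -n service-telemetry get configmap openshift-service-ca.crt \
    -o jsonpath='{.data.service-ca\.crt}' | openssl x509 -noout -subject -dates
  # show the injection annotation on the serving-certs-ca-bundle ConfigMap
  oc -n service-telemetry get configmap serving-certs-ca-bundle \
    -o jsonpath='{.metadata.annotations}'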
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authentication_oauth-openshift-57ffdf54dd-5dg99_0c242c34-d446-4428-b8d7-0b8dbf4137c9/oauth-openshift/0.log
2025-12-08T17:45:48.398547341+00:00 stdout F Copying system trust bundle 2025-12-08T17:45:48.439744838+00:00 stderr F I1208 17:45:48.439634 1 dynamic_serving_content.go:113] "Loaded a new cert/key pair" name="serving-cert::/var/config/system/secrets/v4-0-config-system-serving-cert/tls.crt::/var/config/system/secrets/v4-0-config-system-serving-cert/tls.key" 2025-12-08T17:45:48.439940773+00:00 stderr F I1208 17:45:48.439896 1 dynamic_serving_content.go:113] "Loaded a new cert/key pair" name="sni-serving-cert::/var/config/system/secrets/v4-0-config-system-router-certs/apps-crc.testing::/var/config/system/secrets/v4-0-config-system-router-certs/apps-crc.testing" 2025-12-08T17:45:48.909436465+00:00 stderr F I1208 17:45:48.909356 1 audit.go:340] Using audit backend: ignoreErrors 2025-12-08T17:45:48.920414425+00:00 stderr F I1208 17:45:48.920344 1 requestheader_controller.go:244] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:45:48.927910930+00:00 stderr F I1208 17:45:48.927808 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:45:48.927910930+00:00 stderr F I1208 17:45:48.927841 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:45:48.927910930+00:00 stderr F I1208 17:45:48.927863 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400 2025-12-08T17:45:48.927910930+00:00 stderr F I1208 17:45:48.927893 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200 2025-12-08T17:45:48.933037914+00:00 stderr F I1208 17:45:48.932975 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:45:48.933081605+00:00 stderr F I1208 17:45:48.933046 1 genericapiserver.go:528] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:45:48.935713054+00:00 stderr F I1208 17:45:48.935676 1 requestheader_controller.go:169] Starting RequestHeaderAuthRequestController 2025-12-08T17:45:48.935806417+00:00 stderr F I1208 17:45:48.935754 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:45:48.935806417+00:00 stderr F I1208 17:45:48.935774 1 dynamic_serving_content.go:132] "Starting controller" name="serving-cert::/var/config/system/secrets/v4-0-config-system-serving-cert/tls.crt::/var/config/system/secrets/v4-0-config-system-serving-cert/tls.key" 2025-12-08T17:45:48.935825268+00:00 stderr F I1208 17:45:48.935805 1 shared_informer.go:311] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file 2025-12-08T17:45:48.935907370+00:00 stderr F I1208 17:45:48.935769 1 shared_informer.go:311] Waiting for caches to sync for RequestHeaderAuthRequestController 2025-12-08T17:45:48.935959182+00:00 stderr F I1208 17:45:48.935723 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:45:48.935959182+00:00 stderr F I1208 17:45:48.935950 1 shared_informer.go:311] Waiting for caches to sync for 
client-ca::kube-system::extension-apiserver-authentication::client-ca-file 2025-12-08T17:45:48.936092036+00:00 stderr F I1208 17:45:48.936026 1 dynamic_serving_content.go:132] "Starting controller" name="sni-serving-cert::/var/config/system/secrets/v4-0-config-system-router-certs/apps-crc.testing::/var/config/system/secrets/v4-0-config-system-router-certs/apps-crc.testing" 2025-12-08T17:45:48.936203619+00:00 stderr F I1208 17:45:48.936169 1 tlsconfig.go:200] "Loaded serving cert" certName="serving-cert::/var/config/system/secrets/v4-0-config-system-serving-cert/tls.crt::/var/config/system/secrets/v4-0-config-system-serving-cert/tls.key" certDetail="\"oauth-openshift.openshift-authentication.svc\" [serving] validServingFor=[oauth-openshift.openshift-authentication.svc,oauth-openshift.openshift-authentication.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:45:48.936121606 +0000 UTC))" 2025-12-08T17:45:48.936413915+00:00 stderr F I1208 17:45:48.936380 1 named_certificates.go:53] "Loaded SNI cert" index=1 certName="sni-serving-cert::/var/config/system/secrets/v4-0-config-system-router-certs/apps-crc.testing::/var/config/system/secrets/v4-0-config-system-router-certs/apps-crc.testing" certDetail="\"*.apps-crc.testing\" [serving] validServingFor=[*.apps-crc.testing] issuer=\"ingress-operator@1762070846\" (2025-11-02 08:07:26 +0000 UTC to 2027-11-02 08:07:27 +0000 UTC (now=2025-12-08 17:45:48.936364564 +0000 UTC))" 2025-12-08T17:45:48.936578780+00:00 stderr F I1208 17:45:48.936531 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215948\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215948\" (2025-12-08 16:45:48 +0000 UTC to 2026-12-08 16:45:48 +0000 UTC (now=2025-12-08 17:45:48.936504808 +0000 UTC))" 2025-12-08T17:45:48.936578780+00:00 stderr F I1208 17:45:48.936554 1 secure_serving.go:213] Serving securely on [::]:6443 2025-12-08T17:45:48.936578780+00:00 stderr F I1208 17:45:48.936572 1 genericapiserver.go:681] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:45:48.936600821+00:00 stderr F I1208 17:45:48.936585 1 tlsconfig.go:240] "Starting DynamicServingCertificateController" 2025-12-08T17:45:48.937970332+00:00 stderr F I1208 17:45:48.937853 1 reflector.go:351] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.29.2/tools/cache/reflector.go:229 2025-12-08T17:45:48.938473947+00:00 stderr F I1208 17:45:48.938435 1 reflector.go:351] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.29.2/tools/cache/reflector.go:229 2025-12-08T17:45:48.938498128+00:00 stderr F I1208 17:45:48.938479 1 reflector.go:351] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.29.2/tools/cache/reflector.go:229 2025-12-08T17:45:48.943043114+00:00 stderr F I1208 17:45:48.942989 1 reflector.go:351] Caches populated for *v1.Group from k8s.io/client-go@v0.29.2/tools/cache/reflector.go:229 2025-12-08T17:45:49.037822559+00:00 stderr F I1208 17:45:49.037723 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 2025-12-08T17:45:49.037822559+00:00 stderr F I1208 17:45:49.037759 1 shared_informer.go:318] Caches are synced for RequestHeaderAuthRequestController 2025-12-08T17:45:49.038020035+00:00 stderr F I1208 17:45:49.037962 1 shared_informer.go:318] Caches are synced for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file 2025-12-08T17:45:49.038264332+00:00 stderr F I1208 17:45:49.038202 1 tlsconfig.go:178] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:49.038164199 +0000 UTC))" 2025-12-08T17:45:49.038620984+00:00 stderr F I1208 17:45:49.038565 1 tlsconfig.go:200] "Loaded serving cert" certName="serving-cert::/var/config/system/secrets/v4-0-config-system-serving-cert/tls.crt::/var/config/system/secrets/v4-0-config-system-serving-cert/tls.key" certDetail="\"oauth-openshift.openshift-authentication.svc\" [serving] validServingFor=[oauth-openshift.openshift-authentication.svc,oauth-openshift.openshift-authentication.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:45:49.038535731 +0000 UTC))" 2025-12-08T17:45:49.038980474+00:00 stderr F I1208 17:45:49.038924 1 named_certificates.go:53] "Loaded SNI cert" index=1 certName="sni-serving-cert::/var/config/system/secrets/v4-0-config-system-router-certs/apps-crc.testing::/var/config/system/secrets/v4-0-config-system-router-certs/apps-crc.testing" certDetail="\"*.apps-crc.testing\" [serving] validServingFor=[*.apps-crc.testing] issuer=\"ingress-operator@1762070846\" (2025-11-02 08:07:26 +0000 UTC to 2027-11-02 08:07:27 +0000 UTC (now=2025-12-08 17:45:49.038866941 +0000 UTC))" 2025-12-08T17:45:49.039274493+00:00 stderr F I1208 17:45:49.039217 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215948\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215948\" (2025-12-08 16:45:48 +0000 UTC to 2026-12-08 16:45:48 +0000 UTC (now=2025-12-08 17:45:49.039197811 +0000 UTC))" 2025-12-08T17:45:49.039581602+00:00 stderr F I1208 17:45:49.039523 1 tlsconfig.go:178] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:49.03950127 +0000 UTC))" 2025-12-08T17:45:49.039581602+00:00 stderr F I1208 17:45:49.039571 1 tlsconfig.go:178] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:49.039545691 +0000 UTC))" 2025-12-08T17:45:49.039648384+00:00 stderr F I1208 17:45:49.039597 1 tlsconfig.go:178] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:49.039581102 +0000 
UTC))" 2025-12-08T17:45:49.039648384+00:00 stderr F I1208 17:45:49.039629 1 tlsconfig.go:178] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:49.039613263 +0000 UTC))" 2025-12-08T17:45:49.039722526+00:00 stderr F I1208 17:45:49.039670 1 tlsconfig.go:178] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:49.039637914 +0000 UTC))" 2025-12-08T17:45:49.039722526+00:00 stderr F I1208 17:45:49.039712 1 tlsconfig.go:178] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:49.039689025 +0000 UTC))" 2025-12-08T17:45:49.039773018+00:00 stderr F I1208 17:45:49.039739 1 tlsconfig.go:178] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:49.039720856 +0000 UTC))" 2025-12-08T17:45:49.039773018+00:00 stderr F I1208 17:45:49.039766 1 tlsconfig.go:178] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:49.039749037 +0000 UTC))" 2025-12-08T17:45:49.039833170+00:00 stderr F I1208 17:45:49.039799 1 tlsconfig.go:178] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:49.039776808 +0000 UTC))" 2025-12-08T17:45:49.039849200+00:00 stderr F I1208 17:45:49.039830 1 tlsconfig.go:178] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:49.039815389 +0000 UTC))" 2025-12-08T17:45:49.039868311+00:00 stderr F I1208 17:45:49.039856 
1 tlsconfig.go:178] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:49.03983967 +0000 UTC))" 2025-12-08T17:45:49.040245342+00:00 stderr F I1208 17:45:49.040180 1 tlsconfig.go:200] "Loaded serving cert" certName="serving-cert::/var/config/system/secrets/v4-0-config-system-serving-cert/tls.crt::/var/config/system/secrets/v4-0-config-system-serving-cert/tls.key" certDetail="\"oauth-openshift.openshift-authentication.svc\" [serving] validServingFor=[oauth-openshift.openshift-authentication.svc,oauth-openshift.openshift-authentication.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:45:49.040157219 +0000 UTC))" 2025-12-08T17:45:49.040566981+00:00 stderr F I1208 17:45:49.040511 1 named_certificates.go:53] "Loaded SNI cert" index=1 certName="sni-serving-cert::/var/config/system/secrets/v4-0-config-system-router-certs/apps-crc.testing::/var/config/system/secrets/v4-0-config-system-router-certs/apps-crc.testing" certDetail="\"*.apps-crc.testing\" [serving] validServingFor=[*.apps-crc.testing] issuer=\"ingress-operator@1762070846\" (2025-11-02 08:07:26 +0000 UTC to 2027-11-02 08:07:27 +0000 UTC (now=2025-12-08 17:45:49.040490719 +0000 UTC))" 2025-12-08T17:45:49.040842149+00:00 stderr F I1208 17:45:49.040790 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215948\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215948\" (2025-12-08 16:45:48 +0000 UTC to 2026-12-08 16:45:48 +0000 UTC (now=2025-12-08 17:45:49.040772787 +0000 UTC))" 2025-12-08T17:46:07.973399984+00:00 stderr F E1208 17:46:07.973347 1 webhook.go:253] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:07.974052783+00:00 stderr F E1208 17:46:07.974010 1 errors.go:77] Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:07.994411494+00:00 stderr F E1208 17:46:07.994377 1 osinserver.go:91] internal error: Get "https://10.217.4.1:443/apis/oauth.openshift.io/v1/oauthclients/openshift-challenging-client": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:18.465375716+00:00 stderr F E1208 17:46:18.465307 1 webhook.go:253] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:18.465539571+00:00 stderr F E1208 17:46:18.465511 1 errors.go:77] Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:18.487200761+00:00 stderr F E1208 17:46:18.487137 1 osinserver.go:91] internal error: Get "https://10.217.4.1:443/apis/oauth.openshift.io/v1/oauthclients/openshift-challenging-client": dial tcp 10.217.4.1:443: connect: connection refused 
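The webhook.go:253 and osinserver.go:91 failures above, and the identical ones at 17:46:29 below, are oauth-openshift losing its connection to the in-cluster Kubernetes API at 10.217.4.1:443 during the same window in which the service-ca controller hit "connection refused" at 17:46:24; the "Caches populated" reflector entries from 17:47:06 onward show the connection recovering on its own. Had the errors persisted, a first check from the oc session could look like this (a sketch using standard OpenShift resource names, none of which are taken from this log):

  # the kubernetes Service should list a ready kube-apiserver endpoint
  oc -n default get endpoints kubernetes
  # both operators should report Available=True and Degraded=False
  oc get clusteroperators kube-apiserver authentication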
2025-12-08T17:46:29.048792164+00:00 stderr F E1208 17:46:29.048649 1 webhook.go:253] Failed to make webhook authorizer request: Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:29.048792164+00:00 stderr F E1208 17:46:29.048780 1 errors.go:77] Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:29.076432774+00:00 stderr F E1208 17:46:29.076370 1 osinserver.go:91] internal error: Get "https://10.217.4.1:443/apis/oauth.openshift.io/v1/oauthclients/openshift-challenging-client": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:47:06.796224854+00:00 stderr F I1208 17:47:06.796166 1 reflector.go:351] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.29.2/tools/cache/reflector.go:229 2025-12-08T17:47:55.667836338+00:00 stderr F I1208 17:47:55.667757 1 reflector.go:351] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.29.2/tools/cache/reflector.go:229 2025-12-08T17:47:57.579435893+00:00 stderr F I1208 17:47:57.579358 1 reflector.go:351] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.29.2/tools/cache/reflector.go:229
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/05562bed0a58785cbffd80e5e63ed8943b1bccf2f61dbd7cf94aec4efa9e38cf.log
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator/0.log
2025-12-08T17:44:22.085478460+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="Go Version: go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" 2025-12-08T17:44:22.085478460+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="Go OS/Arch: linux/amd64" 2025-12-08T17:44:22.152717394+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="template client 
&v1.TemplateV1Client{restClient:(*rest.RESTClient)(0xc00083e1e0)}" 2025-12-08T17:44:22.152750845+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="image client &v1.ImageV1Client{restClient:(*rest.RESTClient)(0xc00083e280)}" 2025-12-08T17:44:22.445298425+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="waiting for informer caches to sync" 2025-12-08T17:44:22.469143745+00:00 stderr F E1208 17:44:22.468889 2 reflector.go:200] "Failed to watch" err="failed to list *v1.Template: the server is currently unable to handle the request (get templates.template.openshift.io)" logger="UnhandledError" reflector="github.com/openshift/client-go/template/informers/externalversions/factory.go:101" type="*v1.Template" 2025-12-08T17:44:22.469143745+00:00 stderr F E1208 17:44:22.468982 2 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" logger="UnhandledError" reflector="github.com/openshift/client-go/image/informers/externalversions/factory.go:101" type="*v1.ImageStream" 2025-12-08T17:44:23.970033325+00:00 stderr F E1208 17:44:23.969152 2 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" logger="UnhandledError" reflector="github.com/openshift/client-go/image/informers/externalversions/factory.go:101" type="*v1.ImageStream" 2025-12-08T17:44:23.989803134+00:00 stderr F E1208 17:44:23.989725 2 reflector.go:200] "Failed to watch" err="failed to list *v1.Template: the server is currently unable to handle the request (get templates.template.openshift.io)" logger="UnhandledError" reflector="github.com/openshift/client-go/template/informers/externalversions/factory.go:101" type="*v1.Template" 2025-12-08T17:44:25.848942846+00:00 stderr F E1208 17:44:25.848124 2 reflector.go:200] "Failed to watch" err="failed to list *v1.Template: the server is currently unable to handle the request (get templates.template.openshift.io)" logger="UnhandledError" reflector="github.com/openshift/client-go/template/informers/externalversions/factory.go:101" type="*v1.Template" 2025-12-08T17:44:26.135190534+00:00 stderr F E1208 17:44:26.134489 2 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" logger="UnhandledError" reflector="github.com/openshift/client-go/image/informers/externalversions/factory.go:101" type="*v1.ImageStream" 2025-12-08T17:44:29.702718644+00:00 stderr F E1208 17:44:29.702168 2 reflector.go:200] "Failed to watch" err="failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" logger="UnhandledError" reflector="github.com/openshift/client-go/image/informers/externalversions/factory.go:101" type="*v1.ImageStream" 2025-12-08T17:44:30.284506414+00:00 stderr F E1208 17:44:30.284441 2 reflector.go:200] "Failed to watch" err="failed to list *v1.Template: the server is currently unable to handle the request (get templates.template.openshift.io)" logger="UnhandledError" reflector="github.com/openshift/client-go/template/informers/externalversions/factory.go:101" type="*v1.Template" 2025-12-08T17:44:39.144936677+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="started events processor" 2025-12-08T17:44:39.144973888+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: 
stream dotnet already deleted so no worries on clearing tags" 2025-12-08T17:44:39.144973888+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream dotnet" 2025-12-08T17:44:39.147156029+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream fuse7-java-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.147156029+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-java-openshift" 2025-12-08T17:44:39.149658839+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream dotnet-runtime already deleted so no worries on clearing tags" 2025-12-08T17:44:39.149658839+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream dotnet-runtime" 2025-12-08T17:44:39.151264873+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream fuse7-eap-openshift-java11 already deleted so no worries on clearing tags" 2025-12-08T17:44:39.151264873+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-eap-openshift-java11" 2025-12-08T17:44:39.153457144+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream fuse7-eap-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.153457144+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-eap-openshift" 2025-12-08T17:44:39.153969988+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="no global imagestream configuration will block imagestream creation using " 2025-12-08T17:44:39.154787190+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream fuse7-java11-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.154787190+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-java11-openshift" 2025-12-08T17:44:39.156367555+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream fuse7-karaf-openshift-jdk11 already deleted so no worries on clearing tags" 2025-12-08T17:44:39.156367555+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-karaf-openshift-jdk11" 2025-12-08T17:44:39.156452887+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="At steady state: config the same and exists is true, in progress false, and version correct" 2025-12-08T17:44:39.157964450+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream golang already deleted so no worries on clearing tags" 2025-12-08T17:44:39.157964450+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream golang" 2025-12-08T17:44:39.179655272+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream fuse7-karaf-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.179655272+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-karaf-openshift" 
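The "Failed to watch ... the server is currently unable to handle the request" errors at 17:44:22-17:44:30 mean the aggregated template.openshift.io and image.openshift.io APIs (served by openshift-apiserver) were not yet available when the samples operator started; its reflectors keep retrying until the listings succeed around 17:44:39. A sketch for checking that from the same cluster, assuming cluster-admin access; the APIService names are the standard ones for these API groups:

# availability of the aggregated API groups the operator lists
oc get apiservice v1.template.openshift.io v1.image.openshift.io

# the component that serves both groups
oc get pods -n openshift-apiserver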
2025-12-08T17:44:39.184972781+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream httpd already deleted so no worries on clearing tags" 2025-12-08T17:44:39.184972781+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream httpd" 2025-12-08T17:44:39.208979409+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp3-openjdk11-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.209020530+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp3-openjdk11-openshift" 2025-12-08T17:44:39.211089287+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream java-runtime already deleted so no worries on clearing tags" 2025-12-08T17:44:39.211089287+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream java-runtime" 2025-12-08T17:44:39.212986020+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp4-openjdk11-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.213023361+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp4-openjdk11-openshift" 2025-12-08T17:44:39.215994604+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream java already deleted so no worries on clearing tags" 2025-12-08T17:44:39.215994604+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream java" 2025-12-08T17:44:39.217523297+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp4-openjdk11-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.217523297+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp4-openjdk11-runtime-openshift" 2025-12-08T17:44:39.225174920+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream jboss-datagrid73-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.225174920+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-datagrid73-openshift" 2025-12-08T17:44:39.226752763+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk11-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.226752763+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk11-openshift" 2025-12-08T17:44:39.228602975+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream jboss-webserver57-openjdk11-tomcat9-openshift-ubi8 already deleted so no worries on clearing tags" 2025-12-08T17:44:39.228602975+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-webserver57-openjdk11-tomcat9-openshift-ubi8" 2025-12-08T17:44:39.231736242+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: 
stream jboss-eap-xp3-openjdk11-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.231736242+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp3-openjdk11-runtime-openshift" 2025-12-08T17:44:39.239273382+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk11-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.239273382+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk11-runtime-openshift" 2025-12-08T17:44:39.243325694+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk8-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.243325694+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk8-openshift" 2025-12-08T17:44:39.245580087+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk8-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:39.245580087+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk8-runtime-openshift" 2025-12-08T17:44:39.246946095+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="clearImageStreamTagError: stream jboss-webserver57-openjdk8-tomcat9-openshift-ubi8 already deleted so no worries on clearing tags" 2025-12-08T17:44:39.246946095+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-webserver57-openjdk8-tomcat9-openshift-ubi8" 2025-12-08T17:44:39.248283473+00:00 stderr F time="2025-12-08T17:44:39Z" level=warning msg="Image import for imagestream jenkins-agent-base tag scheduled-upgrade generation 3 failed with detailed message Internal error occurred: registry.redhat.io/ocp-tools-4/jenkins-agent-base-rhel8:v4.13.0: Get \"https://registry.redhat.io/v2/ocp-tools-4/jenkins-agent-base-rhel8/manifests/v4.13.0\": unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" 2025-12-08T17:44:39.707431458+00:00 stderr F time="2025-12-08T17:44:39Z" level=info msg="initiated an imagestreamimport retry for imagestream/tag jenkins-agent-base/scheduled-upgrade" 2025-12-08T17:44:39.718969859+00:00 stderr F time="2025-12-08T17:44:39Z" level=warning msg="Image import for imagestream jenkins tag scheduled-upgrade-redeploy generation 3 failed with detailed message Internal error occurred: registry.redhat.io/ocp-tools-4/jenkins-rhel8:v4.13.0: Get \"https://registry.redhat.io/v2/ocp-tools-4/jenkins-rhel8/manifests/v4.13.0\": unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
Further instructions can be found here: https://access.redhat.com/RegistryAuthentication" 2025-12-08T17:44:40.022957317+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="initiated an imagestreamimport retry for imagestream/tag jenkins/scheduled-upgrade-redeploy" 2025-12-08T17:44:40.026114955+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream nginx already deleted so no worries on clearing tags" 2025-12-08T17:44:40.026114955+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream nginx" 2025-12-08T17:44:40.027753441+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream openjdk-11-rhel7 already deleted so no worries on clearing tags" 2025-12-08T17:44:40.027753441+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream openjdk-11-rhel7" 2025-12-08T17:44:40.029414668+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream mariadb already deleted so no worries on clearing tags" 2025-12-08T17:44:40.029414668+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream mariadb" 2025-12-08T17:44:40.030722554+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream nodejs already deleted so no worries on clearing tags" 2025-12-08T17:44:40.030722554+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream nodejs" 2025-12-08T17:44:40.032249807+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream postgresql13-for-sso75-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T17:44:40.032269907+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql13-for-sso75-openshift-rhel8" 2025-12-08T17:44:40.033760648+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream mysql already deleted so no worries on clearing tags" 2025-12-08T17:44:40.033760648+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream mysql" 2025-12-08T17:44:40.036369551+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream postgresql already deleted so no worries on clearing tags" 2025-12-08T17:44:40.036369551+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql" 2025-12-08T17:44:40.037939045+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream redhat-openjdk18-openshift already deleted so no worries on clearing tags" 2025-12-08T17:44:40.037939045+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream redhat-openjdk18-openshift" 2025-12-08T17:44:40.039469097+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream perl already deleted so no worries on clearing tags" 2025-12-08T17:44:40.039469097+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream perl" 2025-12-08T17:44:40.040974759+00:00 stderr F time="2025-12-08T17:44:40Z" 
level=info msg="clearImageStreamTagError: stream php already deleted so no worries on clearing tags" 2025-12-08T17:44:40.040974759+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream php" 2025-12-08T17:44:40.042399508+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream postgresql13-for-sso76-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T17:44:40.042399508+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql13-for-sso76-openshift-rhel8" 2025-12-08T17:44:40.043849919+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream sso76-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T17:44:40.043849919+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream sso76-openshift-rhel8" 2025-12-08T17:44:40.045526005+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream python already deleted so no worries on clearing tags" 2025-12-08T17:44:40.045526005+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream python" 2025-12-08T17:44:40.047148841+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream redis already deleted so no worries on clearing tags" 2025-12-08T17:44:40.047198312+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream redis" 2025-12-08T17:44:40.048981172+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream ruby already deleted so no worries on clearing tags" 2025-12-08T17:44:40.048981172+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream ruby" 2025-12-08T17:44:40.050583706+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream sso75-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T17:44:40.050626237+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream sso75-openshift-rhel8" 2025-12-08T17:44:40.052393717+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-11-runtime already deleted so no worries on clearing tags" 2025-12-08T17:44:40.052393717+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-11-runtime" 2025-12-08T17:44:40.055313468+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-17 already deleted so no worries on clearing tags" 2025-12-08T17:44:40.055337229+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-17" 2025-12-08T17:44:40.057212931+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-11 already deleted so no worries on clearing tags" 2025-12-08T17:44:40.057232692+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-11" 
2025-12-08T17:44:40.059378311+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-21 already deleted so no worries on clearing tags" 2025-12-08T17:44:40.059378311+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-21" 2025-12-08T17:44:40.061578542+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearing error messages from configmap for stream jenkins-agent-base and tag scheduled-upgrade" 2025-12-08T17:44:40.064964916+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream jenkins-agent-base" 2025-12-08T17:44:40.068665290+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-8-runtime already deleted so no worries on clearing tags" 2025-12-08T17:44:40.068665290+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-8-runtime" 2025-12-08T17:44:40.070815639+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearing error messages from configmap for stream jenkins and tag scheduled-upgrade-redeploy" 2025-12-08T17:44:40.075358366+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream jenkins" 2025-12-08T17:44:40.078509233+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="CRDUPDATE importerrors false update" 2025-12-08T17:44:40.078577975+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-17-runtime already deleted so no worries on clearing tags" 2025-12-08T17:44:40.078577975+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-17-runtime" 2025-12-08T17:44:40.081247189+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-8 already deleted so no worries on clearing tags" 2025-12-08T17:44:40.081247189+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-8" 2025-12-08T17:44:40.082951557+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-21-runtime already deleted so no worries on clearing tags" 2025-12-08T17:44:40.082951557+00:00 stderr F time="2025-12-08T17:44:40Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-21-runtime" 2025-12-08T17:44:43.109155291+00:00 stderr F time="2025-12-08T17:44:43Z" level=info msg="no global imagestream configuration will block imagestream creation using " 2025-12-08T17:44:43.109155291+00:00 stderr F time="2025-12-08T17:44:43Z" level=info msg="At steady state: config the same and exists is true, in progress false, and version correct" 2025-12-08T17:44:43.162075034+00:00 stderr F time="2025-12-08T17:44:43Z" level=info msg="no global imagestream configuration will block imagestream creation using " 2025-12-08T17:44:43.162075034+00:00 stderr F time="2025-12-08T17:44:43Z" level=info msg="At steady state: config the same and exists is true, in progress false, and version correct" 2025-12-08T17:47:05.719347885+00:00 stderr F time="2025-12-08T17:47:05Z" level=info msg="no global imagestream configuration will block imagestream creation using " 
2025-12-08T17:47:05.719347885+00:00 stderr F time="2025-12-08T17:47:05Z" level=info msg="At steady state: config the same and exists is true, in progress false, and version correct" 2025-12-08T17:47:10.522955809+00:00 stderr F time="2025-12-08T17:47:10Z" level=info msg="no global imagestream configuration will block imagestream creation using " 2025-12-08T17:47:10.522955809+00:00 stderr F time="2025-12-08T17:47:10Z" level=info msg="At steady state: config the same and exists is true, in progress false, and version correct" 2025-12-08T17:53:26.015266663+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="no global imagestream configuration will block imagestream creation using registry.redhat.io" 2025-12-08T17:53:26.015266663+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="SamplesRegistry changed from to registry.redhat.io" 2025-12-08T17:53:26.015266663+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="ENTERING UPSERT / STEADY STATE PATH ExistTrue true ImageInProgressFalse true VersionOK true ConfigChanged true ManagementStateChanged true" 2025-12-08T17:53:26.231372268+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream fuse7-karaf-openshift-jdk11" 2025-12-08T17:53:26.243621561+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream ubi8-openjdk-17-runtime" 2025-12-08T17:53:26.257435647+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream ruby" 2025-12-08T17:53:26.272919078+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream dotnet" 2025-12-08T17:53:26.296490559+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream fuse7-java-openshift" 2025-12-08T17:53:26.310401377+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream fuse7-java11-openshift" 2025-12-08T17:53:26.349907021+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream ubi8-openjdk-21" 2025-12-08T17:53:26.394266617+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream ubi8-openjdk-8" 2025-12-08T17:53:26.433123164+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream ubi8-openjdk-8-runtime" 2025-12-08T17:53:26.474893760+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream mariadb" 2025-12-08T17:53:26.512737628+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream php" 2025-12-08T17:53:26.552036987+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream dotnet-runtime" 2025-12-08T17:53:26.591210482+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream jboss-eap74-openjdk8-openshift" 2025-12-08T17:53:26.633042579+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream golang" 2025-12-08T17:53:26.676001747+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream mysql" 2025-12-08T17:53:26.713737484+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream nginx" 2025-12-08T17:53:26.753986668+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream postgresql" 2025-12-08T17:53:26.791372695+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream jboss-webserver57-openjdk11-tomcat9-openshift-ubi8" 2025-12-08T17:53:26.831763173+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream jboss-webserver57-openjdk8-tomcat9-openshift-ubi8" 
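At 17:53:26 the operator logs "SamplesRegistry changed from  to registry.redhat.io" and enters the upsert path, rewriting every sample imagestream to pull from that registry (the long run of "updated imagestream" entries around this point). A sketch of how such a change is normally made, assuming it was done through the operator's Config custom resource, which is its documented interface; whether this CI job patches it exactly this way is not shown in these logs:

# point the samples operator at registry.redhat.io; it then upserts all sample imagestreams
oc patch configs.samples.operator.openshift.io cluster --type merge \
  -p '{"spec":{"samplesRegistry":"registry.redhat.io"}}'

# the operator reports the result through its clusteroperator
oc get clusteroperator openshift-samples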
2025-12-08T17:53:26.872256324+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream jboss-datagrid73-openshift" 2025-12-08T17:53:26.911649425+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream jboss-eap-xp4-openjdk11-runtime-openshift" 2025-12-08T17:53:26.954932432+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream ubi8-openjdk-11" 2025-12-08T17:53:26.992456532+00:00 stderr F time="2025-12-08T17:53:26Z" level=info msg="updated imagestream ubi8-openjdk-11-runtime" 2025-12-08T17:53:27.031736310+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream jenkins-agent-base" 2025-12-08T17:53:27.072823777+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream postgresql13-for-sso76-openshift-rhel8" 2025-12-08T17:53:27.112668160+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream sso75-openshift-rhel8" 2025-12-08T17:53:27.151498836+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream jboss-eap-xp4-openjdk11-openshift" 2025-12-08T17:53:27.195829572+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream fuse7-eap-openshift" 2025-12-08T17:53:27.240435435+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream redhat-openjdk18-openshift" 2025-12-08T17:53:27.272913938+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream jenkins" 2025-12-08T17:53:27.312755081+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream perl" 2025-12-08T17:53:27.354471856+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream python" 2025-12-08T17:53:27.392281333+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream redis" 2025-12-08T17:53:27.432301661+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream jboss-eap74-openjdk11-openshift" 2025-12-08T17:53:27.470609163+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream fuse7-eap-openshift-java11" 2025-12-08T17:53:27.519844502+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream fuse7-karaf-openshift" 2025-12-08T17:53:27.560538708+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream httpd" 2025-12-08T17:53:27.594749598+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream java" 2025-12-08T17:53:27.634772947+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream openjdk-11-rhel7" 2025-12-08T17:53:27.670775106+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream jboss-eap74-openjdk8-runtime-openshift" 2025-12-08T17:53:27.713602060+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream ubi8-openjdk-17" 2025-12-08T17:53:27.751324816+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream ubi8-openjdk-21-runtime" 2025-12-08T17:53:27.792829764+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream sso76-openshift-rhel8" 2025-12-08T17:53:27.833757858+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream java-runtime" 2025-12-08T17:53:27.876408417+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream nodejs" 2025-12-08T17:53:27.915066988+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream postgresql13-for-sso75-openshift-rhel8" 
2025-12-08T17:53:27.955849857+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream jboss-eap-xp3-openjdk11-openshift" 2025-12-08T17:53:27.992005320+00:00 stderr F time="2025-12-08T17:53:27Z" level=info msg="updated imagestream jboss-eap-xp3-openjdk11-runtime-openshift" 2025-12-08T17:53:28.034780434+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="updated imagestream jboss-eap74-openjdk11-runtime-openshift" 2025-12-08T17:53:28.034780434+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="CRDUPDATE samples upserted; set clusteroperator ready, steady state" 2025-12-08T17:53:28.034833955+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-karaf-openshift-jdk11" 2025-12-08T17:53:28.048124756+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-17-runtime" 2025-12-08T17:53:28.069328693+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-java11-openshift" 2025-12-08T17:53:28.088585187+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream dotnet" 2025-12-08T17:53:28.108552909+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-java-openshift" 2025-12-08T17:53:28.128307136+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream ruby" 2025-12-08T17:53:28.146960094+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-8" 2025-12-08T17:53:28.167265536+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-21" 2025-12-08T17:53:28.186712105+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream mariadb" 2025-12-08T17:53:28.207326445+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-8-runtime" 2025-12-08T17:53:28.227692028+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream dotnet-runtime" 2025-12-08T17:53:28.247467776+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream php" 2025-12-08T17:53:28.268901769+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk8-openshift" 2025-12-08T17:53:28.287836724+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream golang" 2025-12-08T17:53:28.306772139+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-webserver57-openjdk8-tomcat9-openshift-ubi8" 2025-12-08T17:53:28.325855457+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-datagrid73-openshift" 2025-12-08T17:53:28.347208738+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or 
image imports in flight for imagestream nginx" 2025-12-08T17:53:28.367277984+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream mysql" 2025-12-08T17:53:28.386509167+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql" 2025-12-08T17:53:28.407770215+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-webserver57-openjdk11-tomcat9-openshift-ubi8" 2025-12-08T17:53:28.428225601+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-11" 2025-12-08T17:53:28.448313117+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream sso75-openshift-rhel8" 2025-12-08T17:53:28.470154041+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp4-openjdk11-runtime-openshift" 2025-12-08T17:53:28.489291222+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp4-openjdk11-openshift" 2025-12-08T17:53:28.507776214+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jenkins-agent-base" 2025-12-08T17:53:28.526543744+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-11-runtime" 2025-12-08T17:53:28.548007928+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql13-for-sso76-openshift-rhel8" 2025-12-08T17:53:28.566891091+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream redhat-openjdk18-openshift" 2025-12-08T17:53:28.587597955+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-eap-openshift" 2025-12-08T17:53:28.606675944+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk11-openshift" 2025-12-08T17:53:28.627537040+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream perl" 2025-12-08T17:53:28.649396765+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-karaf-openshift" 2025-12-08T17:53:28.668064213+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jenkins" 2025-12-08T17:53:28.690833762+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream java" 2025-12-08T17:53:28.707217268+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-eap-openshift-java11" 2025-12-08T17:53:28.728037423+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream python" 2025-12-08T17:53:28.747522843+00:00 stderr F time="2025-12-08T17:53:28Z" level=info 
msg="There are no more errors or image imports in flight for imagestream redis" 2025-12-08T17:53:28.772522833+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-21-runtime" 2025-12-08T17:53:28.788803086+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-17" 2025-12-08T17:53:28.808198123+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream java-runtime" 2025-12-08T17:53:28.826302345+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream openjdk-11-rhel7" 2025-12-08T17:53:28.847926763+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk8-runtime-openshift" 2025-12-08T17:53:28.867907846+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream httpd" 2025-12-08T17:53:28.887365326+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream sso76-openshift-rhel8" 2025-12-08T17:53:28.906304681+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql13-for-sso75-openshift-rhel8" 2025-12-08T17:53:28.927585579+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp3-openjdk11-runtime-openshift" 2025-12-08T17:53:28.948134758+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="clearImageStreamTagError: stream dotnet already deleted so no worries on clearing tags" 2025-12-08T17:53:28.948218920+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream dotnet" 2025-12-08T17:53:28.966290471+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream nodejs" 2025-12-08T17:53:28.989610755+00:00 stderr F time="2025-12-08T17:53:28Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp3-openjdk11-openshift" 2025-12-08T17:53:29.006999499+00:00 stderr F time="2025-12-08T17:53:29Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk11-runtime-openshift" 2025-12-08T17:53:29.029853190+00:00 stderr F time="2025-12-08T17:53:29Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-17-runtime already deleted so no worries on clearing tags" 2025-12-08T17:53:29.029853190+00:00 stderr F time="2025-12-08T17:53:29Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-17-runtime" 2025-12-08T17:53:29.045407252+00:00 stderr F time="2025-12-08T17:53:29Z" level=info msg="clearImageStreamTagError: stream java-runtime already deleted so no worries on clearing tags" 2025-12-08T17:53:29.045465494+00:00 stderr F time="2025-12-08T17:53:29Z" level=info msg="There are no more errors or image imports in flight for imagestream java-runtime" 2025-12-08T17:53:30.229725134+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-8-runtime already deleted so no worries on clearing tags" 
2025-12-08T17:53:30.229725134+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-8-runtime" 2025-12-08T17:53:30.242667437+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-8 already deleted so no worries on clearing tags" 2025-12-08T17:53:30.242667437+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-8" 2025-12-08T17:53:30.542340695+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-17 already deleted so no worries on clearing tags" 2025-12-08T17:53:30.542340695+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-17" 2025-12-08T17:53:30.727436768+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-11 already deleted so no worries on clearing tags" 2025-12-08T17:53:30.727436768+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-11" 2025-12-08T17:53:30.900027300+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="clearImageStreamTagError: stream openjdk-11-rhel7 already deleted so no worries on clearing tags" 2025-12-08T17:53:30.900027300+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="There are no more errors or image imports in flight for imagestream openjdk-11-rhel7" 2025-12-08T17:53:30.929991355+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-11-runtime already deleted so no worries on clearing tags" 2025-12-08T17:53:30.929991355+00:00 stderr F time="2025-12-08T17:53:30Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-11-runtime" 2025-12-08T17:53:31.060302868+00:00 stderr F time="2025-12-08T17:53:31Z" level=info msg="no global imagestream configuration will block imagestream creation using registry.redhat.io" 2025-12-08T17:53:31.060302868+00:00 stderr F time="2025-12-08T17:53:31Z" level=info msg="At steady state: config the same and exists is true, in progress false, and version correct" 2025-12-08T17:53:32.580609475+00:00 stderr F time="2025-12-08T17:53:32Z" level=info msg="clearImageStreamTagError: stream redhat-openjdk18-openshift already deleted so no worries on clearing tags" 2025-12-08T17:53:32.580609475+00:00 stderr F time="2025-12-08T17:53:32Z" level=info msg="There are no more errors or image imports in flight for imagestream redhat-openjdk18-openshift" 2025-12-08T17:54:39.135703267+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream java-runtime already deleted so no worries on clearing tags" 2025-12-08T17:54:39.135703267+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream java-runtime" 2025-12-08T17:54:39.139529411+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp4-openjdk11-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.139529411+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp4-openjdk11-openshift" 2025-12-08T17:54:39.143304502+00:00 stderr F 
time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream mariadb already deleted so no worries on clearing tags" 2025-12-08T17:54:39.143304502+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream mariadb" 2025-12-08T17:54:39.147071993+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-21 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.147071993+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-21" 2025-12-08T17:54:39.150911877+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-8 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.150911877+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-8" 2025-12-08T17:54:39.156388344+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream php already deleted so no worries on clearing tags" 2025-12-08T17:54:39.156388344+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream php" 2025-12-08T17:54:39.161494621+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream httpd already deleted so no worries on clearing tags" 2025-12-08T17:54:39.161494621+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream httpd" 2025-12-08T17:54:39.164669417+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream fuse7-eap-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.164669417+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-eap-openshift" 2025-12-08T17:54:39.167546515+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream ruby already deleted so no worries on clearing tags" 2025-12-08T17:54:39.167634317+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ruby" 2025-12-08T17:54:39.170000480+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-21-runtime already deleted so no worries on clearing tags" 2025-12-08T17:54:39.170000480+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-21-runtime" 2025-12-08T17:54:39.172923838+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream sso76-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.173001160+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream sso76-openshift-rhel8" 2025-12-08T17:54:39.176178647+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream fuse7-eap-openshift-java11 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.176178647+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-eap-openshift-java11" 
2025-12-08T17:54:39.180341788+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream golang already deleted so no worries on clearing tags" 2025-12-08T17:54:39.180523953+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream golang" 2025-12-08T17:54:39.184321486+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-17-runtime already deleted so no worries on clearing tags" 2025-12-08T17:54:39.184321486+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-17-runtime" 2025-12-08T17:54:39.187034188+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-11 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.187133552+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-11" 2025-12-08T17:54:39.189938127+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream mysql already deleted so no worries on clearing tags" 2025-12-08T17:54:39.189938127+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream mysql" 2025-12-08T17:54:39.192831695+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp3-openjdk11-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.193007050+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp3-openjdk11-openshift" 2025-12-08T17:54:39.196164014+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream python already deleted so no worries on clearing tags" 2025-12-08T17:54:39.196164014+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream python" 2025-12-08T17:54:39.199941026+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jenkins already deleted so no worries on clearing tags" 2025-12-08T17:54:39.199941026+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jenkins" 2025-12-08T17:54:39.203462320+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream dotnet already deleted so no worries on clearing tags" 2025-12-08T17:54:39.203462320+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream dotnet" 2025-12-08T17:54:39.207853809+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream perl already deleted so no worries on clearing tags" 2025-12-08T17:54:39.208046444+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream perl" 2025-12-08T17:54:39.212068622+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-17 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.212068622+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-17" 
2025-12-08T17:54:39.214119797+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream postgresql13-for-sso76-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.214119797+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql13-for-sso76-openshift-rhel8" 2025-12-08T17:54:39.216247955+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream openjdk-11-rhel7 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.216247955+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream openjdk-11-rhel7" 2025-12-08T17:54:39.216865191+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream java already deleted so no worries on clearing tags" 2025-12-08T17:54:39.216865191+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream java" 2025-12-08T17:54:39.218874146+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream nginx already deleted so no worries on clearing tags" 2025-12-08T17:54:39.218982918+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream nginx" 2025-12-08T17:54:39.220903240+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream fuse7-karaf-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.220903240+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-karaf-openshift" 2025-12-08T17:54:39.224574579+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk11-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.224676212+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk11-openshift" 2025-12-08T17:54:39.228270868+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream fuse7-java11-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.228270868+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-java11-openshift" 2025-12-08T17:54:39.231135585+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream dotnet-runtime already deleted so no worries on clearing tags" 2025-12-08T17:54:39.231135585+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream dotnet-runtime" 2025-12-08T17:54:39.233138209+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream sso75-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.233138209+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream sso75-openshift-rhel8" 2025-12-08T17:54:39.236241863+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp4-openjdk11-runtime-openshift already deleted so no worries on clearing tags" 
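The resync passes at 17:54:39 and later (17:57, 18:04 below) only re-confirm that no imports are in flight and that the operator is "At steady state". That state is also recorded as conditions on the operator's Config resource, which is quicker to read than the log; a sketch, assuming cluster-admin access and that the condition names (SamplesExist, ImageChangesInProgress, ImportImageErrorsExist, ...) match the operator's published API:

# condition summary kept by the samples operator in its Config status
oc get configs.samples.operator.openshift.io cluster \
  -o jsonpath='{range .status.conditions[*]}{.type}{"="}{.status}{"\n"}{end}'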
2025-12-08T17:54:39.236241863+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp4-openjdk11-runtime-openshift" 2025-12-08T17:54:39.238211466+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream postgresql already deleted so no worries on clearing tags" 2025-12-08T17:54:39.238211466+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql" 2025-12-08T17:54:39.240561779+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream redhat-openjdk18-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.240561779+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream redhat-openjdk18-openshift" 2025-12-08T17:54:39.242439769+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream nodejs already deleted so no worries on clearing tags" 2025-12-08T17:54:39.242531262+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream nodejs" 2025-12-08T17:54:39.245462561+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-8-runtime already deleted so no worries on clearing tags" 2025-12-08T17:54:39.245462561+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-8-runtime" 2025-12-08T17:54:39.247562797+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk8-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.247562797+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk8-runtime-openshift" 2025-12-08T17:54:39.250333732+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-11-runtime already deleted so no worries on clearing tags" 2025-12-08T17:54:39.250333732+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-11-runtime" 2025-12-08T17:54:39.252430109+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-webserver57-openjdk11-tomcat9-openshift-ubi8 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.252430109+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-webserver57-openjdk11-tomcat9-openshift-ubi8" 2025-12-08T17:54:39.254441993+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-datagrid73-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.254441993+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-datagrid73-openshift" 2025-12-08T17:54:39.256931119+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream postgresql13-for-sso75-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.256931119+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no 
more errors or image imports in flight for imagestream postgresql13-for-sso75-openshift-rhel8" 2025-12-08T17:54:39.259021786+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk8-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.259021786+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk8-openshift" 2025-12-08T17:54:39.261248326+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream fuse7-java-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.261341538+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-java-openshift" 2025-12-08T17:54:39.263098136+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk11-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.263098136+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk11-runtime-openshift" 2025-12-08T17:54:39.264939375+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jenkins-agent-base already deleted so no worries on clearing tags" 2025-12-08T17:54:39.264939375+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jenkins-agent-base" 2025-12-08T17:54:39.266744994+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp3-openjdk11-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T17:54:39.266800115+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp3-openjdk11-runtime-openshift" 2025-12-08T17:54:39.268582094+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream jboss-webserver57-openjdk8-tomcat9-openshift-ubi8 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.268582094+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-webserver57-openjdk8-tomcat9-openshift-ubi8" 2025-12-08T17:54:39.270742301+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream fuse7-karaf-openshift-jdk11 already deleted so no worries on clearing tags" 2025-12-08T17:54:39.270800682+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-karaf-openshift-jdk11" 2025-12-08T17:54:39.272641552+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="clearImageStreamTagError: stream redis already deleted so no worries on clearing tags" 2025-12-08T17:54:39.272641552+00:00 stderr F time="2025-12-08T17:54:39Z" level=info msg="There are no more errors or image imports in flight for imagestream redis" 2025-12-08T17:57:05.723401868+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="no global imagestream configuration will block imagestream creation using registry.redhat.io" 2025-12-08T17:57:05.723401868+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="At steady state: config the same and exists is true, in progress false, and version 
correct" 2025-12-08T17:57:10.521137064+00:00 stderr F time="2025-12-08T17:57:10Z" level=info msg="no global imagestream configuration will block imagestream creation using registry.redhat.io" 2025-12-08T17:57:10.521194995+00:00 stderr F time="2025-12-08T17:57:10Z" level=info msg="At steady state: config the same and exists is true, in progress false, and version correct" 2025-12-08T18:04:39.135585133+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream dotnet already deleted so no worries on clearing tags" 2025-12-08T18:04:39.135685806+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream dotnet" 2025-12-08T18:04:39.140604507+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream fuse7-java11-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.140604507+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-java11-openshift" 2025-12-08T18:04:39.143189905+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream openjdk-11-rhel7 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.143255247+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream openjdk-11-rhel7" 2025-12-08T18:04:39.146496613+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream fuse7-karaf-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.146496613+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-karaf-openshift" 2025-12-08T18:04:39.148645280+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream java already deleted so no worries on clearing tags" 2025-12-08T18:04:39.148645280+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream java" 2025-12-08T18:04:39.151648799+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream dotnet-runtime already deleted so no worries on clearing tags" 2025-12-08T18:04:39.151648799+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream dotnet-runtime" 2025-12-08T18:04:39.153923660+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream nginx already deleted so no worries on clearing tags" 2025-12-08T18:04:39.153923660+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream nginx" 2025-12-08T18:04:39.159600401+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-webserver57-openjdk11-tomcat9-openshift-ubi8 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.159600401+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-webserver57-openjdk11-tomcat9-openshift-ubi8" 2025-12-08T18:04:39.162252371+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream nodejs already deleted so no worries on clearing tags" 2025-12-08T18:04:39.162252371+00:00 stderr F time="2025-12-08T18:04:39Z" level=info 
msg="There are no more errors or image imports in flight for imagestream nodejs" 2025-12-08T18:04:39.165013604+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk11-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.165013604+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk11-openshift" 2025-12-08T18:04:39.167594342+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp4-openjdk11-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.167594342+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp4-openjdk11-runtime-openshift" 2025-12-08T18:04:39.169832021+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream sso75-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.169832021+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream sso75-openshift-rhel8" 2025-12-08T18:04:39.172238685+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk8-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.172238685+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk8-runtime-openshift" 2025-12-08T18:04:39.174245129+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream redhat-openjdk18-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.174304120+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream redhat-openjdk18-openshift" 2025-12-08T18:04:39.176322444+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream postgresql already deleted so no worries on clearing tags" 2025-12-08T18:04:39.176322444+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql" 2025-12-08T18:04:39.178474040+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream postgresql13-for-sso75-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.178474040+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql13-for-sso75-openshift-rhel8" 2025-12-08T18:04:39.180542425+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-11-runtime already deleted so no worries on clearing tags" 2025-12-08T18:04:39.180542425+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-11-runtime" 2025-12-08T18:04:39.183541015+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-8-runtime already deleted so no worries on clearing tags" 2025-12-08T18:04:39.183541015+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-8-runtime" 
2025-12-08T18:04:39.185625340+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream fuse7-java-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.185625340+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-java-openshift" 2025-12-08T18:04:39.188803805+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk11-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.188863106+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk11-runtime-openshift" 2025-12-08T18:04:39.191004363+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jenkins-agent-base already deleted so no worries on clearing tags" 2025-12-08T18:04:39.191004363+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jenkins-agent-base" 2025-12-08T18:04:39.193318285+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-datagrid73-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.193364826+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-datagrid73-openshift" 2025-12-08T18:04:39.195856122+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp3-openjdk11-runtime-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.195856122+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp3-openjdk11-runtime-openshift" 2025-12-08T18:04:39.198653776+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream fuse7-eap-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.198711178+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-eap-openshift" 2025-12-08T18:04:39.201005948+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream fuse7-karaf-openshift-jdk11 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.201063489+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-karaf-openshift-jdk11" 2025-12-08T18:04:39.202951420+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap74-openjdk8-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.202951420+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap74-openjdk8-openshift" 2025-12-08T18:04:39.205262931+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream redis already deleted so no worries on clearing tags" 2025-12-08T18:04:39.205262931+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream redis" 2025-12-08T18:04:39.207643715+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream 
ubi8-openjdk-8 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.207643715+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-8" 2025-12-08T18:04:39.209634487+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-webserver57-openjdk8-tomcat9-openshift-ubi8 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.209634487+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-webserver57-openjdk8-tomcat9-openshift-ubi8" 2025-12-08T18:04:39.211684921+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream ruby already deleted so no worries on clearing tags" 2025-12-08T18:04:39.211684921+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ruby" 2025-12-08T18:04:39.213949642+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp4-openjdk11-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.213949642+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp4-openjdk11-openshift" 2025-12-08T18:04:39.216468578+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream httpd already deleted so no worries on clearing tags" 2025-12-08T18:04:39.216468578+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream httpd" 2025-12-08T18:04:39.218502253+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream java-runtime already deleted so no worries on clearing tags" 2025-12-08T18:04:39.218502253+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream java-runtime" 2025-12-08T18:04:39.224180503+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream mariadb already deleted so no worries on clearing tags" 2025-12-08T18:04:39.224180503+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream mariadb" 2025-12-08T18:04:39.227108901+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-21 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.227108901+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-21" 2025-12-08T18:04:39.229288918+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream golang already deleted so no worries on clearing tags" 2025-12-08T18:04:39.229288918+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream golang" 2025-12-08T18:04:39.231495597+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream php already deleted so no worries on clearing tags" 2025-12-08T18:04:39.231495597+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream php" 2025-12-08T18:04:39.233479739+00:00 stderr F time="2025-12-08T18:04:39Z" 
level=info msg="clearImageStreamTagError: stream ubi8-openjdk-21-runtime already deleted so no worries on clearing tags" 2025-12-08T18:04:39.233479739+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-21-runtime" 2025-12-08T18:04:39.235353500+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream sso76-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.235353500+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream sso76-openshift-rhel8" 2025-12-08T18:04:39.237328011+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream mysql already deleted so no worries on clearing tags" 2025-12-08T18:04:39.237408533+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream mysql" 2025-12-08T18:04:39.239948701+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-17-runtime already deleted so no worries on clearing tags" 2025-12-08T18:04:39.239948701+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-17-runtime" 2025-12-08T18:04:39.243109184+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-11 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.243109184+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream ubi8-openjdk-11" 2025-12-08T18:04:39.245509858+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jboss-eap-xp3-openjdk11-openshift already deleted so no worries on clearing tags" 2025-12-08T18:04:39.245509858+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jboss-eap-xp3-openjdk11-openshift" 2025-12-08T18:04:39.247211694+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream fuse7-eap-openshift-java11 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.247211694+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream fuse7-eap-openshift-java11" 2025-12-08T18:04:39.251506617+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream perl already deleted so no worries on clearing tags" 2025-12-08T18:04:39.251746373+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream perl" 2025-12-08T18:04:39.253518641+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream jenkins already deleted so no worries on clearing tags" 2025-12-08T18:04:39.253518641+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream jenkins" 2025-12-08T18:04:39.255223476+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream ubi8-openjdk-17 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.255223476+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream 
ubi8-openjdk-17" 2025-12-08T18:04:39.256674104+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream postgresql13-for-sso76-openshift-rhel8 already deleted so no worries on clearing tags" 2025-12-08T18:04:39.256674104+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream postgresql13-for-sso76-openshift-rhel8" 2025-12-08T18:04:39.258573605+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="clearImageStreamTagError: stream python already deleted so no worries on clearing tags" 2025-12-08T18:04:39.258573605+00:00 stderr F time="2025-12-08T18:04:39Z" level=info msg="There are no more errors or image imports in flight for imagestream python"
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator-watch/
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator-watch/0.log
2025-12-08T17:44:22.784503138+00:00 stderr F I1208 17:44:22.784312 12 cmd.go:331] Waiting for process with process name "cluster-samples-operator" ...
2025-12-08T17:44:22.790968594+00:00 stderr F I1208 17:44:22.790722 12 cmd.go:341] Watching for changes in: ([]string) (len=2 cap=2) { 2025-12-08T17:44:22.790968594+00:00 stderr F (string) (len=32) "/proc/2/root/etc/secrets/tls.crt", 2025-12-08T17:44:22.790968594+00:00 stderr F (string) (len=32) "/proc/2/root/etc/secrets/tls.key" 2025-12-08T17:44:22.790968594+00:00 stderr F } 2025-12-08T17:44:22.802819067+00:00 stderr F I1208 17:44:22.802493 12 observer_polling.go:159] Starting file observer
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b/
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b/cluster-version-operator/
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b/cluster-version-operator/0.log.gz
[gzip-compressed log archive: binary contents of cluster-version-operator/0.log.gz not reproducible as text]
ËÝ—u¾_-ÏŽöÅžlÝõa·ÜþÐðGØbyÈóHV0u˜å®§t=^êß E‡SG—PŒç¦Ç‡þdr·V,˶;&´g­B|uPŸ« —\%Ç*W“•üϳ•ÈE+ÂÈQ+vÜÖàqqö Ü G(\@íIúô@.¡ÑÚ>æÑÖ>"É`1 8\óïI.JO0ÏrÆôÞÌÂVɽÄ#”V-ŸÅæ»OÍ ˜bJFœ Yú¯véní=Yu³R)ÏVúzúüÜSð¥Û5¢ÄÏÅT!J])H(îjµ£±X°#ÖXÇéŸ]»±ùôËr÷í´9§)„m‘rßVrBz‘³4«êh 0]ðeÖà‰¾ÁëšÛKBÖ=œ—(˜ ‘ÕŸªšPP1€µUàQçA+M&lÊo’0:)î§üȘŸn¥ŸhÑà!h5ŽVŽ^’p…J@ë%N²\Œ¶1µƒH®32ž½ðܦá6þºgثչaŒ¼ æÎäq§¯EÏr›ÍœÉHnÛ}Ú¤7-šc]³â›S|¤ìXv¬±"n  ¾ÔjHSÌîo|tx¢º¯¯ÂlÂ.±Ž àýôS¤³œuµA?V*pc ý—[ƒ Å éiùª¡Sn§0mK`'4&9ËaÓ\IUÌîUZJ<wøC¾&ZœƒŸËÝ€¶ŒËw,L¿-?ËÙ`[ nGQ v°ö <΄F££+b¢ ˜rO¹è¤ËEwú-”­Ç ݇Â(ñØ0`Ë ÛÆCòHh¢€—åXïvxs›•îë­Âƒ×»OØÜ!O»ñ!Iߣ LSŸï“°½¸çß“›™Ä <.Ìms£)É<ÿRˆd\…±ÀÚ¹cÀ;‚ ~ÈJæ»ÞX±’Ým1ùÃ~·)¶Rn LÉ`ˆDS2O#vtÜUާ,t ªÄsà¥ñ„M-RLfúðY.©‹Nú’T¬í¿î*<©}îüÃ?}sÊAÁ †ñ÷¥SEÌ”©÷®ÍÀº±š Oj¼ë/mLŸÃ\aã€e Þ‰PYChŸRŸMTiÀÎ×à!×°M¶d, ˆÎÒôèȳÜdŒrÓ–W²V•`À¢×ãAÓyà ûùãí˜yI€Êr¨¾CéÊO ò×* ?}×WäôÝÏ:ÒS…Ü÷ÏÇêðØØc£ŸvËÿ'ïj›Û8’ó_AéÕœ"¨yAJUqt¾Øùâ’|÷%çK`E"%ëTþYùùeéž]’ lo³ÃKåéÅûLÏÓ==3=ÝÓ÷ã‹Ûå¬u„¢"dè…6Ú:Gaû•5žõ(ªö«¯4ÌÄ#ýÐzNh‹”Þ%-ÅN‰ÉåzÎg'³ǤU·w©Šf*b¢PnxÚá{`í×}c­m§óåOÃû`›éU=ƒEÖzs~g©¿ÎFGhqÊú¥Uw"ÀÔÎÇáMޤÁúè£#ÁB;Q`ì]°ì¾ØÛâ‰6§ÉY-׫նÝUÑ„â*1÷‰£Tó•;—-YZ.Š2à«âKýíŽkíNì²;º÷XxBÆ8ƒé¨ëÝ]ÅûúžÃøÆî]@袖ÈËѶ“96L-r1jX¤Ff(a|”€Õw˜hq‡8œñÑÝbÓà_›@’¥v"ÈŒ¡Ùy ·ÖKº'Æøá ï±Ö˜î<m;%K[dÎzµXÔl=![ ¦î T_p1/†³š„i,1$ Óܼ'(ku<Öç·If¦[f«¹)M®Ë ÷¶´àєї âðãÏÁcŠ›€Y…_Nr „\ÁAÖÖhj.3˜¦ÖäWÿ^<î1ú\ŸBš`ñ¶ºšZ,ØÎYïX…>yì6 ?Ä<.ûŒ½\mçïæÓt”ß!SbåcSGÉ>@;öÆ 4Åcf©…£aùÁk0·ïñàwùxrÔ+8P4ïÅC**imÁ"¢^F¥È©ø©É­éyØéP“…–îƒ.A|O*2Ú˾.ÿõv>}ð×Û‰B¢ÎÅÕh§ÜéU r™Ù™$`àñªŒ- ô‹Ž:g=u¦â‰f/Â[…ƒ#w©±)°C‡ï‰1(ÓOÈîÁoªëøÿÃÒŒ„4aÎ×ÖKjz…vêˆpÔA(ܲ¦vÖì½q_‡„xày{”dK(WJ¢¡\L—7ïT23ÐKU`~ààÑ2·iøT]/(‰*âHsNÉè55]@;þáuin3úâ lð³ð¸'¶„æE镈NPž´“Jæ¶9˜Îé+0«pðø|¶ãª^\O¯ÀACм”8¡âÓ¯%JœŽGç`’’rŒ WOm;b¾¾ØGD<ìC‚}'›Oðèzr/ëÉ'„‹Rµ:§1oQ`Þº{¡›üvù6&²qœ×ƒáyÁÃÃçÅA‰Þ¬WÿUO·Á*B°Î £ÔTG†¥gˆ¨†à½{á…ôèÁÀ£ô“™ Bé$L/½8UÇlf#?ÉQJ O͵_ËíQ^Güxœ>ß$ÑiBtxJî¢#¡bp{2û óõ— ?nC³ Y=ÁoK’%TKIpÄ,=•Ú9vÆž¼Tf@õ² Xx,]ºIeWŽ]w™ºe1(VK-hgÙ7À²1˜Òi=üØsð˜œ3üøÆx¼©·[øJ³äñ„èœ"GmSB;Ï“)¿Í׋  €ƒ'·»÷p©î±à’¼(…Ãa¥ xyá¸=›RøL‘ñdàñŒ'©ÅA xni|Ò™Æ3#¾¾8Oìœ&ó^ˆZÔêÅ£Lp_XlL 3¾YTˇÙ.…]ÙÐ-MÓ†2‚È“ÚYÓçO/¨! 
?ð<Ñìr1ÆûRKLò·¯ëjÖnYP#ì¼NÔ£`:¯Øú<t„…eqvŽóâ:Ïó͹À…]ðÄÀÔβK~e¡&c|1ÌÂÓ™-“Vç6`oü…ìR¼Î¸žÍ[/FSƒì‚õNåe›vl#•.8g$QZ¶i7|t‰iò7*K¨qƒ§€+­»¥'¥÷Ñ"ëRÓŽ]Îz^`\$²6í†OeÓ¼'*L<ŽëŠ5’ûjoðže‰¦F&a,eMé ´ Rîq“h&¸%"8š^íhÑᬜ⋬œ.lÚ²ÕQ‘Ú‘¦îdø–à…¢ñHö¥Û=dkS©·µvÎò3߇d¦…Ý#ªÐˆ´Æô—섈98LYi N«áÞãET^öÀÜ郾[\c¯ü !?Ö¦;H}h’¢‘TA€Ô.س âñ^ûrM;v:ÌG‘þÇ‹4v‹ËCƒ@H›¥aÁË®7}ûã…eX pð˜0ŒúßÜ^,æÓ$=KHÄà)´ üÖ1ÚÛ¿ aø› <0ðxvPü~9î÷¹ÑµÞÌ×€þm"À}ðï|3ú Ïÿr3«¶õço×Ó«ù¶žno×õä€þkóÉ3Xý‹s O~¸œ<ûõ¶út>_½˜bôí‹Õg°E]mêÙ\UʺIP´ÒÉ‹8uþº¨ôEÐïj3@OEõÎÖðY”VÂøST5¸™Ñ ?óßõ§ÕzZOÞU‹MýûAéX!l¤¥c{ð ¿Œ Œð)ʤ†.FÛÕhz»^ã} Àe½ýçÑrÕ"èÓ¯·(ØCȬR^ÈÈvCz-’ÊPá·ôj >æßwô~góòݼ^Ìο[¯Wë×óÍöùr¾ø¦mðò8úøÕfWúmzúùßêeÝmO4ˆ¶š/pŸÿS˾–é¿y.~› ¡..f*ŠoÎ’Å›xÏF?¯¶Õb‚Úv6zµÂ¼` q6zƒå°§ól×·50³{@ã4~÷Tú¾Ú\Mž§7·ß}w©ìR¼„~Ì»êzæ <}]m¶?­W—ëz³™lç×õùás÷Ù¨ùw .øE½>…3éÙȘ³‘4Òº¨v¾^M«tä[x@Æ¿iØúFù¿¼y=yvµÝÞl&/^TÓ)tý8vUmϧ«ë0öÕ¶zñæû·ßޱ{aaß{œ\Ö‹Íä?Á ËËĈßSwæ@–ÙÝ0àXÿÔ yCŒ ²åõ—O?¿ÝÖ7“gí3ü¸žÁk~«]{b÷®\^m÷ˆâÏ«Yýó-ºl”`}öW$ï³·ð ü ÿ¼¨Ö5¸î hZœ¿£šÝ,æÓùvñé„LÓwÐc°Öj§èyÙî®`æet®kpÀyÿ þ „гóü³ÕLïfÅ—I=G³ÆýzùÿfÎ-À½üý1ý¾L²Ð¥{¯~¡u¬ÌÔ—üŸWÚ ^p~—g#°Oâ‹i½ýÍG¯ó§Ð]pðSòëßzÚ±øÒ¶9d5^‰1=Ö8nç®üý'ü2úÓ|9ß\¶È=×>ž)ÿç¿Ä:`nMXD“ñ!¯ ø¡NnŽÚ@` /±+ïÒÍTCØ$ÜjÀ›ÎMíF´D,¬±ÐÒR ÐÎ;™'h#ï£AäàQ>Û>Ý‚%ŽISbáÈͬ¡ ‡<ÃÉÀrFW¼.Àž NÜÙ[¯n·u_KLÄNé%,UåÂa^G™ïô??¹9)øÇÁãO=ÈyˆÃÞ»OnˆóÏàpÇ3 j_ö٠/e/¸sç¨AߣM,yÑQ‚¿©À[$ðC;!O=Å=‘´¬¾€›ÀÁb¦Ãû`ˆ½$D£ó^k¨muhÇ/•[‚­ ü¶xŽ-¹¸Ç¿:(RºI#æÎ ‹œ€±Vu_„Jí¼™V'›‡xø <<6GŒ½BS„Ð\TFj"p9µ“Vf[ ÎÞþÝR;ìí-%¶×Räák)<R`$OÈVã·çSí£wóË/ËxB«1›¿‹¸U‘ÚÉà3¤Ò?F‹ ÕðW”yx´ÈhzöæÎ1šž1:I” jÚ)­bÆi´ôR;K]»MíD4|Dk`L—_7^+hÆÄ¼óÒjk… ñXás{Ã_9“±»n³Å«X€Ky ¯ÂåzNãr”ž0ÐJ†meppÆÕíöj<«õå½¥6„ ÓAhâ ¢i—ãÒÛÉ Å= e‰ûµM;+ Œ9¼':ª,dÓ.¨¼Ûâ]b ÝbÔ˜[IŠ@MˆÐÎ(5€qU9˜C§ñè ­ê'†|êžde Ya®<擄a†•wCûZö†„(`Ñ9xØ „γ"rù»Ë+[¼Ë¥´–’Zn,Iêò©xzÂ;µŒ”+¶¶&ð=68zà19¶&ö%J‚#Öñ7Í´2$P<ÖÖ9/3ÒyMÝ«S7¾ÇKÎ'ØŽÓ¨µa,×óË»`÷—7·—óå¸mw÷2¾o‰í®‚lÛŠ ÖPjíŒ7TüXÚ2€ZY`êæàaç!ìröx=„ÃÛ¤!ŠrÐ1­<;‘w–2zõ[²Ý°][ ²ÄÞ#ÏÉ·[ï½îÅ]4íîóv’™ÅHLè.e·m!zMøñVyÊárLÒDCA OÄ…T†Ä„YWû•ݹà¤äž´³úÔ;¯Ã×…`¼ ghçcCxO Æ*Iã‰&S˜ÝéB&¦/,E@~D§ T>ëjô(Šsð7<)x”m2í$áò{œÒ­óÔ$‡iÜCÌ3ðó¹w§pcµ;xL¾MŠ.9Ã*awe‰)9Èlë©)Ûå ÁÉÂopS`]ÃÁcóùTê2K¬ª¨)C;§ó]äd0£%f žèKØ„iµ­«ËÇR%æaÌ%‚"W)ç¼ÈfN¥3Æ@jɶ|ʈîƒñZÓx”ðƒZ…‡”e–(€µ—2˜E‹V­)`Žb0£±À® O`à¸;ïYßö_Æ¥­õz;Æ/5Xç¬ë0@0ZËËÎN¤vA‡AÍB_>ó@»Á¶xxbV£°ß¦*B|Χ£V\ç5û4«qûw¨c‚¤…ÉC¡c&2ð°Ïa‡»µ,Üœßñ—!o.Ã)Á‹–B§˜©Ýq;ayUˆwø»aL<™,qÛÆjB„^ya#GšÚ9é³Eff *øŽ7ÕßòèÖò(ÓezÞ/Ó0”ìÚ:áỉC®.nOÑ•,0oÃK4Ö×=ð{ŠÖ¤*›O›m}½_r†œ‹IGqÚÙ®3! HZ×Óu]aƼ;ùºætJ5^ßå¤=72æƒuÁåà‰9§B˜T6çI€wò;hl4åæh0ìXE•šÐSÝIÛý§R”ƒ4p…x\Æþ¾i„­%ĉ2BEjF‡vÚ¸œÓcVúöï…1¢)¼·Â zà±Ü°ò½¥Ü¿6¤BtË Çkí$e¸ jÐ%ù1ÄeÀïHn’oÈ9xtîI> ÌsNKkˆkÞ©b¬ÎÅIçŒÖÁz£‘&w“® hzàa¯w¦÷¹û6˜D¶Þlg×ZÝÎÆmö•x,àqõ÷ÛuÄ,I1Ä彠»eM×™“ùeôöý¼‘s‘îLF³ù&%Mw³+Ž¾È†x¨ÛNDUn¿³ÛàqFè‰7ÇŸ˜]—é¨ÝErË+!•íÑ©NxPn}Ý™S™åA¥¤£;íM(¬ Á)ía·|ÔOͬùÅõtÕŒ‡&ɼ’NÒý öÉ ×ÞþœÊ¯4Ø.ºß±Ä> ÖoÑÞIZɽOίôÁ¶š¾OCb(Šy24¾G׺ô‡¥Ø¡.È2/ Y²i§L–ɨ‚=ðxñaÅÆ7«õúC£ý–¤š2>zÚñª3?ckv _§òMEm”îÑïŠðM£E«¦•zb¾¥ —| û¿ì]ër7²~•)þØrR"…û…§\u|œd“=N6e'»?6®S#r$qM‘\)[ëÊc8Ovº1Cjtá`@Î0r ¹”$ä|èn ûk bMƒÕ†]ؽŽjQ’Çm0ƒ £bQ²ñŠ–\´–v^,׳lÙ4Å&ˆ®·%wo—"Úââ}cD´œ!n’ßÈ0›„à ? 
ºÉ˜í-mNš>uƒ 0ùá››¡0¢µ¼p'–Û¼+ò(†€'ø¢åÊÁ«¤·©Ðþ0xÜ,·ˆú[ª©ZJïÚ%¶cÁ§Äº¶b<Þ&µ¦Ü^Š#D´ø+ ðh»ßßZYÞž™PÒ§y¤ÚÖx}).ÓœlÇLÝY/Æ©¬6GãxFkjÛ±CËjß•ÛiqzÖÓ4–žUÖ› ÆvDî·“·-;Å]F9÷c­a‰í©Ot_Sʇq݉ß_eS¼_”^™*}*Wf&K|yhgöÚçÓ±Å*CØõ‹=ÂLÏQÚWù¨h¾Cò‘ÓÕE¥ë骟ŽFóõìöv–™RÏjœ¡JIN½ù lGL'¾K8šZ+¨vÍMÄí™>GsâMó`;)÷sú¥ÖƒOË)Ÿ ¸Vz±Ãì!I Eº0ã€>cÇP«ÚÙ1>áRϲE«TúBYh§ìž»²êìš!Z ÁÃyÛ3D5É¢|ZWZ€Ÿ5^¤´¥c¿Øn@/Ô ݰ†%êÜ¥N€>UC°b‰ôìw+ÚÑšª;°Ù€Žt_€: OðeZÂ=™Þ±Q¡™P"Ô“æÆv$üÀw†Xaä‡àÑ{Œ|”Õh:Ï?ÎÜ]ÝEEÀÍŸýédöÁåY”OцJΙöÙ%Ö¼’ª Ef—Íáêî/c ÃÃL»ÁÙ|œmÄ&êÅQˆbLqá í$ßCËmYcN­»Wožà3^Ém–­ægè'´|ZÕŒa=ç# î´ í&÷BŒ¯9L~Œ ,0õªôd½ôðÄ“æRøLÐe×- Ý  A'I÷º º7ÎÓùÍU¶Oî§e¬–@(µ>ܸ¹JvÂÀCl²9ZΡõ<²+¦] ª îèÕ`§>ÀØŽÊ®hVG€A§Bðì›}¯‘\1Õõ’”TY>žöÈV™z¡*j8.[rO' ³¢ír&‡sz¾ï¥~·Å%ªé`í&¾í&éž·¨uŒï™ú×z2ú# FT/û—Y:]]Ž.³QÉÅ=6©A[Bâ ¥qÇ‹!û#bäÔ¶VEï¡C-§€Û7ýÊK›²d‹xºãV‚(õ% 5zÝJlÐþ\Ð ~ŠìÐR3î¹’+ßiC×NX{è¤Ð)>IEk6ýX„¿eéÕ­‹úFôÀàxÔÊútop7i­8`Wã1 ;<üvš‡SàOÿM§ýÑòfŽ1bÔ>‘ã)m¨/›e0s`[y;æÒ;gºSS¿ënÑ{›-5‚ƒóñEžÃ#q¸ƒÜÇ:B0†×±zˆ1›á!¯þ8»Î¦8ûð¾ÈF«Éµ Œ´O¦¸eÆ íÅ«´ NiwoÍÑë`×·/úÛ‹>±FjÑÚÅT3€7ˆV¾U-lg¬P‡[öaV†·+ÿ¡Ô2ë/&‹l:™ÎDû„ªŒTÖwÏTÑŽË´÷1Ž€~TRþÍ+XŠMí\²»‚¥Pj-®ã hg˜nÁF÷Ñ9‚”ÄåiY <ïC2»† î¢Mpœ„Ý(F=i4×NÖ%¢׆ÏPr/µ†KQyÈqºÛ¡ЪLÇ0ìÅØ…ßH€vZðÃM0\»!-;ÜþÖ¹ãŽ8}Ïg›{ GyPb2ß2Ê|(q5Är&í]C¸AŒ M0êVjÿ=@è‘¢»mÛ(å›±iÁiï§ë”ªEJ…R¶÷¥ögÅì-=X•¶À±=yÍ¢i¥¶^°Î_3´;o 0kj`æ‘« šEö Æv̶Gœö±¬mÚëÕzºšlÒÙì"Wyà*‹µA˜/à€vÜÐÖ5ˆæ¨ç-««qKp§Ö훿°WíÙîžö×¶€ÎÇçý 䘉è=±Ð“6ÜçZ±a-œF;@ûP…hgëXãéÀÙ*\çÐÌÛz¸¹î£ÿŒÜ´7Åâ2y¶œfyáŒ%¨ÔXå)ÖW´kë ´ÃT8¸ I]8ý©G´x†_0.|Q Ö% ´½95ÌPRÒB$ðϳyž÷³t¡û˜qÑ‹Ãi=8ÝÍÅVø†“Æê¦êÀ]Á‡é½9RCmkÛöê1{¤ @8>+02ªVÅ÷µÄ) ¥Â‹ÓâYªƒ­µ(þîàõ1¼vCÊHè³Ì›ävZêÖ¶AfÍ1nßyxìmÃ=²µX>…qâ‹©-J «S{@H«[°Òb=¸Ÿ§W‹) ª…éY]ÀD$aFû4í¨ ‡ïÕÛ[ã8ne'\íHâµrµ¸ƒÅ¬=\;ÑÆTº‡òƒ@JÚBpêöd»™>gy"wæ>îìÚq&[Ù3t˜Ö›ãDº=¨Ñjƒ{˜ÕÖz*_»vF¼Q°}PVÉ=Ú¤ÿ’Jøv—÷¦Ü4@±Ï¸GOW¯“Yf0Xó¬¡úä|2Údšmý‘‹kj„+åYûÀvƶ±@?I@±É"½Á*8»Ð#+«?ePö²’¯Þ w®9Ë'K@¿Aë´µU.XÜ5SÍçWK°ÆU6Z­—Ù° ÿV|fØ+0¼òøéÑ ·ÓÑäé|„.»¤Éæ—)“jh˜$x_Ñ™)  ì,åg†ŸgbŒ7þHz.3xÏR‰Å³áW’fÂ3+‰k…ÏúnœrxŽÖôû¡Òa»«*IB¶“-¾Š2Àæ#0æÑz¹ÄT4˜ÀE¶úd¶±º¯¬BÁîBÆÝÞ‘Ȫ¤Û™…sF‚ámg–JÅçÏ'ÙtÍ/‡½þh±þöÛ &gä%(ú®Ý¥Wc%àÕ7i¾úy9¿’W“«lð |â>IŠ¿!hº:Ë–' @£ú$ðŽh8­»+´Ë†oæ£t yH`ño k}j&ÿëÛ7ÃÞåjµÈ‡§§éh]€]¦«Áh~u ºOWééÛï߽꣆ÔÂás¯Á&gÙ4þã}î\¹³ˆß]w&`,ãP×?*/ cˆÖòæþ«Ÿß­²Å°W¾†oƒG:I~T©3îrÐá˜ùe™Îr—…ûl¿9øÛçét:„¡ saFõYªÎF©Pð©ìÓj(1 «àh$#˜¶#¿ ç¬-:&ä«ßÄëÊzCÚ¾|³µ#÷ˆ¶Œ+JI>üüço]D5®¾æ¼û“7Ÿ{ÿµžLÑ.{¯Ë:ðë7ÛõÃ"(Åל²ÞnÄã 37­à¯?!Ø«ŸÀ¿61õ›Éy6ºM³ÝšÀß ð![-¦éÈ=èÖë)‚¼rùïÙüãl¿.¼ûáÝ,]ä—ó•ûÓ]RX$Q§Ù²ãÞõ…vÿ°•‹ËÕ#¢øi>Î~YcæÁ'˜]ïý ·÷Þ¿ð׳t‰—p ýâü‡¦]&«éÍ­yxdZL};#!z)¿_Õ u·~ƒ…‚´LfdŸ@¨è/áç|y¥ôŠ/Ýð„xÞ…_/ŸÏI¦0½üÓv2ý¾Œ›  KÛ¨¾¡ ¬o§©ûþ§ùuéàÙI‚þ:ÌOäž[/¿óÎŒWûU.(ø*úð»þX]ÜŸÛïû¯]V-Ö ôÇÁ‚Ujn9ŽyŸ|Wæz!9É 6 ŠJ®®[š,pjMý6%PeîUbb¡&Ž#L/ªaòóÏ1nì{1éÃÀ€«Ì Ý¡‚ãùiAÃï¬M^ÓMiòóÍWŸÓB/šE¶0Z$Ü (aÃLp÷ÅCLYÌ`°oAi.³óu¾“;XÃ9'~ÆjUeùû©0VEðÜ¢òÚ"´#ϱ*" ü«ýÒ»·6tÂXƒb–ù‘ÉêÁâÈX#cŒ52ÖÈX#cŒµƪˆÁCSÆï—ï”ÓˆŒ52Ö'ÌXcšhêg ¸u²SÆ*±F¡ÞÉX€VÊ<3ƪ™`ÞÕ.ÍdeìSa¬€ÞU_зl§Ô³c¬šá©]¢üÒ©¹mªÆ ÏF ÛY•KGÆkd¬‘±FÆkd¬0V CPý~ÙV÷•GÆëf¬šn¹õæa ]eç@ûŒ•¨5Bìf¬š[a˜¡~ Õ ’ÏŒ±Z!€ôï:œ¼rùÑSa¬VHjÀýèÅ3Ü ÒQx3©_:’Ó£2V Fg UoŒŒ52ÖÈX#cŒ52ÖÈX;`¬0sJügĬ¸S02ÖÈXŸ0cµÒJó·×ª¥¥ºSÆ q¶R»«U˜éª‘Ï”±j®$¸aâc5ÐN(öÔ«Æ¢cZ+æG/…ynŒ¥£3Ü/µûrŠ.+^«ê\cd’DÆkd¬‘±FÆkd¬]2Vð·ÆX.Dƒ†™ÈX#cý«æ¬UQëµj­µí”±rΈØEXpJŸ)a5dÁ«Ô`;jžaTBPk­½x~ÇX¡×Rɘ_:’ñcVx"îÓÖª²Ê k$¬‘°F k$¬‘°v@XÁßbˆwùÂÝaÇ#a„õK ¬†Z-…‘ÆkÕo©ï’° ©4ßÉX€jö\«5x×–ðnñÆvìÉcTx÷% ÐÛgwŒÕ¢÷¡Ü[Þ¥¸û¶µ.+<ÑbµkÕ™‰ÇX#cŒ52ÖÈX#cŒµSÆj %œpoy ôß:^ŠŒõ‹`¬`ÕZ K¨×ª©æ¬Ûc¬ÄÈÝk¬ Xεϰ𒡠8aZ[[WÎ|ÛŽÑ'ÄX7¨,cÝ ñ¶Nk¬e¯9%RX¿n91ÇZcÝ<‘q)ñ#£<^nkd¬‘±FÆkd¬]1Ö¿åGÖ^Ú¶£22ÖÈXŸ8c-­Up!j‹ËnÚ1Û-c•Úaa¬¡@9#Ï”±Z!´¡Ši°ÓO±*ËŒ¨»ÜfÓÎHõÜ«ã§Vrᕎ¬¹ó¾ ÆŠõ²8\7@öx‡ÈX#cŒ52ÖÈX#cŒµ-Æjñ ךøý2µqWpd¬_ckµÄ*ÛÀªM§×±º5V+Ø.ÆTégÉXÙ€1E,1DÖ ©h÷ÈIÐ?”±:TÀ¶‡oè }^Œ5H:ð˜ã1V÷D†Û#hdJDÆkd¬‘±FÆkd¬Ý1Vço¹´Fs¿_æ$®±FÆú0V´V VM‰ßª5'Ý2VN”‘öqÆê€À•¨àâY2V1ÀUj˜¥X=cuí¸QO‹±¡â™1V×kI­Vºt¬<c-Y¦Œõ#“$î ŽŒ52ÖÈX#cŒ52Ö«ó·Jhp~¿¬h\cŒõé3VJ€IC¸|xËéß+V]´ƒo­úcºÄÑíìZƒY¯gpöJ\p™ô± 
`r˜¥h¨8ŠaLîx¾8¬j×gŠvÜtYªX„Ô„ÐÆcœ™Š’TJ3@-T×Nè§EÃÐKþ=Óï“×÷ÐÆ'` 7‘+œe¡Iv ^d%ˆ5²ÙeŠÄêQüÿþ÷1m3=`Ìõãuw™mÚ™ÊM;Ëì| ÓóÜÑC®Aç¯Á3fy²˜/ÖÓ•Rúæ¸ï27:þŒäÑ1ë‹Éêr}æfÎmNGÓ ˆ²1/s"§“|¸‘üb¯l9K§ÛtÈyŠO¾qΆÉGºe–h®©fµì²hÇô^ÝÞõéæX²h'Œ¦k3YE;Jå“•è…íhTâÐ]ŽJIB³£ëê¥xm5ƒM;¡é>æ[„¾?¦‹Â„?˜Üyî­ÑÞZkÕ8y—C  -#º6ËT´Öüáp¹Äœ¡5Ìâ+Å^p Åo¢þ#M\¬æŠÉÚ«¾7ídåÚ·{ù>Å Öù;Œ+t kßõ0mµ¡Âÿ0Mw?L‹÷ÉÏËìz2_çîÍrZ&1Oót1Oót1Oót1O÷Àùn4Q~çk¨Úå|-“ï“t±˜Þ “bBH „½ØzØ„'“™óÇYÕ1&é þ[eW‹UÂvÀƒ)Bñú¢?E;fo.«4ÿð?Ëtq‰è„…À¿ðý &o×3ÔaòÏùYB’!ž‚Ÿø‘ÇÂ脌R?^Yþo ‚• ¨„ Z0Þ„ÝYJX PÔ7ËùbQYè\f‹ùrUD—Yº„PréÔ…1Ôt>_ìÄ£ˆdþ¨M‰Ý—ññ ï·ÂpOt,½ÊòŒƒ;4È ßþfE÷mðd~ž`ðµSfæoÓDf•¬ÄnÅ‘»Š£ ׄ4æ(ŠkŠG‘PÅ`š_ÁT:_ƒ‡gø»  —®W—8?Þ3lÔxg× è”SëQª!óŽÞ Ú¶¿Û²qü–ÿ?{׺䶥û-XúÛIK qÓ–k×ë83Såݸìd~Lâ*³)¶›kÝ"Rí8®¼Ë¼Ä¾@žl@JMuKÂ!›‚­ñTMl ¾sð888<è“x-0²Ó¯‰³£Ìk‚ÓÚb°³zý>¯¨·\eé*«fuÐAí›Ø†¹ Šzq‹IrÓ“6Ö¨pËU”6m¨^ âWŒÇ ?&FN<²—)¯öü¤E>œ¬r»«°^EýôÃúÐfÇwxòž—q˜ž×L1Á£ÛΙ[˵°¡NYQîjŒÛáM™í4tž|(†ÙEÑÐ}sŠýùI‚•IaIrzY||‚Øîvà ð†RŽþpšË~ f ì81x‚ì7ØíAÝïö‹”DQ‰ËÈSs¬8½Ñ +¿$4Í$\ ”f—ù4;Ø/±ŸfŒ ‚‹ª 4CŠÓͰò³@4ã4V(<"Í\™2IaôçóɎͲÛ;ÂO¶˜ƒŽŸzNë T”‹ 5¡…@¶9O’r®Ì»t9\NöŽôSNNð”k'T”“šÄ(-è0”Ãâ‘4åò‹Ùðz™/¦‹ô°£ü|SR*”dæÄ|k+Qdӆơ% Ç"Û,™çÓä`Ÿh?ËŒR #’>µÕ†¥?zae7,½Øb8<âsÑ«ò-ùýÃ Æ a~Q‰CÓê¶½Ñ /s˜¥Q1&c ÊBYc×ùª<4Â5ñ³Ê~$Œ‘ˆ‘S›`HIú#Zô0K¡nG‘é€v×rñ![]Ç-í÷Ý«”ÍÒ|{ÙEªþH‡UC ý¥½ÊZ£ððàûËëÙ‡d• ¯‹åU¶:èxÒþ#%NHÛg¶®? ¢µhQ…Õ’bðHž‚u÷,W Ðül8ÉJæìúÆN ”a¨Ñ®H8âáDênh„9óTZcð¨–t«£¢A}€ì4ê÷ò+£µÁÒI®§×h &ÐÚ…ÆÓví*À8ÈÓ,IÓÅz^î‹ ¨U|3Àýàtï÷¾;i²ècÑ?»$8™ ]ù£©ð|ô·ÕEþ ñPÒv᩹ZL³‹|>±¿ )whK9 ûæšÅT¡£c38Õ# ã(Âã‘Ýæ’{ Àá,+Wyêü,ÚïçÖ1£# z¯uB!:3+d ”„i‰ÁÓ:p8+m™åbš§=jçÉtºø0„ÿfUô¿Õ¶ßi­m zA±¡u=¢îÌ´Ta\„WŸöZùñ‹†£Þ]Þ“§vó¦ºÌbÿûvyºYÞ_VËû9by&ë,*`D]^Ž£?þùÇ?¿ZÏ·'ƒ“Ñ|j'­ìÁ÷ÿ“¥å8š%ËŸª‹ßÞ$ó]jz0H–y}—Ò`Õ·È ¶6ͪº>ÊI5ªo’¼¦ƒÇgÚzŸÏ'ÐJõgÓÖAUwmÅ^5&®¥[*$óù¢¬L›Áò9ØÊ“lT_涳؜_}~º54e/` ÑñWmŠ©*KÅõIëŠlz¹yyx•¿»&×I>­/¿j_”fÃùb’ 'Ùu6µcïN-¿C5›{Pì]r0ÃÌ–RUïÐ!a?è:&ò®í›K± t>/eüˆ>v—9¾³W‚[ýÿäÔ~§ºQ2¸«Æ‹ Ÿ/Vý½Yþq·ï/ÇÕº÷§Å…‹˜Ÿü¥)Ø­bV[—ãZv•¥ïí jwËØÛÍj54õ}p;1¬AøÑh/)ÊjŒ²»?e£CºÃ¿ö¯ï‚íkñûUVÝf9ô¸ó*»ÌVÙ<ÍðŠyû~üi°Îð@Ó$fiv1œ\\ša|qɇæ2 Õ…œH£.™ |ðûÛ´†/³to‡]V÷cÖïÍÌ’2½²—¡`P‡ÊÝ,„GЬrë_h©{=¨á²øËj±^Öh÷É»[þ*)¾k¾ò"¹È¦¸÷^g/òùúWÿ{¶› î[W[߸\ëA½!I§Õn—v©êÁà6 ¶³<ï¸ÃrìeM»z¿qÕÖõn¡îÚÏYçÀÂl^¶†½©~{Ô묺Éz+»U½pæ[£Þ»vJ72@Ý›Mô«u5¸~êN‡ú ürGçç‹òIåYšFðßUž]kTC¨ža*7}ÿìÕó§?<·ckóäǗߺ']•ÚP«mîV{[¢í[øfÛ@‘.ÜB¼C㯻ººÊÚñ½Ž¯ v—Šýæ`öëÒ~p¶kálÑÞÉ=ú÷è¦ÄÎra÷ÂðÓHé,Òênëç{Úx;øÛÜá³Î^ØÐ=D߬õ›hðÐmv§®y·5|[·t±¾®×Øí¥½»$íH[4îMÚÝZ;JÛ%7ÖBÔ‚%oA[ªÙv´±WGrñ8úê«í Ù®wY¼³;L¯¶2UöÀV¢ýöD·ÞÜÇÝŸçûAÿôsx°×®]„ÅÓMK` [B×Òë©?ÎwÕÒ•žÎÖWW›íÐFsï6³±öm.»ÀéòÒk¶M̤’þK£ Ü±Sí“9e½ND<~â3Œ6xt§ì6ÃãýÔê Ž3’iΩ_ ÉZd~™b¨ .ûxÔçc‡/hÎhƹñg3sçÉŸ1¤¡J#ÄAØ¡™‘JwKa:[ÌshÔB³¾N×GÃèôˆ 'ÅQUNµ¥ÌŸH6}ò ×gZP‚ÀcØÆ#ãÓ5,›‚y’VåýÂxÔ£l§_­Zâ9QÀÁži]ŸcE´ò|ÿïÊÉcçâ÷1i»Á¶é–<ŸõTâi¢÷íµ¥1bV‹k;cŸ:¯|f0áñhÓWdãCê V‡ua§]æ×.pÕ¡U•Sô¤†ì¾¡æå†Ò`ö)ÿPSª‘û²}äÉÖRœ·Pÿ}QŽˆ×2ÅWSŸ(þ¶Ú¢ÔšïÚØ¿âQ>[< %æx”…Xü ±pçÕ¯çˆ{USMåÿ]y¬=ÑOÓvÝÿöz<ø6›ìEß{ì‚fDÇ.x«ªÃލ·]­} ªïd€¨ºk_Õ-‚ •=è£÷Û jì1È×Þ}‚ üŒhd€ÜUÖŽïu|m°3§ÞĘbGý öÍÔ»Ýêf㎔éôV——~?ðŠ–Œé߃jq,¯{~ûN»6‹ƒ{ÉbŸ69!Â÷}™CobÓ—ødKÆpûû‚“ÆäáoJéî7¥Òÿi³«œjBcå u—kJ„ñãa±ìíÓæ¬L'‡?VÞQkÁ¤çN@WNiÜŠñŽªØhÉ ñ£7"È%l2Gã‚´¿M»×µ_ø4«¹½Ü?_iFègµbz”äô·^¶Ã#uo™[z9t—Ò£kFadž¼ÉU9Ú–5Ÿ_6MgÒ/›&4˜MèËíT•ã_”O×Ì.¦Â{¦È1qoÉ6î¹”âA7£"…2Š˜Í÷+‰Áe–âñ´¾:Ÿ³Šr•%³EÏ«›‡åb1}Ÿ—Öž5>oJ %½ë6”ãâ ›·¼cIh£¸Ô~Ùt#+:ÿ‹ÆÑRXü†” b«3é®+ò†5A9Öú2ˆÍ=>;ž%éU>ÏêK~é«lQ /‹²¢³i˜/¸Æ÷Ïý†š¶áD=Ž*ôïgH£ÛOˆfÃà@áÇóÖ71nÄö%:ܧ=áóépIaöõ‡LryÔæBwq½ ¦‰McFhoð(e!:WRK"xØ=®ÚúOŠðh6Þvöx£Éñ"›†2Þá¢ìc¿ÿŒ+*Ú=' §b2sß–ñ®®`Æ Aà1¢›«'IÓÅzZãÛ*<¿ý`X$N­^hØ®*é'¦­ã»Ñ/ ±ãiG±>fýCŠó9OÁ¨ZúHn¿Pëæâ Â`¼qˆ“ì6xÌ=O)ÑÊtšô"KܺÓ}È¡œ"},]™Û¨q0Ýéíä§Ö^šñYŸw@MòÎý0¹r©ß¡§8íó)Ô3°l ¼½ŽÌGd(·¦Âɉ ¨$?Ô^„ ƒÅ#„äÌG2zï{5[”C›FØ+¿üØ ×Ý}wl ,,ãýnÄ–cî"£D݅ɱöùëm9IOqèyßѬí·bÌÏ¿ 7Mºv8¬ ’#ð´Ÿ÷í"º]Èäê"Ií%ñ¿~tqC¾ÙÅJY¬|F”kšM«ì²Ê-`±s\x`²"Z.–ë© ~±ð¿¾¦£gÚ:‰J¥ÜwÀÓõÅ(]Ìn§ ïy9|·8¯¤>ÏçPÇ Þ;Ï~…æÉ´NDQœ_&¶ñNsL’,–RR„dqc•^/? 
{½.“•eCñ: ,úk›± ÄÝÓ+5Æóú¿ƒCà$hÝ0?8¡n&õ-€º}‹Ôè7ÑÓ;À>d« z)N?Â_l2„ëlr¹)bšET^%óˆÍI¢µæseÓdYX‘óyšE· ÇcÊÿa«˜N£rõ1²½v6$ö (™±¶ÚßDßå󼸺¯ú£GŒ’‘bòÿ-sæý ÌE;P©¼K”LTƒÀà<‹lB”(…Ny—gõ”t%óI´L>NÉäz͵dÆ^Sy½›Æ³"_úëæÝ.syÁ(v)ʲOOW0å”™K6èzXñˆ‘…'³áãÁ/ëä£Íè“®ì|¾H—Ã:ãÏW r¬™ ”IzaRÉáo ¿Ðü2‹'Rž’äRd𛡂Âø+I2X·.Œ j¢¤më»Å*ÍÆ—É´È~?¤#„` 3 Ü¡!ÌØwûv½„Õx’9vþ ì9A©7:Ò–;œ,]RçŽË"ûÔvUzŸÚ¬oézµ²F) —•ÿÍ5·`2øemûû2°SdMhC)nNbXýÈ¥n²+Ù¤:õ´ >Ù|öOþxóæîJñ=hp•O²âaTÝ*ñä᯳òaTßnñäá·îòjÛS.!ôÎ2ºÎ“;cr±y=Z®`jœ—ÅFŠb½tL…†gP¬Qô"ƒ%)³Â”ù| Œ>¾¯iãTö–Áƒ•µ¾éÑZ€'Õˆ;Ô:lÝ™×¶­‹ø$½ø}=4ºïÛÌš¼\,¦ÛÈ·qvcÈ£âj±žN€À%¨ƒq/?dÙ<šåóŶW‹qô=ؤ¶ÛOuMµÆÉbZD ü0©¡œE˪§‹,‹Þ.ÒÆJ4K—oùr¹^•W™Ýã• máæb{ŒvȆ  Âa0%auõ÷køç·оu°å {ÊGB‰ƒ+ 4Îa’$ˆéˆ6Ï“ÜåxªÁš~ ð®V‹yþ[ä†zŠ'.ÑÝè9ì V/ò¢|4ϧëO¾²¢ÚW«]Ík÷ôÓÍcÓ|u5ÙøÑ×õJX/G®ÆÇȯ)ÌF2I`y|æv%c%õYôâL¦cküžEÏ3èj˜ÈÇä,z•#ÒÜÎc›]V-’…ÝZ²]ÖþšWãÁ0]®Ÿ?ÇÄœËFßÂGVÝgQõo°ÆfÙê,hTE1ü4мä6t».øb‘&Sä)4`õ}U­œ_Úòûã«ãÁUY.‹ñùy’¦ ú–«¤t¦=ô}R&ç¯þúúéÐêa "áðÞ3XˆæÙ´ÿô¦N†dñ»'²L6Ý`ûúeÕå1Æ–-/n?ýôºÌ–ãAýÌþœM ™ÿªf˜í®º;çÉϵÖ~TßýÀƒÓ« K½m:ýl{ó)¨p }•ÃöÝõï†ùvO½rZ:«WüñŸÂ(»Û¥ŽÜõ ³cæ‡U2/òMRÏjäØ¿}úL§cÚÿÇÞÕö¶q$éÏþ?äå6¤ûýE€qÈ9Ξq»{Aìä€Ý ŠI u‚bÜNÃÆSàœÞ”;ج…AÚþù~+Gᳯõðû¿½š£þì~â|ú›÷¿õÿín:C¹ìcCÄŬÀ¿Û†_–g¾ð·0Y?–•EïÃæA­à,w´ox¿m6Ù?L/ŠñýxVüq] ”s}ºº™ÆáE;±ÉjÂeøòs°‘ê áÍë7sp¯«ð+Vþ|¹ ììÀ(?­€…ùF³†Ã ²ryµ:Š?-&ÅÛ;ŒžPŒ9õÙÏ(¼ý7eüñ,,Ã:‹cçG\f7³éx .÷ƒx<-UßÇSû²2Z’k¤“òؾŒÂu¾]ÀÒ¦âî|5ÅZ8hÏ­wÅay‚¥\Áÿ4{No:èÅ[eúï%c‚&€!m­xÂÚ|PS7ø?ŸPnðâ›îï°¡ƒ~b¶õõ3÷4^å£Ð\0ð(~ø¬O;uûãýë„Ts/¼V‡Óÿz$Õ@·“"òat‹«;ȵ±¾›¿CíÕ Æe/”“‚r˜PPqÚ<þ~´µ¹%†Lüa¼Ç1Ék|z0ßC-±AÓõ)»6À "SaDO.2•‚^F¦„}p}7hK ÅMWÛÛÅ .üIã¨8]°œóm†ÎòÓ^8` pd¬ é,ë.VÞ‰gŽŽU=É0þ –€šf´¿İ𖵓×àf_.ÖAfôùo‹A ‚¿šÎ‡¤¼úϧp[à‹˜v+}Kž7 ¨šslFÞìDþ)®pÏ l4z·s–øO{FîxA'ù!õyçŠÓý˜ðòÐNxðs¼çs¼çs¼çs¼çs¼çs¼çs¼§xàh/hIïËb·ÛçxÏçxÏŽ÷®˜“q.½hÑÿ6L aU8c\YÍ|„Y¬vÊu“1Vî~‚¬"ˆt;—ÕþÿdŒ!pk˜à4²ÕŒ1níPšŠór‰ Њ¬tÊ™ åÂ@n¦˜vReµ î‰&àÔ]”‡IÂãs”Ú²k7å^sâ†pÜ2¡±\uŠ{üÅМ)½)èmWBSð8Þ¼ DRuªW”oÄ‘™THÇmŽº@õDÞ/`ï"ƒMHg:¸Hï‘X)IGàñ©7J·ÌÚòj÷ö­æÄ é8óàQRAW¤“¬yŇ¼™ÞvpÞÃ¥UdgÄãL–[‚iü$j+*Øk˜edÙ~ SÉW† j 6ÛA­<>KåèCeø¼êÇ Z»¤rÊ 0ô¨]Ia' ŸåR`^¹ÇoeeŸ’ðئ×ë”HN@è}ãËí"„½µFk).6WUÕA ¦¶¤«‰ AT–ªáí-sÀèÀ§”’‹õ”NF<`)JEãqܵ¤BW·(¬¼‰|~7ŸÌŠÀIÂðÐNpùÉŠö@§y§wt·º*»ù®J·\RåPS Ú®†¦àq2‹¿»Ç³]B*¢¦¤QÜneX5¼¥U_K2;Öï“‚‡ËVË£Sn¥ ¬ pÈÚQ9IÆ1/e÷·'€íÂNÁcSû|Úu+¤"*FZÉ=ø œÚßÎÕjÝó¦ò™0Ï:è­“‚'¹âé*D‰öSà,±—Z/±Ì‚¢üz Ko³ÓDxãquÒ !OrçÓ#Á¤úq§„¨TıvÔñJ’]ÎZ“­TV B =l¢FÐ#ñ]ôiÆ÷¼‹Gú ÂpÈÒ9ЂØ/\3Aö:&}ó8hчÌYÑÏÅìž•Pö p¿ôú?ͯFè|Nz¡˜H –¼è‡r ½Û5èÑÍÍìþ(ê¿>‚ý×G¸ÏzÅŒ¥:L—¡¨Ñöbbæãò² [d%°,†y*›5Ðí&Æ'«¡Te'! ÿh¦ýîuá=Kº™‡t‚u0ý˜õÍsx”ËÔ¤:ÄjrR mî)v·YŽj³HolÑ…$á1-)Ý#0KìÝΕž UW>ScãŒb›ßuÝOÂS3C÷z1Ÿ>Êd9ÉÑSÞk˃! À ¥v cÉçymÈ2öÀXK殃 –G År'SFÞË|±Ä‰&)'ݤã5O*:“bÌ.WŠKr,ðíÜ|Š^UcȉBñ @oïfIEB6¿ð‹¥ê­ZmŠ ™ “N*íÝ’NjÛ’zH–î6LTÖt²í¤ò=FzUíp­éXzPb|[¬(%Z£†.ÃëœÅ,èdê%ÞN…×ÏÁô@LT·äÇ! 
³YÈ“!‹õÓ­uÕ9×t­_2(ßcü‹˜^«YÃűæ Ì ­žIØ¥˜t¬º}| «¾{b¯Ê¹zBECï¬'¡òöOdÊ÷°B«k+­ñøv/’?>¿tœ`%¦qEm>H'UèE.I ÙF;³m½p^ù'¤áŒÆã˜Ï±ˆe%¡¾…äX)Š”\ S\¶š‘–*¹ÉH錦¡ÎÛ—xRIÇçËG«ˆþ ÁD\ÖÜjùV.OÔ"—è&@o=w*×Ù"Ǹyäo%K ƒ^~x¿¥«Ä%¥³%Ö5a ¶€à4hë:ÐëB¤FÐxœ°YrèJ‚˜¯Iâ&ç³)² ®Ú1ç =mêx!–Ì·(Ÿn$#b–k:Þ‚ïÁ³\'ùfÛööÅõ&'rR\LçÓ½"!ãåtr;ÅbÝÃÍ\·³?œ.°Ð»' }…Ñ}°™¨=^Ùl dÍÖ‘ÊZÒ3A:ÑAHÞ£„¶†Gà©ë™¤åÁ–/s‘ĒκÆãAy­¹t4(/l3'™Ib$ɘÉr©å@Ø5Á2̇Hˆ P¦Îc'ú%\U^Ð#; ªñû†‹Ù7…ÒLJ…’ø³øÇÓx’nNÏë:cêôt:F˜‚˜ÔÌ4¯ÎŸ*éÓY.Ò¤®™ˆ;;Zü¿‘8V%rÚakOî Ú Ï]¶¥›aŠ€“¾b[óOäbŸí ™‚Ç™<™µ‹»É`¼í¶g¯«\s[[U½EõKïÍ»iɾ&(Ïz“é2ôëw›®õ5I;9اŒMG²‹‡›|Vÿ–KKìiZ,WËa`ÿÃ;×´œÄޤê^äžœ´5‚ÎdHÖÑj|³+ø*µf`?wŒ“†9Ði—ãâ\蚳ɔ^÷u\O/7kTi"œ€Ø´ž¶–ŠÇæ6Ö,?,‡Û‡ïܲ”Y¥ˆ-Ö8幚2ëÎe) >]÷‘Dl<6Ï:0•ÓÆ8úàè˜äõOþv7µ5»v÷2%ˆ¸„Åà§wž Mq:·½—G.ÆÐ~I™T<>»›´ m2’'Àª‡J¯,¸Gú5Úœº‹ OÁã²MødöÈxq;ù-¹xÒ}R‚0½±À–ÂQñI‡Åβë2›Üvae§àáY<®J[io])JœÐÖqEÉ1Щ䟎ä8~šupÔ’‚‡ç ™­ù8Þ6ø®b)ámav뵡à@'’M„–%:»l½ C"•¥[/Ÿoï)E‰ƒÇ[a–QÖ Ðqͳ)‰¬?Á:8ÒIÁ“ÞSê z{~`¢Κ|œU*62ÓÒEfRFªÍÓ’!£?• Fÿ{w[„y°ù$Éñ®%éè8:“'Ï»‘'ÇÀ?Áã?™<]ŽoÂ,8Zš"G#«º@·$M‡£h,K±£æIÉ’”YÚè*TæŠñ!“ÌseEõév syšÆg¶ÍÒ† Zw†Óðè|©Ï‘|Mp3 ÁwL¨S†púyykKäp‘j xÊÖ¯r'âñ&Ãí˜jöIVÍ>.E¨C 3Ò%7QxBœ2NÛáXñ1"{v»ÝKT L¶“¡öÕÅLJ:nE†û5 e?®º™Àˆ»ÌGà‘™jÁ<æÛó£ŒäÕŒÄË@ÞjM-®piˆ·z¯*Ÿ0§ Êšö¥C`ßS&¸ÀãS£lȶcÇ—x`ýP·OU¶>Áw;l»)µ¢B·qžç‚U Nnº˜l'Á¨bÊGàqæ”ùÍë7ëo7“ï@Í&à‘Œç»Û¶œn˜“˜&-x1Np¡‰>%K®ºÔ¶ÆHÀî;ˆ &àá,C'’&îXÊ’8r³R(tI¨èÒ%ºß”aÈ þ<*9ùúóvìÒú|ìÒÑII)°jëÿèFv2ÓZ`ž} ž,]Nµóy~bç=ÒzA:F Hô¨l´€t0š&r=b'ž–\¹ Ãúóð°ÛHÇó‰UeÞÍ®Xu0˜.¤J3ö¤¤J3•Í1¬=aD6¡Ò\¤ U‹céD¦¸}Z2%2]8?ÅkìÃYÚNÒR£±¡O êèJÐ6’‹ØÉŽ,£h<6k’åI†/B éT>áP&]8šAn$!†K3,Í»‘㤈Âc2µQŒÖÛ³bpŠY1Þ´vš«…4㩼 µ+6펣‘,ÅŽÕvdÙ8!E•·G_“© ã3ÚyªÂfø "ÕúpIVô;²o­ tBñLèÛZiÃiÿ‚jg›X—ùF— &([Î.«EŽà V÷9(¡cB´¢¡KfëÄ_›@®é¼¦ ‹ën$$O³à|,»ßO‹Û<Ÿ€T–îh, Gw"Šu#†i…Gw!ç£åt<¸[®S-%Æ[Â)(ét«jä$îF²;6Ó‘.q°OÇà±è’ÍNÿ Â%-.ž{3תR©‚ÞDbc^Æ Ïw£]vŸ31xt†#ÀTŸÒ+B^œð0ÚÀr`_=0ô²˜·Óq ÞÿÒûé¦4»{?o-ðo7fô¥þ¼†Þ›Ü½Õ¤åââ¬÷¿ÿãï_ÜÍ—«Û»ñê Úùå·´ñáÞýçùãÕYïztóxàúe4¿¯ó¤g}ÿsy·§Ö+öU;X!Søãn æçïyÿëoj½ëˆ¼¥üo󮓬¯û–ëb5‚ŽÂ›±¨?šÏ«²²|ÿ€ýíª¾ßËt}Ž—vØóz]<òëo° )ˆî¤âqËhYìoYÌ.×£9(”Éàjzy5½Mgëwì<Ä¡ˆxÐΊÁ|1)“â}1CI:IÀ Ø7Ö*ò/®G«ñÕËl¾i³PÎ[ŒËa°˜Ïî7ûÜ`½Ñ-?-ÅßnP[V¬¾’òq0¸"@+-O=e³øÔÞĦ6÷[·Ië¥ûq½‚kíDÇ'±Î£žõÏûÃú=|Rs¯„Íò@RŽk÷½Iܼù«u‘Óõý´|^¼è}ùÝ«?¼zûêËÞ¿ö³Ii«ôÎz‹ðÃ×CdÊðòvqwÓëkQe=Ú^«$ïëŽô´¥=÷wá±ëçîLË®móêî¦ –Å|•«@âÔ“ÃT­HÀA ;¶üKffÀ v‡rðDU¨¿Ù~Q Ô­ÌÝà¾Bïçà êÜk°‹tnWó«u INÚ#΋apYÀ«(®B;ã&ùC¸š:V˜ö3ðXQhÚg8O„³Œi,,B¨U¶z|†¸qDÍA[ãJ=¾'Š@^“N¸e¹þA°âs&Bb€ÅŠv$‹¡Ý°eÞ4,Î@]c ƒ'{ ÐËÞÔÏ“f{½k×µ†ˆLí¬u<‚1 Ï“èA)o~Hƒ£CòPÄÉC´:<Úzb×q'·`éÝ…£ç"›ö×ä°ïˆŽÃ¼\𹤠B;UpÏy‚A•!‰«Ñùx²ãY¸š}ÙMw“"m¿"¥uˆóZÀ¥‘1äz/eXˇßvÓ÷užÞ\Kü"F]±Í”ý䤙ýÍ´"”dPJDÊ:b)Ðìɤ„äÂWèm¬úi¥ <²ÜfÔSîà“¤SׯS¥E4ð2PŸüìµ›2N¿ýÔ½'$ÅÀ£C¹a~›da¯<¼ã*,Úoo*X -´S±Øx/KÓ |…¡ï1 9žìªb÷K´Í5/B£Ï]"ŽÜžv:}Zî“ð”Sd§¥ N¥v6æÚþº$æKâ\àGBúÛ:ѦoÝ©1é°vÎ1`ôE—dÕ‰>o6-¦ Zâ葌º!w_-C0FXžì¿Ã#ìNIëín{šþÔ·UË5ÉFo”²ž†ì{ýÂG—SÆBÁ®0ZÖდVqð˜ruÂÏ/gí°NÛëóíl³¸J©žèš6·Þ¯¹ ãi‘…x;¼5#“ UØðVKdàñ¾;0EÛâÜ™§ ¶$%‚Ï@ÐpûïÛñ(Á€9œlAŒ«ÃƒàÀébà±¶Þ-6—¿€¢¶³‹ö²yªeG’!ja…¢1ÇÞP6¸X‡3‚-¬cð=¹5‡í”+ƈ‹f3G-ãÐ{¢bÚ§Åk˜ÅuÐ\pR²D±º<Ær!:UŽ àŸÝŽºÙúò –L«O¶@±B a¨RŸ{ãxVäA̾P±ÊšC ÜÜ¡ñ(!ô$üض;LyòTÕ‘f‡1Î:p%‹²ƒ<‚\‘´«Ã k­äà1¦(7®1ûy›’ß,ž¸pºÿ¾w'¥=¶SE˜Á„;‚\jìã{¢u†ƒ'”ó8¯ìfýaç¥ð•g¼{-$I )µöô®oO•M<ÄÃÙ!•W–#“TUØÁÆ£D‰£Ëfv±XµxÌòòö\ýNg÷A>ú“ÒMtdˆ Ù)âJCÁ®p¶Ž5eˆÑ‚Ç…â|9_Ÿ<ª)ª¹å©LF2ðÆräèÃ9œ \I@õU˜¼r2ð\·>4ÑO­³¡¹à0×±‰æÒlpÞ±d±±¼ Q3ðdÛ…g"Ê™¦xÿͤsrËSA£ C‚ÀÝò,|8[زÅ*{^XRÛ)žþÚEØòK{~±^ÿý‘ÊMí©Êm{AãTdé>‚+¸®åˆ¦+qÅ98x²“Œî5×ÌfëëÇ©¬xÃ4©ÜÓ\ñFxÆÂ¼'ôó\)|WpKŠ#š·u¸eÐŒþç_ô ·)ÏCÿE>ŒJÇÅ>˜QxÜØt§i ‹“9ÎÙòП#F:GŸja;ë z_¶©p—ƒÇÊ€¡Mâ<Îiᜓ’ò\ ¾DâÁ|Í€êt… ? /Zøq¯¼—=Ê$F‘K|é­¸¡…M¦e-½R¶¯rðhUÝ'¬>ˆƒ[Sè=&˜”Õ¹P}¨qøž…ÇËJuH¡>GÍâðÏU ƒ ƒ–Zë"•õʸð®ÂÉh7>0C“„± Á,y ÚõD?e篚‚Ð|ILv9x¤.–Åjˆf‰ô¨¥³üBlW"Óv9vç ×q@ÆÜx»£Ù·¡™ƒÂV°Vøž=¹Üá.˜Hs±Ø©Ö÷Ýõ¢à‰CÚ”4Æ’—»"^ýÅòhM1Œø’T¹6—…'~V_¦7°Ã`jg㋸¸Ñµ“¾`žÍ|Bç5l¬Ž;›Ô“M-ÅôTz–Ñ ËÁãGî®ÞN([àtÛs ¤¤iÇÆœ?2ëÁÓ¿·ÝOŒ–%þÎø}™E7WÕº-´ãŠò!OÍ-}v¨è*W½ä ¶=Hº74]¬÷D¨@׎? 
ƒùÕф:üáâ±¹[z«v‡mRnŽ–êçí»æz¹;™·«›¤nKSÆG˜¾ðÙùFŠÂÌ®Xâ‹øž‘¼]­)Â5Ò!F¢Àɾ,”hv½¼Dç‘sià?÷_únñ®ÝÌ–í÷)Ùóæor¼8ul·FOv…!PãÝ)êSÐsµÑr”cØi{@ìT>nW§Se0&0ðHQ²(ûц §[+ÏAª¸ÞÙP„%é ­,¡l:pñhQâFÈ'J¦§PÜû`Ñ@Ç1wAz•ì~«#qX½ºŽ{eœ¢.®íq«b×ÝŸÓøËƒÝàh‚¸j·ØNšå2õ€¤é`aù¥ðM(µ;˜»$q |ޤVÕ!Ÿ”8 ôõ/'íûM»ÝžìÖ÷µ@Oæ«äþ17­Š%‹3“°(G†’”ò.h–Ø•&œ  k0{]•RÍÕâä¾È$ãTÔFá=#àÀY…O(I&¶ÌuöelŒÆqðDULûåèÉnÓ¼{·˜¥n ÷op"0dèO&1–GýØ RÈi¥ˆ¼ ];9äLLÝ_´Ä™† …ÎÄö§×Ð/ ‡‡ûk´›Åü‹Ãpc ¸ºÎŠÓGE\îñ”«œÖ,Û Ö ÁàÆíé½:?¹Œí¨A‡¡‚šªÚ‰¾Lk¢ÄU1IäôË3ñøÏ%NLðR+‹ù)OÚ‹]4.Æñü±Â9y[°NRÒgW>zÓE¹>Òêl½i×xû2é”8e“0%jçÈ•=´ÓÎÖàm]N’XÁ•ËÀcDÙlÏ\ã3ÄØRAD§y íBEo£O‹6ª kÀ<ÚMqõ9%C_K¬a­5åò@;á}Ùé ”ùÊAª ‡¢9xô4SBŸ6‰u aÞí¤ÕSÜDBc\õh¥ Z¹ {Óð-„PjV󣫿f¹næ‡Ð;Tº¦Ñ?œÅnÑ'ž¶ÛÅÐߢM}~wj¶Ø}§MåÁ?~µ™],vílw½iÏ^è¿uß9{aN•8•ðÉ·˜¸àìÅ?®›pj^Îp×d÷r=»‚ùlÙ6Ûöß¶²î «£`.€ó8sþ:oôyÐïZ3w^è™hÞÙžEŒ­ôþMk‚8Vø¹wø®?­aøŸ½k–Ûö·CÚñZÏèÛž´¿VˆŸ: ᧨“\íÖ`‹6 r ¼owÿz´Zï;†Ð?®Q±‡ [2¹›‰i”µoèÅŒÝ? ]øí«w‹v9?ýz³Á“áíî‹Õbùå¾Á«?`ïãW»#Ü·éÓÿÑ®°Ë±75¨¶Y,±“¿ø—=ûöH¿øåâ×™Â5 tÝ—ÇÉÔyŽ~\ïšå޶ã£×ëË«e ãL½oFÇ€ží6×-064NýwG¥ošíÅÙ‹“ÙÕõ×_¿Wv%^AG?æ]s9w>ý®Ùî~جSüÅÙnqÙžþ~ê>>êþߊí¼Ý4é <1Í‚¬µ“rßð»õ¬Y‚ _Á+0þMÇÖßåÿú滳»ÝÕöìåËf6ÑOcͧ¸—Ð÷Í®yùæ›·_ ÎdBÃ÷^'Wír{ö_?ow81&Fü–ÄYYæ·Ý€}ýC×å1Î-ß=ýôãÛ]{uöbÿ>nçðšïORÆþAú¹{Ûòê§½Ö~zq”2œÀÓ« ^Ö<èExgbÐOØ›_ ¯€¢oÛ¿oSÿÞ2ƒÜ7IKÇûÁöÂ~Ú¥‰ÜûA‡cæÇM³Ú&‡éGà~7rð¯¿4Ëå mel+ýyãÎgqð­ö×Ý™«h[£‘$3° w# m‚ÒÏ…øò7ñúA”Ê"Ý}|sÇ£ô Tûº›÷šüôû¿^¥˜‡O€Î‡¿yóñÅ¿_/–È˯׫-ÞH€?ÿx™þ:­Èñ³ÔYoÚ÷`*77éƒU2+øç÷]IŒ¯~øÿw(ÞŸ]6`OwWËf–^t?ëo´„Û —?¯Ö¿¬†‰ðöÛ·«æj{±Þ¥ÿ.××s`ŽÑ²Ý<€Ñ=«"‚)þ·À•÷»gTñ—õ¼ýñ½4J1‡ƒ”ðéþ?þy_‚¥Îßp˜]-³ÅnysOB§é;ä1(Ì«#9/?ŽL¹Ÿ—ÑY¸lÁüƒöWP*ÎÎ Lxps4»_¥áy4ïܯWÿo朣%Ø W¸3¦ßtŠI–DºóàöŽõ½™z:Áÿeýa?Á«ã#œßaBû$žLëûß|dñz Ý?%?ý­ÏÛOmûÓùë «5¬IɃ>h§\¼[ㄟþkìíŸE¬Æ\8 6þψ tD•’Ü6ÀvJ•Û‚»m`qˆ" [Ö¸/ïqXÔ—ÇÝ}Ù/¾û”é‰ëGÓ±x­(xLÝæjëÆb µ®°3ŸƒÇ¨z纖èü aþ…VÔ9 ´NÕ;Ã+‰»FS\*ýû8¾³Ådï±BÈpeË%v>é"læ/ßošwðçɼÙ^œ¯›Íüy]'ÝúQ›` e< ÷qã1’ïøƒ¬àbæàQåbOá}ú¤ødÄ +”=†v!ºr9ž'á6_–XÃùÌÂã>³Ýè Ô·§B{í-Ì"½²¤v*–³…xžpY Šv4~' èðX)µåìÆî¢Y­“ îU&Ep&¼vD]–Ô.¸Ïm4HY´ˆ&xR-XÙQŸTç'¶» ¼ jO¥3&¢¾Ø®àŽO>(ëH'ºkgÝôƒ ß­#N•»vÙNý³I)ß©õ†ÐWpÑiMáƒv}™e4àûaÛ·Ývð§W~÷ÃçaÝÀ@BôàÙöÛé ö߃ $8xzófd†fßš•C¥VûËíÚS…1Ö&*jÞÄv*×T ¢Â\ÖBhCãÓÆOßÏøžèE° \YnÂßÓ"Ee¥¼5‹ö F ÊheCôžšÊ°T#ƒ¨§evŽ(ÓgÂÎÃcM‰DèÏ)óÎ6Ï“‰f‚¶0rÈ9Ú™‚×*J:C‚é¯]åáñåöPn/°Ýòà5Æ~5Z-c°´eƒvÁÅáåãK’7s´–ƒVƒÕw>Do'2IqʱA˜Ÿ,µ¾‚v^—[!ŒæhlW£¿3ðx[r¨c¼è!ƒÙ_;Ùž:ÌO惠Xê0Ëd˜h¤sXšÔÉ ‹‚,<¡X¥ ž•JŒ¤ôQjUƒíDÑa?Н¨k¸y9xl¹Q?_m«Pö«Ðk…1’b.ÞÇÒrXàÖ¤ÌÍÀÙé9ƒÇË‚Å&è/cÈaIj(O Û©rÑY£8›ÙÖèò <®\Øî»¶Á›ï›ÝaM§ÌA gµÔŠÚå'S˜€¬x¬àæåàQ±ÌÍi¾é Ä8 AY#C¤ŽÞ³6ņ~ Þò‘[Yá< OÁ¨ª´I‚·VÚÕŽØ+1Ä‘x„iKa0 !AD? –‰Þ/Çä ôqúÄ\Ý{0ó´áà)”ªñÐ! ©[b¬a$3ÎR܆vÞ‰bV¢$·ùQÁZäà‘ÓX‹îé^m»ô§Hu˜¨ÜK©e¿ç“Ú9[èÎÏ4<ïd‰šÊ‚Òµó“Gú§÷‹iX>)E*h|QÐ'µº°1–µÐ]¨Ð÷x|,kêÐ÷ëP§ÍÊR|Õ”+KÜãÉ× À¦†Ÿ˜ƒÇØ"%3¬§#•i­10Àû0ªÞñ ÐD9²<¢¬Â.X‡Ž­b|°ŒÛhl}÷‹ú‹--—‡OëßW_f'ùÐü/s×¶׎k¥Ú’|‘óÚ_RÚ.;ÙçëÛZU$ ,«–m£ö¨¨YÓò´-ɲts}¹‘rX?¶Ÿ¯îî¾îvÀ§]ÝÉC5þÚD^ííÌÊŒ3d‡YuOH¢bØ÷l?ù@2`ö3V´Oê{¨?n¾è^’Â/y4àœóòJwØ790…ņDçOhÔé÷Ï3!ž¥rL fݹ©ïbêB„vü•‡ý– OÇ…uû×ýæáñþi©)xT™¹®Ì@¥r®ÆÉ"µ'+ÍÏ7—ò*¿j\îŸA‹½IÚ£@ÊZ¾È9ê—ŽÒIÑð<ÁÜ·àɱ#k—*¤GÕX/ÊÏ#Q ÈY;Ťt¤±|å~X“›`˜ð¤ïQ(ïÐK‚¨øÉ‘)“åí"×Õö[KÔvØÙMp,x _þé¾múQ-B]‹’1ÉåÔÔ®EÎ|-8Œ²Ô!Ÿ{ žHÓ¾ÓIòÔɳj.9ŠýºkÛŽÚÃŒ¹7àÁŽëþîòøÖ©d[°\Î+™“‹œ½•ü0ºP—ÆO¼ õ݉(ì·¾öêú•»¨K p9¬‘µ÷‹p?c~/¥þF5Ž&r<Áž+ß!Tk²íä _½Ï;y¤T‡JâC&©äŠN;ŠŠ\µå›Ó¾ž•xyÂ-¬º‹X Åd. 
‚1jNd>©xÚ ¶cžlÄÓÅ+û³¨¬š›Î'%«ê)d"çóˆE˽à‡Ã÷eÂõÉTšõòo¹š¾¾xXt™]²î „v.¦÷°½ìl4@L8aº-xr¿,ª uUt}9kL”4mÀõéP=™hÁ&¬jž»ªë•êÓ’üX~WÞ /r©OY¯vŠ O­·áa:)ïíðp ±®$$ïBÐ6—"‡æ£w0ë Ø âø©µàéSû^Õ¦&ð{A½š|A)K?[m*rœÓI p&âf_ìPž¹íäpÂ)-ßI¤·ßɅس9Æ·GïºCªk‘äg¥,Ô"—÷ÈjœIYËèÆwØ1áa÷ñ6T´Íˆ™SÒ¶?‘êÙ.cÕ 0 ö^YãóE­ë×MÌ‚â{r!sV}‘Ãôáöì7ºño&–ïDLÉSžŒê»<üS~úöé—ö?=+ñSËF½~ÉàbƒÇãÙ•±²èçzª7£G žŽu .®¶—»G6\Åï–R”ˆ‚»È%ìQ÷e,‰-Éœ žÄï¿G(Á¸Àäsˆ ™ÕE®¸'ýzé¬fy;nï'D—LxxMÐW|x~Åòbþõûývs¹¸|Qñ÷£ô}HA©uµÈAü;‡ï6_ĆÇ\Eüx9¾ÕÝ¢k% wWÙ^£¿´ «ª‹¤ûÈMØR,xÌé’GqŠŠ«ŸÊšwÒÁZSäúnµ$§×26žà¢Zðä¶Q(aE)¤[µð|‘co b˜I]À Oät0ÙMˆ_Yð˜s|ÔIQý¶¨8ñ¼ÔÒºÅíä|°!ö…ñ±àñ®Ëï>zV‚„ÌS5$r˜úZO|èè&§C  — ü}³Ÿ>—ƒ‹<å¨ •ŠÉz¼#ÛÇÎMˆgZð@ø@Û…*ÌŒågJê¸Á\š°/Ó HóŸÂ„çÄ=bÿÖº¦À7z€Vy–JˆÅl ©š¹z³é[÷WÙ2†?+5âÉk놜ü˜x€àµŠw‹dZÄa6PtaÊ #zJ®¸3\~» ÌÚmþNÎ…3ÜèA…ðöºá¹.Äñ²»¿NœC žÂ³f<æ,êçy»zú|èð#(3%Ý.‘0j˜8çÓ ‡1Ç_;ðJÕŽ“kÁ3ö쨚E­¡®VÉõEr1)ù\´n1'rԀ鰢yûn÷» ×vX‚ÏÙ{ ¦0ô.hÕ·ƒÆ?2âIÝB»/7 ªJc]¥H!&N[ú"WË/h®à±Ð] ç¥¶0ÁÖ²à‰ØãiÉ^Y‡Ê ,\Z¨QÖvœ"çÌ¥&‘²}€3&Þ€Çlü¼í®šÔ™êê$’ÙDåªZä\æÜã½É)¼mGYv›<~Òå;9{Ì xìúî·—ÒY}sóp¿ý¿§íß“s÷työ[æ-ì£uͶŽ$¦Ö ƒFðé_—×›Ï7幨|ß|¾¾¹~¼Þ>|ú×äOþç×_\=Òä?‡ìéeGó±¶K±¤û»§GIÉÚëùh mÿXÜ ZŒAjUšÛJOØþÚág¤ ¤0à¡ÕÅ|ßÐkúB9î”ä·EÎ1¯Ô Å^Àq— ^¸TiïRåšK%½Ì!7¡H3X…Åöˆð`—ªR»=þù¨ßÏãï˜ÛR;T‰¦z¥ΕºÈÅÔ¯¨TçýÑ0Š„Â9&<ý<ëïû×ë¥*«\êÝb€ ñ¹Èù]Iwà³tž`ðç¦Û°ŠÆÏ¾\|_j¹ºnZÕTܾö­}t«l_“òî…̘ðØ;­ŸÍ‡ev@ç^ë(«ÆÜ\î½Ýjî5k!~,îQþxÜûÿ§ûí2?Ø}>}ö½1¾iü 8‡ì0s žðáø·H>n.¾.sD:[Ç?q5[u‘àcñ0ùõI¤/ã¾¹ubïVyTŽG€ÔÛ•"G)÷«^ØÕk1 Âã˜ðän7”¯xðü|ë͋ũ r«âcÔâeAžxûõù¡«HlÀš'¤íð€£•õ1^)îU¾ÏÊ%„ȱaÌ>LáUq¸™RžDïÊ«ÃK.ôQåV@LÔpf…êcñqÜ:6žµü ÁQÈ ãön ¿¢Ë˜λòk—„>©ÌŠÁù–³0’f½ÉZNI„54œ…SB„å;I®Θó»rêï‡ïWÛý Â*¯’Oz&¸È½¯ÞÍZn¥6œ‡•gÎ]¹ÅBËy˜òûž‡×Ÿ¿-ÿ—e:²J.–¸jñÀÕêlãÈud8kÙÅ9'×àòŒ–òìZNÃŒïË®ïw?¶÷/>UÐý†1WŸ#×Û£YÉ­à\ÙNÅœq·ŠÝGôS18xßSñöéqs{ýs™ и\dˆ¡aTá}|Ä·G³š[9@Ž £æ)§bdú©º x¬÷ÏÎç…ùÞƒSÜžEŽjýDšC”¼á|]˜I±5À£#ÜÉÁðÇ5»ïDLŠ¿“ó½ZWªÓrŠ%Œ(; °$ÒQ'UVJŽï÷mÂ.õè@v °¿|ìÚ1]+Ó¾;”©®Lq÷r„¤‡¥MR§.£kèi<Þò´áa°—<Ûi­Úl[ž ÕuÆ(yp5ŒÒE{tëÉP)_ZN!ÔÁÃøJý;<чH:´¿Ò•ywöx/3_,©Í¢?_×Û5bí/r`ÞÏ;´|›<•ýQÇHäÆÏ±|§ØÆŒ xb—¦’µš³MRÝ›I9@¥ëltµ-KºÎšK)`p;ÞŒL: rÝÒ_öJ|8ßÿÇˬ¬·íÎçe#ðò¢\Ã]ä¨Ï¼Ïdµat~|ë' .¥ù€RŽ[b©1É4ØLŽû=°ZOîvÜ@¼ ºoÛÇŠ*c]•^<㔜W 9‡¹K©ý̵ÀN E~W#â—5H9‹åqK/"mPã€ía­Û¡'?a‡°à‰Ð{‡¸Únn¯.®¶_+*Mu•–CBÔË"gˆ÷^7 Žç‰Æyu·«Qå´Ò('åµáNÎußFúpŸÑÉ®:-EŽxMÊwÊnB¹Oôý¶“íãæ†î·ß¶—×ùéáüúö¯ûÍCññ.Ÿî·ç{Šÿ”§SÏÊåºr#IÀÊÍzŠb„ºy¥È×p>.í\Èg}L9Oˆ\ ï´Wß;9ô=ë’¯ó ]+g½ôœ…DA„~%-Ã`ò„è˜ O·Ó”ûýfó¸mS²ro˜È¦ÄÚê,r.@Ï‚èãWƒely‚EcÀ.ôª”ÞAÏŠIäþ$!kWI¬Ÿ4n×é»0ÁeðQÎ!4øó1%P^WS¯²žü±ý|uw·$GåséÕ3j:ey§zÕÊ¿ ÞUWEäÜ®Èw<%5{EäÌE´ÿº¹ûñPœˆo›†¬åˆg†Y%D‘;µ¡òpRK6%Õ9?ášF¾“rB×€ÇüZðêR¹Í…Å—S79‘3÷ÁëIÜ€#eÕ¹AµòLÕl‘[ßËrm²ZZœv]{ögÔ#ÐÆ˜®[é[p«ix~{~üÑôò× 0f߀"§Œ[:9Å ã!s[¼—µ¨Ê?JQúJaš¬Ý“.Q·çäÁ«¹qÃèm±»÷‰¤ÅØ GÉE'lãå;€½×ñ ôèÈÚ¬ÈêyçŽ „úc†œqDÜÎÌ×èbïë Cž_¸|':ïm‡'»³ßh E Òö„UØEÎc‡‡ÛÝHË|ð:p)E5~þËwÊÿêÝ0ör1v^ý/ÂõΡø¡XAÁ*rŽ;Lz®8¨^|y/ç&L¹|‡2ynÀƒiP„þElcÑcTô(K"i™o‹œ§ÞkÞJUVÎæÜ€§Wy㕆vb"A.özz‘c AÑùèjÁ}iÏ ¢}foþh„QË´ÄÃüß÷%Öå]xQ«'`Ï Í7Ow›Æ(öȺû»À¸¾¼ÜÞ®…]¿€ä;ÁG xìVòÑ7÷{EþÖãÃùó—¿ð²˜UK|:y×+é´QHû×NMÎ{mèaÂŽjÁ;yËzʘf‹HÍUÅdQP9„~¥#:ÓØ2Š0Á‰²à‰Ô»Ä^³V³¢UFb¢¨¹&Es§§:ÝŽšÈOà‚‡žûBK>™f´ø‚Ü3fíØõKköÞóz1Ù2Šñ‘U#ž¸&’þÚð:¼‚ jm ÀÒë3i@Ù±¹:Õxò¶£Ï3mž<(²º(.ÕäÆÑk{VXnL×ÑWò3ÈUb@*P¨\Iõ›a s·ÉZ–€¦GPô˜cLA©{)rŒâ ði3Û‚ƒ nÀ86Žò‡‹9úä–*U îHàSî˜Î²’¨Ü&XúfÒö‘3×bÂGà4aA[ð˜Ÿ1·¼ý>Ûµ^9Ì5?Û|ùr¿ýò+ïœ}]§RH2ymSB%E¥Ã@ g pÉM €|'zÎÜ€xHhoQ+j“æ|‰³ÆT‘sq@•‚L5Œ!â„©g Q)i´Çç¬þEŠ ,%ø’wÊcÈEØ ãµPÖÇçêØð7ã†oÑbV´¸´ú‚˜4Ôars~IÛqçà'̾lŒ7àInÜs¨³ÏÑdQé¢EÅ@–’z ­°ÔR ÐÃŒ{¼ÎZPÑÙðä0üRW €Ö« à’¢GÎ×Û~ïä’øZÊÈdjž°Xðd7¾¹×¢CÅväÏ”4ƒ%å^ï¢Çñ¸}4Þ¹ ö Ow»«©µ.m H R¹i|{¦&b0‡T°à‰ã6‡iÒ¬XÜQ|THªÍµåËãnwW2Ø2Œl0áéÌM•J=2bH^I¤9i9n3°¹6ϸä±à¡>ý¦›¶UÅODå¬"Öˆ[äðÔ^]ƒˆk€Nn,x ¾úQ7X% Ÿ}H4KGJè%îÒ=º Ûa{8aÀƒnm«Ø­‰²²S•CÎÎéà"¹µa‚ó8e&ÛñX39öº¹Únn¯.®¶_W4,ÊŠíÄä}-ÈYäÈÜ~ö>eŒŸA6áéW_Çr¬*W0’‡ˆÅ±Ñ®`Šœ=v4—膑pK/ixQ„Åï‹°Àñ,6ã[µñänõ dÝ—»ß|Ùþ]LüòÛ·ë/ )Οéùê­7(ALÁ‡ÔM9—­,`·ò@W[û‚s  ðÝÝM›¡Ÿ6«NØa½£ž¨•ÒG¶Œ¯nbÃa­«–ŽZ1µÎz÷²V6œ kn×¥6UÈë$oJڮϔ!"h¡×"Çv¿eò 
fËA’u»ô’&ÙÍ`¦ObE‰@ä:9I’¸ô8¬/*‘ƒ¼.£ç¢²áÎxBŸ'¢?ND œpágÃkÛ\ÞVÛÖ£Wf!K§xа*M7VÃ'/ô̦½;¾ÿüy5’ëè(‘Ãòu}‘Cô+ïøV’Ö‚u|Iôÿ1w-Knä¸öWúd“I€ÞÎz"nÄ]ß…¬’ÝŠ®WÔ£gü÷—È”]ª‡„d{1=vËy‚ :<˜mò€$&A…ÓÃQ)]zç|÷u¯%«}ÿ&¦:<ê~绀IVts þÝY·eY·‚‡º[A–À»—QÅ—•dV@öúÓ¿ƒ>y×€'ÙNðJ‹§™t˜RÌ©j'H˜)&uo«ÑVÈR8„ <Ùiãc·û'sw}Øý8§ÚíõõÝ6õ¿›ý÷zL´¸\¦Ú³€%³ÝÎ5\ÅbæÐ¿›­:2z~U½Õá¤:Á®'–ä£LRàJßZ I¨c0á <)ô[êõ÷X…?oJ'UúeUFàÞ»!J»hä‡,Ål¸„«¨™.ˆ;àϸÃbà!RHJN|±ž Õy‘'[= Ø"5x²ë¿n8n;'”åjÀyÛHEçŸÇ-µ ýŽm2öWüŒ&¿¿Tøå]¾íw?v×ûc-džyDaæ¹qz¬~§d¡.jRßYÓíÐÁ8÷kð„ØŸ´?Í”å÷òð)CL1CAxÈ9u¥ìf·­ ï¾OÔæ?SýdH ‰L®¤ö¶]ß à#B•íx¼scˆûpØÍŸûCȾ dmyÜRx‚º÷ÛÝ_õ|–9Ú^°ŸB¨ð ]¢yÉ;âýº}ü´»{Øßñÿݰ6Aðý¹lf„”IB_WÆ!æ ™ÅÍÐë`4x|ƒPù±}¾®ÇýíIƒBÌ€0†’€¸Žóú½©ËóÑKÐg)iÀüO‰H¥ÈxŠu³²c0è˜ ± UÁµ%~zè“è‰×q9t1j+çK²M©å]ƒlK*<¸òýgÖÑ}\?,OáԻ’–rW¥Zyq:ì› «î¢½`¼KÀú?éØÇã|´n‰ge,5RÄ[OJf[Ðá¶‚¾¾¾¿Þ.érñ`?9pˆýòö9sV]°ÖÑXºß•èðD»ü½·z|ó÷Óë'ï/OQ’€@_2¯7…Ø `C;PgSÝnoöõôþjo]«,ëÏC5_ Hbo oK Ù«ôg€ºhêùû(F…5姦H$ø±Ó¸…Üýsç‘nÇëcÀžŒ–6àqRW\VW€˜ b’à~ÞIf¦†UHÒ% ŠüØQ‡Ю~''Þåð]Tô× ¿$C×îù§"ˆG)`9I Yý²À` µÃÃþ¥tx‚áÔ_ßÌoàü·´¬I˜vA çÍÛ'3ß³ i5²œ¼Ík6Kɵ˜% Š2ÀRàÑ{ÄË/Ø“ëIp3²sŽ’´Ä Z¤Rìˆj°ÄÚ‘# ا4xÔYç‹K~R_UçÕô£æeÆê…(U?šÆYFÒ¬)­ãÄOm·Wþh¯`Ñ`EŠ%ù" ŠäÉr 0ãC»ÅIEJõ§Q<‚2žÌWØ÷‡»çûmâ²6¤Œ^j=*”¹ ÑpwOüˆöÛÉ2ÿ¬Wiÿ% ä¨ã0$óË KºkDÁ–Dƒ‡ì:;ŸUí¢•® ‰‚«þqèÎ ÍÕO2FÒ½YŽàÜ3¢Áãצ@*T:éRع L%w¢—,œÈãÍz@wá·F–2Àe<‰w¹dØVòý=ÜT‚Ì ì¥bXŠÂÕN!ŸKŠYÄ_Ǯ̖µæµ{¦\PàÁh“<«UéâÖœ¸Zvïm—Ö´íCg~쮚ñ$'¹3ž,ªs¶*2 Ф@ü–%àÂÅ»&ƒÖšË ú§±èð¨¹p~Öj–µê§•‚üÂZï`vd´x ¡?4x «ç0÷p8Ñd4I!eO ¹Ž‹‘ÌüIk"·K±°ÕòAÇë›NìÚã>¬ÆÍ®ç'= [nà-ŽŠ¸ó8ôt´<Ö ÇØŸ<ÔÇYx§BTX—xæ 6 2w˜Ò7¡ìGÚvÜÑXü<ÁîZóñùëãîáp¿œ%"í¬œçùº¾ŽÃ@]\-o5ó€s‚ O1za« Ì Ñ’Bñ E ê8g—FhÄ`øþ9/J<ŪüÂu=}G?U|zØ~ûvØMʶÙ>@ ¹ùj1zmÊm…Á ‡ öp&­Ak\õ r”ö´:.¨C‹ÃI¬fD0AƒG}gu‰rï®oNÙÒœÀs9HÎnÀ÷pZ8¬A‰®ÿ´«ðК•ÿø£þùæË¯¹ÿrNïÛ”¤ä­ÒÔFxY>K9 ° jò*(¬Ov~€58–ez­Ta—Íà)£N¢ŽÓ÷¬Æm…Ô¿ ºy±P Cå„%/(–\ .–$ Bü&k€Ñ¸ˆã !òv(ðàÃñ®„£´Cc(J!É®ãô “Fò[#H’Pà!ç:ÞOLÊîúËÉc=>J`«Ûœýã çq»±§Ÿ­-Ãã¯ÃÚ¯&¬Dió%p>G‰³Äa´Øñ.¢…³°eÀŒ+ðèS™>¶¤g ææ…û«ÃÓ¤Ná>3CF$øÜ(<[›€K ۚ€Ȓ O‘£pz4Û¼þ& »jïù&˜yœ#÷À’Ôøý‹êð¤¸öžbJñø<‡ò7÷Ï××›ùJðô8&\ôq•Ï‚$£V„BÊv¹ k ­( ˆH©ðdƒˆÔK¡€/ËÕµ–÷ß<·{Í>/'\ð8O)¯½—XÍeà⻞`”ïúS²‰=&ˆ½×r´ÌÙÂHä$©ªL`†²#t;òìFðCGͼñW¦õøÓMk1¾å­8ò‚/YÆó{cghèÎv…T>¸þœQá¹°UgS(ç¥yDJYÐ!Õ çGfr uˆ§Î¾m¾oŸþ<в 2âÄ’ <‹ÆH#^“^BJ…äÌ»õŽá¿}Ô¿ìã'è¡“Xë!S2ZâJÖ* Àþ®Á£Nâ:£²»íóÓŸsÁ¯Ie(¨Œï•D"yôÐ;~¯æ¤}ÿ…:<êVÌjm~ôÙÆOK=ffAŒNèJÊÖOs kqûäqqÀv ¸º~…—àó8_lVùq ݼø@$(mb&Æ"$Ÿ}÷{º‹˜Éh<:% k=pP38H2ž‚«Ê‘n¿Ø×]p¿aûx|‰’׋ÉBH$àãË…lV²š– 1”þ“ªÁ±W¡É—~C›¿hÖc1Ò#¹WÕíLÆŠ§y&g·xÿ>ìÿ3é+Ûè+‚Ïê€êoÀÃÈ–<ý>óÌxº—wz¯NtFê¬'Æì–µ9M+¾â~£éfѯuvzôŒ”G!o½Ê‡2”B‚ßhÃd<ѯ;ï|ÞÝÝöW›Ýãß“ƒ1'çYy”!:°è=€  âï3½Œ'jÝ_ÖÓ/5¾Àåôƒe5‘/³“B'ÜÐ9¯ìªx!ëÚ–8⼪À“ШøÜ[ÅMNò«sà‹‹E¢Sç{^N9n-—½''ÃÐÂbþQB߀óúß3Fí¤D‚'!KS½šìƒ™ëç8£Zq—q|õ¾}N2RŒî§r€õŒƒ xÔÇÒ^8.Ô7A/é.TBXD¬Ág‹×˜ë°†ì¼x¿ÊãâˆEÍü1bhÀ£v¦ºÛïW&Q‡ü€,7ÌwrÐo¾•˜yÚcü'rCæ«£e{ò¥éštø®¬2‚¨ÉŠÛÅ(#/©ãj¿9×Ù $ÛTp'7çãNîMÜ Žq'(K'¾’$'½ÓšÇÁsgƒ}¼Bá/)È+_û¯þNNˆÐ€'©­æTèf{ߢDqıŽÜ¤eáÆ«P=0ï$?¸Žƒ%©šcQ7ÛÝŸ‡Ûý D’Ìhýtò1—!Ö…2€O&’‰PÇÅhÚ½çµòœT|I©ôÉÕsMµJayuM㜺åç?ÄæYªjI…Uó¸þ÷·Ów‚OÒ“Íyœú5ÌG5ß1æE–ÂFYÐnõú§÷ñ’4uœú hñɹ˜£°!Ïã  HýR ð<.ƒEdûŒ.'å ë˃C—£—¬F—}1¨‘Ù•ÏiÒ*hðdoV¶ÿbÃ<éS5Ú!ˆû §šê›ÄYÓ»,9?€<<(L0«ø¹"äqÞ­ "\H¸úáäê\`¤ÐFOÎ nû<.Ztð<£·â½Õ\rˆ¢Þø~>›øtJæµÄþMRtxÀ$§ñïÇû?÷û• k¡î !6D$Ÿí,:q^È= N`…5xJXù>kzä¶ù©¹÷¹ñT¼ =ò%g}’Äá“ÜE=Û!–&Xƒ§X¬ÝÛç§ííá¿/*‹ÂšÈ•[Þ9<“ÌýßÊÊ·Y«9¨A›¶5xÔ‰ogž»ÈJ ‚ÉE.ô&9#õ•ž ¡z \êâÿýñ¿fvÞs$w·¬„w÷ó¶ûÒLúäA/é _þ¸:wËÔÌy°‹@v¸¨”” A¸¥¾WT(”áZuóÂ+—𠘾29ɱ^P9UGMeüe©’çkóeŠÛˆ623guŒ¡MlȘp‡Õ•!Mr–™”Å‘–Šö¿bRwQ¬È•“ÔL稡1'´Rÿ5hÁ“ýZrM®J›ÚQf¿ kÁñR]„׊&…ÓÄ‘±T —Æ&±+>'.¯Ï¹‰}1"g%–)=C®WÞòcåòdi€CŠÒŠÓÀ&•‚ä×=F£»ž»Ã¹jÙµõ¯Ûº{iÍƼÔÇ)l?ÍðX²‘J†Ä’ÃëÜ|á~ì2–‘ ¢YæXÊX©*™nQœ‘ÕïÛ¶ž0<Œm)'6@@KX16F –‹‰ÞÌ;--sHŒ'ºûâø [ÜÜO2·ÂuX‹b<Œ+ ¥0, šjE!*ÁÂðØâ,\7¿ÕíûnxÊðð¼Õ\‰¨x1&FŠ–´¢]&ìj-£F‡à1ÅÙx;[_Íf÷nf8Îç„PR¸ÅÚrá—%ÈÄ-N(Q¥w@SE]®dHr2àÑŹµo^5d8ÅIÇ©!"2ZŒt‘¢ec£ V©Æ–a£$4 Èì.fgã¾Óà”1œŠ"bp óâ¢ec£U6 Ñ™“³º«¾nE”Y¡xLl׬]OŒ¶îšm;ƒI{·X-|¿„‹fyß]ï‰r} Êõ¢*hœä”êÌ4ü„u4Ö“¬ýþ³ß-ÞÕ³‡Ù²îiúzR<&X@lÔÀÎ 
ªÞ•#L ‡QÆzS®Tžè«ûé½m›í“3öÇŽMÞîäáYkSlOd¨%ZáÙ&†r©´`ÍÒ’&\#Jx˜x$yüTߤO»é‰Áƒ4{M¸»'øq”šH.ä'sœùÏ^ãðD×x9ûæi}½yÕÝÝ4UÛ'žïµÚvíìjÛÕ S;\N°ØG(\àóã”#û¼&är8lÿ~JÓԉЦÖ&„2£ƒæÇ VŸ}ù)Q†È€ÍUÝ@´Ý8Z€îw4ð˜à‰n {†NŸè’"º4”:B »¡„%j›Íèµ(À„ÒAHÊÞàÚ`@ž³ú+ò—Õ"D…zX…œsÍ ³Ø3 Æ ^xI`âH<ÿ%é8<ñ—¤£¹÷­6ͦZz]rD—† irYËãz¬HÄÛpÈ‚êÓ‡é‘K  F3¬FÁ•²R!MÍv㢙Y ˜ c\h»¶b‚k¢±Çc¢ë*ÇéÜè«}̹ÛkÖëÙt w×›ƒÑÆQmFZ‚„4‡Íh‡A 6v?°? P#}“>ÇW!éÞ~œVy­ÁÙ ŽÁ>a žè›cctzè쵊l¸%¬w æ•6¸vl 1!¡ÃaS^ b…'Qé}tÃåKXøí·S©D‚pÊog $*×*´Q|V¾`´å•‚_—Ÿîw¬RœàÉ|ÎðL«;ZX‚lÅ•tïaeÆÜ8rÜùqö¾ñФÀ†·›ªÝÔó!z£?y_·ÝîÄü“Ã¥þ­Wýÿ/.'ݺžMfwÕê¶î.û)¹œT«ùd]=,›j~½‘äºqGërÞs¹î- ß£õó¸9´è&ïéõß×ójSüªÝ-6õl³mëé€þÇî3Ó qÍÈ5…W¾um³§¿n«‡ëEójÖºä–WÍl S³¬«®þ—î®bRM ø”)zcgŠÃ_7¿1ü]-æJ>#Õ;YÃ{–J /ÀŸ¤ª…!7V=×ÊýÖ_˜éé»jÙ՜ҕ ÷vÚ9) ùy²ÓÀĽêtRƒ€Ëɦ™Ì¶mënênëÍ?OVM?‰—ä;Ù‘=õ´ð+ÆÀ ~ @ïÚfµøýhÃ÷t¯ß-êåüúë¶uYÝæ‹Õbùe?àõŸÜì»îÒ!ÞúW?þ[½rSîf“ƒj«ÅÒMòÿÔ³¯§€ÿÆ/¿ f„UU0u_^zs8Çêrò£ÛMÝŠºœ¼iî×ËÆ”\N~¨³Å€N7í¶¦¸|ìçï@¥o`9O/®fëí×_ß2¹"¯a¢Ÿò®ºŸ+¯~Wu›ïÛæÖ|7Ý,îëë?B×pI^Nvÿ®gõýMÝ^NÕ—ï€&Qœ+Jûß5³j ‚|?€ñ?ìØú¿òÿá»éÅÝf³î¦¯^U30tÝ5pì®Ú\ÏšûW0÷Õ¦zõÃ7o¿ºrz˜‚]#>÷8¹ª—Ýô?î6îLÛ3â/ÎÈ2ßOƒ›ëïwS¾#ÆÔ±å»ç¯~|»©×Ó‹þ5÷v=‡Ÿù+àñÊèßð_÷h[^ÿÔkí§‹ÉÂi ^ȯ6ø±êhá7=ƒ~r³ù¨p ýaÑýÒùùÝ3ß厴^K—ýâŸþŸ0„ŸN©'w¿èÜšù±­VÏåú¸¿[9UËå–685Õ7•º™UBÁ§ê›© Ä0F‹;’ÌÀ2VŽû*g¡zNÈ—ˆ7G_GD:¼üpà‘Tûf÷hì5ùéç?~½òÉdÇïOòáãÅ¿nKÇË‹7ͪ/ÓýùçÃõê7Þ“p¯ùÉú¡¾SÙ>øVÞ¬¸?ÿº;üûêûoÝ¿N容÷î+°§›õÒ¹r—ÇOý®r–°»½üûªùmužo¿}»ªÖÝ]³ñÿ|1ÓóñCRèHñ¿®ÜÞm^PÅßÀÃúqë<6L1§þÜ»oáø—ûó¦jëûz³«ó·ÌÖËÅl±Y><ÒÑéÎôô¸+ Ž{ƒôIÚÌãsÙ9 ÷58à;ƒP¥º§óü¿iÀ§íŸŠ¯ýòœÌwî×ëÿ7ÏœÉlÐë?Œé7;ÅxK"¼ø†Þ±~4SÏðkÞ÷xv9qÏwx ƒ}"Ïëýw>±xƒ_åÜ_E?ý®Ï;Ïmûóç×hV³£ LJ=Žùyò—ÅjÑÝÛäL¾àLÀNð¿ÿ«;±3D°õŠaeir;RG"DŽwbðœ•ú‚JÊž)I”ÐîÂ'‘ vèÁhŽM`p!Faò³!,Ó]7s¯OäðT* Õ 3dn‘i D:6G€çLC ‘¨÷rröL¡H&…áŒPaЈ®áÔRVÐ<²9¿(pøƒG–Œåº/w‘¯TäDÕða áX*Œ£Æ¦1iY.€«:˜Ÿ1xý iÏФ[X—àänß X—)F š‰ZÇa ØŠ<ñ— R(õù¹ràjážtXâŒc*‘ÙHËrº4ZZ\A ¸šîw„‚}lžè²k/Ô”>•£?p·OžÎÈÐùš¸ŽD5×Þ“B~ÃÎñƒ<°éª;v'óÖý9üŽ•†È<†å½†²×ãÕÍÃÞ}è8¹ƒe!ÖƒÁ7à¾Å&b¸a”(2Ô°é ª‘xÓøŠƒqÜ3¼Rå°R©+—¡ ÖuµžòF-Ρ5u7TÝu>§\äç„ ]J«ÅñÄûŸ‘‰þÍü1Ëw éÛÆ‹ÁKûqB’bæ!’ËBd®ë‹G’rb^Õ÷>Àí¢fƒ u ©ŒÂØÌ`m2_[‰as8nv\H'¼‡AªÅì¿RÂ( @‘ùîL4›Äƒ9EÈYÓÖMwuÓ4ŸYÑùZ,™Tç™p"ÑI5ÔXYÌDE¬&ÀÅ9.€5¦Œ{UX‹´EWÔjVn©Ä+Ôí!»«™¿«RW«íÚëVë§°ùáCµóúqÔDW_ÈÏmƟNLËÏ0X¶7XÚ Y,w³Îú¢0Œ+«ÅÂŒ—ŲûGnÜ):,×Éju9ª]¶ívåRv*í®{ïþ¹ÝåR>/e$B[ÁAÃþƒÈ$8X™M%™¾oëY[W.5y/ǧ|K¤C·i€H1£+\¿ª¸Â ¦Mt<õ3©¨Ä G7Ân\æ+ùûß¡àÁP€GÓ¤ÔÞmâ´‹ž$WR¸›˜ˆ40N u'Cj âÀÉr:¸ß±Dp€‡É1ÝIwqæéaN§Ýl6­VÛ… ÄH«¸t71œV¹j¿)A¢†KÃIín –Î ¸å³¬7QªUHX &Á#3˜k ãçWûK蜺À–"1 b@•ÚƒW‚á4,¾X[aª†‹¢I?75ɬÀ]ú C z‘P’æ”Ióm`¿ˆ™ŽÑ8-` bðpšÜ¸óé]ö·ÓâOkã*Ý4¢].Q:· eÃÅa¢@Ü1by,ºi–¡êeÃê¸Ü ‰9»0Ži™Ü"D1;+ç1xâ›çDêq¯ 8àÌâ»Zg¨ÈcRÒ6BUà¤4µy¬B¨jù°j-§B3&±uì×™-‚M†5wE¯h<Ñ…áÃtwï ¬ï<­«÷Ì''!J&]0 †Ae2 ©¸.Š,‡<œæ1 ½.O÷pr@«ôš¸ý-Sˆ÷ëÇqͲX„XVGù£Šqx(-e(¢D£Üáóã4“yLCþFH¡T*DàÑ"µUhºw¯=ÔâÊaÝRŸ±¦‰'µ-eh2Њ D žèë`ñJtdˆðÍý†ØˆÁnzç†ÄŽ…ÛlˆÀ#2˜‡8L +—q&¹´{ìÁ8ÁUvû@íĪbðÄ?-ÎÒ G4h¸U$±á»²óCZGS„xLº-à^Õó]X½ Õ­Ö­KóåY¸ËÈÕEŒO…˜ 4µLdž<Šç3NuˆÎ}CnÕPß6¤0q#d±ŒB8vFs㘄ѯ_²Gaï3¬[‹Lœ¥ˆ,µDɸ£ t TIóÓ Ík §[¸kN†3Ìp ˜c¥s¦8'`l°,ð¦ "ððØbóC»‘$•Öª23",&ŒÓÒäµ2T[À+ˆÂ£³Øþ6}A|léR{Í`WŽ~3ÑS]ްáRXÎ ° ÈzCÊç·7ÕÌõkúðà€ð½/Ï„Á( ã´YV…#ðºÆÆÙ'?4LâQ+ÃÀ^4 㸵9ýsÈ^ˆ+?%·÷ª¶íÒ« ÓkL3ÌÕÀuË Ø /#\ç§A !sÚ§<ĉö¥÷Anøq‚È’‚8ÎFˆ¡ ˜‚<æÌcçÇË?OUçõÅ0}1wÖñiŽýןà³òé]QŒ™Ÿ'_ÝU«ù²žO|“„ ׈áõ…/#?i{ÐÕz½|xõOÏ`ÿô ÷tÒ—³~lÍñô®+ż¾m}%~˜ŠÛÛ~þj‡é„ôî ,\Ý£ÛY§oÊ’§7eéoÊÚÓe©ÏO”œ(‰ p9 \ç´³v"¬-ׇgÊë8R²E1F.º%ði“<ÁmÀþ²ØÕzYÁrÙ/›®v»j&X2´Vˆ²ÆàQô¼ (Õlæ‚/=§ªõâÕI}ïTGâÖ=ð9aØ’ÇJÑÜ Þ¦k $k„ãa„¤OÖXÕðâüêp%Ûݰ%ÈþĺZÃv°@á~œg)CÜAŒ(À…<–ŽìÙy†:ƒìì¸tI ÃΡ'xúËñTŽƒ¬³ûבxlk@iµTLca\tîVÒFÀç¦"ðˆQGðúÓÃú7Tà 9b»äG[fÑÓtK,ú<ñ7q6«¶i6» % QžËAМp ¬«46ê-?ÃñÚü>ãðŒ‰¤ªTªÑ «‘q¨E®žøq†¦?>ãhØå­v¿c…1* ¹8G”g#’¡ WùÎŒ¸ž“š á°)+`Ô£ð$j¶p†wd‡µêšYÉ &…«Ø+³/{ž ,g6?bðp“kÙ{Í Ds@V.­Ä Œsõ·’ôLÈ@Öp)¥æ?#éJ‡kT ÛcpJ©+ÒŒI \‹åZþ! 
ŽAjEþ¹ÀC‰Îºö%¢9í°X•x7ŽZ-Ò•NÈÒ` |G¹À}M{8ÿ,B©È&Y¼cÔq”é¬K_¦Cj x~²LÛ<Ò%×íâýbYߺÃcPþ’~MHä¼Ñ£&ºÅE1®† 1ýxŒ›%1âPÔCåÚ•KE¡ÂæôÌ •Ÿ*/3ËÁxDì:_-óÊõÿ­¾¹kš_žÜ0:hÒ+²×#¨qéù $&­“âÌh.PWÆi™>?6Îfŀ͟³…ÇžÛà#ñPe„&¬9Ö¿-ÍÚpYŽ[t‡ç;Ñ>߉‰Ó Oq0†*1ñóäí/‹!×}>ŒÓ(áSSºl¶ó+2‡Çë·~áGÏÉéd¾èª—Ä6«ÖÕÍb¹Ø,êÎ%ŸÁçÞ>6Z U ›6~ú\Í¿dÚî &iîÏ`g‰Óx(.†&Cñ cžd?š=¡bRS†=¡x⫨.lîTõ¨ùÓ‡\½ïd¼¼yó„7icŒ¤ec¸±Ax’œÑZ¦WGüÆN•PäšÇ›˜QˆÇrÄPJ9 ̈" Æ3î™4¬j¯cÜOrµâ ñš¡MÞéOÆÑ<•…Ê2<Ü<Œ¥9ç{Öð¤ò½ÞÎPüƒ­—޹‘÷h¾(BOâK(ÉGæx†*üq•J”-–X-Dze"Ø’ õX®„Jgó÷ŽÚý'ŠÉ<&¥mAÖ¦ÂYŠ›‰6%ïh~Åiˆ\¼Ì¾8MbKBV#²J ¢PK>ʆġÍ C¸ ‘Jñ2ÜÅ£ã“JŸlÿçA¯PƒO¿•ÍŠñˆ`g4ÐÈ–ÄÅ=C€Û"3ŠGB=N­'Ä[Œ’0Eˆ …Ê‘ŒÑ"Œf‘ F„ˆÊD…âácï¡ûƒ¡æçbFÙA„8÷±’F€Ñ T–±CðÃRÓ&PücùC9LˆœŒ”áO0•?³æê¾Þ´‹YçÎpÂH×Ô1¸ Ysðh† fÊ0ÄïðH™¢Ñ°Ê7ívwë¬òZç8M\8„ J±zêð&D=š+¡Òi]„+Œ¸x¢s¯ú2ëf¹˜=„i¾Z.›ß®êÛvׂŠs4+UŠ‹ü64–÷X¾„ÊLj*ÃWò3奸ßâÿĸKœ8Bq%aÙ‰3,Àh… Ê 1H1؉àÅ´^·Í;¯~…ó'T ©Jñç%ø£Ù£µ ˆ‹:Êìž‚ñhZš=}ŸŸóÇZBC1¼^`4ƒBµE²U$‡ý…Õ!xT~ÍëwÕv¹¹‚7¼ÞÑ€°äÌ€Ÿï»ÓâË—`ù Eü¸$$âÇyººcgÆ]9:æŠj"Pq›ð,RŒæR¨´R–á’!2$É£Û—¿|}èÌxø˜[&‰uŸsI0šC¡’š2þ³p……IËFß–Λ?‰)Í~)®TF¸´Ðœ NT}eæ1w¹­ÝÖÝØÕwu;[_µËoeRɶàÁ™þ%ÅͶPñE!¶iªU@üXÈÏ϶EuÓâ'-»Ð«LéÏÆ³Ë0I( ‰ò cϸùÅú›_røâW8 [&öí‚ß¹¥€[&¼é²Ëïßúuۀήýÿž_¡HJsJØ!,¥]…/“ûªNV%EáI_oæßÅzÝS_¯ Ùøû"üÆ¢Ž¨k& y²Ž)é!-…ǦëæÚ6Àt_ï%?Q"²Œ ÷= м ì•$ùš¦m Þû§¨!_wÓ3°Ñ…þ³«ÂcQÌiÞ!ƒ‚(Ÿ¥ð½–äwÚ›R*€(WƒGÜ´º¾iíóÉŸfî”’ÛªŒLÏ\R‘ W9 Z®ØPT{4x–„·Z5 †TëTqhƒ0l§i@\ô<`%×àÑŸ~.”5VÚ«)Ìžã !˹%—]ÆÕÀîŸó®ÄC!Û÷—/SÉújP›ýéœ{/XIBÏÁtO2YÎùEáÛÕ¬T ÌnÀôjð”Åpg8Bl+0sðȃ´ôäzØ ÑÚ>jÀ⯭ÁC©·1 ¦’y‰ñ%y‰š,ƒ_ÜîÖ€š À9˜mž²è(åUKß^¾¼Ö|«v]Špˆ?%S…(ÔVƒC&Zw‹.v`Ë‹Öà!}ç³K76üÇúþÇËöpL!˜Eá%&`F)‚(È ,Y”—ÑP³ÿƒžœ öX—Ü §îìbjN(Ë9Èê]•ýˆ“2 }¬V0DOž2IŽ¥6‹Û§«é§šl˜TxЪ­½ p…Jj=û,ͳ\‰ÔÇŒÉ"8ÓàQ‡Ó-•}{`Ø77‡‚1^º¥š«LA(¶<É¡º·hOFÎÇMq€×&vt.”’fàIÁ¢8Ö%-¾+0U°yÅ?9€Pë¶i:ÉÕëb;ËÖÐT3u¿‰ÖáÉ¥Ÿu{Am¦ å(Á¤P@½Ñ‘•óq£‹¦[Ǧ‡tÛCžt*m]zìHTõuÁ¢~îÍ`Rÿ¶ñ‡ï¡fà‰ÑØÂïê5Ñêüï‚`2ž'/aðrTâ²EiCŠ*°§0`îxÔÞýR;Þ1Ql«1@M4öIòR,S2¶ô+(«À›¼ï?í||xص5-åšòïxŸ(Œ¬&ß‘åa¥Žé ˜¹†ƒOý„\çZ«‚(Kç|=…¾š·Š‘åáƒO‹ª G?{·¾g%Þ~¾¸å(ìðòkR pð_ $‚Œ ®rû~±ƒ9â~Bƒ§Xþo%„A° †Ú§NṴ̈\ˆÞ¢"ÈRF*—‹¾&Ãå+NY¤‹æz¨Q»¹J§ZUÎ[ž¨i«Aš8v ž»¼4#ú@£äšXÎ'ËŒå|UŒ…F°`>ýSÔźýyêBÂi;Ap䜓R+X­®« ;¤ý›iðp X:ù© ^Bcâu ÉQ–ÖõÍö ®*Æø žì Jñ¾&‡›‰¬D]OZmž¹çO‚/¼qm{²IÎvò3x¬BêC÷lH€®À šãU#e’b½í0¨ÚÛ‡©óGNô=?I)“”Jºœ¥”?yˆ>¤$TàžäÄ®#L»iÿ«tï]ƒ™Ôµa¦x‚$˜µ {1hßX±²õ0¶œ‹6ònÀ”*ð0KÍ=jìf·½}øç~÷°¾}útüì×ÜÇì·¦Ì ‡Àr!¦.ö<‡•˜ý›Æèðõ3fÔ†êUM–`²)—`–˜kJÐùC@`ó<1XÛ<Óaê½{øºÛÞo¨5µÕ Õž]rž'¹š5ßÍîÁfÿÎo*<Ák=~ÍgëÀÃSþvXì£`$€3ðNKŠ-j—Rk;·#åìaðä³­À“Ì×wA—¹­ËÙ…Šä§"$R¸¿ž©Xý; éð¨1j,!Ö+hD?kêµù½y ìýÐéðóûþáyû×öfºÊoèTØù$ÈŃC‰¦,œ·³æd+ã€åYƒ‡ ú\hš÷ùg)*kIH„œ´Æ°\TׯÅNÅâ§®Á“¢µÅÿûe{óáïŸŶF3Ô":Òþ2×ݸ¼«‘ƒ@ ÇøÁŽ2ÖÇ9Jî‹åô/YÆðV1‚<`·®Â“­=ÁÓúî‘ÿ}Y›ÔÖf©ù’b»ËI.Ñ /PÌ ç×T<`’‰xI‰>?ÞMºŒ«`pÓ}º4¼"]¡?™g£g¡®A…‡¬]ÃõÝNÒh®„kz}v¼}FPß=b²ÈiìÇmÅXÀ8à×àQ÷ı֭`yˆ¾_€Ät–ˆÖ~Ä銔¾C…§˜ùŽo›ÝÝÍ7Ð*è§ú(qËJ­Ÿþ®Qávœj9kò›®I.ªNæ·f,±ôg‡OÊ©QO?ø£»?ßtýç«ÿ¼ ÜªÕ$Ø¡%¹(%œT9omXr\1‚8`MÑàIv›‘ÇýÃÿlnž5ŠmÞÆ—)ÎQZ’T¹+ ¿"¸n©;=txrú0·á­"ðÊèH£@¸â¡ÍH’+‚i=xH{ñ»ÞÞÕu¬?^M?šTmÕypµ×xª¯­fã‡ùo7Šþ*ux \w yLY]·MšL«¶°§â„Ó&-?Œ©<*A@žäõûÒ§©íÊÏ+ÀÜn“Wjõ7—kÙVP^‚´Î(ǰR3’èúOµ OþP{Œ¨>Ë){ÉsÕŒ¹rÅA•ž´óA¦ZÔ AÖw›§Çõû£þ“¬ëvK¼òiê;Yüà ¾–ŵp°I`á*<6ý֯Ѫ`L5¥j× i,çÕ-–’WnÄ&OƒG=å-eNin7Å+Ÿ"DŒˆBù¯*@Þ¤évb*F#ôŸ{ õùV‹Šèb$©Ø$ê‡õv ž2:0÷ <ÞÔî_xŽWO›çgþ•Ö§´U—ju¯ä‹´*±œOéÃŒ?Ù8~žFzTûtï磺÷Š›ô•ÍôÕ«ø`|¡ü¿šÏ¨ÙçèK¶âŒÀÛ _¢Ða>íðåZƒÇëkšlêƒ¨ÝæïÍî]%¸_(±mªßØ”På<-±ÚŸ÷ÓÅþêïíæŸCpÂDÖk$1²\Vo…»ÓP¾D߯5x’ë:gá>ŸÐ‚”¥#×*-¿ ¨©À8"ÇHƒ§Ä%æ|LØ[ý¢»)_gµ¹Ý£˜vÓD¬Í–!vuMÀ“\0Z ¯a£ (ôoê¢Ã£~à¢l@ÐFBéABËr.-±e+^*÷¯Ã“´Ù£Íýv6øùܶ„™öÄ$\”U9^îB÷ˆÛ-〉œþ—ªœEn¬B.ô'›ºÔå²K©{-œÜþ¬Ï¼úŽ“›v…å•  999u>£©¡(€öõ¤Ã“ÒòI?m®qV±­¿ÎûœRðÖæÆêí]g’j°#öŸ{ Â+æþ-Óÿz•’ Òº±®›Tiu·ï–Óa1}k  ã¥þÉM:<½ÌÿñåËn{3i/µµÀ"Nzk5É53//ÎvÂ*†~€ÿ×àQo(.·bmiu–Ô¤lïecíp^ŠhX.»ÐÇ;(Ø­@›ă<êS…™Ú{{“~r«Ù5a3¸…+‰IÎûbÖ]x ¥5ƒ+H¢ÁƒÑ|ÓpÎ{/¨C $È •>^á*^+pÓ€ðQ':ÿp>æ*m½Õ\¹à=H[œÔ,4qõ–áŠj Ó€s(žäB¯ÂYM 'z©.–È?–ãI–$38hVáÉ6'ï³?Ÿýtµ¾½ÛÞOvÛ¯— 
Iœ­/¹ˆzm®á¬¹jð”ÒqÐŒ£ªŽßjëb¤˜–劅;è@ëù#Àþùtx¼ÝÒp…b…yMÛËÑ)’e¹ˆÐqÛ°œåŠ¡¤@ý9¢ÁK“ýÃËóf®'Žë BNAºne¹„vNÈ= ‚ª†ŒPà‰KÏ^ýŸ Í¢°åFðŽÄ{1¬w›K¯,Ù«>bèÀƒÎè¾A¥OီfÃQñNºl¨rÉ/$ÂRÒ¢w¼þS±²>|mrí Æ,ãñŒöoçog5(ìÁ \®Å›¤«–K`äû-ÙªÁ_Š*ÄV Ζ žBÏ*­¹'§Zæ1¹”bÛ÷Or1z³ÝA_öNpsP’<¬trH2? ™Pþr&Ôo±È(Ü|MßSb@ááAÎ'³’¯_^¶»CÏÍ¿¶_­|Y¼0“µ›IÁU•óQ afE ¥ ˜hŒ†®çìsÍv[Múä!A*A¨D3É…bWÛy-˜á„–ó Ll9 Šþ5¥ßCÌpšGÝ¿P ~ &©Ý*Œq`àš‚ä Y®¹D«Ëuv2m~ÀlkðôpVë—ço«ÛÍnóõÍSǶCí Ì‘x0‡Ú†+Y‡µz†jðâ€9WáAÛcñ–QP#BFWDGÉr)–QÄ5TU`F7`ê5xйOºJm]1%£!H ˺âL?Zj`—ë·FËQqûOíŽ^TÓõ\}ß-íd¡¦ {;ŸEO¶2À‹«ð )Ÿ{“:)NØÇ×â“LÈ µ0rDËËËå„Ô`ÇØÒ5xHœó&k¿þºù{³¯QíÝöëë«ó·|áÝË×íýê(·: ®Þ$«FÛ·¨–£,l» ¤ ËdÑÉájÚÎZ¿®Á–çg¢!àMxsˆb€™®²ø,ՌतÜì£âŽG©u4 @n5xÔ%gÝx½EÝ»í_››7<³'Ì~tò~žHXÐS "\Ï—Y-ƒk j>ÔJíþDÐà)¦;Èó×JBŒžÁaÈÎIxY®,~Š×¸šÑŒ¸’Pá1:OX®daùÏ8Ý='' ÛÝèuWà-\„ OîtÈtV‘BÈ?õíÆì¤ýóTJ1ÚL|w6k…" Êf‡-=ó.átgHÂ’\ÐûH^áfý¼Þ=|}¯Ua®µkyS)ÂZÍÎ/,¦³6 ˆ)Ux°«WøùJ> žê®HJÙ šDL#|Â5 ÖŒ"A*hðd}Í××ûžýËüm{Úõn³^Õ_:`ªKèRôÅA ‹)uu >+@Ó&ÌÇ“œ³t ç}j+£Ì»š‚W9·‰{ É©«Ä ®r'çõóOB½˜…yD±ø,£è«hñYæ‘…OŸ^çø·ä(M'ú N'zHÎÒ]gB ¸Ý÷­JÇâ0I0˜Áè­ÍÒ)W@.Ðßj4xpѺ=^}úñô¼¹;¯¹ØÖ\€”z)ËÅÖd#ýÏýæf¿Y?onÿx5ßÛœmïêå~óuûô¼ÿÁ£';ˆ%÷Ÿ\ 4] yQyú4)ðU HaNÀs.íg9(¸ÄÊSt>ÒØ½JŸO]wêç*W5»z2,ˆ‰Ra÷$Àg¹¢o©3оŠQ …º8øÙî¿;RçM«F)¾`¹à©ë–ü*â*à硱ú©¤äA'…å¶Âb}þ9:`„V{*MGK='5iÀ>W':u¾øž]âýóv½{Úoþý²yz×Áùf÷ðr»ú)s®bå{¯Öÿû²ßLjö¢š“wÎ…Ãj½Ÿñ¿ÿø¯ïÛƒž‡ çÏ?n·Oë/;*oÖë/Û/(›§?ÿøWýûÿzûó‹‡ÂvÍÆ£>Ù5ŸŽ¯ÓU{tÁŽ[Í×5]¹õû`F1+9?†Y æàñðÑÌÚ~¹»y8ÌÈäJfŒ ÒG‘ëìxókî¸cÃ/â]oœ'áGókúÁóúæû4%Q¦ØÜ¡5kºRìÒ–²,s°ìÒŒ¡ ao¡‚xËPåôÄûXýêñáŸÍþïƒõ'‘j³ÇÊÇz³ ãZÌ·¹ã÷Œ Ë <êFŒÖó2M„¼åˈ0k@ÅÁ~ÈbFÍ0Žñ`ŇqŠf‡Ã“‚÷/÷›ýÜ#¶º}ms ½Ï£¸.ÖêvÅø„xÖùÆ|€>¤s¯Àjoòzb¦Ê=´MÚ¦;¹R,À2T‚Ù¹pæÎ 9?`™Ñà eQ_öÃÿœ _¯[b³1ZŽ0æ(‚E‡¤¾èÎâùè ø ž®Ëñmêò盉œ„™ÏàÄ”¥µ”å|ö‹ºµ/§©, Ø +ð„Å XÞëíóu ÷iùÿ˜»ºÞFväúWŒyàÊC²ø9ÁY,²ÁÁ¾ÜMÞö¡-µÇÚ‘%]}xÇYìÙ’lɶºªØlÂ×cÓÖaÕa±X,Vy VBÀxÇ)“Yp½OXƒ® z:ãìû»vÑ& >Ÿ›JDå’Ÿ¯»êpLØÀ7¢åK‡ß•çpð¨B/þøW"wÈÞGGJYå½ÅqÞd–5Õtì*XžÜcÁuYžY,¢õ(gCôn¤qœu…²Ës—1 W#ðÜ=ˆSM3IxŒ.¦ÿ Á"—ÿ!õåŒ?ÀˆÇÉ J‘éH•®à/rð°)@ôµ6Iÿ³É´éØ«jy]>¥…Òý)iœulç°gYñjt÷€‡Š0áL/x {š%>Dƒ \ï¨q <â2[[Aß °U¦¤ª¢ÁÔ”Ì8 Ÿwîþm?Ÿþˆ #¸/Íl6yh›ÅîaúÐN± dÍÚ` btXl ŽSòf51Ž_.‹‡'„b¥ß;@G“ýZWirÖèëT ÏÚÐ/^J†` vîˆã„E|¹ò{cRWØÐ9xŒj½¹õ–ºÏ5ÒHAÀ§Ù|e|ÁTѧÑ: á¶4Ž]2âúšÿÈã_µÍãë×½m¦è!¤^X8)Ž‹ÇŒb;G³Wq:qB¨ùMãd…ðzúm­¢àQ¶ùyz"¼XL¦›çUÂdæ xˆÖM\?Œƒr(B_vWÁ­cáñ£š‚×½òUš‘fsüp‡/‹­dŒZ€® q­‡¯ðv™.À'³ö©]$ë9‰ÿm×ít7êBÑqí“h$9Žsì¸Ìè e ÷²ÂÖÏÁ£ ÒzíÞœ _äŠdȤì<å `HqœpjøÊÌb:^)*û8xØ}?’ßÏxtÚ´“õ|Ý.æËƒsäz•¬RinѸ~Ý8ÃVr5²²æaÏümz™d8h×Ë$Ç?7q=zÁH/% ÀšÉÒ9äøñž`†/”‡èXLæË§v9öÜI ú%&£©©Ììn\oyTrAäÙ2iÒƒ*†Ë ?¾&9x¤ò¦ýÕ”DQG~Ò–Ú/3©ˆ– £Ê,àÐf°ŽÐÈ ë“ƒGØÈöÛ.¶—¶ÿÕòÔ¬Ø:Ý/5•*qE#â”iœTCªçsƒqôö”‡Ï±Òyï x<”(PüNb˜V½ *=‘Æzé¡€šÉ=J[Á'<-üxÄ åBL)ö¥ÉüdyðFL¿ì µí‡\ewãzžàp s9ÈA8~~.OÅ’@¢Øz™x…é9¾¥Ñȱ¸§K¸Æƒ8ÉÀê+¬gv£ØÙ=î»ù) ß.¿ÇF'>Û/> ¬#hçÙ‰~£”º§{9¥sð¨Ñc!1…{qh[ŠAõBùPnmçò“j8Ø<ªÀx5»ŸœD¸‹Ë¨“™ë—™Q©¿¡ìÇ9ö¥`Y6r ê ™ƒÇê s.³¯ïˆé:E.Á#¯»q½÷ÔåœÅGFo+(™'èr[ôá9ä¢Ýß/µThÌ8k±¨á„*óâb9€ÇúÈÄã>}ì—¦êÔI\ƒÒxnYnOf’ÒË ú¥ãY`+þÛÝj»´ÍÚMÒõBwZéäúåÉžºÄ£gÀ4.k!ã!)TØ9x´+ölª_†˜¶} à«€±2“ÎÎÙ¬dà´"%ñs 8c%+°):šuâš$À ô¢_h>Õþ‹› v¸O·qº\ •A„d`î)YNÑ O\ÒÁ¡x✠TÔ½Þk•Ò8]DnœWÚ`Æ'޳¶@8;t=G½‚ÊeàÉ)¡þNh‡|Óɶy\/¢\wB“ýB `„³Ñ“B@†è[81üm[.é8P® 8x@yÉÖkùÓs"½Ò»Z‰ãL°Vpé ­©`ž9xløu÷Æ»ó\¶jÞ ¬7‘RZ’Ò ]Ö»q½moZ°‡×~AÁÃãÍÐç+¤ì˜îü‰ÛhÀA1C߬Œê¬±=QMŸÕ®ç©1QÔáO(U÷ð¼ñ¦ÆuÛNžÚÍü~>=Ýü‡þЕcÓÂ#©ÞÝ8« Ü|ð- ®‚¦xd­à·}³ù±ß^žc=ô M¦²íÞ"ùiœºTÉót¤—â¬áݺŠuá Pv|ÎÉT!þ@ðhY²jÆn³Z¬Ͳ}l¦)?·Ýmo_¿«8ݦòÐÞkÀf‘’Œ ÜÊä­:È 5µ™xBass2Ѻ_f ¬²VFÏ8NU²FIzrfq–ÅM7=öhz”è³= ªïá­:ov¸>æl'¹E!¼óýº]y·OX£¤ÌY5™«} ÿå/Ýo žÆø) <<ì…³ôNig²½xá|1>ª¾'¬]/‡"­A­>´/¶£F¡ÆÑIš¶ëÍê~¾h··gß{×0à6 @[« Ó0Úu øˆCž¡µUˆCÆãʹb/âŽË³½´¢O¼Î2Ú´¨qþçà᯺*èN¾WÅ,‹‘¢¿ÃT¬(Â|*J€ðEBB=ûøeÅv#T9nHrO÷r kDÉÏE¶[úQ‡Ê>i_ñÿŒœ,Þ(¤ìi²D²Œ~iÈó«´ÙPñh50‡¯WègåŒ~øm'n]Ž+šê±–Ã\ƒ">EL¡jù\©›rL±†Á”¢Ðkfü—´L<™½ÛŽ©ÃMºE™£…5Úæâ%o?a(DŸ¦«B!2ž Ê• Í¿+F!M>7‡!Šk>¦©ëPˆŒ'”ë Ú'~$:jÇ%ÏIZž5q.5(Ué„¥µôÊQð *é‚ æ’‘ö¾—½8sg"¢~ò$*mJ©>™'àÑ%Øc2–x¨– Ø> r€!y.ús‘Á¸:ÛKíä-˃|*ºIlø\$qªð†ñíU²x„Öie 
Àô¢Ðnñ!¼Š§Ï Žu ãñãlg†rú¦ì>ñ!Ê!4AhÂDŒ¨ ¡ãÑ#‡äÏn×'Íl¶Z¾Ö=þ©N xЕ<%©F г§2„P`¢r(³­ã‚’ñ(® úÔ,æ³f¿÷÷öîaµúq¸dÛošw)o¤ÞIÁ’¡u“)yAŒRŽ4«J/ÓþtH”@âñWò|45â1î<†PÉz$aª¦ÒæEÆ3¸ Ïõ´ÑÁà¬â¹,#粚 ¤TÜ>#3Ø2ƒ‘Ä`2 W'…’ŽÇKyš¯ãžJåůo¸ý™nçéD 0çÙ+gµôhÒ+«}á§Ô¬zÆPáãq¢@åÍÃìáTU«ó &Oóæõâ¹@z¨™NHZHMÞÊ•p,ÂNv©*hžƒLù•>]4óÇ>y"+©»ØN£ø}<~(Z1¹t´ºzòð¨ÑÖýyÓkøŸ”µÒJ,©.Ž3f„µ?”¯ü2ü ô7Àö9 ¶/•õ)*~þý¡ë™â!¸ô§ãøÁ3Pz<ìZ!>í9§ ªí¬vC]A•­d=QpØÆTQ2UÂÈ'ižµÚï:Ùªq2TÄIÇW°ŸJýNˆ!™—?öwídû…ýøµGÀºœ”9i–lœ£ÁÕûrð(røN¨+@†˜¯~ê*…†Ctv)öHÚ2v1 >žó(ˆf ÈHóÉàãd(Sñu†!ĽބÇf &«»ûýöP¡u²M—s»NÜhø„° }(opÔ¹$!>­8ý*D<šy 9ºÅ¸&u‹sÀHK˜„͈ààó)£¥$Ídʤf$<®È9ã¥ìó{¡·»édýc>i—»ùnѦüÝNê§Œ1^P&‘í}ŸO«\ ÌÎè:”I~‰#à±¢äù$CøgŽóAQæâd‰³Ë 9äÈ› I“tuP†‚Ǻëì³ô›}óc;iî6Í4É?ýN'p4 ]{OÀ²Â߃qg…:1)ª€éx¤.Úœ ÷$r)pª€³3Pv@Œtò|²P§ªY´Ñ–„‡ÿ\ºßÌwÏ©Š|cJD²œ/w[îbÝN§ø%Ng€pÍgÈ0Ç›E>‰¨Ó´uÜa:W.yý(£“+°½=}õ¶ö­“h4â·H üð:½yȳÉBžZ¨C:žrB^ÖçyÀKN BãAúôÄep= c>¨“uü^Òå ðr!î¤ÆÕ¯­÷¤`Ë©ÿC„ùʧNACåo4Pð„òÊïÛ›NâxPVyeV•ãx>U¨3su\S:îFñv~©ÁÒ‰¶ªàœÑdžº-Pe+„‰ŸH€\'…?Ý@ð€P…ƒîïª ;‰ÇJ»®®¸ …×{aæÓ åî:@‰:4ÐBPûüÚüöBx‘îk߃ÉÝ~9;ƒ$ùí´£à…A=È8ó‰@ˆ®”w<¡(Ž›ïãÑLHn¯•Eˆ€áÌ'‚‹i"uÂÜñpà-Ó¥®ìß›Z…Ç(µˆþ9!Ê Þ½œïÅ—­xòBÅkiðšn(R¡©?FáÁF}Ž:ÕR*ÁDšOêL*yZ;gIxìðò€/âkl3ŸÖ1$cÔ_ïÁ–¯p“îþ)àë„„Ò…•¤à1fhI­+õ®"s,™…ÜzZ…+2€V*B€U»:«ÙÈT÷Ç“]ìóƒ–!ˆÍÄCF+i q3#ýàþ&d°ù” Î*Q‚Œ§ì±¯{)¿¹k¦“èSý|îÄ‹‡üŒóÒQ†œ(5e> ¨Ó0uŒ>5wùögB{¬)<ÔgB@üg lóWÀ嫜ŠÞ×¹û£ãá:v³&YÖm»ËÈëPxüÏJ”ˆ¯!§bœM êŒìÙkdz¬p,€%ûë_E!Ú YçÎÁ*ë% ·LðÝ~¾˜]„õ±ƒ•N8h.=„­¨«,œOaiŽª<÷)xáq_8Žã7šx/Ámó¸^´“f½^ÌiðÔëb@Æ…;dÄqªÏ,~¨g6ë (‘Ú7kgÕ_È–Á‹£eÐ}–!þu)U@WbB*4„MxŒuh·º4nxÊÿbµŸM¦›v–²š›EO£Lï4ªÎøm!<¸wÙÝ`³_ݺþþÞ^þÜÕ‰´D§  CR•`!xŒ.Õ–!wƒ¢åFmeÂÜÐf°Y¸‡ò%úèÁ>?Ðuø¢£• ; ¾d;Øk¢¿ú“N e6,Á>jnƒòQf1”KÚ»àa¶NWáR§‚‘ïï>Ãí ËÖ¡Ùaa£n‘²BàEsÒ¤È=†+Lf(³¬€Ö…HãÎü º÷%Oç2@Ü/,H‚±´5"&•öƒèââx»ÚS³ž'KÓ^ä\a7Ƒڙ¢Šòº§H"ýp–y´ðF£Ñžãi\RŽésBdŒ%àq*oשâeS$Ç'²Þz¡æW¦q‚Û¸"‹j6=¯0ÚKQ•zœñs¼”õ¤v¿T¬6ÙÇé±UÕ™˜“ŽMIå&–O‡+2ƒ[ ˆgñˆëÖ]¼±îêTv¾Ï¸sPÔè ÉÂS lWRäûƒFÀ>\<yhu—ÊrÃ4ù´gÀ25Þwpð°³7ŽÚ»r®u^‚ðíâÇIÇmUQ]t˜Užépð@¹‚פ‡¤ãøT]Ìá‡iŸJÀº<ÛÏ!¯Ñ£ƒG³³ë–Û Ù ¡Ÿœ‡¨ûlj¾‹ß« ²,³èheš÷<*” xü'9Äm ñ¼¦,þÐ/¤ÂÜë+&ç8XjT*åàqÜõxßF`›ö{³»´YÈY1x02µÃ0yÐ’ëß”eé™6én4P.¯9(jT»cá)×-ò {ØßmoiMo_±÷ÛX}+"¡¤¶¢?œÚë-Áö!ó²V“ý’‡‡}èž?6ß/¥ãé¤êg+2ÓS}y±Ìþo%¸Æ@|†íиí`¡ˆ;EvqðöÂ|ÕUPY4Û‹¢)û¸+L¦ñGñØ»Hñ“þ‚ úV¦Da‘öÆÝ8`·d/oÆW"OPìÓ÷›f»Ûì§Éš^ˆ) bòR{¨åŠã€}6+È-:LMºyyk Å0È”ëR ê€bü‡Ú,4Ëïíö—£|¹i–³›uó¼X5³«èSÒáèá,«ä„¾£X»o"úÚN™/µ4æÛ›'yû?ëY³kÿñûÍôa¾k»PÍ·/ôÿ~çÛ—xú·2~çO)èûíËoûæùv¾ú:ݤ2ß_WÓõdÓ.ÚfÛþÇö¡QÆ~óÊÄÓª•waš‚ºkàÎÃ}«gÖ ˜ŠæÞ´ñgA¿¿Mwè»`„›9›>ë«Í´ývß,¶í?¯IG‹”¡BÎõ;'#Ä_o¸é¼Û(“6Npq³[ÝL÷›Mª.)ð½ÝýûÍruTb\¿í“`¯"s«Ú|@vZîhÑÑßÇåøkú°‰ìÿÎÖdü;ÛßÝÏÛÅìö?7›Õæ¿çÛÝ¿.ç‹;øÝ¿$í§_=I~í¾ûÿj—ÿÏÞµî¸qcéßûýXØA·ÌûEàµIfÇp;`cP-U»µÖ ºØé1òXûûd{X¬’Ôݪ")QÕZt%Zª¢ŠÏ9<<yÈ2*7Ú¤ Út82J~ò]a}… äO|úýÑG‰4Õ==˽TO u–|˜.ÓQÏô¶³äåÔdXCÇ衳ä}†ÐŽho9_e`)f çú[›Òéâº×9ïÏV¯_"|‚žƒ¢oÛ]:Wߤ‹å»ù4Ÿè™-ÝÝW€ð‰÷Yb¿gàß/³ùYаÈþ¡šÑùþNç7I¿ŸçÝ3Øðëù£s’ø çÿ¾v¦?XÁäžš´Žà E`½qSwø·Ó/ÅOÎ3¾Ã€þ ÝÖ‹gÞòxµ2á‚€GáûÏzX]ÜõíwǯJ«‚#¥Ç[/_sõ1ù~8.®#9@ÆírBþ÷*–Þ„™XÐL:ÃbEfÑR&$ü’b$´ëÕv¦ÒrÂFhýÑÀ3˜¢ œ`Ý›\nïÿ£¼^BJ!Æ)sÒB(Gƒ'鎈\¹ðÍn-|ù~É7MOúÙ(a|œõëõp<6.KĦI‡£Ò¨p,TZ¸voÚr¢*IùÕ"¹*|ÛÁÕÉÊêá}¶X–ÆLÛz–¨ÿVD)›—;;׆‹d2]&éà%&ÀIîý`l£²"ýÂü`AÍÀ»>Vµˆù¥Ä£EºÚɤ?&«1„ivþ$9gZZ) ’Õ•bºž‹ÉkK(_OöJÙ”ñæÛéòEµÜ\‚~òmO7ÿs‘‡0½õ¸Wó˜uÏ¿7£t/ùù”¹È÷)oÂï¾¾›z½_³Ñ(gpaù>K7?X?¶gf™ò¥›óM²îf4»h6,¶ZWÒÉ ¿b—z5PHÑ1³c67{*Eɦ^s Q uiÅÂ<žùEÅÔÌxÝd_e›˜Ø>í/ ÿusãoshß;ˆ8§ƒ 3}7XëA ö´˜y“^šœÎ~³!~éë”ÚZ;˺Yšyñy8;M¯êô:fb¨ãÅ,&oÞŽõýüÄñ>»ÊæŒÏ»}±( ñj:þ äy›~œ%–{¾73¤Ån]z ”úæÛî«oµÆq¾>|e]N®;ëžîܽû°š³jÃê¾Î,cÛ½Žýœsèò#耗ߊyZrÉÁüúbuYî0]Àðg¸´È–…Êžd/tí 4ýÏÉí'K ÉpY°¿Z;ŸCÿÝ<}z°ÿ¯yãÏ}ÿ¯$©ôÿ¯Šq0dÌôuû÷ŸPÖöüX´‰t&ÐD;Éò}îøìä¢u€ ÃZ™\fýtµ¸å!í§Ñô£Ùj42/Ngàó’Áô»õPõå ô+“‘¹L!ÿûÅÏo“ ”¯Öƒâ;Þöú¾ªc_¡!ïü2†?0xóeŽÑÈó¿„Uûo¯¿X¯fdÅÞ'e@ˆÇ„wM”Éçdñ´—üw¤¾ˆ9’tžUʾ˜ÏKd×Ñ|&^Aµ-†=;ù+°Oàžžo4cSñ,¹ÒØbQ}"¤ äãý&m#ka—Å›èÈòÈŽé-£ÿN- VVïÒÂð·å ÝYÉ7î×Hh— !!¨vÎÄ–«é(„SSãrxu.Ó2ÅÄÞî9-¡”ØÚù ,~õ¢ÿní>LÍcèU ÿÓúØr5Çi„éõŽ ©ê 
VN¹ÈrጿÆïTìCá”øpÍŠ®‚˜—׆y9Ž«³ƒÃ4{kóC±Sl€8Ä㶆ê}ÝóC¢æü´ÎWlu[k˱­œµSÈß)Ðs-ëǵ²•üQåïØV ªXýΗ¢\u~}ôü[£Ô0$)d”ŸfþMÉ€^nåïГÈß)srþžNVéüxpNƒ×ÿò›¿|xÙ¦ë´é:mºN›®Ó¦ë<Òt;¼jeöS»‡aµ$·é:mºÎ‰¦ëkZKÌä½m/¿Þ±j Î[Çq}Mç¦wçv-Á¬W»(—Ç’æxݬßÍ—Û¡š^ }2¬þ»½Jh†ÄÓ…°ê"$…”ã]ùBd\š½H.¤æ Ù{Û&÷Éßj–ÏbåÄ×ä¡°ÈÖb÷VÕv’ÝЖâRî ó¾˜äë1€˜ã.ƒZ„9`‘¤Ÿ¦@”‡£Q>==ÏÎí²yÙÙ(¹Rõ¨ú¯ƒ‘c~_ l‡ê%š<Ѩ‹Ý$ˆ‘žã@¿•¾òX¿¿t(%2þd¬eü-ãoËø[Æß2þ–ñGeüþÃ0Û½q¶eü-ã?9ÆïoÕ‚°£nÐÑ]¤I î%¶Þ{PÕ;÷&5y•›Û¢ Ú-¶ì^(êÏ™,5ùúÐ)²/»1¦ŸÒáä¬LÜKÆÓyfr"' /ȼÿ c¾H®SðGàóL“‡“~fI|º\fã™ù{—œ³¦ä\Qà9g;÷ɹԻÈù^ÒŠÞÕ‚hB+'f rzr=ýVòÔcáèŠ*Œ´änéHÉåèŠBïWBy S-Go9zËÑ[ŽÞrô–£·=*G‡aX`Á‘{ÖÛ¯øj9zËÑO˜£+†×’zXµ¦Çä茘WïæèŒv‰ˆI^¶\^Z£0Þ½ ¨5¡&òࣣ&ìà}~m­•´ÀmÕä¨Yç\˜m¹‹_+û^ Џ ¢f" (G´<¥µÍ=º I'zóÆGĽÖÒáfQÚ-L›â^eÔ&²vÇo˽ZîÕr¯–{µÜ«å^±¸×z– Ý= Öîøm¹×©s¯ÒZ)Q\3·UStÔ¿ŒwÙîlbU‡¡L×ÍË!­ÄIq¯=èJ#'zŒèãâ^Ò©^òνlXp¡‰2-ZîÕr¯–{µÜ«å^-÷j¹W,îe‡WB!þcîa˜ Ôr¯–{<÷²ÖJ9çXyXµ<æùÆÐê®Âz7÷bvJa¥kWŽX±ÂÄOŠ{èµT y —ìQq/Ûj §~Z«(‡ucÜËÖÈ8R˜y cªå^-÷j¹W˽ZîÕr¯–{Åâ^vxåŠéúĘb¸Öªå^-÷:yîe­U(0v[µ GÍ9‚§)¯à^¼Ë¢ ‰ú¬=[ŽTïéúà¯ép™\¨++ÓRsíQ™®Þ{'ÙÇäÝ<û2œB¬o´Þ —´quW·quW·quW?ê¸:h¬…P¥j¬Õ„LÒÙltÓK¬CH òø´P Aw>üfÛã 9ï½<úVÀ£’`!°[³¦ËtñùŸŸæéìÚ cZBØ‘ƒMp/y¿š&ÿ=½LPòäëpy ÍO*ƒo‘p¸Y4Òº‚”(H- Žc\S ŽèÖjÁ{Áˆâ<8ëϳÔLÈCç›®À§—ÑëzV»“®–צ_[¯›?A“éUb"¬ƒ!2Z>J&ý €Ì¶flçÙl:_Ú‰÷,† FcØ&¸M§³J<Š"Æ=ð0U-2"+–×xò™¤ãl1±í/nËì¼fžßÆ\Â#‚%VÆ —*‚~³eÀT<\‚ð&”ꇊJ5‚ÚV¥tJŒk¬€!9rÅy5Â|å èÍš`Çì¼Áêê3'bª—k†8G>xt zoË)é}¦ŸºåÄ1a>¸ª÷‡…(u³Œ0ìóf4 Oã>x Ô¤ζþ¤K}@p@{Ü9@Ad úŽ Fˆ4€G†jʳ4¬±ìgcó~£þÂÈM¸äFÂB;û ¤’±@%>NÝDO”T#¸AÝxbúíçÜnœÎª%wëÛ¹ý£t‰¢Dª…rÂf”@u2àâM„¹`ìÐJ¥=ð~x÷­ˆ6¶:eÎ"9Ð%œå­×'îÛ“›…ÌIR£SW ±âú¦£SæìéR`ø'<Ú"BÍá˜J ¢ó€-p#ã¸Ô¼íÌ9–Ÿï§ç—«É`”*‡ÒBkæ 务÷#Ú0`ÔÌÁ£-¢‰Q!äÇô…?¾e&cÏ!NŒ¤ÒyÀ¯£ù¹„0[ƸrÒÓ0Ý„þýñ(}$g°KõÐx‡ ’„P7p3ƒ~L§°§áúÃMÄ ‚CÅ8ñÁ£é–sbîˆÓm ’))=Ä)ƒ‰û‘ÍØ8CØŒSâƒG³„þ<[º¼),Í”3r‰ˆ(ÁÚ-:I´ f•òF¼¾?ªå‚#ϧ£ìr8˜{Å \÷ÖÒì•‚ìÝ¿TØ!JÊcćˆ1˜œ tÙD_Á#ö›Hûýéj²ô$UFˆÄ%DÂaذB9*÷›8hAGÈ´jb´Á:ÚÖ t­kªU ˜*}T ¸_xâ_72€‡£úÿù®ÁTs)‘RÎu(‡BNc¼þ€9iBûþx‚HšÐ¾é%>àÚÏtó@UgeDÕ°‚Š•ðÀ£âìÓf¹º1“ÀHSê–¬@\íg«{¨Û”jbå.íW£é×Eÿ:§uiʵ¦¡Rcçê“VXªhaÛQ,Ò»%¤z™4¦òýñ`%–ÛGªÎYi†©rG$J‘ÐA=š {Cä{$xÒ2Á³6¿3„jd¤ñÇ£uŒ<»r“ÔÂfiß²2ì˜u#D‘t¥8˜—só(éYM¡ÝÒµ·Á±ÂàjÓšC@4‘¥bð¶˜àÆg¢Ö]ÚÛ}çél6Ÿ~ÉæÏàÏ8g·ZœV&1@iîR&Q‚#'å2åPèJÕqqkêø…jBÿDKäÎR2åÚCÿkÖ²Ÿ8…[œZqó%5s€Aäõ£uH+4mÂ((œ3/<úPR¾åð†(uw"J;”Å!èÓŠBh¦Fj›~ž/RÛ˜KrœJó:F'RN©Š·6€”7âÈ9C@I¤ÁŽ¡ã\jÎმ“Úm‰f{ÐÃö±‡ø¤G€Gî715žN†¹ôhÕ\¶Ò%[É…`ÌÝÉÉžÙ©§ØÖÈ€'tsÓæ+ϸ¹¸P~?7?´ÐÉá1Ž—8¢0¥Çü¼œ?J%šP¿‚8C!<‘(cC]4ìt²ZÃeéiœ„ج±ú7dko˜÷„/&hý„ƒ?ш+ À#œpØJ![ó´{£>(ÐåÎ1RT2åtŸ&É”Eñ:‡ô´ª‰½Ã¦.´"x‚÷×ùì-‡íT0A&UL9!tðlR4£ôÇÜľÿ<ŒÆ™Iò¥rŠz v‡G˜ÈàQÑ,Ô¢"{Ê!|VK‚@èFLNÏ">xh¨Éõ!˜ŽË“™9ÃíÖ®Åþb8˜ÍÑWÝÒøºkw‡SP«ÐÈ¥V†•9FÅÙ†9Ç£u貉À"O<.¼Kš;®Y‘j—HƒÈHºíY0¤¢Qà‡jÂÖ>ï˜Wúù)¬‘¡Q0…¸‡inÍO‹¤Õî ¡K›ùvWá\€r(8O´+¹ÄZsl-Q{›ª´GToš*n«•PŽÆ“c ð¾O’†.ÄGbIO41ë‚GÅIÒ˜®çýõ™·|2‹§BU·ÂÏ>&Ÿ‡V|‡ ì%ƒá"?Ë/éo”˜Ü9ذª50Ì{ÌdšÖ4âÚýñh/]Kia^ç‘-–‹n.þÍ»}s§ø¶„!êi'ÇjAS6ÄÐ1 .Y„`©Î­Ì çÐDf:ÂÙsŸW—Ù:sª8²÷|<üTöQæÊÕ A,›˜×À£¢ÇU…­áâ»þØem–1┩@œ8Oc€rœÅ8pXœýj°áxØjDßþxäþûl¶µB\Ûc#®ü¦˜æÈyl)‡£‡{‘ì2  $ªàQ8:‡2gùVÒe &³ p;9”V5dÔ½=è`£õÇÉqê‘HHî‡Äsîƒ Ä#ýé|úµR¬dOŒ8Bo ÅBo§¿2å‰Nûö·Ùà¬Æ‚‡ãŒ«6VºÕ¯\g½*„ˆRH¸& V*šÄ´ã€&ÔœãÏBðPÍ5rì¯å¯©ƒmÁ ¾–8O SLj¢clˆhÑØe[¶Bð„Ó€]»Ùýdùl}Žsn©‘˜º·3@¹˜­1-:¤ Mœ„‚'8Ñäþì…×Dس{!Z.YO²uËÓ·gfŽÔ‚ƒffZ*mƆCÎ…›7y(:Oÿµšg¹¤Û’|ÛCqÓ–´³Û“o{›8,z0{úÔŸåZPñ¬©vËÁq¬é~+³%ÙoÒÆ zàQQ8ÑÎYG>€fZbB¹kVÊ Bcœ4?6 hBgŒ†àQ*Úi$žr  ®°‚uαsk*”Ãqv—ïeàþ( jà,² <:ÂYTõ⣵ù¢‹˜"’(Ç!äy9¬q´ƒL6æ vzôÑ# ÃG=²,@ÈÒ!d°iE1Ó®FÇP1Nâ:Ðöý᪰‰<ÇÉà¾+·g;‰ë‰À–ȱ§$/G‚O»y(ciÔñàÂð¨Ð“"ŒØv-_šëÍKbXý1àP·–cª¨ £–*x­í¨üøÇÂáÑ(’+°ùnñ‘zñfÎÔ%Øå¹L9ºÆÉ6C0Š"€<’F9è,~Ô¥2Ïת]ë†y9†t¤ã0㸼€ <ôp(??Jë%HQsîàÆ¶\ðF¾1ê6ÿüÛ0<1Þ„æ)ðÍ[ÆwâTKA%Ñ.2åjßêåµý3Ž MØ@uè1rÿÇܵí¸q$ÙgýѾ옭¼_ †- Ì` ßv0€Ùdµšën²—dKú—ù–ù²,^šìfUDTe¥$?X"“U'"OFFFFF¶XXÓ®7𺽕ÒbN,´“>Ãe–ù¹Ê@ ?|ÏsðHÕ·VXÏf\Óu©ø‘àv9¾$%~žŽ‰"~Rœ3üûV³%ÃúP ¦9]BEÄ×3fgm#*CªÐÛ»€e‹ÉÀ9ü-+,‚r6miŠð*ÊO‹W1ôŸDº÷Ããl£ƒÌE«($Õ¢PŸÔ¼xb¶…açþ¨ûAå#UkÝÞ3¤P–>œ’°І ¯5E8EÆÃŸÏŸÝmÒõ 
}–i”;eÏç§cl}©Ã‹öô¥ªoõ‹îJaÏjØî.^Ëø\4âŠI@Jâ¹’å¼zc{ŠNG¦C¬¬Â“ºÒQøì˜ù—r|'ñ”å¬_²·+ñT:È3q"&@Ðf9´º&5@©Ãõƒ OX¾‘_N¯¸ª?áò–(ä©Ù^~±š{ä¾Úð"Œ~¿Å®ù¥Å.ÉsÀu(B7§b[pè&à‰°œs—¹Þ½¸F|n¶¾úNÅJ#d/ó¡‡>J !Š\D³ÜÐ,Ý/ ¼h:¤å4xl#{sûðe¿Ûí —Å‰ XrNºüÉríò¯]•© 쩃˨ÀÔC?_½˜¯ÒѬhúd<*uŠªÛÞV&¯m{WC‰'µÙúOI_\NŽGËxø‚È’ðú²ÈÅäf|õľl©®Ã¶Úìï1­N’*ʶf\ÙÖÎÙ$,Îò‘ÛÈJÌá¹·§ö$Ñà¨b/®»X(è›Ð„D"NN»ûjSÂ;‘Y±¸ö׺:<ÁU®_kÇUè»5 Áy\èG<_þ]õÁ@p·yê2$¼„•Ýà–±D>O_J°L… ÏÒôÓÝñá~˜jsa\‘¡äç‚‘€³œ]êE.ä¯k°í?º O¬C<…áW5(8àÈz¡ÄYÎâ¯]•¦ àí txB¨ã-ªô)¤ï£d¢4¡È¡¯?,¢®1u°ø*<5J_UšàQ—š9“DS–³¦ŽÛW•¦ üÐ!k¤Ábµˆà]•Ê"g5 Éyäd8Jɰ,gÔcÅkqY²Ç Á°¢¸ú‡)ºlŠ@òU¸ÛcLÕ|ÿæÌ¾,g㌨t©²ï×@éP¸—[<¾ÞÈÇ/»ýyhî_»¯¿6¾Eá|'ž’# 3Ë9SÑÍÚN´¡C&[ƒ'úÞÈjýpÿmµöÃ×'ƒ.øôDMæ©”:ÉrQÝð§K˜_\ºL¶7d.öÆìö`ŸÙÚð¿2Yá22q“_2¼)ÝÎ7ðwfì–阣íq_¢À㌫gqŠ®„°«üøŠ^ª*Õ€¡vt;ƒ ¼= žàêÞ…©qìJ ×öEÌ;vÔ#¿È©o¾–PR‰­ùÝ–OèåÚ;Ô¢¸((Ž_/d‹% \åêÞ\-á¢vógZ<©æ]¦˜úK£Sû2&ËSqSD/`g9[#=1—« Í/(´xjæ(¯¸7(¨Ž²MvÂ3¢³\ŽäkÞMVà§{H>»ú T«îÖ_‡ÃÇÌ7»¯ÎŸÞì¾î«‹Üê"¸z’dŽáËÈߘA¿^9ËE„Šºž¸B Ù®ZjrØþ㻳Dç­ñãëÖ1]¿×J®õ3ÑŽ¿Ù:ËY¯Íûô!«b/æ£LÏ ØKN ¼›P¢ ‡ -Ì <ÅÕûÝ_Ãæç&Ù‰æw~ëÅz®0ýÆÞƒ )iWe9KXÕ µ«xGÆÕcƒoQ^U¤p¶ûì7sw\”€ÓŒ¾xýi¬X tpPUx°-–+Yˆ€OàIr^€=‚Ð(bÃu ð@íÙ¡ÁC5£!½Š‚?„ÆfÜ©TªµmOjÅ¢°ƒåÐà!¨–ÏÓóþæUÔ˜„À!xÌPÓø3Õ"ˆê%¶–²[;5oq¯ÅƒMÄóëùÑÙ †boQÊÆe¹‘11êlGMöN_ÁH¾¦" ˆnÆ„ðø.ö`³¾_ï__kUpۣϞ=¹ñQ›g9o}S›0ÌÐØájCƒ‡ ¦E¸nP >Þ!.€´“²D{Ø‚Y̾ŠÔèðº=ìãuÐÝÃô.[Øõ~¸»_ñ:;]‚CŽ<’=I‹@%TÕÌb²nêtPà Æ×1—ÑÙ …¼Ï.™ƒ¾el'Ú*‘üŒ„¨‹2u(¨ùØ%ýØwËÝ.,<}züÆ¿–¼E>'wgD^ÊÝe9[«EÑÂm¤€ì\‡d¸ -±D¥Ÿäy^ùuÍ 9JÉóÄX )%Rw!kÊRpœÑ “üÅè8³:ÉÏD$ç3ñL¤EGöÒ/­@êMM…'5 ä¯5¸ Bj=Q2 „Ø‹ÜX‘ŠÏðÿ}7lòO¾¶Èúrhp|"ÍEΨ߰µN~hÐChO uK‚K•Ô¨.Ÿk¥c Aw”‚ “„•@½ q+–BNZ2úò/WüÈÇV0<7ÓöÂŒ™³œ4¯²¯g5XS‡T¨k·¦YÏûa?°¾4›ÂµŠ/Cª 67ÜtKf?×à§,t8ü5xÂÒ™,¯õöyŠ:…oÞ§è(~–£šøs(«€l‡T‰›9¾m½Ù÷êtQ¸Ž 6Ã’°S@X:)ºƒðS§O‡L®†ÙZ?ìïW~¬&èÔ ”| ÖKÛ,gã<' 5Ø¡ƒS Á37x_—/oâ£paxÚu°ÂÓÕ"X£Ã` æN_C°=¾¾«ô®Goc­À ~å#ŠÇ[–³s‡°Ôá°©ëqG§ÂƒmB‚;þþÛÕf](ÜìÆr $å,"¿©¨RØ€°“WL(AƒG%¼¿f(V ‚CcRrYnFÿã|ÖîQ¢Áƒ3r¬«Í~·=þsØ×ÛK_ÀÇ]íw‡ï%·…Kx$pd’•ËrêK]«XHèhðDWãDøE§¯öXä£O9 !.Ë©CŠ|Õà ,¾ÏœÏüFo‡áþø…7VÑ–Pÿ@)“ ¢˜©Ìr–ªÜó,#át¸ÎuH÷hðxW7éw8n‡Gµ _9qžÈK¦†_'y¬°wUÔ ÃeÀ<¤-ÔÜ·ûãÏ›aNÂC(0IÄýÀH §Äƒ£SÝü†Œ ˜ØÁ@kðk™¿ÔÞèGvÜâšlòÂ5ËAÂÐ$,k‚vl6W­o­Ãc›…_/Çh¹%'4à,gÔÏ×›Ù"në:|x õA-¾ ?7ê-c{ø_ß< ¸Ù^v#­„ž€[Å`Ú*§ØáóOÇ“Œ]^€ó‹æÆ¢VפËaÏÞHséÞukM\ zì`4xhæÜó#÷—éjù3‡ó‰-Á=våÒ˜÷]¾àt Ÿµ)0!‚|ô€d©¢ÏY¾Û—2Wƒ7vˆì4xÐTØùÿ›=¤»au»»ö»ÃÙÏE'(€‘Ò†YÉ6Ýîs(:=Ù^ƒÇ™Nþy¨9·‰>ëU`zL™¾VJAe9WaëÏ¡®d²B4 ç–ï÷oÙ'Zí?rHt,¾0ÑkŒ’å³FŠ%(è´ÙçÐS±Ž4g¢#<607ï÷vÜ :¥$õD,r>¤å»DÿÁ5Sû¨Àíõo‘‡SI°#q<< 剴ãZ£-bR²YntVÍäVÁÛÓž¼«‡ ©Ã×Tà¡°ä±÷³-ɪz™Î!'ì€äc6¶rº?q¶?,ߣó§@™ÚÏB8ÿœÀÓ&à©™m䂨§¹î«ÃÙ ê0—6E©–­ÈQ4Kžt/`átŒ©}Pktó}£±Ñá?™2Î i»"(ÕK5é¨ÂÛ_ ëð¨Ó#º»yØßïSðÃákvä‹ú¢ >î)ä­á.rŽb>¾j2NGèÛ×dëð¨gÈ¿_1“Õ62„ÏQ-ZˆKŒ€:Ëa¨˜=žIK Üö¾³ Ù áåqû×êQ…÷™ Eg(èŒbHVÄÈÓ ëuVZJÌ騣ïñ¥xôÍH¯¿Q °C<‡•Ù– N–ÃdêÌ:F*PRû^˜:œs>?]z`–;áÙÂdçß2ÿ¸(¾&¾Ý"4A9°v¿M =_ðγ v¡…›ãõè#ôâ.x؃W¤l47¦(Aig“ü ø÷œyïÖóÔVOSë†ÂóžþßÝÒ>¡Íã¸s¡Q¡®T_Äa=KG°–qfßÂçÂn%ðmÏ[]ù.ëÖ¤ã~wßÖQ@[ÑÈÍã!gÝ ßÄÀtAÁÁAuÁcÙÛÜ §Ì³džÈTÍd8Åyìª,FÕõEZ(·€FÒmcAÎvÜ-[› ;NŒ>Û}ËwÂã=ÜØäÓhZÖöà*–·¦ºeÞT¸®+Ú†%!Ïù67¾Ùžž:#Šû%8m1¿£g¥y[Øûá!2ï‚–ÓÝÓÝð0{WÛÜ÷Ìç-Ämþ…ðü6ÄHâù®Ú{Gˆ×,dÜb›wÁ³… f0ð@ÎçÉmlÁ}o½ÔôW߲тç›mcuËÎQŠÝoyÝ ߤ€b®·8ùÊiû-"ó=ê¸^«úá44{ 4f§=ÊV/ðr¾ŠkU¨â+} kª„ñe*òi'Š|’à«*`W¡b6œn€ÊÞ{lTÞªA 5ª‚`¨PLUôàâó¿)ŽƒNT±HJT¬[@ ›âðý‘k!ËÁ–*,©(‰ )È(RÑùC²Õ;÷@ZqNN³™Ìã?°ÌòâÁ…®qã³ÏÉ»y$KeJ#þí*X<1XËæ×J4Ï+92#{?›jŒôÏi‰}3¨G¸ÖåP—vx–/*.ôñ\Eo³,)^JP×hHÌ/)3\ì†VËYtÄÈD…rQ¨ ªT´Èk»VgóÆ”w¤‹Óyä=Fªês àe„Ë‚‰ú*ˆe¡$“Ñ··®àM³PÈèz?e~ %ŽÈ©6íÙ›câZ()¡NbŒ€°¡yÐ!Yè¶€xÊ+¨nRP'*²4 ¿d‘"ab¡Ægh‘©úŒÛ€@ h‰ââ£ö,Š\É$†lQòRbh怒0*:0ø"×·@=CîÓ‰FŽksŘåˆÐST(Ð-›´$³¸˜É2œj#qVãÃ`|%óqOÆ•¨rlZqô/Àûa0Ø?_iuÑæßÝBôÅ]%æ+})àºïÒ©L#T†ØRl°C Jõ_šæYÿ¡ˆ^LöjkgÍ\6âü@bn±tÑï¸æËKe-÷Ö˜»`£,Ñ21D§ƒ(gYãð*ÓQxŠNªY;Áº']%Õt«y”¸YƒP=W¿ƒ!Ám.Hx/U1¬â!ød.¯q¿ùUè…í f·£ÀjôÚ«{ß­v–b¹^±‘±G7Ïry ú¼ÈU0п™w‚éäpç56d€# ×ÀÇÐ'@IÇY8?¨¸þZL¥åòÀ·\Ê,Î&"ä6\M¤=ñí åD\gHå…«à™`.ƒpI¥r 
+—z‘DZ¬—ÐÚàB&…ú²B:ŒRÈÛÝ@:«OŸq)]Zd¼‹2“.”8\ä9öAPKUþLNÕˆ$¯ÌÚJdœÙßYód­ÚOø®ž.{P#fÕ>ô"VI4Òjå^'ûU‚ß°õñUãì=¼ù»J±É±5mcű‘÷~®´¯RãþýRjCs¹t¨CÀÀãþœiû‰®jHŽ2ŒæÑt‚å„ 'ƒÞ‘ ]DK š‚ƒX·ßR•^Éb ÂùâÅ‹KËMé!4ô×z'gwàî1ØÉ·yv ~¦Êx¦FÏá˜eýÇ?dº9˜~¸AÿôÃwgG€ûä@ÁOŒr>5 wr ¦e9/‚ñX†!Ôt*5•å(ÌfchjYÊñÉ«Óg¨eÃO[ðÞ¨`ªÀ¾?/´{× ðEW'݈j©cÓ¾5-\ùETŽãoïÞœ–j ª{øÌ켆]ä,—i¡ÇýÎ@óMGÁ«›O2IèÉ–ã*æM$Ÿ„ÒáðÄ[C©oYÌulT’ ~3ǬÐ8”y¥û_Ä‘œË ôî2VEC‘–·¯—z¤Ÿh+‡^Iòîû7/t 5Ÿ€:¯~óúfð·Eœ ^Žª­Ÿáòùòk¼ ñžn¬“zA¼‘j+‚—U<ùìíkü«Ž³ã ^‡‰z£?×äø hÁGUÎê‚n¼Æ( —¦Ù§ôûªpúú4•óbš•úÏ$[DGfØ6Qy†yVªöôÕ ºr9-ïƸg h̪g¿¡òNá ü…—™ãYØt»8¿`7ᡸL®oÕ£E¦Æô­ ˜…Ü´ºaF› nÝ0Æ3¾ŸÄ)„ê3ñ~³übôÊ êî AªŽ¶ŸC°A‡?-é+#m  JË r¨âè[3¥ý9Ryí_²+5›¨|H¬!ñ†Ä×nß:umùö—ÇWomV¸0Y±»yýwÛâ[Ûþ­ÿZ©ÕVëÔþÆÌØ%¥ñÏÉËjüç!œ†ìù.y¾ýŸß÷ÕØÀqáÿú!Înž¾ú¸W'ôNóЀ{™Zs‹»ë†¯Ò­>ŒdëÜË”èÙL¬ŸP!k®¢ï¹WϽzîÕs¯ž{õÜ«ç^ã^ƽú {Z»öšŸí{îÕs¯'ʽŒ¶ ²íZís±Kî峑ãº÷q/'`†üÙa {©ÓQ‡>!îÕ=kìNùÿϽêZ[ÌëYJgõòŠ-s¯ºD›ZtÝò¿%²æ‘™=÷ê¹WϽzîÕs¯ž{õÜë!Ük醅c¯Ûžn™Îî¿{õÜë©s¯Z[¬ ]«×Ù!÷r,1ÜϽ,Mþ@{Ö®N¬ÓÑÆV:O{uCÏsàîejmir¿t|ûѸWGdÂî¹WϽzîÕs¯ž{õÜ«ç^Ûâ^ƽڶð…hwÖè¿{õÜëés/£­cŽÍÚµÚöwɽlf\&îç^fÒ£8ùÚo#&§ö“â^zA¿¶£÷(ý¡¸—©µïZÜãíÒñ©õhÜK—hQnÙŒm€ÌòzîÕs¯ž{õÜ«ç^=÷ê¹×¶¸—qà HÕºåòKw}ÿhϽzîõ¤¸—ÑV˳„ðÚµÚ²¼r/nñ‘ïpͽþö¿ñaLˆ+././@LongLink0000644000000000000000000000027500000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611513033101 5ustar zuulzuul././@LongLink0000644000000000000000000000030400000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/bridge/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611520033077 5ustar zuulzuul././@LongLink0000644000000000000000000000031100000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/bridge/1.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000112415115611513033101 0ustar zuulzuul2025-12-08T17:58:36.323661894+00:00 stdout F bridge-368 ==> (/tmp/smartgateway) 2025-12-08T17:58:36.330679306+00:00 stderr F PN_TRANSPORT_CLOSED: proton:io: Connection refused - disconnected default-interconnect.service-telemetry.svc.cluster.local:5673 2025-12-08T17:58:36.330795589+00:00 stderr F Exit AMQP RCV thread... 2025-12-08T17:58:37.325982350+00:00 stdout F Joining amqp_rcv_th... 2025-12-08T17:58:37.325982350+00:00 stdout F Cancel socket_snd_th... 2025-12-08T17:58:37.325982350+00:00 stdout F Joining socket_snd_th... 2025-12-08T17:58:37.326015141+00:00 stderr F Exit SOCKET thread... 
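Editor's note: bridge/1.log above records the smart gateway's AMQP receiver failing with "PN_TRANSPORT_CLOSED: proton:io: Connection refused" against default-interconnect.service-telemetry.svc.cluster.local:5673 and then tearing down its receive and socket threads. The following is a minimal, hypothetical Go sketch (not part of the captured artifacts) for checking whether that QDR listener is reachable at all from a pod inside the cluster; the address comes from the log line, everything else (file name, timeout) is illustrative.

// probe_amqp.go - illustrative reachability check for the QDR listener that
// bridge/1.log reports as "Connection refused". Run from inside the cluster;
// a successful TCP dial only proves the listener exists, not AMQP health.
package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	addr := "default-interconnect.service-telemetry.svc.cluster.local:5673"
	conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
	if err != nil {
		fmt.Fprintf(os.Stderr, "dial %s failed: %v\n", addr, err)
		os.Exit(1)
	}
	defer conn.Close()
	fmt.Printf("TCP connection to %s succeeded\n", addr)
}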
././@LongLink0000644000000000000000000000031100000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/bridge/2.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000202615115611513033103 0ustar zuulzuul2025-12-08T17:58:53.105228922+00:00 stdout F bridge-349 ==> (/tmp/smartgateway) 2025-12-08T17:58:53.118641086+00:00 stdout F bridge-349 ==> (amqp://default-interconnect.service-telemetry.svc.cluster.local:5673/collectd/cloud1-notify) 2025-12-08T17:59:53.131385121+00:00 stdout F in: 29(1), amqp_overrun: 0(0), out: 29(1), sock_overrun: 0(0), link_credit_average: 14997.000000 2025-12-08T18:00:52.145024854+00:00 stdout F in: 34(0), amqp_overrun: 0(0), out: 34(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:01:51.162688778+00:00 stdout F in: 34(0), amqp_overrun: 0(0), out: 34(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:02:50.170083299+00:00 stdout F in: 34(0), amqp_overrun: 0(0), out: 34(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:03:49.193055925+00:00 stdout F in: 34(0), amqp_overrun: 0(0), out: 34(0), sock_overrun: 0(0), link_credit_average: -nan 2025-12-08T18:04:48.209322951+00:00 stdout F in: 34(0), amqp_overrun: 0(0), out: 34(0), sock_overrun: 0(0), link_credit_average: -nan ././@LongLink0000644000000000000000000000030500000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/sg-core/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611520033077 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/sg-core/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000125215115611513033103 0ustar zuulzuul2025-12-08T17:58:34.493993205+00:00 stdout F 2025-12-08 17:58:34 [INFO] initialized handler [transport pair: socket0, handler: events] 2025-12-08T17:58:34.493993205+00:00 stdout F 2025-12-08 17:58:34 [INFO] loaded transport [transport: socket0] 2025-12-08T17:58:34.611854541+00:00 stdout F 2025-12-08 17:58:34 [INFO] loaded application plugin [application: elasticsearch] 2025-12-08T17:58:34.613454473+00:00 stdout F 2025-12-08 17:58:34 [INFO] socket listening on /tmp/smartgateway [plugin: socket] 2025-12-08T17:58:34.638168972+00:00 stdout F 2025-12-08 17:58:34 [INFO] storing events and(or) logs to Elasticsearch. 
[plugin: elasticsearch, url: https://elasticsearch-es-http:9200] ././@LongLink0000644000000000000000000000026000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_ob0000755000175000017500000000000015115611513033145 5ustar zuulzuul././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d/operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_ob0000755000175000017500000000000015115611520033143 5ustar zuulzuul././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d/operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_ob0000644000175000017500000005457115115611513033163 0ustar zuulzuul2025-12-08T17:55:34.350086423+00:00 stderr F 2025-12-08T17:55:34Z INFO setup running with arguments {"namespace": "openshift-operators", "metrics-bind-address": ":8080", "images": "alertmanager=registry.redhat.io/cluster-observability-operator/alertmanager-rhel9@sha256:e718854a7d6ca8accf0fa72db0eb902e46c44d747ad51dc3f06bba0cefaa3c01,health-analyzer=registry.redhat.io/cluster-observability-operator/cluster-health-analyzer-rhel9@sha256:45a4ec2a519bcec99e886aa91596d5356a2414a2bd103baaef9fa7838c672eb2,korrel8r=registry.redhat.io/cluster-observability-operator/korrel8r-rhel9@sha256:c595ff56b2cb85514bf4784db6ddb82e4e657e3e708a7fb695fc4997379a94d4,perses=registry.redhat.io/cluster-observability-operator/perses-rhel9@sha256:91531137fc1dcd740e277e0f65e120a0176a16f788c14c27925b61aa0b792ade,prometheus=registry.redhat.io/cluster-observability-operator/prometheus-rhel9@sha256:17ea20be390a94ab39f5cdd7f0cbc2498046eebcf77fe3dec9aa288d5c2cf46b,thanos=registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4,ui-dashboards=registry.redhat.io/cluster-observability-operator/dashboards-console-plugin-rhel9@sha256:a69da8bbca8a28dd2925f864d51cc31cf761b10532c553095ba40b242ef701cb,ui-distributed-tracing-pf4=registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-pf4-rhel9@sha256:e9d9a89e4d8126a62b1852055482258ee528cac6398dd5d43ebad75ace0f33c9,ui-distributed-tracing-pf5=registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-pf5-rhel9@sha256:95fe5b5746ca8c07ac9217ce2d8ac8e6afad17af210f9d8e0074df1310b209a8,ui-distributed-tracing=registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-rhel9@sha256:897e1bfad1187062725b54d87107bd0155972257a50d8335dd29e1999b828a4f,ui-logging-pf4=registry.redhat.io/cluster-observability-operator/logging-console-plugin-pf4-rhel9@sha256:3b9693fcde9b3a9494fb04735b1f7cfd0426f10be820fdc3f024175c0d3df1c9,ui-logging=registry.redhat.io/cluster-observability-operator/logging-console-plugin-rhel9@sha256:ec684a0645ceb917b019af7ddba68c3533416e356ab0d0320a30e75ca7ebb31b,ui-monitoring-pf5=registry.redhat.io/cluster-observabili
ty-operator/monitoring-console-plugin-pf5-rhel9@sha256:aa47256193cfd2877853878e1ae97d2ab8b8e5deae62b387cbfad02b284d379c,ui-monitoring=registry.redhat.io/cluster-observability-operator/monitoring-console-plugin-rhel9@sha256:e03777be39e71701935059cd877603874a13ac94daa73219d4e5e545599d78a9,ui-troubleshooting-panel=registry.redhat.io/cluster-observability-operator/troubleshooting-panel-console-plugin-rhel9@sha256:580606f194180accc8abba099e17a26dca7522ec6d233fa2fdd40312771703e3", "openshift.enabled": true} 2025-12-08T17:55:34.422124300+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"serving-cert::/etc/tls/private/tls.crt::/etc/tls/private/tls.key", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded serving cert ["serving-cert::/etc/tls/private/tls.crt::/etc/tls/private/tls.key"]: "observability-operator.openshift-operators.svc" [serving] validServingFor=[observability-operator.openshift-operators.svc,observability-operator.openshift-operators.svc.cluster.local] issuer="openshift-service-serving-signer@1762069924" (2025-12-08 17:55:11 +0000 UTC to 2027-12-08 17:55:12 +0000 UTC (now=2025-12-08 17:55:34.42067296 +0000 UTC)) 2025-12-08T17:55:34.464648417+00:00 stderr F 2025-12-08T17:55:34Z INFO setup starting manager 2025-12-08T17:55:34.464685668+00:00 stderr F 2025-12-08T17:55:34Z INFO controller-runtime.metrics Starting metrics server 2025-12-08T17:55:34.467002542+00:00 stderr F I1208 17:55:34.466947 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:55:34.467064023+00:00 stderr F I1208 17:55:34.467030 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:55:34.467098554+00:00 stderr F 2025-12-08T17:55:34Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2025-12-08T17:55:34.467138135+00:00 stderr F 2025-12-08T17:55:34Z INFO starting server {"name": "pprof", "addr": "127.0.0.1:8083"} 2025-12-08T17:55:34.467172626+00:00 stderr F I1208 17:55:34.467070 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:55:34.567716255+00:00 stderr F I1208 17:55:34.567654 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:55:34.568205179+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack", "source": "kind source: *v1.PodDisruptionBudget"} 2025-12-08T17:55:34.568442675+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "uiplugin", "controllerGroup": "observability.openshift.io", "controllerKind": "UIPlugin", "source": "kind source: *v1.ConsolePlugin"} 2025-12-08T17:55:34.568552228+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack", "source": "kind source: *v1alpha1.MonitoringStack"} 2025-12-08T17:55:34.609241524+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack", "source": "kind source: *v1.Prometheus"} 2025-12-08T17:55:34.609633345+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting 
EventSource {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack", "source": "kind source: *v1.Alertmanager"} 2025-12-08T17:55:34.609853632+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack", "source": "kind source: *v1.Service"} 2025-12-08T17:55:34.610275923+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack", "source": "kind source: *v1.ServiceAccount"} 2025-12-08T17:55:34.616603296+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack", "source": "kind source: *v1.Role"} 2025-12-08T17:55:34.617426309+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack", "source": "kind source: *v1.RoleBinding"} 2025-12-08T17:55:34.617570083+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"client-ca::kube-system::extension-apiserver-authentication::client-ca-file", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded client CA [0/"client-ca::kube-system::extension-apiserver-authentication::client-ca-file"]: "admin-kubeconfig-signer" [] issuer="" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:55:34.61673798 +0000 UTC)) 2025-12-08T17:55:34.617634484+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "thanosquerier", "controllerGroup": "monitoring.rhobs", "controllerKind": "ThanosQuerier", "source": "kind source: *v1.Secret"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "cluster-observability", "controllerGroup": "observability.openshift.io", "controllerKind": "ObservabilityInstaller", "source": "kind source: *v1.ClusterRoleBinding"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "uiplugin", "controllerGroup": "observability.openshift.io", "controllerKind": "UIPlugin", "source": "kind source: *v1alpha1.UIPlugin"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "uiplugin", "controllerGroup": "observability.openshift.io", "controllerKind": "UIPlugin", "source": "kind source: *v1.Deployment"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "uiplugin", "controllerGroup": "observability.openshift.io", "controllerKind": "UIPlugin", "source": "kind source: *v1.Service"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "uiplugin", "controllerGroup": "observability.openshift.io", "controllerKind": "UIPlugin", "source": "kind source: *v1.ServiceAccount"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "uiplugin", "controllerGroup": "observability.openshift.io", "controllerKind": "UIPlugin", "source": "kind source: *v1.Role"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "uiplugin", "controllerGroup": 
"observability.openshift.io", "controllerKind": "UIPlugin", "source": "kind source: *v1.RoleBinding"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "uiplugin", "controllerGroup": "observability.openshift.io", "controllerKind": "UIPlugin", "source": "kind source: *v1alpha1.PersesDashboard"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "uiplugin", "controllerGroup": "observability.openshift.io", "controllerKind": "UIPlugin", "source": "kind source: *v1alpha1.PersesDatasource"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "thanosquerier", "controllerGroup": "monitoring.rhobs", "controllerKind": "ThanosQuerier", "source": "kind source: *v1alpha1.ThanosQuerier"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack", "source": "kind source: *v1.ServiceMonitor"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "thanosquerier", "controllerGroup": "monitoring.rhobs", "controllerKind": "ThanosQuerier", "source": "kind source: *v1.Deployment"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "thanosquerier", "controllerGroup": "monitoring.rhobs", "controllerKind": "ThanosQuerier", "source": "kind source: *v1.Service"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "thanosquerier", "controllerGroup": "monitoring.rhobs", "controllerKind": "ThanosQuerier", "source": "kind source: *v1.ServiceAccount"} 2025-12-08T17:55:34.618546730+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "thanosquerier", "controllerGroup": "monitoring.rhobs", "controllerKind": "ThanosQuerier", "source": "kind source: *v1alpha1.MonitoringStack"} 2025-12-08T17:55:34.618598691+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "thanosquerier", "controllerGroup": "monitoring.rhobs", "controllerKind": "ThanosQuerier", "source": "kind source: *v1.ConfigMap"} 2025-12-08T17:55:34.618598691+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "observability-operator", "controllerGroup": "", "controllerKind": "Service", "source": "kind source: *v1.ServiceMonitor"} 2025-12-08T17:55:34.618608772+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "cluster-observability", "controllerGroup": "observability.openshift.io", "controllerKind": "ObservabilityInstaller", "source": "kind source: *v1alpha1.ObservabilityInstaller"} 2025-12-08T17:55:34.618752496+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "cluster-observability", "controllerGroup": "observability.openshift.io", "controllerKind": "ObservabilityInstaller", "source": "kind source: *v1.Secret"} 2025-12-08T17:55:34.618766316+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "cluster-observability", "controllerGroup": "observability.openshift.io", "controllerKind": "ObservabilityInstaller", "source": "kind source: *v1.Namespace"} 2025-12-08T17:55:34.618834448+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "cluster-observability", "controllerGroup": "observability.openshift.io", "controllerKind": 
"ObservabilityInstaller", "source": "kind source: *v1alpha1.UIPlugin"} 2025-12-08T17:55:34.619251099+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "cluster-observability", "controllerGroup": "observability.openshift.io", "controllerKind": "ObservabilityInstaller", "source": "kind source: *v1.ClusterRole"} 2025-12-08T17:55:34.619251099+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "observability-operator", "controllerGroup": "", "controllerKind": "Service", "source": "kind source: *v1.Service"} 2025-12-08T17:55:34.619251099+00:00 stderr F 2025-12-08T17:55:34Z INFO Starting EventSource {"controller": "cluster-observability", "controllerGroup": "observability.openshift.io", "controllerKind": "ObservabilityInstaller", "source": "kind source: *v1alpha1.Subscription"} 2025-12-08T17:55:34.619251099+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"client-ca::kube-system::extension-apiserver-authentication::client-ca-file", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded client CA [1/"client-ca::kube-system::extension-apiserver-authentication::client-ca-file"]: "kube-control-plane-signer" [] issuer="" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:55:34.616812022 +0000 UTC)) 2025-12-08T17:55:34.619251099+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"client-ca::kube-system::extension-apiserver-authentication::client-ca-file", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded client CA [2/"client-ca::kube-system::extension-apiserver-authentication::client-ca-file"]: "kube-apiserver-to-kubelet-signer" [] issuer="" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:55:34.616834253 +0000 UTC)) 2025-12-08T17:55:34.619268400+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"client-ca::kube-system::extension-apiserver-authentication::client-ca-file", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded client CA [3/"client-ca::kube-system::extension-apiserver-authentication::client-ca-file"]: "kubelet-bootstrap-kubeconfig-signer" [] issuer="" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:55:34.616846193 +0000 UTC)) 2025-12-08T17:55:34.619712152+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"client-ca::kube-system::extension-apiserver-authentication::client-ca-file", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded client CA [4/"client-ca::kube-system::extension-apiserver-authentication::client-ca-file"]: "openshift-kube-apiserver-operator_node-system-admin-signer@1762069887" [] issuer="" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:55:34.617774898 +0000 UTC)) 2025-12-08T17:55:34.619723702+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"client-ca::kube-system::extension-apiserver-authentication::client-ca-file", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded client CA [5/"client-ca::kube-system::extension-apiserver-authentication::client-ca-file"]: 
"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455" [] issuer="" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:55:34.619222878 +0000 UTC)) 2025-12-08T17:55:34.619736402+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"client-ca::kube-system::extension-apiserver-authentication::client-ca-file", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded client CA [6/"client-ca::kube-system::extension-apiserver-authentication::client-ca-file"]: "kube-csr-signer_@1762071455" [] issuer="openshift-kube-controller-manager-operator_csr-signer-signer@1762071455" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:55:34.61927558 +0000 UTC)) 2025-12-08T17:55:34.622901229+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"client-ca::kube-system::extension-apiserver-authentication::client-ca-file", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded client CA [7/"client-ca::kube-system::extension-apiserver-authentication::client-ca-file"]: "openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863" [] issuer="" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:55:34.61930114 +0000 UTC)) 2025-12-08T17:55:34.623526986+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"client-ca::kube-system::extension-apiserver-authentication::client-ca-file", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded client CA [8/"client-ca::kube-system::extension-apiserver-authentication::client-ca-file"]: "openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863" [] issuer="" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:55:34.623234068 +0000 UTC)) 2025-12-08T17:55:34.623526986+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"client-ca::kube-system::extension-apiserver-authentication::client-ca-file", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded client CA [9/"client-ca::kube-system::extension-apiserver-authentication::client-ca-file"]: "admin-kubeconfig-signer-custom" [] issuer="" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:55:34.623274919 +0000 UTC)) 2025-12-08T17:55:34.623826915+00:00 stderr F 2025-12-08T17:55:34Z INFO events Event(v1.ObjectReference{Kind:"", Namespace:"", Name:"serving-cert::/etc/tls/private/tls.crt::/etc/tls/private/tls.key", UID:"", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'TLSConfigChanged' loaded serving cert ["serving-cert::/etc/tls/private/tls.crt::/etc/tls/private/tls.key"]: "observability-operator.openshift-operators.svc" [serving] validServingFor=[observability-operator.openshift-operators.svc,observability-operator.openshift-operators.svc.cluster.local] issuer="openshift-service-serving-signer@1762069924" (2025-12-08 17:55:11 +0000 UTC to 2027-12-08 17:55:12 +0000 UTC (now=2025-12-08 17:55:34.623540406 +0000 UTC)) 2025-12-08T17:55:35.122330284+00:00 stderr F 2025-12-08T17:55:35Z INFO Starting Controller {"controller": "uiplugin", "controllerGroup": "observability.openshift.io", "controllerKind": 
"UIPlugin"} 2025-12-08T17:55:35.122330284+00:00 stderr F 2025-12-08T17:55:35Z INFO Starting workers {"controller": "uiplugin", "controllerGroup": "observability.openshift.io", "controllerKind": "UIPlugin", "worker count": 1} 2025-12-08T17:55:35.122371355+00:00 stderr F 2025-12-08T17:55:35Z INFO Starting Controller {"controller": "observability-operator", "controllerGroup": "", "controllerKind": "Service"} 2025-12-08T17:55:35.122371355+00:00 stderr F 2025-12-08T17:55:35Z INFO Starting workers {"controller": "observability-operator", "controllerGroup": "", "controllerKind": "Service", "worker count": 1} 2025-12-08T17:55:35.122522709+00:00 stderr F 2025-12-08T17:55:35Z INFO observability-operator Reconciling operator resources {"operator": {"name":"observability-operator","namespace":"openshift-operators"}} 2025-12-08T17:55:35.123935807+00:00 stderr F 2025-12-08T17:55:35Z INFO Starting Controller {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack"} 2025-12-08T17:55:35.123935807+00:00 stderr F 2025-12-08T17:55:35Z INFO Starting workers {"controller": "monitoringstack", "controllerGroup": "monitoring.rhobs", "controllerKind": "MonitoringStack", "worker count": 1} 2025-12-08T17:55:35.124333768+00:00 stderr F 2025-12-08T17:55:35Z INFO Starting Controller {"controller": "thanosquerier", "controllerGroup": "monitoring.rhobs", "controllerKind": "ThanosQuerier"} 2025-12-08T17:55:35.124333768+00:00 stderr F 2025-12-08T17:55:35Z INFO Starting workers {"controller": "thanosquerier", "controllerGroup": "monitoring.rhobs", "controllerKind": "ThanosQuerier", "worker count": 1} 2025-12-08T17:55:35.127771462+00:00 stderr F 2025-12-08T17:55:35Z INFO Starting Controller {"controller": "cluster-observability", "controllerGroup": "observability.openshift.io", "controllerKind": "ObservabilityInstaller"} 2025-12-08T17:55:35.127771462+00:00 stderr F 2025-12-08T17:55:35Z INFO Starting workers {"controller": "cluster-observability", "controllerGroup": "observability.openshift.io", "controllerKind": "ObservabilityInstaller", "worker count": 1} 2025-12-08T17:55:35.270957072+00:00 stderr F 2025-12-08T17:55:35Z INFO observability-operator Reconciling operator resources {"operator": {"name":"observability-operator","namespace":"openshift-operators"}} 2025-12-08T17:55:36.834695420+00:00 stderr F 2025-12-08T17:55:36Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": true} 2025-12-08T17:55:44.434372244+00:00 stderr F 2025-12-08T17:55:44Z INFO observability-operator Reconciling operator resources {"operator": {"name":"observability-operator","namespace":"openshift-operators"}} 2025-12-08T17:55:44.452273365+00:00 stderr F 2025-12-08T17:55:44Z INFO observability-operator Reconciling operator resources {"operator": {"name":"observability-operator","namespace":"openshift-operators"}} ././@LongLink0000644000000000000000000000024100000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611513033076 5ustar zuulzuul././@LongLink0000644000000000000000000000026100000000000011602 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/registry-server/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/registry-server/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000124315115611513033100 0ustar zuulzuul2025-12-08T18:04:52.915730805+00:00 stderr F time="2025-12-08T18:04:52Z" level=info msg="found existing cache contents" backend=pogreb.v1 cache=/extracted-catalog/cache configs=/extracted-catalog/catalog 2025-12-08T18:04:52.915730805+00:00 stderr F time="2025-12-08T18:04:52Z" level=info msg="starting pprof endpoint" address="localhost:6060" 2025-12-08T18:04:57.664519994+00:00 stderr F time="2025-12-08T18:04:57Z" level=info msg="serving registry" cache=/extracted-catalog/cache configs=/extracted-catalog/catalog port=50051 2025-12-08T18:04:57.664519994+00:00 stderr F time="2025-12-08T18:04:57Z" level=info msg="stopped caching cpu profile data" address="localhost:6060" ././@LongLink0000644000000000000000000000026300000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-utilities/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000027000000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-utilities/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000000015115611513033066 0ustar zuulzuul././@LongLink0000644000000000000000000000026100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-content/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-content/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000000015115611513033066 0ustar zuulzuul././@LongLink0000644000000000000000000000026300000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611514033067 5ustar 
zuulzuul././@LongLink0000644000000000000000000000030300000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611521033065 5ustar zuulzuul././@LongLink0000644000000000000000000000031000000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000644000175000017500000000223715115611514033075 0ustar zuulzuul2025-12-08T17:53:42.037998174+00:00 stdout F 2025-12-08T17:53:42+00:00 INFO: ovn-control-plane-metrics-certs mounted, starting kube-rbac-proxy 2025-12-08T17:53:42.080608683+00:00 stderr F W1208 17:53:42.080462 1 deprecated.go:66] 2025-12-08T17:53:42.080608683+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:53:42.080608683+00:00 stderr F 2025-12-08T17:53:42.080608683+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 2025-12-08T17:53:42.080608683+00:00 stderr F 2025-12-08T17:53:42.080608683+00:00 stderr F =============================================== 2025-12-08T17:53:42.080608683+00:00 stderr F 2025-12-08T17:53:42.081176189+00:00 stderr F I1208 17:53:42.081144 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:53:42.082405732+00:00 stderr F I1208 17:53:42.082375 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:53:42.082799062+00:00 stderr F I1208 17:53:42.082765 1 kube-rbac-proxy.go:397] Starting TCP socket on :9108 2025-12-08T17:53:42.083120641+00:00 stderr F I1208 17:53:42.083095 1 kube-rbac-proxy.go:404] Listening securely on :9108 ././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/ovnkube-cluster-manager/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000755000175000017500000000000015115611521033065 5ustar zuulzuul././@LongLink0000644000000000000000000000032000000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/ovnkube-cluster-manager/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernet0000644000175000017500000011125415115611514033075 0ustar zuulzuul2025-12-08T17:53:42.137924722+00:00 stderr F + [[ -f /env/_master ]] 2025-12-08T17:53:42.137924722+00:00 stderr F + ovn_v4_join_subnet_opt= 2025-12-08T17:53:42.137924722+00:00 stderr F + [[ '' != '' ]] 2025-12-08T17:53:42.137924722+00:00 stderr F + ovn_v6_join_subnet_opt= 2025-12-08T17:53:42.137924722+00:00 stderr F + [[ '' != '' ]] 2025-12-08T17:53:42.137924722+00:00 stderr F + ovn_v4_transit_switch_subnet_opt= 2025-12-08T17:53:42.138039835+00:00 stderr F + [[ '' != '' ]] 2025-12-08T17:53:42.138039835+00:00 stderr F + ovn_v6_transit_switch_subnet_opt= 2025-12-08T17:53:42.138039835+00:00 stderr F + [[ '' != '' ]] 
2025-12-08T17:53:42.138039835+00:00 stderr F + dns_name_resolver_enabled_flag= 2025-12-08T17:53:42.138039835+00:00 stderr F + [[ false == \t\r\u\e ]] 2025-12-08T17:53:42.138039835+00:00 stderr F + persistent_ips_enabled_flag=--enable-persistent-ips 2025-12-08T17:53:42.138039835+00:00 stderr F + network_segmentation_enabled_flag= 2025-12-08T17:53:42.138039835+00:00 stderr F + multi_network_enabled_flag= 2025-12-08T17:53:42.138039835+00:00 stderr F + [[ true == \t\r\u\e ]] 2025-12-08T17:53:42.138039835+00:00 stderr F + multi_network_enabled_flag=--enable-multi-network 2025-12-08T17:53:42.138039835+00:00 stderr F + [[ true == \t\r\u\e ]] 2025-12-08T17:53:42.138039835+00:00 stderr F + [[ true != \t\r\u\e ]] 2025-12-08T17:53:42.138039835+00:00 stderr F + network_segmentation_enabled_flag=--enable-network-segmentation 2025-12-08T17:53:42.138039835+00:00 stderr F + route_advertisements_enable_flag= 2025-12-08T17:53:42.138039835+00:00 stderr F + [[ false == \t\r\u\e ]] 2025-12-08T17:53:42.138039835+00:00 stderr F + preconfigured_udn_addresses_enable_flag= 2025-12-08T17:53:42.138039835+00:00 stderr F + [[ false == \t\r\u\e ]] 2025-12-08T17:53:42.138039835+00:00 stderr F + multi_network_policy_enabled_flag= 2025-12-08T17:53:42.138053435+00:00 stderr F + [[ false == \t\r\u\e ]] 2025-12-08T17:53:42.138053435+00:00 stderr F + admin_network_policy_enabled_flag= 2025-12-08T17:53:42.138077136+00:00 stderr F + [[ true == \t\r\u\e ]] 2025-12-08T17:53:42.138077136+00:00 stderr F + admin_network_policy_enabled_flag=--enable-admin-network-policy 2025-12-08T17:53:42.138112657+00:00 stderr F + '[' local == shared ']' 2025-12-08T17:53:42.138112657+00:00 stderr F + '[' local == local ']' 2025-12-08T17:53:42.138112657+00:00 stderr F + gateway_mode_flags='--gateway-mode local' 2025-12-08T17:53:42.138609260+00:00 stderr F ++ date '+%m%d %H:%M:%S.%N' 2025-12-08T17:53:42.141407066+00:00 stdout F I1208 17:53:42.140965124 - ovnkube-control-plane - start ovnkube --init-cluster-manager crc 2025-12-08T17:53:42.141423467+00:00 stderr F + echo 'I1208 17:53:42.140965124 - ovnkube-control-plane - start ovnkube --init-cluster-manager crc' 2025-12-08T17:53:42.141545760+00:00 stderr F + exec /usr/bin/ovnkube --enable-interconnect --init-cluster-manager crc --config-file=/run/ovnkube-config/ovnkube.conf --loglevel 4 --metrics-bind-address 127.0.0.1:29108 --metrics-enable-pprof --metrics-enable-config-duration --enable-persistent-ips --enable-multi-network --enable-network-segmentation --gateway-mode local --enable-egress-ip=true --enable-egress-firewall=true --enable-egress-qos=true --enable-egress-service=true --enable-multicast --enable-multi-external-gateway=true --enable-admin-network-policy 2025-12-08T17:53:42.186668387+00:00 stderr F I1208 17:53:42.186461 1 config.go:2357] Parsed config file /run/ovnkube-config/ovnkube.conf 2025-12-08T17:53:42.187069437+00:00 stderr F I1208 17:53:42.186597 1 config.go:2358] Parsed config: {Default:{MTU:1400 RoutableMTU:0 ConntrackZone:64000 HostMasqConntrackZone:0 OVNMasqConntrackZone:0 HostNodePortConntrackZone:0 ReassemblyConntrackZone:0 EncapType:geneve EncapIP: EffectiveEncapIP: EncapPort:6081 InactivityProbe:100000 OpenFlowProbe:0 OfctrlWaitBeforeClear:0 MonitorAll:true OVSDBTxnTimeout:1m40s LFlowCacheEnable:true LFlowCacheLimit:0 LFlowCacheLimitKb:1048576 RawClusterSubnets:10.217.0.0/22/23 ClusterSubnets:[] EnableUDPAggregation:true Zone:global RawUDNAllowedDefaultServices:default/kubernetes,openshift-dns/dns-default UDNAllowedDefaultServices:[]} Logging:{File: CNIFile: 
LibovsdbFile:/var/log/ovnkube/libovsdb.log Level:4 LogFileMaxSize:100 LogFileMaxBackups:5 LogFileMaxAge:0 ACLLoggingRateLimit:20} Monitoring:{RawNetFlowTargets: RawSFlowTargets: RawIPFIXTargets: NetFlowTargets:[] SFlowTargets:[] IPFIXTargets:[]} IPFIX:{Sampling:400 CacheActiveTimeout:60 CacheMaxFlows:0} CNI:{ConfDir:/etc/cni/net.d Plugin:ovn-k8s-cni-overlay} OVNKubernetesFeature:{EnableAdminNetworkPolicy:false EnableEgressIP:false EgressIPReachabiltyTotalTimeout:1 EnableEgressFirewall:false EnableEgressQoS:false EnableEgressService:false EgressIPNodeHealthCheckPort:9107 EnableMultiNetwork:false EnableNetworkSegmentation:true EnablePreconfiguredUDNAddresses:false EnableRouteAdvertisements:false EnableMultiNetworkPolicy:false EnableStatelessNetPol:false EnableInterconnect:false EnableMultiExternalGateway:false EnablePersistentIPs:false EnableDNSNameResolver:false EnableServiceTemplateSupport:false EnableObservability:false EnableNetworkQoS:false AdvertisedUDNIsolationMode:strict} Kubernetes:{BootstrapKubeconfig: CertDir: CertDuration:10m0s Kubeconfig: CACert: CAData:[] APIServer:https://api-int.crc.testing:6443 Token: TokenFile: CompatServiceCIDR: RawServiceCIDRs:10.217.4.0/23 ServiceCIDRs:[] OVNConfigNamespace:openshift-ovn-kubernetes OVNEmptyLbEvents:false PodIP: RawNoHostSubnetNodes: NoHostSubnetNodes: HostNetworkNamespace:openshift-host-network DisableRequestedChassis:false PlatformType:None HealthzBindAddress:0.0.0.0:10256 CompatMetricsBindAddress: CompatOVNMetricsBindAddress: CompatMetricsEnablePprof:false DNSServiceNamespace:openshift-dns DNSServiceName:dns-default} Metrics:{BindAddress: OVNMetricsBindAddress: ExportOVSMetrics:false EnablePprof:false NodeServerPrivKey: NodeServerCert: EnableConfigDuration:false EnableScaleMetrics:false} OvnNorth:{Address: PrivKey: Cert: CACert: CertCommonName: Scheme: ElectionTimer:0 northbound:false exec:} OvnSouth:{Address: PrivKey: Cert: CACert: CertCommonName: Scheme: ElectionTimer:0 northbound:false exec:} Gateway:{Mode:local Interface: GatewayAcceleratedInterface: EgressGWInterface: NextHop: VLANID:0 NodeportEnable:true DisableSNATMultipleGWs:false V4JoinSubnet:100.64.0.0/16 V6JoinSubnet:fd98::/64 V4MasqueradeSubnet:169.254.169.0/29 V6MasqueradeSubnet:fd69::/125 MasqueradeIPs:{V4OVNMasqueradeIP:169.254.169.1 V6OVNMasqueradeIP:fd69::1 V4HostMasqueradeIP:169.254.169.2 V6HostMasqueradeIP:fd69::2 V4HostETPLocalMasqueradeIP:169.254.169.3 V6HostETPLocalMasqueradeIP:fd69::3 V4DummyNextHopMasqueradeIP:169.254.169.4 V6DummyNextHopMasqueradeIP:fd69::4 V4OVNServiceHairpinMasqueradeIP:169.254.169.5 V6OVNServiceHairpinMasqueradeIP:fd69::5} DisablePacketMTUCheck:false RouterSubnet: SingleNode:false DisableForwarding:false AllowNoUplink:false EphemeralPortRange:} MasterHA:{ElectionLeaseDuration:137 ElectionRenewDeadline:107 ElectionRetryPeriod:26} ClusterMgrHA:{ElectionLeaseDuration:137 ElectionRenewDeadline:107 ElectionRetryPeriod:26} HybridOverlay:{Enabled:false RawClusterSubnets: ClusterSubnets:[] VXLANPort:4789} OvnKubeNode:{Mode:full DPResourceDeviceIdsMap:map[] MgmtPortNetdev: MgmtPortDPResourceName:} ClusterManager:{V4TransitSwitchSubnet:100.88.0.0/16 V6TransitSwitchSubnet:fd97::/64}} 2025-12-08T17:53:42.189561175+00:00 stderr F I1208 17:53:42.189514 1 leaderelection.go:257] attempting to acquire leader lease openshift-ovn-kubernetes/ovn-kubernetes-master... 
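Editor's note: the ovnkube-cluster-manager log above shows the process parsing /run/ovnkube-config/ovnkube.conf (ClusterMgrHA ElectionLeaseDuration:137, ElectionRenewDeadline:107, ElectionRetryPeriod:26) and then attempting to acquire the ovn-kubernetes-master lease in openshift-ovn-kubernetes. The sketch below reproduces that client-go leader-election pattern in miniature; it is illustrative only, assumes in-cluster credentials and the pod hostname as identity, and is not ovn-kubernetes' actual implementation. Lease name, namespace, and the three durations are taken from the log; everything else is an assumption.

// leader_election_sketch.go - minimal client-go Lease-based leader election,
// mirroring the "attempting to acquire leader lease" / "Won leader election"
// messages in the log above. Illustrative only.
package main

import (
	"context"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	id, _ := os.Hostname() // e.g. the pod name, as seen in the LeaderElection event

	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      "ovn-kubernetes-master",      // lease name from the log
			Namespace: "openshift-ovn-kubernetes",   // namespace from the log
		},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}

	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 137 * time.Second, // ClusterMgrHA values from the parsed config
		RenewDeadline: 107 * time.Second,
		RetryPeriod:   26 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				klog.Info("won leader election; in active mode")
			},
			OnStoppedLeading: func() {
				klog.Info("lost leadership; stepping down")
			},
		},
	})
}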
2025-12-08T17:53:42.189991517+00:00 stderr F I1208 17:53:42.189962 1 metrics.go:525] Starting metrics server at address "127.0.0.1:29108" 2025-12-08T17:53:42.204316437+00:00 stderr F I1208 17:53:42.204251 1 leaderelection.go:271] successfully acquired lease openshift-ovn-kubernetes/ovn-kubernetes-master 2025-12-08T17:53:42.204699187+00:00 stderr F I1208 17:53:42.204471 1 ovnkube.go:397] Won leader election; in active mode 2025-12-08T17:53:42.205343155+00:00 stderr F I1208 17:53:42.205126 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-ovn-kubernetes", Name:"ovn-kubernetes-master", UID:"12fe07d3-9bfd-4c0d-a07a-a97a45bf2d39", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"40487", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' ovnkube-control-plane-97c9b6c48-lfp2m became leader 2025-12-08T17:53:42.275515513+00:00 stderr F I1208 17:53:42.275393 1 secondary_network_cluster_manager.go:38] Creating secondary network cluster manager 2025-12-08T17:53:42.275560084+00:00 stderr F I1208 17:53:42.275512 1 egressservice_cluster.go:98] Setting up event handlers for Egress Services 2025-12-08T17:53:42.275979725+00:00 stderr F I1208 17:53:42.275961 1 clustermanager.go:169] Starting the cluster manager 2025-12-08T17:53:42.275979725+00:00 stderr F I1208 17:53:42.275975 1 factory.go:531] Starting watch factory 2025-12-08T17:53:42.276142269+00:00 stderr F I1208 17:53:42.276120 1 reflector.go:357] "Starting reflector" type="*v1.Pod" resyncPeriod="0s" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.276142269+00:00 stderr F I1208 17:53:42.276136 1 reflector.go:403] "Listing and watching" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.276338425+00:00 stderr F I1208 17:53:42.276125 1 reflector.go:357] "Starting reflector" type="*v1.Node" resyncPeriod="0s" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.276489539+00:00 stderr F I1208 17:53:42.276450 1 reflector.go:357] "Starting reflector" type="*v1.Service" resyncPeriod="0s" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.276489539+00:00 stderr F I1208 17:53:42.276461 1 reflector.go:403] "Listing and watching" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.276600642+00:00 stderr F I1208 17:53:42.276566 1 reflector.go:403] "Listing and watching" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.276758476+00:00 stderr F I1208 17:53:42.276714 1 reflector.go:357] "Starting reflector" type="*v1.Namespace" resyncPeriod="0s" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.276800627+00:00 stderr F I1208 17:53:42.276783 1 reflector.go:403] "Listing and watching" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.277520237+00:00 stderr F I1208 17:53:42.277491 1 reflector.go:357] "Starting reflector" type="*v1.EndpointSlice" resyncPeriod="0s" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.277578919+00:00 stderr F I1208 17:53:42.277554 1 reflector.go:403] "Listing and watching" type="*v1.EndpointSlice" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.279411878+00:00 stderr F I1208 17:53:42.279353 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.282057170+00:00 stderr F I1208 17:53:42.282013 1 reflector.go:430] "Caches populated" 
type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.282150143+00:00 stderr F I1208 17:53:42.282127 1 reflector.go:430] "Caches populated" type="*v1.EndpointSlice" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.284920508+00:00 stderr F I1208 17:53:42.282721 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.298308482+00:00 stderr F I1208 17:53:42.298241 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:53:42.381837764+00:00 stderr F I1208 17:53:42.381290 1 factory.go:1890] *v1.Namespace informer cache synced successfully 2025-12-08T17:53:42.381837764+00:00 stderr F I1208 17:53:42.381406 1 factory.go:1890] *v1.Service informer cache synced successfully 2025-12-08T17:53:42.381837764+00:00 stderr F I1208 17:53:42.381414 1 factory.go:1890] *v1.EndpointSlice informer cache synced successfully 2025-12-08T17:53:42.381837764+00:00 stderr F I1208 17:53:42.381421 1 factory.go:1890] *v1.Node informer cache synced successfully 2025-12-08T17:53:42.381837764+00:00 stderr F I1208 17:53:42.381426 1 factory.go:1890] *v1.Pod informer cache synced successfully 2025-12-08T17:53:42.381837764+00:00 stderr F I1208 17:53:42.381783 1 reflector.go:357] "Starting reflector" type="*v1.EgressIP" resyncPeriod="0s" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.382028249+00:00 stderr F I1208 17:53:42.381984 1 reflector.go:403] "Listing and watching" type="*v1.EgressIP" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.386217503+00:00 stderr F I1208 17:53:42.386152 1 reflector.go:430] "Caches populated" type="*v1.EgressIP" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.387149688+00:00 stderr F I1208 17:53:42.387046 1 factory.go:1890] *v1.EgressIP informer cache synced successfully 2025-12-08T17:53:42.387287032+00:00 stderr F I1208 17:53:42.387244 1 reflector.go:357] "Starting reflector" type="*v1.EgressFirewall" resyncPeriod="0s" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.387287032+00:00 stderr F I1208 17:53:42.387266 1 reflector.go:403] "Listing and watching" type="*v1.EgressFirewall" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.389286056+00:00 stderr F I1208 17:53:42.389239 1 reflector.go:430] "Caches populated" type="*v1.EgressFirewall" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.392215926+00:00 stderr F I1208 17:53:42.392185 1 factory.go:1890] *v1.EgressFirewall informer cache synced successfully 2025-12-08T17:53:42.392296858+00:00 stderr F I1208 17:53:42.392275 1 reflector.go:357] "Starting reflector" type="*v1.EgressQoS" resyncPeriod="0s" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.392296858+00:00 stderr F I1208 17:53:42.392292 1 reflector.go:403] "Listing and watching" 
type="*v1.EgressQoS" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.393398558+00:00 stderr F I1208 17:53:42.393363 1 reflector.go:430] "Caches populated" type="*v1.EgressQoS" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.397537781+00:00 stderr F I1208 17:53:42.397494 1 factory.go:1890] *v1.EgressQoS informer cache synced successfully 2025-12-08T17:53:42.397599843+00:00 stderr F I1208 17:53:42.397571 1 reflector.go:357] "Starting reflector" type="*v1.EgressService" resyncPeriod="0s" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.397599843+00:00 stderr F I1208 17:53:42.397585 1 reflector.go:403] "Listing and watching" type="*v1.EgressService" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.399120353+00:00 stderr F I1208 17:53:42.399103 1 reflector.go:430] "Caches populated" type="*v1.EgressService" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.403393070+00:00 stderr F I1208 17:53:42.403376 1 factory.go:1890] *v1.EgressService informer cache synced successfully 2025-12-08T17:53:42.403528304+00:00 stderr F I1208 17:53:42.403512 1 reflector.go:357] "Starting reflector" type="*v1.AdminPolicyBasedExternalRoute" resyncPeriod="0s" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.403562585+00:00 stderr F I1208 17:53:42.403548 1 reflector.go:403] "Listing and watching" type="*v1.AdminPolicyBasedExternalRoute" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.405238980+00:00 stderr F I1208 17:53:42.405208 1 reflector.go:430] "Caches populated" type="*v1.AdminPolicyBasedExternalRoute" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.409357122+00:00 stderr F I1208 17:53:42.409317 1 factory.go:1890] *v1.AdminPolicyBasedExternalRoute informer cache synced successfully 2025-12-08T17:53:42.409425424+00:00 stderr F I1208 17:53:42.409382 1 reflector.go:357] "Starting reflector" type="*v1alpha1.IPAMClaim" resyncPeriod="0s" reflector="github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go:141" 2025-12-08T17:53:42.409425424+00:00 stderr F I1208 17:53:42.409421 1 reflector.go:403] "Listing and watching" type="*v1alpha1.IPAMClaim" reflector="github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go:141" 2025-12-08T17:53:42.411001586+00:00 stderr F I1208 17:53:42.410982 1 reflector.go:430] "Caches populated" type="*v1alpha1.IPAMClaim" reflector="github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go:141" 2025-12-08T17:53:42.414650336+00:00 stderr F I1208 17:53:42.414618 1 factory.go:1890] *v1alpha1.IPAMClaim informer cache synced successfully 2025-12-08T17:53:42.414916643+00:00 stderr F I1208 
17:53:42.414866 1 reflector.go:357] "Starting reflector" type="*v1.NetworkAttachmentDefinition" resyncPeriod="0s" reflector="github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117" 2025-12-08T17:53:42.414950044+00:00 stderr F I1208 17:53:42.414905 1 reflector.go:403] "Listing and watching" type="*v1.NetworkAttachmentDefinition" reflector="github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117" 2025-12-08T17:53:42.416239179+00:00 stderr F I1208 17:53:42.416218 1 reflector.go:430] "Caches populated" type="*v1.NetworkAttachmentDefinition" reflector="github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117" 2025-12-08T17:53:42.420302080+00:00 stderr F I1208 17:53:42.420286 1 factory.go:1890] *v1.NetworkAttachmentDefinition informer cache synced successfully 2025-12-08T17:53:42.420595658+00:00 stderr F I1208 17:53:42.420583 1 reflector.go:357] "Starting reflector" type="*v1.ClusterUserDefinedNetwork" resyncPeriod="0s" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.421753289+00:00 stderr F I1208 17:53:42.421728 1 reflector.go:403] "Listing and watching" type="*v1.ClusterUserDefinedNetwork" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.421952484+00:00 stderr F I1208 17:53:42.420860 1 reflector.go:357] "Starting reflector" type="*v1.UserDefinedNetwork" resyncPeriod="0s" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.422079838+00:00 stderr F I1208 17:53:42.422019 1 reflector.go:403] "Listing and watching" type="*v1.UserDefinedNetwork" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.424355510+00:00 stderr F I1208 17:53:42.424284 1 reflector.go:430] "Caches populated" type="*v1.UserDefinedNetwork" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.424445172+00:00 stderr F I1208 17:53:42.424379 1 reflector.go:430] "Caches populated" type="*v1.ClusterUserDefinedNetwork" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140" 2025-12-08T17:53:42.425758128+00:00 stderr F I1208 17:53:42.425726 1 factory.go:1890] *v1.ClusterUserDefinedNetwork informer cache synced successfully 2025-12-08T17:53:42.425758128+00:00 stderr F I1208 17:53:42.425745 1 factory.go:1890] *v1.UserDefinedNetwork informer cache synced successfully 2025-12-08T17:53:42.425775609+00:00 stderr F I1208 17:53:42.425756 1 factory.go:636] Watch Factory start up complete, took: 149.781763ms 2025-12-08T17:53:42.425814260+00:00 stderr F I1208 17:53:42.425798 1 controller.go:133] Adding controller [clustermanager-nad-controller NAD controller] event handlers 2025-12-08T17:53:42.425849461+00:00 stderr F I1208 17:53:42.425830 1 shared_informer.go:350] "Waiting for caches to sync" controller="[clustermanager-nad-controller NAD controller]" 2025-12-08T17:53:42.425856541+00:00 stderr F I1208 17:53:42.425846 1 
shared_informer.go:357] "Caches are synced" controller="[clustermanager-nad-controller NAD controller]" 2025-12-08T17:53:42.425865041+00:00 stderr F I1208 17:53:42.425858 1 controller.go:157] Starting controller [clustermanager-nad-controller NAD controller] with 1 workers 2025-12-08T17:53:42.425943773+00:00 stderr F I1208 17:53:42.425922 1 network_controller.go:246] [clustermanager-nad-controller network controller]: syncing all networks 2025-12-08T17:53:42.425943773+00:00 stderr F I1208 17:53:42.425934 1 network_controller.go:257] [clustermanager-nad-controller network controller]: finished syncing all networks. Time taken: 20.631µs 2025-12-08T17:53:42.425956894+00:00 stderr F I1208 17:53:42.425941 1 controller.go:157] Starting controller [clustermanager-nad-controller network controller] with 1 workers 2025-12-08T17:53:42.425965244+00:00 stderr F I1208 17:53:42.425957 1 nad_controller.go:162] [clustermanager-nad-controller NAD controller]: started 2025-12-08T17:53:42.425989814+00:00 stderr F I1208 17:53:42.425970 1 network_cluster_controller.go:377] Initializing cluster manager network controller "default" ... 2025-12-08T17:53:42.426019915+00:00 stderr F I1208 17:53:42.426004 1 network_cluster_controller.go:383] Cluster manager network controller "default" initialized. Took: 41.571µs 2025-12-08T17:53:42.426019915+00:00 stderr F I1208 17:53:42.426015 1 network_cluster_controller.go:387] Cluster manager network controller "default" starting node watcher... 2025-12-08T17:53:42.426172459+00:00 stderr F I1208 17:53:42.426146 1 network_cluster_controller.go:392] Cluster manager network controller "default" completed watch nodes. Took: 129.423µs 2025-12-08T17:53:42.426172459+00:00 stderr F I1208 17:53:42.426165 1 zone_cluster_controller.go:217] Node crc has the id 2 set 2025-12-08T17:53:42.426293123+00:00 stderr F I1208 17:53:42.426263 1 kube.go:133] Setting annotations map[k8s.ovn.org/node-id:2 k8s.ovn.org/node-transit-switch-port-ifaddr:{"ipv4":"100.88.0.2/16"}] on node crc 2025-12-08T17:53:42.440358535+00:00 stderr F W1208 17:53:42.440303 1 egressip_healthcheck.go:169] Health checking using insecure connection 2025-12-08T17:53:43.441546957+00:00 stderr F W1208 17:53:43.441476 1 egressip_healthcheck.go:188] Could not connect to crc (10.217.0.2:9107): context deadline exceeded 2025-12-08T17:53:43.441621299+00:00 stderr F I1208 17:53:43.441591 1 egressip_controller.go:436] EgressIP node reachability enabled and using gRPC port 9107 2025-12-08T17:53:43.441621299+00:00 stderr F I1208 17:53:43.441612 1 egressservice_cluster.go:174] Starting Egress Services Controller 2025-12-08T17:53:43.441660940+00:00 stderr F I1208 17:53:43.441637 1 shared_informer.go:350] "Waiting for caches to sync" controller="egressservices" 2025-12-08T17:53:43.441712802+00:00 stderr F I1208 17:53:43.441690 1 shared_informer.go:357] "Caches are synced" controller="egressservices" 2025-12-08T17:53:43.441742662+00:00 stderr F I1208 17:53:43.441722 1 shared_informer.go:350] "Waiting for caches to sync" controller="egressservices_services" 2025-12-08T17:53:43.441750603+00:00 stderr F I1208 17:53:43.441739 1 shared_informer.go:357] "Caches are synced" controller="egressservices_services" 2025-12-08T17:53:43.441758983+00:00 stderr F I1208 17:53:43.441752 1 shared_informer.go:350] "Waiting for caches to sync" controller="egressservices_endpointslices" 2025-12-08T17:53:43.441783333+00:00 stderr F I1208 17:53:43.441763 1 shared_informer.go:357] "Caches are synced" controller="egressservices_endpointslices" 
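The "Starting reflector" / "Listing and watching" / "Caches populated" / "informer cache synced successfully" sequence above is the usual shared-informer startup pattern before the watch factory reports it is complete. A small sketch of that flow for two of the watched types, assuming an existing kubernetes.Interface; the event-handler bodies are placeholders rather than the component's real handlers:

import (
	"context"
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// startInformers mirrors the start-then-wait-for-sync sequence in the log above.
func startInformers(ctx context.Context, client kubernetes.Interface) error {
	// Default resync of 0, matching the resyncPeriod="0s" entries in the log.
	factory := informers.NewSharedInformerFactory(client, 0)

	podInformer := factory.Core().V1().Pods().Informer()
	nodeInformer := factory.Core().V1().Nodes().Informer()

	// A real controller would enqueue keys here for its workers.
	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { /* enqueue */ },
		UpdateFunc: func(oldObj, newObj interface{}) { /* enqueue */ },
		DeleteFunc: func(obj interface{}) { /* enqueue */ },
	})

	// Start every registered informer, then block until the initial lists are cached.
	factory.Start(ctx.Done())
	if !cache.WaitForCacheSync(ctx.Done(), podInformer.HasSynced, nodeInformer.HasSynced) {
		return fmt.Errorf("informer caches did not sync")
	}
	return nil
}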
2025-12-08T17:53:43.441790734+00:00 stderr F I1208 17:53:43.441782 1 shared_informer.go:350] "Waiting for caches to sync" controller="egressservices_nodes" 2025-12-08T17:53:43.441799124+00:00 stderr F I1208 17:53:43.441793 1 shared_informer.go:357] "Caches are synced" controller="egressservices_nodes" 2025-12-08T17:53:43.441821274+00:00 stderr F I1208 17:53:43.441802 1 egressservice_cluster.go:191] Repairing Egress Services 2025-12-08T17:53:43.441972218+00:00 stderr F I1208 17:53:43.441944 1 kube.go:272] Setting labels map[] on node crc 2025-12-08T17:53:43.452142975+00:00 stderr F I1208 17:53:43.452091 1 endpointslice_mirror_controller.go:155] Starting the EndpointSlice mirror controller 2025-12-08T17:53:43.452142975+00:00 stderr F I1208 17:53:43.452120 1 endpointslice_mirror_controller.go:156] Repairing EndpointSlice mirrors 2025-12-08T17:53:43.452308580+00:00 stderr F I1208 17:53:43.452253 1 status_manager.go:221] Starting StatusManager with typed managers: map[adminpolicybasedexternalroutes:0xc0048fe740 egressfirewalls:0xc0048feb40 egressqoses:0xc0048fef40] 2025-12-08T17:53:43.452350301+00:00 stderr F I1208 17:53:43.452325 1 controller.go:133] Adding controller zone_tracker event handlers 2025-12-08T17:53:43.452431023+00:00 stderr F I1208 17:53:43.452401 1 shared_informer.go:350] "Waiting for caches to sync" controller="zone_tracker" 2025-12-08T17:53:43.452431023+00:00 stderr F I1208 17:53:43.452426 1 shared_informer.go:357] "Caches are synced" controller="zone_tracker" 2025-12-08T17:53:43.452472294+00:00 stderr F I1208 17:53:43.452442 1 status_manager.go:245] StatusManager got zones update: map[crc:{}] 2025-12-08T17:53:43.452479374+00:00 stderr F I1208 17:53:43.452472 1 controller.go:289] Controller adminpolicybasedexternalroutes_statusmanager: full reconcile 2025-12-08T17:53:43.452499915+00:00 stderr F I1208 17:53:43.452484 1 controller.go:289] Controller egressfirewalls_statusmanager: full reconcile 2025-12-08T17:53:43.452506885+00:00 stderr F I1208 17:53:43.452500 1 controller.go:289] Controller egressqoses_statusmanager: full reconcile 2025-12-08T17:53:43.452517615+00:00 stderr F I1208 17:53:43.452508 1 status_manager.go:245] StatusManager got zones update: map[crc:{}] 2025-12-08T17:53:43.452525176+00:00 stderr F I1208 17:53:43.452518 1 controller.go:289] Controller egressqoses_statusmanager: full reconcile 2025-12-08T17:53:43.452532186+00:00 stderr F I1208 17:53:43.452525 1 controller.go:289] Controller adminpolicybasedexternalroutes_statusmanager: full reconcile 2025-12-08T17:53:43.452538986+00:00 stderr F I1208 17:53:43.452531 1 controller.go:289] Controller egressfirewalls_statusmanager: full reconcile 2025-12-08T17:53:43.452545786+00:00 stderr F I1208 17:53:43.452539 1 controller.go:157] Starting controller zone_tracker with 1 workers 2025-12-08T17:53:43.452587777+00:00 stderr F I1208 17:53:43.452571 1 controller.go:133] Adding controller adminpolicybasedexternalroutes_statusmanager event handlers 2025-12-08T17:53:43.452680780+00:00 stderr F I1208 17:53:43.452646 1 shared_informer.go:350] "Waiting for caches to sync" controller="adminpolicybasedexternalroutes_statusmanager" 2025-12-08T17:53:43.452680780+00:00 stderr F I1208 17:53:43.452666 1 shared_informer.go:357] "Caches are synced" controller="adminpolicybasedexternalroutes_statusmanager" 2025-12-08T17:53:43.452680780+00:00 stderr F I1208 17:53:43.452676 1 controller.go:157] Starting controller adminpolicybasedexternalroutes_statusmanager with 1 workers 2025-12-08T17:53:43.452717841+00:00 stderr F I1208 17:53:43.452694 1 
controller.go:133] Adding controller egressfirewalls_statusmanager event handlers 2025-12-08T17:53:43.452755622+00:00 stderr F I1208 17:53:43.452737 1 shared_informer.go:350] "Waiting for caches to sync" controller="egressfirewalls_statusmanager" 2025-12-08T17:53:43.452763302+00:00 stderr F I1208 17:53:43.452754 1 shared_informer.go:357] "Caches are synced" controller="egressfirewalls_statusmanager" 2025-12-08T17:53:43.452770242+00:00 stderr F I1208 17:53:43.452763 1 controller.go:157] Starting controller egressfirewalls_statusmanager with 1 workers 2025-12-08T17:53:43.452798043+00:00 stderr F I1208 17:53:43.452781 1 controller.go:133] Adding controller egressqoses_statusmanager event handlers 2025-12-08T17:53:43.452828364+00:00 stderr F I1208 17:53:43.452811 1 shared_informer.go:350] "Waiting for caches to sync" controller="egressqoses_statusmanager" 2025-12-08T17:53:43.452828364+00:00 stderr F I1208 17:53:43.452824 1 shared_informer.go:357] "Caches are synced" controller="egressqoses_statusmanager" 2025-12-08T17:53:43.452837644+00:00 stderr F I1208 17:53:43.452831 1 controller.go:157] Starting controller egressqoses_statusmanager with 1 workers 2025-12-08T17:53:43.452844624+00:00 stderr F I1208 17:53:43.452839 1 controller.go:145] Starting user-defined network controllers 2025-12-08T17:53:43.452871805+00:00 stderr F I1208 17:53:43.452857 1 controller.go:133] Adding controller udn-namespace-controller event handlers 2025-12-08T17:53:43.453162613+00:00 stderr F I1208 17:53:43.453062 1 egressservice_cluster_node.go:167] Processing sync for Egress Service node crc 2025-12-08T17:53:43.453162613+00:00 stderr F I1208 17:53:43.453094 1 egressservice_cluster_node.go:170] Finished syncing Egress Service node crc: 46.532µs 2025-12-08T17:53:43.453213024+00:00 stderr F I1208 17:53:43.453188 1 controller.go:133] Adding controller udn-nad-controller event handlers 2025-12-08T17:53:43.453239885+00:00 stderr F I1208 17:53:43.453189 1 controller.go:133] Adding controller cluster-user-defined-network-controller event handlers 2025-12-08T17:53:43.453247405+00:00 stderr F I1208 17:53:43.453224 1 controller.go:133] Adding controller user-defined-network-controller event handlers 2025-12-08T17:53:43.453254215+00:00 stderr F I1208 17:53:43.453242 1 shared_informer.go:350] "Waiting for caches to sync" controller="udn-nad-controller" 2025-12-08T17:53:43.453261025+00:00 stderr F I1208 17:53:43.453252 1 shared_informer.go:350] "Waiting for caches to sync" controller="cluster-user-defined-network-controller" 2025-12-08T17:53:43.453268086+00:00 stderr F I1208 17:53:43.453261 1 shared_informer.go:357] "Caches are synced" controller="udn-nad-controller" 2025-12-08T17:53:43.453274806+00:00 stderr F I1208 17:53:43.453265 1 shared_informer.go:357] "Caches are synced" controller="cluster-user-defined-network-controller" 2025-12-08T17:53:43.453318367+00:00 stderr F I1208 17:53:43.453290 1 shared_informer.go:350] "Waiting for caches to sync" controller="user-defined-network-controller" 2025-12-08T17:53:43.453327307+00:00 stderr F I1208 17:53:43.453321 1 shared_informer.go:357] "Caches are synced" controller="user-defined-network-controller" 2025-12-08T17:53:43.453446090+00:00 stderr F I1208 17:53:43.453421 1 shared_informer.go:350] "Waiting for caches to sync" controller="udn-namespace-controller" 2025-12-08T17:53:43.453446090+00:00 stderr F I1208 17:53:43.453442 1 shared_informer.go:357] "Caches are synced" controller="udn-namespace-controller" 2025-12-08T17:53:43.453472301+00:00 stderr F I1208 17:53:43.453455 1 
controller.go:157] Starting controller cluster-user-defined-network-controller with 1 workers 2025-12-08T17:53:43.453472301+00:00 stderr F I1208 17:53:43.453468 1 controller.go:157] Starting controller user-defined-network-controller with 1 workers 2025-12-08T17:53:43.453482401+00:00 stderr F I1208 17:53:43.453476 1 controller.go:157] Starting controller udn-nad-controller with 1 workers 2025-12-08T17:53:43.453489211+00:00 stderr F I1208 17:53:43.453483 1 controller.go:157] Starting controller udn-namespace-controller with 1 workers 2025-12-08T17:58:58.390807387+00:00 stderr F I1208 17:58:58.390753 1 reflector.go:946] "Watch close" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140" type="*v1.EgressFirewall" totalItems=6 2025-12-08T17:59:09.426214121+00:00 stderr F I1208 17:59:09.426128 1 reflector.go:946] "Watch close" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140" type="*v1.UserDefinedNetwork" totalItems=6 2025-12-08T17:59:50.402071726+00:00 stderr F I1208 17:59:50.401179 1 reflector.go:946] "Watch close" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140" type="*v1.EgressService" totalItems=7 2025-12-08T18:00:27.300686595+00:00 stderr F I1208 18:00:27.300199 1 reflector.go:946] "Watch close" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod" totalItems=391 2025-12-08T18:00:52.418331096+00:00 stderr F I1208 18:00:52.418224 1 reflector.go:946] "Watch close" reflector="github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117" type="*v1.NetworkAttachmentDefinition" totalItems=9 2025-12-08T18:00:58.407556329+00:00 stderr F I1208 18:00:58.406952 1 reflector.go:946] "Watch close" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140" type="*v1.AdminPolicyBasedExternalRoute" totalItems=8 2025-12-08T18:01:10.395002043+00:00 stderr F I1208 18:01:10.394934 1 reflector.go:946] "Watch close" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140" type="*v1.EgressQoS" totalItems=8 2025-12-08T18:01:14.285560443+00:00 stderr F I1208 18:01:14.285455 1 reflector.go:946] "Watch close" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.EndpointSlice" totalItems=77 2025-12-08T18:01:18.426944377+00:00 stderr F I1208 18:01:18.426843 1 reflector.go:946] "Watch close" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140" type="*v1.ClusterUserDefinedNetwork" totalItems=9 2025-12-08T18:01:27.387640372+00:00 stderr F I1208 18:01:27.387579 1 reflector.go:946] "Watch close" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140" type="*v1.EgressIP" totalItems=8 2025-12-08T18:01:47.287241220+00:00 stderr F I1208 18:01:47.287107 1 reflector.go:946] "Watch close" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace" totalItems=27 2025-12-08T18:02:17.286308730+00:00 stderr F I1208 18:02:17.285584 1 reflector.go:946] "Watch close" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" totalItems=57 
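The later "Watch close ... totalItems=N" entries are routine: the API server periodically ends watch streams and the reflectors re-establish them. The more interesting item is a few lines up, where the egressip_healthcheck entries show a deadline-bounded probe to the node's health-check port ("Health checking using insecure connection", then "Could not connect to crc (10.217.0.2:9107): context deadline exceeded" about one second later). A generic sketch of that kind of probe, not ovn-kubernetes's actual health-check client; the address and the one-second deadline are taken from those two entries:

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// probeNode returns nil if a gRPC connection to addr (e.g. "10.217.0.2:9107")
// can be established before the deadline expires.
func probeNode(addr string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	// "Health checking using insecure connection": no TLS on the probe port.
	conn, err := grpc.DialContext(ctx, addr,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(), // block until connected or the context deadline is hit
	)
	if err != nil {
		return fmt.Errorf("could not connect to %s: %w", addr, err)
	}
	return conn.Close()
}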
2025-12-08T18:02:45.281062015+00:00 stderr F I1208 18:02:45.281007 1 reflector.go:946] "Watch close" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" totalItems=26
2025-12-08T18:03:29.412895622+00:00 stderr F I1208 18:03:29.412799 1 reflector.go:946] "Watch close" reflector="github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go:141" type="*v1alpha1.IPAMClaim" totalItems=10
2025-12-08T18:04:55.404057816+00:00 stderr F I1208 18:04:55.403971 1 reflector.go:946] "Watch close" reflector="github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140" type="*v1.EgressService" totalItems=6
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_cluster-image-registry-operator-86c45576b9-rwgjl_1cd09f9c-6a6f-438a-a982-082edc35a55c/cluster-image-registry-operator/0.log
2025-12-08T17:44:21.300335194+00:00 stdout F Overwriting root TLS certificate authority trust store
2025-12-08T17:44:22.164040923+00:00 stderr F I1208 17:44:22.162996 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.
2025-12-08T17:44:22.171955829+00:00 stderr F I1208 17:44:22.171905 1 observer_polling.go:159] Starting file observer
2025-12-08T17:44:22.359481764+00:00 stderr F I1208 17:44:22.358748 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology
2025-12-08T17:44:22.364721887+00:00 stderr F I1208 17:44:22.364461 1 leaderelection.go:254] attempting to acquire leader lease openshift-image-registry/openshift-master-controllers...
2025-12-08T17:44:22.382233915+00:00 stderr F I1208 17:44:22.382014 1 leaderelection.go:268] successfully acquired lease openshift-image-registry/openshift-master-controllers 2025-12-08T17:44:22.383265143+00:00 stderr F I1208 17:44:22.382895 1 main.go:34] Cluster Image Registry Operator Version: 4a2f646ef 2025-12-08T17:44:22.383265143+00:00 stderr F I1208 17:44:22.382928 1 main.go:35] Go Version: go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime 2025-12-08T17:44:22.383265143+00:00 stderr F I1208 17:44:22.382935 1 main.go:36] Go OS/Arch: linux/amd64 2025-12-08T17:44:22.383265143+00:00 stderr F I1208 17:44:22.382942 1 main.go:67] Watching files [/var/run/configmaps/trusted-ca/tls-ca-bundle.pem /etc/secrets/tls.crt /etc/secrets/tls.key]... 2025-12-08T17:44:22.385378000+00:00 stderr F I1208 17:44:22.385320 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-image-registry", Name:"openshift-master-controllers", UID:"40f60183-0fde-497b-b92d-7413743f8d65", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37242", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' cluster-image-registry-operator-86c45576b9-rwgjl_ab69d542-ce94-4e66-852f-c2ac39227a1b became leader 2025-12-08T17:44:22.415119252+00:00 stderr F I1208 17:44:22.412633 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:22.423727717+00:00 stderr F I1208 17:44:22.420305 1 starter.go:89] FeatureGates initialized: knownFeatureGates=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount 
ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:22.423727717+00:00 stderr F I1208 17:44:22.421116 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-image-registry", Name:"cluster-image-registry-operator", UID:"a4c18a44-787c-4851-97ac-f3da87e8d0e3", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:22.428986380+00:00 stderr F I1208 17:44:22.427658 1 metrics.go:88] Starting MetricsController 2025-12-08T17:44:22.429137364+00:00 stderr F I1208 17:44:22.429108 1 clusteroperator.go:143] Starting 
ClusterOperatorStatusController 2025-12-08T17:44:22.429146424+00:00 stderr F I1208 17:44:22.429140 1 nodecadaemon.go:204] Starting NodeCADaemonController 2025-12-08T17:44:22.429161485+00:00 stderr F I1208 17:44:22.429155 1 imageregistrycertificates.go:211] Starting ImageRegistryCertificatesController 2025-12-08T17:44:22.429168535+00:00 stderr F I1208 17:44:22.429163 1 imageconfig.go:105] Starting ImageConfigController 2025-12-08T17:44:22.430596314+00:00 stderr F I1208 17:44:22.430102 1 azurestackcloud.go:174] Starting AzureStackCloudController 2025-12-08T17:44:22.430596314+00:00 stderr F I1208 17:44:22.430136 1 azurepathfixcontroller.go:202] Starting AzurePathFixController 2025-12-08T17:44:22.430596314+00:00 stderr F I1208 17:44:22.430147 1 awstagcontroller.go:160] Starting AWS Tag Controller 2025-12-08T17:44:22.430596314+00:00 stderr F I1208 17:44:22.430427 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:22.484300719+00:00 stderr F W1208 17:44:22.482493 1 reflector.go:561] github.com/openshift/client-go/image/informers/externalversions/factory.go:125: failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io) 2025-12-08T17:44:22.484300719+00:00 stderr F E1208 17:44:22.482579 1 reflector.go:158] "Unhandled Error" err="github.com/openshift/client-go/image/informers/externalversions/factory.go:125: Failed to watch *v1.ImageStream: failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" logger="UnhandledError" 2025-12-08T17:44:22.484300719+00:00 stderr F W1208 17:44:22.482606 1 reflector.go:561] github.com/openshift/client-go/route/informers/externalversions/factory.go:125: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:22.484300719+00:00 stderr F E1208 17:44:22.482619 1 reflector.go:158] "Unhandled Error" err="github.com/openshift/client-go/route/informers/externalversions/factory.go:125: Failed to watch *v1.Route: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" logger="UnhandledError" 2025-12-08T17:44:22.531099215+00:00 stderr F I1208 17:44:22.530041 1 nodecadaemon.go:211] Started NodeCADaemonController 2025-12-08T17:44:22.531099215+00:00 stderr F I1208 17:44:22.530819 1 awstagcontroller.go:167] Started AWS Tag Controller 2025-12-08T17:44:22.531099215+00:00 stderr F I1208 17:44:22.530850 1 azurestackcloud.go:181] Started AzureStackCloudController 2025-12-08T17:44:22.531217188+00:00 stderr F I1208 17:44:22.531180 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:22.531217188+00:00 stderr F I1208 17:44:22.531197 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 
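The ImageStream and Route list failures above (and repeated below), "failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)", are 503s from aggregated APIs that are not serving yet; the reflectors keep retrying until the caches eventually populate further down the log. A small sketch of the same tolerate-and-retry idea using the OpenShift image clientset, assuming an imageclient.Interface variable; the "openshift" namespace and the two-second interval are arbitrary example values, not the operator's actual logic:

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	imageclient "github.com/openshift/client-go/image/clientset/versioned"
)

// waitForImageStreams polls until the image.openshift.io aggregated API answers,
// treating only 503 ServiceUnavailable as a retryable condition.
func waitForImageStreams(ctx context.Context, imageClient imageclient.Interface) error {
	for {
		_, err := imageClient.ImageV1().ImageStreams("openshift").List(ctx, metav1.ListOptions{Limit: 1})
		if err == nil {
			return nil
		}
		if !apierrors.IsServiceUnavailable(err) {
			return err // some other problem; give up instead of retrying
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
}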
2025-12-08T17:44:22.547416920+00:00 stderr F I1208 17:44:22.546814 1 azurepathfixcontroller.go:209] Started AzurePathFixController 2025-12-08T17:44:22.552636423+00:00 stderr F I1208 17:44:22.552584 1 reflector.go:368] Caches populated for *v1.ClusterOperator from github.com/openshift/client-go/config/informers/externalversions/factory.go:125 2025-12-08T17:44:22.555533132+00:00 stderr F I1208 17:44:22.555500 1 reflector.go:368] Caches populated for *v1.ClusterRoleBinding from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:44:22.562324197+00:00 stderr F I1208 17:44:22.562114 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:44:22.607435888+00:00 stderr F I1208 17:44:22.605659 1 reflector.go:368] Caches populated for *v1.Secret from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:44:22.621382218+00:00 stderr F I1208 17:44:22.621191 1 reflector.go:368] Caches populated for *v1.ClusterRole from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:44:22.632093810+00:00 stderr F I1208 17:44:22.631111 1 controllerimagepruner.go:386] Starting ImagePrunerController 2025-12-08T17:44:22.632421559+00:00 stderr F I1208 17:44:22.632361 1 clusteroperator.go:150] Started ClusterOperatorStatusController 2025-12-08T17:44:22.650748349+00:00 stderr F I1208 17:44:22.650708 1 generator.go:63] object *v1.ClusterOperator, Name=image-registry updated: removed:apiVersion="config.openshift.io/v1", removed:kind="ClusterOperator", changed:metadata.managedFields.2.time={"2025-11-03T09:40:47Z" -> "2025-12-08T17:44:22Z"}, changed:metadata.resourceVersion={"34624" -> "37269"}, changed:status.conditions.0.message={"Available: The deployment does not have available replicas\nNodeCADaemonAvailable: The daemon set node-ca does not have available replicas\nImagePrunerAvailable: Pruner CronJob has been created" -> "Available: The deployment does not have available replicas\nNodeCADaemonAvailable: The daemon set node-ca has available replicas\nImagePrunerAvailable: Pruner CronJob has been created"}, changed:status.conditions.0.reason={"NoReplicasAvailable::NodeCADaemonNoAvailableReplicas" -> "NoReplicasAvailable"}, changed:status.conditions.1.message={"Progressing: The deployment has not completed\nNodeCADaemonProgressing: The daemon set node-ca is deploying node pods" -> "Progressing: The deployment has not completed\nNodeCADaemonProgressing: The daemon set node-ca is deployed"}, changed:status.conditions.1.reason={"DeploymentNotCompleted::NodeCADaemonUnavailable" -> "DeploymentNotCompleted"} 2025-12-08T17:44:22.674191068+00:00 stderr F I1208 17:44:22.674034 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:44:22.730528525+00:00 stderr F I1208 17:44:22.729773 1 imageregistrycertificates.go:218] Started ImageRegistryCertificatesController 2025-12-08T17:44:23.683754896+00:00 stderr F W1208 17:44:23.683372 1 reflector.go:561] github.com/openshift/client-go/image/informers/externalversions/factory.go:125: failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io) 2025-12-08T17:44:23.683754896+00:00 stderr F E1208 17:44:23.683683 1 reflector.go:158] "Unhandled Error" err="github.com/openshift/client-go/image/informers/externalversions/factory.go:125: Failed to watch *v1.ImageStream: failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" 
logger="UnhandledError" 2025-12-08T17:44:23.924196905+00:00 stderr F W1208 17:44:23.917258 1 reflector.go:561] github.com/openshift/client-go/route/informers/externalversions/factory.go:125: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:23.924196905+00:00 stderr F E1208 17:44:23.917318 1 reflector.go:158] "Unhandled Error" err="github.com/openshift/client-go/route/informers/externalversions/factory.go:125: Failed to watch *v1.Route: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" logger="UnhandledError" 2025-12-08T17:44:26.022910021+00:00 stderr F W1208 17:44:26.021736 1 reflector.go:561] github.com/openshift/client-go/image/informers/externalversions/factory.go:125: failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io) 2025-12-08T17:44:26.022910021+00:00 stderr F E1208 17:44:26.022236 1 reflector.go:158] "Unhandled Error" err="github.com/openshift/client-go/image/informers/externalversions/factory.go:125: Failed to watch *v1.ImageStream: failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" logger="UnhandledError" 2025-12-08T17:44:26.990312539+00:00 stderr F W1208 17:44:26.987800 1 reflector.go:561] github.com/openshift/client-go/route/informers/externalversions/factory.go:125: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:26.990312539+00:00 stderr F E1208 17:44:26.988272 1 reflector.go:158] "Unhandled Error" err="github.com/openshift/client-go/route/informers/externalversions/factory.go:125: Failed to watch *v1.Route: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" logger="UnhandledError" 2025-12-08T17:44:30.375421893+00:00 stderr F W1208 17:44:30.374796 1 reflector.go:561] github.com/openshift/client-go/route/informers/externalversions/factory.go:125: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io) 2025-12-08T17:44:30.375465765+00:00 stderr F E1208 17:44:30.375418 1 reflector.go:158] "Unhandled Error" err="github.com/openshift/client-go/route/informers/externalversions/factory.go:125: Failed to watch *v1.Route: failed to list *v1.Route: the server is currently unable to handle the request (get routes.route.openshift.io)" logger="UnhandledError" 2025-12-08T17:44:32.399640278+00:00 stderr F W1208 17:44:32.399372 1 reflector.go:561] github.com/openshift/client-go/image/informers/externalversions/factory.go:125: failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io) 2025-12-08T17:44:32.399640278+00:00 stderr F E1208 17:44:32.399624 1 reflector.go:158] "Unhandled Error" err="github.com/openshift/client-go/image/informers/externalversions/factory.go:125: Failed to watch *v1.ImageStream: failed to list *v1.ImageStream: the server is currently unable to handle the request (get imagestreams.image.openshift.io)" logger="UnhandledError" 2025-12-08T17:44:37.248863058+00:00 stderr F I1208 17:44:37.248323 1 reflector.go:368] Caches populated for *v1.Route from github.com/openshift/client-go/route/informers/externalversions/factory.go:125 2025-12-08T17:44:37.329426730+00:00 stderr F I1208 17:44:37.329312 1 imageconfig.go:112] Started 
ImageConfigController 2025-12-08T17:44:37.329426730+00:00 stderr F I1208 17:44:37.329334 1 controller.go:454] Starting Controller 2025-12-08T17:44:37.348263595+00:00 stderr F I1208 17:44:37.348208 1 controller.go:340] object changed: *v1.Config, Name=cluster (status=true): changed:status.conditions.3.lastTransitionTime={"2025-11-02T08:08:51Z" -> "2025-12-08T17:44:37Z"}, added:status.conditions.3.message="The deployment does not have available replicas", added:status.conditions.3.reason="Unavailable", changed:status.conditions.3.status={"False" -> "True"} 2025-12-08T17:44:37.365519285+00:00 stderr F I1208 17:44:37.365413 1 generator.go:63] object *v1.ClusterOperator, Name=image-registry updated: changed:metadata.managedFields.2.time={"2025-12-08T17:44:22Z" -> "2025-12-08T17:44:37Z"}, changed:metadata.resourceVersion={"37269" -> "38253"}, changed:status.conditions.2.lastTransitionTime={"2025-11-03T08:44:28Z" -> "2025-12-08T17:44:37Z"}, added:status.conditions.2.message="Degraded: The deployment does not have available replicas", changed:status.conditions.2.reason={"AsExpected" -> "Unavailable"}, changed:status.conditions.2.status={"False" -> "True"} 2025-12-08T17:44:43.841761525+00:00 stderr F I1208 17:44:43.841677 1 reflector.go:368] Caches populated for *v1.ImageStream from github.com/openshift/client-go/image/informers/externalversions/factory.go:125 2025-12-08T17:44:43.929305742+00:00 stderr F I1208 17:44:43.929029 1 metrics.go:94] Started MetricsController 2025-12-08T17:45:05.486114529+00:00 stderr F I1208 17:45:05.485446 1 controller.go:340] object changed: *v1.Config, Name=cluster (status=true): changed:status.conditions.1.lastTransitionTime={"2025-11-03T09:40:46Z" -> "2025-12-08T17:45:05Z"}, changed:status.conditions.1.message={"The deployment has not completed" -> "The registry is ready"}, changed:status.conditions.1.reason={"DeploymentNotCompleted" -> "Ready"}, changed:status.conditions.1.status={"True" -> "False"}, changed:status.conditions.2.lastTransitionTime={"2025-11-03T09:40:46Z" -> "2025-12-08T17:45:05Z"}, changed:status.conditions.2.message={"The deployment does not have available replicas" -> "The registry is ready"}, changed:status.conditions.2.reason={"NoReplicasAvailable" -> "Ready"}, changed:status.conditions.2.status={"False" -> "True"}, changed:status.conditions.3.lastTransitionTime={"2025-12-08T17:44:37Z" -> "2025-12-08T17:45:05Z"}, removed:status.conditions.3.message="The deployment does not have available replicas", removed:status.conditions.3.reason="Unavailable", changed:status.conditions.3.status={"True" -> "False"}, changed:status.readyReplicas={"0.000000" -> "1.000000"} 2025-12-08T17:45:05.509858850+00:00 stderr F I1208 17:45:05.509268 1 generator.go:63] object *v1.ClusterOperator, Name=image-registry updated: changed:metadata.managedFields.2.time={"2025-12-08T17:44:37Z" -> "2025-12-08T17:45:05Z"}, changed:metadata.resourceVersion={"38253" -> "38620"}, changed:status.conditions.0.lastTransitionTime={"2025-11-03T09:40:46Z" -> "2025-12-08T17:45:05Z"}, changed:status.conditions.0.message={"Available: The deployment does not have available replicas\nNodeCADaemonAvailable: The daemon set node-ca has available replicas\nImagePrunerAvailable: Pruner CronJob has been created" -> "Available: The registry is ready\nNodeCADaemonAvailable: The daemon set node-ca has available replicas\nImagePrunerAvailable: Pruner CronJob has been created"}, changed:status.conditions.0.reason={"NoReplicasAvailable" -> "Ready"}, changed:status.conditions.0.status={"False" -> "True"}, 
changed:status.conditions.1.lastTransitionTime={"2025-11-03T09:40:46Z" -> "2025-12-08T17:45:05Z"}, changed:status.conditions.1.message={"Progressing: The deployment has not completed\nNodeCADaemonProgressing: The daemon set node-ca is deployed" -> "Progressing: The registry is ready\nNodeCADaemonProgressing: The daemon set node-ca is deployed"}, changed:status.conditions.1.reason={"DeploymentNotCompleted" -> "Ready"}, changed:status.conditions.1.status={"True" -> "False"}, changed:status.conditions.2.lastTransitionTime={"2025-12-08T17:44:37Z" -> "2025-12-08T17:45:05Z"}, removed:status.conditions.2.message="Degraded: The deployment does not have available replicas", changed:status.conditions.2.reason={"Unavailable" -> "AsExpected"}, changed:status.conditions.2.status={"True" -> "False"} 2025-12-08T17:46:22.404721457+00:00 stderr F E1208 17:46:22.404555 1 leaderelection.go:429] Failed to update lock optimitically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-image-registry/leases/openshift-master-controllers?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:22.405469000+00:00 stderr F E1208 17:46:22.405409 1 leaderelection.go:436] error retrieving resource lock openshift-image-registry/openshift-master-controllers: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-image-registry/leases/openshift-master-controllers?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:57.043928641+00:00 stderr F I1208 17:46:57.043818 1 reflector.go:368] Caches populated for *v1.Proxy from github.com/openshift/client-go/config/informers/externalversions/factory.go:125 2025-12-08T17:47:00.228934692+00:00 stderr F I1208 17:47:00.228195 1 reflector.go:368] Caches populated for *v1.Deployment from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:04.049270833+00:00 stderr F I1208 17:47:04.048512 1 reflector.go:368] Caches populated for *v1.ClusterRole from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:04.336697870+00:00 stderr F I1208 17:47:04.336587 1 reflector.go:368] Caches populated for *v1.ImagePruner from github.com/openshift/client-go/imageregistry/informers/externalversions/factory.go:125 2025-12-08T17:47:05.087707991+00:00 stderr F I1208 17:47:05.087613 1 reflector.go:368] Caches populated for *v1.PodDisruptionBudget from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:06.587080871+00:00 stderr F I1208 17:47:06.587028 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:07.329635296+00:00 stderr F I1208 17:47:07.329574 1 reflector.go:368] Caches populated for *v1.ServiceAccount from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:07.339844677+00:00 stderr F I1208 17:47:07.339799 1 reflector.go:368] Caches populated for *v1.Infrastructure from github.com/openshift/client-go/config/informers/externalversions/factory.go:125 2025-12-08T17:47:12.060103237+00:00 stderr F I1208 17:47:12.060061 1 reflector.go:368] Caches populated for *v1.Secret from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:17.139239224+00:00 stderr F I1208 17:47:17.138273 1 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:20.502955821+00:00 stderr F I1208 17:47:20.502484 1 reflector.go:368] Caches populated for *v1.Job from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:25.410479305+00:00 
stderr F I1208 17:47:25.409753 1 reflector.go:368] Caches populated for *v1.ClusterOperator from github.com/openshift/client-go/config/informers/externalversions/factory.go:125 2025-12-08T17:47:37.369402671+00:00 stderr F I1208 17:47:37.369332 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:44.070461243+00:00 stderr F I1208 17:47:44.070317 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:44.997501785+00:00 stderr F I1208 17:47:44.996968 1 reflector.go:368] Caches populated for *v1.ClusterVersion from github.com/openshift/client-go/config/informers/externalversions/factory.go:125 2025-12-08T17:47:48.965586396+00:00 stderr F I1208 17:47:48.965521 1 reflector.go:368] Caches populated for *v1.DaemonSet from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:51.561433102+00:00 stderr F I1208 17:47:51.561366 1 reflector.go:368] Caches populated for *v1.Config from github.com/openshift/client-go/imageregistry/informers/externalversions/factory.go:125 2025-12-08T17:47:59.236942413+00:00 stderr F I1208 17:47:59.236537 1 reflector.go:368] Caches populated for *v1.ClusterRoleBinding from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:47:59.238472338+00:00 stderr F I1208 17:47:59.238425 1 reflector.go:368] Caches populated for *v1.CronJob from k8s.io/client-go/informers/factory.go:160 2025-12-08T17:48:09.318797645+00:00 stderr F I1208 17:48:09.318080 1 reflector.go:368] Caches populated for *v1.FeatureGate from github.com/openshift/client-go/config/informers/externalversions/factory.go:125 2025-12-08T17:48:14.351661956+00:00 stderr F I1208 17:48:14.351123 1 reflector.go:368] Caches populated for *v1.Image from github.com/openshift/client-go/config/informers/externalversions/factory.go:125 2025-12-08T17:54:51.106929435+00:00 stderr F I1208 17:54:51.104383 1 generator.go:63] object *v1.Secret, Namespace=openshift-image-registry, Name=installation-pull-secrets updated: changed:data..dockerconfigjson={ -> }, changed:metadata.annotations.imageregistry.operator.openshift.io/checksum={"sha256:085fdb2709b57d501872b4e20b38e3618d21be40f24851b4fad2074469e1fa6d" -> "sha256:134d2023417aa99dc70c099f12731fc3d94cb8fe5fef3d499d5c1ff70d124cfb"}, changed:metadata.managedFields.0.time={"2025-11-03T09:38:57Z" -> "2025-12-08T17:54:51Z"}, changed:metadata.resourceVersion={"33773" -> "40747"} 2025-12-08T17:54:51.127994382+00:00 stderr F I1208 17:54:51.126799 1 apps.go:155] Deployment "openshift-image-registry/image-registry" changes: {"metadata":{"annotations":{"imageregistry.operator.openshift.io/checksum":"sha256:d23fe596b7d9fc259fb157109543a2f06ea79d075813138c3877cc58b09f333c","operator.openshift.io/spec-hash":"e94145b8bfa5fed31d44791402fd166c99d8742aaf3aa863300dac7192876016"}},"spec":{"revisionHistoryLimit":null,"template":{"metadata":{"annotations":{"imageregistry.operator.openshift.io/dependencies-checksum":"sha256:e847f40829a71bf8250456acdd010e475b1519e05c57e70ed4f1b28e3fea8414"}},"spec":{"containers":[{"command":["/bin/sh","-c","mkdir -p /etc/pki/ca-trust/extracted/edk2 /etc/pki/ca-trust/extracted/java /etc/pki/ca-trust/extracted/openssl /etc/pki/ca-trust/extracted/pem \u0026\u0026 update-ca-trust extract --output /etc/pki/ca-trust/extracted/ \u0026\u0026 exec 
/usr/bin/dockerregistry"],"env":[{"name":"REGISTRY_STORAGE","value":"filesystem"},{"name":"REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY","value":"/registry"},{"name":"REGISTRY_HTTP_ADDR","value":":5000"},{"name":"REGISTRY_HTTP_NET","value":"tcp"},{"name":"REGISTRY_HTTP_SECRET","value":"15ea86fe7fd20108cc09cc69d7f57ed1b7e1d87f5a6e0fa46fc90f41636c8647af8785432e6c579448e463c0f1a63039c63836565af1e25fb2e1809cad0a283b"},{"name":"REGISTRY_LOG_LEVEL","value":"info"},{"name":"REGISTRY_OPENSHIFT_QUOTA_ENABLED","value":"true"},{"name":"REGISTRY_STORAGE_CACHE_BLOBDESCRIPTOR","value":"inmemory"},{"name":"REGISTRY_STORAGE_DELETE_ENABLED","value":"true"},{"name":"REGISTRY_HEALTH_STORAGEDRIVER_ENABLED","value":"true"},{"name":"REGISTRY_HEALTH_STORAGEDRIVER_INTERVAL","value":"10s"},{"name":"REGISTRY_HEALTH_STORAGEDRIVER_THRESHOLD","value":"1"},{"name":"REGISTRY_OPENSHIFT_METRICS_ENABLED","value":"true"},{"name":"REGISTRY_OPENSHIFT_SERVER_ADDR","value":"image-registry.openshift-image-registry.svc:5000"},{"name":"REGISTRY_HTTP_TLS_CERTIFICATE","value":"/etc/secrets/tls.crt"},{"name":"REGISTRY_HTTP_TLS_KEY","value":"/etc/secrets/tls.key"}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418","lifecycle":{"preStop":{"exec":{"command":["sleep","25"]}}},"livenessProbe":{"httpGet":{"path":"/healthz","port":5000,"scheme":"HTTPS"},"initialDelaySeconds":5,"timeoutSeconds":5},"name":"registry","ports":[{"containerPort":5000,"protocol":"TCP"}],"readinessProbe":{"httpGet":{"path":"/healthz","port":5000,"scheme":"HTTPS"},"initialDelaySeconds":15,"timeoutSeconds":5},"resources":{"requests":{"cpu":"100m","memory":"256Mi"}},"securityContext":{"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/registry","name":"registry-storage"},{"mountPath":"/etc/secrets","name":"registry-tls"},{"mountPath":"/etc/pki/ca-trust/extracted","name":"ca-trust-extracted"},{"mountPath":"/etc/pki/ca-trust/source/anchors","name":"registry-certificates"},{"mountPath":"/usr/share/pki/ca-trust-source","name":"trusted-ca"},{"mountPath":"/var/lib/kubelet/","name":"installation-pull-secrets"},{"mountPath":"/var/run/secrets/openshift/serviceaccount","name":"bound-sa-token","readOnly":true}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"volumes":[{"name":"registry-storage","persistentVolumeClaim":{"claimName":"crc-image-registry-storage"}},{"name":"registry-tls","projected":{"sources":[{"secret":{"name":"image-registry-tls"}}]}},{"emptyDir":{},"name":"ca-trust-extracted"},{"configMap":{"name":"image-registry-certificates"},"name":"registry-certificates"},{"configMap":{"items":[{"key":"ca-bundle.crt","path":"anchors/ca-bundle.crt"}],"name":"trusted-ca","optional":true},"name":"trusted-ca"},{"name":"installation-pull-secrets","secret":{"items":[{"key":".dockerconfigjson","path":"config.json"}],"optional":true,"secretName":"installation-pull-secrets"}},{"name":"bound-sa-token","projected":{"sources":[{"serviceAccountToken":{"audience":"openshift","path":"token"}}]}}]}}}} 2025-12-08T17:54:51.155439351+00:00 stderr F I1208 17:54:51.155323 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-image-registry", Name:"cluster-image-registry-operator", UID:"a4c18a44-787c-4851-97ac-f3da87e8d0e3", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/image-registry -n openshift-image-registry because 
it changed 2025-12-08T17:54:51.157161447+00:00 stderr F I1208 17:54:51.156742 1 generator.go:63] object *v1.Deployment, Namespace=openshift-image-registry, Name=image-registry updated: changed:metadata.annotations.imageregistry.operator.openshift.io/checksum={"sha256:139986d456a8523e223a7bdbae4bf75e50ee56eb6703e01854cd742422c172ed" -> "sha256:d23fe596b7d9fc259fb157109543a2f06ea79d075813138c3877cc58b09f333c"}, changed:metadata.annotations.operator.openshift.io/spec-hash={"0da3aa3810d8130bdf08d8a57b4039d1da5044977d20c11a075f665d12963ac1" -> "e94145b8bfa5fed31d44791402fd166c99d8742aaf3aa863300dac7192876016"}, changed:metadata.generation={"4.000000" -> "5.000000"}, changed:metadata.managedFields.0.manager={"cluster-image-registry-operator" -> "kube-controller-manager"}, added:metadata.managedFields.0.subresource="status", changed:metadata.managedFields.0.time={"2025-11-03T09:38:57Z" -> "2025-12-08T17:45:05Z"}, changed:metadata.managedFields.1.manager={"kube-controller-manager" -> "cluster-image-registry-operator"}, removed:metadata.managedFields.1.subresource="status", changed:metadata.managedFields.1.time={"2025-12-08T17:45:05Z" -> "2025-12-08T17:54:51Z"}, changed:metadata.resourceVersion={"38618" -> "40748"}, changed:spec.template.metadata.annotations.imageregistry.operator.openshift.io/dependencies-checksum={"sha256:e5e688ac594438d0527a62648f5fb19a6628fa965fb9dde9880c9f9462e2cb93" -> "sha256:e847f40829a71bf8250456acdd010e475b1519e05c57e70ed4f1b28e3fea8414"} 2025-12-08T17:54:51.157609840+00:00 stderr F I1208 17:54:51.157568 1 controller.go:340] object changed: *v1.Config, Name=cluster (status=true): changed:status.conditions.1.lastTransitionTime={"2025-12-08T17:45:05Z" -> "2025-12-08T17:54:51Z"}, changed:status.conditions.1.message={"The registry is ready" -> "The deployment has not completed"}, changed:status.conditions.1.reason={"Ready" -> "DeploymentNotCompleted"}, changed:status.conditions.1.status={"False" -> "True"}, changed:status.conditions.2.message={"The registry is ready" -> "The registry has minimum availability"}, changed:status.conditions.2.reason={"Ready" -> "MinimumAvailability"}, changed:status.generations.1.lastGeneration={"4.000000" -> "5.000000"} 2025-12-08T17:54:51.178409929+00:00 stderr F I1208 17:54:51.175134 1 generator.go:63] object *v1.ClusterOperator, Name=image-registry updated: removed:apiVersion="config.openshift.io/v1", removed:kind="ClusterOperator", changed:metadata.managedFields.2.time={"2025-12-08T17:45:05Z" -> "2025-12-08T17:54:51Z"}, changed:metadata.resourceVersion={"38620" -> "40754"}, changed:status.conditions.0.message={"Available: The registry is ready\nNodeCADaemonAvailable: The daemon set node-ca has available replicas\nImagePrunerAvailable: Pruner CronJob has been created" -> "Available: The registry has minimum availability\nNodeCADaemonAvailable: The daemon set node-ca has available replicas\nImagePrunerAvailable: Pruner CronJob has been created"}, changed:status.conditions.0.reason={"Ready" -> "MinimumAvailability"}, changed:status.conditions.1.lastTransitionTime={"2025-12-08T17:45:05Z" -> "2025-12-08T17:54:51Z"}, changed:status.conditions.1.message={"Progressing: The registry is ready\nNodeCADaemonProgressing: The daemon set node-ca is deployed" -> "Progressing: The deployment has not completed\nNodeCADaemonProgressing: The daemon set node-ca is deployed"}, changed:status.conditions.1.reason={"Ready" -> "DeploymentNotCompleted"}, changed:status.conditions.1.status={"False" -> "True"} 2025-12-08T17:55:13.401224079+00:00 stderr F I1208 
17:55:13.399753 1 controller.go:340] object changed: *v1.Config, Name=cluster (status=true): changed:status.readyReplicas={"1.000000" -> "2.000000"} 2025-12-08T17:55:14.175593017+00:00 stderr F I1208 17:55:14.175151 1 controller.go:340] object changed: *v1.Config, Name=cluster (status=true): changed:status.conditions.1.lastTransitionTime={"2025-12-08T17:54:51Z" -> "2025-12-08T17:55:14Z"}, changed:status.conditions.1.message={"The deployment has not completed" -> "The registry is ready"}, changed:status.conditions.1.reason={"DeploymentNotCompleted" -> "Ready"}, changed:status.conditions.1.status={"True" -> "False"}, changed:status.conditions.2.message={"The registry has minimum availability" -> "The registry is ready"}, changed:status.conditions.2.reason={"MinimumAvailability" -> "Ready"}, changed:status.readyReplicas={"2.000000" -> "1.000000"} 2025-12-08T17:55:14.192797180+00:00 stderr F I1208 17:55:14.192011 1 generator.go:63] object *v1.ClusterOperator, Name=image-registry updated: changed:metadata.managedFields.2.time={"2025-12-08T17:54:51Z" -> "2025-12-08T17:55:14Z"}, changed:metadata.resourceVersion={"40754" -> "41429"}, changed:status.conditions.0.message={"Available: The registry has minimum availability\nNodeCADaemonAvailable: The daemon set node-ca has available replicas\nImagePrunerAvailable: Pruner CronJob has been created" -> "Available: The registry is ready\nNodeCADaemonAvailable: The daemon set node-ca has available replicas\nImagePrunerAvailable: Pruner CronJob has been created"}, changed:status.conditions.0.reason={"MinimumAvailability" -> "Ready"}, changed:status.conditions.1.lastTransitionTime={"2025-12-08T17:54:51Z" -> "2025-12-08T17:55:14Z"}, changed:status.conditions.1.message={"Progressing: The deployment has not completed\nNodeCADaemonProgressing: The daemon set node-ca is deployed" -> "Progressing: The registry is ready\nNodeCADaemonProgressing: The daemon set node-ca is deployed"}, changed:status.conditions.1.reason={"DeploymentNotCompleted" -> "Ready"}, changed:status.conditions.1.status={"True" -> "False"} 2025-12-08T17:55:14.975924285+00:00 stderr F I1208 17:55:14.975832 1 controller.go:340] object changed: *v1.Config, Name=cluster (status=true): changed:status.conditions.1.lastTransitionTime={"2025-12-08T17:54:51Z" -> "2025-12-08T17:55:14Z"}, changed:status.conditions.1.message={"The deployment has not completed" -> "The registry is ready"}, changed:status.conditions.1.reason={"DeploymentNotCompleted" -> "Ready"}, changed:status.conditions.1.status={"True" -> "False"}, changed:status.conditions.2.message={"The registry has minimum availability" -> "The registry is ready"}, changed:status.conditions.2.reason={"MinimumAvailability" -> "Ready"}, changed:status.readyReplicas={"2.000000" -> "1.000000"} 2025-12-08T17:55:14.982955104+00:00 stderr F E1208 17:55:14.982896 1 controller.go:379] unable to sync: Operation cannot be fulfilled on configs.imageregistry.operator.openshift.io "cluster": the object has been modified; please apply your changes to the latest version and try again, requeuing ././@LongLink0000644000000000000000000000025500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-regist0000755000175000017500000000000015115611513033024 5ustar zuulzuul././@LongLink0000644000000000000000000000026600000000000011607 
Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b/registry/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-regist0000755000175000017500000000000015115611520033022 5ustar zuulzuul././@LongLink0000644000000000000000000000027300000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b/registry/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-regist0000644000175000017500000016351615115611513033042 0ustar zuulzuul2025-12-08T17:54:52.290067945+00:00 stderr F time="2025-12-08T17:54:52.289178192Z" level=info msg="start registry" distribution_version=v3.0.0+unknown go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" openshift_version=4.20.0-202510211040.p2.g0c09647.assembly.stream.el9-0c09647 2025-12-08T17:54:52.290067945+00:00 stderr F time="2025-12-08T17:54:52.289842509Z" level=info msg="caching project quota objects with TTL 1m0s" go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" 2025-12-08T17:54:52.291966097+00:00 stderr F time="2025-12-08T17:54:52.290957329Z" level=info msg="redis not configured" go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" 2025-12-08T17:54:52.291966097+00:00 stderr F time="2025-12-08T17:54:52.291080492Z" level=info msg="Starting upload purge in 10m0s" go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" 2025-12-08T17:54:52.291966097+00:00 stderr F time="2025-12-08T17:54:52.291116453Z" level=info msg="using openshift blob descriptor cache" go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" 2025-12-08T17:54:52.291966097+00:00 stderr F time="2025-12-08T17:54:52.291132253Z" level=warning msg="Registry does not implement RepositoryRemover. 
Will not be able to delete repos and tags" go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" 2025-12-08T17:54:52.292542152+00:00 stderr F time="2025-12-08T17:54:52.292236544Z" level=info msg="Using \"image-registry.openshift-image-registry.svc:5000\" as Docker Registry URL" go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" 2025-12-08T17:54:52.292542152+00:00 stderr F time="2025-12-08T17:54:52.292403028Z" level=info msg="listening on :5000, tls" go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" 2025-12-08T17:55:01.524056803+00:00 stderr F time="2025-12-08T17:55:01.523307233Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=72face9a-988a-489b-b5ab-e645261cebb8 http.request.method=GET http.request.remoteaddr="10.217.0.2:43266" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="264.457µs" http.response.status=200 http.response.written=0 2025-12-08T17:55:11.520432594+00:00 stderr F time="2025-12-08T17:55:11.519593042Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=ee5fd0fb-5166-42ad-8a69-ff9b46370615 http.request.method=GET http.request.remoteaddr="10.217.0.2:60546" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="84.553µs" http.response.status=200 http.response.written=0 2025-12-08T17:55:13.266001780+00:00 stderr F time="2025-12-08T17:55:13.265856425Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=4d7b8946-db94-4102-ba61-711501ae74b0 http.request.method=GET http.request.remoteaddr="10.217.0.2:60556" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="49.151µs" http.response.status=200 http.response.written=0 2025-12-08T17:55:21.533206019+00:00 stderr F time="2025-12-08T17:55:21.532487979Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=96cc1e46-9f80-48f9-912e-eb4e8339e916 http.request.method=GET http.request.remoteaddr="10.217.0.2:51774" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="50.301µs" http.response.status=200 http.response.written=0 2025-12-08T17:55:23.269121874+00:00 stderr F time="2025-12-08T17:55:23.266521484Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=e4267069-6acf-44c0-83e3-ff7018fafa7b http.request.method=GET http.request.remoteaddr="10.217.0.2:51790" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="40.861µs" http.response.status=200 http.response.written=0 2025-12-08T17:55:31.518140915+00:00 stderr F time="2025-12-08T17:55:31.517652522Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=619be738-f61f-4658-8b3f-37813acb0815 http.request.method=GET http.request.remoteaddr="10.217.0.2:44614" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="48.971µs" http.response.status=200 http.response.written=0 2025-12-08T17:55:33.269914153+00:00 stderr F time="2025-12-08T17:55:33.269515882Z" level=info msg=response go.version="go1.24.4 (Red Hat 
1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=2ad88112-fbd2-4edd-8ff0-4d599336f0ef http.request.method=GET http.request.remoteaddr="10.217.0.2:44624" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="80.592µs" http.response.status=200 http.response.written=0 2025-12-08T17:55:41.520621960+00:00 stderr F time="2025-12-08T17:55:41.520134407Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=a1cf3206-f696-4d9a-83de-bbfb538bc759 http.request.method=GET http.request.remoteaddr="10.217.0.2:49336" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="41.211µs" http.response.status=200 http.response.written=0 2025-12-08T17:55:43.269912211+00:00 stderr F time="2025-12-08T17:55:43.268165263Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=c42f27dc-147e-4548-ae47-676844540ddd http.request.method=GET http.request.remoteaddr="10.217.0.2:49352" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="63.982µs" http.response.status=200 http.response.written=0 2025-12-08T17:55:51.517599956+00:00 stderr F time="2025-12-08T17:55:51.517077681Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=9c0f9b5e-90a5-4edc-9a9b-316b93f9ea83 http.request.method=GET http.request.remoteaddr="10.217.0.2:58072" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="33.751µs" http.response.status=200 http.response.written=0 2025-12-08T17:55:53.266782552+00:00 stderr F time="2025-12-08T17:55:53.266414272Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=cac082ba-e2e4-4176-b3dc-a606ad7a9934 http.request.method=GET http.request.remoteaddr="10.217.0.2:58074" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="41.661µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:01.517861611+00:00 stderr F time="2025-12-08T17:56:01.51743645Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=a3dafb4c-2aec-4c99-88bb-c6d6c41e9cbf http.request.method=GET http.request.remoteaddr="10.217.0.2:53198" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="35.551µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:03.267591623+00:00 stderr F time="2025-12-08T17:56:03.267019197Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=9edb490b-44cf-491b-91f5-ee9e7b3bc5ab http.request.method=GET http.request.remoteaddr="10.217.0.2:53206" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="37.241µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:11.521687813+00:00 stderr F time="2025-12-08T17:56:11.521228981Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=60029345-f2c3-48fd-a2f7-2b171e99a170 http.request.method=GET http.request.remoteaddr="10.217.0.2:41092" 
http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="35.941µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:13.270232073+00:00 stderr F time="2025-12-08T17:56:13.269329108Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=b9f6b681-6f88-4cf4-9d0d-5760ef146f48 http.request.method=GET http.request.remoteaddr="10.217.0.2:41102" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="48.941µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:21.521943858+00:00 stderr F time="2025-12-08T17:56:21.520607511Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=55e455e8-700d-451c-8615-69172d6d5af4 http.request.method=GET http.request.remoteaddr="10.217.0.2:33214" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="67.802µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:23.265909602+00:00 stderr F time="2025-12-08T17:56:23.265803349Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=bf26179a-b201-416e-8601-cfa8a4a5c86c http.request.method=GET http.request.remoteaddr="10.217.0.2:33220" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="33.241µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:31.520191563+00:00 stderr F time="2025-12-08T17:56:31.519414522Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=f285a51d-d557-4268-966a-830f1a1658fb http.request.method=GET http.request.remoteaddr="10.217.0.2:40816" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="67.221µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:33.266983242+00:00 stderr F time="2025-12-08T17:56:33.266075268Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=34bffb9d-6fc9-4215-8b88-678f9643bbc6 http.request.method=GET http.request.remoteaddr="10.217.0.2:40832" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="39.191µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:41.518508578+00:00 stderr F time="2025-12-08T17:56:41.517704076Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=2a271e1d-6b50-4dc8-9993-88b1e7dd6b6a http.request.method=GET http.request.remoteaddr="10.217.0.2:45840" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="49.082µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:43.270257296+00:00 stderr F time="2025-12-08T17:56:43.269650601Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=d89443e6-dc5a-41b8-b052-3e17af481386 http.request.method=GET http.request.remoteaddr="10.217.0.2:45854" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="59.662µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:51.526616478+00:00 stderr F 
time="2025-12-08T17:56:51.525993212Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=1d2179b7-57ab-4fcb-b740-85404b2a3816 http.request.method=GET http.request.remoteaddr="10.217.0.2:51662" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="69.412µs" http.response.status=200 http.response.written=0 2025-12-08T17:56:53.265794589+00:00 stderr F time="2025-12-08T17:56:53.265704087Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=33c87ad2-a417-4732-8ed6-5ac77bc478fe http.request.method=GET http.request.remoteaddr="10.217.0.2:51668" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="45.091µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:01.517555542+00:00 stderr F time="2025-12-08T17:57:01.517001227Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=711c8322-811d-43ca-8fd9-0d724ec15309 http.request.method=GET http.request.remoteaddr="10.217.0.2:46718" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="101.773µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:03.267259568+00:00 stderr F time="2025-12-08T17:57:03.266986631Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=143f361c-7cee-4252-9e3c-94b7c5144ba9 http.request.method=GET http.request.remoteaddr="10.217.0.2:46720" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="59.562µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:11.517396687+00:00 stderr F time="2025-12-08T17:57:11.516948676Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=0fffaf49-a956-4402-bd50-3a152c58aa8b http.request.method=GET http.request.remoteaddr="10.217.0.2:59616" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="76.252µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:13.266613021+00:00 stderr F time="2025-12-08T17:57:13.266490048Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=527ce7f7-5c6b-4a10-80df-5d98ea31971a http.request.method=GET http.request.remoteaddr="10.217.0.2:59624" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="54.812µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:21.520498129+00:00 stderr F time="2025-12-08T17:57:21.519806321Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=5207f6f6-4ac1-4ba3-8136-fcc3e766b18a http.request.method=GET http.request.remoteaddr="10.217.0.2:36828" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="42.841µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:23.266743374+00:00 stderr F time="2025-12-08T17:57:23.26621883Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" 
http.request.id=6c29e253-aefb-4130-abc6-bb2c27286382 http.request.method=GET http.request.remoteaddr="10.217.0.2:36832" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="53.971µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:31.518180628+00:00 stderr F time="2025-12-08T17:57:31.517511041Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=85beef27-806f-4ad5-bb1d-51649844788d http.request.method=GET http.request.remoteaddr="10.217.0.2:51246" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="55.361µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:33.266711276+00:00 stderr F time="2025-12-08T17:57:33.266610864Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=52ab6eb6-4706-43ac-a32e-8613643a9f89 http.request.method=GET http.request.remoteaddr="10.217.0.2:51262" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="47.411µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:41.518827108+00:00 stderr F time="2025-12-08T17:57:41.518264284Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=04a9664c-855e-4e17-b139-8cde14825c05 http.request.method=GET http.request.remoteaddr="10.217.0.2:59900" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="33.68µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:43.267082633+00:00 stderr F time="2025-12-08T17:57:43.266412876Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=eee00ae6-1b08-48f4-a5c8-5e9488e9e341 http.request.method=GET http.request.remoteaddr="10.217.0.2:59906" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="56.721µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:51.518351172+00:00 stderr F time="2025-12-08T17:57:51.517714736Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=731db593-b591-420e-926f-463781b53f60 http.request.method=GET http.request.remoteaddr="10.217.0.2:50642" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="38.131µs" http.response.status=200 http.response.written=0 2025-12-08T17:57:53.274164612+00:00 stderr F time="2025-12-08T17:57:53.266344299Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=8dd29a12-1538-47e7-9671-7795525ba3df http.request.method=GET http.request.remoteaddr="10.217.0.2:50652" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="32.87µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:01.518206825+00:00 stderr F time="2025-12-08T17:58:01.517702322Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=c6d4428d-8fca-4f3f-837b-cbaa16135f1f http.request.method=GET http.request.remoteaddr="10.217.0.2:56758" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 
http.response.duration="57.651µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:03.266221184+00:00 stderr F time="2025-12-08T17:58:03.2656825Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=0f8f6e88-a936-4b83-b94b-33aa1cea71b7 http.request.method=GET http.request.remoteaddr="10.217.0.2:56770" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="34.951µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:11.519729871+00:00 stderr F time="2025-12-08T17:58:11.518762516Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=223f4ac3-c9d8-48d7-9f01-61f4d7df39b4 http.request.method=GET http.request.remoteaddr="10.217.0.2:47446" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="64.621µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:13.266434796+00:00 stderr F time="2025-12-08T17:58:13.265754458Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=46acdd30-3ed2-45b3-97c3-4982b69b25d8 http.request.method=GET http.request.remoteaddr="10.217.0.2:47456" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="80.032µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:21.522726965+00:00 stderr F time="2025-12-08T17:58:21.522178171Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=dc2a8ddc-1667-4c3d-9783-4d2915fc4e93 http.request.method=GET http.request.remoteaddr="10.217.0.2:59346" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="57.921µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:23.267665384+00:00 stderr F time="2025-12-08T17:58:23.26711895Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=c092e27f-bd7e-426f-a99f-b5f131ef6391 http.request.method=GET http.request.remoteaddr="10.217.0.2:59358" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="34.491µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:31.519556319+00:00 stderr F time="2025-12-08T17:58:31.518786479Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=8dac43ac-c6f6-4093-8081-277f742cf635 http.request.method=GET http.request.remoteaddr="10.217.0.2:46126" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="40.291µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:33.266275404+00:00 stderr F time="2025-12-08T17:58:33.266185482Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=671a2a7e-bee8-4d6f-9ac3-df3b2d7f3e0f http.request.method=GET http.request.remoteaddr="10.217.0.2:46134" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="77.302µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:41.518336213+00:00 stderr F time="2025-12-08T17:58:41.517687855Z" level=info msg=response go.version="go1.24.4 (Red 
Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=0cf7a9d1-2fc6-4d36-be6f-1fdbbf650f5a http.request.method=GET http.request.remoteaddr="10.217.0.2:33106" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="41.171µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:43.267508103+00:00 stderr F time="2025-12-08T17:58:43.266907538Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=e1c59a37-6fd1-48ee-891a-baa44408f306 http.request.method=GET http.request.remoteaddr="10.217.0.2:33116" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="124.953µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:51.518158024+00:00 stderr F time="2025-12-08T17:58:51.5176162Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=fb8d62ae-0d2b-4c9c-bce0-97c0dc4871bc http.request.method=GET http.request.remoteaddr="10.217.0.2:43310" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="64.191µs" http.response.status=200 http.response.written=0 2025-12-08T17:58:53.267282803+00:00 stderr F time="2025-12-08T17:58:53.266696497Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=cfc04740-65a2-45f8-8de0-54e03d239524 http.request.method=GET http.request.remoteaddr="10.217.0.2:43314" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="46.311µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:01.519344241+00:00 stderr F time="2025-12-08T17:59:01.518790946Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=5354a845-6dfc-42cc-9262-4862b19a917d http.request.method=GET http.request.remoteaddr="10.217.0.2:60948" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="59.321µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:03.265974864+00:00 stderr F time="2025-12-08T17:59:03.265903323Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=93706710-0844-4bf2-8927-4a499d0ade08 http.request.method=GET http.request.remoteaddr="10.217.0.2:60964" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="114.523µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:11.518026542+00:00 stderr F time="2025-12-08T17:59:11.517540839Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=76b4da2c-2003-4582-a781-28d5090436c9 http.request.method=GET http.request.remoteaddr="10.217.0.2:35138" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="38.951µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:13.266064093+00:00 stderr F time="2025-12-08T17:59:13.265535258Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=2a82ea2a-08c2-4099-a51f-eeb4280c6de5 http.request.method=GET http.request.remoteaddr="10.217.0.2:35154" 
http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="33.421µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:21.517566935+00:00 stderr F time="2025-12-08T17:59:21.51699531Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=9ea68e2c-b479-4643-a068-8cdabb5755c1 http.request.method=GET http.request.remoteaddr="10.217.0.2:43460" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="59.601µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:23.265971335+00:00 stderr F time="2025-12-08T17:59:23.265278957Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=74ea2959-a417-414f-beac-7cde4a3f3f1d http.request.method=GET http.request.remoteaddr="10.217.0.2:43462" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="75.372µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:31.519028939+00:00 stderr F time="2025-12-08T17:59:31.518448744Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=500e0cf2-4ec4-4c13-abdb-3efae4737b54 http.request.method=GET http.request.remoteaddr="10.217.0.2:40000" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="39.701µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:33.267072510+00:00 stderr F time="2025-12-08T17:59:33.266850354Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=1c076c83-6e91-42e0-9529-f11aca1a2925 http.request.method=GET http.request.remoteaddr="10.217.0.2:40010" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="46.411µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:41.519577290+00:00 stderr F time="2025-12-08T17:59:41.518695247Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=994852e3-b6fc-4716-9193-6504b7f2e16e http.request.method=GET http.request.remoteaddr="10.217.0.2:44650" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="53.852µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:43.267012989+00:00 stderr F time="2025-12-08T17:59:43.266162217Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=d5442248-8cf6-4878-ac26-46e83cd33b31 http.request.method=GET http.request.remoteaddr="10.217.0.2:44666" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="60.261µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:51.519586774+00:00 stderr F time="2025-12-08T17:59:51.518665809Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=9e470667-ebde-4dbe-9c63-67c50748b53d http.request.method=GET http.request.remoteaddr="10.217.0.2:59512" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="70.032µs" http.response.status=200 http.response.written=0 2025-12-08T17:59:53.301806472+00:00 stderr F 
time="2025-12-08T17:59:53.301055013Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=a25ec71c-6c02-4d5c-81d9-fa99f24d7ecd http.request.method=GET http.request.remoteaddr="10.217.0.2:59518" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="83.572µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:01.519697355+00:00 stderr F time="2025-12-08T18:00:01.519147641Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=2acc7fd5-76d3-4989-9a55-b523cbf376b6 http.request.method=GET http.request.remoteaddr="10.217.0.2:52060" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="61.141µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:03.266793200+00:00 stderr F time="2025-12-08T18:00:03.26601873Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=46cbd20d-63f1-4a71-a967-373ec8be19fa http.request.method=GET http.request.remoteaddr="10.217.0.2:52068" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="38.011µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:11.520993468+00:00 stderr F time="2025-12-08T18:00:11.520294399Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=6d09d491-c247-4bc6-83b2-7216cc587ddb http.request.method=GET http.request.remoteaddr="10.217.0.2:42310" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="99.643µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:13.268725249+00:00 stderr F time="2025-12-08T18:00:13.268578555Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=51a08010-cf81-4932-91bf-e58b50659d75 http.request.method=GET http.request.remoteaddr="10.217.0.2:42324" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="67.982µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:21.520693636+00:00 stderr F time="2025-12-08T18:00:21.519944437Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=9394a9e0-b609-4658-8757-da94a2af82f1 http.request.method=GET http.request.remoteaddr="10.217.0.2:40454" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="93.283µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:23.310931105+00:00 stderr F time="2025-12-08T18:00:23.266186779Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=8b340d16-c447-4662-a525-789763d3400e http.request.method=GET http.request.remoteaddr="10.217.0.2:40470" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="62.951µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:31.518686829+00:00 stderr F time="2025-12-08T18:00:31.518171175Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=8f80eddc-58f8-430c-a2d9-658f18010d68 
http.request.method=GET http.request.remoteaddr="10.217.0.2:42818" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="48.511µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:33.270854958+00:00 stderr F time="2025-12-08T18:00:33.270695402Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=4128b74e-fbfb-439e-838a-6889d10853f3 http.request.method=GET http.request.remoteaddr="10.217.0.2:42824" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="66.752µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:41.518699097+00:00 stderr F time="2025-12-08T18:00:41.517960207Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=0cd79c71-9fc7-4aba-b655-99c38ff5f750 http.request.method=GET http.request.remoteaddr="10.217.0.2:60398" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="125.283µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:43.269337985+00:00 stderr F time="2025-12-08T18:00:43.268747678Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=d3f87285-1b55-4ce8-bf0e-891b8b4e9094 http.request.method=GET http.request.remoteaddr="10.217.0.2:60404" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="47.761µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:51.520325461+00:00 stderr F time="2025-12-08T18:00:51.519593282Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=7cf12a20-efa1-45ed-893b-5030fd9bf59d http.request.method=GET http.request.remoteaddr="10.217.0.2:53250" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="76.022µs" http.response.status=200 http.response.written=0 2025-12-08T18:00:53.265794812+00:00 stderr F time="2025-12-08T18:00:53.26570068Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=18dede62-15ef-4a5d-b6e3-8b1664d90d78 http.request.method=GET http.request.remoteaddr="10.217.0.2:53254" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="47.411µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:01.518361715+00:00 stderr F time="2025-12-08T18:01:01.517866582Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=1c2ff619-8df3-4793-b503-77d2d75ea2b2 http.request.method=GET http.request.remoteaddr="10.217.0.2:57498" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="51.981µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:03.267528735+00:00 stderr F time="2025-12-08T18:01:03.267004251Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=9f664f84-173f-493d-95a9-09006cb9005a http.request.method=GET http.request.remoteaddr="10.217.0.2:57500" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="40.831µs" http.response.status=200 http.response.written=0 
2025-12-08T18:01:11.518467274+00:00 stderr F time="2025-12-08T18:01:11.51796424Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=db33318b-03ba-4bc9-9190-854e449a512f http.request.method=GET http.request.remoteaddr="10.217.0.2:39778" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="61.021µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:13.272695899+00:00 stderr F time="2025-12-08T18:01:13.272126344Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=6886da57-88eb-46d0-941b-2127c3381611 http.request.method=GET http.request.remoteaddr="10.217.0.2:39790" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="83.182µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:21.520596176+00:00 stderr F time="2025-12-08T18:01:21.520010271Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=e576687a-1d16-4f97-a6e3-f88a3bd28e11 http.request.method=GET http.request.remoteaddr="10.217.0.2:50392" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="83.293µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:23.269143190+00:00 stderr F time="2025-12-08T18:01:23.269004037Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=ff6c37cd-b6ce-454c-a1ce-ddac925de5e8 http.request.method=GET http.request.remoteaddr="10.217.0.2:50406" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="178.674µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:31.518213168+00:00 stderr F time="2025-12-08T18:01:31.517472088Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=6e24f91f-f92b-422e-a2c8-eb7dd6f3e41f http.request.method=GET http.request.remoteaddr="10.217.0.2:58468" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="47.911µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:33.266918584+00:00 stderr F time="2025-12-08T18:01:33.266446732Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=18e877cd-f974-46db-866a-d51af708d846 http.request.method=GET http.request.remoteaddr="10.217.0.2:58484" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="61.902µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:41.519806546+00:00 stderr F time="2025-12-08T18:01:41.518955294Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=07139fb8-1fbf-4aed-ac20-5fc975c75ac1 http.request.method=GET http.request.remoteaddr="10.217.0.2:44060" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="125.553µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:43.270562019+00:00 stderr F time="2025-12-08T18:01:43.269568433Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" 
http.request.id=7abd50ec-fe6d-4fdc-957b-769e19d0d1c2 http.request.method=GET http.request.remoteaddr="10.217.0.2:44064" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="75.162µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:51.521550157+00:00 stderr F time="2025-12-08T18:01:51.520113509Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=02166e63-2d4d-4faa-94f1-4d1386a3c552 http.request.method=GET http.request.remoteaddr="10.217.0.2:42484" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="101.703µs" http.response.status=200 http.response.written=0 2025-12-08T18:01:53.266678942+00:00 stderr F time="2025-12-08T18:01:53.266492148Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=948052e7-6bdf-4dcf-974b-ebbbb284770c http.request.method=GET http.request.remoteaddr="10.217.0.2:42492" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="75.812µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:01.520714234+00:00 stderr F time="2025-12-08T18:02:01.520033386Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=994fe9de-5cc9-4f3f-a561-171dbea3a56a http.request.method=GET http.request.remoteaddr="10.217.0.2:37724" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="43.751µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:03.266753483+00:00 stderr F time="2025-12-08T18:02:03.26623146Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=f2abb267-ccba-433d-90da-6cb9438b9b58 http.request.method=GET http.request.remoteaddr="10.217.0.2:37732" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="36.111µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:11.519801197+00:00 stderr F time="2025-12-08T18:02:11.519369356Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=4652b85c-93fb-4883-85b1-fe6674347f4d http.request.method=GET http.request.remoteaddr="10.217.0.2:57766" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="55.352µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:13.267330068+00:00 stderr F time="2025-12-08T18:02:13.267223635Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=eb3be022-a687-480e-bf30-d8f05a1ac4a2 http.request.method=GET http.request.remoteaddr="10.217.0.2:57770" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="140.574µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:21.519764879+00:00 stderr F time="2025-12-08T18:02:21.519093982Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=2c66bdb4-942e-4ec9-a512-b18036038798 http.request.method=GET http.request.remoteaddr="10.217.0.2:33984" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 
http.response.duration="84.352µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:23.266539878+00:00 stderr F time="2025-12-08T18:02:23.265794069Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=496577ce-f6b3-4cb0-bd29-b671e6a2398e http.request.method=GET http.request.remoteaddr="10.217.0.2:33994" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="37.431µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:31.519607704+00:00 stderr F time="2025-12-08T18:02:31.51908912Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=b86236aa-c175-49a8-8368-04ebdcf61248 http.request.method=GET http.request.remoteaddr="10.217.0.2:45774" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="45.671µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:33.269654851+00:00 stderr F time="2025-12-08T18:02:33.269471356Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=1e9ebd04-8ee4-4098-acf4-2ebd63edc2d0 http.request.method=GET http.request.remoteaddr="10.217.0.2:45782" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="78.132µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:41.521281008+00:00 stderr F time="2025-12-08T18:02:41.520603029Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=b022cf64-29de-416e-86b5-10b749a3f9d6 http.request.method=GET http.request.remoteaddr="10.217.0.2:55780" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="55.741µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:43.266488665+00:00 stderr F time="2025-12-08T18:02:43.265807606Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=d86a0f5a-d545-412f-9782-f5e5201c020d http.request.method=GET http.request.remoteaddr="10.217.0.2:55792" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="54.551µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:51.522053906+00:00 stderr F time="2025-12-08T18:02:51.518607734Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=68124ada-18c4-4b49-b923-1838d65f0fa7 http.request.method=GET http.request.remoteaddr="10.217.0.2:53742" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="44.991µs" http.response.status=200 http.response.written=0 2025-12-08T18:02:53.266676018+00:00 stderr F time="2025-12-08T18:02:53.266591965Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=7b62a57b-af9e-4551-8cca-8aa5d8f78921 http.request.method=GET http.request.remoteaddr="10.217.0.2:53744" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="79.242µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:01.518943307+00:00 stderr F time="2025-12-08T18:03:01.51828915Z" level=info msg=response go.version="go1.24.4 (Red 
Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=fe286676-847a-48ef-8902-1cac9e400e4e http.request.method=GET http.request.remoteaddr="10.217.0.2:40970" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="83.632µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:03.270976612+00:00 stderr F time="2025-12-08T18:03:03.270603281Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=06f2b498-05d7-45dc-a404-274bdb90bf4e http.request.method=GET http.request.remoteaddr="10.217.0.2:40974" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="42.581µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:11.519724791+00:00 stderr F time="2025-12-08T18:03:11.519121146Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=ea018000-7ac9-474e-8b43-b78d78e370d7 http.request.method=GET http.request.remoteaddr="10.217.0.2:44760" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="52.261µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:13.266429453+00:00 stderr F time="2025-12-08T18:03:13.26596176Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=aba507e1-6164-4541-9725-f03357b727fb http.request.method=GET http.request.remoteaddr="10.217.0.2:44770" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="38.251µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:21.522965471+00:00 stderr F time="2025-12-08T18:03:21.519724185Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=cbff3e10-fbf7-4b45-a7ba-f42ea752cff5 http.request.method=GET http.request.remoteaddr="10.217.0.2:50788" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="37.341µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:23.267770352+00:00 stderr F time="2025-12-08T18:03:23.267138185Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=8134fce8-2167-46a0-b132-4d8a5239e5e5 http.request.method=GET http.request.remoteaddr="10.217.0.2:50794" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="88.614µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:31.517908279+00:00 stderr F time="2025-12-08T18:03:31.517156448Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=57995dd3-5706-4bdf-85d8-dfb8dbcb565f http.request.method=GET http.request.remoteaddr="10.217.0.2:58788" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="82.482µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:33.266500160+00:00 stderr F time="2025-12-08T18:03:33.266427318Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=35f68aad-9d5a-456b-ac83-cf43f94546a6 http.request.method=GET http.request.remoteaddr="10.217.0.2:58796" 
http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="51.461µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:41.518715153+00:00 stderr F time="2025-12-08T18:03:41.51823921Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=9fc7684a-fd0f-4a24-b7aa-b9ce15f463f4 http.request.method=GET http.request.remoteaddr="10.217.0.2:34030" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="39.081µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:43.266341789+00:00 stderr F time="2025-12-08T18:03:43.265772613Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=5932c841-2311-482a-b916-6a6b1f7dd301 http.request.method=GET http.request.remoteaddr="10.217.0.2:34038" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="47.192µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:51.519253770+00:00 stderr F time="2025-12-08T18:03:51.51891605Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=9eebf3e3-9bc2-442a-9880-1544df3e1388 http.request.method=GET http.request.remoteaddr="10.217.0.2:50014" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="83.492µs" http.response.status=200 http.response.written=0 2025-12-08T18:03:53.266615448+00:00 stderr F time="2025-12-08T18:03:53.265853398Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=b6b94fd0-ec99-4d8f-99b1-729bc74fc174 http.request.method=GET http.request.remoteaddr="10.217.0.2:50024" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="57.482µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:01.518290794+00:00 stderr F time="2025-12-08T18:04:01.517642287Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=d481ef20-f03d-498c-a0d0-6b2651506b03 http.request.method=GET http.request.remoteaddr="10.217.0.2:42202" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="39.391µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:03.266421275+00:00 stderr F time="2025-12-08T18:04:03.266317642Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=dc85fbbd-7027-4a70-997e-2c56b5029208 http.request.method=GET http.request.remoteaddr="10.217.0.2:42216" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="56.862µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:11.520219087+00:00 stderr F time="2025-12-08T18:04:11.519456257Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=2fb4dfc6-8131-49b0-adee-e12551ef5143 http.request.method=GET http.request.remoteaddr="10.217.0.2:49224" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="60.452µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:13.271209844+00:00 stderr F 
time="2025-12-08T18:04:13.270234118Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=78cbf00b-55a5-46ef-9a16-fd3ff1845ab5 http.request.method=GET http.request.remoteaddr="10.217.0.2:49236" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="77.992µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:21.519105811+00:00 stderr F time="2025-12-08T18:04:21.518547555Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=a93c8ea4-5419-4824-8b74-0855be20b792 http.request.method=GET http.request.remoteaddr="10.217.0.2:44312" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="43.451µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:23.267089377+00:00 stderr F time="2025-12-08T18:04:23.267012475Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=ea22faa1-94f4-4f8b-9f8f-fced79831383 http.request.method=GET http.request.remoteaddr="10.217.0.2:44328" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="42.681µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:31.524221858+00:00 stderr F time="2025-12-08T18:04:31.523588642Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=cd4fee54-ab13-4a00-94d3-463625796060 http.request.method=GET http.request.remoteaddr="10.217.0.2:33396" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="34.93µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:33.269773991+00:00 stderr F time="2025-12-08T18:04:33.269344209Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=c1ef304a-5f85-4e59-84bc-c5be36faae3a http.request.method=GET http.request.remoteaddr="10.217.0.2:33404" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="81.652µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:41.521612882+00:00 stderr F time="2025-12-08T18:04:41.52083312Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=9179bbe4-ba4c-4e6e-a552-db6dfd89936f http.request.method=GET http.request.remoteaddr="10.217.0.2:34562" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="60.242µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:43.265967752+00:00 stderr F time="2025-12-08T18:04:43.265815508Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=45a4c56c-0f3c-4815-97e1-b25b63441b62 http.request.method=GET http.request.remoteaddr="10.217.0.2:34576" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="34.301µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:51.523171694+00:00 stderr F time="2025-12-08T18:04:51.52261927Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=bb923eb8-10ad-45c2-9ac1-0f124223a8a6 
http.request.method=GET http.request.remoteaddr="10.217.0.2:39034" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="92.973µs" http.response.status=200 http.response.written=0 2025-12-08T18:04:52.291961342+00:00 stderr F time="2025-12-08T18:04:52.291802918Z" level=info msg="PurgeUploads starting: olderThan=2025-12-01 18:04:52.291718666 +0000 UTC m=-604199.941990292, actuallyDelete=true" 2025-12-08T18:04:52.292253600+00:00 stderr F time="2025-12-08T18:04:52.292215389Z" level=info msg="Purge uploads finished. Num deleted=0, num errors=0" 2025-12-08T18:04:52.292253600+00:00 stderr F time="2025-12-08T18:04:52.29223534Z" level=info msg="Starting upload purge in 24h0m0s" go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" 2025-12-08T18:04:53.268725327+00:00 stderr F time="2025-12-08T18:04:53.267936296Z" level=info msg=response go.version="go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime" http.request.host="10.217.0.11:5000" http.request.id=6980c885-710c-46df-bcdf-7dca4c6085b2 http.request.method=GET http.request.remoteaddr="10.217.0.2:39038" http.request.uri=/healthz http.request.useragent=kube-probe/1.33 http.response.duration="78.002µs" http.response.status=200 http.response.written=0 ././@LongLink0000644000000000000000000000024100000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elas0000755000175000017500000000000015115611513033126 5ustar zuulzuul././@LongLink0000644000000000000000000000027200000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-suspend/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elas0000755000175000017500000000000015115611520033124 5ustar zuulzuul././@LongLink0000644000000000000000000000027700000000000011611 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-suspend/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elas0000644000175000017500000000000015115611513033116 0ustar zuulzuul././@LongLink0000644000000000000000000000030200000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-init-filesystem/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elas0000755000175000017500000000000015115611520033124 5ustar zuulzuul././@LongLink0000644000000000000000000000030700000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-init-filesystem/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elas0000644000175000017500000002444315115611513033137 0ustar zuulzuul2025-12-08T17:55:58.111597444+00:00 stdout F Starting init script 2025-12-08T17:55:58.116710524+00:00 stdout F Copying /usr/share/elasticsearch/config/* to 
/mnt/elastic-internal/elasticsearch-config-local/ 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/elasticsearch-plugins.example.yml' -> '/mnt/elastic-internal/elasticsearch-config-local/elasticsearch-plugins.example.yml' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/elasticsearch.yml' -> '/mnt/elastic-internal/elasticsearch-config-local/elasticsearch.yml' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/http-certs' -> '/mnt/elastic-internal/elasticsearch-config-local/http-certs' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/http-certs/..2025_12_08_17_55_42.607352561' -> '/mnt/elastic-internal/elasticsearch-config-local/http-certs/..2025_12_08_17_55_42.607352561' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/http-certs/..2025_12_08_17_55_42.607352561/tls.crt' -> '/mnt/elastic-internal/elasticsearch-config-local/http-certs/..2025_12_08_17_55_42.607352561/tls.crt' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/http-certs/..2025_12_08_17_55_42.607352561/tls.key' -> '/mnt/elastic-internal/elasticsearch-config-local/http-certs/..2025_12_08_17_55_42.607352561/tls.key' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/http-certs/..2025_12_08_17_55_42.607352561/ca.crt' -> '/mnt/elastic-internal/elasticsearch-config-local/http-certs/..2025_12_08_17_55_42.607352561/ca.crt' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/http-certs/..data' -> '/mnt/elastic-internal/elasticsearch-config-local/http-certs/..data' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/http-certs/ca.crt' -> '/mnt/elastic-internal/elasticsearch-config-local/http-certs/ca.crt' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/http-certs/tls.crt' -> '/mnt/elastic-internal/elasticsearch-config-local/http-certs/tls.crt' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/http-certs/tls.key' -> '/mnt/elastic-internal/elasticsearch-config-local/http-certs/tls.key' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/jvm.options' -> '/mnt/elastic-internal/elasticsearch-config-local/jvm.options' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/jvm.options.d' -> '/mnt/elastic-internal/elasticsearch-config-local/jvm.options.d' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/log4j2.file.properties' -> '/mnt/elastic-internal/elasticsearch-config-local/log4j2.file.properties' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/log4j2.properties' -> '/mnt/elastic-internal/elasticsearch-config-local/log4j2.properties' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/role_mapping.yml' -> '/mnt/elastic-internal/elasticsearch-config-local/role_mapping.yml' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/roles.yml' -> '/mnt/elastic-internal/elasticsearch-config-local/roles.yml' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/transport-remote-certs' -> '/mnt/elastic-internal/elasticsearch-config-local/transport-remote-certs' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/transport-remote-certs/..2025_12_08_17_55_42.2284019893' -> 
'/mnt/elastic-internal/elasticsearch-config-local/transport-remote-certs/..2025_12_08_17_55_42.2284019893' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/transport-remote-certs/..2025_12_08_17_55_42.2284019893/ca.crt' -> '/mnt/elastic-internal/elasticsearch-config-local/transport-remote-certs/..2025_12_08_17_55_42.2284019893/ca.crt' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/transport-remote-certs/..data' -> '/mnt/elastic-internal/elasticsearch-config-local/transport-remote-certs/..data' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/transport-remote-certs/ca.crt' -> '/mnt/elastic-internal/elasticsearch-config-local/transport-remote-certs/ca.crt' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/users' -> '/mnt/elastic-internal/elasticsearch-config-local/users' 2025-12-08T17:55:58.214417835+00:00 stdout F '/usr/share/elasticsearch/config/users_roles' -> '/mnt/elastic-internal/elasticsearch-config-local/users_roles' 2025-12-08T17:55:58.314082229+00:00 stdout F Empty dir /usr/share/elasticsearch/plugins 2025-12-08T17:55:58.318011698+00:00 stdout F Copying /usr/share/elasticsearch/bin/* to /mnt/elastic-internal/elasticsearch-bin-local/ 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-certgen' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-certgen' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-certutil' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-certutil' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-cli' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-cli' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-croneval' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-croneval' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-env' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-env' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-env-from-file' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-env-from-file' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-geoip' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-geoip' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-keystore' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-keystore' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-migrate' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-migrate' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-node' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-node' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-plugin' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-plugin' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-saml-metadata' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-saml-metadata' 2025-12-08T17:55:58.509603665+00:00 stdout F 
'/usr/share/elasticsearch/bin/elasticsearch-service-tokens' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-service-tokens' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-setup-passwords' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-setup-passwords' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-shard' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-shard' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-sql-cli' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-sql-cli' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-sql-cli-7.17.20.jar' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-sql-cli-7.17.20.jar' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-syskeygen' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-syskeygen' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/elasticsearch-users' -> '/mnt/elastic-internal/elasticsearch-bin-local/elasticsearch-users' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/x-pack-env' -> '/mnt/elastic-internal/elasticsearch-bin-local/x-pack-env' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/x-pack-security-env' -> '/mnt/elastic-internal/elasticsearch-bin-local/x-pack-security-env' 2025-12-08T17:55:58.509603665+00:00 stdout F '/usr/share/elasticsearch/bin/x-pack-watcher-env' -> '/mnt/elastic-internal/elasticsearch-bin-local/x-pack-watcher-env' 2025-12-08T17:55:58.515614790+00:00 stdout F Files copy duration: 0 sec. 2025-12-08T17:55:58.611284195+00:00 stdout F Linking /mnt/elastic-internal/xpack-file-realm/users to /mnt/elastic-internal/elasticsearch-config-local/users 2025-12-08T17:55:58.614232425+00:00 stdout F Linking /mnt/elastic-internal/xpack-file-realm/roles.yml to /mnt/elastic-internal/elasticsearch-config-local/roles.yml 2025-12-08T17:55:58.616821216+00:00 stdout F Linking /mnt/elastic-internal/xpack-file-realm/users_roles to /mnt/elastic-internal/elasticsearch-config-local/users_roles 2025-12-08T17:55:58.619903081+00:00 stdout F Linking /mnt/elastic-internal/elasticsearch-config/elasticsearch.yml to /mnt/elastic-internal/elasticsearch-config-local/elasticsearch.yml 2025-12-08T17:55:58.709715805+00:00 stdout F Linking /mnt/elastic-internal/unicast-hosts/unicast_hosts.txt to /mnt/elastic-internal/elasticsearch-config-local/unicast_hosts.txt 2025-12-08T17:55:58.712230084+00:00 stdout F Linking /mnt/elastic-internal/xpack-file-realm/service_tokens to /mnt/elastic-internal/elasticsearch-config-local/service_tokens 2025-12-08T17:55:58.812049754+00:00 stdout F File linking duration: 0 sec. 2025-12-08T17:55:58.917086586+00:00 stdout F chown duration: 0 sec. 2025-12-08T17:55:58.917228609+00:00 stdout F waiting for the transport certificates (/mnt/elastic-internal/transport-certificates/elasticsearch-es-default-0.tls.key or /mnt/elastic-internal/transport-certificates/transport.certs.disabled) 2025-12-08T17:55:59.015608190+00:00 stdout F wait duration: 0 sec. 2025-12-08T17:55:59.015649441+00:00 stdout F Init script successful 2025-12-08T17:55:59.112996931+00:00 stdout F Script duration: 1 sec. 
././@LongLink0000644000000000000000000000025700000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elasticsearch/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elas0000755000175000017500000000000015115611520033124 5ustar zuulzuul././@LongLink0000644000000000000000000000026400000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elasticsearch/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elas0000644000175000017500000015071715115611513033143 0ustar zuulzuul2025-12-08T17:56:03.753591599+00:00 stderr F Dec 08, 2025 5:56:03 PM sun.util.locale.provider.LocaleProviderAdapter 2025-12-08T17:56:03.753591599+00:00 stderr F WARNING: COMPAT locale provider will be removed in a future release 2025-12-08T17:56:04.622768069+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:04,619Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "version[7.17.20], pid[2], build[default/docker/b26557f585b7d95c71a5549e571a6bcd2667697d/2024-04-08T08:34:31.070382898Z], OS[Linux/5.14.0-570.57.1.el9_6.x86_64/amd64], JVM[Oracle Corporation/OpenJDK 64-Bit Server VM/21.0.2/21.0.2+13-58]" } 2025-12-08T17:56:04.623044736+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:04,622Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "JVM home [/usr/share/elasticsearch/jdk], using bundled JDK [true]" } 2025-12-08T17:56:04.623451457+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:04,623Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "JVM arguments [-Xshare:auto, -Des.networkaddress.cache.ttl=60, -Des.networkaddress.cache.negative.ttl=10, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -XX:+ShowCodeDetailsInExceptionMessages, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dio.netty.allocator.numDirectArenas=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Dlog4j2.formatMsgNoLookups=true, -Djava.locale.providers=SPI,COMPAT, --add-opens=java.base/java.io=ALL-UNNAMED, -Djava.security.manager=allow, -XX:+UseG1GC, -Djava.io.tmpdir=/tmp/elasticsearch-10019345711372005210, -XX:+HeapDumpOnOutOfMemoryError, -XX:+ExitOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, -Des.cgroups.hierarchy.override=/, -Xms1024m, -Xmx1024m, -XX:MaxDirectMemorySize=536870912, -XX:G1HeapRegionSize=4m, -XX:InitiatingHeapOccupancyPercent=30, -XX:G1ReservePercent=15, -Des.path.home=/usr/share/elasticsearch, -Des.path.conf=/usr/share/elasticsearch/config, -Des.distribution.flavor=default, -Des.distribution.type=docker, -Des.bundled_jdk=true]" } 2025-12-08T17:56:06.829926663+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,829Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": 
"elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [aggs-matrix-stats]" } 2025-12-08T17:56:06.830038706+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,829Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [analysis-common]" } 2025-12-08T17:56:06.830173239+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,830Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [constant-keyword]" } 2025-12-08T17:56:06.830292343+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,830Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [frozen-indices]" } 2025-12-08T17:56:06.830410816+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,830Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [ingest-common]" } 2025-12-08T17:56:06.830595261+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,830Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [ingest-geoip]" } 2025-12-08T17:56:06.830717374+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,830Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [ingest-user-agent]" } 2025-12-08T17:56:06.830838787+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,830Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [kibana]" } 2025-12-08T17:56:06.830978331+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,830Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [lang-expression]" } 2025-12-08T17:56:06.831106695+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,830Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [lang-mustache]" } 2025-12-08T17:56:06.831234949+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,831Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [lang-painless]" } 2025-12-08T17:56:06.831358082+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,831Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [legacy-geo]" } 2025-12-08T17:56:06.831491346+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,831Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [mapper-extras]" } 2025-12-08T17:56:06.831610199+00:00 stdout F {"type": "server", "timestamp": 
"2025-12-08T17:56:06,831Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [mapper-version]" } 2025-12-08T17:56:06.831736853+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,831Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [parent-join]" } 2025-12-08T17:56:06.831851916+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,831Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [percolator]" } 2025-12-08T17:56:06.831990330+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,831Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [rank-eval]" } 2025-12-08T17:56:06.832109383+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,831Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [reindex]" } 2025-12-08T17:56:06.832224166+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,832Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [repositories-metering-api]" } 2025-12-08T17:56:06.832342379+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,832Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [repository-encrypted]" } 2025-12-08T17:56:06.832472603+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,832Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [repository-url]" } 2025-12-08T17:56:06.832616677+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,832Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [runtime-fields-common]" } 2025-12-08T17:56:06.832710369+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,832Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [search-business-rules]" } 2025-12-08T17:56:06.832829842+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,832Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [searchable-snapshots]" } 2025-12-08T17:56:06.832968166+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,832Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [snapshot-repo-test-kit]" } 2025-12-08T17:56:06.833088179+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,832Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": 
"elasticsearch-es-default-0", "message": "loaded module [spatial]" } 2025-12-08T17:56:06.833200052+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,833Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [transform]" } 2025-12-08T17:56:06.833312945+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,833Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [transport-netty4]" } 2025-12-08T17:56:06.833442319+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,833Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [unsigned-long]" } 2025-12-08T17:56:06.833590443+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,833Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [vector-tile]" } 2025-12-08T17:56:06.833719486+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,833Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [vectors]" } 2025-12-08T17:56:06.833842670+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,833Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [wildcard]" } 2025-12-08T17:56:06.833980584+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,833Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-aggregate-metric]" } 2025-12-08T17:56:06.834107098+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,833Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-analytics]" } 2025-12-08T17:56:06.834252482+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,834Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-async]" } 2025-12-08T17:56:06.834395626+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,834Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-async-search]" } 2025-12-08T17:56:06.834588281+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,834Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-autoscaling]" } 2025-12-08T17:56:06.834988652+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,834Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-ccr]" } 2025-12-08T17:56:06.834988652+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,834Z", "level": 
"INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-core]" } 2025-12-08T17:56:06.835246639+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,834Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-data-streams]" } 2025-12-08T17:56:06.835492835+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,835Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-deprecation]" } 2025-12-08T17:56:06.835615949+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,835Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-enrich]" } 2025-12-08T17:56:06.835724022+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,835Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-eql]" } 2025-12-08T17:56:06.835832795+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,835Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-fleet]" } 2025-12-08T17:56:06.835961628+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,835Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-graph]" } 2025-12-08T17:56:06.836079721+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,835Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-identity-provider]" } 2025-12-08T17:56:06.836179504+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,836Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-ilm]" } 2025-12-08T17:56:06.836287107+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,836Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-logstash]" } 2025-12-08T17:56:06.836404290+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,836Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-ml]" } 2025-12-08T17:56:06.836518903+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,836Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-monitoring]" } 2025-12-08T17:56:06.836624336+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,836Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-ql]" } 
2025-12-08T17:56:06.836748829+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,836Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-rollup]" } 2025-12-08T17:56:06.836889424+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,836Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-security]" } 2025-12-08T17:56:06.837064799+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,836Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-shutdown]" } 2025-12-08T17:56:06.837217553+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,837Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-sql]" } 2025-12-08T17:56:06.837394658+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,837Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-stack]" } 2025-12-08T17:56:06.837541252+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,837Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-text-structure]" } 2025-12-08T17:56:06.837719047+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,837Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-voting-only-node]" } 2025-12-08T17:56:06.837820150+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,837Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "loaded module [x-pack-watcher]" } 2025-12-08T17:56:06.838413886+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,838Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "no plugins loaded" } 2025-12-08T17:56:06.864187933+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,863Z", "level": "INFO", "component": "o.e.e.NodeEnvironment", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "using [1] data paths, mounts [[/usr/share/elasticsearch/data (/dev/vda4)]], net usable_space [53.5gb], net total_space [79.4gb], types [xfs]" } 2025-12-08T17:56:06.864462930+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,864Z", "level": "INFO", "component": "o.e.e.NodeEnvironment", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "heap size [1gb], compressed ordinary object pointers [true]" } 2025-12-08T17:56:06.880720396+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:06,880Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "node name [elasticsearch-es-default-0], node ID [yZYce4ovTba2d9ExcpMq2Q], cluster name 
[elasticsearch], roles [master, data, ingest]" } 2025-12-08T17:56:09.177965602+00:00 stdout F {"type": "deprecation.elasticsearch", "timestamp": "2025-12-08T17:56:09,176Z", "level": "CRITICAL", "component": "o.e.d.c.r.OperationRouting", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "searches will not be routed based on awareness attributes starting in version 8.0.0; to opt into this behaviour now please set the system property [es.search.ignore_awareness_attributes] to [true]", "key": "searches_not_routed_on_awareness_attributes", "category": "settings" } 2025-12-08T17:56:11.518092525+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:11,517Z", "level": "INFO", "component": "o.e.x.m.p.l.CppLogMessageHandler", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "[controller/206] [Main.cc@122] controller (64 bit): Version 7.17.20 (Build 7a252d9f420169) Copyright (c) 2024 Elasticsearch BV" } 2025-12-08T17:56:11.913587317+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:11,913Z", "level": "INFO", "component": "o.e.x.s.a.Realms", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "license mode is [trial], currently licensed security realms are [reserved/reserved,file/file1,native/native1]" } 2025-12-08T17:56:12.211946824+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:12,210Z", "level": "INFO", "component": "o.e.x.s.a.s.FileRolesStore", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "parsed [55] roles from file [/usr/share/elasticsearch/config/roles.yml]" } 2025-12-08T17:56:12.602675386+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:12,602Z", "level": "INFO", "component": "o.e.i.g.ConfigDatabases", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "initialized default databases [[GeoLite2-Country.mmdb, GeoLite2-City.mmdb, GeoLite2-ASN.mmdb]], config databases [[]] and watching [/usr/share/elasticsearch/config/ingest-geoip] for changes" } 2025-12-08T17:56:12.603627921+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:12,603Z", "level": "INFO", "component": "o.e.i.g.DatabaseNodeService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "initialized database registry, using geoip-databases directory [/tmp/elasticsearch-10019345711372005210/geoip-databases/yZYce4ovTba2d9ExcpMq2Q]" } 2025-12-08T17:56:13.062311048+00:00 stderr F {"timestamp": "2025-12-08T17:56:13+00:00", "message": "readiness probe failed", "curl_rc": "7"} 2025-12-08T17:56:13.119400664+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:13,119Z", "level": "INFO", "component": "o.e.t.NettyAllocator", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "creating NettyAllocator with the following configs: [name=unpooled, suggested_max_allocation_size=1mb, factors={es.unsafe.use_unpooled_allocator=null, g1gc_enabled=true, g1gc_region_size=4mb, heap_size=1gb}]" } 2025-12-08T17:56:13.144540694+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:13,144Z", "level": "INFO", "component": "o.e.i.r.RecoverySettings", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]" } 2025-12-08T17:56:13.174366233+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:13,173Z", 
"level": "INFO", "component": "o.e.d.DiscoveryModule", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "using discovery type [zen] and seed hosts providers [settings, file]" } 2025-12-08T17:56:13.660426059+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:13,659Z", "level": "INFO", "component": "o.e.g.DanglingIndicesState", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "gateway.auto_import_dangling_indices is disabled, dangling indices will not be automatically detected or imported and must be managed manually" } 2025-12-08T17:56:14.264440424+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,264Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "initialized" } 2025-12-08T17:56:14.264511076+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,264Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "starting ..." } 2025-12-08T17:56:14.273185364+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,272Z", "level": "INFO", "component": "o.e.x.s.c.f.PersistentCache", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "persistent cache index loaded" } 2025-12-08T17:56:14.273937084+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,273Z", "level": "INFO", "component": "o.e.x.d.l.DeprecationIndexingComponent", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "deprecation component started" } 2025-12-08T17:56:14.372254192+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,371Z", "level": "INFO", "component": "o.e.t.TransportService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "publish_address {10.217.0.53:9300}, bound_addresses {[::]:9300}" } 2025-12-08T17:56:14.380326943+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,380Z", "level": "INFO", "component": "o.e.x.m.Monitoring", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "creating template [.monitoring-alerts-7] with version [7]" } 2025-12-08T17:56:14.385142866+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,384Z", "level": "INFO", "component": "o.e.x.m.Monitoring", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "creating template [.monitoring-es] with version [7]" } 2025-12-08T17:56:14.386360359+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,386Z", "level": "INFO", "component": "o.e.x.m.Monitoring", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "creating template [.monitoring-kibana] with version [7]" } 2025-12-08T17:56:14.388586930+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,388Z", "level": "INFO", "component": "o.e.x.m.Monitoring", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "creating template [.monitoring-logstash] with version [7]" } 2025-12-08T17:56:14.391982433+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,391Z", "level": "INFO", "component": "o.e.x.m.Monitoring", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "creating template [.monitoring-beats] with version [7]" } 
2025-12-08T17:56:14.492412989+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,492Z", "level": "INFO", "component": "o.e.b.BootstrapChecks", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "bound or publishing to a non-loopback address, enforcing bootstrap checks" } 2025-12-08T17:56:14.509366124+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,509Z", "level": "INFO", "component": "o.e.c.c.Coordinator", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "setting initial configuration to VotingConfiguration{yZYce4ovTba2d9ExcpMq2Q}" } 2025-12-08T17:56:14.672575223+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,671Z", "level": "INFO", "component": "o.e.c.s.MasterService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "elected-as-master ([1] nodes joined)[{elasticsearch-es-default-0}{yZYce4ovTba2d9ExcpMq2Q}{eX6Iov0FQoygtDPLsOloJQ}{10.217.0.53}{10.217.0.53:9300}{dim} elect leader, _BECOME_MASTER_TASK_, _FINISH_ELECTION_], term: 1, version: 1, delta: master node changed {previous [], current [{elasticsearch-es-default-0}{yZYce4ovTba2d9ExcpMq2Q}{eX6Iov0FQoygtDPLsOloJQ}{10.217.0.53}{10.217.0.53:9300}{dim}]}" } 2025-12-08T17:56:14.753815232+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,753Z", "level": "INFO", "component": "o.e.c.c.CoordinationState", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "cluster UUID set to [BHO8BtUVT8i1ANtvfdqJYw]" } 2025-12-08T17:56:14.786547360+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,786Z", "level": "INFO", "component": "o.e.c.s.ClusterApplierService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "master node changed {previous [], current [{elasticsearch-es-default-0}{yZYce4ovTba2d9ExcpMq2Q}{eX6Iov0FQoygtDPLsOloJQ}{10.217.0.53}{10.217.0.53:9300}{dim}]}, term: 1, version: 1, reason: Publication{term=1, version=1}" } 2025-12-08T17:56:14.846358452+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,845Z", "level": "INFO", "component": "o.e.h.AbstractHttpServerTransport", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "publish_address {elasticsearch-es-default-0.elasticsearch-es-default.service-telemetry.svc/10.217.0.53:9200}, bound_addresses {[::]:9200}", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:14.846803014+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:14,846Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "started", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.023389009+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,022Z", "level": "INFO", "component": "o.e.g.GatewayService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "recovered [0] indices into cluster_state", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.356610223+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,355Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [.ml-anomalies-] for 
index patterns [.ml-anomalies-*]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.474680203+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,473Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [.ml-notifications-000002] for index patterns [.ml-notifications-000002]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.539426109+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,538Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [.ml-state] for index patterns [.ml-state*]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.582590733+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,582Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [.ml-stats] for index patterns [.ml-stats-*]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.648548064+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,647Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding component template [metrics-mappings]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.724968730+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,724Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding component template [logs-settings]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.777938284+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,777Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding component template [data-streams-mappings]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.830821745+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,830Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding component template [metrics-settings]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.888365174+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,888Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding component template [synthetics-settings]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.944074412+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,943Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": 
"elasticsearch-es-default-0", "message": "adding component template [logs-mappings]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:15.984522913+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:15,984Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding component template [synthetics-mappings]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.054253046+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,053Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [ilm-history] for index patterns [ilm-history-5*]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.109841272+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,109Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [.watch-history-13] for index patterns [.watcher-history-13*]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.139184366+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,138Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding component template [.deprecation-indexing-settings]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.176148860+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,175Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding component template [.deprecation-indexing-mappings]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.213598659+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,213Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [.slm-history] for index patterns [.slm-history-5*]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.243813527+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,243Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [metrics] for index patterns [metrics-*-*]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.275569649+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,275Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [logs] for index patterns [logs-*-*]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.317385186+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,317Z", 
"level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [synthetics] for index patterns [synthetics-*-*]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.345228321+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,344Z", "level": "INFO", "component": "o.e.c.m.MetadataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index template [.deprecation-indexing-template] for index patterns [.logs-deprecation.*]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.375448629+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,375Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [ml-size-based-ilm-policy]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.429009130+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,428Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [logs]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.462298172+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,461Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [synthetics]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.494781514+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,494Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [metrics]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.523336497+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,523Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [30-days-default]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.564507217+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,563Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [90-days-default]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.599959490+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,599Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [365-days-default]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.646814155+00:00 stdout F {"type": 
"server", "timestamp": "2025-12-08T17:56:16,646Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [180-days-default]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.679745699+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,679Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [7-days-default]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.710266157+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,709Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [watch-history-ilm-policy]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.747691113+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,747Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [ilm-history-ilm-policy]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.784798702+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,784Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [slm-history-ilm-policy]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.814544328+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,813Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [.deprecation-indexing-ilm-policy]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.844574972+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,843Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "adding index lifecycle policy [.fleet-actions-results-ilm-policy]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.923194819+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,922Z", "level": "INFO", "component": "o.e.i.g.GeoIpDownloader", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "updating geoip databases", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.923317753+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,923Z", "level": "INFO", "component": "o.e.i.g.GeoIpDownloader", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "fetching geoip databases overview from [https://geoip.elastic.co/v1/database?elastic_geoip_service_tos=agree]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": 
"yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.986429375+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,986Z", "level": "INFO", "component": "o.e.l.LicenseService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "license [d89d1685-8ed0-417b-8a0c-6e817c3bf119] mode [basic] - valid", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.987161315+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,986Z", "level": "INFO", "component": "o.e.x.s.a.Realms", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "license mode is [basic], currently licensed security realms are [reserved/reserved,file/file1,native/native1]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:16.987863964+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:16,987Z", "level": "INFO", "component": "o.e.x.s.s.SecurityStatusChangeListener", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "Active license is now [BASIC]; Security is enabled", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:17.870073002+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:17,868Z", "level": "INFO", "component": "o.e.c.m.MetadataCreateIndexService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "[.geoip_databases] creating index, cause [auto(bulk api)], templates [], shards [1]/[0]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:18.317234452+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:18,316Z", "level": "INFO", "component": "o.e.c.r.a.AllocationService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[.geoip_databases][0]]]).", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:19.196692334+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:19,196Z", "level": "INFO", "component": "o.e.i.g.DatabaseNodeService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "retrieve geoip database [GeoLite2-ASN.mmdb] from [.geoip_databases] to [/tmp/elasticsearch-10019345711372005210/geoip-databases/yZYce4ovTba2d9ExcpMq2Q/GeoLite2-ASN.mmdb.tmp.gz]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:19.219060408+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:19,218Z", "level": "INFO", "component": "o.e.i.g.GeoIpDownloader", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "successfully downloaded geoip database [GeoLite2-ASN.mmdb]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:19.552093675+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:19,551Z", "level": "INFO", "component": "o.e.i.g.DatabaseNodeService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "successfully reloaded changed geoip database file [/tmp/elasticsearch-10019345711372005210/geoip-databases/yZYce4ovTba2d9ExcpMq2Q/GeoLite2-ASN.mmdb]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 
2025-12-08T17:56:21.249746598+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:21,249Z", "level": "INFO", "component": "o.e.i.g.DatabaseNodeService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "retrieve geoip database [GeoLite2-City.mmdb] from [.geoip_databases] to [/tmp/elasticsearch-10019345711372005210/geoip-databases/yZYce4ovTba2d9ExcpMq2Q/GeoLite2-City.mmdb.tmp.gz]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:21.261800640+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:21,261Z", "level": "INFO", "component": "o.e.i.g.GeoIpDownloader", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "successfully downloaded geoip database [GeoLite2-City.mmdb]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:21.837479726+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:21,836Z", "level": "INFO", "component": "o.e.i.g.DatabaseNodeService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "retrieve geoip database [GeoLite2-Country.mmdb] from [.geoip_databases] to [/tmp/elasticsearch-10019345711372005210/geoip-databases/yZYce4ovTba2d9ExcpMq2Q/GeoLite2-Country.mmdb.tmp.gz]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:21.915340183+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:21,914Z", "level": "INFO", "component": "o.e.i.g.GeoIpDownloader", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "successfully downloaded geoip database [GeoLite2-Country.mmdb]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:22.220182937+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:22,219Z", "level": "INFO", "component": "o.e.i.g.DatabaseNodeService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "successfully reloaded changed geoip database file [/tmp/elasticsearch-10019345711372005210/geoip-databases/yZYce4ovTba2d9ExcpMq2Q/GeoLite2-Country.mmdb]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:56:22.355173191+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:56:22,354Z", "level": "INFO", "component": "o.e.i.g.DatabaseNodeService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "successfully reloaded changed geoip database file [/tmp/elasticsearch-10019345711372005210/geoip-databases/yZYce4ovTba2d9ExcpMq2Q/GeoLite2-City.mmdb]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:59:24.620596968+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:59:24,620Z", "level": "INFO", "component": "o.e.c.m.MetadataCreateIndexService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "[collectd_interface_if] creating index, cause [auto(bulk api)], templates [], shards [1]/[1]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:59:24.763237857+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:59:24,762Z", "level": "INFO", "component": "o.e.c.m.MetadataMappingService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "[collectd_interface_if/P7BJtQCbTFSY3_7V1CWdtw] create_mapping 
[_doc]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:59:30.967435452+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:59:30,967Z", "level": "INFO", "component": "o.e.c.m.MetadataCreateIndexService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "[ceilometer_image] creating index, cause [auto(bulk api)], templates [], shards [1]/[1]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:59:31.110206174+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:59:31,109Z", "level": "INFO", "component": "o.e.c.m.MetadataMappingService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "[ceilometer_image/d9n86q5DSdeb0YeTwOYlKA] create_mapping [_doc]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } 2025-12-08T17:59:31.117562489+00:00 stdout F {"type": "server", "timestamp": "2025-12-08T17:59:31,117Z", "level": "INFO", "component": "o.e.c.m.MetadataMappingService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-es-default-0", "message": "[ceilometer_image/d9n86q5DSdeb0YeTwOYlKA] update_mapping [_doc]", "cluster.uuid": "BHO8BtUVT8i1ANtvfdqJYw", "node.id": "yZYce4ovTba2d9ExcpMq2Q" } ././@LongLink0000644000000000000000000000026200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_serv0000755000175000017500000000000015115611513033161 5ustar zuulzuul././@LongLink0000644000000000000000000000027300000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4/operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_serv0000755000175000017500000000000015115611521033160 5ustar zuulzuul././@LongLink0000644000000000000000000000030000000000000011574 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4/operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_serv0000644000175000017500000232751115115611513033176 0ustar zuulzuul2025-12-08T17:57:06.510547349+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","logger":"cmd","msg":"Version","Go Version":"go1.22.5","GOOS":"linux","GOARCH":"amd64","ansible-operator":"v1.36.1","commit":"60db7ce358a45ffb3ec8303944aaeb1a601aa560"} 2025-12-08T17:57:06.510779615+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","logger":"cmd","msg":"Environment variable OPERATOR_NAME has been deprecated, use --leader-election-id instead."} 2025-12-08T17:57:06.510779615+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","logger":"cmd","msg":"Watching namespaces","namespaces":["service-telemetry"]} 2025-12-08T17:57:06.513377903+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","logger":"watches","msg":"Environment variable not set; using default value","envVar":"ANSIBLE_VERBOSITY_SERVICETELEMETRY_INFRA_WATCH","default":2} 2025-12-08T17:57:06.513478955+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:57:06Z","logger":"cmd","msg":"Environment variable not set; using default value","envVar":"ANSIBLE_DEBUG_LOGS","ANSIBLE_DEBUG_LOGS":false} 2025-12-08T17:57:06.513478955+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","logger":"ansible-controller","msg":"Watching resource","Options.Group":"infra.watch","Options.Version":"v1beta1","Options.Kind":"ServiceTelemetry"} 2025-12-08T17:57:06.515097698+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","logger":"apiserver","msg":"Starting to serve metrics listener","Address":"localhost:5050"} 2025-12-08T17:57:06.515097698+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","logger":"proxy","msg":"Starting to serve","Address":"127.0.0.1:8888"} 2025-12-08T17:57:06.515259002+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","logger":"controller-runtime.metrics","msg":"Starting metrics server"} 2025-12-08T17:57:06.515378255+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","logger":"controller-runtime.metrics","msg":"Serving metrics server","bindAddress":":8443","secure":false} 2025-12-08T17:57:06.515378255+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","msg":"starting server","name":"health probe","addr":"[::]:6789"} 2025-12-08T17:57:06.515720174+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:06.515720174+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","msg":"Starting Controller","controller":"servicetelemetry-controller"} 2025-12-08T17:57:06.656690990+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:06Z","msg":"Starting workers","controller":"servicetelemetry-controller","worker count":12} 2025-12-08T17:57:12.274949757+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:12Z","logger":"KubeAPIWarningLogger","msg":"unknown field \"status.conditions[0].message\""} 2025-12-08T17:57:12.949999973+00:00 stdout F 2025-12-08T17:57:12.949999973+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:12.949999973+00:00 stdout F 2025-12-08T17:57:12.949999973+00:00 stdout F TASK [Installing service telemetry] ******************************** 2025-12-08T17:57:12.949999973+00:00 stdout F ok: [localhost] => { 2025-12-08T17:57:12.949999973+00:00 stdout F  "msg": "INSTALLING SERVICE TELEMETRY" 2025-12-08T17:57:12.949999973+00:00 stdout F } 2025-12-08T17:57:12.949999973+00:00 stdout F 2025-12-08T17:57:12.949999973+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:12.950079005+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:12Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"894904700568716324","EventData.TaskArgs":""} 2025-12-08T17:57:12.953167996+00:00 stdout F 2025-12-08T17:57:12.953167996+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:12.953167996+00:00 stdout F 2025-12-08T17:57:12.953167996+00:00 stdout F TASK [servicetelemetry : Pre-setup] ******************************************** 2025-12-08T17:57:12.953167996+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:7 2025-12-08T17:57:12.953167996+00:00 stdout F 2025-12-08T17:57:12.953167996+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T17:57:12.953233747+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:12Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Pre-setup"} 2025-12-08T17:57:13.153584162+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Clear the fact cache before looking up cluster information"} 2025-12-08T17:57:13.153640824+00:00 stdout F 2025-12-08T17:57:13.153640824+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:13.153660694+00:00 stdout F 2025-12-08T17:57:13.153660694+00:00 stdout F TASK [servicetelemetry : Clear the fact cache before looking up cluster information] *** 2025-12-08T17:57:13.153660694+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:1 2025-12-08T17:57:13.153679175+00:00 stdout F 2025-12-08T17:57:13.153679175+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:13.605164241+00:00 stdout F 2025-12-08T17:57:13.605164241+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:13.605164241+00:00 stdout F 2025-12-08T17:57:13.605164241+00:00 stdout F TASK [Show existing API groups available to us] ******************************** 2025-12-08T17:57:13.605164241+00:00 stdout F ok: [localhost] => { 2025-12-08T17:57:13.605164241+00:00 stdout F  "api_groups": [ 2025-12-08T17:57:13.605164241+00:00 stdout F  "", 2025-12-08T17:57:13.605164241+00:00 stdout F  "apiregistration.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "apps", 2025-12-08T17:57:13.605164241+00:00 stdout F  "events.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "authentication.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "authorization.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "autoscaling", 2025-12-08T17:57:13.605164241+00:00 stdout F  "batch", 2025-12-08T17:57:13.605164241+00:00 stdout F  "certificates.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "networking.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "policy", 2025-12-08T17:57:13.605164241+00:00 stdout F  "rbac.authorization.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "storage.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "admissionregistration.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "apiextensions.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "scheduling.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "coordination.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "node.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "discovery.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "flowcontrol.apiserver.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "apps.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "authorization.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "build.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "image.openshift.io", 
2025-12-08T17:57:13.605164241+00:00 stdout F  "oauth.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "project.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "quota.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "route.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "security.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "template.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "user.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "packages.operators.coreos.com", 2025-12-08T17:57:13.605164241+00:00 stdout F  "config.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "operator.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "acme.cert-manager.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "apiserver.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "apm.k8s.elastic.co", 2025-12-08T17:57:13.605164241+00:00 stdout F  "autoscaling.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "cert-manager.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "console.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "elasticsearch.k8s.elastic.co", 2025-12-08T17:57:13.605164241+00:00 stdout F  "enterprisesearch.k8s.elastic.co", 2025-12-08T17:57:13.605164241+00:00 stdout F  "gateway.networking.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "imageregistry.operator.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "ingress.operator.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "k8s.cni.cncf.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "k8s.ovn.org", 2025-12-08T17:57:13.605164241+00:00 stdout F  "kibana.k8s.elastic.co", 2025-12-08T17:57:13.605164241+00:00 stdout F  "machine.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "machineconfiguration.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "monitoring.coreos.com", 2025-12-08T17:57:13.605164241+00:00 stdout F  "monitoring.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "monitoring.rhobs", 2025-12-08T17:57:13.605164241+00:00 stdout F  "network.operator.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "operators.coreos.com", 2025-12-08T17:57:13.605164241+00:00 stdout F  "samples.operator.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "security.internal.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "agent.k8s.elastic.co", 2025-12-08T17:57:13.605164241+00:00 stdout F  "autoscaling.k8s.elastic.co", 2025-12-08T17:57:13.605164241+00:00 stdout F  "controlplane.operator.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "interconnectedcloud.github.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "ipam.cluster.x-k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "logstash.k8s.elastic.co", 2025-12-08T17:57:13.605164241+00:00 stdout F  "maps.k8s.elastic.co", 2025-12-08T17:57:13.605164241+00:00 stdout F  "migration.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "observability.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "perses.dev", 2025-12-08T17:57:13.605164241+00:00 stdout F  "policy.networking.k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "stackconfigpolicy.k8s.elastic.co", 2025-12-08T17:57:13.605164241+00:00 stdout F  "whereabouts.cni.cncf.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "infrastructure.cluster.x-k8s.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "beat.k8s.elastic.co", 2025-12-08T17:57:13.605164241+00:00 stdout F  
"helm.openshift.io", 2025-12-08T17:57:13.605164241+00:00 stdout F  "infra.watch", 2025-12-08T17:57:13.605164241+00:00 stdout F  "smartgateway.infra.watch" 2025-12-08T17:57:13.605164241+00:00 stdout F  ] 2025-12-08T17:57:13.605164241+00:00 stdout F } 2025-12-08T17:57:13.605164241+00:00 stdout F 2025-12-08T17:57:13.605164241+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:13.605246013+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:13Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"894904700568716324","EventData.TaskArgs":""} 2025-12-08T17:57:13.641472498+00:00 stdout F 2025-12-08T17:57:13.641472498+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:13.641472498+00:00 stdout F 2025-12-08T17:57:13.641472498+00:00 stdout F TASK [Indicate what kind of cluster we are in (OpenShift or Kubernetes).] ******************************** 2025-12-08T17:57:13.641472498+00:00 stdout F ok: [localhost] => { 2025-12-08T17:57:13.641472498+00:00 stdout F  "msg": "CLUSTER TYPE: is_openshift=True; is_k8s=False" 2025-12-08T17:57:13.641472498+00:00 stdout F } 2025-12-08T17:57:13.641472498+00:00 stdout F 2025-12-08T17:57:13.641472498+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:13.641508339+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:13Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"894904700568716324","EventData.TaskArgs":""} 2025-12-08T17:57:13.644358414+00:00 stdout F 2025-12-08T17:57:13.644358414+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:13.644358414+00:00 stdout F 2025-12-08T17:57:13.644358414+00:00 stdout F TASK [servicetelemetry : Fail when can't determine type of cluster] ************ 2025-12-08T17:57:13.644358414+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:27 2025-12-08T17:57:13.644358414+00:00 stdout F 2025-12-08T17:57:13.644358414+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:13.644376924+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Fail when can't determine type of cluster"} 2025-12-08T17:57:14.021447778+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:14Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"894904700568716324","EventData.TaskArgs":""} 2025-12-08T17:57:14.021495959+00:00 stdout F 2025-12-08T17:57:14.021495959+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:14.021495959+00:00 stdout F 2025-12-08T17:57:14.021495959+00:00 stdout F TASK [Print some debug information] ******************************** 2025-12-08T17:57:14.021495959+00:00 stdout F ok: 
[localhost] => { 2025-12-08T17:57:14.021495959+00:00 stdout F  "msg": [ 2025-12-08T17:57:14.021495959+00:00 stdout F  "ServiceTelemetry Variables", 2025-12-08T17:57:14.021495959+00:00 stdout F  "--------------------------------------------", 2025-12-08T17:57:14.021495959+00:00 stdout F  "alerting:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " alertmanager:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " deployment_size: 1", 2025-12-08T17:57:14.021495959+00:00 stdout F  " receivers:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " snmp_traps:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " alert_oid_label: oid", 2025-12-08T17:57:14.021495959+00:00 stdout F  " community: public", 2025-12-08T17:57:14.021495959+00:00 stdout F  " enabled: true", 2025-12-08T17:57:14.021495959+00:00 stdout F  " port: 162", 2025-12-08T17:57:14.021495959+00:00 stdout F  " retries: 5", 2025-12-08T17:57:14.021495959+00:00 stdout F  " target: 192.168.24.254", 2025-12-08T17:57:14.021495959+00:00 stdout F  " timeout: 1", 2025-12-08T17:57:14.021495959+00:00 stdout F  " trap_default_oid: 1.3.6.1.4.1.50495.15.1.2.1", 2025-12-08T17:57:14.021495959+00:00 stdout F  " trap_default_severity: ''", 2025-12-08T17:57:14.021495959+00:00 stdout F  " trap_oid_prefix: 1.3.6.1.4.1.50495.15", 2025-12-08T17:57:14.021495959+00:00 stdout F  " storage:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " persistent:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " pvc_storage_request: 20G", 2025-12-08T17:57:14.021495959+00:00 stdout F  " storage_class: crc-csi-hostpath-provisioner", 2025-12-08T17:57:14.021495959+00:00 stdout F  " strategy: persistent", 2025-12-08T17:57:14.021495959+00:00 stdout F  " enabled: true", 2025-12-08T17:57:14.021495959+00:00 stdout F  "backends:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " events:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " elasticsearch:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " certificates:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ca_cert_duration: 70080h", 2025-12-08T17:57:14.021495959+00:00 stdout F  " endpoint_cert_duration: 70080h", 2025-12-08T17:57:14.021495959+00:00 stdout F  " enabled: true", 2025-12-08T17:57:14.021495959+00:00 stdout F  " forwarding:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " host_url: https://elasticsearch-es-http:9200", 2025-12-08T17:57:14.021495959+00:00 stdout F  " tls_secret_name: elasticsearch-es-cert", 2025-12-08T17:57:14.021495959+00:00 stdout F  " tls_server_name: ''", 2025-12-08T17:57:14.021495959+00:00 stdout F  " use_basic_auth: true", 2025-12-08T17:57:14.021495959+00:00 stdout F  " use_tls: true", 2025-12-08T17:57:14.021495959+00:00 stdout F  " user_secret_name: elasticsearch-es-elastic-user", 2025-12-08T17:57:14.021495959+00:00 stdout F  " node_count: 1", 2025-12-08T17:57:14.021495959+00:00 stdout F  " storage:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " persistent:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " pvc_storage_request: 20Gi", 2025-12-08T17:57:14.021495959+00:00 stdout F  " storage_class: ''", 2025-12-08T17:57:14.021495959+00:00 stdout F  " strategy: persistent", 2025-12-08T17:57:14.021495959+00:00 stdout F  " version: 7.16.1", 2025-12-08T17:57:14.021495959+00:00 stdout F  " metrics:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " prometheus:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " deployment_size: 1", 2025-12-08T17:57:14.021495959+00:00 stdout F  " enabled: true", 2025-12-08T17:57:14.021495959+00:00 stdout F  " scrape_interval: 30s", 2025-12-08T17:57:14.021495959+00:00 stdout F  " 
storage:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " persistent:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " pvc_storage_request: 20G", 2025-12-08T17:57:14.021495959+00:00 stdout F  " storage_class: crc-csi-hostpath-provisioner", 2025-12-08T17:57:14.021495959+00:00 stdout F  " retention: 24h", 2025-12-08T17:57:14.021495959+00:00 stdout F  " strategy: persistent", 2025-12-08T17:57:14.021495959+00:00 stdout F  "clouds:", 2025-12-08T17:57:14.021495959+00:00 stdout F  "- events:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " collectors:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " - bridge:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:57:14.021495959+00:00 stdout F  " verbose: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " collector_type: collectd", 2025-12-08T17:57:14.021495959+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " subscription_address: collectd/cloud1-notify", 2025-12-08T17:57:14.021495959+00:00 stdout F  " - bridge:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:57:14.021495959+00:00 stdout F  " verbose: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " collector_type: ceilometer", 2025-12-08T17:57:14.021495959+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " subscription_address: anycast/ceilometer/cloud1-event.sample", 2025-12-08T17:57:14.021495959+00:00 stdout F  " metrics:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " collectors:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " - bridge:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:57:14.021495959+00:00 stdout F  " verbose: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " collector_type: collectd", 2025-12-08T17:57:14.021495959+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " subscription_address: collectd/cloud1-telemetry", 2025-12-08T17:57:14.021495959+00:00 stdout F  " - bridge:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:57:14.021495959+00:00 stdout F  " verbose: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " collector_type: ceilometer", 2025-12-08T17:57:14.021495959+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " subscription_address: anycast/ceilometer/cloud1-metering.sample", 2025-12-08T17:57:14.021495959+00:00 stdout F  " - bridge:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ring_buffer_size: 65535", 2025-12-08T17:57:14.021495959+00:00 stdout F  " verbose: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " collector_type: sensubility", 2025-12-08T17:57:14.021495959+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " subscription_address: sensubility/cloud1-telemetry", 2025-12-08T17:57:14.021495959+00:00 stdout F  " name: cloud1", 2025-12-08T17:57:14.021495959+00:00 stdout F  "graphing:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " enabled: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " grafana:", 
2025-12-08T17:57:14.021495959+00:00 stdout F  " base_image: registry.redhat.io/rhel8/grafana:9", 2025-12-08T17:57:14.021495959+00:00 stdout F  " dashboards:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " enabled: true", 2025-12-08T17:57:14.021495959+00:00 stdout F  " disable_signout_menu: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ingress_enabled: true", 2025-12-08T17:57:14.021495959+00:00 stdout F  "high_availability:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " enabled: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  "transports:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " qdr:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " auth: basic", 2025-12-08T17:57:14.021495959+00:00 stdout F  " certificates:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " ca_cert_duration: 70080h", 2025-12-08T17:57:14.021495959+00:00 stdout F  " endpoint_cert_duration: 70080h", 2025-12-08T17:57:14.021495959+00:00 stdout F  " deployment_size: 1", 2025-12-08T17:57:14.021495959+00:00 stdout F  " enabled: true", 2025-12-08T17:57:14.021495959+00:00 stdout F  " web:", 2025-12-08T17:57:14.021495959+00:00 stdout F  " enabled: false", 2025-12-08T17:57:14.021495959+00:00 stdout F  "" 2025-12-08T17:57:14.021495959+00:00 stdout F  ] 2025-12-08T17:57:14.021495959+00:00 stdout F } 2025-12-08T17:57:14.021495959+00:00 stdout F 2025-12-08T17:57:14.021495959+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:14.044762186+00:00 stdout F 2025-12-08T17:57:14.044762186+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:14.044762186+00:00 stdout F 2025-12-08T17:57:14.044762186+00:00 stdout F TASK [servicetelemetry : Get current Smart Gateways loaded] ******************** 2025-12-08T17:57:14.044762186+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:77 2025-12-08T17:57:14.044762186+00:00 stdout F 2025-12-08T17:57:14.044762186+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:14.044795458+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:14Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Get current Smart Gateways loaded"} 2025-12-08T17:57:14.945949661+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:14Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways","Verb":"list","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"","Parts":["smartgateways"]}} 2025-12-08T17:57:15.036639887+00:00 stdout F 2025-12-08T17:57:15.036639887+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:15.036639887+00:00 stdout F 2025-12-08T17:57:15.036639887+00:00 stdout F TASK [servicetelemetry : Get current STF object] ******************************* 2025-12-08T17:57:15.036639887+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:84 2025-12-08T17:57:15.036639887+00:00 stdout F 2025-12-08T17:57:15.036639887+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T17:57:15.036995006+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:15Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Get current STF object"} 2025-12-08T17:57:15.736190212+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:15Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/infra.watch/v1beta1/namespaces/service-telemetry/servicetelemetrys/default","Verb":"get","APIPrefix":"apis","APIGroup":"infra.watch","APIVersion":"v1beta1","Namespace":"service-telemetry","Resource":"servicetelemetrys","Subresource":"","Name":"default","Parts":["servicetelemetrys","default"]}} 2025-12-08T17:57:15.838159222+00:00 stdout F 2025-12-08T17:57:15.838159222+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:15.838159222+00:00 stdout F 2025-12-08T17:57:15.838159222+00:00 stdout F TASK [servicetelemetry : Get community Prometheus objects] ********************* 2025-12-08T17:57:15.838159222+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:92 2025-12-08T17:57:15.838159222+00:00 stdout F 2025-12-08T17:57:15.838159222+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:15.838216834+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:15Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Get community Prometheus objects"} 2025-12-08T17:57:16.572943647+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:16Z","logger":"proxy","msg":"Cache miss: monitoring.coreos.com/v1, Kind=Prometheus, service-telemetry/default"} 2025-12-08T17:57:16.708044850+00:00 stdout F 2025-12-08T17:57:16.708044850+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:16.708044850+00:00 stdout F 2025-12-08T17:57:16.708044850+00:00 stdout F TASK [servicetelemetry : Apply community observabilityStrategy if missing on an STF object with an existing community prometheus] *** 2025-12-08T17:57:16.708044850+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:101 2025-12-08T17:57:16.708044850+00:00 stdout F 2025-12-08T17:57:16.708044850+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:16.708091011+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:16Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Apply community observabilityStrategy if missing on an STF object with an existing community prometheus"} 2025-12-08T17:57:16.788804227+00:00 stdout F 2025-12-08T17:57:16.788804227+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:16.788804227+00:00 stdout F 2025-12-08T17:57:16.788804227+00:00 stdout F TASK 
[servicetelemetry : Apply default observabilityStrategy if missing on a new STF object with no associated community prometheus] *** 2025-12-08T17:57:16.788804227+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:118 2025-12-08T17:57:16.788804227+00:00 stdout F 2025-12-08T17:57:16.788804227+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:16.788847598+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:16Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Apply default observabilityStrategy if missing on a new STF object with no associated community prometheus"} 2025-12-08T17:57:16.814029094+00:00 stdout F 2025-12-08T17:57:16.814029094+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:16.814029094+00:00 stdout F 2025-12-08T17:57:16.814029094+00:00 stdout F TASK [servicetelemetry : Get QDR objects] ************************************** 2025-12-08T17:57:16.814029094+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:132 2025-12-08T17:57:16.814029094+00:00 stdout F 2025-12-08T17:57:16.814029094+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:16.814075896+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:16Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Get QDR objects"} 2025-12-08T17:57:17.606935815+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:17Z","logger":"proxy","msg":"Cache miss: interconnectedcloud.github.io/v1alpha1, Kind=Interconnect, service-telemetry/default-interconnect"} 2025-12-08T17:57:17.763455397+00:00 stdout F 2025-12-08T17:57:17.763455397+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:17.763455397+00:00 stdout F 2025-12-08T17:57:17.763455397+00:00 stdout F TASK [servicetelemetry : Apply legacy auth=none for QDR if missing on the STF object and it's currently deployed that way] *** 2025-12-08T17:57:17.763455397+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:141 2025-12-08T17:57:17.763455397+00:00 stdout F 2025-12-08T17:57:17.763455397+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:17.763497879+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:17Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Apply legacy auth=none for QDR if missing on the STF object and it's currently deployed that way"} 2025-12-08T17:57:17.825146727+00:00 stdout F 2025-12-08T17:57:17.825146727+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:17.825146727+00:00 stdout F 2025-12-08T17:57:17.825146727+00:00 stdout F TASK [servicetelemetry : Apply default auth for QDR if missing on a new STF 
object with no associated auth=none QDR] *** 2025-12-08T17:57:17.825146727+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:161 2025-12-08T17:57:17.825146727+00:00 stdout F 2025-12-08T17:57:17.825146727+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:17.825179277+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:17Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Apply default auth for QDR if missing on a new STF object with no associated auth=none QDR"} 2025-12-08T17:57:17.887854332+00:00 stdout F 2025-12-08T17:57:17.887854332+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:17.887854332+00:00 stdout F 2025-12-08T17:57:17.887854332+00:00 stdout F TASK [servicetelemetry : Set ServiceTelemetry object status to have ephemeralStorageEnabled status] *** 2025-12-08T17:57:17.887854332+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:185 2025-12-08T17:57:17.887854332+00:00 stdout F 2025-12-08T17:57:17.887854332+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:17.887897843+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:17Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Set ServiceTelemetry object status to have ephemeralStorageEnabled status"} 2025-12-08T17:57:17.967701405+00:00 stdout F 2025-12-08T17:57:17.967701405+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:17.967701405+00:00 stdout F 2025-12-08T17:57:17.967701405+00:00 stdout F TASK [servicetelemetry : Create QDR instance] ********************************** 2025-12-08T17:57:17.967701405+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:19 2025-12-08T17:57:17.967701405+00:00 stdout F 2025-12-08T17:57:17.967701405+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:17.967728305+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:17Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create QDR instance"} 2025-12-08T17:57:18.047741932+00:00 stdout F 2025-12-08T17:57:18.047741932+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:18.047741932+00:00 stdout F 2025-12-08T17:57:18.047741932+00:00 stdout F TASK [servicetelemetry : Create self-signed interconnect issuer] *************** 2025-12-08T17:57:18.047741932+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:6 2025-12-08T17:57:18.047741932+00:00 stdout F 2025-12-08T17:57:18.047741932+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:18.047805373+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:57:18Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create self-signed interconnect issuer"} 2025-12-08T17:57:19.008325546+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:19Z","logger":"proxy","msg":"Cache miss: cert-manager.io/v1, Kind=Issuer, service-telemetry/default-interconnect-selfsigned"} 2025-12-08T17:57:19.013245424+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:19Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:19.013460750+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:19Z","logger":"proxy","msg":"Watching child resource","kind":"cert-manager.io/v1, Kind=Issuer","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:19.013460750+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:19Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:19.124269821+00:00 stdout F 2025-12-08T17:57:19.124269821+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:19.124269821+00:00 stdout F 2025-12-08T17:57:19.124269821+00:00 stdout F TASK [servicetelemetry : Create self-signed interconnect certificate] ********** 2025-12-08T17:57:19.124269821+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:18 2025-12-08T17:57:19.124269821+00:00 stdout F 2025-12-08T17:57:19.124269821+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:19.124302911+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:19Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create self-signed interconnect certificate"} 2025-12-08T17:57:19.951316721+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:19Z","logger":"proxy","msg":"Cache miss: cert-manager.io/v1, Kind=Certificate, service-telemetry/default-interconnect-selfsigned"} 2025-12-08T17:57:19.956332902+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:19Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:19.956635640+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:19Z","logger":"proxy","msg":"Watching child resource","kind":"cert-manager.io/v1, Kind=Certificate","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:19.956644380+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:19Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:20.086655431+00:00 stdout F 2025-12-08T17:57:20.086655431+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:20.086655431+00:00 stdout F 2025-12-08T17:57:20.086655431+00:00 stdout F TASK [servicetelemetry : Create default CA interconnect issuer using self-signed interconnect certificate] *** 2025-12-08T17:57:20.086655431+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:36 2025-12-08T17:57:20.086655431+00:00 stdout F 
2025-12-08T17:57:20.086655431+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:20.086693512+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:20Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create default CA interconnect issuer using self-signed interconnect certificate"} 2025-12-08T17:57:20.790751966+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:20Z","logger":"proxy","msg":"Cache miss: cert-manager.io/v1, Kind=Issuer, service-telemetry/default-interconnect-ca"} 2025-12-08T17:57:20.795232922+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:20Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:20.897795838+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:20Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create OpenStack CA certificate using self-signed interconnect certificate"} 2025-12-08T17:57:20.897848059+00:00 stdout F 2025-12-08T17:57:20.897848059+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:20.897868149+00:00 stdout F 2025-12-08T17:57:20.897868149+00:00 stdout F TASK [servicetelemetry : Create OpenStack CA certificate using self-signed interconnect certificate] *** 2025-12-08T17:57:20.897868149+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:49 2025-12-08T17:57:20.897903840+00:00 stdout F 2025-12-08T17:57:20.897903840+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:21.622450447+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:21Z","logger":"proxy","msg":"Cache miss: cert-manager.io/v1, Kind=Certificate, service-telemetry/default-interconnect-openstack-ca"} 2025-12-08T17:57:21.627035137+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:21Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:21.747215442+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:21Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create OpenStack credentials certificate using self-signed interconnect certificate"} 2025-12-08T17:57:21.747287564+00:00 stdout F 2025-12-08T17:57:21.747287564+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:21.747338605+00:00 stdout F 2025-12-08T17:57:21.747338605+00:00 stdout F TASK [servicetelemetry : Create OpenStack credentials certificate using self-signed interconnect certificate] *** 2025-12-08T17:57:21.747338605+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:66 2025-12-08T17:57:21.747362866+00:00 stdout F 2025-12-08T17:57:21.747362866+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:22.417592896+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:57:22Z","logger":"proxy","msg":"Cache miss: cert-manager.io/v1, Kind=Certificate, service-telemetry/default-interconnect-openstack-credentials"} 2025-12-08T17:57:22.422487634+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:22Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:22.551804767+00:00 stdout F 2025-12-08T17:57:22.551804767+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:22.551804767+00:00 stdout F 2025-12-08T17:57:22.551804767+00:00 stdout F TASK [servicetelemetry : Create inter-router CA issuer] ************************ 2025-12-08T17:57:22.551804767+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:84 2025-12-08T17:57:22.551804767+00:00 stdout F 2025-12-08T17:57:22.551804767+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:22.551858348+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:22Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create inter-router CA issuer"} 2025-12-08T17:57:23.269796154+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:23Z","logger":"proxy","msg":"Cache miss: cert-manager.io/v1, Kind=Issuer, service-telemetry/default-interconnect-inter-router-ca"} 2025-12-08T17:57:23.274205859+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:23Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:23.400103242+00:00 stdout F 2025-12-08T17:57:23.400103242+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:23.400103242+00:00 stdout F 2025-12-08T17:57:23.400103242+00:00 stdout F TASK [servicetelemetry : Create inter-router CA certificate] ******************* 2025-12-08T17:57:23.400103242+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:97 2025-12-08T17:57:23.400103242+00:00 stdout F 2025-12-08T17:57:23.400103242+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:23.400133633+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:23Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create inter-router CA certificate"} 2025-12-08T17:57:24.145126824+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:24Z","logger":"proxy","msg":"Cache miss: cert-manager.io/v1, Kind=Certificate, service-telemetry/default-interconnect-inter-router-ca"} 2025-12-08T17:57:24.149171119+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:24Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:24.252769671+00:00 stdout F 2025-12-08T17:57:24.252769671+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:24.252769671+00:00 stdout F 2025-12-08T17:57:24.252769671+00:00 stdout F TASK [servicetelemetry : Create inter-router credentials certificate] ********** 2025-12-08T17:57:24.252769671+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:114 
2025-12-08T17:57:24.252769671+00:00 stdout F 2025-12-08T17:57:24.252769671+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:24.252806552+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:24Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create inter-router credentials certificate"} 2025-12-08T17:57:25.009131759+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:25Z","logger":"proxy","msg":"Cache miss: cert-manager.io/v1, Kind=Certificate, service-telemetry/default-interconnect-inter-router-credentials"} 2025-12-08T17:57:25.013931534+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:25Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:25.151313847+00:00 stdout F 2025-12-08T17:57:25.151313847+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:25.151313847+00:00 stdout F 2025-12-08T17:57:25.151313847+00:00 stdout F TASK [servicetelemetry : Create Interconnect SASL ConfigMap] ******************* 2025-12-08T17:57:25.151313847+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:133 2025-12-08T17:57:25.151313847+00:00 stdout F 2025-12-08T17:57:25.151313847+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:25.151365409+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:25Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create Interconnect SASL ConfigMap"} 2025-12-08T17:57:26.299850964+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:26Z","logger":"proxy","msg":"Cache miss: /v1, Kind=ConfigMap, service-telemetry/default-interconnect-sasl-config"} 2025-12-08T17:57:26.306411174+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:26Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:26.306649921+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:26Z","logger":"proxy","msg":"Watching child resource","kind":"/v1, Kind=ConfigMap","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:26.306678012+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:26Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:26.427818872+00:00 stdout F 2025-12-08T17:57:26.427818872+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:26.427818872+00:00 stdout F 2025-12-08T17:57:26.427818872+00:00 stdout F TASK [servicetelemetry : Get QDR BasicAuth secret] ***************************** 2025-12-08T17:57:26.427818872+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:155 2025-12-08T17:57:26.427818872+00:00 stdout F 2025-12-08T17:57:26.427818872+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:26.427843622+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:26Z","logger":"logging_event_handler","msg":"[playbook task 
start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Get QDR BasicAuth secret"} 2025-12-08T17:57:27.194203040+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:27Z","logger":"proxy","msg":"Cache miss: /v1, Kind=Secret, service-telemetry/default-interconnect-users"} 2025-12-08T17:57:27.304482987+00:00 stdout F 2025-12-08T17:57:27.304482987+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:27.304482987+00:00 stdout F 2025-12-08T17:57:27.304482987+00:00 stdout F TASK [servicetelemetry : Perform a one-time upgrade to the default generated password for QDR BasicAuth] *** 2025-12-08T17:57:27.304482987+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:167 2025-12-08T17:57:27.304482987+00:00 stdout F 2025-12-08T17:57:27.304482987+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:27.304530728+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:27Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Perform a one-time upgrade to the default generated password for QDR BasicAuth"} 2025-12-08T17:57:27.359191363+00:00 stdout F 2025-12-08T17:57:27.359191363+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:27.359191363+00:00 stdout F 2025-12-08T17:57:27.359191363+00:00 stdout F TASK [servicetelemetry : Get the list of QDR pods] ***************************** 2025-12-08T17:57:27.359191363+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:181 2025-12-08T17:57:27.359191363+00:00 stdout F 2025-12-08T17:57:27.359191363+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:27.359229974+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:27Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Get the list of QDR pods"} 2025-12-08T17:57:27.400979263+00:00 stdout F 2025-12-08T17:57:27.400979263+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:27.400979263+00:00 stdout F 2025-12-08T17:57:27.400979263+00:00 stdout F TASK [servicetelemetry : Restart QDR pods to pick up new password] ************* 2025-12-08T17:57:27.400979263+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:190 2025-12-08T17:57:27.400979263+00:00 stdout F 2025-12-08T17:57:27.400979263+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:27.401020504+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:27Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Restart QDR pods to pick up new 
password"} 2025-12-08T17:57:27.500402037+00:00 stdout F 2025-12-08T17:57:27.500402037+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:27.500402037+00:00 stdout F 2025-12-08T17:57:27.500402037+00:00 stdout F TASK [servicetelemetry : Create QDR instance] ********************************** 2025-12-08T17:57:27.500402037+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:268 2025-12-08T17:57:27.500402037+00:00 stdout F 2025-12-08T17:57:27.500402037+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:27.500435798+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:27Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create QDR instance"} 2025-12-08T17:57:28.252862172+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:28Z","logger":"proxy","msg":"Cache miss: interconnectedcloud.github.io/v1alpha1, Kind=Interconnect, service-telemetry/default-interconnect"} 2025-12-08T17:57:28.258169480+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:28Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:28.258737765+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:28Z","logger":"proxy","msg":"Watching child resource","kind":"interconnectedcloud.github.io/v1alpha1, Kind=Interconnect","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:28.258737765+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:28Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:28.371317002+00:00 stdout F 2025-12-08T17:57:28.371317002+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:28.371317002+00:00 stdout F 2025-12-08T17:57:28.371317002+00:00 stdout F TASK [servicetelemetry : Setup Certificates for metrics components] ************ 2025-12-08T17:57:28.371317002+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:26 2025-12-08T17:57:28.371317002+00:00 stdout F 2025-12-08T17:57:28.371317002+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:28.371424205+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:28Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Setup Certificates for metrics components"} 2025-12-08T17:57:28.428547244+00:00 stdout F 2025-12-08T17:57:28.428547244+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:28.428547244+00:00 stdout F 2025-12-08T17:57:28.428547244+00:00 stdout F TASK [servicetelemetry : Create configmap for OAUTH CA certs] ****************** 2025-12-08T17:57:28.428547244+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_certificates.yml:1 2025-12-08T17:57:28.428547244+00:00 stdout F 2025-12-08T17:57:28.428547244+00:00 stdout F ------------------------------------------------------------------------------- 
2025-12-08T17:57:28.428583265+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:28Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create configmap for OAUTH CA certs"} 2025-12-08T17:57:29.156270975+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:29Z","logger":"proxy","msg":"Cache miss: /v1, Kind=ConfigMap, service-telemetry/serving-certs-ca-bundle"} 2025-12-08T17:57:29.161469581+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:29Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:29.272820304+00:00 stdout F 2025-12-08T17:57:29.272820304+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:29.272820304+00:00 stdout F 2025-12-08T17:57:29.272820304+00:00 stdout F TASK [servicetelemetry : Check for existing cookie secret] ********************* 2025-12-08T17:57:29.272820304+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_certificates.yml:12 2025-12-08T17:57:29.272820304+00:00 stdout F 2025-12-08T17:57:29.272820304+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:29.272869906+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:29Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Check for existing cookie secret"} 2025-12-08T17:57:29.919223184+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:29Z","logger":"proxy","msg":"Cache miss: /v1, Kind=Secret, service-telemetry/default-session-secret"} 2025-12-08T17:57:30.021331778+00:00 stdout F 2025-12-08T17:57:30.021331778+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:30.021331778+00:00 stdout F 2025-12-08T17:57:30.021331778+00:00 stdout F TASK [servicetelemetry : Create cookie secret] ********************************* 2025-12-08T17:57:30.021331778+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_certificates.yml:20 2025-12-08T17:57:30.021331778+00:00 stdout F 2025-12-08T17:57:30.021331778+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:30.021425020+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:30Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create cookie secret"} 2025-12-08T17:57:30.689496794+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:30Z","logger":"proxy","msg":"Cache miss: /v1, Kind=Secret, service-telemetry/default-session-secret"} 2025-12-08T17:57:30.694497385+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:30Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:30.694710761+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:30Z","logger":"proxy","msg":"Watching child resource","kind":"/v1, Kind=Secret","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:30.694710761+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:57:30Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:30.859692014+00:00 stdout F 2025-12-08T17:57:30.859692014+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:30.859692014+00:00 stdout F 2025-12-08T17:57:30.859692014+00:00 stdout F TASK [servicetelemetry : Create Prometheus instance] *************************** 2025-12-08T17:57:30.859692014+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:48 2025-12-08T17:57:30.859692014+00:00 stdout F 2025-12-08T17:57:30.859692014+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:30.859723215+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:30Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create Prometheus instance"} 2025-12-08T17:57:30.974272462+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:30Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create ServiceAccount/prometheus-stf with oauth redirect annotation"} 2025-12-08T17:57:30.974329894+00:00 stdout F 2025-12-08T17:57:30.974329894+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:30.974356004+00:00 stdout F 2025-12-08T17:57:30.974356004+00:00 stdout F TASK [servicetelemetry : Create ServiceAccount/prometheus-stf with oauth redirect annotation] *** 2025-12-08T17:57:30.974356004+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:10 2025-12-08T17:57:30.974379325+00:00 stdout F 2025-12-08T17:57:30.974379325+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:31.766013572+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:31Z","logger":"proxy","msg":"Cache miss: /v1, Kind=ServiceAccount, service-telemetry/prometheus-stf"} 2025-12-08T17:57:31.771175687+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:31Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:31.771365481+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:31Z","logger":"proxy","msg":"Watching child resource","kind":"/v1, Kind=ServiceAccount","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:31.771365481+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:31Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:31.892346347+00:00 stdout F 2025-12-08T17:57:31.892346347+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:31.892346347+00:00 stdout F 2025-12-08T17:57:31.892346347+00:00 stdout F TASK [servicetelemetry : Create ClusterRole/prometheus-stf for non-resource URL /metrics access] *** 2025-12-08T17:57:31.892346347+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:21 2025-12-08T17:57:31.892346347+00:00 stdout F 
2025-12-08T17:57:31.892346347+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:31.892380818+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:31Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create ClusterRole/prometheus-stf for non-resource URL /metrics access"} 2025-12-08T17:57:32.628372414+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:32Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:32.792332326+00:00 stdout F 2025-12-08T17:57:32.792332326+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:32.792332326+00:00 stdout F 2025-12-08T17:57:32.792332326+00:00 stdout F TASK [servicetelemetry : Create ClusterRoleBinding/prometheus-stf] ************* 2025-12-08T17:57:32.792332326+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:64 2025-12-08T17:57:32.792332326+00:00 stdout F 2025-12-08T17:57:32.792332326+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:32.792368567+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:32Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create ClusterRoleBinding/prometheus-stf"} 2025-12-08T17:57:33.526300586+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:33Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:33.654406726+00:00 stdout F 2025-12-08T17:57:33.654406726+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:33.654406726+00:00 stdout F 2025-12-08T17:57:33.654406726+00:00 stdout F TASK [servicetelemetry : Create Role/prometheus-stf for Prometheus operations] *** 2025-12-08T17:57:33.654406726+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:83 2025-12-08T17:57:33.654406726+00:00 stdout F 2025-12-08T17:57:33.654406726+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:33.654442547+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:33Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create Role/prometheus-stf for Prometheus operations"} 2025-12-08T17:57:34.486607186+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:34Z","logger":"proxy","msg":"Cache miss: rbac.authorization.k8s.io/v1, Kind=Role, service-telemetry/prometheus-stf"} 2025-12-08T17:57:34.493699659+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:34Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:34.494157821+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:34Z","logger":"proxy","msg":"Watching child resource","kind":"rbac.authorization.k8s.io/v1, Kind=Role","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:34.494157821+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:57:34Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:34.688955956+00:00 stdout F 2025-12-08T17:57:34.688955956+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:34.688955956+00:00 stdout F 2025-12-08T17:57:34.688955956+00:00 stdout F TASK [servicetelemetry : Create RoleBinding/prometheus-stf] ******************** 2025-12-08T17:57:34.688955956+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:152 2025-12-08T17:57:34.688955956+00:00 stdout F 2025-12-08T17:57:34.688955956+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:34.688984346+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:34Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create RoleBinding/prometheus-stf"} 2025-12-08T17:57:35.415289658+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:35Z","logger":"proxy","msg":"Cache miss: rbac.authorization.k8s.io/v1, Kind=RoleBinding, service-telemetry/prometheus-stf"} 2025-12-08T17:57:35.419898298+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:35Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:35.420095603+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:35Z","logger":"proxy","msg":"Watching child resource","kind":"rbac.authorization.k8s.io/v1, Kind=RoleBinding","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:35.420095603+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:35Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:35.559959978+00:00 stdout F 2025-12-08T17:57:35.559959978+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:35.559959978+00:00 stdout F 2025-12-08T17:57:35.559959978+00:00 stdout F TASK [servicetelemetry : Remove old ClusterRoleBinding for prometheus-k8s using CMO roleRef] *** 2025-12-08T17:57:35.559959978+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:172 2025-12-08T17:57:35.559959978+00:00 stdout F 2025-12-08T17:57:35.559959978+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:35.560014519+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:35Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Remove old ClusterRoleBinding for prometheus-k8s using CMO roleRef"} 2025-12-08T17:57:36.489778960+00:00 stdout F 2025-12-08T17:57:36.489778960+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:36.489778960+00:00 stdout F 2025-12-08T17:57:36.489778960+00:00 stdout F TASK [Lookup template] ******************************** 2025-12-08T17:57:36.489778960+00:00 stdout F ok: [localhost] => { 2025-12-08T17:57:36.489778960+00:00 stdout F  "msg": { 
2025-12-08T17:57:36.489778960+00:00 stdout F  "apiVersion": "monitoring.rhobs/v1", 2025-12-08T17:57:36.489778960+00:00 stdout F  "kind": "Prometheus", 2025-12-08T17:57:36.489778960+00:00 stdout F  "metadata": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "labels": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "app.kubernetes.io/managed-by": "observability-operator", 2025-12-08T17:57:36.489778960+00:00 stdout F  "prometheus": "default" 2025-12-08T17:57:36.489778960+00:00 stdout F  }, 2025-12-08T17:57:36.489778960+00:00 stdout F  "name": "default", 2025-12-08T17:57:36.489778960+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:57:36.489778960+00:00 stdout F  }, 2025-12-08T17:57:36.489778960+00:00 stdout F  "spec": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "alerting": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "alertmanagers": [ 2025-12-08T17:57:36.489778960+00:00 stdout F  { 2025-12-08T17:57:36.489778960+00:00 stdout F  "bearerTokenFile": "/var/run/secrets/kubernetes.io/serviceaccount/token", 2025-12-08T17:57:36.489778960+00:00 stdout F  "name": "default-alertmanager-proxy", 2025-12-08T17:57:36.489778960+00:00 stdout F  "namespace": "service-telemetry", 2025-12-08T17:57:36.489778960+00:00 stdout F  "port": "web", 2025-12-08T17:57:36.489778960+00:00 stdout F  "scheme": "https", 2025-12-08T17:57:36.489778960+00:00 stdout F  "tlsConfig": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "caFile": "/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt", 2025-12-08T17:57:36.489778960+00:00 stdout F  "serverName": "default-alertmanager-proxy.service-telemetry.svc" 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  ] 2025-12-08T17:57:36.489778960+00:00 stdout F  }, 2025-12-08T17:57:36.489778960+00:00 stdout F  "configMaps": [ 2025-12-08T17:57:36.489778960+00:00 stdout F  "serving-certs-ca-bundle" 2025-12-08T17:57:36.489778960+00:00 stdout F  ], 2025-12-08T17:57:36.489778960+00:00 stdout F  "containers": [ 2025-12-08T17:57:36.489778960+00:00 stdout F  { 2025-12-08T17:57:36.489778960+00:00 stdout F  "args": [ 2025-12-08T17:57:36.489778960+00:00 stdout F  "-https-address=:9092", 2025-12-08T17:57:36.489778960+00:00 stdout F  "-tls-cert=/etc/tls/private/tls.crt", 2025-12-08T17:57:36.489778960+00:00 stdout F  "-tls-key=/etc/tls/private/tls.key", 2025-12-08T17:57:36.489778960+00:00 stdout F  "-upstream=http://localhost:9090/", 2025-12-08T17:57:36.489778960+00:00 stdout F  "-cookie-secret-file=/etc/proxy/secrets/session_secret", 2025-12-08T17:57:36.489778960+00:00 stdout F  "-openshift-service-account=prometheus-stf", 2025-12-08T17:57:36.489778960+00:00 stdout F  "-openshift-sar={\"namespace\":\"service-telemetry\",\"resource\": \"prometheuses\", \"resourceAPIGroup\":\"monitoring.rhobs\", \"verb\":\"get\"}", 2025-12-08T17:57:36.489778960+00:00 stdout F  "-openshift-delegate-urls={\"/\":{\"namespace\":\"service-telemetry\",\"resource\": \"prometheuses\", \"group\":\"monitoring.rhobs\", \"verb\":\"get\"}}" 2025-12-08T17:57:36.489778960+00:00 stdout F  ], 2025-12-08T17:57:36.489778960+00:00 stdout F  "image": "quay.io/openshift/origin-oauth-proxy:latest", 2025-12-08T17:57:36.489778960+00:00 stdout F  "name": "oauth-proxy", 2025-12-08T17:57:36.489778960+00:00 stdout F  "ports": [ 2025-12-08T17:57:36.489778960+00:00 stdout F  { 2025-12-08T17:57:36.489778960+00:00 stdout F  "containerPort": 9092, 2025-12-08T17:57:36.489778960+00:00 stdout F  "name": "https", 
2025-12-08T17:57:36.489778960+00:00 stdout F  "protocol": "TCP" 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  ], 2025-12-08T17:57:36.489778960+00:00 stdout F  "volumeMounts": [ 2025-12-08T17:57:36.489778960+00:00 stdout F  { 2025-12-08T17:57:36.489778960+00:00 stdout F  "mountPath": "/etc/tls/private", 2025-12-08T17:57:36.489778960+00:00 stdout F  "name": "secret-default-prometheus-proxy-tls" 2025-12-08T17:57:36.489778960+00:00 stdout F  }, 2025-12-08T17:57:36.489778960+00:00 stdout F  { 2025-12-08T17:57:36.489778960+00:00 stdout F  "mountPath": "/etc/proxy/secrets", 2025-12-08T17:57:36.489778960+00:00 stdout F  "name": "secret-default-session-secret" 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  ] 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  ], 2025-12-08T17:57:36.489778960+00:00 stdout F  "image": "quay.io/prometheus/prometheus:latest", 2025-12-08T17:57:36.489778960+00:00 stdout F  "listenLocal": true, 2025-12-08T17:57:36.489778960+00:00 stdout F  "podMetadata": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "annotations": null, 2025-12-08T17:57:36.489778960+00:00 stdout F  "labels": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "prometheus": "default" 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  }, 2025-12-08T17:57:36.489778960+00:00 stdout F  "replicas": 1, 2025-12-08T17:57:36.489778960+00:00 stdout F  "retention": "24h", 2025-12-08T17:57:36.489778960+00:00 stdout F  "ruleSelector": {}, 2025-12-08T17:57:36.489778960+00:00 stdout F  "scrapeConfigSelector": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "matchLabels": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "app": "smart-gateway" 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  }, 2025-12-08T17:57:36.489778960+00:00 stdout F  "secrets": [ 2025-12-08T17:57:36.489778960+00:00 stdout F  "default-prometheus-proxy-tls", 2025-12-08T17:57:36.489778960+00:00 stdout F  "default-session-secret" 2025-12-08T17:57:36.489778960+00:00 stdout F  ], 2025-12-08T17:57:36.489778960+00:00 stdout F  "securityContext": {}, 2025-12-08T17:57:36.489778960+00:00 stdout F  "serviceAccountName": "prometheus-stf", 2025-12-08T17:57:36.489778960+00:00 stdout F  "storage": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "volumeClaimTemplate": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "spec": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "resources": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "requests": { 2025-12-08T17:57:36.489778960+00:00 stdout F  "storage": "20G" 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  }, 2025-12-08T17:57:36.489778960+00:00 stdout F  "storageClassName": "crc-csi-hostpath-provisioner" 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  }, 2025-12-08T17:57:36.489778960+00:00 stdout F  "version": null 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F  } 2025-12-08T17:57:36.489778960+00:00 stdout F } 2025-12-08T17:57:36.489778960+00:00 stdout F 2025-12-08T17:57:36.489778960+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:36.489862992+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:36Z","logger":"logging_event_handler","msg":"[playbook 
debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"894904700568716324","EventData.TaskArgs":""} 2025-12-08T17:57:36.573602986+00:00 stdout F 2025-12-08T17:57:36.573602986+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:36.573602986+00:00 stdout F 2025-12-08T17:57:36.573602986+00:00 stdout F TASK [servicetelemetry : Create an instance of Prometheus] ********************* 2025-12-08T17:57:36.573602986+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:191 2025-12-08T17:57:36.573602986+00:00 stdout F 2025-12-08T17:57:36.573602986+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:36.573641847+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:36Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create an instance of Prometheus"} 2025-12-08T17:57:37.378247662+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:37Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=Prometheus, service-telemetry/default"} 2025-12-08T17:57:37.384129274+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:37Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:37.384546495+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:37Z","logger":"proxy","msg":"Watching child resource","kind":"monitoring.rhobs/v1, Kind=Prometheus","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:37.384546495+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:37Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:37.547387784+00:00 stdout F 2025-12-08T17:57:37.547387784+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:37.547387784+00:00 stdout F 2025-12-08T17:57:37.547387784+00:00 stdout F TASK [servicetelemetry : Ensure no community Prometheus is installed if not using community operator] *** 2025-12-08T17:57:37.547387784+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:197 2025-12-08T17:57:37.547387784+00:00 stdout F 2025-12-08T17:57:37.547387784+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:37.547424565+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:37Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Ensure no community Prometheus is installed if not using community operator"} 2025-12-08T17:57:38.491996457+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:38Z","logger":"proxy","msg":"Cache miss: monitoring.coreos.com/v1, Kind=Prometheus, service-telemetry/default"} 2025-12-08T17:57:38.619628607+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:38Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, 
Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Ensure no rhobs Prometheus is installed if not using it"} 2025-12-08T17:57:38.619699758+00:00 stdout F 2025-12-08T17:57:38.619699758+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:38.619724009+00:00 stdout F 2025-12-08T17:57:38.619724009+00:00 stdout F TASK [servicetelemetry : Ensure no rhobs Prometheus is installed if not using it] *** 2025-12-08T17:57:38.619724009+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:207 2025-12-08T17:57:38.619747330+00:00 stdout F 2025-12-08T17:57:38.619747330+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:38.712079956+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:38Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create service to access the prometheus proxy"} 2025-12-08T17:57:38.712115187+00:00 stdout F 2025-12-08T17:57:38.712115187+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:38.712115187+00:00 stdout F 2025-12-08T17:57:38.712115187+00:00 stdout F TASK [servicetelemetry : Create service to access the prometheus proxy] ******** 2025-12-08T17:57:38.712115187+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:223 2025-12-08T17:57:38.712115187+00:00 stdout F 2025-12-08T17:57:38.712115187+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:39.464832272+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:39Z","logger":"proxy","msg":"Cache miss: /v1, Kind=Service, service-telemetry/default-prometheus-proxy"} 2025-12-08T17:57:39.469938433+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:39Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:39.470307163+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:39Z","logger":"proxy","msg":"Watching child resource","kind":"/v1, Kind=Service","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:39.470307163+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:39Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:39.634848955+00:00 stdout F 2025-12-08T17:57:39.634848955+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:39.634848955+00:00 stdout F 2025-12-08T17:57:39.634848955+00:00 stdout F TASK [servicetelemetry : Create route to access the prometheus proxy] ********** 2025-12-08T17:57:39.634848955+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:234 2025-12-08T17:57:39.634848955+00:00 stdout F 2025-12-08T17:57:39.634848955+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:39.634969758+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:39Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, 
Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create route to access the prometheus proxy"} 2025-12-08T17:57:40.437805239+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:40Z","logger":"proxy","msg":"Cache miss: route.openshift.io/v1, Kind=Route, service-telemetry/default-prometheus-proxy"} 2025-12-08T17:57:40.457472177+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:40Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:40.457614941+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:40Z","logger":"proxy","msg":"Watching child resource","kind":"route.openshift.io/v1, Kind=Route","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:40.457614941+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:40Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:40.574136483+00:00 stdout F 2025-12-08T17:57:40.574136483+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:40.574136483+00:00 stdout F 2025-12-08T17:57:40.574136483+00:00 stdout F TASK [servicetelemetry : Create Prometheus read-only user] ********************* 2025-12-08T17:57:40.574136483+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:50 2025-12-08T17:57:40.574136483+00:00 stdout F 2025-12-08T17:57:40.574136483+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:40.574183764+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:40Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create Prometheus read-only user"} 2025-12-08T17:57:40.612513284+00:00 stdout F 2025-12-08T17:57:40.612513284+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:40.612513284+00:00 stdout F 2025-12-08T17:57:40.612513284+00:00 stdout F TASK [servicetelemetry : Create ServiceAccount/stf-prometheus-reader] ********** 2025-12-08T17:57:40.612513284+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:1 2025-12-08T17:57:40.612540025+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:40Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create ServiceAccount/stf-prometheus-reader"} 2025-12-08T17:57:40.612550185+00:00 stdout F 2025-12-08T17:57:40.612550185+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:41.255854911+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:41Z","logger":"proxy","msg":"Cache miss: /v1, Kind=ServiceAccount, service-telemetry/stf-prometheus-reader"} 2025-12-08T17:57:41.260720127+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:41Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:41.385057670+00:00 stdout F 2025-12-08T17:57:41.385057670+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 
2025-12-08T17:57:41.385057670+00:00 stdout F 2025-12-08T17:57:41.385057670+00:00 stdout F TASK [servicetelemetry : Create prometheus-reader Role] ************************ 2025-12-08T17:57:41.385057670+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:11 2025-12-08T17:57:41.385057670+00:00 stdout F 2025-12-08T17:57:41.385057670+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:41.385112602+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:41Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create prometheus-reader Role"} 2025-12-08T17:57:42.154737763+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:42Z","logger":"proxy","msg":"Cache miss: rbac.authorization.k8s.io/v1, Kind=Role, service-telemetry/prometheus-reader"} 2025-12-08T17:57:42.158996003+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:42Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:42.294994548+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:42Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create prometheus-reader RoleBinding for stf-prometheus-reader"} 2025-12-08T17:57:42.295057660+00:00 stdout F 2025-12-08T17:57:42.295057660+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:42.295078310+00:00 stdout F 2025-12-08T17:57:42.295078310+00:00 stdout F TASK [servicetelemetry : Create prometheus-reader RoleBinding for stf-prometheus-reader] *** 2025-12-08T17:57:42.295078310+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:30 2025-12-08T17:57:42.295096881+00:00 stdout F 2025-12-08T17:57:42.295096881+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:42.948230292+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:42Z","logger":"proxy","msg":"Cache miss: rbac.authorization.k8s.io/v1, Kind=RoleBinding, service-telemetry/stf-prometheus-reader"} 2025-12-08T17:57:42.957890322+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:42Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:43.084912764+00:00 stdout F 2025-12-08T17:57:43.084912764+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:43.084912764+00:00 stdout F 2025-12-08T17:57:43.084912764+00:00 stdout F TASK [servicetelemetry : Create an access token for stf-prometheus-reader] ***** 2025-12-08T17:57:43.084912764+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:47 2025-12-08T17:57:43.084912764+00:00 stdout F 2025-12-08T17:57:43.084912764+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:43.084961036+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:43Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, 
Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create an access token for stf-prometheus-reader"} 2025-12-08T17:57:43.829098849+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:43Z","logger":"proxy","msg":"Cache miss: /v1, Kind=Secret, service-telemetry/stf-prometheus-reader-token"} 2025-12-08T17:57:43.833732398+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:43Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:44.046280711+00:00 stdout F 2025-12-08T17:57:44.046280711+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:44.046280711+00:00 stdout F 2025-12-08T17:57:44.046280711+00:00 stdout F TASK [servicetelemetry : Create Alertmanager instance] ************************* 2025-12-08T17:57:44.046280711+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:54 2025-12-08T17:57:44.046280711+00:00 stdout F 2025-12-08T17:57:44.046280711+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:44.046331773+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:44Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create Alertmanager instance"} 2025-12-08T17:57:44.220402522+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:44Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"894904700568716324","EventData.TaskArgs":""} 2025-12-08T17:57:44.220492074+00:00 stdout F 2025-12-08T17:57:44.220492074+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:44.220534435+00:00 stdout F 2025-12-08T17:57:44.220534435+00:00 stdout F TASK [Lookup template] ******************************** 2025-12-08T17:57:44.220576276+00:00 stdout F ok: [localhost] => { 2025-12-08T17:57:44.220576276+00:00 stdout F  "msg": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "apiVersion": "monitoring.rhobs/v1", 2025-12-08T17:57:44.220576276+00:00 stdout F  "kind": "Alertmanager", 2025-12-08T17:57:44.220576276+00:00 stdout F  "metadata": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "labels": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "alertmanager": "default", 2025-12-08T17:57:44.220576276+00:00 stdout F  "app.kubernetes.io/managed-by": "observability-operator" 2025-12-08T17:57:44.220576276+00:00 stdout F  }, 2025-12-08T17:57:44.220576276+00:00 stdout F  "name": "default", 2025-12-08T17:57:44.220576276+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:57:44.220576276+00:00 stdout F  }, 2025-12-08T17:57:44.220576276+00:00 stdout F  "spec": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "containers": [ 2025-12-08T17:57:44.220576276+00:00 stdout F  { 2025-12-08T17:57:44.220576276+00:00 stdout F  "args": [ 2025-12-08T17:57:44.220576276+00:00 stdout F  "-https-address=:9095", 2025-12-08T17:57:44.220576276+00:00 stdout F  "-tls-cert=/etc/tls/private/tls.crt", 2025-12-08T17:57:44.220576276+00:00 stdout F  "-tls-key=/etc/tls/private/tls.key", 2025-12-08T17:57:44.220576276+00:00 stdout F  "-upstream=http://localhost:9093/", 2025-12-08T17:57:44.220576276+00:00 stdout F  
"-cookie-secret-file=/etc/proxy/secrets/session_secret", 2025-12-08T17:57:44.220576276+00:00 stdout F  "-openshift-service-account=alertmanager-stf", 2025-12-08T17:57:44.220576276+00:00 stdout F  "-openshift-sar={\"namespace\":\"service-telemetry\", \"resource\": \"alertmanagers\", \"resourceAPIGroup\":\"monitoring.rhobs\", \"verb\":\"get\"}", 2025-12-08T17:57:44.220576276+00:00 stdout F  "-openshift-delegate-urls={\"/\": {\"namespace\":\"service-telemetry\", \"resource\": \"alertmanagers\", \"group\":\"monitoring.rhobs\", \"verb\":\"get\"}}" 2025-12-08T17:57:44.220576276+00:00 stdout F  ], 2025-12-08T17:57:44.220576276+00:00 stdout F  "image": "quay.io/openshift/origin-oauth-proxy:latest", 2025-12-08T17:57:44.220576276+00:00 stdout F  "name": "oauth-proxy", 2025-12-08T17:57:44.220576276+00:00 stdout F  "ports": [ 2025-12-08T17:57:44.220576276+00:00 stdout F  { 2025-12-08T17:57:44.220576276+00:00 stdout F  "containerPort": 9095, 2025-12-08T17:57:44.220576276+00:00 stdout F  "name": "https", 2025-12-08T17:57:44.220576276+00:00 stdout F  "protocol": "TCP" 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F  ], 2025-12-08T17:57:44.220576276+00:00 stdout F  "volumeMounts": [ 2025-12-08T17:57:44.220576276+00:00 stdout F  { 2025-12-08T17:57:44.220576276+00:00 stdout F  "mountPath": "/etc/tls/private", 2025-12-08T17:57:44.220576276+00:00 stdout F  "name": "secret-default-alertmanager-proxy-tls" 2025-12-08T17:57:44.220576276+00:00 stdout F  }, 2025-12-08T17:57:44.220576276+00:00 stdout F  { 2025-12-08T17:57:44.220576276+00:00 stdout F  "mountPath": "/etc/proxy/secrets", 2025-12-08T17:57:44.220576276+00:00 stdout F  "name": "secret-default-session-secret" 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F  ] 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F  ], 2025-12-08T17:57:44.220576276+00:00 stdout F  "image": "quay.io/prometheus/alertmanager:latest", 2025-12-08T17:57:44.220576276+00:00 stdout F  "listenLocal": true, 2025-12-08T17:57:44.220576276+00:00 stdout F  "podMetadata": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "labels": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "alertmanager": "default" 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F  }, 2025-12-08T17:57:44.220576276+00:00 stdout F  "replicas": 1, 2025-12-08T17:57:44.220576276+00:00 stdout F  "scrapeConfigSelector": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "matchLabels": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "app": "smart-gateway" 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F  }, 2025-12-08T17:57:44.220576276+00:00 stdout F  "secrets": [ 2025-12-08T17:57:44.220576276+00:00 stdout F  "default-alertmanager-proxy-tls", 2025-12-08T17:57:44.220576276+00:00 stdout F  "default-session-secret" 2025-12-08T17:57:44.220576276+00:00 stdout F  ], 2025-12-08T17:57:44.220576276+00:00 stdout F  "serviceAccountName": "alertmanager-stf", 2025-12-08T17:57:44.220576276+00:00 stdout F  "storage": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "volumeClaimTemplate": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "spec": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "resources": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "requests": { 2025-12-08T17:57:44.220576276+00:00 stdout F  "storage": "20G" 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F  }, 
2025-12-08T17:57:44.220576276+00:00 stdout F  "storageClassName": "crc-csi-hostpath-provisioner" 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F  } 2025-12-08T17:57:44.220576276+00:00 stdout F } 2025-12-08T17:57:44.221278764+00:00 stdout F 2025-12-08T17:57:44.221278764+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:44.424154858+00:00 stdout F 2025-12-08T17:57:44.424154858+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:44.424154858+00:00 stdout F 2025-12-08T17:57:44.424154858+00:00 stdout F TASK [Lookup alertmanager configuration template] ******************************** 2025-12-08T17:57:44.424154858+00:00 stdout F ok: [localhost] => { 2025-12-08T17:57:44.424154858+00:00 stdout F  "msg": { 2025-12-08T17:57:44.424154858+00:00 stdout F  "apiVersion": "v1", 2025-12-08T17:57:44.424154858+00:00 stdout F  "kind": "Secret", 2025-12-08T17:57:44.424154858+00:00 stdout F  "metadata": { 2025-12-08T17:57:44.424154858+00:00 stdout F  "name": "alertmanager-default", 2025-12-08T17:57:44.424154858+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:57:44.424154858+00:00 stdout F  }, 2025-12-08T17:57:44.424154858+00:00 stdout F  "stringData": { 2025-12-08T17:57:44.424154858+00:00 stdout F  "alertmanager.yaml": "global:\n resolve_timeout: 5m\nroute:\n group_by: ['job']\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 12h\n receiver: 'snmp_wh'\nreceivers:\n- name: 'snmp_wh'\n webhook_configs:\n - url: 'http://default-prometheus-webhook-snmp:9099'" 2025-12-08T17:57:44.424154858+00:00 stdout F  }, 2025-12-08T17:57:44.424154858+00:00 stdout F  "type": "Opaque" 2025-12-08T17:57:44.424154858+00:00 stdout F  } 2025-12-08T17:57:44.424154858+00:00 stdout F } 2025-12-08T17:57:44.424154858+00:00 stdout F 2025-12-08T17:57:44.424154858+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:44.424216179+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:44Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"894904700568716324","EventData.TaskArgs":""} 2025-12-08T17:57:44.490686848+00:00 stdout F 2025-12-08T17:57:44.490686848+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:44.490686848+00:00 stdout F 2025-12-08T17:57:44.490686848+00:00 stdout F TASK [servicetelemetry : Create an Alertmanager configuration secret] ********** 2025-12-08T17:57:44.490686848+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:19 2025-12-08T17:57:44.490686848+00:00 stdout F 2025-12-08T17:57:44.490686848+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:44.490753889+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:44Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create an Alertmanager configuration secret"} 
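For readability, the escaped alertmanager.yaml value in the alertmanager-default Secret rendered above expands to the following configuration (re-indented here; keys and values unchanged). It routes every alert group to a single webhook receiver, the SNMP webhook service listening at default-prometheus-webhook-snmp:9099:

    global:
      resolve_timeout: 5m
    route:
      group_by: ['job']
      group_wait: 30s
      group_interval: 5m
      repeat_interval: 12h
      receiver: 'snmp_wh'
    receivers:
    - name: 'snmp_wh'
      webhook_configs:
      - url: 'http://default-prometheus-webhook-snmp:9099'

With this routing, alerts are batched per scrape job (group_by: ['job']) and unresolved alerts are re-sent at most every 12 hours (repeat_interval), which matches the SNMP webhook Deployment created later in this log.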
2025-12-08T17:57:45.289388211+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:45Z","logger":"proxy","msg":"Cache miss: /v1, Kind=Secret, service-telemetry/alertmanager-default"} 2025-12-08T17:57:45.294817111+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:45Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:45.426540445+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:45Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create an instance of Alertmanager"} 2025-12-08T17:57:45.427332676+00:00 stdout F 2025-12-08T17:57:45.427332676+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:45.427332676+00:00 stdout F 2025-12-08T17:57:45.427332676+00:00 stdout F TASK [servicetelemetry : Create an instance of Alertmanager] ******************* 2025-12-08T17:57:45.427332676+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:25 2025-12-08T17:57:45.427332676+00:00 stdout F 2025-12-08T17:57:45.427332676+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:46.246200620+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:46Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=Alertmanager, service-telemetry/default"} 2025-12-08T17:57:46.252491773+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:46Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:46.252676998+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:46Z","logger":"proxy","msg":"Watching child resource","kind":"monitoring.rhobs/v1, Kind=Alertmanager","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:46.252676998+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:46Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:46.411869511+00:00 stdout F 2025-12-08T17:57:46.411869511+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:46.411869511+00:00 stdout F 2025-12-08T17:57:46.411869511+00:00 stdout F TASK [servicetelemetry : Ensure no community Alertmanager is installed if not using community operator] *** 2025-12-08T17:57:46.411869511+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:31 2025-12-08T17:57:46.411869511+00:00 stdout F 2025-12-08T17:57:46.411869511+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:46.411957463+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:46Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Ensure no community Alertmanager is installed if not using community operator"} 2025-12-08T17:57:47.188515795+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:47Z","logger":"proxy","msg":"Cache miss: monitoring.coreos.com/v1, Kind=Alertmanager, service-telemetry/default"} 2025-12-08T17:57:47.311601456+00:00 stdout F 2025-12-08T17:57:47.311601456+00:00 stdout F 
--------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:47.311601456+00:00 stdout F 2025-12-08T17:57:47.311601456+00:00 stdout F TASK [servicetelemetry : Ensure no rhobs Alertmanager is installed if not using it] *** 2025-12-08T17:57:47.311601456+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:41 2025-12-08T17:57:47.311601456+00:00 stdout F 2025-12-08T17:57:47.311601456+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:47.311672558+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:47Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Ensure no rhobs Alertmanager is installed if not using it"} 2025-12-08T17:57:47.338289075+00:00 stdout F 2025-12-08T17:57:47.338289075+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:47.338289075+00:00 stdout F 2025-12-08T17:57:47.338289075+00:00 stdout F TASK [servicetelemetry : Create SNMP traps instance] *************************** 2025-12-08T17:57:47.338289075+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:54 2025-12-08T17:57:47.338289075+00:00 stdout F 2025-12-08T17:57:47.338289075+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:47.338347257+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:47Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create SNMP traps instance"} 2025-12-08T17:57:47.505107307+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:47Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"894904700568716324","EventData.TaskArgs":""} 2025-12-08T17:57:47.505189830+00:00 stdout F 2025-12-08T17:57:47.505189830+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:47.505225400+00:00 stdout F 2025-12-08T17:57:47.505225400+00:00 stdout F TASK [Lookup template] ******************************** 2025-12-08T17:57:47.505257741+00:00 stdout F ok: [localhost] => { 2025-12-08T17:57:47.505257741+00:00 stdout F  "msg": { 2025-12-08T17:57:47.505257741+00:00 stdout F  "apiVersion": "apps/v1", 2025-12-08T17:57:47.505257741+00:00 stdout F  "kind": "Deployment", 2025-12-08T17:57:47.505257741+00:00 stdout F  "metadata": { 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "default-snmp-webhook", 2025-12-08T17:57:47.505257741+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  "spec": { 2025-12-08T17:57:47.505257741+00:00 stdout F  "replicas": 1, 2025-12-08T17:57:47.505257741+00:00 stdout F  "selector": { 2025-12-08T17:57:47.505257741+00:00 stdout F  "matchLabels": { 2025-12-08T17:57:47.505257741+00:00 stdout F  "app": "default-snmp-webhook" 2025-12-08T17:57:47.505257741+00:00 stdout F  } 
2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  "template": { 2025-12-08T17:57:47.505257741+00:00 stdout F  "metadata": { 2025-12-08T17:57:47.505257741+00:00 stdout F  "labels": { 2025-12-08T17:57:47.505257741+00:00 stdout F  "app": "default-snmp-webhook" 2025-12-08T17:57:47.505257741+00:00 stdout F  } 2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  "spec": { 2025-12-08T17:57:47.505257741+00:00 stdout F  "containers": [ 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "env": [ 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "SNMP_COMMUNITY", 2025-12-08T17:57:47.505257741+00:00 stdout F  "value": "public" 2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "SNMP_RETRIES", 2025-12-08T17:57:47.505257741+00:00 stdout F  "value": "5" 2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "SNMP_HOST", 2025-12-08T17:57:47.505257741+00:00 stdout F  "value": "192.168.24.254" 2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "SNMP_PORT", 2025-12-08T17:57:47.505257741+00:00 stdout F  "value": "162" 2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "SNMP_TIMEOUT", 2025-12-08T17:57:47.505257741+00:00 stdout F  "value": "1" 2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "ALERT_OID_LABEL", 2025-12-08T17:57:47.505257741+00:00 stdout F  "value": "oid" 2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "TRAP_OID_PREFIX", 2025-12-08T17:57:47.505257741+00:00 stdout F  "value": "1.3.6.1.4.1.50495.15" 2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "TRAP_DEFAULT_OID", 2025-12-08T17:57:47.505257741+00:00 stdout F  "value": "1.3.6.1.4.1.50495.15.1.2.1" 2025-12-08T17:57:47.505257741+00:00 stdout F  }, 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "TRAP_DEFAULT_SEVERITY", 2025-12-08T17:57:47.505257741+00:00 stdout F  "value": "" 2025-12-08T17:57:47.505257741+00:00 stdout F  } 2025-12-08T17:57:47.505257741+00:00 stdout F  ], 2025-12-08T17:57:47.505257741+00:00 stdout F  "image": "quay.io/infrawatch/prometheus-webhook-snmp:latest", 2025-12-08T17:57:47.505257741+00:00 stdout F  "name": "prometheus-webhook-snmp", 2025-12-08T17:57:47.505257741+00:00 stdout F  "ports": [ 2025-12-08T17:57:47.505257741+00:00 stdout F  { 2025-12-08T17:57:47.505257741+00:00 stdout F  "containerPort": 9099 2025-12-08T17:57:47.505257741+00:00 stdout F  } 2025-12-08T17:57:47.505257741+00:00 stdout F  ] 2025-12-08T17:57:47.505257741+00:00 stdout F  } 2025-12-08T17:57:47.505257741+00:00 stdout F  ] 2025-12-08T17:57:47.505257741+00:00 stdout F  } 2025-12-08T17:57:47.505257741+00:00 stdout F  } 2025-12-08T17:57:47.505257741+00:00 stdout F  } 2025-12-08T17:57:47.505257741+00:00 stdout F  } 
2025-12-08T17:57:47.505257741+00:00 stdout F } 2025-12-08T17:57:47.505338193+00:00 stdout F 2025-12-08T17:57:47.505338193+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:47.573332000+00:00 stdout F 2025-12-08T17:57:47.573332000+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:47.573332000+00:00 stdout F 2025-12-08T17:57:47.573332000+00:00 stdout F TASK [servicetelemetry : Create an instance of snmp webhook] ******************* 2025-12-08T17:57:47.573332000+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_snmp_traps.yml:10 2025-12-08T17:57:47.573332000+00:00 stdout F 2025-12-08T17:57:47.573332000+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:47.573385772+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:47Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create an instance of snmp webhook"} 2025-12-08T17:57:48.406383141+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:48Z","logger":"proxy","msg":"Cache miss: apps/v1, Kind=Deployment, service-telemetry/default-snmp-webhook"} 2025-12-08T17:57:48.411078612+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:48Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:48.411579955+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:48Z","logger":"proxy","msg":"Watching child resource","kind":"apps/v1, Kind=Deployment","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:48.411674608+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:48Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:48.597418179+00:00 stdout F 2025-12-08T17:57:48.597418179+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:48.597418179+00:00 stdout F 2025-12-08T17:57:48.597418179+00:00 stdout F TASK [servicetelemetry : Create an instance of snmp webhook service] *********** 2025-12-08T17:57:48.597418179+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_snmp_traps.yml:20 2025-12-08T17:57:48.597418179+00:00 stdout F 2025-12-08T17:57:48.597418179+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:48.597457880+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:48Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create an instance of snmp webhook service"} 2025-12-08T17:57:49.266240054+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:49Z","logger":"proxy","msg":"Cache miss: /v1, Kind=Service, service-telemetry/default-prometheus-webhook-snmp"} 2025-12-08T17:57:49.270144446+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:49Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:49.420805430+00:00 stdout F 2025-12-08T17:57:49.420805430+00:00 stdout F --------------------------- Ansible Task StdOut 
------------------------------- 2025-12-08T17:57:49.420805430+00:00 stdout F 2025-12-08T17:57:49.420805430+00:00 stdout F TASK [servicetelemetry : Create ServiceAccount/alertmanager-stf with oauth redirect annotation] *** 2025-12-08T17:57:49.420805430+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:70 2025-12-08T17:57:49.420805430+00:00 stdout F 2025-12-08T17:57:49.420805430+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:49.420838991+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:49Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create ServiceAccount/alertmanager-stf with oauth redirect annotation"} 2025-12-08T17:57:50.122074364+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:50Z","logger":"proxy","msg":"Cache miss: /v1, Kind=ServiceAccount, service-telemetry/alertmanager-stf"} 2025-12-08T17:57:50.131360004+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:50Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:50.264341712+00:00 stdout F 2025-12-08T17:57:50.264341712+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:50.264341712+00:00 stdout F 2025-12-08T17:57:50.264341712+00:00 stdout F TASK [servicetelemetry : Create ClusterRole/alertmanager-stf] ****************** 2025-12-08T17:57:50.264341712+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:81 2025-12-08T17:57:50.264341712+00:00 stdout F 2025-12-08T17:57:50.264341712+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:50.264376363+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:50Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create ClusterRole/alertmanager-stf"} 2025-12-08T17:57:50.960378741+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:50Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:51.130106708+00:00 stdout F 2025-12-08T17:57:51.130106708+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:51.130106708+00:00 stdout F 2025-12-08T17:57:51.130106708+00:00 stdout F TASK [servicetelemetry : Create ClusterRoleBinding/alertmanager-stf] *********** 2025-12-08T17:57:51.130106708+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:120 2025-12-08T17:57:51.130106708+00:00 stdout F 2025-12-08T17:57:51.130106708+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:51.130139149+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:51Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create ClusterRoleBinding/alertmanager-stf"} 2025-12-08T17:57:52.138592923+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:57:52Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:52.278220082+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:52Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create Role/alertmanager-stf"} 2025-12-08T17:57:52.278311424+00:00 stdout F 2025-12-08T17:57:52.278311424+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:52.278668143+00:00 stdout F 2025-12-08T17:57:52.278668143+00:00 stdout F TASK [servicetelemetry : Create Role/alertmanager-stf] ************************* 2025-12-08T17:57:52.278668143+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:139 2025-12-08T17:57:52.278691484+00:00 stdout F 2025-12-08T17:57:52.278691484+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:53.415227657+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:53Z","logger":"proxy","msg":"Cache miss: rbac.authorization.k8s.io/v1, Kind=Role, service-telemetry/alertmanager-stf"} 2025-12-08T17:57:53.419291593+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:53Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:53.576588978+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:53Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create RoleBinding/alertmanager-stf"} 2025-12-08T17:57:53.576647760+00:00 stdout F 2025-12-08T17:57:53.576647760+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:53.576672130+00:00 stdout F 2025-12-08T17:57:53.576672130+00:00 stdout F TASK [servicetelemetry : Create RoleBinding/alertmanager-stf] ****************** 2025-12-08T17:57:53.576672130+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:177 2025-12-08T17:57:53.576700581+00:00 stdout F 2025-12-08T17:57:53.576700581+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:54.824049570+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:54Z","logger":"proxy","msg":"Cache miss: rbac.authorization.k8s.io/v1, Kind=RoleBinding, service-telemetry/alertmanager-stf"} 2025-12-08T17:57:54.828773302+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:54Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:55.058347206+00:00 stdout F 2025-12-08T17:57:55.058347206+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:55.058347206+00:00 stdout F 2025-12-08T17:57:55.058347206+00:00 stdout F TASK [servicetelemetry : Create service to access the Alertmanager proxy] ****** 2025-12-08T17:57:55.058347206+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:202 2025-12-08T17:57:55.058347206+00:00 stdout F 2025-12-08T17:57:55.058347206+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:55.058402137+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:57:55Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create service to access the Alertmanager proxy"} 2025-12-08T17:57:55.778429067+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:55Z","logger":"proxy","msg":"Cache miss: /v1, Kind=Service, service-telemetry/default-alertmanager-proxy"} 2025-12-08T17:57:55.783203420+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:55Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:55.978367454+00:00 stdout F 2025-12-08T17:57:55.978367454+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:55.978367454+00:00 stdout F 2025-12-08T17:57:55.978367454+00:00 stdout F TASK [servicetelemetry : Create route to access the Alertmanager proxy] ******** 2025-12-08T17:57:55.978367454+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:213 2025-12-08T17:57:55.978367454+00:00 stdout F 2025-12-08T17:57:55.978367454+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:55.978411325+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:55Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create route to access the Alertmanager proxy"} 2025-12-08T17:57:56.681233470+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:56Z","logger":"proxy","msg":"Cache miss: route.openshift.io/v1, Kind=Route, service-telemetry/default-alertmanager-proxy"} 2025-12-08T17:57:56.687960414+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:56Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:56.812561274+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:56Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Setup Certificates for ElasticSearch"} 2025-12-08T17:57:56.812597325+00:00 stdout F 2025-12-08T17:57:56.812597325+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:56.812597325+00:00 stdout F 2025-12-08T17:57:56.812597325+00:00 stdout F TASK [servicetelemetry : Setup Certificates for ElasticSearch] ***************** 2025-12-08T17:57:56.812597325+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:67 2025-12-08T17:57:56.812597325+00:00 stdout F 2025-12-08T17:57:56.812597325+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:56.841914934+00:00 stdout F 2025-12-08T17:57:56.841914934+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:56.841914934+00:00 stdout F 2025-12-08T17:57:56.841914934+00:00 stdout F TASK [servicetelemetry : Setup ElasticSearch] ********************************** 2025-12-08T17:57:56.841914934+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:70 
2025-12-08T17:57:56.841914934+00:00 stdout F 2025-12-08T17:57:56.841914934+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:56.841947854+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:56Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Setup ElasticSearch"} 2025-12-08T17:57:56.903595838+00:00 stdout F 2025-12-08T17:57:56.903595838+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:56.903595838+00:00 stdout F 2025-12-08T17:57:56.903595838+00:00 stdout F TASK [Get data about clouds] ******************************** 2025-12-08T17:57:56.903595838+00:00 stdout F ok: [localhost] => { 2025-12-08T17:57:56.903595838+00:00 stdout F  "servicetelemetry_vars.clouds": [ 2025-12-08T17:57:56.903595838+00:00 stdout F  { 2025-12-08T17:57:56.903595838+00:00 stdout F  "events": { 2025-12-08T17:57:56.903595838+00:00 stdout F  "collectors": [ 2025-12-08T17:57:56.903595838+00:00 stdout F  { 2025-12-08T17:57:56.903595838+00:00 stdout F  "bridge": { 2025-12-08T17:57:56.903595838+00:00 stdout F  "ring_buffer_count": 15000, 2025-12-08T17:57:56.903595838+00:00 stdout F  "ring_buffer_size": 16384, 2025-12-08T17:57:56.903595838+00:00 stdout F  "verbose": false 2025-12-08T17:57:56.903595838+00:00 stdout F  }, 2025-12-08T17:57:56.903595838+00:00 stdout F  "collector_type": "collectd", 2025-12-08T17:57:56.903595838+00:00 stdout F  "debug_enabled": false, 2025-12-08T17:57:56.903595838+00:00 stdout F  "subscription_address": "collectd/cloud1-notify" 2025-12-08T17:57:56.903595838+00:00 stdout F  }, 2025-12-08T17:57:56.903595838+00:00 stdout F  { 2025-12-08T17:57:56.903595838+00:00 stdout F  "bridge": { 2025-12-08T17:57:56.903595838+00:00 stdout F  "ring_buffer_count": 15000, 2025-12-08T17:57:56.903595838+00:00 stdout F  "ring_buffer_size": 16384, 2025-12-08T17:57:56.903595838+00:00 stdout F  "verbose": false 2025-12-08T17:57:56.903595838+00:00 stdout F  }, 2025-12-08T17:57:56.903595838+00:00 stdout F  "collector_type": "ceilometer", 2025-12-08T17:57:56.903595838+00:00 stdout F  "debug_enabled": false, 2025-12-08T17:57:56.903595838+00:00 stdout F  "subscription_address": "anycast/ceilometer/cloud1-event.sample" 2025-12-08T17:57:56.903595838+00:00 stdout F  } 2025-12-08T17:57:56.903595838+00:00 stdout F  ] 2025-12-08T17:57:56.903595838+00:00 stdout F  }, 2025-12-08T17:57:56.903595838+00:00 stdout F  "metrics": { 2025-12-08T17:57:56.903595838+00:00 stdout F  "collectors": [ 2025-12-08T17:57:56.903595838+00:00 stdout F  { 2025-12-08T17:57:56.903595838+00:00 stdout F  "bridge": { 2025-12-08T17:57:56.903595838+00:00 stdout F  "ring_buffer_count": 15000, 2025-12-08T17:57:56.903595838+00:00 stdout F  "ring_buffer_size": 16384, 2025-12-08T17:57:56.903595838+00:00 stdout F  "verbose": false 2025-12-08T17:57:56.903595838+00:00 stdout F  }, 2025-12-08T17:57:56.903595838+00:00 stdout F  "collector_type": "collectd", 2025-12-08T17:57:56.903595838+00:00 stdout F  "debug_enabled": false, 2025-12-08T17:57:56.903595838+00:00 stdout F  "subscription_address": "collectd/cloud1-telemetry" 2025-12-08T17:57:56.903595838+00:00 stdout F  }, 2025-12-08T17:57:56.903595838+00:00 stdout F  { 2025-12-08T17:57:56.903595838+00:00 stdout F  "bridge": { 2025-12-08T17:57:56.903595838+00:00 stdout F  
"ring_buffer_count": 15000, 2025-12-08T17:57:56.903595838+00:00 stdout F  "ring_buffer_size": 16384, 2025-12-08T17:57:56.903595838+00:00 stdout F  "verbose": false 2025-12-08T17:57:56.903595838+00:00 stdout F  }, 2025-12-08T17:57:56.903595838+00:00 stdout F  "collector_type": "ceilometer", 2025-12-08T17:57:56.903595838+00:00 stdout F  "debug_enabled": false, 2025-12-08T17:57:56.903595838+00:00 stdout F  "subscription_address": "anycast/ceilometer/cloud1-metering.sample" 2025-12-08T17:57:56.903595838+00:00 stdout F  }, 2025-12-08T17:57:56.903595838+00:00 stdout F  { 2025-12-08T17:57:56.903595838+00:00 stdout F  "bridge": { 2025-12-08T17:57:56.903595838+00:00 stdout F  "ring_buffer_count": 15000, 2025-12-08T17:57:56.903595838+00:00 stdout F  "ring_buffer_size": 65535, 2025-12-08T17:57:56.903595838+00:00 stdout F  "verbose": false 2025-12-08T17:57:56.903595838+00:00 stdout F  }, 2025-12-08T17:57:56.903595838+00:00 stdout F  "collector_type": "sensubility", 2025-12-08T17:57:56.903595838+00:00 stdout F  "debug_enabled": false, 2025-12-08T17:57:56.903595838+00:00 stdout F  "subscription_address": "sensubility/cloud1-telemetry" 2025-12-08T17:57:56.903595838+00:00 stdout F  } 2025-12-08T17:57:56.903595838+00:00 stdout F  ] 2025-12-08T17:57:56.903595838+00:00 stdout F  }, 2025-12-08T17:57:56.903595838+00:00 stdout F  "name": "cloud1" 2025-12-08T17:57:56.903595838+00:00 stdout F  } 2025-12-08T17:57:56.903595838+00:00 stdout F  ] 2025-12-08T17:57:56.903595838+00:00 stdout F } 2025-12-08T17:57:56.903595838+00:00 stdout F 2025-12-08T17:57:56.903595838+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:56.903677500+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:56Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"894904700568716324","EventData.TaskArgs":""} 2025-12-08T17:57:56.905896286+00:00 stdout F 2025-12-08T17:57:56.905896286+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:56.905896286+00:00 stdout F 2025-12-08T17:57:56.905896286+00:00 stdout F TASK [servicetelemetry : Loop through cloud instances to setup transport receivers] *** 2025-12-08T17:57:56.905896286+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:83 2025-12-08T17:57:56.905896286+00:00 stdout F 2025-12-08T17:57:56.905896286+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:56.905917337+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:56Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Loop through cloud instances to setup transport receivers"} 2025-12-08T17:57:56.989820475+00:00 stdout F 2025-12-08T17:57:56.989820475+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:56.989820475+00:00 stdout F 2025-12-08T17:57:56.989820475+00:00 stdout F TASK [Cloud collector setup] ******************************** 2025-12-08T17:57:56.989820475+00:00 stdout F ok: [localhost] => { 2025-12-08T17:57:56.989820475+00:00 stdout F  "msg": "Working on cloud {'name': 'cloud1', 'metrics': {'collectors': [{'collector_type': 
'collectd', 'subscription_address': 'collectd/cloud1-telemetry', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}, {'collector_type': 'ceilometer', 'subscription_address': 'anycast/ceilometer/cloud1-metering.sample', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}, {'collector_type': 'sensubility', 'subscription_address': 'sensubility/cloud1-telemetry', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 65535, 'ring_buffer_count': 15000, 'verbose': False}}]}, 'events': {'collectors': [{'collector_type': 'collectd', 'subscription_address': 'collectd/cloud1-notify', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}, {'collector_type': 'ceilometer', 'subscription_address': 'anycast/ceilometer/cloud1-event.sample', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}]}} to setup metrics and events Smart Gateways\n" 2025-12-08T17:57:56.989820475+00:00 stdout F } 2025-12-08T17:57:56.989820475+00:00 stdout F 2025-12-08T17:57:56.989820475+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:56.989963939+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:56Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"894904700568716324","EventData.TaskArgs":""} 2025-12-08T17:57:56.992311640+00:00 stdout F 2025-12-08T17:57:56.992311640+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:56.992311640+00:00 stdout F 2025-12-08T17:57:56.992311640+00:00 stdout F TASK [servicetelemetry : Deploy Metrics Smart Gateway instance for each collector] *** 2025-12-08T17:57:56.992311640+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:6 2025-12-08T17:57:56.992311640+00:00 stdout F 2025-12-08T17:57:56.992311640+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:56.992329560+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:56Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy Metrics Smart Gateway instance for each collector"} 2025-12-08T17:57:57.073588721+00:00 stdout F 2025-12-08T17:57:57.073588721+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:57.073588721+00:00 stdout F 2025-12-08T17:57:57.073588721+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T17:57:57.073588721+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T17:57:57.073588721+00:00 stdout F 2025-12-08T17:57:57.073588721+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:57.073624761+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:57Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, 
Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T17:57:57.763269307+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:57Z","logger":"proxy","msg":"Cache miss: smartgateway.infra.watch/v2, Kind=SmartGateway, service-telemetry/default-cloud1-coll-meter"} 2025-12-08T17:57:57.767782773+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:57Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:57.767782773+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:57Z","logger":"proxy","msg":"Watching child resource","kind":"smartgateway.infra.watch/v2, Kind=SmartGateway","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:57.767782773+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:57Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:57.872939461+00:00 stdout F 2025-12-08T17:57:57.872939461+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:57.872939461+00:00 stdout F 2025-12-08T17:57:57.872939461+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T17:57:57.872939461+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T17:57:57.872939461+00:00 stdout F 2025-12-08T17:57:57.872939461+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:57.872967192+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:57Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T17:57:57.903099710+00:00 stdout F 2025-12-08T17:57:57.903099710+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:57.903099710+00:00 stdout F 2025-12-08T17:57:57.903099710+00:00 stdout F TASK [servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs] *** 2025-12-08T17:57:57.903099710+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:1 2025-12-08T17:57:57.903099710+00:00 stdout F 2025-12-08T17:57:57.903099710+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:57.903127721+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:57Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs"} 2025-12-08T17:57:58.666934422+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:58Z","logger":"proxy","msg":"Cache miss: /v1, Kind=Secret, service-telemetry/prometheus-stf-token"} 2025-12-08T17:57:58.672106906+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:58Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:58.866318465+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:57:58Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create ScrapeConfig to scrape Smart Gateway"} 2025-12-08T17:57:58.866380077+00:00 stdout F 2025-12-08T17:57:58.866380077+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:58.866400337+00:00 stdout F 2025-12-08T17:57:58.866400337+00:00 stdout F TASK [servicetelemetry : Create ScrapeConfig to scrape Smart Gateway] ********** 2025-12-08T17:57:58.866400337+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:64 2025-12-08T17:57:58.866418998+00:00 stdout F 2025-12-08T17:57:58.866418998+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:59.697433836+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:59Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1alpha1, Kind=ScrapeConfig, service-telemetry/default-cloud1-coll-meter"} 2025-12-08T17:57:59.703429651+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:59Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:57:59.703698758+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:59Z","logger":"proxy","msg":"Watching child resource","kind":"monitoring.rhobs/v1alpha1, Kind=ScrapeConfig","enqueue_kind":"infra.watch/v1beta1, Kind=ServiceTelemetry"} 2025-12-08T17:57:59.703740669+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:59Z","msg":"Starting EventSource","controller":"servicetelemetry-controller","source":"kind source: *unstructured.Unstructured"} 2025-12-08T17:57:59.952360235+00:00 stdout F 2025-12-08T17:57:59.952360235+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:59.952360235+00:00 stdout F 2025-12-08T17:57:59.952360235+00:00 stdout F TASK [servicetelemetry : Create additional ScrapeConfig if provided] *********** 2025-12-08T17:57:59.952360235+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:70 2025-12-08T17:57:59.952360235+00:00 stdout F 2025-12-08T17:57:59.952360235+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:59.952437757+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:59Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create additional ScrapeConfig if provided"} 2025-12-08T17:57:59.968235626+00:00 stdout F 2025-12-08T17:57:59.968235626+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:59.968235626+00:00 stdout F 2025-12-08T17:57:59.968235626+00:00 stdout F TASK [servicetelemetry : Create additional ServiceMonitor if provided (legacy)] *** 2025-12-08T17:57:59.968235626+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:77 2025-12-08T17:57:59.968235626+00:00 stdout F 2025-12-08T17:57:59.968235626+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:59.968272916+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:57:59Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create additional ServiceMonitor if provided (legacy)"} 2025-12-08T17:57:59.974154288+00:00 stdout F 2025-12-08T17:57:59.974154288+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:57:59.974154288+00:00 stdout F 2025-12-08T17:57:59.974154288+00:00 stdout F TASK [servicetelemetry : Remove (legacy) default ServiceMonitors] ************** 2025-12-08T17:57:59.974154288+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:84 2025-12-08T17:57:59.974154288+00:00 stdout F 2025-12-08T17:57:59.974154288+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:57:59.974187859+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:59Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Remove (legacy) default ServiceMonitors"} 2025-12-08T17:58:00.820837321+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:00Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=ServiceMonitor, service-telemetry/default-cloud1-coll-meter"} 2025-12-08T17:58:00.956127178+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:00Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T17:58:00.956195999+00:00 stdout F 2025-12-08T17:58:00.956195999+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:00.956217560+00:00 stdout F 2025-12-08T17:58:00.956217560+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T17:58:00.956217560+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T17:58:00.956236180+00:00 stdout F 2025-12-08T17:58:00.956236180+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:01.654494758+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:01Z","logger":"proxy","msg":"Cache miss: smartgateway.infra.watch/v2, Kind=SmartGateway, service-telemetry/default-cloud1-ceil-meter"} 2025-12-08T17:58:01.663393658+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:01Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:58:01.775999778+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:01Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T17:58:01.776633184+00:00 stdout F 2025-12-08T17:58:01.776633184+00:00 stdout F --------------------------- Ansible Task StdOut 
------------------------------- 2025-12-08T17:58:01.776682945+00:00 stdout F 2025-12-08T17:58:01.776682945+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T17:58:01.776682945+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T17:58:01.776735017+00:00 stdout F 2025-12-08T17:58:01.776735017+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:01.812809729+00:00 stdout F 2025-12-08T17:58:01.812809729+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:01.812809729+00:00 stdout F 2025-12-08T17:58:01.812809729+00:00 stdout F TASK [servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs] *** 2025-12-08T17:58:01.812809729+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:1 2025-12-08T17:58:01.812809729+00:00 stdout F 2025-12-08T17:58:01.812809729+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:01.812857300+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:01Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs"} 2025-12-08T17:58:02.630356509+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:02Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/prometheus-stf-token","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"prometheus-stf-token","Parts":["secrets","prometheus-stf-token"]}} 2025-12-08T17:58:02.779043032+00:00 stdout F 2025-12-08T17:58:02.779043032+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:02.779043032+00:00 stdout F 2025-12-08T17:58:02.779043032+00:00 stdout F TASK [servicetelemetry : Create ScrapeConfig to scrape Smart Gateway] ********** 2025-12-08T17:58:02.779043032+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:64 2025-12-08T17:58:02.779043032+00:00 stdout F 2025-12-08T17:58:02.779043032+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:02.779082593+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:02Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create ScrapeConfig to scrape Smart Gateway"} 2025-12-08T17:58:03.514292175+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:03Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1alpha1, Kind=ScrapeConfig, service-telemetry/default-cloud1-ceil-meter"} 2025-12-08T17:58:03.518058693+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:03Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:58:03.636276688+00:00 stdout F 2025-12-08T17:58:03.636276688+00:00 stdout F --------------------------- Ansible Task 
StdOut ------------------------------- 2025-12-08T17:58:03.636276688+00:00 stdout F 2025-12-08T17:58:03.636276688+00:00 stdout F TASK [servicetelemetry : Create additional ScrapeConfig if provided] *********** 2025-12-08T17:58:03.636276688+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:70 2025-12-08T17:58:03.636276688+00:00 stdout F 2025-12-08T17:58:03.636276688+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:03.636310289+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:03Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create additional ScrapeConfig if provided"} 2025-12-08T17:58:03.676005385+00:00 stdout F 2025-12-08T17:58:03.676005385+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:03.676005385+00:00 stdout F 2025-12-08T17:58:03.676005385+00:00 stdout F TASK [servicetelemetry : Create additional ServiceMonitor if provided (legacy)] *** 2025-12-08T17:58:03.676005385+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:77 2025-12-08T17:58:03.676005385+00:00 stdout F 2025-12-08T17:58:03.676005385+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:03.676042976+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:03Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create additional ServiceMonitor if provided (legacy)"} 2025-12-08T17:58:03.715427314+00:00 stdout F 2025-12-08T17:58:03.715427314+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:03.715427314+00:00 stdout F 2025-12-08T17:58:03.715427314+00:00 stdout F TASK [servicetelemetry : Remove (legacy) default ServiceMonitors] ************** 2025-12-08T17:58:03.715427314+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:84 2025-12-08T17:58:03.715427314+00:00 stdout F 2025-12-08T17:58:03.715427314+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:03.715512406+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:03Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Remove (legacy) default ServiceMonitors"} 2025-12-08T17:58:04.519427053+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:04Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=ServiceMonitor, service-telemetry/default-cloud1-ceil-meter"} 2025-12-08T17:58:04.679279955+00:00 stdout F 2025-12-08T17:58:04.679279955+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:04.679279955+00:00 stdout F 2025-12-08T17:58:04.679279955+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 
2025-12-08T17:58:04.679279955+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T17:58:04.679279955+00:00 stdout F 2025-12-08T17:58:04.679279955+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:04.679311796+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:04Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T17:58:05.444957505+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:05Z","logger":"proxy","msg":"Cache miss: smartgateway.infra.watch/v2, Kind=SmartGateway, service-telemetry/default-cloud1-sens-meter"} 2025-12-08T17:58:05.449681956+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:05Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:58:05.556932199+00:00 stdout F 2025-12-08T17:58:05.556932199+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:05.556932199+00:00 stdout F 2025-12-08T17:58:05.556932199+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T17:58:05.556932199+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T17:58:05.556932199+00:00 stdout F 2025-12-08T17:58:05.556932199+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:05.556961600+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:05Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T17:58:05.601455120+00:00 stdout F 2025-12-08T17:58:05.601455120+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:05.601455120+00:00 stdout F 2025-12-08T17:58:05.601455120+00:00 stdout F TASK [servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs] *** 2025-12-08T17:58:05.601455120+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:1 2025-12-08T17:58:05.601455120+00:00 stdout F 2025-12-08T17:58:05.601455120+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:05.601545142+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:05Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs"} 2025-12-08T17:58:06.353639010+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:06Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/prometheus-stf-token","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"prometheus-stf-token","Parts":["secrets","prometheus-stf-token"]}} 2025-12-08T17:58:06.532616046+00:00 stdout F 2025-12-08T17:58:06.532616046+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:06.532616046+00:00 stdout F 2025-12-08T17:58:06.532616046+00:00 stdout F TASK [servicetelemetry : Create ScrapeConfig to scrape Smart Gateway] ********** 2025-12-08T17:58:06.532616046+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:64 2025-12-08T17:58:06.532616046+00:00 stdout F 2025-12-08T17:58:06.532616046+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:06.532651527+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:06Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create ScrapeConfig to scrape Smart Gateway"} 2025-12-08T17:58:07.307886824+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:07Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1alpha1, Kind=ScrapeConfig, service-telemetry/default-cloud1-sens-meter"} 2025-12-08T17:58:07.312241006+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:07Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:58:07.473340560+00:00 stdout F 2025-12-08T17:58:07.473340560+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:07.473340560+00:00 stdout F 2025-12-08T17:58:07.473340560+00:00 stdout F TASK [servicetelemetry : Create additional ScrapeConfig if provided] *********** 2025-12-08T17:58:07.473340560+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:70 2025-12-08T17:58:07.473340560+00:00 stdout F 2025-12-08T17:58:07.473340560+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:07.473370021+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:07Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create additional ScrapeConfig if provided"} 2025-12-08T17:58:07.497921044+00:00 stdout F 2025-12-08T17:58:07.497921044+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:07.497921044+00:00 stdout F 2025-12-08T17:58:07.497921044+00:00 stdout F TASK [servicetelemetry : Create additional ServiceMonitor if provided (legacy)] *** 2025-12-08T17:58:07.497921044+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:77 2025-12-08T17:58:07.497921044+00:00 stdout F 2025-12-08T17:58:07.497921044+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:07.497952145+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:07Z","logger":"logging_event_handler","msg":"[playbook task 
start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Create additional ServiceMonitor if provided (legacy)"} 2025-12-08T17:58:07.526924835+00:00 stdout F 2025-12-08T17:58:07.526924835+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:07.526924835+00:00 stdout F 2025-12-08T17:58:07.526924835+00:00 stdout F TASK [servicetelemetry : Remove (legacy) default ServiceMonitors] ************** 2025-12-08T17:58:07.526924835+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:84 2025-12-08T17:58:07.526924835+00:00 stdout F 2025-12-08T17:58:07.526924835+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:07.526956065+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:07Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Remove (legacy) default ServiceMonitors"} 2025-12-08T17:58:08.345956083+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:08Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=ServiceMonitor, service-telemetry/default-cloud1-sens-meter"} 2025-12-08T17:58:08.461083128+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:08Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Lookup Elasticsearch BasicAuth"} 2025-12-08T17:58:08.461136079+00:00 stdout F 2025-12-08T17:58:08.461136079+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:08.461161110+00:00 stdout F 2025-12-08T17:58:08.461161110+00:00 stdout F TASK [servicetelemetry : Lookup Elasticsearch BasicAuth] *********************** 2025-12-08T17:58:08.461161110+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:24 2025-12-08T17:58:08.461183401+00:00 stdout F 2025-12-08T17:58:08.461183401+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:09.224532030+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:09Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/elasticsearch-es-elastic-user","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"elasticsearch-es-elastic-user","Parts":["secrets","elasticsearch-es-elastic-user"]}} 2025-12-08T17:58:09.607085247+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:09Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Get the Elasticsearch TLS materials secret"} 2025-12-08T17:58:09.607189760+00:00 stdout F 2025-12-08T17:58:09.607189760+00:00 stdout F --------------------------- Ansible Task StdOut 
------------------------------- 2025-12-08T17:58:09.607218481+00:00 stdout F 2025-12-08T17:58:09.607218481+00:00 stdout F TASK [servicetelemetry : Get the Elasticsearch TLS materials secret] *********** 2025-12-08T17:58:09.607218481+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:52 2025-12-08T17:58:09.607252952+00:00 stdout F 2025-12-08T17:58:09.607252952+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:10.295848419+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:10Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/elasticsearch-es-cert","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"elasticsearch-es-cert","Parts":["secrets","elasticsearch-es-cert"]}} 2025-12-08T17:58:10.422578054+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:10Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Load dummy certs"} 2025-12-08T17:58:10.422645267+00:00 stdout F 2025-12-08T17:58:10.422645267+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:10.422670467+00:00 stdout F 2025-12-08T17:58:10.422670467+00:00 stdout F TASK [servicetelemetry : Load dummy certs] ************************************* 2025-12-08T17:58:10.422670467+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:62 2025-12-08T17:58:10.422707418+00:00 stdout F 2025-12-08T17:58:10.422707418+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:10.512339435+00:00 stdout F 2025-12-08T17:58:10.512339435+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:10.512339435+00:00 stdout F 2025-12-08T17:58:10.512339435+00:00 stdout F TASK [servicetelemetry : Augment the secret with dummy TLS cert/key if no TLS user auth material provided] *** 2025-12-08T17:58:10.512339435+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:66 2025-12-08T17:58:10.512339435+00:00 stdout F 2025-12-08T17:58:10.512339435+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:10.512372916+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:10Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Augment the secret with dummy TLS cert/key if no TLS user auth material provided"} 2025-12-08T17:58:11.295528616+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:11Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/elasticsearch-es-cert","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"elasticsearch-es-cert","Parts":["secrets","elasticsearch-es-cert"]}} 
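Purely as an illustration (none of the following appears in the job output): the proxy entries above show the operator reading the elasticsearch-es-elastic-user and elasticsearch-es-cert secrets before the dummy-cert augmentation step. Below is a minimal read-only sketch with the Python kubernetes client that repeats those two reads; the namespace and secret names are copied from the log, while kubeconfig access and the script itself are assumptions.

```python
# Hypothetical check, not part of the CI job: confirm the two secrets the
# operator consumed are present and list their keys. Namespace and secret
# names come from the proxy log lines above; cluster access is assumed.
from kubernetes import client, config

config.load_kube_config()  # use load_incluster_config() when running in-cluster
core = client.CoreV1Api()

namespace = "service-telemetry"
for name in ("elasticsearch-es-elastic-user", "elasticsearch-es-cert"):
    secret = core.read_namespaced_secret(name, namespace)
    print(f"{name}: keys={sorted((secret.data or {}).keys())}")
```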
2025-12-08T17:58:11.426147403+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:11Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy Events Smart Gateway instance for each collector"} 2025-12-08T17:58:11.426203924+00:00 stdout F 2025-12-08T17:58:11.426203924+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:11.426230775+00:00 stdout F 2025-12-08T17:58:11.426230775+00:00 stdout F TASK [servicetelemetry : Deploy Events Smart Gateway instance for each collector] *** 2025-12-08T17:58:11.426230775+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:78 2025-12-08T17:58:11.426254346+00:00 stdout F 2025-12-08T17:58:11.426254346+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:11.624175240+00:00 stdout F 2025-12-08T17:58:11.624175240+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:11.624175240+00:00 stdout F 2025-12-08T17:58:11.624175240+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T17:58:11.624175240+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T17:58:11.624175240+00:00 stdout F 2025-12-08T17:58:11.624175240+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:11.624219081+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:11Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T17:58:12.387112370+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:12Z","logger":"proxy","msg":"Cache miss: smartgateway.infra.watch/v2, Kind=SmartGateway, service-telemetry/default-cloud1-coll-event"} 2025-12-08T17:58:12.389956273+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:12Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:58:12.520404564+00:00 stdout F 2025-12-08T17:58:12.520404564+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:12.520404564+00:00 stdout F 2025-12-08T17:58:12.520404564+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T17:58:12.520404564+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T17:58:12.520404564+00:00 stdout F 2025-12-08T17:58:12.520404564+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:12.520433995+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:12Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T17:58:12.625558912+00:00 stdout F 2025-12-08T17:58:12.625558912+00:00 
stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:12.625558912+00:00 stdout F 2025-12-08T17:58:12.625558912+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T17:58:12.625558912+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T17:58:12.625558912+00:00 stdout F 2025-12-08T17:58:12.625558912+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:12.625583823+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:12Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T17:58:13.514932008+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:13Z","logger":"proxy","msg":"Cache miss: smartgateway.infra.watch/v2, Kind=SmartGateway, service-telemetry/default-cloud1-ceil-event"} 2025-12-08T17:58:13.521197591+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:13Z","logger":"proxy","msg":"Injecting owner reference"} 2025-12-08T17:58:13.649005954+00:00 stdout F 2025-12-08T17:58:13.649005954+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:13.649005954+00:00 stdout F 2025-12-08T17:58:13.649005954+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T17:58:13.649005954+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T17:58:13.649005954+00:00 stdout F 2025-12-08T17:58:13.649005954+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:13.649044775+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T17:58:13.745890688+00:00 stdout F 2025-12-08T17:58:13.745890688+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:13.745890688+00:00 stdout F 2025-12-08T17:58:13.745890688+00:00 stdout F TASK [servicetelemetry : Start graphing component plays] *********************** 2025-12-08T17:58:13.745890688+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:98 2025-12-08T17:58:13.745890688+00:00 stdout F 2025-12-08T17:58:13.745890688+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:13.745924159+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Start graphing component plays"} 2025-12-08T17:58:13.762355164+00:00 stdout F 2025-12-08T17:58:13.762355164+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:13.762355164+00:00 
stdout F 2025-12-08T17:58:13.762355164+00:00 stdout F TASK [servicetelemetry : Post-setup] ******************************************* 2025-12-08T17:58:13.762355164+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:104 2025-12-08T17:58:13.762355164+00:00 stdout F 2025-12-08T17:58:13.762355164+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:13.762380914+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Post-setup"} 2025-12-08T17:58:13.888160985+00:00 stdout F 2025-12-08T17:58:13.888160985+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:13.888160985+00:00 stdout F 2025-12-08T17:58:13.888160985+00:00 stdout F TASK [servicetelemetry : Remove unlisted Smart Gateway] ************************ 2025-12-08T17:58:13.888160985+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/post.yml:20 2025-12-08T17:58:13.888160985+00:00 stdout F 2025-12-08T17:58:13.888160985+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:13.888234767+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"894904700568716324","EventData.Name":"servicetelemetry : Remove unlisted Smart Gateway"} 2025-12-08T17:58:14.242830711+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:14Z","logger":"runner","msg":"Ansible-runner exited successfully","job":"894904700568716324","name":"default","namespace":"service-telemetry"} 2025-12-08T17:58:14.243213051+00:00 stdout F 2025-12-08T17:58:14.243213051+00:00 stdout F ----- Ansible Task Status Event StdOut (infra.watch/v1beta1, Kind=ServiceTelemetry, default/service-telemetry) ----- 2025-12-08T17:58:14.243213051+00:00 stdout F 2025-12-08T17:58:14.243213051+00:00 stdout F 2025-12-08T17:58:14.243213051+00:00 stdout F PLAY RECAP ********************************************************************* 2025-12-08T17:58:14.243213051+00:00 stdout F localhost : ok=127  changed=45  unreachable=0 failed=0 skipped=36  rescued=0 ignored=0 2025-12-08T17:58:14.243213051+00:00 stdout F 2025-12-08T17:58:14.243213051+00:00 stdout F ---------- 2025-12-08T17:58:14.257459839+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:14Z","logger":"KubeAPIWarningLogger","msg":"unknown field \"status.conditions[0].ansibleResult\""} 2025-12-08T17:58:14.257459839+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:14Z","logger":"KubeAPIWarningLogger","msg":"unknown field \"status.conditions[1].message\""} 2025-12-08T17:58:14.257459839+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:14Z","logger":"KubeAPIWarningLogger","msg":"unknown field \"status.conditions[2].message\""} 2025-12-08T17:58:15.089360490+00:00 stdout F 2025-12-08T17:58:15.089360490+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:15.089360490+00:00 stdout F 2025-12-08T17:58:15.089360490+00:00 stdout F TASK [Installing service telemetry] ******************************** 
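The PLAY RECAP above (ok=127 changed=45 failed=0 ...) is the per-reconcile summary emitted before the next job (4191063253195287734) begins. As a hedged aside, here is a small helper one could use to pull those counters out of a log stream like this and gate CI on failed > 0; only the recap format is taken from the log, the parsing logic itself is an assumption.

```python
# Hypothetical helper, not part of the job: extract PLAY RECAP counters
# (ok/changed/unreachable/failed/...) from an ansible-operator log stream.
import re
import sys

COUNTER = re.compile(r"(\w+)=(\d+)")

def recap_counters(lines):
    """Return the counters from the last recap-style line seen in `lines`."""
    counters = {}
    for line in lines:
        found = {k: int(v) for k, v in COUNTER.findall(line)}
        if "ok" in found and "failed" in found:  # looks like a PLAY RECAP line
            counters = found
    return counters

if __name__ == "__main__":
    stats = recap_counters(sys.stdin)
    print(stats)  # e.g. {'ok': 127, 'changed': 45, ..., 'failed': 0, ...}
    sys.exit(1 if stats.get("failed", 0) else 0)
```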
2025-12-08T17:58:15.089360490+00:00 stdout F ok: [localhost] => { 2025-12-08T17:58:15.089360490+00:00 stdout F  "msg": "INSTALLING SERVICE TELEMETRY" 2025-12-08T17:58:15.089360490+00:00 stdout F } 2025-12-08T17:58:15.089360490+00:00 stdout F 2025-12-08T17:58:15.089360490+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:15.089463293+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:15Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"4191063253195287734","EventData.TaskArgs":""} 2025-12-08T17:58:15.092391819+00:00 stdout F 2025-12-08T17:58:15.092391819+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:15.092391819+00:00 stdout F 2025-12-08T17:58:15.092391819+00:00 stdout F TASK [servicetelemetry : Pre-setup] ******************************************** 2025-12-08T17:58:15.092391819+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:7 2025-12-08T17:58:15.092391819+00:00 stdout F 2025-12-08T17:58:15.092391819+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:15.092414999+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:15Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Pre-setup"} 2025-12-08T17:58:15.166735391+00:00 stdout F 2025-12-08T17:58:15.166735391+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:15.166735391+00:00 stdout F 2025-12-08T17:58:15.166735391+00:00 stdout F TASK [servicetelemetry : Clear the fact cache before looking up cluster information] *** 2025-12-08T17:58:15.166735391+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:1 2025-12-08T17:58:15.166735391+00:00 stdout F 2025-12-08T17:58:15.166735391+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:15.166771061+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:15Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Clear the fact cache before looking up cluster information"} 2025-12-08T17:58:15.638577155+00:00 stdout F 2025-12-08T17:58:15.638577155+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:15.638577155+00:00 stdout F 2025-12-08T17:58:15.638577155+00:00 stdout F TASK [Show existing API groups available to us] ******************************** 2025-12-08T17:58:15.638577155+00:00 stdout F ok: [localhost] => { 2025-12-08T17:58:15.638577155+00:00 stdout F  "api_groups": [ 2025-12-08T17:58:15.638577155+00:00 stdout F  "", 2025-12-08T17:58:15.638577155+00:00 stdout F  "apiregistration.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "apps", 2025-12-08T17:58:15.638577155+00:00 stdout F  "events.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "authentication.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  
"authorization.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "autoscaling", 2025-12-08T17:58:15.638577155+00:00 stdout F  "batch", 2025-12-08T17:58:15.638577155+00:00 stdout F  "certificates.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "networking.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "policy", 2025-12-08T17:58:15.638577155+00:00 stdout F  "rbac.authorization.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "storage.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "admissionregistration.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "apiextensions.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "scheduling.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "coordination.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "node.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "discovery.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "flowcontrol.apiserver.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "apps.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "authorization.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "build.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "image.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "oauth.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "project.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "quota.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "route.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "security.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "template.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "user.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "packages.operators.coreos.com", 2025-12-08T17:58:15.638577155+00:00 stdout F  "config.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "operator.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "acme.cert-manager.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "apiserver.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "apm.k8s.elastic.co", 2025-12-08T17:58:15.638577155+00:00 stdout F  "autoscaling.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "cert-manager.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "console.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "elasticsearch.k8s.elastic.co", 2025-12-08T17:58:15.638577155+00:00 stdout F  "enterprisesearch.k8s.elastic.co", 2025-12-08T17:58:15.638577155+00:00 stdout F  "gateway.networking.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "imageregistry.operator.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "ingress.operator.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "k8s.cni.cncf.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "k8s.ovn.org", 2025-12-08T17:58:15.638577155+00:00 stdout F  "kibana.k8s.elastic.co", 2025-12-08T17:58:15.638577155+00:00 stdout F  "machine.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "machineconfiguration.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "monitoring.coreos.com", 2025-12-08T17:58:15.638577155+00:00 stdout F  "monitoring.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "monitoring.rhobs", 2025-12-08T17:58:15.638577155+00:00 stdout F  "network.operator.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "operators.coreos.com", 2025-12-08T17:58:15.638577155+00:00 stdout F  "samples.operator.openshift.io", 
2025-12-08T17:58:15.638577155+00:00 stdout F  "security.internal.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "agent.k8s.elastic.co", 2025-12-08T17:58:15.638577155+00:00 stdout F  "autoscaling.k8s.elastic.co", 2025-12-08T17:58:15.638577155+00:00 stdout F  "controlplane.operator.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "interconnectedcloud.github.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "ipam.cluster.x-k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "logstash.k8s.elastic.co", 2025-12-08T17:58:15.638577155+00:00 stdout F  "maps.k8s.elastic.co", 2025-12-08T17:58:15.638577155+00:00 stdout F  "migration.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "observability.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "perses.dev", 2025-12-08T17:58:15.638577155+00:00 stdout F  "policy.networking.k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "stackconfigpolicy.k8s.elastic.co", 2025-12-08T17:58:15.638577155+00:00 stdout F  "whereabouts.cni.cncf.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "infrastructure.cluster.x-k8s.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "beat.k8s.elastic.co", 2025-12-08T17:58:15.638577155+00:00 stdout F  "helm.openshift.io", 2025-12-08T17:58:15.638577155+00:00 stdout F  "infra.watch", 2025-12-08T17:58:15.638577155+00:00 stdout F  "smartgateway.infra.watch" 2025-12-08T17:58:15.638577155+00:00 stdout F  ] 2025-12-08T17:58:15.638577155+00:00 stdout F } 2025-12-08T17:58:15.638577155+00:00 stdout F 2025-12-08T17:58:15.638577155+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:15.638656627+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:15Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"4191063253195287734","EventData.TaskArgs":""} 2025-12-08T17:58:15.700183418+00:00 stdout F 2025-12-08T17:58:15.700183418+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:15.700183418+00:00 stdout F 2025-12-08T17:58:15.700183418+00:00 stdout F TASK [Indicate what kind of cluster we are in (OpenShift or Kubernetes).] 
******************************** 2025-12-08T17:58:15.700183418+00:00 stdout F ok: [localhost] => { 2025-12-08T17:58:15.700183418+00:00 stdout F  "msg": "CLUSTER TYPE: is_openshift=True; is_k8s=False" 2025-12-08T17:58:15.700183418+00:00 stdout F } 2025-12-08T17:58:15.700183418+00:00 stdout F 2025-12-08T17:58:15.700183418+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:15.700219688+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:15Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"4191063253195287734","EventData.TaskArgs":""} 2025-12-08T17:58:15.701959344+00:00 stdout F 2025-12-08T17:58:15.701959344+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:15.701959344+00:00 stdout F 2025-12-08T17:58:15.701959344+00:00 stdout F TASK [servicetelemetry : Fail when can't determine type of cluster] ************ 2025-12-08T17:58:15.701959344+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:27 2025-12-08T17:58:15.701959344+00:00 stdout F 2025-12-08T17:58:15.701959344+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:15.701972564+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:15Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Fail when can't determine type of cluster"} 2025-12-08T17:58:16.262053769+00:00 stdout F 2025-12-08T17:58:16.262053769+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:16.262053769+00:00 stdout F 2025-12-08T17:58:16.262053769+00:00 stdout F TASK [Print some debug information] ******************************** 2025-12-08T17:58:16.262053769+00:00 stdout F ok: [localhost] => { 2025-12-08T17:58:16.262053769+00:00 stdout F  "msg": [ 2025-12-08T17:58:16.262053769+00:00 stdout F  "ServiceTelemetry Variables", 2025-12-08T17:58:16.262053769+00:00 stdout F  "--------------------------------------------", 2025-12-08T17:58:16.262053769+00:00 stdout F  "alerting:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " alertmanager:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " deployment_size: 1", 2025-12-08T17:58:16.262053769+00:00 stdout F  " receivers:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " snmp_traps:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " alert_oid_label: oid", 2025-12-08T17:58:16.262053769+00:00 stdout F  " community: public", 2025-12-08T17:58:16.262053769+00:00 stdout F  " enabled: true", 2025-12-08T17:58:16.262053769+00:00 stdout F  " port: 162", 2025-12-08T17:58:16.262053769+00:00 stdout F  " retries: 5", 2025-12-08T17:58:16.262053769+00:00 stdout F  " target: 192.168.24.254", 2025-12-08T17:58:16.262053769+00:00 stdout F  " timeout: 1", 2025-12-08T17:58:16.262053769+00:00 stdout F  " trap_default_oid: 1.3.6.1.4.1.50495.15.1.2.1", 2025-12-08T17:58:16.262053769+00:00 stdout F  " trap_default_severity: ''", 2025-12-08T17:58:16.262053769+00:00 stdout F  " trap_oid_prefix: 1.3.6.1.4.1.50495.15", 2025-12-08T17:58:16.262053769+00:00 stdout F  " storage:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " persistent:", 
2025-12-08T17:58:16.262053769+00:00 stdout F  " pvc_storage_request: 20G", 2025-12-08T17:58:16.262053769+00:00 stdout F  " storage_class: crc-csi-hostpath-provisioner", 2025-12-08T17:58:16.262053769+00:00 stdout F  " strategy: persistent", 2025-12-08T17:58:16.262053769+00:00 stdout F  " enabled: true", 2025-12-08T17:58:16.262053769+00:00 stdout F  "backends:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " events:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " elasticsearch:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " certificates:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ca_cert_duration: 70080h", 2025-12-08T17:58:16.262053769+00:00 stdout F  " endpoint_cert_duration: 70080h", 2025-12-08T17:58:16.262053769+00:00 stdout F  " enabled: true", 2025-12-08T17:58:16.262053769+00:00 stdout F  " forwarding:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " host_url: https://elasticsearch-es-http:9200", 2025-12-08T17:58:16.262053769+00:00 stdout F  " tls_secret_name: elasticsearch-es-cert", 2025-12-08T17:58:16.262053769+00:00 stdout F  " tls_server_name: ''", 2025-12-08T17:58:16.262053769+00:00 stdout F  " use_basic_auth: true", 2025-12-08T17:58:16.262053769+00:00 stdout F  " use_tls: true", 2025-12-08T17:58:16.262053769+00:00 stdout F  " user_secret_name: elasticsearch-es-elastic-user", 2025-12-08T17:58:16.262053769+00:00 stdout F  " node_count: 1", 2025-12-08T17:58:16.262053769+00:00 stdout F  " storage:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " persistent:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " pvc_storage_request: 20Gi", 2025-12-08T17:58:16.262053769+00:00 stdout F  " storage_class: ''", 2025-12-08T17:58:16.262053769+00:00 stdout F  " strategy: persistent", 2025-12-08T17:58:16.262053769+00:00 stdout F  " version: 7.16.1", 2025-12-08T17:58:16.262053769+00:00 stdout F  " metrics:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " prometheus:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " deployment_size: 1", 2025-12-08T17:58:16.262053769+00:00 stdout F  " enabled: true", 2025-12-08T17:58:16.262053769+00:00 stdout F  " scrape_interval: 30s", 2025-12-08T17:58:16.262053769+00:00 stdout F  " storage:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " persistent:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " pvc_storage_request: 20G", 2025-12-08T17:58:16.262053769+00:00 stdout F  " storage_class: crc-csi-hostpath-provisioner", 2025-12-08T17:58:16.262053769+00:00 stdout F  " retention: 24h", 2025-12-08T17:58:16.262053769+00:00 stdout F  " strategy: persistent", 2025-12-08T17:58:16.262053769+00:00 stdout F  "clouds:", 2025-12-08T17:58:16.262053769+00:00 stdout F  "- events:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " collectors:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " - bridge:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:58:16.262053769+00:00 stdout F  " verbose: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " collector_type: collectd", 2025-12-08T17:58:16.262053769+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " subscription_address: collectd/cloud1-notify", 2025-12-08T17:58:16.262053769+00:00 stdout F  " - bridge:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:58:16.262053769+00:00 stdout F  " verbose: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " 
collector_type: ceilometer", 2025-12-08T17:58:16.262053769+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " subscription_address: anycast/ceilometer/cloud1-event.sample", 2025-12-08T17:58:16.262053769+00:00 stdout F  " metrics:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " collectors:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " - bridge:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:58:16.262053769+00:00 stdout F  " verbose: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " collector_type: collectd", 2025-12-08T17:58:16.262053769+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " subscription_address: collectd/cloud1-telemetry", 2025-12-08T17:58:16.262053769+00:00 stdout F  " - bridge:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:58:16.262053769+00:00 stdout F  " verbose: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " collector_type: ceilometer", 2025-12-08T17:58:16.262053769+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " subscription_address: anycast/ceilometer/cloud1-metering.sample", 2025-12-08T17:58:16.262053769+00:00 stdout F  " - bridge:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ring_buffer_size: 65535", 2025-12-08T17:58:16.262053769+00:00 stdout F  " verbose: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " collector_type: sensubility", 2025-12-08T17:58:16.262053769+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " subscription_address: sensubility/cloud1-telemetry", 2025-12-08T17:58:16.262053769+00:00 stdout F  " name: cloud1", 2025-12-08T17:58:16.262053769+00:00 stdout F  "graphing:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " enabled: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " grafana:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " base_image: registry.redhat.io/rhel8/grafana:9", 2025-12-08T17:58:16.262053769+00:00 stdout F  " dashboards:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " enabled: true", 2025-12-08T17:58:16.262053769+00:00 stdout F  " disable_signout_menu: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ingress_enabled: true", 2025-12-08T17:58:16.262053769+00:00 stdout F  "high_availability:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " enabled: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  "transports:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " qdr:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " auth: basic", 2025-12-08T17:58:16.262053769+00:00 stdout F  " certificates:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " ca_cert_duration: 70080h", 2025-12-08T17:58:16.262053769+00:00 stdout F  " endpoint_cert_duration: 70080h", 2025-12-08T17:58:16.262053769+00:00 stdout F  " deployment_size: 1", 2025-12-08T17:58:16.262053769+00:00 stdout F  " enabled: true", 2025-12-08T17:58:16.262053769+00:00 stdout F  " web:", 2025-12-08T17:58:16.262053769+00:00 stdout F  " enabled: false", 2025-12-08T17:58:16.262053769+00:00 stdout F  "" 2025-12-08T17:58:16.262053769+00:00 stdout F  ] 2025-12-08T17:58:16.262053769+00:00 stdout F } 2025-12-08T17:58:16.262053769+00:00 stdout F 2025-12-08T17:58:16.262053769+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T17:58:16.262149362+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:16Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"4191063253195287734","EventData.TaskArgs":""} 2025-12-08T17:58:16.291809989+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:16Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Get current Smart Gateways loaded"} 2025-12-08T17:58:16.291845870+00:00 stdout F 2025-12-08T17:58:16.291845870+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:16.291845870+00:00 stdout F 2025-12-08T17:58:16.291845870+00:00 stdout F TASK [servicetelemetry : Get current Smart Gateways loaded] ******************** 2025-12-08T17:58:16.291845870+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:77 2025-12-08T17:58:16.291845870+00:00 stdout F 2025-12-08T17:58:16.291845870+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:17.223438397+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:17Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways","Verb":"list","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"","Parts":["smartgateways"]}} 2025-12-08T17:58:17.382831457+00:00 stdout F 2025-12-08T17:58:17.382831457+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:17.382831457+00:00 stdout F 2025-12-08T17:58:17.382831457+00:00 stdout F TASK [servicetelemetry : Get current STF object] ******************************* 2025-12-08T17:58:17.382831457+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:84 2025-12-08T17:58:17.382831457+00:00 stdout F 2025-12-08T17:58:17.382831457+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:17.382865837+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:17Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Get current STF object"} 2025-12-08T17:58:18.216918544+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:18Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/infra.watch/v1beta1/namespaces/service-telemetry/servicetelemetrys/default","Verb":"get","APIPrefix":"apis","APIGroup":"infra.watch","APIVersion":"v1beta1","Namespace":"service-telemetry","Resource":"servicetelemetrys","Subresource":"","Name":"default","Parts":["servicetelemetrys","default"]}} 2025-12-08T17:58:18.336900226+00:00 stdout F 2025-12-08T17:58:18.336900226+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 
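For reference, an assumption-laden sketch (not part of the job output): the two cache reads above correspond to GET /apis/infra.watch/v1beta1/namespaces/service-telemetry/servicetelemetrys/default and LIST /apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways, which can be reproduced with the kubernetes CustomObjectsApi. The group, version, plural, and name values are copied from those request paths; everything else (cluster access, the script) is assumed.

```python
# Hypothetical reproduction of the two reads logged above; not part of the job.
# group/version/plural/name are taken verbatim from the proxy request paths.
from kubernetes import client, config

config.load_kube_config()
co = client.CustomObjectsApi()
ns = "service-telemetry"

# GET /apis/infra.watch/v1beta1/namespaces/service-telemetry/servicetelemetrys/default
stf = co.get_namespaced_custom_object("infra.watch", "v1beta1", ns, "servicetelemetrys", "default")
print("ServiceTelemetry spec keys:", sorted(stf.get("spec", {}).keys()))

# LIST /apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways
sgs = co.list_namespaced_custom_object("smartgateway.infra.watch", "v2", ns, "smartgateways")
print("SmartGateways:", [i["metadata"]["name"] for i in sgs.get("items", [])])
```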
2025-12-08T17:58:18.336900226+00:00 stdout F 2025-12-08T17:58:18.336900226+00:00 stdout F TASK [servicetelemetry : Get community Prometheus objects] ********************* 2025-12-08T17:58:18.336900226+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:92 2025-12-08T17:58:18.336900226+00:00 stdout F 2025-12-08T17:58:18.336900226+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:18.336949127+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:18Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Get community Prometheus objects"} 2025-12-08T17:58:19.024505907+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:19Z","logger":"proxy","msg":"Cache miss: monitoring.coreos.com/v1, Kind=Prometheus, service-telemetry/default"} 2025-12-08T17:58:19.160065950+00:00 stdout F 2025-12-08T17:58:19.160065950+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:19.160065950+00:00 stdout F 2025-12-08T17:58:19.160065950+00:00 stdout F TASK [servicetelemetry : Apply community observabilityStrategy if missing on an STF object with an existing community prometheus] *** 2025-12-08T17:58:19.160065950+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:101 2025-12-08T17:58:19.160065950+00:00 stdout F 2025-12-08T17:58:19.160065950+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:19.160096541+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:19Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Apply community observabilityStrategy if missing on an STF object with an existing community prometheus"} 2025-12-08T17:58:19.245333805+00:00 stdout F 2025-12-08T17:58:19.245333805+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:19.245333805+00:00 stdout F 2025-12-08T17:58:19.245333805+00:00 stdout F TASK [servicetelemetry : Apply default observabilityStrategy if missing on a new STF object with no associated community prometheus] *** 2025-12-08T17:58:19.245333805+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:118 2025-12-08T17:58:19.245333805+00:00 stdout F 2025-12-08T17:58:19.245333805+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:19.245390116+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:19Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Apply default observabilityStrategy if missing on a new STF object with no associated community prometheus"} 2025-12-08T17:58:19.272059035+00:00 stdout F 2025-12-08T17:58:19.272059035+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:19.272059035+00:00 stdout F 
2025-12-08T17:58:19.272059035+00:00 stdout F TASK [servicetelemetry : Get QDR objects] ************************************** 2025-12-08T17:58:19.272059035+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:132 2025-12-08T17:58:19.272059035+00:00 stdout F 2025-12-08T17:58:19.272059035+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:19.272100776+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:19Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Get QDR objects"} 2025-12-08T17:58:19.936603001+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:19Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/interconnectedcloud.github.io/v1alpha1/namespaces/service-telemetry/interconnects/default-interconnect","Verb":"get","APIPrefix":"apis","APIGroup":"interconnectedcloud.github.io","APIVersion":"v1alpha1","Namespace":"service-telemetry","Resource":"interconnects","Subresource":"","Name":"default-interconnect","Parts":["interconnects","default-interconnect"]}} 2025-12-08T17:58:20.037140059+00:00 stdout F 2025-12-08T17:58:20.037140059+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:20.037140059+00:00 stdout F 2025-12-08T17:58:20.037140059+00:00 stdout F TASK [servicetelemetry : Apply legacy auth=none for QDR if missing on the STF object and it's currently deployed that way] *** 2025-12-08T17:58:20.037140059+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:141 2025-12-08T17:58:20.037140059+00:00 stdout F 2025-12-08T17:58:20.037140059+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:20.037231491+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:20Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Apply legacy auth=none for QDR if missing on the STF object and it's currently deployed that way"} 2025-12-08T17:58:20.148155948+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:20Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Apply default auth for QDR if missing on a new STF object with no associated auth=none QDR"} 2025-12-08T17:58:20.148480466+00:00 stdout F 2025-12-08T17:58:20.148480466+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:20.148501017+00:00 stdout F 2025-12-08T17:58:20.148501017+00:00 stdout F TASK [servicetelemetry : Apply default auth for QDR if missing on a new STF object with no associated auth=none QDR] *** 2025-12-08T17:58:20.148501017+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:161 2025-12-08T17:58:20.148525147+00:00 stdout F 2025-12-08T17:58:20.148525147+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T17:58:21.396722378+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:21Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/infra.watch/v1beta1/namespaces/service-telemetry/servicetelemetrys/default","Verb":"get","APIPrefix":"apis","APIGroup":"infra.watch","APIVersion":"v1beta1","Namespace":"service-telemetry","Resource":"servicetelemetrys","Subresource":"","Name":"default","Parts":["servicetelemetrys","default"]}} 2025-12-08T17:58:21.651071282+00:00 stdout F 2025-12-08T17:58:21.651071282+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:21.651071282+00:00 stdout F 2025-12-08T17:58:21.651071282+00:00 stdout F TASK [servicetelemetry : Set ServiceTelemetry object status to have ephemeralStorageEnabled status] *** 2025-12-08T17:58:21.651071282+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:185 2025-12-08T17:58:21.651071282+00:00 stdout F 2025-12-08T17:58:21.651071282+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:21.651103813+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:21Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Set ServiceTelemetry object status to have ephemeralStorageEnabled status"} 2025-12-08T17:58:21.752440902+00:00 stdout F 2025-12-08T17:58:21.752440902+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:21.752440902+00:00 stdout F 2025-12-08T17:58:21.752440902+00:00 stdout F TASK [servicetelemetry : Create QDR instance] ********************************** 2025-12-08T17:58:21.752440902+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:19 2025-12-08T17:58:21.752440902+00:00 stdout F 2025-12-08T17:58:21.752440902+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:21.752479713+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:21Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create QDR instance"} 2025-12-08T17:58:21.826296640+00:00 stdout F 2025-12-08T17:58:21.826296640+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:21.826296640+00:00 stdout F 2025-12-08T17:58:21.826296640+00:00 stdout F TASK [servicetelemetry : Create self-signed interconnect issuer] *************** 2025-12-08T17:58:21.826296640+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:6 2025-12-08T17:58:21.826296640+00:00 stdout F 2025-12-08T17:58:21.826296640+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:21.826342662+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:21Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, 
Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create self-signed interconnect issuer"} 2025-12-08T17:58:22.740299114+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:22Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/issuers/default-interconnect-selfsigned","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"issuers","Subresource":"","Name":"default-interconnect-selfsigned","Parts":["issuers","default-interconnect-selfsigned"]}} 2025-12-08T17:58:22.902375962+00:00 stdout F 2025-12-08T17:58:22.902375962+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:22.902375962+00:00 stdout F 2025-12-08T17:58:22.902375962+00:00 stdout F TASK [servicetelemetry : Create self-signed interconnect certificate] ********** 2025-12-08T17:58:22.902375962+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:18 2025-12-08T17:58:22.902375962+00:00 stdout F 2025-12-08T17:58:22.902375962+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:22.902421623+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:22Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create self-signed interconnect certificate"} 2025-12-08T17:58:23.657150340+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:23Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/certificates/default-interconnect-selfsigned","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"certificates","Subresource":"","Name":"default-interconnect-selfsigned","Parts":["certificates","default-interconnect-selfsigned"]}} 2025-12-08T17:58:23.763027157+00:00 stdout F 2025-12-08T17:58:23.763027157+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:23.763027157+00:00 stdout F 2025-12-08T17:58:23.763027157+00:00 stdout F TASK [servicetelemetry : Create default CA interconnect issuer using self-signed interconnect certificate] *** 2025-12-08T17:58:23.763027157+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:36 2025-12-08T17:58:23.763027157+00:00 stdout F 2025-12-08T17:58:23.763027157+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:23.763081508+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:23Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create default CA interconnect issuer using self-signed interconnect certificate"} 2025-12-08T17:58:24.506984575+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:24Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/issuers/default-interconnect-ca","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"issuers","Subresource":"","Name":"default-interconnect-ca","Parts":["issuers","default-interconnect-ca"]}} 2025-12-08T17:58:24.637593441+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:24Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create OpenStack CA certificate using self-signed interconnect certificate"} 2025-12-08T17:58:24.637668033+00:00 stdout F 2025-12-08T17:58:24.637668033+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:24.637704644+00:00 stdout F 2025-12-08T17:58:24.637704644+00:00 stdout F TASK [servicetelemetry : Create OpenStack CA certificate using self-signed interconnect certificate] *** 2025-12-08T17:58:24.637704644+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:49 2025-12-08T17:58:24.637723514+00:00 stdout F 2025-12-08T17:58:24.637723514+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:25.483202196+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:25Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/certificates/default-interconnect-openstack-ca","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"certificates","Subresource":"","Name":"default-interconnect-openstack-ca","Parts":["certificates","default-interconnect-openstack-ca"]}} 2025-12-08T17:58:25.624004235+00:00 stdout F 2025-12-08T17:58:25.624004235+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:25.624004235+00:00 stdout F 2025-12-08T17:58:25.624004235+00:00 stdout F TASK [servicetelemetry : Create OpenStack credentials certificate using self-signed interconnect certificate] *** 2025-12-08T17:58:25.624004235+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:66 2025-12-08T17:58:25.624004235+00:00 stdout F 2025-12-08T17:58:25.624004235+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:25.624041376+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:25Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create OpenStack credentials certificate using self-signed interconnect certificate"} 2025-12-08T17:58:26.375458976+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:26Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/certificates/default-interconnect-openstack-credentials","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"certificates","Subresource":"","Name":"default-interconnect-openstack-credentials","Parts":["certificates","default-interconnect-openstack-credentials"]}} 2025-12-08T17:58:26.550043418+00:00 stdout F 2025-12-08T17:58:26.550043418+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:26.550043418+00:00 stdout F 2025-12-08T17:58:26.550043418+00:00 stdout F TASK [servicetelemetry : Create inter-router CA issuer] ************************ 2025-12-08T17:58:26.550043418+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:84 2025-12-08T17:58:26.550043418+00:00 stdout F 2025-12-08T17:58:26.550043418+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:26.550077489+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:26Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create inter-router CA issuer"} 2025-12-08T17:58:27.506942121+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:27Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/issuers/default-interconnect-inter-router-ca","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"issuers","Subresource":"","Name":"default-interconnect-inter-router-ca","Parts":["issuers","default-interconnect-inter-router-ca"]}} 2025-12-08T17:58:27.644172257+00:00 stdout F 2025-12-08T17:58:27.644172257+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:27.644172257+00:00 stdout F 2025-12-08T17:58:27.644172257+00:00 stdout F TASK [servicetelemetry : Create inter-router CA certificate] ******************* 2025-12-08T17:58:27.644172257+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:97 2025-12-08T17:58:27.644172257+00:00 stdout F 2025-12-08T17:58:27.644172257+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:27.644215439+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:27Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create inter-router CA certificate"} 2025-12-08T17:58:28.365362457+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:28Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/certificates/default-interconnect-inter-router-ca","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"certificates","Subresource":"","Name":"default-interconnect-inter-router-ca","Parts":["certificates","default-interconnect-inter-router-ca"]}} 2025-12-08T17:58:28.512171922+00:00 stdout F 2025-12-08T17:58:28.512171922+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:28.512171922+00:00 stdout F 2025-12-08T17:58:28.512171922+00:00 stdout F TASK [servicetelemetry : Create inter-router credentials certificate] ********** 2025-12-08T17:58:28.512171922+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:114 2025-12-08T17:58:28.512171922+00:00 stdout F 2025-12-08T17:58:28.512171922+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:28.512211153+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:28Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create inter-router credentials certificate"} 2025-12-08T17:58:29.285314514+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:29Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/certificates/default-interconnect-inter-router-credentials","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"certificates","Subresource":"","Name":"default-interconnect-inter-router-credentials","Parts":["certificates","default-interconnect-inter-router-credentials"]}} 2025-12-08T17:58:29.429543701+00:00 stdout F 2025-12-08T17:58:29.429543701+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:29.429543701+00:00 stdout F 2025-12-08T17:58:29.429543701+00:00 stdout F TASK [servicetelemetry : Create Interconnect SASL ConfigMap] ******************* 2025-12-08T17:58:29.429543701+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:133 2025-12-08T17:58:29.429543701+00:00 stdout F 2025-12-08T17:58:29.429543701+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:29.429614523+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:29Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create Interconnect SASL ConfigMap"} 2025-12-08T17:58:30.774859142+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:30Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/configmaps/default-interconnect-sasl-config","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"configmaps","Subresource":"","Name":"default-interconnect-sasl-config","Parts":["configmaps","default-interconnect-sasl-config"]}} 2025-12-08T17:58:30.897078580+00:00 stdout F 2025-12-08T17:58:30.897078580+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:30.897078580+00:00 stdout F 2025-12-08T17:58:30.897078580+00:00 stdout F TASK [servicetelemetry : Get QDR BasicAuth secret] ***************************** 2025-12-08T17:58:30.897078580+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:155 2025-12-08T17:58:30.897078580+00:00 stdout F 2025-12-08T17:58:30.897078580+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:30.897163633+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:30Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Get QDR BasicAuth secret"} 2025-12-08T17:58:31.646688815+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:31Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/default-interconnect-users","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"default-interconnect-users","Parts":["secrets","default-interconnect-users"]}} 2025-12-08T17:58:31.750204291+00:00 stdout F 2025-12-08T17:58:31.750204291+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:31.750204291+00:00 stdout F 2025-12-08T17:58:31.750204291+00:00 stdout F TASK [servicetelemetry : Perform a one-time upgrade to the default generated password for QDR BasicAuth] *** 2025-12-08T17:58:31.750204291+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:167 2025-12-08T17:58:31.750204291+00:00 stdout F 2025-12-08T17:58:31.750204291+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:31.750242202+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:31Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Perform a one-time upgrade to the default generated password for QDR BasicAuth"} 2025-12-08T17:58:32.570075950+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:32Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/default-interconnect-users","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"default-interconnect-users","Parts":["secrets","default-interconnect-users"]}} 2025-12-08T17:58:32.686317135+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:58:32Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Get the list of QDR pods"} 2025-12-08T17:58:32.686385347+00:00 stdout F 2025-12-08T17:58:32.686385347+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:32.686416277+00:00 stdout F 2025-12-08T17:58:32.686416277+00:00 stdout F TASK [servicetelemetry : Get the list of QDR pods] ***************************** 2025-12-08T17:58:32.686416277+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:181 2025-12-08T17:58:32.686450718+00:00 stdout F 2025-12-08T17:58:32.686450718+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:33.549700799+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:33Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/pods","Verb":"list","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"pods","Subresource":"","Name":"","Parts":["pods"]}} 2025-12-08T17:58:33.668351926+00:00 stdout F 2025-12-08T17:58:33.668351926+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:33.668351926+00:00 stdout F 2025-12-08T17:58:33.668351926+00:00 stdout F TASK [servicetelemetry : Restart QDR pods to pick up new password] ************* 2025-12-08T17:58:33.668351926+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:190 2025-12-08T17:58:33.668351926+00:00 stdout F 2025-12-08T17:58:33.668351926+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:33.668387027+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:33Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Restart QDR pods to pick up new password"} 2025-12-08T17:58:34.364029047+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:34Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/pods/default-interconnect-55bf8d5cb-76n5w","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"pods","Subresource":"","Name":"default-interconnect-55bf8d5cb-76n5w","Parts":["pods","default-interconnect-55bf8d5cb-76n5w"]}} 2025-12-08T17:58:34.544845319+00:00 stdout F 2025-12-08T17:58:34.544845319+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:34.544845319+00:00 stdout F 2025-12-08T17:58:34.544845319+00:00 stdout F TASK [Restart QDR pods to pick up new password] ******************************** 2025-12-08T17:58:34.544845319+00:00 stdout P changed: [localhost] => (item={'apiVersion': 'v1', 'kind': 'Pod', 'metadata': {'annotations': {'k8s.ovn.org/pod-networks': 
'{"default":{"ip_addresses":["10.217.0.73/23"],"mac_address":"0a:58:0a:d9:00:49","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.73/23","gateway_ip":"10.217.0.1","role":"primary"}}', 'k8s.v1.cni.cncf.io/network-status': '[{\n "name": "ovn-kubernetes",\n "interface": "eth0",\n "ips": [\n "10.217.0.73"\n ],\n "mac": "0a:58:0a:d9:00:49",\n "default": true,\n "dns": {}\n}]', 'openshift.io/scc': 'restricted-v2', 'prometheus.io/port': '8888', 'prometheus.io/scrape': 'true', 'seccomp.security.alpha.kubernetes.io/pod': 'runtime/default', 'security.openshift.io/validated-scc-subject-type': 'user'}, 'creationTimestamp': '2025-12-08T17:57:28Z', 'generateName': 'default-interconnect-55bf8d5cb-', 'generation': 1, 'labels': {'application': 'default-interconnect', 'com.company': 'Red_Hat', 'interconnect_cr': 'default-interconnect', 'pod-template-hash': '55bf8d5cb', 'rht.comp': 'Interconnect', 'rht.comp_t': 'application', 'rht.comp_ver': '1.10', 'rht.prod_name': 'Red_Hat_Integration', 'rht.prod_ver': '2021.Q4'}, 'managedFields': [{'apiVersion': 'v1', 'fieldsType': 'FieldsV1', 'fieldsV1': {'f:metadata': {'f:annotations': {'f:k8s.ovn.org/pod-networks': {}}}}, 'manager': 'crc', 'operation': 'Update', 'subresource': 'status', 'time': '2025-12-08T17:57:28Z'}, {'apiVersion': 'v1', 'fieldsType': 'FieldsV1', 'fieldsV1': {'f:metadata': {'f:annotations': {'.': {}, 'f:prometheus.io/port': {}, 'f:prometheus.io/scrape': {}}, 'f:generateName': {}, 'f:labels': {'.': {}, 'f:application': {}, 'f:com.company': {}, 'f:interconnect_cr': {}, 'f:pod-template-hash': {}, 'f:rht.comp': {}, 'f:rht.comp_t': {}, 'f:rht.comp_ver': {}, 'f:rht.prod_name': {}, 'f:rht.prod_ver': {}}, 'f:ownerReferences': {'.': {}, 'k:{"uid":"d9c87381-9697-4964-8b68-40cbab3a00ca"}': {}}}, 'f:spec': {'f:affinity': {'.': {}, 'f:podAntiAffinity': {'.': {}, 'f:requiredDuringSchedulingIgnoredDuringExecution': {}}}, 'f:containers': {'k:{"name":"default-interconnect"}': {'.': {}, 'f:env': {'.': {}, 'k:{"name":"APPLICATION_NAME"}': {'.': {}, 'f:name': {}, 'f:value': {}}, 'k:{"name":"POD_COUNT"}': {'.': {}, 'f:name': {}, 'f:value': {}}, 'k:{"name":"POD_IP"}': {'.': {}, 'f:name': {}, 'f:valueFrom': {'.': {}, 'f:fieldRef': {}}}, 'k:{"name":"POD_NAMESPACE"}': {'.': {}, 'f:name': {}, 'f:valueFrom': {'.': {}, 'f:fieldRef': {}}}, 'k:{"name":"QDROUTERD_AUTO_CREATE_SASLDB_PATH"}': {'.': {}, 'f:name': {}, 'f:value': {}}, 'k:{"name":"QDROUTERD_AUTO_CREATE_SASLDB_SOURCE"}': {'.': {}, 'f:name': {}, 'f:value': {}}, 'k:{"name":"QDROUTERD_AUTO_MESH_DISCOVERY"}': {'.': {}, 'f:name': {}, 'f:value': {}}, 'k:{"name":"QDROUTERD_CONF"}': {'.': {}, 'f:name': {}, 'f:value': {}}}, 'f:image': {}, 'f:imagePullPolicy': {}, 'f:livenessProbe': {'.': {}, 'f:failureThreshold': {}, 'f:httpGet': {'.': {}, 'f:path': {}, 'f:port': {}, 'f:scheme': {}}, 'f:initialDelaySeconds': {}, 'f:periodSeconds': {}, 'f:successThreshold': {}, 'f:timeoutSeconds': {}}, 'f:name': {}, 'f:ports': {'.': {}, 'k:{"containerPort":55671,"protocol":"TCP"}': {'.': {}, 'f:containerPort': {}, 'f:name': {}, 'f:protocol': {}}, 'k:{"containerPort":5672,"protocol":"TCP"}': {'.': {}, 'f:containerPort': {}, 'f:name': {}, 'f:protocol': {}}}, 'f:resources': {}, 'f:terminationMessagePath': {}, 'f:terminationMessagePolicy': {}, 'f:volumeMounts': {'.': {}, 
'k:{"mountPath":"/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca"}': {'.': {}, 'f:mountPath': {}, 'f:name': {}}, 'k:{"mountPath":"/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials"}': {'.': {}, 'f:mountPath': {}, 'f:name': {}}, 'k:{"mountPath":"/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca"}': {'.': {}, 'f:mountPath': {}, 'f:name': {}}, 'k:{"mountPath":"/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials"}': {'.': {}, 'f:mountPath': {}, 'f:name': {}}, 'k:{"mountPath":"/etc/qpid-dispatch/sasl-users"}': {'.': {}, 'f:mountPath': {}, 'f:name': {}}, 'k:{"mountPath":"/etc/sasl2"}': {'.': {}, 'f:mountPath': {}, 'f:name': {}}}}}, 'f:dnsPolicy': {}, 'f:enableServiceLinks': {}, 'f:restartPolicy': {}, 'f:schedulerName': {}, 'f:securityContext': {}, 'f:serviceAccount': {}, 'f:serviceAccountName': {}, 'f:terminationGracePeriodSeconds': {}, 'f:volumes': {'.': {}, 'k:{"name":"default-interconnect-inter-router-ca"}': {'.': {}, 'f:name': {}, 'f:secret': {'.': {}, 'f:defaultMode': {}, 'f:secretName': {}}}, 'k:{"name":"default-interconnect-inter-router-credentials"}': {'.': {}, 'f:name': {}, 'f:secret': {'.': {}, 'f:defaultMode': {}, 'f:secretName': {}}}, 'k:{"name":"default-interconnect-openstack-ca"}': {'.': {}, 'f:name': {}, 'f:secret': {'.': {}, 'f:defaultMode': {}, 'f:secretName': {}}}, 'k:{"name":"default-interconnect-openstack-credentials"}': {'.': {}, 'f:name': {}, 'f:secret': {'.': {}, 'f:defaultMode': {}, 'f:secretName': {}}}, 'k:{"name":"sasl-config"}': {'.': {}, 'f:configMap': {'.': {}, 'f:defaultMode': {}, 'f:name': {}}, 'f:name': {}}, 'k:{"name":"sasl-users"}': {'.': {}, 'f:name': {}, 'f:secret': {'.': {}, 'f:defaultMode': {}, 'f:secretName': {}}}}}}, 'manager': 'kube-controller-manager', 'operation': 'Update', 'time': '2025-12-08T17:57:28Z'}, {'apiVersion': 'v1', 'fieldsType': 'FieldsV1', 'fieldsV1': {'f:metadata': {'f:annotations': {'f:k8s.v1.cni.cncf.io/network-status': {}}}}, 'manager': 'multus-daemon', 'operation': 'Update', 'subresource': 'status', 'time': '2025-12-08T17:57:29Z'}, {'apiVersion': 'v1', 'fieldsType': 'FieldsV1', 'fieldsV1': {'f:status': {'f:conditions': {'k:{"type":"ContainersReady"}': {'.': {}, 'f:lastProbeTime': {}, 'f:lastTransitionTime': {}, 'f:status': {}, 'f:type': {}}, 'k:{"type":"Initialized"}': {'.': {}, 'f:lastProbeTime': {}, 'f:lastTransitionTime': {}, 'f:status': {}, 'f:type': {}}, 'k:{"type":"PodReadyToStartContainers"}': {'.': {}, 'f:lastProbeTime': {}, 'f:lastTransitionTime': {}, 'f:status': {}, 'f:type': {}}, 'k:{"type":"Ready"}': {'.': {}, 'f:lastProbeTime': {}, 'f:lastTransitionTime': {}, 'f:status': {}, 'f:type': {}}}, 'f:containerStatuses': {}, 'f:hostIP': {}, 'f:hostIPs': {}, 'f:phase': {}, 'f:podIP': {}, 'f:podIPs': {'.': {}, 'k:{"ip":"10.217.0.73"}': {'.': {}, 'f:ip': {}}}, 'f:startTime': {}}}, 'manager': 'kubelet', 'operation': 'Update', 'subresource': 'status', 'time': '2025-12-08T17:57:34Z'}], 'name': 'default-interconnect-55bf8d5cb-76n5w', 'namespace': 'service-telemetry', 'ownerReferences': [{'apiVersion': 'apps/v1', 'blockOwnerDeletion': True, 'controller': True, 'kind': 'ReplicaSet', 'name': 'default-interconnect-55bf8d5cb', 'uid': 'd9c87381-9697-4964-8b68-40cbab3a00ca'}], 'resourceVersion': '44985', 'uid': 'df9f5211-ab02-49a8-82e6-0c2f4b07bc52'}, 'spec': {'affinity': {'podAntiAffinity': {'requiredDuringSchedulingIgnoredDuringExecution': [{'labelSelector': {'matchExpressions': [{'key': 'application', 'operator': 'In', 'values': 
['default-interconnect']}]}, 'topologyKey': 'kubernetes.io/hostname'}]}}, 'containers': [{'env': [{'name': 'APPLICATION_NAME', 'value': 'default-interconnect'}, {'name': 'QDROUTERD_CONF', 'value': '\nrouter {\n mode: interior\n id: ${HOSTNAME}\n}\n\nlistener {\n host: 127.0.0.1\n port: 5672\n role: normal\n}\nlistener {\n name: health-and-stats\n port: 8888\n http: true\n healthz: true\n metrics: true\n websockets: false\n httpRootDir: invalid\n}\n\nlistener {\n role: inter-router\n port: 55671\n saslMechanisms: EXTERNAL\n authenticatePeer: true\n sslProfile: inter-router\n}\n\nlistener {\n role: edge\n port: 5671\n saslMechanisms: PLAIN\n authenticatePeer: true\n sslProfile: openstack\n}\nlistener {\n role: edge\n port: 5673\n linkCapacity: 25000\n}\n\nsslProfi 2025-12-08T17:58:34.544961662+00:00 stdout P le {\n name: openstack\n certFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials/tls.crt\n privateKeyFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials/tls.key\n caCertFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca/tls.crt\n}\nsslProfile {\n name: inter-router\n certFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials/tls.crt\n privateKeyFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials/tls.key\n caCertFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca/tls.crt\n}\n\naddress {\n prefix: closest\n distribution: closest\n}\naddress {\n prefix: multicast\n distribution: multicast\n}\naddress {\n prefix: unicast\n distribution: closest\n}\naddress {\n prefix: exclusive\n distribution: closest\n}\naddress {\n prefix: broadcast\n distribution: multicast\n}\naddress {\n prefix: collectd\n distribution: multicast\n}\naddress {\n prefix: ceilometer\n distribution: multicast\n}\n\n\n\n\n'}, {'name': 'QDROUTERD_AUTO_CREATE_SASLDB_SOURCE', 'value': '/etc/qpid-dispatch/sasl-users/'}, {'name': 'QDROUTERD_AUTO_CREATE_SASLDB_PATH', 'value': '/tmp/qdrouterd.sasldb'}, {'name': 'POD_COUNT', 'value': '1'}, {'name': 'POD_NAMESPACE', 'valueFrom': {'fieldRef': {'apiVersion': 'v1', 'fieldPath': 'metadata.namespace'}}}, {'name': 'POD_IP', 'valueFrom': {'fieldRef': {'apiVersion': 'v1', 'fieldPath': 'status.podIP'}}}, {'name': 'QDROUTERD_AUTO_MESH_DISCOVERY', 'value': 'QUERY'}], 'image': 'registry.redhat.io/amq7/amq-interconnect@sha256:31d87473fa684178a694f9ee331d3c80f2653f9533cb65c2a325752166a077e9', 'imagePullPolicy': 'IfNotPresent', 'livenessProbe': {'failureThreshold': 3, 'httpGet': {'path': '/healthz', 'port': 8888, 'scheme': 'HTTP'}, 'initialDelaySeconds': 60, 'periodSeconds': 10, 'successThreshold': 1, 'timeoutSeconds': 1}, 'name': 'default-interconnect', 'ports': [{'containerPort': 5672, 'name': 'port-5672', 'protocol': 'TCP'}, {'containerPort': 55671, 'name': 'port-55671', 'protocol': 'TCP'}], 'resources': {}, 'securityContext': {'allowPrivilegeEscalation': False, 'capabilities': {'drop': ['ALL']}, 'runAsNonRoot': True, 'runAsUser': 1000670000}, 'terminationMessagePath': '/dev/termination-log', 'terminationMessagePolicy': 'File', 'volumeMounts': [{'mountPath': '/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials', 'name': 'default-interconnect-openstack-credentials'}, {'mountPath': '/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca', 'name': 'default-interconnect-openstack-ca'}, {'mountPath': 
'/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials', 'name': 'default-interconnect-inter-router-credentials'}, {'mountPath': '/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca', 'name': 'default-interconnect-inter-router-ca'}, {'mountPath': '/etc/qpid-dispatch/sasl-users', 'name': 'sasl-users'}, {'mountPath': '/etc/sasl2', 'name': 'sasl-config'}, {'mountPath': '/var/run/secrets/kubernetes.io/serviceaccount', 'name': 'kube-api-access-rhpm6', 'readOnly': True}]}], 'dnsPolicy': 'ClusterFirst', 'enableServiceLinks': True, 'imagePullSecrets': [{'name': 'default-interconnect-dockercfg-nxt7g'}], 'nodeName': 'crc', 'preemptionPolicy': 'PreemptLowerPriority', 'priority': 0, 'restartPolicy': 'Always', 'schedulerName': 'default-scheduler', 'securityContext': {'fsGroup': 1000670000, 'seLinuxOptions': {'level': 's0:c26,c10'}, 'seccompProfile': {'type': 'RuntimeDefault'}}, 'serviceAccount': 'default-interconnect', 'serviceAccountName': 'default-interconnect', 'terminationGracePeriodSeconds': 30, 'tolerations': [{'effect': 'NoExecute', 'key': 'node.kubernetes.io/not-ready', 'operator': 'Exists', 'tolerationSeconds': 300}, {'effect': 'NoExecute', 'key': 'node.kubernetes.io/unreachable', 'operator': 'Exists', 'tolerationSeconds': 300}], 'volumes': [{'name': 'default-interconnect-openstack-credentials', 'secret': {'defaultMode': 420, 'secretName': 'default-interconnect-openstack-credentials'}}, {'name': 'default-interconnect-openstack-ca', 'secret': {'defaultMode': 420, 'secretName': 'default-interconnect-openstack-ca'}}, {'name': 'default-interconnect-inter-router-credentials', 'secret': {'defaultMode': 420, 'secretName': 'default-interconnect-inter-router-credentials'}}, {'name': 'default-interconnect-inter-router-ca', 'secret': {'defaultMode': 420, 'secretName': 'default-interconnect-inter-router-ca'}}, {'name': 'sasl-users', 'secret': {'defaultMode': 420, 'secretName': 'default-interconnect-users'}}, {'configMap': {'defaultMode': 420, 'name': 'default-interconnect-sasl-config'}, 'name': 'sasl-config'}, {'name': 'kube-api-access-rhpm6', 'projected': {'defaultMode': 420, 'sources': [{'serviceAccountToken': {'expirationSeconds': 3607, 'path': 'token'}}, {'configMap': {'items': [{'key': 'ca.crt', 'path': 'ca.crt'}], 'name': 'kube-root-ca.crt'}}, {'downwardAPI': {'items': [{'fieldRef': {'apiVersion': 'v1', 'fieldPath': 'metadata.namespace'}, 'path': 'namespace'}]}}, {'configMap': {'items': [{'key': 'service-ca.crt', 'path': 'service-ca.crt'}], 'name': 'openshift-service-ca.crt'}}]}}]}, 'status': {'conditions': [{'lastProbeTime': None, 'lastTransitionTime': '2025-12-08T17:57:34Z', 'status': 'True', 'type': 'PodReadyToStartContainers'}, {'lastProbeTime': None, 'lastTransitionTime': '2025-12-08T17:57:28Z', 'status': 'True', 'type': 'Initialized'}, {'lastProbeTime': None, 'lastTransitionTime': '2025-12-08T17:57:34Z', 'status': 'True', 'type': 'Ready'}, {'lastProbeTime': None, 'lastTransitionTime': '2025-12-08T17:57:34Z', 'status': 'True', 'type': 'ContainersReady'}, {'lastProbeTime': None, 'lastTransitionTime': '2025-12-08T17:57:28Z', 'status': 'True', 'type': 'PodScheduled'}], 'containerStatuses': [{'containerID': 'cri-o://23ef45f8f74a4f33cee49aff44fdf03128bfa93ad9ea0b31d1316072eb33d353', 'image': 'registry.redhat.io/amq7/amq-interconnect@sha256:31d87473fa684178a694f9ee331d3c80f2653f9533cb65c2a325752166a077e9', 'imageID': 'registry.redhat.io/amq7/amq-interconnect@sha256:06ac9dd1544cec3885a8362c7685d6c5bb22aab0e46d476480b463b699ebc192', 'lastState': {}, 
'name': 'default-interconnect', 'ready': True, 'resources': {}, 'restartCount': 0, 'started': True, 'state': {'running': {'startedAt': '2025-12-08T17:57:34Z'}}, 'user': {'linux': {'gid': 0, 'supplementalGroups': [0, 1000670000], 'uid': 1000670000}}, 'volumeMounts': [{'mountPath': '/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials', 'name': 'default-interconnect-openstack-credentials'}, {'mountPath': '/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca', 'name': 'default-interconnect-openstack-ca'}, {'mountPath': '/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials', 'name': 'default-interconnect-inter-router-credentials'}, {'mountPath': '/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca', 'name': 'default-interconnect-inter-router-ca'}, {'mountPath': '/etc/qpid-dispatch/sasl-users', 'name': 'sasl-users'}, {'mountPath': '/etc/sasl2', 'name': 'sasl-config'}, {'mountPath': '/var/run/secrets/kubernetes.io/serviceaccount', 'name': 'kube-api-access-rhpm6', 'readOnly': True, 'recursiveReadOnly': 'Disabled'}]}], 'hostIP': '192.168.126.11', 'hostIPs': [{'ip': '192.168.126.11'}], 'phase': 'Running', 'podIP': '10.217.0.73', 'podIPs': [{'ip': '10.217.0.73'}], 'qosClass': 'BestEffort', 'startTime': '2025-12-08T17:57:28Z'}}) => {"ansible_loop_var": "item", "changed": true, "item": {"apiVersion": "v1", "kind": "Pod", "metadata": {"annotations": {"k8s.ovn.org/pod-networks": "{\"default\":{\"ip_addresses\":[\"10.217.0.73/23\"],\"mac_address\":\"0a:58:0a:d9:00:49\",\"gateway_ips\":[\"10.217.0.1\"],\"routes\":[{\"dest\":\"10.217.0.0/22\",\"nextHop\":\"10.217.0.1\"},{\"dest\":\"10.217.4.0/23\",\"nextHop\":\"10.217.0.1\"},{\"dest\":\"169.254.0.5/32\",\"nextHop\":\"10.217.0.1\"},{\"dest\":\"100.64.0.0/16\",\"nextHop\":\"10.217.0.1\"}],\"ip_address\":\"10.217.0.73/23\",\"gateway_ip\":\"10.217.0.1\",\"role\": 2025-12-08T17:58:34.544985653+00:00 stdout P \"primary\"}}", "k8s.v1.cni.cncf.io/network-status": "[{\n \"name\": \"ovn-kubernetes\",\n \"interface\": \"eth0\",\n \"ips\": [\n \"10.217.0.73\"\n ],\n \"mac\": \"0a:58:0a:d9:00:49\",\n \"default\": true,\n \"dns\": {}\n}]", "openshift.io/scc": "restricted-v2", "prometheus.io/port": "8888", "prometheus.io/scrape": "true", "seccomp.security.alpha.kubernetes.io/pod": "runtime/default", "security.openshift.io/validated-scc-subject-type": "user"}, "creationTimestamp": "2025-12-08T17:57:28Z", "generateName": "default-interconnect-55bf8d5cb-", "generation": 1, "labels": {"application": "default-interconnect", "com.company": "Red_Hat", "interconnect_cr": "default-interconnect", "pod-template-hash": "55bf8d5cb", "rht.comp": "Interconnect", "rht.comp_t": "application", "rht.comp_ver": "1.10", "rht.prod_name": "Red_Hat_Integration", "rht.prod_ver": "2021.Q4"}, "managedFields": [{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {"f:k8s.ovn.org/pod-networks": {}}}}, "manager": "crc", "operation": "Update", "subresource": "status", "time": "2025-12-08T17:57:28Z"}, {"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:prometheus.io/port": {}, "f:prometheus.io/scrape": {}}, "f:generateName": {}, "f:labels": {".": {}, "f:application": {}, "f:com.company": {}, "f:interconnect_cr": {}, "f:pod-template-hash": {}, "f:rht.comp": {}, "f:rht.comp_t": {}, "f:rht.comp_ver": {}, "f:rht.prod_name": {}, "f:rht.prod_ver": {}}, "f:ownerReferences": {".": {}, 
"k:{\"uid\":\"d9c87381-9697-4964-8b68-40cbab3a00ca\"}": {}}}, "f:spec": {"f:affinity": {".": {}, "f:podAntiAffinity": {".": {}, "f:requiredDuringSchedulingIgnoredDuringExecution": {}}}, "f:containers": {"k:{\"name\":\"default-interconnect\"}": {".": {}, "f:env": {".": {}, "k:{\"name\":\"APPLICATION_NAME\"}": {".": {}, "f:name": {}, "f:value": {}}, "k:{\"name\":\"POD_COUNT\"}": {".": {}, "f:name": {}, "f:value": {}}, "k:{\"name\":\"POD_IP\"}": {".": {}, "f:name": {}, "f:valueFrom": {".": {}, "f:fieldRef": {}}}, "k:{\"name\":\"POD_NAMESPACE\"}": {".": {}, "f:name": {}, "f:valueFrom": {".": {}, "f:fieldRef": {}}}, "k:{\"name\":\"QDROUTERD_AUTO_CREATE_SASLDB_PATH\"}": {".": {}, "f:name": {}, "f:value": {}}, "k:{\"name\":\"QDROUTERD_AUTO_CREATE_SASLDB_SOURCE\"}": {".": {}, "f:name": {}, "f:value": {}}, "k:{\"name\":\"QDROUTERD_AUTO_MESH_DISCOVERY\"}": {".": {}, "f:name": {}, "f:value": {}}, "k:{\"name\":\"QDROUTERD_CONF\"}": {".": {}, "f:name": {}, "f:value": {}}}, "f:image": {}, "f:imagePullPolicy": {}, "f:livenessProbe": {".": {}, "f:failureThreshold": {}, "f:httpGet": {".": {}, "f:path": {}, "f:port": {}, "f:scheme": {}}, "f:initialDelaySeconds": {}, "f:periodSeconds": {}, "f:successThreshold": {}, "f:timeoutSeconds": {}}, "f:name": {}, "f:ports": {".": {}, "k:{\"containerPort\":55671,\"protocol\":\"TCP\"}": {".": {}, "f:containerPort": {}, "f:name": {}, "f:protocol": {}}, "k:{\"containerPort\":5672,\"protocol\":\"TCP\"}": {".": {}, "f:containerPort": {}, "f:name": {}, "f:protocol": {}}}, "f:resources": {}, "f:terminationMessagePath": {}, "f:terminationMessagePolicy": {}, "f:volumeMounts": {".": {}, "k:{\"mountPath\":\"/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca\"}": {".": {}, "f:mountPath": {}, "f:name": {}}, "k:{\"mountPath\":\"/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials\"}": {".": {}, "f:mountPath": {}, "f:name": {}}, "k:{\"mountPath\":\"/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca\"}": {".": {}, "f:mountPath": {}, "f:name": {}}, "k:{\"mountPath\":\"/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials\"}": {".": {}, "f:mountPath": {}, "f:name": {}}, "k:{\"mountPath\":\"/etc/qpid-dispatch/sasl-users\"}": {".": {}, "f:mountPath": {}, "f:name": {}}, "k:{\"mountPath\":\"/etc/sasl2\"}": {".": {}, "f:mountPath": {}, "f:name": {}}}}}, "f:dnsPolicy": {}, "f:enableServiceLinks": {}, "f:restartPolicy": {}, "f:schedulerName": {}, "f:securityContext": {}, "f:serviceAccount": {}, "f:serviceAccountName": {}, "f:terminationGracePeriodSeconds": {}, "f:volumes": {".": {}, "k:{\"name\":\"default-interconnect-inter-router-ca\"}": {".": {}, "f:name": {}, "f:secret": {".": {}, "f:defaultMode": {}, "f:secretName": {}}}, "k:{\"name\":\"default-interconnect-inter-router-credentials\"}": {".": {}, "f:name": {}, "f:secret": {".": {}, "f:defaultMode": {}, "f:secretName": {}}}, "k:{\"name\":\"default-interconnect-openstack-ca\"}": {".": {}, "f:name": {}, "f:secret": {".": {}, "f:defaultMode": {}, "f:secretName": {}}}, "k:{\"name\":\"default-interconnect-openstack-credentials\"}": {".": {}, "f:name": {}, "f:secret": {".": {}, "f:defaultMode": {}, "f:secretName": {}}}, "k:{\"name\":\"sasl-config\"}": {".": {}, "f:configMap": {".": {}, "f:defaultMode": {}, "f:name": {}}, "f:name": {}}, "k:{\"name\":\"sasl-users\"}": {".": {}, "f:name": {}, "f:secret": {".": {}, "f:defaultMode": {}, "f:secretName": {}}}}}}, "manager": "kube-controller-manager", "operation": "Update", "time": "2025-12-08T17:57:28Z"}, 
{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {"f:k8s.v1.cni.cncf.io/network-status": {}}}}, "manager": "multus-daemon", "operation": "Update", "subresource": "status", "time": "2025-12-08T17:57:29Z"}, {"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {"k:{\"type\":\"ContainersReady\"}": {".": {}, "f:lastProbeTime": {}, "f:lastTransitionTime": {}, "f:status": {}, "f:type": {}}, "k:{\"type\":\"Initialized\"}": {".": {}, "f:lastProbeTime": {}, "f:lastTransitionTime": {}, "f:status": {}, "f:type": {}}, "k:{\"type\":\"PodReadyToStartContainers\"}": {".": {}, "f:lastProbeTime": {}, "f:lastTransitionTime": {}, "f:status": {}, "f:type": {}}, "k:{\"type\":\"Ready\"}": {".": {}, "f:lastProbeTime": {}, "f:lastTransitionTime": {}, "f:status": {}, "f:type": {}}}, "f:containerStatuses": {}, "f:hostIP": {}, "f:hostIPs": {}, "f:phase": {}, "f:podIP": {}, "f:podIPs": {".": {}, "k:{\"ip\":\"10.217.0.73\"}": {".": {}, "f:ip": {}}}, "f:startTime": {}}}, "manager": "kubelet", "operation": "Update", "subresource": "status", "time": "2025-12-08T17:57:34Z"}], "name": "default-interconnect-55bf8d5cb-76n5w", "namespace": "service-telemetry", "ownerReferences": [{"apiVersion": "apps/v1", "blockOwnerDeletion": true, "controller": true, "kind": "ReplicaSet", "name": "default-interconnect-55bf8d5cb", "uid": "d9c87381-9697-4964-8b68-40cbab3a00ca"}], "resourceVersion": "44985", "uid": "df9f5211-ab02-49a8-82e6-0c2f4b07bc52"}, "spec": {"affinity": {"podAntiAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{"labelSelector": {"matchExpressions": [{"key": "application", "operator": "In", "values": ["default-interconnect"]}]}, "topologyKey": "kubernetes.io/hostname"}]}}, "containers": [{"env": [{"name": "APPLICATION_NAME", "value": "default-interconnect"}, {"name": "QDROUTERD_CONF", "value": "\nrouter {\n mode: interior\n id: ${HOSTNAME}\n}\n\nlistener {\n host: 127.0.0.1\n port: 5672\n role: normal\n}\nlistener {\n name: health-and-stats\n port: 8888\n http: true\n healthz: true\n metrics: true\n websockets: false\n httpRootDir: invalid\n}\n\nlistener {\n role: inter-router\n port: 55671\n saslMechanisms: EXTERNAL\n authenticatePeer: true\n sslProfile: inter-router\n}\n\nlistener {\n role: edge\n port: 5671\n saslMechanisms: PLAIN\n authenticatePeer: true\n sslProfile: openstack\n}\nlistener {\n role: edge\n port: 5673\n linkCapacity: 25000\n}\n\nsslProfile {\n name: openstack\n certFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials/tls.crt\n privateKeyFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials/tls.key\n caCertFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca/tls.crt\n}\nsslProfile {\n name: inter-router\n certFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials/tls.crt\n privateKeyFile: /etc/qpid-dispatch-certs/inter-router/de 2025-12-08T17:58:34.545003603+00:00 stdout P fault-interconnect-inter-router-credentials/tls.key\n caCertFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca/tls.crt\n}\n\naddress {\n prefix: closest\n distribution: closest\n}\naddress {\n prefix: multicast\n distribution: multicast\n}\naddress {\n prefix: unicast\n distribution: closest\n}\naddress {\n prefix: exclusive\n distribution: closest\n}\naddress {\n prefix: broadcast\n distribution: multicast\n}\naddress {\n prefix: collectd\n distribution: multicast\n}\naddress {\n prefix: 
ceilometer\n distribution: multicast\n}\n\n\n\n\n"}, {"name": "QDROUTERD_AUTO_CREATE_SASLDB_SOURCE", "value": "/etc/qpid-dispatch/sasl-users/"}, {"name": "QDROUTERD_AUTO_CREATE_SASLDB_PATH", "value": "/tmp/qdrouterd.sasldb"}, {"name": "POD_COUNT", "value": "1"}, {"name": "POD_NAMESPACE", "valueFrom": {"fieldRef": {"apiVersion": "v1", "fieldPath": "metadata.namespace"}}}, {"name": "POD_IP", "valueFrom": {"fieldRef": {"apiVersion": "v1", "fieldPath": "status.podIP"}}}, {"name": "QDROUTERD_AUTO_MESH_DISCOVERY", "value": "QUERY"}], "image": "registry.redhat.io/amq7/amq-interconnect@sha256:31d87473fa684178a694f9ee331d3c80f2653f9533cb65c2a325752166a077e9", "imagePullPolicy": "IfNotPresent", "livenessProbe": {"failureThreshold": 3, "httpGet": {"path": "/healthz", "port": 8888, "scheme": "HTTP"}, "initialDelaySeconds": 60, "periodSeconds": 10, "successThreshold": 1, "timeoutSeconds": 1}, "name": "default-interconnect", "ports": [{"containerPort": 5672, "name": "port-5672", "protocol": "TCP"}, {"containerPort": 55671, "name": "port-55671", "protocol": "TCP"}], "resources": {}, "securityContext": {"allowPrivilegeEscalation": false, "capabilities": {"drop": ["ALL"]}, "runAsNonRoot": true, "runAsUser": 1000670000}, "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "volumeMounts": [{"mountPath": "/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials", "name": "default-interconnect-openstack-credentials"}, {"mountPath": "/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca", "name": "default-interconnect-openstack-ca"}, {"mountPath": "/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials", "name": "default-interconnect-inter-router-credentials"}, {"mountPath": "/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca", "name": "default-interconnect-inter-router-ca"}, {"mountPath": "/etc/qpid-dispatch/sasl-users", "name": "sasl-users"}, {"mountPath": "/etc/sasl2", "name": "sasl-config"}, {"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", "name": "kube-api-access-rhpm6", "readOnly": true}]}], "dnsPolicy": "ClusterFirst", "enableServiceLinks": true, "imagePullSecrets": [{"name": "default-interconnect-dockercfg-nxt7g"}], "nodeName": "crc", "preemptionPolicy": "PreemptLowerPriority", "priority": 0, "restartPolicy": "Always", "schedulerName": "default-scheduler", "securityContext": {"fsGroup": 1000670000, "seLinuxOptions": {"level": "s0:c26,c10"}, "seccompProfile": {"type": "RuntimeDefault"}}, "serviceAccount": "default-interconnect", "serviceAccountName": "default-interconnect", "terminationGracePeriodSeconds": 30, "tolerations": [{"effect": "NoExecute", "key": "node.kubernetes.io/not-ready", "operator": "Exists", "tolerationSeconds": 300}, {"effect": "NoExecute", "key": "node.kubernetes.io/unreachable", "operator": "Exists", "tolerationSeconds": 300}], "volumes": [{"name": "default-interconnect-openstack-credentials", "secret": {"defaultMode": 420, "secretName": "default-interconnect-openstack-credentials"}}, {"name": "default-interconnect-openstack-ca", "secret": {"defaultMode": 420, "secretName": "default-interconnect-openstack-ca"}}, {"name": "default-interconnect-inter-router-credentials", "secret": {"defaultMode": 420, "secretName": "default-interconnect-inter-router-credentials"}}, {"name": "default-interconnect-inter-router-ca", "secret": {"defaultMode": 420, "secretName": "default-interconnect-inter-router-ca"}}, {"name": "sasl-users", "secret": {"defaultMode": 420, 
"secretName": "default-interconnect-users"}}, {"configMap": {"defaultMode": 420, "name": "default-interconnect-sasl-config"}, "name": "sasl-config"}, {"name": "kube-api-access-rhpm6", "projected": {"defaultMode": 420, "sources": [{"serviceAccountToken": {"expirationSeconds": 3607, "path": "token"}}, {"configMap": {"items": [{"key": "ca.crt", "path": "ca.crt"}], "name": "kube-root-ca.crt"}}, {"downwardAPI": {"items": [{"fieldRef": {"apiVersion": "v1", "fieldPath": "metadata.namespace"}, "path": "namespace"}]}}, {"configMap": {"items": [{"key": "service-ca.crt", "path": "service-ca.crt"}], "name": "openshift-service-ca.crt"}}]}}]}, "status": {"conditions": [{"lastProbeTime": null, "lastTransitionTime": "2025-12-08T17:57:34Z", "status": "True", "type": "PodReadyToStartContainers"}, {"lastProbeTime": null, "lastTransitionTime": "2025-12-08T17:57:28Z", "status": "True", "type": "Initialized"}, {"lastProbeTime": null, "lastTransitionTime": "2025-12-08T17:57:34Z", "status": "True", "type": "Ready"}, {"lastProbeTime": null, "lastTransitionTime": "2025-12-08T17:57:34Z", "status": "True", "type": "ContainersReady"}, {"lastProbeTime": null, "lastTransitionTime": "2025-12-08T17:57:28Z", "status": "True", "type": "PodScheduled"}], "containerStatuses": [{"containerID": "cri-o://23ef45f8f74a4f33cee49aff44fdf03128bfa93ad9ea0b31d1316072eb33d353", "image": "registry.redhat.io/amq7/amq-interconnect@sha256:31d87473fa684178a694f9ee331d3c80f2653f9533cb65c2a325752166a077e9", "imageID": "registry.redhat.io/amq7/amq-interconnect@sha256:06ac9dd1544cec3885a8362c7685d6c5bb22aab0e46d476480b463b699ebc192", "lastState": {}, "name": "default-interconnect", "ready": true, "resources": {}, "restartCount": 0, "started": true, "state": {"running": {"startedAt": "2025-12-08T17:57:34Z"}}, "user": {"linux": {"gid": 0, "supplementalGroups": [0, 1000670000], "uid": 1000670000}}, "volumeMounts": [{"mountPath": "/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials", "name": "default-interconnect-openstack-credentials"}, {"mountPath": "/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca", "name": "default-interconnect-openstack-ca"}, {"mountPath": "/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials", "name": "default-interconnect-inter-router-credentials"}, {"mountPath": "/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca", "name": "default-interconnect-inter-router-ca"}, {"mountPath": "/etc/qpid-dispatch/sasl-users", "name": "sasl-users"}, {"mountPath": "/etc/sasl2", "name": "sasl-config"}, {"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", "name": "kube-api-access-rhpm6", "readOnly": true, "recursiveReadOnly": "Disabled"}]}], "hostIP": "192.168.126.11", "hostIPs": [{"ip": "192.168.126.11"}], "phase": "Running", "podIP": "10.217.0.73", "podIPs": [{"ip": "10.217.0.73"}], "qosClass": "BestEffort", "startTime": "2025-12-08T17:57:28Z"}}, "method": "delete", "result": {"apiVersion": "v1", "kind": "Pod", "metadata": {"annotations": {"k8s.ovn.org/pod-networks": "{\"default\":{\"ip_addresses\":[\"10.217.0.73/23\"],\"mac_address\":\"0a:58:0a:d9:00:49\",\"gateway_ips\":[\"10.217.0.1\"],\"routes\":[{\"dest\":\"10.217.0.0/22\",\"nextHop\":\"10.217.0.1\"},{\"dest\":\"10.217.4.0/23\",\"nextHop\":\"10.217.0.1\"},{\"dest\":\"169.254.0.5/32\",\"nextHop\":\"10.217.0.1\"},{\"dest\":\"100.64.0.0/16\",\"nextHop\":\"10.217.0.1\"}],\"ip_address\":\"10.217.0.73/23\",\"gateway_ip\":\"10.217.0.1\",\"role\":\"primary\"}}", 
"k8s.v1.cni.cncf.io/network-status": "[{\n \"name\": \"ovn-kubernetes\",\n \"interface\": \"eth0\",\n \"ips\": [\n \"10.217.0.73\"\n ],\n \"mac\": \"0a:58:0a:d9:00:49\",\n \"default\": true,\n \"dns\": {}\n}]", "openshift.io/scc": "restricted-v2", "prometheus.io/port": "8888", "prometheus.io/scrape": "true", "seccomp.security.alpha.kubernetes.io/pod": "runtime/default", "security.openshift.io/validated-scc-subject-type": "user"}, "creationTimestamp": "2025-12-08T17:57:28Z", "deletionGracePeriodSeconds": 30, 2025-12-08T17:58:34.545022164+00:00 stdout P "deletionTimestamp": "2025-12-08T17:59:04Z", "generateName": "default-interconnect-55bf8d5cb-", "generation": 2, "labels": {"application": "default-interconnect", "com.company": "Red_Hat", "interconnect_cr": "default-interconnect", "pod-template-hash": "55bf8d5cb", "rht.comp": "Interconnect", "rht.comp_t": "application", "rht.comp_ver": "1.10", "rht.prod_name": "Red_Hat_Integration", "rht.prod_ver": "2021.Q4"}, "managedFields": [{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {"f:k8s.ovn.org/pod-networks": {}}}}, "manager": "crc", "operation": "Update", "subresource": "status", "time": "2025-12-08T17:57:28Z"}, {"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:prometheus.io/port": {}, "f:prometheus.io/scrape": {}}, "f:generateName": {}, "f:labels": {".": {}, "f:application": {}, "f:com.company": {}, "f:interconnect_cr": {}, "f:pod-template-hash": {}, "f:rht.comp": {}, "f:rht.comp_t": {}, "f:rht.comp_ver": {}, "f:rht.prod_name": {}, "f:rht.prod_ver": {}}, "f:ownerReferences": {".": {}, "k:{\"uid\":\"d9c87381-9697-4964-8b68-40cbab3a00ca\"}": {}}}, "f:spec": {"f:affinity": {".": {}, "f:podAntiAffinity": {".": {}, "f:requiredDuringSchedulingIgnoredDuringExecution": {}}}, "f:containers": {"k:{\"name\":\"default-interconnect\"}": {".": {}, "f:env": {".": {}, "k:{\"name\":\"APPLICATION_NAME\"}": {".": {}, "f:name": {}, "f:value": {}}, "k:{\"name\":\"POD_COUNT\"}": {".": {}, "f:name": {}, "f:value": {}}, "k:{\"name\":\"POD_IP\"}": {".": {}, "f:name": {}, "f:valueFrom": {".": {}, "f:fieldRef": {}}}, "k:{\"name\":\"POD_NAMESPACE\"}": {".": {}, "f:name": {}, "f:valueFrom": {".": {}, "f:fieldRef": {}}}, "k:{\"name\":\"QDROUTERD_AUTO_CREATE_SASLDB_PATH\"}": {".": {}, "f:name": {}, "f:value": {}}, "k:{\"name\":\"QDROUTERD_AUTO_CREATE_SASLDB_SOURCE\"}": {".": {}, "f:name": {}, "f:value": {}}, "k:{\"name\":\"QDROUTERD_AUTO_MESH_DISCOVERY\"}": {".": {}, "f:name": {}, "f:value": {}}, "k:{\"name\":\"QDROUTERD_CONF\"}": {".": {}, "f:name": {}, "f:value": {}}}, "f:image": {}, "f:imagePullPolicy": {}, "f:livenessProbe": {".": {}, "f:failureThreshold": {}, "f:httpGet": {".": {}, "f:path": {}, "f:port": {}, "f:scheme": {}}, "f:initialDelaySeconds": {}, "f:periodSeconds": {}, "f:successThreshold": {}, "f:timeoutSeconds": {}}, "f:name": {}, "f:ports": {".": {}, "k:{\"containerPort\":55671,\"protocol\":\"TCP\"}": {".": {}, "f:containerPort": {}, "f:name": {}, "f:protocol": {}}, "k:{\"containerPort\":5672,\"protocol\":\"TCP\"}": {".": {}, "f:containerPort": {}, "f:name": {}, "f:protocol": {}}}, "f:resources": {}, "f:terminationMessagePath": {}, "f:terminationMessagePolicy": {}, "f:volumeMounts": {".": {}, "k:{\"mountPath\":\"/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca\"}": {".": {}, "f:mountPath": {}, "f:name": {}}, "k:{\"mountPath\":\"/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials\"}": {".": {}, 
"f:mountPath": {}, "f:name": {}}, "k:{\"mountPath\":\"/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca\"}": {".": {}, "f:mountPath": {}, "f:name": {}}, "k:{\"mountPath\":\"/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials\"}": {".": {}, "f:mountPath": {}, "f:name": {}}, "k:{\"mountPath\":\"/etc/qpid-dispatch/sasl-users\"}": {".": {}, "f:mountPath": {}, "f:name": {}}, "k:{\"mountPath\":\"/etc/sasl2\"}": {".": {}, "f:mountPath": {}, "f:name": {}}}}}, "f:dnsPolicy": {}, "f:enableServiceLinks": {}, "f:restartPolicy": {}, "f:schedulerName": {}, "f:securityContext": {}, "f:serviceAccount": {}, "f:serviceAccountName": {}, "f:terminationGracePeriodSeconds": {}, "f:volumes": {".": {}, "k:{\"name\":\"default-interconnect-inter-router-ca\"}": {".": {}, "f:name": {}, "f:secret": {".": {}, "f:defaultMode": {}, "f:secretName": {}}}, "k:{\"name\":\"default-interconnect-inter-router-credentials\"}": {".": {}, "f:name": {}, "f:secret": {".": {}, "f:defaultMode": {}, "f:secretName": {}}}, "k:{\"name\":\"default-interconnect-openstack-ca\"}": {".": {}, "f:name": {}, "f:secret": {".": {}, "f:defaultMode": {}, "f:secretName": {}}}, "k:{\"name\":\"default-interconnect-openstack-credentials\"}": {".": {}, "f:name": {}, "f:secret": {".": {}, "f:defaultMode": {}, "f:secretName": {}}}, "k:{\"name\":\"sasl-config\"}": {".": {}, "f:configMap": {".": {}, "f:defaultMode": {}, "f:name": {}}, "f:name": {}}, "k:{\"name\":\"sasl-users\"}": {".": {}, "f:name": {}, "f:secret": {".": {}, "f:defaultMode": {}, "f:secretName": {}}}}}}, "manager": "kube-controller-manager", "operation": "Update", "time": "2025-12-08T17:57:28Z"}, {"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {"f:k8s.v1.cni.cncf.io/network-status": {}}}}, "manager": "multus-daemon", "operation": "Update", "subresource": "status", "time": "2025-12-08T17:57:29Z"}, {"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {"k:{\"type\":\"ContainersReady\"}": {".": {}, "f:lastProbeTime": {}, "f:lastTransitionTime": {}, "f:status": {}, "f:type": {}}, "k:{\"type\":\"Initialized\"}": {".": {}, "f:lastProbeTime": {}, "f:lastTransitionTime": {}, "f:status": {}, "f:type": {}}, "k:{\"type\":\"PodReadyToStartContainers\"}": {".": {}, "f:lastProbeTime": {}, "f:lastTransitionTime": {}, "f:status": {}, "f:type": {}}, "k:{\"type\":\"Ready\"}": {".": {}, "f:lastProbeTime": {}, "f:lastTransitionTime": {}, "f:status": {}, "f:type": {}}}, "f:containerStatuses": {}, "f:hostIP": {}, "f:hostIPs": {}, "f:phase": {}, "f:podIP": {}, "f:podIPs": {".": {}, "k:{\"ip\":\"10.217.0.73\"}": {".": {}, "f:ip": {}}}, "f:startTime": {}}}, "manager": "kubelet", "operation": "Update", "subresource": "status", "time": "2025-12-08T17:57:34Z"}], "name": "default-interconnect-55bf8d5cb-76n5w", "namespace": "service-telemetry", "ownerReferences": [{"apiVersion": "apps/v1", "blockOwnerDeletion": true, "controller": true, "kind": "ReplicaSet", "name": "default-interconnect-55bf8d5cb", "uid": "d9c87381-9697-4964-8b68-40cbab3a00ca"}], "resourceVersion": "45592", "uid": "df9f5211-ab02-49a8-82e6-0c2f4b07bc52"}, "spec": {"affinity": {"podAntiAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{"labelSelector": {"matchExpressions": [{"key": "application", "operator": "In", "values": ["default-interconnect"]}]}, "topologyKey": "kubernetes.io/hostname"}]}}, "containers": [{"env": [{"name": "APPLICATION_NAME", "value": "default-interconnect"}, {"name": "QDROUTERD_CONF", "value": 
"\nrouter {\n mode: interior\n id: ${HOSTNAME}\n}\n\nlistener {\n host: 127.0.0.1\n port: 5672\n role: normal\n}\nlistener {\n name: health-and-stats\n port: 8888\n http: true\n healthz: true\n metrics: true\n websockets: false\n httpRootDir: invalid\n}\n\nlistener {\n role: inter-router\n port: 55671\n saslMechanisms: EXTERNAL\n authenticatePeer: true\n sslProfile: inter-router\n}\n\nlistener {\n role: edge\n port: 5671\n saslMechanisms: PLAIN\n authenticatePeer: true\n sslProfile: openstack\n}\nlistener {\n role: edge\n port: 5673\n linkCapacity: 25000\n}\n\nsslProfile {\n name: openstack\n certFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials/tls.crt\n privateKeyFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials/tls.key\n caCertFile: /etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca/tls.crt\n}\nsslProfile {\n name: inter-router\n certFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials/tls.crt\n privateKeyFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials/tls.key\n caCertFile: /etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca/tls.crt\n}\n\naddress {\n prefix: closest\n distribution: closest\n}\naddress {\n prefix: multicast\n distribution: multicast\n}\naddress {\n prefix: unicast\n distribution: closest\n}\naddress {\n prefix: exclusive\n distribution: closest\n}\naddress {\n prefix: broadcast\n distribution: multicast\n}\na 2025-12-08T17:58:34.545039954+00:00 stdout F ddress {\n prefix: collectd\n distribution: multicast\n}\naddress {\n prefix: ceilometer\n distribution: multicast\n}\n\n\n\n\n"}, {"name": "QDROUTERD_AUTO_CREATE_SASLDB_SOURCE", "value": "/etc/qpid-dispatch/sasl-users/"}, {"name": "QDROUTERD_AUTO_CREATE_SASLDB_PATH", "value": "/tmp/qdrouterd.sasldb"}, {"name": "POD_COUNT", "value": "1"}, {"name": "POD_NAMESPACE", "valueFrom": {"fieldRef": {"apiVersion": "v1", "fieldPath": "metadata.namespace"}}}, {"name": "POD_IP", "valueFrom": {"fieldRef": {"apiVersion": "v1", "fieldPath": "status.podIP"}}}, {"name": "QDROUTERD_AUTO_MESH_DISCOVERY", "value": "QUERY"}], "image": "registry.redhat.io/amq7/amq-interconnect@sha256:31d87473fa684178a694f9ee331d3c80f2653f9533cb65c2a325752166a077e9", "imagePullPolicy": "IfNotPresent", "livenessProbe": {"failureThreshold": 3, "httpGet": {"path": "/healthz", "port": 8888, "scheme": "HTTP"}, "initialDelaySeconds": 60, "periodSeconds": 10, "successThreshold": 1, "timeoutSeconds": 1}, "name": "default-interconnect", "ports": [{"containerPort": 5672, "name": "port-5672", "protocol": "TCP"}, {"containerPort": 55671, "name": "port-55671", "protocol": "TCP"}], "resources": {}, "securityContext": {"allowPrivilegeEscalation": false, "capabilities": {"drop": ["ALL"]}, "runAsNonRoot": true, "runAsUser": 1000670000}, "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "volumeMounts": [{"mountPath": "/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials", "name": "default-interconnect-openstack-credentials"}, {"mountPath": "/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca", "name": "default-interconnect-openstack-ca"}, {"mountPath": "/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials", "name": "default-interconnect-inter-router-credentials"}, {"mountPath": "/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca", "name": 
"default-interconnect-inter-router-ca"}, {"mountPath": "/etc/qpid-dispatch/sasl-users", "name": "sasl-users"}, {"mountPath": "/etc/sasl2", "name": "sasl-config"}, {"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", "name": "kube-api-access-rhpm6", "readOnly": true}]}], "dnsPolicy": "ClusterFirst", "enableServiceLinks": true, "imagePullSecrets": [{"name": "default-interconnect-dockercfg-nxt7g"}], "nodeName": "crc", "preemptionPolicy": "PreemptLowerPriority", "priority": 0, "restartPolicy": "Always", "schedulerName": "default-scheduler", "securityContext": {"fsGroup": 1000670000, "seLinuxOptions": {"level": "s0:c26,c10"}, "seccompProfile": {"type": "RuntimeDefault"}}, "serviceAccount": "default-interconnect", "serviceAccountName": "default-interconnect", "terminationGracePeriodSeconds": 30, "tolerations": [{"effect": "NoExecute", "key": "node.kubernetes.io/not-ready", "operator": "Exists", "tolerationSeconds": 300}, {"effect": "NoExecute", "key": "node.kubernetes.io/unreachable", "operator": "Exists", "tolerationSeconds": 300}], "volumes": [{"name": "default-interconnect-openstack-credentials", "secret": {"defaultMode": 420, "secretName": "default-interconnect-openstack-credentials"}}, {"name": "default-interconnect-openstack-ca", "secret": {"defaultMode": 420, "secretName": "default-interconnect-openstack-ca"}}, {"name": "default-interconnect-inter-router-credentials", "secret": {"defaultMode": 420, "secretName": "default-interconnect-inter-router-credentials"}}, {"name": "default-interconnect-inter-router-ca", "secret": {"defaultMode": 420, "secretName": "default-interconnect-inter-router-ca"}}, {"name": "sasl-users", "secret": {"defaultMode": 420, "secretName": "default-interconnect-users"}}, {"configMap": {"defaultMode": 420, "name": "default-interconnect-sasl-config"}, "name": "sasl-config"}, {"name": "kube-api-access-rhpm6", "projected": {"defaultMode": 420, "sources": [{"serviceAccountToken": {"expirationSeconds": 3607, "path": "token"}}, {"configMap": {"items": [{"key": "ca.crt", "path": "ca.crt"}], "name": "kube-root-ca.crt"}}, {"downwardAPI": {"items": [{"fieldRef": {"apiVersion": "v1", "fieldPath": "metadata.namespace"}, "path": "namespace"}]}}, {"configMap": {"items": [{"key": "service-ca.crt", "path": "service-ca.crt"}], "name": "openshift-service-ca.crt"}}]}}]}, "status": {"conditions": [{"lastProbeTime": null, "lastTransitionTime": "2025-12-08T17:57:34Z", "status": "True", "type": "PodReadyToStartContainers"}, {"lastProbeTime": null, "lastTransitionTime": "2025-12-08T17:57:28Z", "status": "True", "type": "Initialized"}, {"lastProbeTime": null, "lastTransitionTime": "2025-12-08T17:57:34Z", "status": "True", "type": "Ready"}, {"lastProbeTime": null, "lastTransitionTime": "2025-12-08T17:57:34Z", "status": "True", "type": "ContainersReady"}, {"lastProbeTime": null, "lastTransitionTime": "2025-12-08T17:57:28Z", "status": "True", "type": "PodScheduled"}], "containerStatuses": [{"containerID": "cri-o://23ef45f8f74a4f33cee49aff44fdf03128bfa93ad9ea0b31d1316072eb33d353", "image": "registry.redhat.io/amq7/amq-interconnect@sha256:31d87473fa684178a694f9ee331d3c80f2653f9533cb65c2a325752166a077e9", "imageID": "registry.redhat.io/amq7/amq-interconnect@sha256:06ac9dd1544cec3885a8362c7685d6c5bb22aab0e46d476480b463b699ebc192", "lastState": {}, "name": "default-interconnect", "ready": true, "resources": {}, "restartCount": 0, "started": true, "state": {"running": {"startedAt": "2025-12-08T17:57:34Z"}}, "user": {"linux": {"gid": 0, "supplementalGroups": [0, 1000670000], "uid": 
1000670000}}, "volumeMounts": [{"mountPath": "/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-credentials", "name": "default-interconnect-openstack-credentials"}, {"mountPath": "/etc/qpid-dispatch-certs/openstack/default-interconnect-openstack-ca", "name": "default-interconnect-openstack-ca"}, {"mountPath": "/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-credentials", "name": "default-interconnect-inter-router-credentials"}, {"mountPath": "/etc/qpid-dispatch-certs/inter-router/default-interconnect-inter-router-ca", "name": "default-interconnect-inter-router-ca"}, {"mountPath": "/etc/qpid-dispatch/sasl-users", "name": "sasl-users"}, {"mountPath": "/etc/sasl2", "name": "sasl-config"}, {"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", "name": "kube-api-access-rhpm6", "readOnly": true, "recursiveReadOnly": "Disabled"}]}], "hostIP": "192.168.126.11", "hostIPs": [{"ip": "192.168.126.11"}], "phase": "Running", "podIP": "10.217.0.73", "podIPs": [{"ip": "10.217.0.73"}], "qosClass": "BestEffort", "startTime": "2025-12-08T17:57:28Z"}}} 2025-12-08T17:58:34.545039954+00:00 stdout F 2025-12-08T17:58:34.545039954+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:34.641192810+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:34Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create QDR instance"} 2025-12-08T17:58:34.641376285+00:00 stdout F 2025-12-08T17:58:34.641376285+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:34.641676292+00:00 stdout F 2025-12-08T17:58:34.641676292+00:00 stdout F TASK [servicetelemetry : Create QDR instance] ********************************** 2025-12-08T17:58:34.641676292+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:268 2025-12-08T17:58:34.648938300+00:00 stdout F 2025-12-08T17:58:34.648938300+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:35.372417519+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:35Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/interconnectedcloud.github.io/v1alpha1/namespaces/service-telemetry/interconnects/default-interconnect","Verb":"get","APIPrefix":"apis","APIGroup":"interconnectedcloud.github.io","APIVersion":"v1alpha1","Namespace":"service-telemetry","Resource":"interconnects","Subresource":"","Name":"default-interconnect","Parts":["interconnects","default-interconnect"]}} 2025-12-08T17:58:35.571820133+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:35Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Setup Certificates for metrics components"} 2025-12-08T17:58:35.571897015+00:00 stdout F 2025-12-08T17:58:35.571897015+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:35.571940106+00:00 stdout F 2025-12-08T17:58:35.571940106+00:00 stdout F TASK [servicetelemetry : Setup Certificates for metrics components] 
************ 2025-12-08T17:58:35.571940106+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:26 2025-12-08T17:58:35.571962586+00:00 stdout F 2025-12-08T17:58:35.571962586+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:35.620957142+00:00 stdout F 2025-12-08T17:58:35.620957142+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:35.620957142+00:00 stdout F 2025-12-08T17:58:35.620957142+00:00 stdout F TASK [servicetelemetry : Create configmap for OAUTH CA certs] ****************** 2025-12-08T17:58:35.620957142+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_certificates.yml:1 2025-12-08T17:58:35.620957142+00:00 stdout F 2025-12-08T17:58:35.620957142+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:35.621000143+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:35Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create configmap for OAUTH CA certs"} 2025-12-08T17:58:36.266721603+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:36Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/configmaps/serving-certs-ca-bundle","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"configmaps","Subresource":"","Name":"serving-certs-ca-bundle","Parts":["configmaps","serving-certs-ca-bundle"]}} 2025-12-08T17:58:36.371135521+00:00 stdout F 2025-12-08T17:58:36.371135521+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:36.371135521+00:00 stdout F 2025-12-08T17:58:36.371135521+00:00 stdout F TASK [servicetelemetry : Check for existing cookie secret] ********************* 2025-12-08T17:58:36.371135521+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_certificates.yml:12 2025-12-08T17:58:36.371135521+00:00 stdout F 2025-12-08T17:58:36.371135521+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:36.371187092+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:36Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Check for existing cookie secret"} 2025-12-08T17:58:37.201364540+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:37Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/default-session-secret","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"default-session-secret","Parts":["secrets","default-session-secret"]}} 2025-12-08T17:58:37.353721487+00:00 stdout F 2025-12-08T17:58:37.353721487+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:37.353721487+00:00 stdout F 2025-12-08T17:58:37.353721487+00:00 stdout F 
TASK [servicetelemetry : Create cookie secret] ********************************* 2025-12-08T17:58:37.353721487+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_certificates.yml:20 2025-12-08T17:58:37.353721487+00:00 stdout F 2025-12-08T17:58:37.353721487+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:37.353762838+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:37Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create cookie secret"} 2025-12-08T17:58:37.470976028+00:00 stdout F 2025-12-08T17:58:37.470976028+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:37.470976028+00:00 stdout F 2025-12-08T17:58:37.470976028+00:00 stdout F TASK [servicetelemetry : Create Prometheus instance] *************************** 2025-12-08T17:58:37.470976028+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:48 2025-12-08T17:58:37.470976028+00:00 stdout F 2025-12-08T17:58:37.470976028+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:37.471007868+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:37Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create Prometheus instance"} 2025-12-08T17:58:37.614943088+00:00 stdout F 2025-12-08T17:58:37.614943088+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:37.614943088+00:00 stdout F 2025-12-08T17:58:37.614943088+00:00 stdout F TASK [servicetelemetry : Create ServiceAccount/prometheus-stf with oauth redirect annotation] *** 2025-12-08T17:58:37.614943088+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:10 2025-12-08T17:58:37.614943088+00:00 stdout F 2025-12-08T17:58:37.614943088+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:37.614972979+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:37Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create ServiceAccount/prometheus-stf with oauth redirect annotation"} 2025-12-08T17:58:38.259262728+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:38Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/serviceaccounts/prometheus-stf","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"serviceaccounts","Subresource":"","Name":"prometheus-stf","Parts":["serviceaccounts","prometheus-stf"]}} 2025-12-08T17:58:38.367327346+00:00 stdout F 2025-12-08T17:58:38.367327346+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:38.367327346+00:00 stdout F 2025-12-08T17:58:38.367327346+00:00 
stdout F TASK [servicetelemetry : Create ClusterRole/prometheus-stf for non-resource URL /metrics access] *** 2025-12-08T17:58:38.367327346+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:21 2025-12-08T17:58:38.367327346+00:00 stdout F 2025-12-08T17:58:38.367327346+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:38.367363827+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:38Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create ClusterRole/prometheus-stf for non-resource URL /metrics access"} 2025-12-08T17:58:39.221978931+00:00 stdout F 2025-12-08T17:58:39.221978931+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:39.221978931+00:00 stdout F 2025-12-08T17:58:39.221978931+00:00 stdout F TASK [servicetelemetry : Create ClusterRoleBinding/prometheus-stf] ************* 2025-12-08T17:58:39.221978931+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:64 2025-12-08T17:58:39.221978931+00:00 stdout F 2025-12-08T17:58:39.221978931+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:39.222010252+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:39Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create ClusterRoleBinding/prometheus-stf"} 2025-12-08T17:58:40.086149346+00:00 stdout F 2025-12-08T17:58:40.086149346+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:40.086149346+00:00 stdout F 2025-12-08T17:58:40.086149346+00:00 stdout F TASK [servicetelemetry : Create Role/prometheus-stf for Prometheus operations] *** 2025-12-08T17:58:40.086149346+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:83 2025-12-08T17:58:40.086149346+00:00 stdout F 2025-12-08T17:58:40.086149346+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:40.086182957+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:40Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create Role/prometheus-stf for Prometheus operations"} 2025-12-08T17:58:40.784465962+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:40Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/roles/prometheus-stf","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"roles","Subresource":"","Name":"prometheus-stf","Parts":["roles","prometheus-stf"]}} 2025-12-08T17:58:40.933864539+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:40Z","logger":"logging_event_handler","msg":"[playbook task 
start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create RoleBinding/prometheus-stf"} 2025-12-08T17:58:40.933942851+00:00 stdout F 2025-12-08T17:58:40.933942851+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:40.933963821+00:00 stdout F 2025-12-08T17:58:40.933963821+00:00 stdout F TASK [servicetelemetry : Create RoleBinding/prometheus-stf] ******************** 2025-12-08T17:58:40.933963821+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:152 2025-12-08T17:58:40.933982882+00:00 stdout F 2025-12-08T17:58:40.933982882+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:41.613782689+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:41Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/rolebindings/prometheus-stf","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"rolebindings","Subresource":"","Name":"prometheus-stf","Parts":["rolebindings","prometheus-stf"]}} 2025-12-08T17:58:41.726941371+00:00 stdout F 2025-12-08T17:58:41.726941371+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:41.726941371+00:00 stdout F 2025-12-08T17:58:41.726941371+00:00 stdout F TASK [servicetelemetry : Remove old ClusterRoleBinding for prometheus-k8s using CMO roleRef] *** 2025-12-08T17:58:41.726941371+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:172 2025-12-08T17:58:41.726941371+00:00 stdout F 2025-12-08T17:58:41.726941371+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:41.726969091+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:41Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Remove old ClusterRoleBinding for prometheus-k8s using CMO roleRef"} 2025-12-08T17:58:42.587472290+00:00 stdout F 2025-12-08T17:58:42.587472290+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:42.587472290+00:00 stdout F 2025-12-08T17:58:42.587472290+00:00 stdout F TASK [Lookup template] ******************************** 2025-12-08T17:58:42.587472290+00:00 stdout F ok: [localhost] => { 2025-12-08T17:58:42.587472290+00:00 stdout F  "msg": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "apiVersion": "monitoring.rhobs/v1", 2025-12-08T17:58:42.587472290+00:00 stdout F  "kind": "Prometheus", 2025-12-08T17:58:42.587472290+00:00 stdout F  "metadata": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "labels": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "app.kubernetes.io/managed-by": "observability-operator", 2025-12-08T17:58:42.587472290+00:00 stdout F  "prometheus": "default" 2025-12-08T17:58:42.587472290+00:00 stdout F  }, 2025-12-08T17:58:42.587472290+00:00 stdout F  "name": "default", 2025-12-08T17:58:42.587472290+00:00 stdout F  "namespace": "service-telemetry" 
2025-12-08T17:58:42.587472290+00:00 stdout F  }, 2025-12-08T17:58:42.587472290+00:00 stdout F  "spec": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "alerting": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "alertmanagers": [ 2025-12-08T17:58:42.587472290+00:00 stdout F  { 2025-12-08T17:58:42.587472290+00:00 stdout F  "bearerTokenFile": "/var/run/secrets/kubernetes.io/serviceaccount/token", 2025-12-08T17:58:42.587472290+00:00 stdout F  "name": "default-alertmanager-proxy", 2025-12-08T17:58:42.587472290+00:00 stdout F  "namespace": "service-telemetry", 2025-12-08T17:58:42.587472290+00:00 stdout F  "port": "web", 2025-12-08T17:58:42.587472290+00:00 stdout F  "scheme": "https", 2025-12-08T17:58:42.587472290+00:00 stdout F  "tlsConfig": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "caFile": "/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt", 2025-12-08T17:58:42.587472290+00:00 stdout F  "serverName": "default-alertmanager-proxy.service-telemetry.svc" 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  ] 2025-12-08T17:58:42.587472290+00:00 stdout F  }, 2025-12-08T17:58:42.587472290+00:00 stdout F  "configMaps": [ 2025-12-08T17:58:42.587472290+00:00 stdout F  "serving-certs-ca-bundle" 2025-12-08T17:58:42.587472290+00:00 stdout F  ], 2025-12-08T17:58:42.587472290+00:00 stdout F  "containers": [ 2025-12-08T17:58:42.587472290+00:00 stdout F  { 2025-12-08T17:58:42.587472290+00:00 stdout F  "args": [ 2025-12-08T17:58:42.587472290+00:00 stdout F  "-https-address=:9092", 2025-12-08T17:58:42.587472290+00:00 stdout F  "-tls-cert=/etc/tls/private/tls.crt", 2025-12-08T17:58:42.587472290+00:00 stdout F  "-tls-key=/etc/tls/private/tls.key", 2025-12-08T17:58:42.587472290+00:00 stdout F  "-upstream=http://localhost:9090/", 2025-12-08T17:58:42.587472290+00:00 stdout F  "-cookie-secret-file=/etc/proxy/secrets/session_secret", 2025-12-08T17:58:42.587472290+00:00 stdout F  "-openshift-service-account=prometheus-stf", 2025-12-08T17:58:42.587472290+00:00 stdout F  "-openshift-sar={\"namespace\":\"service-telemetry\",\"resource\": \"prometheuses\", \"resourceAPIGroup\":\"monitoring.rhobs\", \"verb\":\"get\"}", 2025-12-08T17:58:42.587472290+00:00 stdout F  "-openshift-delegate-urls={\"/\":{\"namespace\":\"service-telemetry\",\"resource\": \"prometheuses\", \"group\":\"monitoring.rhobs\", \"verb\":\"get\"}}" 2025-12-08T17:58:42.587472290+00:00 stdout F  ], 2025-12-08T17:58:42.587472290+00:00 stdout F  "image": "quay.io/openshift/origin-oauth-proxy:latest", 2025-12-08T17:58:42.587472290+00:00 stdout F  "name": "oauth-proxy", 2025-12-08T17:58:42.587472290+00:00 stdout F  "ports": [ 2025-12-08T17:58:42.587472290+00:00 stdout F  { 2025-12-08T17:58:42.587472290+00:00 stdout F  "containerPort": 9092, 2025-12-08T17:58:42.587472290+00:00 stdout F  "name": "https", 2025-12-08T17:58:42.587472290+00:00 stdout F  "protocol": "TCP" 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  ], 2025-12-08T17:58:42.587472290+00:00 stdout F  "volumeMounts": [ 2025-12-08T17:58:42.587472290+00:00 stdout F  { 2025-12-08T17:58:42.587472290+00:00 stdout F  "mountPath": "/etc/tls/private", 2025-12-08T17:58:42.587472290+00:00 stdout F  "name": "secret-default-prometheus-proxy-tls" 2025-12-08T17:58:42.587472290+00:00 stdout F  }, 2025-12-08T17:58:42.587472290+00:00 stdout F  { 2025-12-08T17:58:42.587472290+00:00 stdout F  "mountPath": "/etc/proxy/secrets", 2025-12-08T17:58:42.587472290+00:00 
stdout F  "name": "secret-default-session-secret" 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  ] 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  ], 2025-12-08T17:58:42.587472290+00:00 stdout F  "image": "quay.io/prometheus/prometheus:latest", 2025-12-08T17:58:42.587472290+00:00 stdout F  "listenLocal": true, 2025-12-08T17:58:42.587472290+00:00 stdout F  "podMetadata": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "annotations": null, 2025-12-08T17:58:42.587472290+00:00 stdout F  "labels": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "prometheus": "default" 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  }, 2025-12-08T17:58:42.587472290+00:00 stdout F  "replicas": 1, 2025-12-08T17:58:42.587472290+00:00 stdout F  "retention": "24h", 2025-12-08T17:58:42.587472290+00:00 stdout F  "ruleSelector": {}, 2025-12-08T17:58:42.587472290+00:00 stdout F  "scrapeConfigSelector": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "matchLabels": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "app": "smart-gateway" 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  }, 2025-12-08T17:58:42.587472290+00:00 stdout F  "secrets": [ 2025-12-08T17:58:42.587472290+00:00 stdout F  "default-prometheus-proxy-tls", 2025-12-08T17:58:42.587472290+00:00 stdout F  "default-session-secret" 2025-12-08T17:58:42.587472290+00:00 stdout F  ], 2025-12-08T17:58:42.587472290+00:00 stdout F  "securityContext": {}, 2025-12-08T17:58:42.587472290+00:00 stdout F  "serviceAccountName": "prometheus-stf", 2025-12-08T17:58:42.587472290+00:00 stdout F  "storage": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "volumeClaimTemplate": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "spec": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "resources": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "requests": { 2025-12-08T17:58:42.587472290+00:00 stdout F  "storage": "20G" 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  }, 2025-12-08T17:58:42.587472290+00:00 stdout F  "storageClassName": "crc-csi-hostpath-provisioner" 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  }, 2025-12-08T17:58:42.587472290+00:00 stdout F  "version": null 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F  } 2025-12-08T17:58:42.587472290+00:00 stdout F } 2025-12-08T17:58:42.587472290+00:00 stdout F 2025-12-08T17:58:42.587472290+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:42.587552452+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:42Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"4191063253195287734","EventData.TaskArgs":""} 2025-12-08T17:58:42.676144098+00:00 stdout F 2025-12-08T17:58:42.676144098+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:42.676144098+00:00 stdout F 2025-12-08T17:58:42.676144098+00:00 stdout F TASK [servicetelemetry : Create an instance of Prometheus] ********************* 2025-12-08T17:58:42.676144098+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:191 
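The "Lookup template" dump above is the rendered monitoring.rhobs/v1 Prometheus object that the "Create an instance of Prometheus" task then applies: one replica, 24h retention, a 20G crc-csi-hostpath-provisioner claim, and an oauth-proxy sidecar in front of a listen-local Prometheus. A minimal way to compare the applied object against this template, assuming oc is logged in to the same cluster and namespace shown in the log:

    # Fetch the Prometheus CR in the monitoring.rhobs group used by the template above
    oc get prometheuses.monitoring.rhobs default -n service-telemetry -o yaml
    # Check that the generated pod carries both containers (prometheus + oauth-proxy);
    # the StatefulSet name follows the usual prometheus-<cr-name> convention (an assumption here)
    oc get statefulset prometheus-default -n service-telemetry \
      -o jsonpath='{.spec.template.spec.containers[*].name}'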
2025-12-08T17:58:42.676144098+00:00 stdout F 2025-12-08T17:58:42.676144098+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:42.676198299+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:42Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create an instance of Prometheus"} 2025-12-08T17:58:43.428072305+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:43Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/monitoring.rhobs/v1/namespaces/service-telemetry/prometheuses/default","Verb":"get","APIPrefix":"apis","APIGroup":"monitoring.rhobs","APIVersion":"v1","Namespace":"service-telemetry","Resource":"prometheuses","Subresource":"","Name":"default","Parts":["prometheuses","default"]}} 2025-12-08T17:58:43.575847520+00:00 stdout F 2025-12-08T17:58:43.575847520+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:43.575847520+00:00 stdout F 2025-12-08T17:58:43.575847520+00:00 stdout F TASK [servicetelemetry : Ensure no community Prometheus is installed if not using community operator] *** 2025-12-08T17:58:43.575847520+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:197 2025-12-08T17:58:43.575847520+00:00 stdout F 2025-12-08T17:58:43.575847520+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:43.575893381+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:43Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Ensure no community Prometheus is installed if not using community operator"} 2025-12-08T17:58:44.346033508+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:44Z","logger":"proxy","msg":"Cache miss: monitoring.coreos.com/v1, Kind=Prometheus, service-telemetry/default"} 2025-12-08T17:58:44.465941459+00:00 stdout F 2025-12-08T17:58:44.465941459+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:44.465941459+00:00 stdout F 2025-12-08T17:58:44.465941459+00:00 stdout F TASK [servicetelemetry : Ensure no rhobs Prometheus is installed if not using it] *** 2025-12-08T17:58:44.465941459+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:207 2025-12-08T17:58:44.465941459+00:00 stdout F 2025-12-08T17:58:44.465941459+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:44.466008891+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:44Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Ensure no rhobs Prometheus is installed if not using it"} 2025-12-08T17:58:44.558488808+00:00 stdout F 2025-12-08T17:58:44.558488808+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 
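The two "Ensure no ... Prometheus is installed" tasks that follow clean up whichever Prometheus flavor is not in use; the "Cache miss: monitoring.coreos.com/v1, Kind=Prometheus" entry is the probe for the community object. A quick sketch for checking which API groups actually serve a prometheuses resource, assuming oc access to the cluster:

    # API groups that expose a 'prometheuses' resource
    oc api-resources --api-group=monitoring.rhobs
    oc api-resources --api-group=monitoring.coreos.com
    # With the rhobs flavor in use, only the monitoring.rhobs object should exist in this namespace
    oc get prometheuses.monitoring.rhobs -n service-telemetry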
2025-12-08T17:58:44.558488808+00:00 stdout F 2025-12-08T17:58:44.558488808+00:00 stdout F TASK [servicetelemetry : Create service to access the prometheus proxy] ******** 2025-12-08T17:58:44.558488808+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:223 2025-12-08T17:58:44.558488808+00:00 stdout F 2025-12-08T17:58:44.558488808+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:44.558562980+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:44Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create service to access the prometheus proxy"} 2025-12-08T17:58:45.246756348+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:45Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/services/default-prometheus-proxy","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"services","Subresource":"","Name":"default-prometheus-proxy","Parts":["services","default-prometheus-proxy"]}} 2025-12-08T17:58:45.391232805+00:00 stdout F 2025-12-08T17:58:45.391232805+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:45.391232805+00:00 stdout F 2025-12-08T17:58:45.391232805+00:00 stdout F TASK [servicetelemetry : Create route to access the prometheus proxy] ********** 2025-12-08T17:58:45.391232805+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:234 2025-12-08T17:58:45.391232805+00:00 stdout F 2025-12-08T17:58:45.391232805+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:45.391321267+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:45Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create route to access the prometheus proxy"} 2025-12-08T17:58:46.212336496+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:46Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/route.openshift.io/v1/namespaces/service-telemetry/routes/default-prometheus-proxy","Verb":"get","APIPrefix":"apis","APIGroup":"route.openshift.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"routes","Subresource":"","Name":"default-prometheus-proxy","Parts":["routes","default-prometheus-proxy"]}} 2025-12-08T17:58:46.343477422+00:00 stdout F 2025-12-08T17:58:46.343477422+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:46.343477422+00:00 stdout F 2025-12-08T17:58:46.343477422+00:00 stdout F TASK [servicetelemetry : Create Prometheus read-only user] ********************* 2025-12-08T17:58:46.343477422+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:50 2025-12-08T17:58:46.343477422+00:00 stdout F 2025-12-08T17:58:46.343477422+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:46.343529864+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:58:46Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create Prometheus read-only user"} 2025-12-08T17:58:46.391816936+00:00 stdout F 2025-12-08T17:58:46.391816936+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:46.391816936+00:00 stdout F 2025-12-08T17:58:46.391816936+00:00 stdout F TASK [servicetelemetry : Create ServiceAccount/stf-prometheus-reader] ********** 2025-12-08T17:58:46.391816936+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:1 2025-12-08T17:58:46.391816936+00:00 stdout F 2025-12-08T17:58:46.391816936+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:46.391892848+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:46Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create ServiceAccount/stf-prometheus-reader"} 2025-12-08T17:58:47.132165479+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:47Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/serviceaccounts/stf-prometheus-reader","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"serviceaccounts","Subresource":"","Name":"stf-prometheus-reader","Parts":["serviceaccounts","stf-prometheus-reader"]}} 2025-12-08T17:58:47.274899741+00:00 stdout F 2025-12-08T17:58:47.274899741+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:47.274899741+00:00 stdout F 2025-12-08T17:58:47.274899741+00:00 stdout F TASK [servicetelemetry : Create prometheus-reader Role] ************************ 2025-12-08T17:58:47.274899741+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:11 2025-12-08T17:58:47.274899741+00:00 stdout F 2025-12-08T17:58:47.274899741+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:47.274931842+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:47Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create prometheus-reader Role"} 2025-12-08T17:58:48.027654339+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:48Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/roles/prometheus-reader","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"roles","Subresource":"","Name":"prometheus-reader","Parts":["roles","prometheus-reader"]}} 2025-12-08T17:58:48.150424865+00:00 stdout F 2025-12-08T17:58:48.150424865+00:00 stdout F --------------------------- Ansible Task StdOut 
------------------------------- 2025-12-08T17:58:48.150424865+00:00 stdout F 2025-12-08T17:58:48.150424865+00:00 stdout F TASK [servicetelemetry : Create prometheus-reader RoleBinding for stf-prometheus-reader] *** 2025-12-08T17:58:48.150424865+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:30 2025-12-08T17:58:48.150424865+00:00 stdout F 2025-12-08T17:58:48.150424865+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:48.150466866+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:48Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create prometheus-reader RoleBinding for stf-prometheus-reader"} 2025-12-08T17:58:48.833071267+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:48Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/rolebindings/stf-prometheus-reader","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"rolebindings","Subresource":"","Name":"stf-prometheus-reader","Parts":["rolebindings","stf-prometheus-reader"]}} 2025-12-08T17:58:48.955259707+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:48Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create an access token for stf-prometheus-reader"} 2025-12-08T17:58:48.955426741+00:00 stdout F 2025-12-08T17:58:48.955426741+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:48.955451082+00:00 stdout F 2025-12-08T17:58:48.955451082+00:00 stdout F TASK [servicetelemetry : Create an access token for stf-prometheus-reader] ***** 2025-12-08T17:58:48.955451082+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:47 2025-12-08T17:58:48.955514503+00:00 stdout F 2025-12-08T17:58:48.955514503+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:49.673095187+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:49Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/stf-prometheus-reader-token","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"stf-prometheus-reader-token","Parts":["secrets","stf-prometheus-reader-token"]}} 2025-12-08T17:58:49.823867569+00:00 stdout F 2025-12-08T17:58:49.823867569+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:49.823867569+00:00 stdout F 2025-12-08T17:58:49.823867569+00:00 stdout F TASK [servicetelemetry : Create Alertmanager instance] ************************* 2025-12-08T17:58:49.823867569+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:54 2025-12-08T17:58:49.823867569+00:00 stdout F 2025-12-08T17:58:49.823867569+00:00 stdout F 
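The stf-prometheus-reader ServiceAccount, Role, RoleBinding, and token secret created above provide read-only access to Prometheus through the oauth-proxy; the proxy's -openshift-delegate-urls argument in the template earlier delegates bearer-token authorization for GETs on prometheuses. A sketch of that access path, assuming the default-prometheus-proxy route exists and the token secret is a service-account token secret with a .data.token field:

    # Hypothetical manual check, not part of the CI job itself
    TOKEN=$(oc get secret stf-prometheus-reader-token -n service-telemetry \
      -o jsonpath='{.data.token}' | base64 -d)
    HOST=$(oc get route default-prometheus-proxy -n service-telemetry \
      -o jsonpath='{.spec.host}')
    # Query Prometheus through the oauth-proxy using the reader token
    curl -sk -H "Authorization: Bearer ${TOKEN}" "https://${HOST}/api/v1/query?query=up"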
------------------------------------------------------------------------------- 2025-12-08T17:58:49.823957882+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:49Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create Alertmanager instance"} 2025-12-08T17:58:49.997683580+00:00 stdout F 2025-12-08T17:58:49.997683580+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:49.997683580+00:00 stdout F 2025-12-08T17:58:49.997683580+00:00 stdout F TASK [Lookup template] ******************************** 2025-12-08T17:58:49.997683580+00:00 stdout F ok: [localhost] => { 2025-12-08T17:58:49.997683580+00:00 stdout F  "msg": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "apiVersion": "monitoring.rhobs/v1", 2025-12-08T17:58:49.997683580+00:00 stdout F  "kind": "Alertmanager", 2025-12-08T17:58:49.997683580+00:00 stdout F  "metadata": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "labels": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "alertmanager": "default", 2025-12-08T17:58:49.997683580+00:00 stdout F  "app.kubernetes.io/managed-by": "observability-operator" 2025-12-08T17:58:49.997683580+00:00 stdout F  }, 2025-12-08T17:58:49.997683580+00:00 stdout F  "name": "default", 2025-12-08T17:58:49.997683580+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:58:49.997683580+00:00 stdout F  }, 2025-12-08T17:58:49.997683580+00:00 stdout F  "spec": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "containers": [ 2025-12-08T17:58:49.997683580+00:00 stdout F  { 2025-12-08T17:58:49.997683580+00:00 stdout F  "args": [ 2025-12-08T17:58:49.997683580+00:00 stdout F  "-https-address=:9095", 2025-12-08T17:58:49.997683580+00:00 stdout F  "-tls-cert=/etc/tls/private/tls.crt", 2025-12-08T17:58:49.997683580+00:00 stdout F  "-tls-key=/etc/tls/private/tls.key", 2025-12-08T17:58:49.997683580+00:00 stdout F  "-upstream=http://localhost:9093/", 2025-12-08T17:58:49.997683580+00:00 stdout F  "-cookie-secret-file=/etc/proxy/secrets/session_secret", 2025-12-08T17:58:49.997683580+00:00 stdout F  "-openshift-service-account=alertmanager-stf", 2025-12-08T17:58:49.997683580+00:00 stdout F  "-openshift-sar={\"namespace\":\"service-telemetry\", \"resource\": \"alertmanagers\", \"resourceAPIGroup\":\"monitoring.rhobs\", \"verb\":\"get\"}", 2025-12-08T17:58:49.997683580+00:00 stdout F  "-openshift-delegate-urls={\"/\": {\"namespace\":\"service-telemetry\", \"resource\": \"alertmanagers\", \"group\":\"monitoring.rhobs\", \"verb\":\"get\"}}" 2025-12-08T17:58:49.997683580+00:00 stdout F  ], 2025-12-08T17:58:49.997683580+00:00 stdout F  "image": "quay.io/openshift/origin-oauth-proxy:latest", 2025-12-08T17:58:49.997683580+00:00 stdout F  "name": "oauth-proxy", 2025-12-08T17:58:49.997683580+00:00 stdout F  "ports": [ 2025-12-08T17:58:49.997683580+00:00 stdout F  { 2025-12-08T17:58:49.997683580+00:00 stdout F  "containerPort": 9095, 2025-12-08T17:58:49.997683580+00:00 stdout F  "name": "https", 2025-12-08T17:58:49.997683580+00:00 stdout F  "protocol": "TCP" 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F  ], 2025-12-08T17:58:49.997683580+00:00 stdout F  "volumeMounts": [ 2025-12-08T17:58:49.997683580+00:00 stdout F  { 2025-12-08T17:58:49.997683580+00:00 stdout F  "mountPath": "/etc/tls/private", 
2025-12-08T17:58:49.997683580+00:00 stdout F  "name": "secret-default-alertmanager-proxy-tls" 2025-12-08T17:58:49.997683580+00:00 stdout F  }, 2025-12-08T17:58:49.997683580+00:00 stdout F  { 2025-12-08T17:58:49.997683580+00:00 stdout F  "mountPath": "/etc/proxy/secrets", 2025-12-08T17:58:49.997683580+00:00 stdout F  "name": "secret-default-session-secret" 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F  ] 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F  ], 2025-12-08T17:58:49.997683580+00:00 stdout F  "image": "quay.io/prometheus/alertmanager:latest", 2025-12-08T17:58:49.997683580+00:00 stdout F  "listenLocal": true, 2025-12-08T17:58:49.997683580+00:00 stdout F  "podMetadata": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "labels": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "alertmanager": "default" 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F  }, 2025-12-08T17:58:49.997683580+00:00 stdout F  "replicas": 1, 2025-12-08T17:58:49.997683580+00:00 stdout F  "scrapeConfigSelector": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "matchLabels": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "app": "smart-gateway" 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F  }, 2025-12-08T17:58:49.997683580+00:00 stdout F  "secrets": [ 2025-12-08T17:58:49.997683580+00:00 stdout F  "default-alertmanager-proxy-tls", 2025-12-08T17:58:49.997683580+00:00 stdout F  "default-session-secret" 2025-12-08T17:58:49.997683580+00:00 stdout F  ], 2025-12-08T17:58:49.997683580+00:00 stdout F  "serviceAccountName": "alertmanager-stf", 2025-12-08T17:58:49.997683580+00:00 stdout F  "storage": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "volumeClaimTemplate": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "spec": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "resources": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "requests": { 2025-12-08T17:58:49.997683580+00:00 stdout F  "storage": "20G" 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F  }, 2025-12-08T17:58:49.997683580+00:00 stdout F  "storageClassName": "crc-csi-hostpath-provisioner" 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F  } 2025-12-08T17:58:49.997683580+00:00 stdout F } 2025-12-08T17:58:49.997683580+00:00 stdout F 2025-12-08T17:58:49.997683580+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:49.997775474+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:49Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"4191063253195287734","EventData.TaskArgs":""} 2025-12-08T17:58:50.142935139+00:00 stdout F 2025-12-08T17:58:50.142935139+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:50.142935139+00:00 stdout F 2025-12-08T17:58:50.142935139+00:00 stdout F TASK [Lookup alertmanager configuration template] ******************************** 2025-12-08T17:58:50.142935139+00:00 stdout F ok: [localhost] => { 2025-12-08T17:58:50.142935139+00:00 stdout F  "msg": { 
2025-12-08T17:58:50.142935139+00:00 stdout F  "apiVersion": "v1", 2025-12-08T17:58:50.142935139+00:00 stdout F  "kind": "Secret", 2025-12-08T17:58:50.142935139+00:00 stdout F  "metadata": { 2025-12-08T17:58:50.142935139+00:00 stdout F  "name": "alertmanager-default", 2025-12-08T17:58:50.142935139+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:58:50.142935139+00:00 stdout F  }, 2025-12-08T17:58:50.142935139+00:00 stdout F  "stringData": { 2025-12-08T17:58:50.142935139+00:00 stdout F  "alertmanager.yaml": "global:\n resolve_timeout: 5m\nroute:\n group_by: ['job']\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 12h\n receiver: 'snmp_wh'\nreceivers:\n- name: 'snmp_wh'\n webhook_configs:\n - url: 'http://default-prometheus-webhook-snmp:9099'" 2025-12-08T17:58:50.142935139+00:00 stdout F  }, 2025-12-08T17:58:50.142935139+00:00 stdout F  "type": "Opaque" 2025-12-08T17:58:50.142935139+00:00 stdout F  } 2025-12-08T17:58:50.142935139+00:00 stdout F } 2025-12-08T17:58:50.142935139+00:00 stdout F 2025-12-08T17:58:50.142935139+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:50.142985360+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:50Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"4191063253195287734","EventData.TaskArgs":""} 2025-12-08T17:58:50.219795555+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:50Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create an Alertmanager configuration secret"} 2025-12-08T17:58:50.219843966+00:00 stdout F 2025-12-08T17:58:50.219843966+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:50.219865367+00:00 stdout F 2025-12-08T17:58:50.219865367+00:00 stdout F TASK [servicetelemetry : Create an Alertmanager configuration secret] ********** 2025-12-08T17:58:50.219865367+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:19 2025-12-08T17:58:50.219922658+00:00 stdout F 2025-12-08T17:58:50.219922658+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:50.973023597+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:50Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/alertmanager-default","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"alertmanager-default","Parts":["secrets","alertmanager-default"]}} 2025-12-08T17:58:51.092987198+00:00 stdout F 2025-12-08T17:58:51.092987198+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:51.092987198+00:00 stdout F 2025-12-08T17:58:51.092987198+00:00 stdout F TASK [servicetelemetry : Create an instance of Alertmanager] ******************* 2025-12-08T17:58:51.092987198+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:25 2025-12-08T17:58:51.092987198+00:00 stdout F 2025-12-08T17:58:51.092987198+00:00 stdout F 
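The alertmanager-default Secret above carries the rendered alertmanager.yaml: a single route grouped by job that fans out to the snmp_wh webhook receiver at http://default-prometheus-webhook-snmp:9099. After the secret is applied it can be read back for comparison; a small sketch, assuming oc access to the namespace:

    # Decode the Alertmanager configuration stored in the secret shown above
    oc get secret alertmanager-default -n service-telemetry \
      -o jsonpath='{.data.alertmanager\.yaml}' | base64 -d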
------------------------------------------------------------------------------- 2025-12-08T17:58:51.093017618+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:51Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create an instance of Alertmanager"} 2025-12-08T17:58:51.862049507+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:51Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/monitoring.rhobs/v1/namespaces/service-telemetry/alertmanagers/default","Verb":"get","APIPrefix":"apis","APIGroup":"monitoring.rhobs","APIVersion":"v1","Namespace":"service-telemetry","Resource":"alertmanagers","Subresource":"","Name":"default","Parts":["alertmanagers","default"]}} 2025-12-08T17:58:51.984531626+00:00 stdout F 2025-12-08T17:58:51.984531626+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:51.984531626+00:00 stdout F 2025-12-08T17:58:51.984531626+00:00 stdout F TASK [servicetelemetry : Ensure no community Alertmanager is installed if not using community operator] *** 2025-12-08T17:58:51.984531626+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:31 2025-12-08T17:58:51.984531626+00:00 stdout F 2025-12-08T17:58:51.984531626+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:51.984594657+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:51Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Ensure no community Alertmanager is installed if not using community operator"} 2025-12-08T17:58:52.659450484+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:52Z","logger":"proxy","msg":"Cache miss: monitoring.coreos.com/v1, Kind=Alertmanager, service-telemetry/default"} 2025-12-08T17:58:52.790204709+00:00 stdout F 2025-12-08T17:58:52.790204709+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:52.790204709+00:00 stdout F 2025-12-08T17:58:52.790204709+00:00 stdout F TASK [servicetelemetry : Ensure no rhobs Alertmanager is installed if not using it] *** 2025-12-08T17:58:52.790204709+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:41 2025-12-08T17:58:52.790204709+00:00 stdout F 2025-12-08T17:58:52.790204709+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:52.790253860+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:52Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Ensure no rhobs Alertmanager is installed if not using it"} 2025-12-08T17:58:52.834686491+00:00 stdout F 2025-12-08T17:58:52.834686491+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:52.834686491+00:00 stdout F 2025-12-08T17:58:52.834686491+00:00 
stdout F TASK [servicetelemetry : Create SNMP traps instance] *************************** 2025-12-08T17:58:52.834686491+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:54 2025-12-08T17:58:52.834686491+00:00 stdout F 2025-12-08T17:58:52.834686491+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:52.834731603+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:52Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create SNMP traps instance"} 2025-12-08T17:58:52.994345619+00:00 stdout F 2025-12-08T17:58:52.994345619+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:52.994345619+00:00 stdout F 2025-12-08T17:58:52.994345619+00:00 stdout F TASK [Lookup template] ******************************** 2025-12-08T17:58:52.994345619+00:00 stdout F ok: [localhost] => { 2025-12-08T17:58:52.994345619+00:00 stdout F  "msg": { 2025-12-08T17:58:52.994345619+00:00 stdout F  "apiVersion": "apps/v1", 2025-12-08T17:58:52.994345619+00:00 stdout F  "kind": "Deployment", 2025-12-08T17:58:52.994345619+00:00 stdout F  "metadata": { 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "default-snmp-webhook", 2025-12-08T17:58:52.994345619+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 stdout F  "spec": { 2025-12-08T17:58:52.994345619+00:00 stdout F  "replicas": 1, 2025-12-08T17:58:52.994345619+00:00 stdout F  "selector": { 2025-12-08T17:58:52.994345619+00:00 stdout F  "matchLabels": { 2025-12-08T17:58:52.994345619+00:00 stdout F  "app": "default-snmp-webhook" 2025-12-08T17:58:52.994345619+00:00 stdout F  } 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 stdout F  "template": { 2025-12-08T17:58:52.994345619+00:00 stdout F  "metadata": { 2025-12-08T17:58:52.994345619+00:00 stdout F  "labels": { 2025-12-08T17:58:52.994345619+00:00 stdout F  "app": "default-snmp-webhook" 2025-12-08T17:58:52.994345619+00:00 stdout F  } 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 stdout F  "spec": { 2025-12-08T17:58:52.994345619+00:00 stdout F  "containers": [ 2025-12-08T17:58:52.994345619+00:00 stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "env": [ 2025-12-08T17:58:52.994345619+00:00 stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "SNMP_COMMUNITY", 2025-12-08T17:58:52.994345619+00:00 stdout F  "value": "public" 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "SNMP_RETRIES", 2025-12-08T17:58:52.994345619+00:00 stdout F  "value": "5" 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "SNMP_HOST", 2025-12-08T17:58:52.994345619+00:00 stdout F  "value": "192.168.24.254" 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "SNMP_PORT", 2025-12-08T17:58:52.994345619+00:00 stdout F  "value": "162" 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 
stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "SNMP_TIMEOUT", 2025-12-08T17:58:52.994345619+00:00 stdout F  "value": "1" 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "ALERT_OID_LABEL", 2025-12-08T17:58:52.994345619+00:00 stdout F  "value": "oid" 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "TRAP_OID_PREFIX", 2025-12-08T17:58:52.994345619+00:00 stdout F  "value": "1.3.6.1.4.1.50495.15" 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "TRAP_DEFAULT_OID", 2025-12-08T17:58:52.994345619+00:00 stdout F  "value": "1.3.6.1.4.1.50495.15.1.2.1" 2025-12-08T17:58:52.994345619+00:00 stdout F  }, 2025-12-08T17:58:52.994345619+00:00 stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "TRAP_DEFAULT_SEVERITY", 2025-12-08T17:58:52.994345619+00:00 stdout F  "value": "" 2025-12-08T17:58:52.994345619+00:00 stdout F  } 2025-12-08T17:58:52.994345619+00:00 stdout F  ], 2025-12-08T17:58:52.994345619+00:00 stdout F  "image": "quay.io/infrawatch/prometheus-webhook-snmp:latest", 2025-12-08T17:58:52.994345619+00:00 stdout F  "name": "prometheus-webhook-snmp", 2025-12-08T17:58:52.994345619+00:00 stdout F  "ports": [ 2025-12-08T17:58:52.994345619+00:00 stdout F  { 2025-12-08T17:58:52.994345619+00:00 stdout F  "containerPort": 9099 2025-12-08T17:58:52.994345619+00:00 stdout F  } 2025-12-08T17:58:52.994345619+00:00 stdout F  ] 2025-12-08T17:58:52.994345619+00:00 stdout F  } 2025-12-08T17:58:52.994345619+00:00 stdout F  ] 2025-12-08T17:58:52.994345619+00:00 stdout F  } 2025-12-08T17:58:52.994345619+00:00 stdout F  } 2025-12-08T17:58:52.994345619+00:00 stdout F  } 2025-12-08T17:58:52.994345619+00:00 stdout F  } 2025-12-08T17:58:52.994345619+00:00 stdout F } 2025-12-08T17:58:52.994345619+00:00 stdout F 2025-12-08T17:58:52.994345619+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:52.994422121+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:52Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"4191063253195287734","EventData.TaskArgs":""} 2025-12-08T17:58:53.066707577+00:00 stdout F 2025-12-08T17:58:53.066707577+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:53.066707577+00:00 stdout F 2025-12-08T17:58:53.066707577+00:00 stdout F TASK [servicetelemetry : Create an instance of snmp webhook] ******************* 2025-12-08T17:58:53.066707577+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_snmp_traps.yml:10 2025-12-08T17:58:53.066707577+00:00 stdout F 2025-12-08T17:58:53.066707577+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:53.066738477+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:53Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create an instance of snmp webhook"} 
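The Deployment template above defines the prometheus-webhook-snmp receiver with its SNMP_* and TRAP_* environment (community public, target 192.168.24.254:162, OID prefix 1.3.6.1.4.1.50495.15), listening on port 9099 for the Alertmanager webhook configured earlier. Once the "Create an instance of snmp webhook" task has applied it, rollout and effective environment can be confirmed; a minimal check, assuming oc access:

    # Wait for the webhook Deployment to roll out
    oc rollout status deployment/default-snmp-webhook -n service-telemetry
    # Print the environment exactly as rendered in the template above
    oc set env deployment/default-snmp-webhook -n service-telemetry --list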
2025-12-08T17:58:53.793279475+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:53Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/apps/v1/namespaces/service-telemetry/deployments/default-snmp-webhook","Verb":"get","APIPrefix":"apis","APIGroup":"apps","APIVersion":"v1","Namespace":"service-telemetry","Resource":"deployments","Subresource":"","Name":"default-snmp-webhook","Parts":["deployments","default-snmp-webhook"]}} 2025-12-08T17:58:54.014657300+00:00 stdout F 2025-12-08T17:58:54.014657300+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:54.014657300+00:00 stdout F 2025-12-08T17:58:54.014657300+00:00 stdout F TASK [servicetelemetry : Create an instance of snmp webhook service] *********** 2025-12-08T17:58:54.014657300+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_snmp_traps.yml:20 2025-12-08T17:58:54.014657300+00:00 stdout F 2025-12-08T17:58:54.014657300+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:54.014981429+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:54Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create an instance of snmp webhook service"} 2025-12-08T17:58:54.719166408+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:54Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/services/default-prometheus-webhook-snmp","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"services","Subresource":"","Name":"default-prometheus-webhook-snmp","Parts":["services","default-prometheus-webhook-snmp"]}} 2025-12-08T17:58:54.888154582+00:00 stdout F 2025-12-08T17:58:54.888154582+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:54.888154582+00:00 stdout F 2025-12-08T17:58:54.888154582+00:00 stdout F TASK [servicetelemetry : Create ServiceAccount/alertmanager-stf with oauth redirect annotation] *** 2025-12-08T17:58:54.888154582+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:70 2025-12-08T17:58:54.888154582+00:00 stdout F 2025-12-08T17:58:54.888154582+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:54.888211723+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:54Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create ServiceAccount/alertmanager-stf with oauth redirect annotation"} 2025-12-08T17:58:55.671071896+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:55Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/serviceaccounts/alertmanager-stf","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"serviceaccounts","Subresource":"","Name":"alertmanager-stf","Parts":["serviceaccounts","alertmanager-stf"]}} 2025-12-08T17:58:55.808062647+00:00 stdout F 2025-12-08T17:58:55.808062647+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:55.808062647+00:00 stdout F 2025-12-08T17:58:55.808062647+00:00 stdout F TASK [servicetelemetry : Create ClusterRole/alertmanager-stf] ****************** 2025-12-08T17:58:55.808062647+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:81 2025-12-08T17:58:55.808062647+00:00 stdout F 2025-12-08T17:58:55.808062647+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:55.808129879+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:55Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create ClusterRole/alertmanager-stf"} 2025-12-08T17:58:56.665922696+00:00 stdout F 2025-12-08T17:58:56.665922696+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:56.665922696+00:00 stdout F 2025-12-08T17:58:56.665922696+00:00 stdout F TASK [servicetelemetry : Create ClusterRoleBinding/alertmanager-stf] *********** 2025-12-08T17:58:56.665922696+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:120 2025-12-08T17:58:56.665922696+00:00 stdout F 2025-12-08T17:58:56.665922696+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:56.665972457+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:56Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create ClusterRoleBinding/alertmanager-stf"} 2025-12-08T17:58:57.924267260+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:57Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create Role/alertmanager-stf"} 2025-12-08T17:58:57.924378983+00:00 stdout F 2025-12-08T17:58:57.924378983+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:57.924401224+00:00 stdout F 2025-12-08T17:58:57.924401224+00:00 stdout F TASK [servicetelemetry : Create Role/alertmanager-stf] ************************* 2025-12-08T17:58:57.924401224+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:139 2025-12-08T17:58:57.924421374+00:00 stdout F 2025-12-08T17:58:57.924421374+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:58.581460441+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:58Z","logger":"proxy","msg":"Read 
object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/roles/alertmanager-stf","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"roles","Subresource":"","Name":"alertmanager-stf","Parts":["roles","alertmanager-stf"]}} 2025-12-08T17:58:58.722900249+00:00 stdout F 2025-12-08T17:58:58.722900249+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:58.722900249+00:00 stdout F 2025-12-08T17:58:58.722900249+00:00 stdout F TASK [servicetelemetry : Create RoleBinding/alertmanager-stf] ****************** 2025-12-08T17:58:58.722900249+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:177 2025-12-08T17:58:58.722900249+00:00 stdout F 2025-12-08T17:58:58.722900249+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:58.722936810+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:58Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create RoleBinding/alertmanager-stf"} 2025-12-08T17:58:59.465493630+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:59Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/rolebindings/alertmanager-stf","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"rolebindings","Subresource":"","Name":"alertmanager-stf","Parts":["rolebindings","alertmanager-stf"]}} 2025-12-08T17:58:59.651636657+00:00 stdout F 2025-12-08T17:58:59.651636657+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:58:59.651636657+00:00 stdout F 2025-12-08T17:58:59.651636657+00:00 stdout F TASK [servicetelemetry : Create service to access the Alertmanager proxy] ****** 2025-12-08T17:58:59.651636657+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:202 2025-12-08T17:58:59.651636657+00:00 stdout F 2025-12-08T17:58:59.651636657+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:58:59.651672328+00:00 stderr F {"level":"info","ts":"2025-12-08T17:58:59Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create service to access the Alertmanager proxy"} 2025-12-08T17:59:00.346756807+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:00Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/services/default-alertmanager-proxy","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"services","Subresource":"","Name":"default-alertmanager-proxy","Parts":["services","default-alertmanager-proxy"]}} 2025-12-08T17:59:00.508510599+00:00 stdout F 2025-12-08T17:59:00.508510599+00:00 stdout F 
--------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:00.508510599+00:00 stdout F 2025-12-08T17:59:00.508510599+00:00 stdout F TASK [servicetelemetry : Create route to access the Alertmanager proxy] ******** 2025-12-08T17:59:00.508510599+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:213 2025-12-08T17:59:00.508510599+00:00 stdout F 2025-12-08T17:59:00.508510599+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:00.508841739+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:00Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create route to access the Alertmanager proxy"} 2025-12-08T17:59:01.241294193+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:01Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/route.openshift.io/v1/namespaces/service-telemetry/routes/default-alertmanager-proxy","Verb":"get","APIPrefix":"apis","APIGroup":"route.openshift.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"routes","Subresource":"","Name":"default-alertmanager-proxy","Parts":["routes","default-alertmanager-proxy"]}} 2025-12-08T17:59:01.412578377+00:00 stdout F 2025-12-08T17:59:01.412578377+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:01.412578377+00:00 stdout F 2025-12-08T17:59:01.412578377+00:00 stdout F TASK [servicetelemetry : Setup Certificates for ElasticSearch] ***************** 2025-12-08T17:59:01.412578377+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:67 2025-12-08T17:59:01.412578377+00:00 stdout F 2025-12-08T17:59:01.412578377+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:01.412626218+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:01Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Setup Certificates for ElasticSearch"} 2025-12-08T17:59:01.450510327+00:00 stdout F 2025-12-08T17:59:01.450510327+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:01.450510327+00:00 stdout F 2025-12-08T17:59:01.450510327+00:00 stdout F TASK [servicetelemetry : Setup ElasticSearch] ********************************** 2025-12-08T17:59:01.450510327+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:70 2025-12-08T17:59:01.450510327+00:00 stdout F 2025-12-08T17:59:01.450510327+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:01.450543898+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:01Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Setup ElasticSearch"} 2025-12-08T17:59:01.533164745+00:00 stdout F 
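By this point the role has finished the Alertmanager wiring (the snmp webhook Deployment and Service, the alertmanager-stf ServiceAccount, ClusterRole, ClusterRoleBinding, Role and RoleBinding, and the default-alertmanager-proxy Service and Route) and is moving on to the ElasticSearch certificate setup. A quick spot check of the externally reachable Alertmanager endpoint, assuming the same cluster and namespace as this run, could be:

    oc -n service-telemetry get route default-alertmanager-proxy -o jsonpath='{.spec.host}'
    oc -n service-telemetry get service default-prometheus-webhook-snmp

Both object names are taken from the task output above; the commands are only a sketch for reading back what the operator created.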
2025-12-08T17:59:01.533164745+00:00 stdout F --------------------------- Ansible Task StdOut -------------------------------
2025-12-08T17:59:01.533164745+00:00 stdout F 
2025-12-08T17:59:01.533164745+00:00 stdout F TASK [Get data about clouds] ********************************
2025-12-08T17:59:01.533164745+00:00 stdout F ok: [localhost] => {
2025-12-08T17:59:01.533164745+00:00 stdout F     "servicetelemetry_vars.clouds": [
2025-12-08T17:59:01.533164745+00:00 stdout F         {
2025-12-08T17:59:01.533164745+00:00 stdout F             "events": {
2025-12-08T17:59:01.533164745+00:00 stdout F                 "collectors": [
2025-12-08T17:59:01.533164745+00:00 stdout F                     {
2025-12-08T17:59:01.533164745+00:00 stdout F                         "bridge": {
2025-12-08T17:59:01.533164745+00:00 stdout F                             "ring_buffer_count": 15000,
2025-12-08T17:59:01.533164745+00:00 stdout F                             "ring_buffer_size": 16384,
2025-12-08T17:59:01.533164745+00:00 stdout F                             "verbose": false
2025-12-08T17:59:01.533164745+00:00 stdout F                         },
2025-12-08T17:59:01.533164745+00:00 stdout F                         "collector_type": "collectd",
2025-12-08T17:59:01.533164745+00:00 stdout F                         "debug_enabled": false,
2025-12-08T17:59:01.533164745+00:00 stdout F                         "subscription_address": "collectd/cloud1-notify"
2025-12-08T17:59:01.533164745+00:00 stdout F                     },
2025-12-08T17:59:01.533164745+00:00 stdout F                     {
2025-12-08T17:59:01.533164745+00:00 stdout F                         "bridge": {
2025-12-08T17:59:01.533164745+00:00 stdout F                             "ring_buffer_count": 15000,
2025-12-08T17:59:01.533164745+00:00 stdout F                             "ring_buffer_size": 16384,
2025-12-08T17:59:01.533164745+00:00 stdout F                             "verbose": false
2025-12-08T17:59:01.533164745+00:00 stdout F                         },
2025-12-08T17:59:01.533164745+00:00 stdout F                         "collector_type": "ceilometer",
2025-12-08T17:59:01.533164745+00:00 stdout F                         "debug_enabled": false,
2025-12-08T17:59:01.533164745+00:00 stdout F                         "subscription_address": "anycast/ceilometer/cloud1-event.sample"
2025-12-08T17:59:01.533164745+00:00 stdout F                     }
2025-12-08T17:59:01.533164745+00:00 stdout F                 ]
2025-12-08T17:59:01.533164745+00:00 stdout F             },
2025-12-08T17:59:01.533164745+00:00 stdout F             "metrics": {
2025-12-08T17:59:01.533164745+00:00 stdout F                 "collectors": [
2025-12-08T17:59:01.533164745+00:00 stdout F                     {
2025-12-08T17:59:01.533164745+00:00 stdout F                         "bridge": {
2025-12-08T17:59:01.533164745+00:00 stdout F                             "ring_buffer_count": 15000,
2025-12-08T17:59:01.533164745+00:00 stdout F                             "ring_buffer_size": 16384,
2025-12-08T17:59:01.533164745+00:00 stdout F                             "verbose": false
2025-12-08T17:59:01.533164745+00:00 stdout F                         },
2025-12-08T17:59:01.533164745+00:00 stdout F                         "collector_type": "collectd",
2025-12-08T17:59:01.533164745+00:00 stdout F                         "debug_enabled": false,
2025-12-08T17:59:01.533164745+00:00 stdout F                         "subscription_address": "collectd/cloud1-telemetry"
2025-12-08T17:59:01.533164745+00:00 stdout F                     },
2025-12-08T17:59:01.533164745+00:00 stdout F                     {
2025-12-08T17:59:01.533164745+00:00 stdout F                         "bridge": {
2025-12-08T17:59:01.533164745+00:00 stdout F                             "ring_buffer_count": 15000,
2025-12-08T17:59:01.533164745+00:00 stdout F                             "ring_buffer_size": 16384,
2025-12-08T17:59:01.533164745+00:00 stdout F                             "verbose": false
2025-12-08T17:59:01.533164745+00:00 stdout F                         },
2025-12-08T17:59:01.533164745+00:00 stdout F                         "collector_type": "ceilometer",
2025-12-08T17:59:01.533164745+00:00 stdout F                         "debug_enabled": false,
2025-12-08T17:59:01.533164745+00:00 stdout F                         "subscription_address": "anycast/ceilometer/cloud1-metering.sample"
2025-12-08T17:59:01.533164745+00:00 stdout F                     },
2025-12-08T17:59:01.533164745+00:00 stdout F                     {
2025-12-08T17:59:01.533164745+00:00 stdout F                         "bridge": {
2025-12-08T17:59:01.533164745+00:00 stdout F                             "ring_buffer_count": 15000,
2025-12-08T17:59:01.533164745+00:00 stdout F                             "ring_buffer_size": 65535,
2025-12-08T17:59:01.533164745+00:00 stdout F                             "verbose": false
2025-12-08T17:59:01.533164745+00:00 stdout F                         },
2025-12-08T17:59:01.533164745+00:00 stdout F                         "collector_type": "sensubility",
2025-12-08T17:59:01.533164745+00:00 stdout F                         "debug_enabled": false,
2025-12-08T17:59:01.533164745+00:00 stdout F                         "subscription_address": "sensubility/cloud1-telemetry"
2025-12-08T17:59:01.533164745+00:00 stdout F                     }
2025-12-08T17:59:01.533164745+00:00 stdout F                 ]
2025-12-08T17:59:01.533164745+00:00 stdout F             },
2025-12-08T17:59:01.533164745+00:00 stdout F             "name": "cloud1"
2025-12-08T17:59:01.533164745+00:00 stdout F         }
2025-12-08T17:59:01.533164745+00:00 stdout F     ]
2025-12-08T17:59:01.533164745+00:00 stdout F }
2025-12-08T17:59:01.533164745+00:00 stdout F 
2025-12-08T17:59:01.533164745+00:00 stdout F -------------------------------------------------------------------------------
2025-12-08T17:59:01.533243077+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:01Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"4191063253195287734","EventData.TaskArgs":""}
2025-12-08T17:59:01.535212709+00:00 stdout F 
2025-12-08T17:59:01.535212709+00:00 stdout F --------------------------- Ansible Task StdOut -------------------------------
2025-12-08T17:59:01.535212709+00:00 stdout F 
2025-12-08T17:59:01.535212709+00:00 stdout F TASK [servicetelemetry : Loop through cloud instances to setup transport receivers] ***
2025-12-08T17:59:01.535212709+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:83
2025-12-08T17:59:01.535212709+00:00 stdout F 
2025-12-08T17:59:01.535212709+00:00 stdout F -------------------------------------------------------------------------------
2025-12-08T17:59:01.535228229+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:01Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Loop through cloud instances to setup transport receivers"}
2025-12-08T17:59:01.646548094+00:00 stdout F 
2025-12-08T17:59:01.646548094+00:00 stdout F --------------------------- Ansible Task StdOut -------------------------------
2025-12-08T17:59:01.646548094+00:00 stdout F 
2025-12-08T17:59:01.646548094+00:00 stdout F TASK [Cloud collector setup] ********************************
2025-12-08T17:59:01.646548094+00:00 stdout F ok: [localhost] => {
2025-12-08T17:59:01.646548094+00:00 stdout F     "msg": "Working on cloud {'name': 'cloud1', 'metrics': {'collectors': [{'collector_type': 'collectd', 'subscription_address': 'collectd/cloud1-telemetry', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}, {'collector_type': 'ceilometer', 'subscription_address': 'anycast/ceilometer/cloud1-metering.sample', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}, {'collector_type': 'sensubility', 'subscription_address': 'sensubility/cloud1-telemetry', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 65535, 'ring_buffer_count': 15000, 'verbose': False}}]}, 'events': 
{'collectors': [{'collector_type': 'collectd', 'subscription_address': 'collectd/cloud1-notify', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}, {'collector_type': 'ceilometer', 'subscription_address': 'anycast/ceilometer/cloud1-event.sample', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}]}} to setup metrics and events Smart Gateways\n" 2025-12-08T17:59:01.646548094+00:00 stdout F } 2025-12-08T17:59:01.646548094+00:00 stdout F 2025-12-08T17:59:01.646548094+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:01.646611035+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:01Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"4191063253195287734","EventData.TaskArgs":""} 2025-12-08T17:59:01.649375958+00:00 stdout F 2025-12-08T17:59:01.649375958+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:01.649375958+00:00 stdout F 2025-12-08T17:59:01.649375958+00:00 stdout F TASK [servicetelemetry : Deploy Metrics Smart Gateway instance for each collector] *** 2025-12-08T17:59:01.649375958+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:6 2025-12-08T17:59:01.649375958+00:00 stdout F 2025-12-08T17:59:01.649375958+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:01.649430360+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:01Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy Metrics Smart Gateway instance for each collector"} 2025-12-08T17:59:01.742356298+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:01Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T17:59:01.742522753+00:00 stdout F 2025-12-08T17:59:01.742522753+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:01.742553804+00:00 stdout F 2025-12-08T17:59:01.742553804+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T17:59:01.742553804+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T17:59:01.742604955+00:00 stdout F 2025-12-08T17:59:01.742604955+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:02.647851413+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:02Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways/default-cloud1-coll-meter","Verb":"get","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"default-cloud1-coll-meter","Parts":["smartgateways","default-cloud1-coll-meter"]}} 2025-12-08T17:59:02.781049594+00:00 stdout F 2025-12-08T17:59:02.781049594+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:02.781049594+00:00 stdout F 2025-12-08T17:59:02.781049594+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T17:59:02.781049594+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T17:59:02.781049594+00:00 stdout F 2025-12-08T17:59:02.781049594+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:02.781093995+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:02Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T17:59:02.824006296+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:02Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs"} 2025-12-08T17:59:02.824072248+00:00 stdout F 2025-12-08T17:59:02.824072248+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:02.824100548+00:00 stdout F 2025-12-08T17:59:02.824100548+00:00 stdout F TASK [servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs] *** 2025-12-08T17:59:02.824100548+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:1 2025-12-08T17:59:02.824119499+00:00 stdout F 2025-12-08T17:59:02.824119499+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:03.495940955+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:03Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/prometheus-stf-token","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"prometheus-stf-token","Parts":["secrets","prometheus-stf-token"]}} 2025-12-08T17:59:03.681527126+00:00 stdout F 2025-12-08T17:59:03.681527126+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:03.681527126+00:00 stdout F 2025-12-08T17:59:03.681527126+00:00 stdout F TASK [servicetelemetry : Create ScrapeConfig to scrape Smart Gateway] ********** 2025-12-08T17:59:03.681527126+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:64 2025-12-08T17:59:03.681527126+00:00 stdout F 2025-12-08T17:59:03.681527126+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T17:59:03.681599558+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:03Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create ScrapeConfig to scrape Smart Gateway"} 2025-12-08T17:59:04.397403054+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:04Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/monitoring.rhobs/v1alpha1/namespaces/service-telemetry/scrapeconfigs/default-cloud1-coll-meter","Verb":"get","APIPrefix":"apis","APIGroup":"monitoring.rhobs","APIVersion":"v1alpha1","Namespace":"service-telemetry","Resource":"scrapeconfigs","Subresource":"","Name":"default-cloud1-coll-meter","Parts":["scrapeconfigs","default-cloud1-coll-meter"]}} 2025-12-08T17:59:04.523061556+00:00 stdout F 2025-12-08T17:59:04.523061556+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:04.523061556+00:00 stdout F 2025-12-08T17:59:04.523061556+00:00 stdout F TASK [servicetelemetry : Create additional ScrapeConfig if provided] *********** 2025-12-08T17:59:04.523061556+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:70 2025-12-08T17:59:04.523061556+00:00 stdout F 2025-12-08T17:59:04.523061556+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:04.523124328+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:04Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create additional ScrapeConfig if provided"} 2025-12-08T17:59:04.561603701+00:00 stdout F 2025-12-08T17:59:04.561603701+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:04.561603701+00:00 stdout F 2025-12-08T17:59:04.561603701+00:00 stdout F TASK [servicetelemetry : Create additional ServiceMonitor if provided (legacy)] *** 2025-12-08T17:59:04.561603701+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:77 2025-12-08T17:59:04.561603701+00:00 stdout F 2025-12-08T17:59:04.561603701+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:04.561677383+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:04Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create additional ServiceMonitor if provided (legacy)"} 2025-12-08T17:59:04.605176850+00:00 stdout F 2025-12-08T17:59:04.605176850+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:04.605176850+00:00 stdout F 2025-12-08T17:59:04.605176850+00:00 stdout F TASK [servicetelemetry : Remove (legacy) default ServiceMonitors] ************** 2025-12-08T17:59:04.605176850+00:00 stdout F task path: 
/opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:84 2025-12-08T17:59:04.605176850+00:00 stdout F 2025-12-08T17:59:04.605176850+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:04.605220711+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:04Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Remove (legacy) default ServiceMonitors"} 2025-12-08T17:59:05.299008466+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:05Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=ServiceMonitor, service-telemetry/default-cloud1-coll-meter"} 2025-12-08T17:59:05.445823425+00:00 stdout F 2025-12-08T17:59:05.445823425+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:05.445823425+00:00 stdout F 2025-12-08T17:59:05.445823425+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T17:59:05.445823425+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T17:59:05.445823425+00:00 stdout F 2025-12-08T17:59:05.445823425+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:05.445867967+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:05Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T17:59:06.470140602+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:06Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways/default-cloud1-ceil-meter","Verb":"get","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"default-cloud1-ceil-meter","Parts":["smartgateways","default-cloud1-ceil-meter"]}} 2025-12-08T17:59:06.600826526+00:00 stdout F 2025-12-08T17:59:06.600826526+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:06.600826526+00:00 stdout F 2025-12-08T17:59:06.600826526+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T17:59:06.600826526+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T17:59:06.600826526+00:00 stdout F 2025-12-08T17:59:06.600826526+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:06.600904438+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:06Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T17:59:06.637162604+00:00 stdout F 2025-12-08T17:59:06.637162604+00:00 stdout F 
--------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:06.637162604+00:00 stdout F 2025-12-08T17:59:06.637162604+00:00 stdout F TASK [servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs] *** 2025-12-08T17:59:06.637162604+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:1 2025-12-08T17:59:06.637162604+00:00 stdout F 2025-12-08T17:59:06.637162604+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:06.637215745+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:06Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs"} 2025-12-08T17:59:07.343755736+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:07Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/prometheus-stf-token","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"prometheus-stf-token","Parts":["secrets","prometheus-stf-token"]}} 2025-12-08T17:59:07.507557044+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:07Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create ScrapeConfig to scrape Smart Gateway"} 2025-12-08T17:59:07.507657516+00:00 stdout F 2025-12-08T17:59:07.507657516+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:07.507704907+00:00 stdout F 2025-12-08T17:59:07.507704907+00:00 stdout F TASK [servicetelemetry : Create ScrapeConfig to scrape Smart Gateway] ********** 2025-12-08T17:59:07.507704907+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:64 2025-12-08T17:59:07.507749269+00:00 stdout F 2025-12-08T17:59:07.507749269+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:08.168573535+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:08Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/monitoring.rhobs/v1alpha1/namespaces/service-telemetry/scrapeconfigs/default-cloud1-ceil-meter","Verb":"get","APIPrefix":"apis","APIGroup":"monitoring.rhobs","APIVersion":"v1alpha1","Namespace":"service-telemetry","Resource":"scrapeconfigs","Subresource":"","Name":"default-cloud1-ceil-meter","Parts":["scrapeconfigs","default-cloud1-ceil-meter"]}} 2025-12-08T17:59:08.289152943+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:08Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create additional ScrapeConfig if provided"} 2025-12-08T17:59:08.289208295+00:00 stdout F 2025-12-08T17:59:08.289208295+00:00 stdout F --------------------------- Ansible Task 
StdOut ------------------------------- 2025-12-08T17:59:08.289236085+00:00 stdout F 2025-12-08T17:59:08.289236085+00:00 stdout F TASK [servicetelemetry : Create additional ScrapeConfig if provided] *********** 2025-12-08T17:59:08.289236085+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:70 2025-12-08T17:59:08.289256526+00:00 stdout F 2025-12-08T17:59:08.289256526+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:08.315826035+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:08Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create additional ServiceMonitor if provided (legacy)"} 2025-12-08T17:59:08.315911618+00:00 stdout F 2025-12-08T17:59:08.315911618+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:08.315934898+00:00 stdout F 2025-12-08T17:59:08.315934898+00:00 stdout F TASK [servicetelemetry : Create additional ServiceMonitor if provided (legacy)] *** 2025-12-08T17:59:08.315934898+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:77 2025-12-08T17:59:08.315953799+00:00 stdout F 2025-12-08T17:59:08.315953799+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:08.344851321+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:08Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Remove (legacy) default ServiceMonitors"} 2025-12-08T17:59:08.344913073+00:00 stdout F 2025-12-08T17:59:08.344913073+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:08.344937553+00:00 stdout F 2025-12-08T17:59:08.344937553+00:00 stdout F TASK [servicetelemetry : Remove (legacy) default ServiceMonitors] ************** 2025-12-08T17:59:08.344937553+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:84 2025-12-08T17:59:08.344956384+00:00 stdout F 2025-12-08T17:59:08.344956384+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:09.077956883+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:09Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=ServiceMonitor, service-telemetry/default-cloud1-ceil-meter"} 2025-12-08T17:59:09.264184050+00:00 stdout F 2025-12-08T17:59:09.264184050+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:09.264184050+00:00 stdout F 2025-12-08T17:59:09.264184050+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T17:59:09.264184050+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T17:59:09.264184050+00:00 stdout F 2025-12-08T17:59:09.264184050+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:09.264219271+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:59:09Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T17:59:09.961309124+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:09Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways/default-cloud1-sens-meter","Verb":"get","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"default-cloud1-sens-meter","Parts":["smartgateways","default-cloud1-sens-meter"]}} 2025-12-08T17:59:10.083503974+00:00 stdout F 2025-12-08T17:59:10.083503974+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:10.083503974+00:00 stdout F 2025-12-08T17:59:10.083503974+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T17:59:10.083503974+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T17:59:10.083503974+00:00 stdout F 2025-12-08T17:59:10.083503974+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:10.083558135+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:10Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T17:59:10.145230630+00:00 stdout F 2025-12-08T17:59:10.145230630+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:10.145230630+00:00 stdout F 2025-12-08T17:59:10.145230630+00:00 stdout F TASK [servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs] *** 2025-12-08T17:59:10.145230630+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:1 2025-12-08T17:59:10.145230630+00:00 stdout F 2025-12-08T17:59:10.145230630+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:10.145266311+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:10Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs"} 2025-12-08T17:59:10.901417970+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:10Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/prometheus-stf-token","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"prometheus-stf-token","Parts":["secrets","prometheus-stf-token"]}} 2025-12-08T17:59:11.064282234+00:00 stdout F 2025-12-08T17:59:11.064282234+00:00 stdout F 
--------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:11.064282234+00:00 stdout F 2025-12-08T17:59:11.064282234+00:00 stdout F TASK [servicetelemetry : Create ScrapeConfig to scrape Smart Gateway] ********** 2025-12-08T17:59:11.064282234+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:64 2025-12-08T17:59:11.064282234+00:00 stdout F 2025-12-08T17:59:11.064282234+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:11.064314964+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:11Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create ScrapeConfig to scrape Smart Gateway"} 2025-12-08T17:59:11.760709308+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:11Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/monitoring.rhobs/v1alpha1/namespaces/service-telemetry/scrapeconfigs/default-cloud1-sens-meter","Verb":"get","APIPrefix":"apis","APIGroup":"monitoring.rhobs","APIVersion":"v1alpha1","Namespace":"service-telemetry","Resource":"scrapeconfigs","Subresource":"","Name":"default-cloud1-sens-meter","Parts":["scrapeconfigs","default-cloud1-sens-meter"]}} 2025-12-08T17:59:11.897958485+00:00 stdout F 2025-12-08T17:59:11.897958485+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:11.897958485+00:00 stdout F 2025-12-08T17:59:11.897958485+00:00 stdout F TASK [servicetelemetry : Create additional ScrapeConfig if provided] *********** 2025-12-08T17:59:11.897958485+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:70 2025-12-08T17:59:11.897958485+00:00 stdout F 2025-12-08T17:59:11.897958485+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:11.897993596+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:11Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create additional ScrapeConfig if provided"} 2025-12-08T17:59:11.921436134+00:00 stdout F 2025-12-08T17:59:11.921436134+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:11.921436134+00:00 stdout F 2025-12-08T17:59:11.921436134+00:00 stdout F TASK [servicetelemetry : Create additional ServiceMonitor if provided (legacy)] *** 2025-12-08T17:59:11.921436134+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:77 2025-12-08T17:59:11.921436134+00:00 stdout F 2025-12-08T17:59:11.921436134+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:11.921464655+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:11Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Create additional 
ServiceMonitor if provided (legacy)"} 2025-12-08T17:59:11.946543655+00:00 stdout F 2025-12-08T17:59:11.946543655+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:11.946543655+00:00 stdout F 2025-12-08T17:59:11.946543655+00:00 stdout F TASK [servicetelemetry : Remove (legacy) default ServiceMonitors] ************** 2025-12-08T17:59:11.946543655+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:84 2025-12-08T17:59:11.946543655+00:00 stdout F 2025-12-08T17:59:11.946543655+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:11.946593007+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:11Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Remove (legacy) default ServiceMonitors"} 2025-12-08T17:59:12.605967875+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:12Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=ServiceMonitor, service-telemetry/default-cloud1-sens-meter"} 2025-12-08T17:59:12.704348508+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:12Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Lookup Elasticsearch BasicAuth"} 2025-12-08T17:59:12.704416480+00:00 stdout F 2025-12-08T17:59:12.704416480+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:12.704436810+00:00 stdout F 2025-12-08T17:59:12.704436810+00:00 stdout F TASK [servicetelemetry : Lookup Elasticsearch BasicAuth] *********************** 2025-12-08T17:59:12.704436810+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:24 2025-12-08T17:59:12.704454981+00:00 stdout F 2025-12-08T17:59:12.704454981+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:13.362370691+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:13Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/elasticsearch-es-elastic-user","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"elasticsearch-es-elastic-user","Parts":["secrets","elasticsearch-es-elastic-user"]}} 2025-12-08T17:59:13.713382311+00:00 stdout F 2025-12-08T17:59:13.713382311+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:13.713382311+00:00 stdout F 2025-12-08T17:59:13.713382311+00:00 stdout F TASK [servicetelemetry : Get the Elasticsearch TLS materials secret] *********** 2025-12-08T17:59:13.713382311+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:52 2025-12-08T17:59:13.713382311+00:00 stdout F 2025-12-08T17:59:13.713382311+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:13.713433643+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:59:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Get the Elasticsearch TLS materials secret"} 2025-12-08T17:59:14.487286598+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:14Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/elasticsearch-es-cert","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"elasticsearch-es-cert","Parts":["secrets","elasticsearch-es-cert"]}} 2025-12-08T17:59:14.632427643+00:00 stdout F 2025-12-08T17:59:14.632427643+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:14.632427643+00:00 stdout F 2025-12-08T17:59:14.632427643+00:00 stdout F TASK [servicetelemetry : Load dummy certs] ************************************* 2025-12-08T17:59:14.632427643+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:62 2025-12-08T17:59:14.632427643+00:00 stdout F 2025-12-08T17:59:14.632427643+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:14.632453744+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:14Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Load dummy certs"} 2025-12-08T17:59:14.684748822+00:00 stdout F 2025-12-08T17:59:14.684748822+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:14.684748822+00:00 stdout F 2025-12-08T17:59:14.684748822+00:00 stdout F TASK [servicetelemetry : Augment the secret with dummy TLS cert/key if no TLS user auth material provided] *** 2025-12-08T17:59:14.684748822+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:66 2025-12-08T17:59:14.684748822+00:00 stdout F 2025-12-08T17:59:14.684748822+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:14.684781743+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:14Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Augment the secret with dummy TLS cert/key if no TLS user auth material provided"} 2025-12-08T17:59:14.763653932+00:00 stdout F 2025-12-08T17:59:14.763653932+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:14.763653932+00:00 stdout F 2025-12-08T17:59:14.763653932+00:00 stdout F TASK [servicetelemetry : Deploy Events Smart Gateway instance for each collector] *** 2025-12-08T17:59:14.763653932+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:78 2025-12-08T17:59:14.763653932+00:00 stdout F 2025-12-08T17:59:14.763653932+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T17:59:14.763681883+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:14Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy Events Smart Gateway instance for each collector"} 2025-12-08T17:59:14.885646388+00:00 stdout F 2025-12-08T17:59:14.885646388+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:14.885646388+00:00 stdout F 2025-12-08T17:59:14.885646388+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T17:59:14.885646388+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T17:59:14.885646388+00:00 stdout F 2025-12-08T17:59:14.885646388+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:14.885683538+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:14Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T17:59:15.764297234+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:15Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways/default-cloud1-coll-event","Verb":"get","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"default-cloud1-coll-event","Parts":["smartgateways","default-cloud1-coll-event"]}} 2025-12-08T17:59:15.891185579+00:00 stdout F 2025-12-08T17:59:15.891185579+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:15.891185579+00:00 stdout F 2025-12-08T17:59:15.891185579+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T17:59:15.891185579+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T17:59:15.891185579+00:00 stdout F 2025-12-08T17:59:15.891185579+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:15.891215470+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:15Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T17:59:16.005818420+00:00 stdout F 2025-12-08T17:59:16.005818420+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:16.005818420+00:00 stdout F 2025-12-08T17:59:16.005818420+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T17:59:16.005818420+00:00 stdout F task path: 
/opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T17:59:16.005818420+00:00 stdout F 2025-12-08T17:59:16.005818420+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:16.005849251+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:16Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T17:59:16.761398523+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:16Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways/default-cloud1-ceil-event","Verb":"get","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"default-cloud1-ceil-event","Parts":["smartgateways","default-cloud1-ceil-event"]}} 2025-12-08T17:59:16.888351030+00:00 stdout F 2025-12-08T17:59:16.888351030+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:16.888351030+00:00 stdout F 2025-12-08T17:59:16.888351030+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T17:59:16.888351030+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T17:59:16.888351030+00:00 stdout F 2025-12-08T17:59:16.888351030+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:16.888382911+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:16Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T17:59:17.008783844+00:00 stdout F 2025-12-08T17:59:17.008783844+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:17.008783844+00:00 stdout F 2025-12-08T17:59:17.008783844+00:00 stdout F TASK [servicetelemetry : Start graphing component plays] *********************** 2025-12-08T17:59:17.008783844+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:98 2025-12-08T17:59:17.008783844+00:00 stdout F 2025-12-08T17:59:17.008783844+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:17.008821075+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:17Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Start graphing component plays"} 2025-12-08T17:59:17.034866681+00:00 stdout F 2025-12-08T17:59:17.034866681+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:17.034866681+00:00 stdout F 2025-12-08T17:59:17.034866681+00:00 stdout F TASK [servicetelemetry : Post-setup] ******************************************* 
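The deploy tasks in this stretch reconcile SmartGateway objects in the smartgateway.infra.watch group; the proxy records show reads of default-cloud1-coll-event and default-cloud1-ceil-event. A minimal cross-check against the cluster, assuming the oc client and the service-telemetry namespace used throughout this job:

  # Smart Gateway custom resources the playbook just reconciled
  oc get smartgateways -n service-telemetry
  # Full spec of one of the event gateways named in the proxy records above
  oc get smartgateways default-cloud1-coll-event -n service-telemetry -o yaml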
2025-12-08T17:59:17.034866681+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:104 2025-12-08T17:59:17.034866681+00:00 stdout F 2025-12-08T17:59:17.034866681+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:17.034945573+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:17Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Post-setup"} 2025-12-08T17:59:17.175578110+00:00 stdout F 2025-12-08T17:59:17.175578110+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:17.175578110+00:00 stdout F 2025-12-08T17:59:17.175578110+00:00 stdout F TASK [servicetelemetry : Remove unlisted Smart Gateway] ************************ 2025-12-08T17:59:17.175578110+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/post.yml:20 2025-12-08T17:59:17.175578110+00:00 stdout F 2025-12-08T17:59:17.175578110+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:17.175613331+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:17Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"4191063253195287734","EventData.Name":"servicetelemetry : Remove unlisted Smart Gateway"} 2025-12-08T17:59:17.430984381+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:17Z","logger":"runner","msg":"Ansible-runner exited successfully","job":"4191063253195287734","name":"default","namespace":"service-telemetry"} 2025-12-08T17:59:17.431190336+00:00 stdout F 2025-12-08T17:59:17.431190336+00:00 stdout F ----- Ansible Task Status Event StdOut (infra.watch/v1beta1, Kind=ServiceTelemetry, default/service-telemetry) ----- 2025-12-08T17:59:17.431190336+00:00 stdout F 2025-12-08T17:59:17.431190336+00:00 stdout F 2025-12-08T17:59:17.431190336+00:00 stdout F PLAY RECAP ********************************************************************* 2025-12-08T17:59:17.431190336+00:00 stdout F localhost : ok=128  changed=3  unreachable=0 failed=0 skipped=35  rescued=0 ignored=0 2025-12-08T17:59:17.431190336+00:00 stdout F 2025-12-08T17:59:17.431190336+00:00 stdout F ---------- 2025-12-08T17:59:18.146932370+00:00 stdout F 2025-12-08T17:59:18.146932370+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:18.146932370+00:00 stdout F 2025-12-08T17:59:18.146932370+00:00 stdout F TASK [Installing service telemetry] ******************************** 2025-12-08T17:59:18.146932370+00:00 stdout F ok: [localhost] => { 2025-12-08T17:59:18.146932370+00:00 stdout F  "msg": "INSTALLING SERVICE TELEMETRY" 2025-12-08T17:59:18.146932370+00:00 stdout F } 2025-12-08T17:59:18.146932370+00:00 stdout F 2025-12-08T17:59:18.146932370+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:18.146980042+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:18Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, 
Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"6231893141681640340","EventData.TaskArgs":""} 2025-12-08T17:59:18.149974290+00:00 stdout F 2025-12-08T17:59:18.149974290+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:18.149974290+00:00 stdout F 2025-12-08T17:59:18.149974290+00:00 stdout F TASK [servicetelemetry : Pre-setup] ******************************************** 2025-12-08T17:59:18.149974290+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:7 2025-12-08T17:59:18.149974290+00:00 stdout F 2025-12-08T17:59:18.149974290+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:18.150033652+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:18Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Pre-setup"} 2025-12-08T17:59:18.209629232+00:00 stdout F 2025-12-08T17:59:18.209629232+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:18.209629232+00:00 stdout F 2025-12-08T17:59:18.209629232+00:00 stdout F TASK [servicetelemetry : Clear the fact cache before looking up cluster information] *** 2025-12-08T17:59:18.209629232+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:1 2025-12-08T17:59:18.209629232+00:00 stdout F 2025-12-08T17:59:18.209629232+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:18.209675994+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:18Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Clear the fact cache before looking up cluster information"} 2025-12-08T17:59:18.654559849+00:00 stdout F 2025-12-08T17:59:18.654559849+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:18.654559849+00:00 stdout F 2025-12-08T17:59:18.654559849+00:00 stdout F TASK [Show existing API groups available to us] ******************************** 2025-12-08T17:59:18.654559849+00:00 stdout F ok: [localhost] => { 2025-12-08T17:59:18.654559849+00:00 stdout F  "api_groups": [ 2025-12-08T17:59:18.654559849+00:00 stdout F  "", 2025-12-08T17:59:18.654559849+00:00 stdout F  "apiregistration.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "apps", 2025-12-08T17:59:18.654559849+00:00 stdout F  "events.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "authentication.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "authorization.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "autoscaling", 2025-12-08T17:59:18.654559849+00:00 stdout F  "batch", 2025-12-08T17:59:18.654559849+00:00 stdout F  "certificates.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "networking.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "policy", 2025-12-08T17:59:18.654559849+00:00 stdout F  "rbac.authorization.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "storage.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "admissionregistration.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  
"apiextensions.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "scheduling.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "coordination.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "node.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "discovery.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "flowcontrol.apiserver.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "apps.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "authorization.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "build.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "image.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "oauth.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "project.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "quota.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "route.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "security.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "template.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "user.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "packages.operators.coreos.com", 2025-12-08T17:59:18.654559849+00:00 stdout F  "config.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "operator.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "acme.cert-manager.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "apiserver.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "apm.k8s.elastic.co", 2025-12-08T17:59:18.654559849+00:00 stdout F  "autoscaling.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "cert-manager.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "console.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "elasticsearch.k8s.elastic.co", 2025-12-08T17:59:18.654559849+00:00 stdout F  "enterprisesearch.k8s.elastic.co", 2025-12-08T17:59:18.654559849+00:00 stdout F  "gateway.networking.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "imageregistry.operator.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "ingress.operator.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "k8s.cni.cncf.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "k8s.ovn.org", 2025-12-08T17:59:18.654559849+00:00 stdout F  "kibana.k8s.elastic.co", 2025-12-08T17:59:18.654559849+00:00 stdout F  "machine.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "machineconfiguration.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "monitoring.coreos.com", 2025-12-08T17:59:18.654559849+00:00 stdout F  "monitoring.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "monitoring.rhobs", 2025-12-08T17:59:18.654559849+00:00 stdout F  "network.operator.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "operators.coreos.com", 2025-12-08T17:59:18.654559849+00:00 stdout F  "samples.operator.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "security.internal.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "agent.k8s.elastic.co", 2025-12-08T17:59:18.654559849+00:00 stdout F  "autoscaling.k8s.elastic.co", 2025-12-08T17:59:18.654559849+00:00 stdout F  "controlplane.operator.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "interconnectedcloud.github.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "ipam.cluster.x-k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "logstash.k8s.elastic.co", 2025-12-08T17:59:18.654559849+00:00 stdout F  "maps.k8s.elastic.co", 
2025-12-08T17:59:18.654559849+00:00 stdout F  "migration.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "observability.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "perses.dev", 2025-12-08T17:59:18.654559849+00:00 stdout F  "policy.networking.k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "stackconfigpolicy.k8s.elastic.co", 2025-12-08T17:59:18.654559849+00:00 stdout F  "whereabouts.cni.cncf.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "infrastructure.cluster.x-k8s.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "beat.k8s.elastic.co", 2025-12-08T17:59:18.654559849+00:00 stdout F  "helm.openshift.io", 2025-12-08T17:59:18.654559849+00:00 stdout F  "infra.watch", 2025-12-08T17:59:18.654559849+00:00 stdout F  "smartgateway.infra.watch" 2025-12-08T17:59:18.654559849+00:00 stdout F  ] 2025-12-08T17:59:18.654559849+00:00 stdout F } 2025-12-08T17:59:18.654559849+00:00 stdout F 2025-12-08T17:59:18.654559849+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:18.654636861+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:18Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"6231893141681640340","EventData.TaskArgs":""} 2025-12-08T17:59:18.698546879+00:00 stdout F 2025-12-08T17:59:18.698546879+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:18.698546879+00:00 stdout F 2025-12-08T17:59:18.698546879+00:00 stdout F TASK [Indicate what kind of cluster we are in (OpenShift or Kubernetes).] ******************************** 2025-12-08T17:59:18.698546879+00:00 stdout F ok: [localhost] => { 2025-12-08T17:59:18.698546879+00:00 stdout F  "msg": "CLUSTER TYPE: is_openshift=True; is_k8s=False" 2025-12-08T17:59:18.698546879+00:00 stdout F } 2025-12-08T17:59:18.698546879+00:00 stdout F 2025-12-08T17:59:18.698546879+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:18.698588720+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:18Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"6231893141681640340","EventData.TaskArgs":""} 2025-12-08T17:59:18.701728402+00:00 stdout F 2025-12-08T17:59:18.701728402+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:18.701728402+00:00 stdout F 2025-12-08T17:59:18.701728402+00:00 stdout F TASK [servicetelemetry : Fail when can't determine type of cluster] ************ 2025-12-08T17:59:18.701728402+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:27 2025-12-08T17:59:18.701728402+00:00 stdout F 2025-12-08T17:59:18.701728402+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:18.701752323+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:18Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Fail when can't determine type of cluster"} 2025-12-08T17:59:19.075919744+00:00 stdout F 2025-12-08T17:59:19.075919744+00:00 
stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:19.075919744+00:00 stdout F 2025-12-08T17:59:19.075919744+00:00 stdout F TASK [Print some debug information] ******************************** 2025-12-08T17:59:19.075919744+00:00 stdout F ok: [localhost] => { 2025-12-08T17:59:19.075919744+00:00 stdout F  "msg": [ 2025-12-08T17:59:19.075919744+00:00 stdout F  "ServiceTelemetry Variables", 2025-12-08T17:59:19.075919744+00:00 stdout F  "--------------------------------------------", 2025-12-08T17:59:19.075919744+00:00 stdout F  "alerting:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " alertmanager:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " deployment_size: 1", 2025-12-08T17:59:19.075919744+00:00 stdout F  " receivers:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " snmp_traps:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " alert_oid_label: oid", 2025-12-08T17:59:19.075919744+00:00 stdout F  " community: public", 2025-12-08T17:59:19.075919744+00:00 stdout F  " enabled: true", 2025-12-08T17:59:19.075919744+00:00 stdout F  " port: 162", 2025-12-08T17:59:19.075919744+00:00 stdout F  " retries: 5", 2025-12-08T17:59:19.075919744+00:00 stdout F  " target: 192.168.24.254", 2025-12-08T17:59:19.075919744+00:00 stdout F  " timeout: 1", 2025-12-08T17:59:19.075919744+00:00 stdout F  " trap_default_oid: 1.3.6.1.4.1.50495.15.1.2.1", 2025-12-08T17:59:19.075919744+00:00 stdout F  " trap_default_severity: ''", 2025-12-08T17:59:19.075919744+00:00 stdout F  " trap_oid_prefix: 1.3.6.1.4.1.50495.15", 2025-12-08T17:59:19.075919744+00:00 stdout F  " storage:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " persistent:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " pvc_storage_request: 20G", 2025-12-08T17:59:19.075919744+00:00 stdout F  " storage_class: crc-csi-hostpath-provisioner", 2025-12-08T17:59:19.075919744+00:00 stdout F  " strategy: persistent", 2025-12-08T17:59:19.075919744+00:00 stdout F  " enabled: true", 2025-12-08T17:59:19.075919744+00:00 stdout F  "backends:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " events:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " elasticsearch:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " certificates:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ca_cert_duration: 70080h", 2025-12-08T17:59:19.075919744+00:00 stdout F  " endpoint_cert_duration: 70080h", 2025-12-08T17:59:19.075919744+00:00 stdout F  " enabled: true", 2025-12-08T17:59:19.075919744+00:00 stdout F  " forwarding:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " host_url: https://elasticsearch-es-http:9200", 2025-12-08T17:59:19.075919744+00:00 stdout F  " tls_secret_name: elasticsearch-es-cert", 2025-12-08T17:59:19.075919744+00:00 stdout F  " tls_server_name: ''", 2025-12-08T17:59:19.075919744+00:00 stdout F  " use_basic_auth: true", 2025-12-08T17:59:19.075919744+00:00 stdout F  " use_tls: true", 2025-12-08T17:59:19.075919744+00:00 stdout F  " user_secret_name: elasticsearch-es-elastic-user", 2025-12-08T17:59:19.075919744+00:00 stdout F  " node_count: 1", 2025-12-08T17:59:19.075919744+00:00 stdout F  " storage:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " persistent:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " pvc_storage_request: 20Gi", 2025-12-08T17:59:19.075919744+00:00 stdout F  " storage_class: ''", 2025-12-08T17:59:19.075919744+00:00 stdout F  " strategy: persistent", 2025-12-08T17:59:19.075919744+00:00 stdout F  " version: 7.16.1", 2025-12-08T17:59:19.075919744+00:00 stdout F  " metrics:", 
2025-12-08T17:59:19.075919744+00:00 stdout F  " prometheus:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " deployment_size: 1", 2025-12-08T17:59:19.075919744+00:00 stdout F  " enabled: true", 2025-12-08T17:59:19.075919744+00:00 stdout F  " scrape_interval: 30s", 2025-12-08T17:59:19.075919744+00:00 stdout F  " storage:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " persistent:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " pvc_storage_request: 20G", 2025-12-08T17:59:19.075919744+00:00 stdout F  " storage_class: crc-csi-hostpath-provisioner", 2025-12-08T17:59:19.075919744+00:00 stdout F  " retention: 24h", 2025-12-08T17:59:19.075919744+00:00 stdout F  " strategy: persistent", 2025-12-08T17:59:19.075919744+00:00 stdout F  "clouds:", 2025-12-08T17:59:19.075919744+00:00 stdout F  "- events:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " collectors:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " - bridge:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:59:19.075919744+00:00 stdout F  " verbose: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " collector_type: collectd", 2025-12-08T17:59:19.075919744+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " subscription_address: collectd/cloud1-notify", 2025-12-08T17:59:19.075919744+00:00 stdout F  " - bridge:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:59:19.075919744+00:00 stdout F  " verbose: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " collector_type: ceilometer", 2025-12-08T17:59:19.075919744+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " subscription_address: anycast/ceilometer/cloud1-event.sample", 2025-12-08T17:59:19.075919744+00:00 stdout F  " metrics:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " collectors:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " - bridge:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:59:19.075919744+00:00 stdout F  " verbose: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " collector_type: collectd", 2025-12-08T17:59:19.075919744+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " subscription_address: collectd/cloud1-telemetry", 2025-12-08T17:59:19.075919744+00:00 stdout F  " - bridge:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ring_buffer_size: 16384", 2025-12-08T17:59:19.075919744+00:00 stdout F  " verbose: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " collector_type: ceilometer", 2025-12-08T17:59:19.075919744+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " subscription_address: anycast/ceilometer/cloud1-metering.sample", 2025-12-08T17:59:19.075919744+00:00 stdout F  " - bridge:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ring_buffer_count: 15000", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ring_buffer_size: 65535", 2025-12-08T17:59:19.075919744+00:00 stdout F  " verbose: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " collector_type: sensubility", 2025-12-08T17:59:19.075919744+00:00 stdout F  " debug_enabled: false", 2025-12-08T17:59:19.075919744+00:00 
stdout F  " subscription_address: sensubility/cloud1-telemetry", 2025-12-08T17:59:19.075919744+00:00 stdout F  " name: cloud1", 2025-12-08T17:59:19.075919744+00:00 stdout F  "graphing:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " enabled: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " grafana:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " base_image: registry.redhat.io/rhel8/grafana:9", 2025-12-08T17:59:19.075919744+00:00 stdout F  " dashboards:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " enabled: true", 2025-12-08T17:59:19.075919744+00:00 stdout F  " disable_signout_menu: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ingress_enabled: true", 2025-12-08T17:59:19.075919744+00:00 stdout F  "high_availability:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " enabled: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  "transports:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " qdr:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " auth: basic", 2025-12-08T17:59:19.075919744+00:00 stdout F  " certificates:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " ca_cert_duration: 70080h", 2025-12-08T17:59:19.075919744+00:00 stdout F  " endpoint_cert_duration: 70080h", 2025-12-08T17:59:19.075919744+00:00 stdout F  " deployment_size: 1", 2025-12-08T17:59:19.075919744+00:00 stdout F  " enabled: true", 2025-12-08T17:59:19.075919744+00:00 stdout F  " web:", 2025-12-08T17:59:19.075919744+00:00 stdout F  " enabled: false", 2025-12-08T17:59:19.075919744+00:00 stdout F  "" 2025-12-08T17:59:19.075919744+00:00 stdout F  ] 2025-12-08T17:59:19.075919744+00:00 stdout F } 2025-12-08T17:59:19.075919744+00:00 stdout F 2025-12-08T17:59:19.075919744+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:19.076009106+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:19Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"6231893141681640340","EventData.TaskArgs":""} 2025-12-08T17:59:19.092083410+00:00 stdout F 2025-12-08T17:59:19.092083410+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:19.092083410+00:00 stdout F 2025-12-08T17:59:19.092083410+00:00 stdout F TASK [servicetelemetry : Get current Smart Gateways loaded] ******************** 2025-12-08T17:59:19.092083410+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:77 2025-12-08T17:59:19.092083410+00:00 stdout F 2025-12-08T17:59:19.092083410+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:19.092126231+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:19Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Get current Smart Gateways loaded"} 2025-12-08T17:59:19.971733894+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:19Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways","Verb":"list","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"","Parts":["smartgateways"]}} 2025-12-08T17:59:20.105914350+00:00 stdout F 2025-12-08T17:59:20.105914350+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:20.105914350+00:00 stdout F 2025-12-08T17:59:20.105914350+00:00 stdout F TASK [servicetelemetry : Get current STF object] ******************************* 2025-12-08T17:59:20.105914350+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:84 2025-12-08T17:59:20.105914350+00:00 stdout F 2025-12-08T17:59:20.105914350+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:20.105940071+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:20Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Get current STF object"} 2025-12-08T17:59:20.806370111+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:20Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/infra.watch/v1beta1/namespaces/service-telemetry/servicetelemetrys/default","Verb":"get","APIPrefix":"apis","APIGroup":"infra.watch","APIVersion":"v1beta1","Namespace":"service-telemetry","Resource":"servicetelemetrys","Subresource":"","Name":"default","Parts":["servicetelemetrys","default"]}} 2025-12-08T17:59:20.894980137+00:00 stdout F 2025-12-08T17:59:20.894980137+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:20.894980137+00:00 stdout F 2025-12-08T17:59:20.894980137+00:00 stdout F TASK [servicetelemetry : Get community Prometheus objects] ********************* 2025-12-08T17:59:20.894980137+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:92 2025-12-08T17:59:20.894980137+00:00 stdout F 2025-12-08T17:59:20.894980137+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:20.895026178+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:20Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Get community Prometheus objects"} 2025-12-08T17:59:21.980494206+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:21Z","logger":"proxy","msg":"Cache miss: monitoring.coreos.com/v1, Kind=Prometheus, service-telemetry/default"} 2025-12-08T17:59:22.080367768+00:00 stdout F 2025-12-08T17:59:22.080367768+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:22.080367768+00:00 stdout F 2025-12-08T17:59:22.080367768+00:00 stdout F TASK [servicetelemetry : Apply community observabilityStrategy if missing on an STF object with an existing community prometheus] *** 2025-12-08T17:59:22.080367768+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:101 
2025-12-08T17:59:22.080367768+00:00 stdout F 2025-12-08T17:59:22.080367768+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:22.080413080+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:22Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Apply community observabilityStrategy if missing on an STF object with an existing community prometheus"} 2025-12-08T17:59:22.141986472+00:00 stdout F 2025-12-08T17:59:22.141986472+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:22.141986472+00:00 stdout F 2025-12-08T17:59:22.141986472+00:00 stdout F TASK [servicetelemetry : Apply default observabilityStrategy if missing on a new STF object with no associated community prometheus] *** 2025-12-08T17:59:22.141986472+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:118 2025-12-08T17:59:22.141986472+00:00 stdout F 2025-12-08T17:59:22.141986472+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:22.142018433+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:22Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Apply default observabilityStrategy if missing on a new STF object with no associated community prometheus"} 2025-12-08T17:59:22.169925849+00:00 stdout F 2025-12-08T17:59:22.169925849+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:22.169925849+00:00 stdout F 2025-12-08T17:59:22.169925849+00:00 stdout F TASK [servicetelemetry : Get QDR objects] ************************************** 2025-12-08T17:59:22.169925849+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:132 2025-12-08T17:59:22.169925849+00:00 stdout F 2025-12-08T17:59:22.169925849+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:22.169967370+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:22Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Get QDR objects"} 2025-12-08T17:59:22.824421839+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:22Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/interconnectedcloud.github.io/v1alpha1/namespaces/service-telemetry/interconnects/default-interconnect","Verb":"get","APIPrefix":"apis","APIGroup":"interconnectedcloud.github.io","APIVersion":"v1alpha1","Namespace":"service-telemetry","Resource":"interconnects","Subresource":"","Name":"default-interconnect","Parts":["interconnects","default-interconnect"]}} 2025-12-08T17:59:22.917044070+00:00 stdout F 2025-12-08T17:59:22.917044070+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:22.917044070+00:00 stdout F 
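The two observabilityStrategy tasks above patch the ServiceTelemetry object only when the field is absent, choosing the community strategy if a community Prometheus already exists and the default strategy otherwise. A quick way to see which value the operator settled on, assuming the field lives at spec.observabilityStrategy (the exact path is not shown in this log):

  # Inspect the ServiceTelemetry object the operator is reconciling
  oc get servicetelemetrys default -n service-telemetry -o yaml
  # Or just the strategy field, assuming the .spec.observabilityStrategy path
  oc get servicetelemetrys default -n service-telemetry \
      -o jsonpath='{.spec.observabilityStrategy}{"\n"}'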
2025-12-08T17:59:22.917044070+00:00 stdout F TASK [servicetelemetry : Apply legacy auth=none for QDR if missing on the STF object and it's currently deployed that way] *** 2025-12-08T17:59:22.917044070+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:141 2025-12-08T17:59:22.917044070+00:00 stdout F 2025-12-08T17:59:22.917044070+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:22.917091941+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:22Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Apply legacy auth=none for QDR if missing on the STF object and it's currently deployed that way"} 2025-12-08T17:59:22.965051175+00:00 stdout F 2025-12-08T17:59:22.965051175+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:22.965051175+00:00 stdout F 2025-12-08T17:59:22.965051175+00:00 stdout F TASK [servicetelemetry : Apply default auth for QDR if missing on a new STF object with no associated auth=none QDR] *** 2025-12-08T17:59:22.965051175+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:161 2025-12-08T17:59:22.965051175+00:00 stdout F 2025-12-08T17:59:22.965051175+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:22.965089806+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:22Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Apply default auth for QDR if missing on a new STF object with no associated auth=none QDR"} 2025-12-08T17:59:23.043572024+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:23Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Set ServiceTelemetry object status to have ephemeralStorageEnabled status"} 2025-12-08T17:59:23.044219181+00:00 stdout F 2025-12-08T17:59:23.044219181+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:23.044250872+00:00 stdout F 2025-12-08T17:59:23.044250872+00:00 stdout F TASK [servicetelemetry : Set ServiceTelemetry object status to have ephemeralStorageEnabled status] *** 2025-12-08T17:59:23.044250872+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/pre.yml:185 2025-12-08T17:59:23.044269922+00:00 stdout F 2025-12-08T17:59:23.044269922+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:23.112036758+00:00 stdout F 2025-12-08T17:59:23.112036758+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:23.112036758+00:00 stdout F 2025-12-08T17:59:23.112036758+00:00 stdout F TASK [servicetelemetry : Create QDR instance] ********************************** 2025-12-08T17:59:23.112036758+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:19 
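The QDR component tasks that follow create the interconnect itself plus the cert-manager issuers, certificates, SASL ConfigMap, and BasicAuth secret it depends on; the proxy records below name each of them. A minimal cross-check against the cluster, assuming the resource names shown in those records:

  # Interconnect reconciled by the QDR component tasks
  oc get interconnects default-interconnect -n service-telemetry
  # cert-manager materials created for it (cert-manager.io appears in the API group listing above)
  oc get issuers,certificates -n service-telemetry
  # BasicAuth users secret and SASL config named in the proxy records
  oc get secret/default-interconnect-users configmap/default-interconnect-sasl-config -n service-telemetry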
2025-12-08T17:59:23.112036758+00:00 stdout F 2025-12-08T17:59:23.112036758+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:23.112078869+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:23Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create QDR instance"} 2025-12-08T17:59:23.169727759+00:00 stdout F 2025-12-08T17:59:23.169727759+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:23.169727759+00:00 stdout F 2025-12-08T17:59:23.169727759+00:00 stdout F TASK [servicetelemetry : Create self-signed interconnect issuer] *************** 2025-12-08T17:59:23.169727759+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:6 2025-12-08T17:59:23.169727759+00:00 stdout F 2025-12-08T17:59:23.169727759+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:23.169766140+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:23Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create self-signed interconnect issuer"} 2025-12-08T17:59:24.064157192+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:24Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/issuers/default-interconnect-selfsigned","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"issuers","Subresource":"","Name":"default-interconnect-selfsigned","Parts":["issuers","default-interconnect-selfsigned"]}} 2025-12-08T17:59:24.185925152+00:00 stdout F 2025-12-08T17:59:24.185925152+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:24.185925152+00:00 stdout F 2025-12-08T17:59:24.185925152+00:00 stdout F TASK [servicetelemetry : Create self-signed interconnect certificate] ********** 2025-12-08T17:59:24.185925152+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:18 2025-12-08T17:59:24.185925152+00:00 stdout F 2025-12-08T17:59:24.185925152+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:24.185954453+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:24Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create self-signed interconnect certificate"} 2025-12-08T17:59:24.865056960+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:24Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/certificates/default-interconnect-selfsigned","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"certificates","Subresource":"","Name":"default-interconnect-selfsigned","Parts":["certificates","default-interconnect-selfsigned"]}} 2025-12-08T17:59:24.963921206+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:24Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create default CA interconnect issuer using self-signed interconnect certificate"} 2025-12-08T17:59:24.963976697+00:00 stdout F 2025-12-08T17:59:24.963976697+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:24.964007258+00:00 stdout F 2025-12-08T17:59:24.964007258+00:00 stdout F TASK [servicetelemetry : Create default CA interconnect issuer using self-signed interconnect certificate] *** 2025-12-08T17:59:24.964007258+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:36 2025-12-08T17:59:24.964026178+00:00 stdout F 2025-12-08T17:59:24.964026178+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:25.672467250+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:25Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/issuers/default-interconnect-ca","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"issuers","Subresource":"","Name":"default-interconnect-ca","Parts":["issuers","default-interconnect-ca"]}} 2025-12-08T17:59:25.823082250+00:00 stdout F 2025-12-08T17:59:25.823082250+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:25.823082250+00:00 stdout F 2025-12-08T17:59:25.823082250+00:00 stdout F TASK [servicetelemetry : Create OpenStack CA certificate using self-signed interconnect certificate] *** 2025-12-08T17:59:25.823082250+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:49 2025-12-08T17:59:25.823082250+00:00 stdout F 2025-12-08T17:59:25.823082250+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:25.823128301+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:25Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create OpenStack CA certificate using self-signed interconnect certificate"} 2025-12-08T17:59:26.510235400+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:26Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/certificates/default-interconnect-openstack-ca","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"certificates","Subresource":"","Name":"default-interconnect-openstack-ca","Parts":["certificates","default-interconnect-openstack-ca"]}} 2025-12-08T17:59:26.642306720+00:00 stdout F 2025-12-08T17:59:26.642306720+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:26.642306720+00:00 stdout F 2025-12-08T17:59:26.642306720+00:00 stdout F TASK [servicetelemetry : Create OpenStack credentials certificate using self-signed interconnect certificate] *** 2025-12-08T17:59:26.642306720+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:66 2025-12-08T17:59:26.642306720+00:00 stdout F 2025-12-08T17:59:26.642306720+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:26.642362432+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:26Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create OpenStack credentials certificate using self-signed interconnect certificate"} 2025-12-08T17:59:27.393333594+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:27Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/certificates/default-interconnect-openstack-credentials","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"certificates","Subresource":"","Name":"default-interconnect-openstack-credentials","Parts":["certificates","default-interconnect-openstack-credentials"]}} 2025-12-08T17:59:27.516655615+00:00 stdout F 2025-12-08T17:59:27.516655615+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:27.516655615+00:00 stdout F 2025-12-08T17:59:27.516655615+00:00 stdout F TASK [servicetelemetry : Create inter-router CA issuer] ************************ 2025-12-08T17:59:27.516655615+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:84 2025-12-08T17:59:27.516655615+00:00 stdout F 2025-12-08T17:59:27.516655615+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:27.516710187+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:27Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create inter-router CA issuer"} 2025-12-08T17:59:28.245318999+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:28Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/issuers/default-interconnect-inter-router-ca","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"issuers","Subresource":"","Name":"default-interconnect-inter-router-ca","Parts":["issuers","default-interconnect-inter-router-ca"]}} 2025-12-08T17:59:28.380852651+00:00 stdout F 2025-12-08T17:59:28.380852651+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:28.380852651+00:00 stdout F 2025-12-08T17:59:28.380852651+00:00 stdout F TASK [servicetelemetry : Create inter-router CA certificate] ******************* 2025-12-08T17:59:28.380852651+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:97 2025-12-08T17:59:28.380852651+00:00 stdout F 2025-12-08T17:59:28.380852651+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:28.380935753+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:28Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create inter-router CA certificate"} 2025-12-08T17:59:29.088372769+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:29Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/certificates/default-interconnect-inter-router-ca","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"certificates","Subresource":"","Name":"default-interconnect-inter-router-ca","Parts":["certificates","default-interconnect-inter-router-ca"]}} 2025-12-08T17:59:29.207686463+00:00 stdout F 2025-12-08T17:59:29.207686463+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:29.207686463+00:00 stdout F 2025-12-08T17:59:29.207686463+00:00 stdout F TASK [servicetelemetry : Create inter-router credentials certificate] ********** 2025-12-08T17:59:29.207686463+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:114 2025-12-08T17:59:29.207686463+00:00 stdout F 2025-12-08T17:59:29.207686463+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:29.207721224+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:29Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create inter-router credentials certificate"} 2025-12-08T17:59:29.902834723+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:29Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/cert-manager.io/v1/namespaces/service-telemetry/certificates/default-interconnect-inter-router-credentials","Verb":"get","APIPrefix":"apis","APIGroup":"cert-manager.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"certificates","Subresource":"","Name":"default-interconnect-inter-router-credentials","Parts":["certificates","default-interconnect-inter-router-credentials"]}} 2025-12-08T17:59:30.026161084+00:00 stdout F 2025-12-08T17:59:30.026161084+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:30.026161084+00:00 stdout F 2025-12-08T17:59:30.026161084+00:00 stdout F TASK [servicetelemetry : Create Interconnect SASL ConfigMap] ******************* 2025-12-08T17:59:30.026161084+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:133 2025-12-08T17:59:30.026161084+00:00 stdout F 2025-12-08T17:59:30.026161084+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:30.026192745+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:30Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create Interconnect SASL ConfigMap"} 2025-12-08T17:59:30.700621140+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:30Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/configmaps/default-interconnect-sasl-config","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"configmaps","Subresource":"","Name":"default-interconnect-sasl-config","Parts":["configmaps","default-interconnect-sasl-config"]}} 2025-12-08T17:59:30.819307587+00:00 stdout F 2025-12-08T17:59:30.819307587+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:30.819307587+00:00 stdout F 2025-12-08T17:59:30.819307587+00:00 stdout F TASK [servicetelemetry : Get QDR BasicAuth secret] ***************************** 2025-12-08T17:59:30.819307587+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:155 2025-12-08T17:59:30.819307587+00:00 stdout F 2025-12-08T17:59:30.819307587+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:30.819377819+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:30Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Get QDR BasicAuth secret"} 2025-12-08T17:59:31.482175698+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:31Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/default-interconnect-users","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"default-interconnect-users","Parts":["secrets","default-interconnect-users"]}} 2025-12-08T17:59:31.599541401+00:00 stdout F 2025-12-08T17:59:31.599541401+00:00 stdout F 
--------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:31.599541401+00:00 stdout F 2025-12-08T17:59:31.599541401+00:00 stdout F TASK [servicetelemetry : Perform a one-time upgrade to the default generated password for QDR BasicAuth] *** 2025-12-08T17:59:31.599541401+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:167 2025-12-08T17:59:31.599541401+00:00 stdout F 2025-12-08T17:59:31.599541401+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:31.599600993+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:31Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Perform a one-time upgrade to the default generated password for QDR BasicAuth"} 2025-12-08T17:59:31.646309784+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:31Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Get the list of QDR pods"} 2025-12-08T17:59:31.646412437+00:00 stdout F 2025-12-08T17:59:31.646412437+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:31.646470419+00:00 stdout F 2025-12-08T17:59:31.646470419+00:00 stdout F TASK [servicetelemetry : Get the list of QDR pods] ***************************** 2025-12-08T17:59:31.646470419+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:181 2025-12-08T17:59:31.646496739+00:00 stdout F 2025-12-08T17:59:31.646496739+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:31.700989955+00:00 stdout F 2025-12-08T17:59:31.700989955+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:31.700989955+00:00 stdout F 2025-12-08T17:59:31.700989955+00:00 stdout F TASK [servicetelemetry : Restart QDR pods to pick up new password] ************* 2025-12-08T17:59:31.700989955+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:190 2025-12-08T17:59:31.700989955+00:00 stdout F 2025-12-08T17:59:31.700989955+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:31.701044986+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:31Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Restart QDR pods to pick up new password"} 2025-12-08T17:59:31.840594244+00:00 stdout F 2025-12-08T17:59:31.840594244+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:31.840594244+00:00 stdout F 2025-12-08T17:59:31.840594244+00:00 stdout F TASK [servicetelemetry : Create QDR instance] ********************************** 2025-12-08T17:59:31.840594244+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_qdr.yml:268 2025-12-08T17:59:31.840594244+00:00 stdout F 
2025-12-08T17:59:31.840594244+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:31.840638226+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:31Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create QDR instance"} 2025-12-08T17:59:32.642084398+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:32Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/interconnectedcloud.github.io/v1alpha1/namespaces/service-telemetry/interconnects/default-interconnect","Verb":"get","APIPrefix":"apis","APIGroup":"interconnectedcloud.github.io","APIVersion":"v1alpha1","Namespace":"service-telemetry","Resource":"interconnects","Subresource":"","Name":"default-interconnect","Parts":["interconnects","default-interconnect"]}} 2025-12-08T17:59:32.782465608+00:00 stdout F 2025-12-08T17:59:32.782465608+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:32.782465608+00:00 stdout F 2025-12-08T17:59:32.782465608+00:00 stdout F TASK [servicetelemetry : Setup Certificates for metrics components] ************ 2025-12-08T17:59:32.782465608+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:26 2025-12-08T17:59:32.782465608+00:00 stdout F 2025-12-08T17:59:32.782465608+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:32.782504329+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:32Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Setup Certificates for metrics components"} 2025-12-08T17:59:32.850094190+00:00 stdout F 2025-12-08T17:59:32.850094190+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:32.850094190+00:00 stdout F 2025-12-08T17:59:32.850094190+00:00 stdout F TASK [servicetelemetry : Create configmap for OAUTH CA certs] ****************** 2025-12-08T17:59:32.850094190+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_certificates.yml:1 2025-12-08T17:59:32.850094190+00:00 stdout F 2025-12-08T17:59:32.850094190+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:32.850126381+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:32Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create configmap for OAUTH CA certs"} 2025-12-08T17:59:33.562354913+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:33Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/configmaps/serving-certs-ca-bundle","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"configmaps","Subresource":"","Name":"serving-certs-ca-bundle","Parts":["configmaps","serving-certs-ca-bundle"]}} 2025-12-08T17:59:33.678076872+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:33Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Check for existing cookie secret"} 2025-12-08T17:59:33.678129774+00:00 stdout F 2025-12-08T17:59:33.678129774+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:33.678149804+00:00 stdout F 2025-12-08T17:59:33.678149804+00:00 stdout F TASK [servicetelemetry : Check for existing cookie secret] ********************* 2025-12-08T17:59:33.678149804+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_certificates.yml:12 2025-12-08T17:59:33.678168615+00:00 stdout F 2025-12-08T17:59:33.678168615+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:34.374782905+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:34Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/default-session-secret","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"default-session-secret","Parts":["secrets","default-session-secret"]}} 2025-12-08T17:59:34.493551115+00:00 stdout F 2025-12-08T17:59:34.493551115+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:34.493551115+00:00 stdout F 2025-12-08T17:59:34.493551115+00:00 stdout F TASK [servicetelemetry : Create cookie secret] ********************************* 2025-12-08T17:59:34.493551115+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_certificates.yml:20 2025-12-08T17:59:34.493551115+00:00 stdout F 2025-12-08T17:59:34.493551115+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:34.493597666+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:34Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create cookie secret"} 2025-12-08T17:59:34.596966750+00:00 stdout F 2025-12-08T17:59:34.596966750+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:34.596966750+00:00 stdout F 2025-12-08T17:59:34.596966750+00:00 stdout F TASK [servicetelemetry : Create Prometheus instance] *************************** 2025-12-08T17:59:34.596966750+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:48 2025-12-08T17:59:34.596966750+00:00 stdout F 2025-12-08T17:59:34.596966750+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:34.596995841+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:59:34Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create Prometheus instance"} 2025-12-08T17:59:34.708437338+00:00 stdout F 2025-12-08T17:59:34.708437338+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:34.708437338+00:00 stdout F 2025-12-08T17:59:34.708437338+00:00 stdout F TASK [servicetelemetry : Create ServiceAccount/prometheus-stf with oauth redirect annotation] *** 2025-12-08T17:59:34.708437338+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:10 2025-12-08T17:59:34.708437338+00:00 stdout F 2025-12-08T17:59:34.708437338+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:34.708485560+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:34Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create ServiceAccount/prometheus-stf with oauth redirect annotation"} 2025-12-08T17:59:35.399288145+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:35Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/serviceaccounts/prometheus-stf","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"serviceaccounts","Subresource":"","Name":"prometheus-stf","Parts":["serviceaccounts","prometheus-stf"]}} 2025-12-08T17:59:35.520520111+00:00 stdout F 2025-12-08T17:59:35.520520111+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:35.520520111+00:00 stdout F 2025-12-08T17:59:35.520520111+00:00 stdout F TASK [servicetelemetry : Create ClusterRole/prometheus-stf for non-resource URL /metrics access] *** 2025-12-08T17:59:35.520520111+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:21 2025-12-08T17:59:35.520520111+00:00 stdout F 2025-12-08T17:59:35.520520111+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:35.520557732+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:35Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create ClusterRole/prometheus-stf for non-resource URL /metrics access"} 2025-12-08T17:59:36.391917417+00:00 stdout F 2025-12-08T17:59:36.391917417+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:36.391917417+00:00 stdout F 2025-12-08T17:59:36.391917417+00:00 stdout F TASK [servicetelemetry : Create ClusterRoleBinding/prometheus-stf] ************* 2025-12-08T17:59:36.391917417+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:64 2025-12-08T17:59:36.391917417+00:00 stdout F 2025-12-08T17:59:36.391917417+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T17:59:36.391962279+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:36Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create ClusterRoleBinding/prometheus-stf"} 2025-12-08T17:59:37.224935972+00:00 stdout F 2025-12-08T17:59:37.224935972+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:37.224935972+00:00 stdout F 2025-12-08T17:59:37.224935972+00:00 stdout F TASK [servicetelemetry : Create Role/prometheus-stf for Prometheus operations] *** 2025-12-08T17:59:37.224935972+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:83 2025-12-08T17:59:37.224935972+00:00 stdout F 2025-12-08T17:59:37.224935972+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:37.224972523+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:37Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create Role/prometheus-stf for Prometheus operations"} 2025-12-08T17:59:37.879050452+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:37Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/roles/prometheus-stf","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"roles","Subresource":"","Name":"prometheus-stf","Parts":["roles","prometheus-stf"]}} 2025-12-08T17:59:38.020081018+00:00 stdout F 2025-12-08T17:59:38.020081018+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:38.020081018+00:00 stdout F 2025-12-08T17:59:38.020081018+00:00 stdout F TASK [servicetelemetry : Create RoleBinding/prometheus-stf] ******************** 2025-12-08T17:59:38.020081018+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:152 2025-12-08T17:59:38.020081018+00:00 stdout F 2025-12-08T17:59:38.020081018+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:38.020123139+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:38Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create RoleBinding/prometheus-stf"} 2025-12-08T17:59:38.752844280+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:38Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/rolebindings/prometheus-stf","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"rolebindings","Subresource":"","Name":"prometheus-stf","Parts":["rolebindings","prometheus-stf"]}} 
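The tasks above create the RBAC objects backing the Prometheus deployment: ServiceAccount/prometheus-stf, a ClusterRole and ClusterRoleBinding named prometheus-stf for non-resource /metrics access, plus a namespaced Role and RoleBinding of the same name. A quick way to confirm all five objects exist after a run like this, assuming oc access to the same cluster (illustrative commands, not taken from this log):

# Namespaced RBAC pieces for the Prometheus service account
oc -n service-telemetry get serviceaccount/prometheus-stf role/prometheus-stf rolebinding/prometheus-stf
# Cluster-scoped pieces (non-resource /metrics access)
oc get clusterrole/prometheus-stf clusterrolebinding/prometheus-stf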
2025-12-08T17:59:38.896559289+00:00 stdout F 2025-12-08T17:59:38.896559289+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:38.896559289+00:00 stdout F 2025-12-08T17:59:38.896559289+00:00 stdout F TASK [servicetelemetry : Remove old ClusterRoleBinding for prometheus-k8s using CMO roleRef] *** 2025-12-08T17:59:38.896559289+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:172 2025-12-08T17:59:38.896559289+00:00 stdout F 2025-12-08T17:59:38.896559289+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:38.896596620+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:38Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Remove old ClusterRoleBinding for prometheus-k8s using CMO roleRef"} 2025-12-08T17:59:39.748051230+00:00 stdout F 2025-12-08T17:59:39.748051230+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:39.748051230+00:00 stdout F 2025-12-08T17:59:39.748051230+00:00 stdout F TASK [Lookup template] ******************************** 2025-12-08T17:59:39.748051230+00:00 stdout F ok: [localhost] => { 2025-12-08T17:59:39.748051230+00:00 stdout F  "msg": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "apiVersion": "monitoring.rhobs/v1", 2025-12-08T17:59:39.748051230+00:00 stdout F  "kind": "Prometheus", 2025-12-08T17:59:39.748051230+00:00 stdout F  "metadata": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "labels": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "app.kubernetes.io/managed-by": "observability-operator", 2025-12-08T17:59:39.748051230+00:00 stdout F  "prometheus": "default" 2025-12-08T17:59:39.748051230+00:00 stdout F  }, 2025-12-08T17:59:39.748051230+00:00 stdout F  "name": "default", 2025-12-08T17:59:39.748051230+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:59:39.748051230+00:00 stdout F  }, 2025-12-08T17:59:39.748051230+00:00 stdout F  "spec": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "alerting": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "alertmanagers": [ 2025-12-08T17:59:39.748051230+00:00 stdout F  { 2025-12-08T17:59:39.748051230+00:00 stdout F  "bearerTokenFile": "/var/run/secrets/kubernetes.io/serviceaccount/token", 2025-12-08T17:59:39.748051230+00:00 stdout F  "name": "default-alertmanager-proxy", 2025-12-08T17:59:39.748051230+00:00 stdout F  "namespace": "service-telemetry", 2025-12-08T17:59:39.748051230+00:00 stdout F  "port": "web", 2025-12-08T17:59:39.748051230+00:00 stdout F  "scheme": "https", 2025-12-08T17:59:39.748051230+00:00 stdout F  "tlsConfig": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "caFile": "/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt", 2025-12-08T17:59:39.748051230+00:00 stdout F  "serverName": "default-alertmanager-proxy.service-telemetry.svc" 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  ] 2025-12-08T17:59:39.748051230+00:00 stdout F  }, 2025-12-08T17:59:39.748051230+00:00 stdout F  "configMaps": [ 2025-12-08T17:59:39.748051230+00:00 stdout F  "serving-certs-ca-bundle" 2025-12-08T17:59:39.748051230+00:00 stdout F  ], 2025-12-08T17:59:39.748051230+00:00 stdout 
F  "containers": [ 2025-12-08T17:59:39.748051230+00:00 stdout F  { 2025-12-08T17:59:39.748051230+00:00 stdout F  "args": [ 2025-12-08T17:59:39.748051230+00:00 stdout F  "-https-address=:9092", 2025-12-08T17:59:39.748051230+00:00 stdout F  "-tls-cert=/etc/tls/private/tls.crt", 2025-12-08T17:59:39.748051230+00:00 stdout F  "-tls-key=/etc/tls/private/tls.key", 2025-12-08T17:59:39.748051230+00:00 stdout F  "-upstream=http://localhost:9090/", 2025-12-08T17:59:39.748051230+00:00 stdout F  "-cookie-secret-file=/etc/proxy/secrets/session_secret", 2025-12-08T17:59:39.748051230+00:00 stdout F  "-openshift-service-account=prometheus-stf", 2025-12-08T17:59:39.748051230+00:00 stdout F  "-openshift-sar={\"namespace\":\"service-telemetry\",\"resource\": \"prometheuses\", \"resourceAPIGroup\":\"monitoring.rhobs\", \"verb\":\"get\"}", 2025-12-08T17:59:39.748051230+00:00 stdout F  "-openshift-delegate-urls={\"/\":{\"namespace\":\"service-telemetry\",\"resource\": \"prometheuses\", \"group\":\"monitoring.rhobs\", \"verb\":\"get\"}}" 2025-12-08T17:59:39.748051230+00:00 stdout F  ], 2025-12-08T17:59:39.748051230+00:00 stdout F  "image": "quay.io/openshift/origin-oauth-proxy:latest", 2025-12-08T17:59:39.748051230+00:00 stdout F  "name": "oauth-proxy", 2025-12-08T17:59:39.748051230+00:00 stdout F  "ports": [ 2025-12-08T17:59:39.748051230+00:00 stdout F  { 2025-12-08T17:59:39.748051230+00:00 stdout F  "containerPort": 9092, 2025-12-08T17:59:39.748051230+00:00 stdout F  "name": "https", 2025-12-08T17:59:39.748051230+00:00 stdout F  "protocol": "TCP" 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  ], 2025-12-08T17:59:39.748051230+00:00 stdout F  "volumeMounts": [ 2025-12-08T17:59:39.748051230+00:00 stdout F  { 2025-12-08T17:59:39.748051230+00:00 stdout F  "mountPath": "/etc/tls/private", 2025-12-08T17:59:39.748051230+00:00 stdout F  "name": "secret-default-prometheus-proxy-tls" 2025-12-08T17:59:39.748051230+00:00 stdout F  }, 2025-12-08T17:59:39.748051230+00:00 stdout F  { 2025-12-08T17:59:39.748051230+00:00 stdout F  "mountPath": "/etc/proxy/secrets", 2025-12-08T17:59:39.748051230+00:00 stdout F  "name": "secret-default-session-secret" 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  ] 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  ], 2025-12-08T17:59:39.748051230+00:00 stdout F  "image": "quay.io/prometheus/prometheus:latest", 2025-12-08T17:59:39.748051230+00:00 stdout F  "listenLocal": true, 2025-12-08T17:59:39.748051230+00:00 stdout F  "podMetadata": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "annotations": null, 2025-12-08T17:59:39.748051230+00:00 stdout F  "labels": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "prometheus": "default" 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  }, 2025-12-08T17:59:39.748051230+00:00 stdout F  "replicas": 1, 2025-12-08T17:59:39.748051230+00:00 stdout F  "retention": "24h", 2025-12-08T17:59:39.748051230+00:00 stdout F  "ruleSelector": {}, 2025-12-08T17:59:39.748051230+00:00 stdout F  "scrapeConfigSelector": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "matchLabels": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "app": "smart-gateway" 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  }, 2025-12-08T17:59:39.748051230+00:00 stdout F  "secrets": [ 2025-12-08T17:59:39.748051230+00:00 stdout F  "default-prometheus-proxy-tls", 
2025-12-08T17:59:39.748051230+00:00 stdout F  "default-session-secret" 2025-12-08T17:59:39.748051230+00:00 stdout F  ], 2025-12-08T17:59:39.748051230+00:00 stdout F  "securityContext": {}, 2025-12-08T17:59:39.748051230+00:00 stdout F  "serviceAccountName": "prometheus-stf", 2025-12-08T17:59:39.748051230+00:00 stdout F  "storage": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "volumeClaimTemplate": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "spec": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "resources": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "requests": { 2025-12-08T17:59:39.748051230+00:00 stdout F  "storage": "20G" 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  }, 2025-12-08T17:59:39.748051230+00:00 stdout F  "storageClassName": "crc-csi-hostpath-provisioner" 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  }, 2025-12-08T17:59:39.748051230+00:00 stdout F  "version": null 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F  } 2025-12-08T17:59:39.748051230+00:00 stdout F } 2025-12-08T17:59:39.748051230+00:00 stdout F 2025-12-08T17:59:39.748051230+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:39.749158949+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:39Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"6231893141681640340","EventData.TaskArgs":""} 2025-12-08T17:59:39.848223190+00:00 stdout F 2025-12-08T17:59:39.848223190+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:39.848223190+00:00 stdout F 2025-12-08T17:59:39.848223190+00:00 stdout F TASK [servicetelemetry : Create an instance of Prometheus] ********************* 2025-12-08T17:59:39.848223190+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:191 2025-12-08T17:59:39.848223190+00:00 stdout F 2025-12-08T17:59:39.848223190+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:39.848278391+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:39Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create an instance of Prometheus"} 2025-12-08T17:59:40.524092263+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:40Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/monitoring.rhobs/v1/namespaces/service-telemetry/prometheuses/default","Verb":"get","APIPrefix":"apis","APIGroup":"monitoring.rhobs","APIVersion":"v1","Namespace":"service-telemetry","Resource":"prometheuses","Subresource":"","Name":"default","Parts":["prometheuses","default"]}} 2025-12-08T17:59:40.667098812+00:00 stdout F 2025-12-08T17:59:40.667098812+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:40.667098812+00:00 stdout F 2025-12-08T17:59:40.667098812+00:00 stdout F TASK [servicetelemetry : Ensure no community Prometheus is installed if not using community operator] *** 
2025-12-08T17:59:40.667098812+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:197 2025-12-08T17:59:40.667098812+00:00 stdout F 2025-12-08T17:59:40.667098812+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:40.667139143+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:40Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Ensure no community Prometheus is installed if not using community operator"} 2025-12-08T17:59:41.823143370+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:41Z","logger":"proxy","msg":"Cache miss: monitoring.coreos.com/v1, Kind=Prometheus, service-telemetry/default"} 2025-12-08T17:59:41.948527285+00:00 stdout F 2025-12-08T17:59:41.948527285+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:41.948527285+00:00 stdout F 2025-12-08T17:59:41.948527285+00:00 stdout F TASK [servicetelemetry : Ensure no rhobs Prometheus is installed if not using it] *** 2025-12-08T17:59:41.948527285+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:207 2025-12-08T17:59:41.948527285+00:00 stdout F 2025-12-08T17:59:41.948527285+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:41.948608537+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:41Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Ensure no rhobs Prometheus is installed if not using it"} 2025-12-08T17:59:42.077562836+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:42Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create service to access the prometheus proxy"} 2025-12-08T17:59:42.077642998+00:00 stdout F 2025-12-08T17:59:42.077642998+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:42.077663548+00:00 stdout F 2025-12-08T17:59:42.077663548+00:00 stdout F TASK [servicetelemetry : Create service to access the prometheus proxy] ******** 2025-12-08T17:59:42.077663548+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:223 2025-12-08T17:59:42.077683189+00:00 stdout F 2025-12-08T17:59:42.077683189+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:43.041490208+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:43Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/services/default-prometheus-proxy","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"services","Subresource":"","Name":"default-prometheus-proxy","Parts":["services","default-prometheus-proxy"]}} 2025-12-08T17:59:43.302726969+00:00 stdout F 
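At this point the operator has applied the monitoring.rhobs/v1 Prometheus named default that was rendered above (oauth-proxy sidecar listening on 9092, 24h retention, a 20G volume claim on crc-csi-hostpath-provisioner) and has created the default-prometheus-proxy service in front of it. A minimal check of what actually landed on the cluster, assuming oc access to the same namespace (illustrative, not captured in this log):

# Compare the applied Prometheus CR with the template logged above
oc -n service-telemetry get prometheuses.monitoring.rhobs default -o yaml
# The oauth-proxy sidecar is reached through this service
oc -n service-telemetry get service default-prometheus-proxy -o wide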
2025-12-08T17:59:43.302726969+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:43.302726969+00:00 stdout F 2025-12-08T17:59:43.302726969+00:00 stdout F TASK [servicetelemetry : Create route to access the prometheus proxy] ********** 2025-12-08T17:59:43.302726969+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus.yml:234 2025-12-08T17:59:43.302726969+00:00 stdout F 2025-12-08T17:59:43.302726969+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:43.302766990+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:43Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create route to access the prometheus proxy"} 2025-12-08T17:59:44.253811350+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:44Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/route.openshift.io/v1/namespaces/service-telemetry/routes/default-prometheus-proxy","Verb":"get","APIPrefix":"apis","APIGroup":"route.openshift.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"routes","Subresource":"","Name":"default-prometheus-proxy","Parts":["routes","default-prometheus-proxy"]}} 2025-12-08T17:59:44.371828643+00:00 stdout F 2025-12-08T17:59:44.371828643+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:44.371828643+00:00 stdout F 2025-12-08T17:59:44.371828643+00:00 stdout F TASK [servicetelemetry : Create Prometheus read-only user] ********************* 2025-12-08T17:59:44.371828643+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:50 2025-12-08T17:59:44.371828643+00:00 stdout F 2025-12-08T17:59:44.371828643+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:44.371914535+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:44Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create Prometheus read-only user"} 2025-12-08T17:59:44.411853236+00:00 stdout F 2025-12-08T17:59:44.411853236+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:44.411853236+00:00 stdout F 2025-12-08T17:59:44.411853236+00:00 stdout F TASK [servicetelemetry : Create ServiceAccount/stf-prometheus-reader] ********** 2025-12-08T17:59:44.411853236+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:1 2025-12-08T17:59:44.411853236+00:00 stdout F 2025-12-08T17:59:44.411853236+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:44.411913318+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:44Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create ServiceAccount/stf-prometheus-reader"} 
2025-12-08T17:59:45.343642700+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:45Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/serviceaccounts/stf-prometheus-reader","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"serviceaccounts","Subresource":"","Name":"stf-prometheus-reader","Parts":["serviceaccounts","stf-prometheus-reader"]}} 2025-12-08T17:59:45.477572072+00:00 stdout F 2025-12-08T17:59:45.477572072+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:45.477572072+00:00 stdout F 2025-12-08T17:59:45.477572072+00:00 stdout F TASK [servicetelemetry : Create prometheus-reader Role] ************************ 2025-12-08T17:59:45.477572072+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:11 2025-12-08T17:59:45.477572072+00:00 stdout F 2025-12-08T17:59:45.477572072+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:45.477610003+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:45Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create prometheus-reader Role"} 2025-12-08T17:59:46.367722542+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:46Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/roles/prometheus-reader","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"roles","Subresource":"","Name":"prometheus-reader","Parts":["roles","prometheus-reader"]}} 2025-12-08T17:59:46.495164002+00:00 stdout F 2025-12-08T17:59:46.495164002+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:46.495164002+00:00 stdout F 2025-12-08T17:59:46.495164002+00:00 stdout F TASK [servicetelemetry : Create prometheus-reader RoleBinding for stf-prometheus-reader] *** 2025-12-08T17:59:46.495164002+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:30 2025-12-08T17:59:46.495164002+00:00 stdout F 2025-12-08T17:59:46.495164002+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:46.495200293+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:46Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create prometheus-reader RoleBinding for stf-prometheus-reader"} 2025-12-08T17:59:48.092807656+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:48Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/rolebindings/stf-prometheus-reader","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"rolebindings","Subresource":"","Name":"stf-prometheus-reader","Parts":["rolebindings","stf-prometheus-reader"]}} 2025-12-08T17:59:48.205791258+00:00 stdout F 2025-12-08T17:59:48.205791258+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:48.205791258+00:00 stdout F 2025-12-08T17:59:48.205791258+00:00 stdout F TASK [servicetelemetry : Create an access token for stf-prometheus-reader] ***** 2025-12-08T17:59:48.205791258+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_prometheus_reader.yml:47 2025-12-08T17:59:48.205791258+00:00 stdout F 2025-12-08T17:59:48.205791258+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:48.206033235+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:48Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create an access token for stf-prometheus-reader"} 2025-12-08T17:59:48.927410785+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:48Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/stf-prometheus-reader-token","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"stf-prometheus-reader-token","Parts":["secrets","stf-prometheus-reader-token"]}} 2025-12-08T17:59:49.051758625+00:00 stdout F 2025-12-08T17:59:49.051758625+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:49.051758625+00:00 stdout F 2025-12-08T17:59:49.051758625+00:00 stdout F TASK [servicetelemetry : Create Alertmanager instance] ************************* 2025-12-08T17:59:49.051758625+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:54 2025-12-08T17:59:49.051758625+00:00 stdout F 2025-12-08T17:59:49.051758625+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:49.051829367+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:49Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create Alertmanager instance"} 2025-12-08T17:59:49.221674533+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:49Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"6231893141681640340","EventData.TaskArgs":""} 2025-12-08T17:59:49.221778277+00:00 stdout F 2025-12-08T17:59:49.221778277+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:49.221837808+00:00 stdout F 2025-12-08T17:59:49.221837808+00:00 stdout F TASK [Lookup template] 
******************************** 2025-12-08T17:59:49.221920991+00:00 stdout F ok: [localhost] => { 2025-12-08T17:59:49.221920991+00:00 stdout F  "msg": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "apiVersion": "monitoring.rhobs/v1", 2025-12-08T17:59:49.221920991+00:00 stdout F  "kind": "Alertmanager", 2025-12-08T17:59:49.221920991+00:00 stdout F  "metadata": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "labels": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "alertmanager": "default", 2025-12-08T17:59:49.221920991+00:00 stdout F  "app.kubernetes.io/managed-by": "observability-operator" 2025-12-08T17:59:49.221920991+00:00 stdout F  }, 2025-12-08T17:59:49.221920991+00:00 stdout F  "name": "default", 2025-12-08T17:59:49.221920991+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:59:49.221920991+00:00 stdout F  }, 2025-12-08T17:59:49.221920991+00:00 stdout F  "spec": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "containers": [ 2025-12-08T17:59:49.221920991+00:00 stdout F  { 2025-12-08T17:59:49.221920991+00:00 stdout F  "args": [ 2025-12-08T17:59:49.221920991+00:00 stdout F  "-https-address=:9095", 2025-12-08T17:59:49.221920991+00:00 stdout F  "-tls-cert=/etc/tls/private/tls.crt", 2025-12-08T17:59:49.221920991+00:00 stdout F  "-tls-key=/etc/tls/private/tls.key", 2025-12-08T17:59:49.221920991+00:00 stdout F  "-upstream=http://localhost:9093/", 2025-12-08T17:59:49.221920991+00:00 stdout F  "-cookie-secret-file=/etc/proxy/secrets/session_secret", 2025-12-08T17:59:49.221920991+00:00 stdout F  "-openshift-service-account=alertmanager-stf", 2025-12-08T17:59:49.221920991+00:00 stdout F  "-openshift-sar={\"namespace\":\"service-telemetry\", \"resource\": \"alertmanagers\", \"resourceAPIGroup\":\"monitoring.rhobs\", \"verb\":\"get\"}", 2025-12-08T17:59:49.221920991+00:00 stdout F  "-openshift-delegate-urls={\"/\": {\"namespace\":\"service-telemetry\", \"resource\": \"alertmanagers\", \"group\":\"monitoring.rhobs\", \"verb\":\"get\"}}" 2025-12-08T17:59:49.221920991+00:00 stdout F  ], 2025-12-08T17:59:49.221920991+00:00 stdout F  "image": "quay.io/openshift/origin-oauth-proxy:latest", 2025-12-08T17:59:49.221920991+00:00 stdout F  "name": "oauth-proxy", 2025-12-08T17:59:49.221920991+00:00 stdout F  "ports": [ 2025-12-08T17:59:49.221920991+00:00 stdout F  { 2025-12-08T17:59:49.221920991+00:00 stdout F  "containerPort": 9095, 2025-12-08T17:59:49.221920991+00:00 stdout F  "name": "https", 2025-12-08T17:59:49.221920991+00:00 stdout F  "protocol": "TCP" 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F  ], 2025-12-08T17:59:49.221920991+00:00 stdout F  "volumeMounts": [ 2025-12-08T17:59:49.221920991+00:00 stdout F  { 2025-12-08T17:59:49.221920991+00:00 stdout F  "mountPath": "/etc/tls/private", 2025-12-08T17:59:49.221920991+00:00 stdout F  "name": "secret-default-alertmanager-proxy-tls" 2025-12-08T17:59:49.221920991+00:00 stdout F  }, 2025-12-08T17:59:49.221920991+00:00 stdout F  { 2025-12-08T17:59:49.221920991+00:00 stdout F  "mountPath": "/etc/proxy/secrets", 2025-12-08T17:59:49.221920991+00:00 stdout F  "name": "secret-default-session-secret" 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F  ] 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F  ], 2025-12-08T17:59:49.221920991+00:00 stdout F  "image": "quay.io/prometheus/alertmanager:latest", 2025-12-08T17:59:49.221920991+00:00 stdout F  "listenLocal": true, 2025-12-08T17:59:49.221920991+00:00 stdout F  
"podMetadata": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "labels": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "alertmanager": "default" 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F  }, 2025-12-08T17:59:49.221920991+00:00 stdout F  "replicas": 1, 2025-12-08T17:59:49.221920991+00:00 stdout F  "scrapeConfigSelector": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "matchLabels": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "app": "smart-gateway" 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F  }, 2025-12-08T17:59:49.221920991+00:00 stdout F  "secrets": [ 2025-12-08T17:59:49.221920991+00:00 stdout F  "default-alertmanager-proxy-tls", 2025-12-08T17:59:49.221920991+00:00 stdout F  "default-session-secret" 2025-12-08T17:59:49.221920991+00:00 stdout F  ], 2025-12-08T17:59:49.221920991+00:00 stdout F  "serviceAccountName": "alertmanager-stf", 2025-12-08T17:59:49.221920991+00:00 stdout F  "storage": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "volumeClaimTemplate": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "spec": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "resources": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "requests": { 2025-12-08T17:59:49.221920991+00:00 stdout F  "storage": "20G" 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F  }, 2025-12-08T17:59:49.221920991+00:00 stdout F  "storageClassName": "crc-csi-hostpath-provisioner" 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F  } 2025-12-08T17:59:49.221920991+00:00 stdout F } 2025-12-08T17:59:49.222030503+00:00 stdout F 2025-12-08T17:59:49.222030503+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:49.368645989+00:00 stdout F 2025-12-08T17:59:49.368645989+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:49.368645989+00:00 stdout F 2025-12-08T17:59:49.368645989+00:00 stdout F TASK [Lookup alertmanager configuration template] ******************************** 2025-12-08T17:59:49.368645989+00:00 stdout F ok: [localhost] => { 2025-12-08T17:59:49.368645989+00:00 stdout F  "msg": { 2025-12-08T17:59:49.368645989+00:00 stdout F  "apiVersion": "v1", 2025-12-08T17:59:49.368645989+00:00 stdout F  "kind": "Secret", 2025-12-08T17:59:49.368645989+00:00 stdout F  "metadata": { 2025-12-08T17:59:49.368645989+00:00 stdout F  "name": "alertmanager-default", 2025-12-08T17:59:49.368645989+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:59:49.368645989+00:00 stdout F  }, 2025-12-08T17:59:49.368645989+00:00 stdout F  "stringData": { 2025-12-08T17:59:49.368645989+00:00 stdout F  "alertmanager.yaml": "global:\n resolve_timeout: 5m\nroute:\n group_by: ['job']\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 12h\n receiver: 'snmp_wh'\nreceivers:\n- name: 'snmp_wh'\n webhook_configs:\n - url: 'http://default-prometheus-webhook-snmp:9099'" 2025-12-08T17:59:49.368645989+00:00 stdout F  }, 2025-12-08T17:59:49.368645989+00:00 stdout F  "type": "Opaque" 2025-12-08T17:59:49.368645989+00:00 stdout F  } 2025-12-08T17:59:49.368645989+00:00 stdout F } 2025-12-08T17:59:49.368645989+00:00 stdout F 2025-12-08T17:59:49.368645989+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T17:59:49.368689240+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:49Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"6231893141681640340","EventData.TaskArgs":""} 2025-12-08T17:59:49.457131416+00:00 stdout F 2025-12-08T17:59:49.457131416+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:49.457131416+00:00 stdout F 2025-12-08T17:59:49.457131416+00:00 stdout F TASK [servicetelemetry : Create an Alertmanager configuration secret] ********** 2025-12-08T17:59:49.457131416+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:19 2025-12-08T17:59:49.457131416+00:00 stdout F 2025-12-08T17:59:49.457131416+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:49.457184768+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:49Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create an Alertmanager configuration secret"} 2025-12-08T17:59:50.139775427+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:50Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/alertmanager-default","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"alertmanager-default","Parts":["secrets","alertmanager-default"]}} 2025-12-08T17:59:50.238624027+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:50Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create an instance of Alertmanager"} 2025-12-08T17:59:50.238666528+00:00 stdout F 2025-12-08T17:59:50.238666528+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:50.238666528+00:00 stdout F 2025-12-08T17:59:50.238666528+00:00 stdout F TASK [servicetelemetry : Create an instance of Alertmanager] ******************* 2025-12-08T17:59:50.238666528+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:25 2025-12-08T17:59:50.238666528+00:00 stdout F 2025-12-08T17:59:50.238666528+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:50.930427690+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:50Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/monitoring.rhobs/v1/namespaces/service-telemetry/alertmanagers/default","Verb":"get","APIPrefix":"apis","APIGroup":"monitoring.rhobs","APIVersion":"v1","Namespace":"service-telemetry","Resource":"alertmanagers","Subresource":"","Name":"default","Parts":["alertmanagers","default"]}} 2025-12-08T17:59:51.085319564+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:51Z","logger":"logging_event_handler","msg":"[playbook task 
start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Ensure no community Alertmanager is installed if not using community operator"} 2025-12-08T17:59:51.085427407+00:00 stdout F 2025-12-08T17:59:51.085427407+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:51.085450657+00:00 stdout F 2025-12-08T17:59:51.085450657+00:00 stdout F TASK [servicetelemetry : Ensure no community Alertmanager is installed if not using community operator] *** 2025-12-08T17:59:51.085450657+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:31 2025-12-08T17:59:51.085470388+00:00 stdout F 2025-12-08T17:59:51.085470388+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:51.785212290+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:51Z","logger":"proxy","msg":"Cache miss: monitoring.coreos.com/v1, Kind=Alertmanager, service-telemetry/default"} 2025-12-08T17:59:51.904406244+00:00 stdout F 2025-12-08T17:59:51.904406244+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:51.904406244+00:00 stdout F 2025-12-08T17:59:51.904406244+00:00 stdout F TASK [servicetelemetry : Ensure no rhobs Alertmanager is installed if not using it] *** 2025-12-08T17:59:51.904406244+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:41 2025-12-08T17:59:51.904406244+00:00 stdout F 2025-12-08T17:59:51.904406244+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:51.904474705+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:51Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Ensure no rhobs Alertmanager is installed if not using it"} 2025-12-08T17:59:51.942170097+00:00 stdout F 2025-12-08T17:59:51.942170097+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:51.942170097+00:00 stdout F 2025-12-08T17:59:51.942170097+00:00 stdout F TASK [servicetelemetry : Create SNMP traps instance] *************************** 2025-12-08T17:59:51.942170097+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:54 2025-12-08T17:59:51.942170097+00:00 stdout F 2025-12-08T17:59:51.942170097+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:51.942239159+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:51Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create SNMP traps instance"} 2025-12-08T17:59:52.093790454+00:00 stdout F 2025-12-08T17:59:52.093790454+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:52.093790454+00:00 stdout F 2025-12-08T17:59:52.093790454+00:00 stdout F TASK [Lookup template] ******************************** 
2025-12-08T17:59:52.093790454+00:00 stdout F ok: [localhost] => { 2025-12-08T17:59:52.093790454+00:00 stdout F  "msg": { 2025-12-08T17:59:52.093790454+00:00 stdout F  "apiVersion": "apps/v1", 2025-12-08T17:59:52.093790454+00:00 stdout F  "kind": "Deployment", 2025-12-08T17:59:52.093790454+00:00 stdout F  "metadata": { 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "default-snmp-webhook", 2025-12-08T17:59:52.093790454+00:00 stdout F  "namespace": "service-telemetry" 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  "spec": { 2025-12-08T17:59:52.093790454+00:00 stdout F  "replicas": 1, 2025-12-08T17:59:52.093790454+00:00 stdout F  "selector": { 2025-12-08T17:59:52.093790454+00:00 stdout F  "matchLabels": { 2025-12-08T17:59:52.093790454+00:00 stdout F  "app": "default-snmp-webhook" 2025-12-08T17:59:52.093790454+00:00 stdout F  } 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  "template": { 2025-12-08T17:59:52.093790454+00:00 stdout F  "metadata": { 2025-12-08T17:59:52.093790454+00:00 stdout F  "labels": { 2025-12-08T17:59:52.093790454+00:00 stdout F  "app": "default-snmp-webhook" 2025-12-08T17:59:52.093790454+00:00 stdout F  } 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  "spec": { 2025-12-08T17:59:52.093790454+00:00 stdout F  "containers": [ 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "env": [ 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "SNMP_COMMUNITY", 2025-12-08T17:59:52.093790454+00:00 stdout F  "value": "public" 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "SNMP_RETRIES", 2025-12-08T17:59:52.093790454+00:00 stdout F  "value": "5" 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "SNMP_HOST", 2025-12-08T17:59:52.093790454+00:00 stdout F  "value": "192.168.24.254" 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "SNMP_PORT", 2025-12-08T17:59:52.093790454+00:00 stdout F  "value": "162" 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "SNMP_TIMEOUT", 2025-12-08T17:59:52.093790454+00:00 stdout F  "value": "1" 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "ALERT_OID_LABEL", 2025-12-08T17:59:52.093790454+00:00 stdout F  "value": "oid" 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "TRAP_OID_PREFIX", 2025-12-08T17:59:52.093790454+00:00 stdout F  "value": "1.3.6.1.4.1.50495.15" 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "TRAP_DEFAULT_OID", 2025-12-08T17:59:52.093790454+00:00 stdout F  "value": "1.3.6.1.4.1.50495.15.1.2.1" 2025-12-08T17:59:52.093790454+00:00 stdout F  }, 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "TRAP_DEFAULT_SEVERITY", 2025-12-08T17:59:52.093790454+00:00 
stdout F  "value": "" 2025-12-08T17:59:52.093790454+00:00 stdout F  } 2025-12-08T17:59:52.093790454+00:00 stdout F  ], 2025-12-08T17:59:52.093790454+00:00 stdout F  "image": "quay.io/infrawatch/prometheus-webhook-snmp:latest", 2025-12-08T17:59:52.093790454+00:00 stdout F  "name": "prometheus-webhook-snmp", 2025-12-08T17:59:52.093790454+00:00 stdout F  "ports": [ 2025-12-08T17:59:52.093790454+00:00 stdout F  { 2025-12-08T17:59:52.093790454+00:00 stdout F  "containerPort": 9099 2025-12-08T17:59:52.093790454+00:00 stdout F  } 2025-12-08T17:59:52.093790454+00:00 stdout F  ] 2025-12-08T17:59:52.093790454+00:00 stdout F  } 2025-12-08T17:59:52.093790454+00:00 stdout F  ] 2025-12-08T17:59:52.093790454+00:00 stdout F  } 2025-12-08T17:59:52.093790454+00:00 stdout F  } 2025-12-08T17:59:52.093790454+00:00 stdout F  } 2025-12-08T17:59:52.093790454+00:00 stdout F  } 2025-12-08T17:59:52.093790454+00:00 stdout F } 2025-12-08T17:59:52.093790454+00:00 stdout F 2025-12-08T17:59:52.093790454+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:52.093862106+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:52Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"6231893141681640340","EventData.TaskArgs":""} 2025-12-08T17:59:52.226433122+00:00 stdout F 2025-12-08T17:59:52.226433122+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:52.226433122+00:00 stdout F 2025-12-08T17:59:52.226433122+00:00 stdout F TASK [servicetelemetry : Create an instance of snmp webhook] ******************* 2025-12-08T17:59:52.226433122+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_snmp_traps.yml:10 2025-12-08T17:59:52.226433122+00:00 stdout F 2025-12-08T17:59:52.226433122+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:52.226466063+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:52Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create an instance of snmp webhook"} 2025-12-08T17:59:53.224294874+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:53Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/apps/v1/namespaces/service-telemetry/deployments/default-snmp-webhook","Verb":"get","APIPrefix":"apis","APIGroup":"apps","APIVersion":"v1","Namespace":"service-telemetry","Resource":"deployments","Subresource":"","Name":"default-snmp-webhook","Parts":["deployments","default-snmp-webhook"]}} 2025-12-08T17:59:53.406069324+00:00 stdout F 2025-12-08T17:59:53.406069324+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:53.406069324+00:00 stdout F 2025-12-08T17:59:53.406069324+00:00 stdout F TASK [servicetelemetry : Create an instance of snmp webhook service] *********** 2025-12-08T17:59:53.406069324+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_snmp_traps.yml:20 2025-12-08T17:59:53.406069324+00:00 stdout F 2025-12-08T17:59:53.406069324+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T17:59:53.406152776+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:53Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create an instance of snmp webhook service"} 2025-12-08T17:59:54.045098389+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:54Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/services/default-prometheus-webhook-snmp","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"services","Subresource":"","Name":"default-prometheus-webhook-snmp","Parts":["services","default-prometheus-webhook-snmp"]}} 2025-12-08T17:59:54.200854465+00:00 stdout F 2025-12-08T17:59:54.200854465+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:54.200854465+00:00 stdout F 2025-12-08T17:59:54.200854465+00:00 stdout F TASK [servicetelemetry : Create ServiceAccount/alertmanager-stf with oauth redirect annotation] *** 2025-12-08T17:59:54.200854465+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:70 2025-12-08T17:59:54.200854465+00:00 stdout F 2025-12-08T17:59:54.200854465+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:54.200935308+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:54Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create ServiceAccount/alertmanager-stf with oauth redirect annotation"} 2025-12-08T17:59:54.924810144+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:54Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/serviceaccounts/alertmanager-stf","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"serviceaccounts","Subresource":"","Name":"alertmanager-stf","Parts":["serviceaccounts","alertmanager-stf"]}} 2025-12-08T17:59:55.055924332+00:00 stdout F 2025-12-08T17:59:55.055924332+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:55.055924332+00:00 stdout F 2025-12-08T17:59:55.055924332+00:00 stdout F TASK [servicetelemetry : Create ClusterRole/alertmanager-stf] ****************** 2025-12-08T17:59:55.055924332+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:81 2025-12-08T17:59:55.055924332+00:00 stdout F 2025-12-08T17:59:55.055924332+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:55.055972813+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:55Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create 
ClusterRole/alertmanager-stf"} 2025-12-08T17:59:56.334138166+00:00 stdout F 2025-12-08T17:59:56.334138166+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:56.334138166+00:00 stdout F 2025-12-08T17:59:56.334138166+00:00 stdout F TASK [servicetelemetry : Create ClusterRoleBinding/alertmanager-stf] *********** 2025-12-08T17:59:56.334138166+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:120 2025-12-08T17:59:56.334138166+00:00 stdout F 2025-12-08T17:59:56.334138166+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:56.334202018+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:56Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create ClusterRoleBinding/alertmanager-stf"} 2025-12-08T17:59:57.160329923+00:00 stdout F 2025-12-08T17:59:57.160329923+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:57.160329923+00:00 stdout F 2025-12-08T17:59:57.160329923+00:00 stdout F TASK [servicetelemetry : Create Role/alertmanager-stf] ************************* 2025-12-08T17:59:57.160329923+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:139 2025-12-08T17:59:57.160329923+00:00 stdout F 2025-12-08T17:59:57.160329923+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:57.160377784+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:57Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create Role/alertmanager-stf"} 2025-12-08T17:59:57.785189465+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:57Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/roles/alertmanager-stf","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"roles","Subresource":"","Name":"alertmanager-stf","Parts":["roles","alertmanager-stf"]}} 2025-12-08T17:59:57.926885272+00:00 stdout F 2025-12-08T17:59:57.926885272+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:57.926885272+00:00 stdout F 2025-12-08T17:59:57.926885272+00:00 stdout F TASK [servicetelemetry : Create RoleBinding/alertmanager-stf] ****************** 2025-12-08T17:59:57.926885272+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:177 2025-12-08T17:59:57.926885272+00:00 stdout F 2025-12-08T17:59:57.926885272+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:57.926926543+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:57Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, 
Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create RoleBinding/alertmanager-stf"} 2025-12-08T17:59:58.606929806+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:58Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/rbac.authorization.k8s.io/v1/namespaces/service-telemetry/rolebindings/alertmanager-stf","Verb":"get","APIPrefix":"apis","APIGroup":"rbac.authorization.k8s.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"rolebindings","Subresource":"","Name":"alertmanager-stf","Parts":["rolebindings","alertmanager-stf"]}} 2025-12-08T17:59:58.767608131+00:00 stdout F 2025-12-08T17:59:58.767608131+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:58.767608131+00:00 stdout F 2025-12-08T17:59:58.767608131+00:00 stdout F TASK [servicetelemetry : Create service to access the Alertmanager proxy] ****** 2025-12-08T17:59:58.767608131+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:202 2025-12-08T17:59:58.767608131+00:00 stdout F 2025-12-08T17:59:58.767608131+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:58.767644342+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:58Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create service to access the Alertmanager proxy"} 2025-12-08T17:59:59.521165488+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:59Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/services/default-alertmanager-proxy","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"services","Subresource":"","Name":"default-alertmanager-proxy","Parts":["services","default-alertmanager-proxy"]}} 2025-12-08T17:59:59.724559627+00:00 stdout F 2025-12-08T17:59:59.724559627+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T17:59:59.724559627+00:00 stdout F 2025-12-08T17:59:59.724559627+00:00 stdout F TASK [servicetelemetry : Create route to access the Alertmanager proxy] ******** 2025-12-08T17:59:59.724559627+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_alertmanager.yml:213 2025-12-08T17:59:59.724559627+00:00 stdout F 2025-12-08T17:59:59.724559627+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T17:59:59.724597298+00:00 stderr F {"level":"info","ts":"2025-12-08T17:59:59Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create route to access the Alertmanager proxy"} 2025-12-08T18:00:00.695839219+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:00Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/route.openshift.io/v1/namespaces/service-telemetry/routes/default-alertmanager-proxy","Verb":"get","APIPrefix":"apis","APIGroup":"route.openshift.io","APIVersion":"v1","Namespace":"service-telemetry","Resource":"routes","Subresource":"","Name":"default-alertmanager-proxy","Parts":["routes","default-alertmanager-proxy"]}} 2025-12-08T18:00:00.833248493+00:00 stdout F 2025-12-08T18:00:00.833248493+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:00.833248493+00:00 stdout F 2025-12-08T18:00:00.833248493+00:00 stdout F TASK [servicetelemetry : Setup Certificates for ElasticSearch] ***************** 2025-12-08T18:00:00.833248493+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:67 2025-12-08T18:00:00.833248493+00:00 stdout F 2025-12-08T18:00:00.833248493+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:00.833283704+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:00Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Setup Certificates for ElasticSearch"} 2025-12-08T18:00:00.874928619+00:00 stdout F 2025-12-08T18:00:00.874928619+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:00.874928619+00:00 stdout F 2025-12-08T18:00:00.874928619+00:00 stdout F TASK [servicetelemetry : Setup ElasticSearch] ********************************** 2025-12-08T18:00:00.874928619+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:70 2025-12-08T18:00:00.874928619+00:00 stdout F 2025-12-08T18:00:00.874928619+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:00.874962260+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:00Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Setup ElasticSearch"} 2025-12-08T18:00:00.955301413+00:00 stdout F 2025-12-08T18:00:00.955301413+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:00.955301413+00:00 stdout F 2025-12-08T18:00:00.955301413+00:00 stdout F TASK [Get data about clouds] ******************************** 2025-12-08T18:00:00.955301413+00:00 stdout F ok: [localhost] => { 2025-12-08T18:00:00.955301413+00:00 stdout F  "servicetelemetry_vars.clouds": [ 2025-12-08T18:00:00.955301413+00:00 stdout F  { 2025-12-08T18:00:00.955301413+00:00 stdout F  "events": { 2025-12-08T18:00:00.955301413+00:00 stdout F  "collectors": [ 2025-12-08T18:00:00.955301413+00:00 stdout F  { 2025-12-08T18:00:00.955301413+00:00 stdout F  "bridge": { 2025-12-08T18:00:00.955301413+00:00 stdout F  "ring_buffer_count": 15000, 2025-12-08T18:00:00.955301413+00:00 stdout F  "ring_buffer_size": 16384, 2025-12-08T18:00:00.955301413+00:00 stdout F  "verbose": false 2025-12-08T18:00:00.955301413+00:00 stdout F  }, 2025-12-08T18:00:00.955301413+00:00 stdout F  "collector_type": "collectd", 2025-12-08T18:00:00.955301413+00:00 stdout F  "debug_enabled": false, 
2025-12-08T18:00:00.955301413+00:00 stdout F  "subscription_address": "collectd/cloud1-notify" 2025-12-08T18:00:00.955301413+00:00 stdout F  }, 2025-12-08T18:00:00.955301413+00:00 stdout F  { 2025-12-08T18:00:00.955301413+00:00 stdout F  "bridge": { 2025-12-08T18:00:00.955301413+00:00 stdout F  "ring_buffer_count": 15000, 2025-12-08T18:00:00.955301413+00:00 stdout F  "ring_buffer_size": 16384, 2025-12-08T18:00:00.955301413+00:00 stdout F  "verbose": false 2025-12-08T18:00:00.955301413+00:00 stdout F  }, 2025-12-08T18:00:00.955301413+00:00 stdout F  "collector_type": "ceilometer", 2025-12-08T18:00:00.955301413+00:00 stdout F  "debug_enabled": false, 2025-12-08T18:00:00.955301413+00:00 stdout F  "subscription_address": "anycast/ceilometer/cloud1-event.sample" 2025-12-08T18:00:00.955301413+00:00 stdout F  } 2025-12-08T18:00:00.955301413+00:00 stdout F  ] 2025-12-08T18:00:00.955301413+00:00 stdout F  }, 2025-12-08T18:00:00.955301413+00:00 stdout F  "metrics": { 2025-12-08T18:00:00.955301413+00:00 stdout F  "collectors": [ 2025-12-08T18:00:00.955301413+00:00 stdout F  { 2025-12-08T18:00:00.955301413+00:00 stdout F  "bridge": { 2025-12-08T18:00:00.955301413+00:00 stdout F  "ring_buffer_count": 15000, 2025-12-08T18:00:00.955301413+00:00 stdout F  "ring_buffer_size": 16384, 2025-12-08T18:00:00.955301413+00:00 stdout F  "verbose": false 2025-12-08T18:00:00.955301413+00:00 stdout F  }, 2025-12-08T18:00:00.955301413+00:00 stdout F  "collector_type": "collectd", 2025-12-08T18:00:00.955301413+00:00 stdout F  "debug_enabled": false, 2025-12-08T18:00:00.955301413+00:00 stdout F  "subscription_address": "collectd/cloud1-telemetry" 2025-12-08T18:00:00.955301413+00:00 stdout F  }, 2025-12-08T18:00:00.955301413+00:00 stdout F  { 2025-12-08T18:00:00.955301413+00:00 stdout F  "bridge": { 2025-12-08T18:00:00.955301413+00:00 stdout F  "ring_buffer_count": 15000, 2025-12-08T18:00:00.955301413+00:00 stdout F  "ring_buffer_size": 16384, 2025-12-08T18:00:00.955301413+00:00 stdout F  "verbose": false 2025-12-08T18:00:00.955301413+00:00 stdout F  }, 2025-12-08T18:00:00.955301413+00:00 stdout F  "collector_type": "ceilometer", 2025-12-08T18:00:00.955301413+00:00 stdout F  "debug_enabled": false, 2025-12-08T18:00:00.955301413+00:00 stdout F  "subscription_address": "anycast/ceilometer/cloud1-metering.sample" 2025-12-08T18:00:00.955301413+00:00 stdout F  }, 2025-12-08T18:00:00.955301413+00:00 stdout F  { 2025-12-08T18:00:00.955301413+00:00 stdout F  "bridge": { 2025-12-08T18:00:00.955301413+00:00 stdout F  "ring_buffer_count": 15000, 2025-12-08T18:00:00.955301413+00:00 stdout F  "ring_buffer_size": 65535, 2025-12-08T18:00:00.955301413+00:00 stdout F  "verbose": false 2025-12-08T18:00:00.955301413+00:00 stdout F  }, 2025-12-08T18:00:00.955301413+00:00 stdout F  "collector_type": "sensubility", 2025-12-08T18:00:00.955301413+00:00 stdout F  "debug_enabled": false, 2025-12-08T18:00:00.955301413+00:00 stdout F  "subscription_address": "sensubility/cloud1-telemetry" 2025-12-08T18:00:00.955301413+00:00 stdout F  } 2025-12-08T18:00:00.955301413+00:00 stdout F  ] 2025-12-08T18:00:00.955301413+00:00 stdout F  }, 2025-12-08T18:00:00.955301413+00:00 stdout F  "name": "cloud1" 2025-12-08T18:00:00.955301413+00:00 stdout F  } 2025-12-08T18:00:00.955301413+00:00 stdout F  ] 2025-12-08T18:00:00.955301413+00:00 stdout F } 2025-12-08T18:00:00.955301413+00:00 stdout F 2025-12-08T18:00:00.955301413+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:00.955369205+00:00 stderr F 
{"level":"info","ts":"2025-12-08T18:00:00Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"6231893141681640340","EventData.TaskArgs":""} 2025-12-08T18:00:00.957156822+00:00 stdout F 2025-12-08T18:00:00.957156822+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:00.957156822+00:00 stdout F 2025-12-08T18:00:00.957156822+00:00 stdout F TASK [servicetelemetry : Loop through cloud instances to setup transport receivers] *** 2025-12-08T18:00:00.957156822+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:83 2025-12-08T18:00:00.957156822+00:00 stdout F 2025-12-08T18:00:00.957156822+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:00.957176183+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:00Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Loop through cloud instances to setup transport receivers"} 2025-12-08T18:00:01.042315761+00:00 stdout F 2025-12-08T18:00:01.042315761+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:01.042315761+00:00 stdout F 2025-12-08T18:00:01.042315761+00:00 stdout F TASK [Cloud collector setup] ******************************** 2025-12-08T18:00:01.042315761+00:00 stdout F ok: [localhost] => { 2025-12-08T18:00:01.042315761+00:00 stdout F  "msg": "Working on cloud {'name': 'cloud1', 'metrics': {'collectors': [{'collector_type': 'collectd', 'subscription_address': 'collectd/cloud1-telemetry', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}, {'collector_type': 'ceilometer', 'subscription_address': 'anycast/ceilometer/cloud1-metering.sample', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}, {'collector_type': 'sensubility', 'subscription_address': 'sensubility/cloud1-telemetry', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 65535, 'ring_buffer_count': 15000, 'verbose': False}}]}, 'events': {'collectors': [{'collector_type': 'collectd', 'subscription_address': 'collectd/cloud1-notify', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}, {'collector_type': 'ceilometer', 'subscription_address': 'anycast/ceilometer/cloud1-event.sample', 'debug_enabled': False, 'bridge': {'ring_buffer_size': 16384, 'ring_buffer_count': 15000, 'verbose': False}}]}} to setup metrics and events Smart Gateways\n" 2025-12-08T18:00:01.042315761+00:00 stdout F } 2025-12-08T18:00:01.042315761+00:00 stdout F 2025-12-08T18:00:01.042315761+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:01.042360712+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:01Z","logger":"logging_event_handler","msg":"[playbook debug]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"runner_on_ok","job":"6231893141681640340","EventData.TaskArgs":""} 2025-12-08T18:00:01.044860038+00:00 stdout F 2025-12-08T18:00:01.044860038+00:00 stdout F 
--------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:01.044860038+00:00 stdout F 2025-12-08T18:00:01.044860038+00:00 stdout F TASK [servicetelemetry : Deploy Metrics Smart Gateway instance for each collector] *** 2025-12-08T18:00:01.044860038+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:6 2025-12-08T18:00:01.044860038+00:00 stdout F 2025-12-08T18:00:01.044860038+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:01.044893209+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:01Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy Metrics Smart Gateway instance for each collector"} 2025-12-08T18:00:01.125714065+00:00 stdout F 2025-12-08T18:00:01.125714065+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:01.125714065+00:00 stdout F 2025-12-08T18:00:01.125714065+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T18:00:01.125714065+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T18:00:01.125714065+00:00 stdout F 2025-12-08T18:00:01.125714065+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:01.125753776+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:01Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T18:00:01.861361190+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:01Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways/default-cloud1-coll-meter","Verb":"get","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"default-cloud1-coll-meter","Parts":["smartgateways","default-cloud1-coll-meter"]}} 2025-12-08T18:00:01.983921823+00:00 stdout F 2025-12-08T18:00:01.983921823+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:01.983921823+00:00 stdout F 2025-12-08T18:00:01.983921823+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T18:00:01.983921823+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T18:00:01.983921823+00:00 stdout F 2025-12-08T18:00:01.983921823+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:01.983967044+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:01Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for 
metrics SGs"} 2025-12-08T18:00:02.045821901+00:00 stdout F 2025-12-08T18:00:02.045821901+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:02.045821901+00:00 stdout F 2025-12-08T18:00:02.045821901+00:00 stdout F TASK [servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs] *** 2025-12-08T18:00:02.045821901+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:1 2025-12-08T18:00:02.045821901+00:00 stdout F 2025-12-08T18:00:02.045821901+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:02.045857252+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:02Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs"} 2025-12-08T18:00:02.747841573+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:02Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/prometheus-stf-token","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"prometheus-stf-token","Parts":["secrets","prometheus-stf-token"]}} 2025-12-08T18:00:02.909173765+00:00 stdout F 2025-12-08T18:00:02.909173765+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:02.909173765+00:00 stdout F 2025-12-08T18:00:02.909173765+00:00 stdout F TASK [servicetelemetry : Create ScrapeConfig to scrape Smart Gateway] ********** 2025-12-08T18:00:02.909173765+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:64 2025-12-08T18:00:02.909173765+00:00 stdout F 2025-12-08T18:00:02.909173765+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:02.909221086+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:02Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create ScrapeConfig to scrape Smart Gateway"} 2025-12-08T18:00:03.613352464+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:03Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/monitoring.rhobs/v1alpha1/namespaces/service-telemetry/scrapeconfigs/default-cloud1-coll-meter","Verb":"get","APIPrefix":"apis","APIGroup":"monitoring.rhobs","APIVersion":"v1alpha1","Namespace":"service-telemetry","Resource":"scrapeconfigs","Subresource":"","Name":"default-cloud1-coll-meter","Parts":["scrapeconfigs","default-cloud1-coll-meter"]}} 2025-12-08T18:00:03.726250603+00:00 stdout F 2025-12-08T18:00:03.726250603+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:03.726250603+00:00 stdout F 2025-12-08T18:00:03.726250603+00:00 stdout F TASK [servicetelemetry : Create additional ScrapeConfig if provided] *********** 2025-12-08T18:00:03.726250603+00:00 stdout F task path: 
/opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:70 2025-12-08T18:00:03.726250603+00:00 stdout F 2025-12-08T18:00:03.726250603+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:03.726301264+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:03Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create additional ScrapeConfig if provided"} 2025-12-08T18:00:03.745627413+00:00 stdout F 2025-12-08T18:00:03.745627413+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:03.745627413+00:00 stdout F 2025-12-08T18:00:03.745627413+00:00 stdout F TASK [servicetelemetry : Create additional ServiceMonitor if provided (legacy)] *** 2025-12-08T18:00:03.745627413+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:77 2025-12-08T18:00:03.745627413+00:00 stdout F 2025-12-08T18:00:03.745627413+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:03.745660063+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:03Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create additional ServiceMonitor if provided (legacy)"} 2025-12-08T18:00:03.769608944+00:00 stdout F 2025-12-08T18:00:03.769608944+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:03.769608944+00:00 stdout F 2025-12-08T18:00:03.769608944+00:00 stdout F TASK [servicetelemetry : Remove (legacy) default ServiceMonitors] ************** 2025-12-08T18:00:03.769608944+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:84 2025-12-08T18:00:03.769608944+00:00 stdout F 2025-12-08T18:00:03.769608944+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:03.769643185+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:03Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Remove (legacy) default ServiceMonitors"} 2025-12-08T18:00:04.401861750+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:04Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=ServiceMonitor, service-telemetry/default-cloud1-coll-meter"} 2025-12-08T18:00:04.535975187+00:00 stdout F 2025-12-08T18:00:04.535975187+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:04.535975187+00:00 stdout F 2025-12-08T18:00:04.535975187+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T18:00:04.535975187+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T18:00:04.535975187+00:00 stdout F 2025-12-08T18:00:04.535975187+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T18:00:04.536018968+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:04Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T18:00:05.220542000+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:05Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways/default-cloud1-ceil-meter","Verb":"get","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"default-cloud1-ceil-meter","Parts":["smartgateways","default-cloud1-ceil-meter"]}} 2025-12-08T18:00:05.360544082+00:00 stdout F 2025-12-08T18:00:05.360544082+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:05.360544082+00:00 stdout F 2025-12-08T18:00:05.360544082+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T18:00:05.360544082+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T18:00:05.360544082+00:00 stdout F 2025-12-08T18:00:05.360544082+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:05.360589343+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:05Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T18:00:05.412783535+00:00 stdout F 2025-12-08T18:00:05.412783535+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:05.412783535+00:00 stdout F 2025-12-08T18:00:05.412783535+00:00 stdout F TASK [servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs] *** 2025-12-08T18:00:05.412783535+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:1 2025-12-08T18:00:05.412783535+00:00 stdout F 2025-12-08T18:00:05.412783535+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:05.412828756+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:05Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs"} 2025-12-08T18:00:06.094766090+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:06Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/prometheus-stf-token","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"prometheus-stf-token","Parts":["secrets","prometheus-stf-token"]}} 2025-12-08T18:00:06.277597778+00:00 stdout F 2025-12-08T18:00:06.277597778+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:06.277597778+00:00 stdout F 2025-12-08T18:00:06.277597778+00:00 stdout F TASK [servicetelemetry : Create ScrapeConfig to scrape Smart Gateway] ********** 2025-12-08T18:00:06.277597778+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:64 2025-12-08T18:00:06.277597778+00:00 stdout F 2025-12-08T18:00:06.277597778+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:06.277668330+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:06Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create ScrapeConfig to scrape Smart Gateway"} 2025-12-08T18:00:06.987634030+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:06Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/monitoring.rhobs/v1alpha1/namespaces/service-telemetry/scrapeconfigs/default-cloud1-ceil-meter","Verb":"get","APIPrefix":"apis","APIGroup":"monitoring.rhobs","APIVersion":"v1alpha1","Namespace":"service-telemetry","Resource":"scrapeconfigs","Subresource":"","Name":"default-cloud1-ceil-meter","Parts":["scrapeconfigs","default-cloud1-ceil-meter"]}} 2025-12-08T18:00:07.102690126+00:00 stdout F 2025-12-08T18:00:07.102690126+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:07.102690126+00:00 stdout F 2025-12-08T18:00:07.102690126+00:00 stdout F TASK [servicetelemetry : Create additional ScrapeConfig if provided] *********** 2025-12-08T18:00:07.102690126+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:70 2025-12-08T18:00:07.102690126+00:00 stdout F 2025-12-08T18:00:07.102690126+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:07.102733667+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:07Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create additional ScrapeConfig if provided"} 2025-12-08T18:00:07.124127540+00:00 stdout F 2025-12-08T18:00:07.124127540+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:07.124127540+00:00 stdout F 2025-12-08T18:00:07.124127540+00:00 stdout F TASK [servicetelemetry : Create additional ServiceMonitor if provided (legacy)] *** 2025-12-08T18:00:07.124127540+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:77 2025-12-08T18:00:07.124127540+00:00 stdout F 2025-12-08T18:00:07.124127540+00:00 stdout F 
------------------------------------------------------------------------------- 2025-12-08T18:00:07.124185981+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:07Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create additional ServiceMonitor if provided (legacy)"} 2025-12-08T18:00:07.147644798+00:00 stdout F 2025-12-08T18:00:07.147644798+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:07.147644798+00:00 stdout F 2025-12-08T18:00:07.147644798+00:00 stdout F TASK [servicetelemetry : Remove (legacy) default ServiceMonitors] ************** 2025-12-08T18:00:07.147644798+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:84 2025-12-08T18:00:07.147644798+00:00 stdout F 2025-12-08T18:00:07.147644798+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:07.147690559+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:07Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Remove (legacy) default ServiceMonitors"} 2025-12-08T18:00:07.865585449+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:07Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=ServiceMonitor, service-telemetry/default-cloud1-ceil-meter"} 2025-12-08T18:00:08.029232412+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:08Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T18:00:08.029306464+00:00 stdout F 2025-12-08T18:00:08.029306464+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:08.029306464+00:00 stdout F 2025-12-08T18:00:08.029306464+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T18:00:08.029306464+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T18:00:08.029306464+00:00 stdout F 2025-12-08T18:00:08.029306464+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:08.734257403+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:08Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways/default-cloud1-sens-meter","Verb":"get","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"default-cloud1-sens-meter","Parts":["smartgateways","default-cloud1-sens-meter"]}} 2025-12-08T18:00:08.849332459+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:08Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, 
Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T18:00:08.849398111+00:00 stdout F 2025-12-08T18:00:08.849398111+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:08.849422211+00:00 stdout F 2025-12-08T18:00:08.849422211+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T18:00:08.849422211+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T18:00:08.849444282+00:00 stdout F 2025-12-08T18:00:08.849444282+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:08.910169699+00:00 stdout F 2025-12-08T18:00:08.910169699+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:08.910169699+00:00 stdout F 2025-12-08T18:00:08.910169699+00:00 stdout F TASK [servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs] *** 2025-12-08T18:00:08.910169699+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:1 2025-12-08T18:00:08.910169699+00:00 stdout F 2025-12-08T18:00:08.910169699+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:08.910240071+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:08Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create an access token for prometheus-stf to use in scrapeconfigs"} 2025-12-08T18:00:09.642861827+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:09Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/prometheus-stf-token","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"prometheus-stf-token","Parts":["secrets","prometheus-stf-token"]}} 2025-12-08T18:00:09.816404141+00:00 stdout F 2025-12-08T18:00:09.816404141+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:09.816404141+00:00 stdout F 2025-12-08T18:00:09.816404141+00:00 stdout F TASK [servicetelemetry : Create ScrapeConfig to scrape Smart Gateway] ********** 2025-12-08T18:00:09.816404141+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:64 2025-12-08T18:00:09.816404141+00:00 stdout F 2025-12-08T18:00:09.816404141+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:09.816460852+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:09Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create ScrapeConfig to scrape Smart Gateway"} 2025-12-08T18:00:10.472802922+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:10Z","logger":"proxy","msg":"Read object from 
cache","resource":{"IsResourceRequest":true,"Path":"/apis/monitoring.rhobs/v1alpha1/namespaces/service-telemetry/scrapeconfigs/default-cloud1-sens-meter","Verb":"get","APIPrefix":"apis","APIGroup":"monitoring.rhobs","APIVersion":"v1alpha1","Namespace":"service-telemetry","Resource":"scrapeconfigs","Subresource":"","Name":"default-cloud1-sens-meter","Parts":["scrapeconfigs","default-cloud1-sens-meter"]}} 2025-12-08T18:00:10.614738045+00:00 stdout F 2025-12-08T18:00:10.614738045+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:10.614738045+00:00 stdout F 2025-12-08T18:00:10.614738045+00:00 stdout F TASK [servicetelemetry : Create additional ScrapeConfig if provided] *********** 2025-12-08T18:00:10.614738045+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:70 2025-12-08T18:00:10.614738045+00:00 stdout F 2025-12-08T18:00:10.614738045+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:10.614779616+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:10Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create additional ScrapeConfig if provided"} 2025-12-08T18:00:10.650103336+00:00 stdout F 2025-12-08T18:00:10.650103336+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:10.650103336+00:00 stdout F 2025-12-08T18:00:10.650103336+00:00 stdout F TASK [servicetelemetry : Create additional ServiceMonitor if provided (legacy)] *** 2025-12-08T18:00:10.650103336+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:77 2025-12-08T18:00:10.650103336+00:00 stdout F 2025-12-08T18:00:10.650103336+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:10.650174458+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:10Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Create additional ServiceMonitor if provided (legacy)"} 2025-12-08T18:00:10.676310335+00:00 stdout F 2025-12-08T18:00:10.676310335+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:10.676310335+00:00 stdout F 2025-12-08T18:00:10.676310335+00:00 stdout F TASK [servicetelemetry : Remove (legacy) default ServiceMonitors] ************** 2025-12-08T18:00:10.676310335+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_scrapeconfig.yml:84 2025-12-08T18:00:10.676310335+00:00 stdout F 2025-12-08T18:00:10.676310335+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:10.676386207+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:10Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Remove (legacy) default ServiceMonitors"} 
2025-12-08T18:00:11.446176060+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:11Z","logger":"proxy","msg":"Cache miss: monitoring.rhobs/v1, Kind=ServiceMonitor, service-telemetry/default-cloud1-sens-meter"} 2025-12-08T18:00:11.553556084+00:00 stdout F 2025-12-08T18:00:11.553556084+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:11.553556084+00:00 stdout F 2025-12-08T18:00:11.553556084+00:00 stdout F TASK [servicetelemetry : Lookup Elasticsearch BasicAuth] *********************** 2025-12-08T18:00:11.553556084+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:24 2025-12-08T18:00:11.553556084+00:00 stdout F 2025-12-08T18:00:11.553556084+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:11.553605245+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:11Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Lookup Elasticsearch BasicAuth"} 2025-12-08T18:00:12.270353204+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:12Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/elasticsearch-es-elastic-user","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"elasticsearch-es-elastic-user","Parts":["secrets","elasticsearch-es-elastic-user"]}} 2025-12-08T18:00:12.653361407+00:00 stdout F 2025-12-08T18:00:12.653361407+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:12.653361407+00:00 stdout F 2025-12-08T18:00:12.653361407+00:00 stdout F TASK [servicetelemetry : Get the Elasticsearch TLS materials secret] *********** 2025-12-08T18:00:12.653361407+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:52 2025-12-08T18:00:12.653361407+00:00 stdout F 2025-12-08T18:00:12.653361407+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:12.653411428+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:12Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Get the Elasticsearch TLS materials secret"} 2025-12-08T18:00:13.363568283+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:13Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/api/v1/namespaces/service-telemetry/secrets/elasticsearch-es-cert","Verb":"get","APIPrefix":"api","APIGroup":"","APIVersion":"v1","Namespace":"service-telemetry","Resource":"secrets","Subresource":"","Name":"elasticsearch-es-cert","Parts":["secrets","elasticsearch-es-cert"]}} 2025-12-08T18:00:13.491079387+00:00 stdout F 2025-12-08T18:00:13.491079387+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:13.491079387+00:00 stdout F 2025-12-08T18:00:13.491079387+00:00 stdout F TASK [servicetelemetry : Load dummy certs] ************************************* 
2025-12-08T18:00:13.491079387+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:62 2025-12-08T18:00:13.491079387+00:00 stdout F 2025-12-08T18:00:13.491079387+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:13.491130348+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Load dummy certs"} 2025-12-08T18:00:13.550172261+00:00 stdout F 2025-12-08T18:00:13.550172261+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:13.550172261+00:00 stdout F 2025-12-08T18:00:13.550172261+00:00 stdout F TASK [servicetelemetry : Augment the secret with dummy TLS cert/key if no TLS user auth material provided] *** 2025-12-08T18:00:13.550172261+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:66 2025-12-08T18:00:13.550172261+00:00 stdout F 2025-12-08T18:00:13.550172261+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:13.550202122+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Augment the secret with dummy TLS cert/key if no TLS user auth material provided"} 2025-12-08T18:00:13.608188217+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy Events Smart Gateway instance for each collector"} 2025-12-08T18:00:13.608304050+00:00 stdout F 2025-12-08T18:00:13.608304050+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:13.608348931+00:00 stdout F 2025-12-08T18:00:13.608348931+00:00 stdout F TASK [servicetelemetry : Deploy Events Smart Gateway instance for each collector] *** 2025-12-08T18:00:13.608348931+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/component_clouds.yml:78 2025-12-08T18:00:13.608389762+00:00 stdout F 2025-12-08T18:00:13.608389762+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:13.742660973+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:13Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T18:00:13.742747196+00:00 stdout F 2025-12-08T18:00:13.742747196+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:13.742774086+00:00 stdout F 2025-12-08T18:00:13.742774086+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 
2025-12-08T18:00:13.742774086+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T18:00:13.742797227+00:00 stdout F 2025-12-08T18:00:13.742797227+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:14.425932512+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:14Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways/default-cloud1-coll-event","Verb":"get","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"default-cloud1-coll-event","Parts":["smartgateways","default-cloud1-coll-event"]}} 2025-12-08T18:00:14.549212054+00:00 stdout F 2025-12-08T18:00:14.549212054+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:14.549212054+00:00 stdout F 2025-12-08T18:00:14.549212054+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T18:00:14.549212054+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T18:00:14.549212054+00:00 stdout F 2025-12-08T18:00:14.549212054+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:14.549243415+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:14Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T18:00:14.661760304+00:00 stdout F 2025-12-08T18:00:14.661760304+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:14.661760304+00:00 stdout F 2025-12-08T18:00:14.661760304+00:00 stdout F TASK [servicetelemetry : Deploy instance of Smart Gateway] ********************* 2025-12-08T18:00:14.661760304+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:5 2025-12-08T18:00:14.661760304+00:00 stdout F 2025-12-08T18:00:14.661760304+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:14.661799905+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:14Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy instance of Smart Gateway"} 2025-12-08T18:00:15.414616011+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:15Z","logger":"proxy","msg":"Read object from cache","resource":{"IsResourceRequest":true,"Path":"/apis/smartgateway.infra.watch/v2/namespaces/service-telemetry/smartgateways/default-cloud1-ceil-event","Verb":"get","APIPrefix":"apis","APIGroup":"smartgateway.infra.watch","APIVersion":"v2","Namespace":"service-telemetry","Resource":"smartgateways","Subresource":"","Name":"default-cloud1-ceil-event","Parts":["smartgateways","default-cloud1-ceil-event"]}} 2025-12-08T18:00:15.539824294+00:00 stdout F 2025-12-08T18:00:15.539824294+00:00 stdout F --------------------------- 
Ansible Task StdOut ------------------------------- 2025-12-08T18:00:15.539824294+00:00 stdout F 2025-12-08T18:00:15.539824294+00:00 stdout F TASK [servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs] ****** 2025-12-08T18:00:15.539824294+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/base_smartgateway.yml:9 2025-12-08T18:00:15.539824294+00:00 stdout F 2025-12-08T18:00:15.539824294+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:15.539873775+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:15Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Deploy SG-specific ScrapeConfig for metrics SGs"} 2025-12-08T18:00:15.616383977+00:00 stdout F 2025-12-08T18:00:15.616383977+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:15.616383977+00:00 stdout F 2025-12-08T18:00:15.616383977+00:00 stdout F TASK [servicetelemetry : Start graphing component plays] *********************** 2025-12-08T18:00:15.616383977+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:98 2025-12-08T18:00:15.616383977+00:00 stdout F 2025-12-08T18:00:15.616383977+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:15.616432270+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:15Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Start graphing component plays"} 2025-12-08T18:00:15.640556033+00:00 stdout F 2025-12-08T18:00:15.640556033+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:15.640556033+00:00 stdout F 2025-12-08T18:00:15.640556033+00:00 stdout F TASK [servicetelemetry : Post-setup] ******************************************* 2025-12-08T18:00:15.640556033+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/main.yml:104 2025-12-08T18:00:15.640556033+00:00 stdout F 2025-12-08T18:00:15.640556033+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:15.640588914+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:15Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Post-setup"} 2025-12-08T18:00:15.798927009+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:15Z","logger":"logging_event_handler","msg":"[playbook task start]","name":"default","namespace":"service-telemetry","gvk":"infra.watch/v1beta1, Kind=ServiceTelemetry","event_type":"playbook_on_task_start","job":"6231893141681640340","EventData.Name":"servicetelemetry : Remove unlisted Smart Gateway"} 2025-12-08T18:00:15.798960110+00:00 stdout F 2025-12-08T18:00:15.798960110+00:00 stdout F --------------------------- Ansible Task StdOut ------------------------------- 2025-12-08T18:00:15.798960110+00:00 stdout F 
2025-12-08T18:00:15.798960110+00:00 stdout F TASK [servicetelemetry : Remove unlisted Smart Gateway] ************************ 2025-12-08T18:00:15.798960110+00:00 stdout F task path: /opt/ansible/roles/servicetelemetry/tasks/post.yml:20 2025-12-08T18:00:15.798960110+00:00 stdout F 2025-12-08T18:00:15.798960110+00:00 stdout F ------------------------------------------------------------------------------- 2025-12-08T18:00:16.052509106+00:00 stderr F {"level":"info","ts":"2025-12-08T18:00:16Z","logger":"runner","msg":"Ansible-runner exited successfully","job":"6231893141681640340","name":"default","namespace":"service-telemetry"} 2025-12-08T18:00:16.052698391+00:00 stdout F 2025-12-08T18:00:16.052698391+00:00 stdout F ----- Ansible Task Status Event StdOut (infra.watch/v1beta1, Kind=ServiceTelemetry, default/service-telemetry) ----- 2025-12-08T18:00:16.052698391+00:00 stdout F 2025-12-08T18:00:16.052698391+00:00 stdout F 2025-12-08T18:00:16.052698391+00:00 stdout F PLAY RECAP ********************************************************************* 2025-12-08T18:00:16.052698391+00:00 stdout F localhost : ok=124  changed=0 unreachable=0 failed=0 skipped=39  rescued=0 ignored=0 2025-12-08T18:00:16.052698391+00:00 stdout F 2025-12-08T18:00:16.052698391+00:00 stdout F ---------- ././@LongLink0000644000000000000000000000024100000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_down0000755000175000017500000000000015115611514033141 5ustar zuulzuul././@LongLink0000644000000000000000000000026100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7/download-server/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_down0000755000175000017500000000000015115611521033137 5ustar zuulzuul././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7/download-server/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_down0000644000175000017500000006604515115611514033156 0ustar zuulzuul2025-12-08T17:44:35.274444394+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:44:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:44:38.445200666+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:44:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:44:45.275115849+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:44:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:44:48.454514936+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:44:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:44:55.277460944+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:44:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:44:58.445932377+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:44:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:05.274536802+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:08.445168405+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:15.273177614+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:15] "GET / HTTP/1.1" 200 - 
2025-12-08T17:45:18.445610477+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:25.275829838+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:28.447247995+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:35.272753928+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:38.445478830+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:45.272283294+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:48.445303704+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:55.274489278+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:45:58.444847679+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:45:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:05.276922587+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:08.444983728+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:15.273271663+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:18.445952183+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:25.273810305+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:28.444406783+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:35.273794552+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:38.447017778+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:45.273547731+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:48.444911642+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:55.273459788+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:46:58.444290823+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:46:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:05.273824001+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:08.445302926+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:15.274285927+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:18.453749964+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:25.273926047+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:28.445853476+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:35.272511953+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:38.446795796+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:45.273786742+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:48.446296120+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:55.273580679+00:00 stderr F 
::ffff:10.217.0.2 - - [08/Dec/2025 17:47:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:47:58.444307941+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:47:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:05.275063899+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:08.445909645+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:15.272848897+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:18.446305362+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:25.273144223+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:28.444827614+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:35.273066726+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:38.444727607+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:45.273417902+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:48.445944127+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:55.272669310+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:48:58.445683225+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:48:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:05.274863742+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:08.445682455+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:15.273619463+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:18.444787503+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:25.274171366+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:28.445370974+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:35.273134447+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:38.444661606+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:45.272739742+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:48.445288790+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:55.272635490+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:49:58.443984436+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:49:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:05.273933890+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:08.445690650+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:15.273353919+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:18.445552490+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:25.273750740+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:28.446652294+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:28] "GET / 
HTTP/1.1" 200 - 2025-12-08T17:50:35.273549826+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:38.445101303+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:45.274029580+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:48.445056014+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:55.273469060+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:50:58.445029336+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:50:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:05.273554362+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:08.445411801+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:15.272766256+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:18.445791176+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:25.274010644+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:28.445929685+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:35.273276750+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:38.444629252+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:45.273609263+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:48.445291480+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:55.274049268+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:51:58.445856532+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:51:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:05.273459623+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:08.445460083+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:15.273284013+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:18.445486296+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:25.274206369+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:28.444737658+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:35.273551053+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:38.445900101+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:45.273549501+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:48.446097164+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:55.272857823+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:52:58.444348856+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:52:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:05.271921798+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:08.445201541+00:00 
stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:15.272957444+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:18.445465645+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:25.273482473+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:28.445318906+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:35.273632830+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:38.445058842+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:45.273373554+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:48.445038902+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:55.274005014+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:53:58.444967186+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:53:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:05.273677593+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:08.445045843+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:15.273950481+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:18.445337972+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:25.273702687+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:28.444963479+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:35.273510613+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:38.446139631+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:45.274457776+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:48.445971426+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:55.272778674+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:54:58.445400492+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:54:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:05.273150735+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:08.444888751+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:15.274372756+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:18.446820901+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:25.272655371+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:28.445915534+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:35.273099810+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:38.451277889+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:45.272963534+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 
17:55:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:48.445704604+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:55.273009323+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:55:58.445220398+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:55:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:05.272856727+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:08.444553097+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:15.273510992+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:18.446058477+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:25.274438676+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:28.445052007+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:35.273641759+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:38.445557899+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:45.274038749+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:48.444861510+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:55.273425163+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:56:58.447544360+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:56:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:05.272526529+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:08.446519243+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:15.273797142+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:18.445517637+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:25.273813453+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:28.445480316+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:35.277138548+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:38.445830255+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:45.273197212+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:48.445696147+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:55.273449565+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:57:58.446054133+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:57:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:05.273192325+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:08.444636303+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:15.274167857+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:18.444315251+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:18] "GET / HTTP/1.1" 200 - 
2025-12-08T17:58:25.273470845+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:28.445400236+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:35.273520692+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:38.446068182+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:45.274931320+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:48.444392223+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:55.273094628+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:58:58.445481097+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:58:58] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:05.275086365+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:05] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:08.446175821+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:08] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:15.275709008+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:15] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:18.445618812+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:18] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:25.273437484+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:25] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:28.446257344+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:28] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:35.274097397+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:35] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:38.445146632+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:38] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:45.273649739+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:45] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:48.444792673+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:48] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:55.272909489+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:55] "GET / HTTP/1.1" 200 - 2025-12-08T17:59:58.445909171+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 17:59:58] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:05.274213941+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:05] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:08.444848671+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:08] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:15.273425309+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:15] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:18.446753428+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:18] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:25.275311163+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:25] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:28.444784593+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:28] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:35.273480192+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:35] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:38.444287336+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:38] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:45.274113365+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:45] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:48.443929650+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:48] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:55.273709937+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:00:55] "GET / HTTP/1.1" 200 - 2025-12-08T18:00:58.445618422+00:00 stderr F 
::ffff:10.217.0.2 - - [08/Dec/2025 18:00:58] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:05.273055176+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:05] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:08.444041466+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:08] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:15.274410367+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:15] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:18.444914645+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:18] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:25.274351652+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:25] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:28.444892949+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:28] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:35.273623176+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:35] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:38.445142272+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:38] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:45.274503617+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:45] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:48.445372774+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:48] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:55.273227530+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:55] "GET / HTTP/1.1" 200 - 2025-12-08T18:01:58.445626877+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:01:58] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:05.273664868+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:05] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:08.446050306+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:08] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:15.275113698+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:15] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:18.445243287+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:18] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:25.273842675+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:25] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:28.444764684+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:28] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:35.273852546+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:35] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:38.445437681+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:38] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:45.273215916+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:45] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:48.445406707+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:48] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:55.273118431+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:55] "GET / HTTP/1.1" 200 - 2025-12-08T18:02:58.445044291+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:02:58] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:05.274142667+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:05] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:08.444305384+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:08] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:15.272788104+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:15] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:18.444832611+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:18] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:25.273306491+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:25] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:28.444951637+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:28] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:35.272646755+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:35] "GET / 
HTTP/1.1" 200 - 2025-12-08T18:03:38.445289099+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:38] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:45.272764801+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:45] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:48.445037544+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:48] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:55.273406851+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:55] "GET / HTTP/1.1" 200 - 2025-12-08T18:03:58.444720959+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:03:58] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:05.274056589+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:05] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:08.445044364+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:08] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:15.272811747+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:15] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:18.444419829+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:18] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:25.274764751+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:25] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:28.445173291+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:28] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:35.273932922+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:35] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:38.444785603+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:38] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:45.273204985+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:45] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:48.445101844+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:48] "GET / HTTP/1.1" 200 - 2025-12-08T18:04:55.276940765+00:00 stderr F ::ffff:10.217.0.2 - - [08/Dec/2025 18:04:55] "GET / HTTP/1.1" 200 - ././@LongLink0000644000000000000000000000025100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611513033226 5ustar zuulzuul././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/bond-cni-plugin/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611520033224 5ustar zuulzuul././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/bond-cni-plugin/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000644000175000017500000000061015115611513033225 0ustar zuulzuul2025-12-08T17:44:04.760485997+00:00 stdout F 2025-12-08T17:44:04+00:00 [cnibincopy] Successfully copied files in /bondcni/rhel9/ to /host/opt/cni/bin/upgrade_c6d6ae6c-719c-49ba-b8cb-476d10337ff5 2025-12-08T17:44:04.771650631+00:00 stdout F 2025-12-08T17:44:04+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c6d6ae6c-719c-49ba-b8cb-476d10337ff5 to /host/opt/cni/bin/ ././@LongLink0000644000000000000000000000027100000000000011603 
Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611520033224 5ustar zuulzuul././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000644000175000017500000000012015115611513033221 0ustar zuulzuul2025-12-08T17:44:08.082637005+00:00 stdout F Done configuring CNI. Sleep=false ././@LongLink0000644000000000000000000000026500000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/cni-plugins/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611520033224 5ustar zuulzuul././@LongLink0000644000000000000000000000027200000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/cni-plugins/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000644000175000017500000000062415115611513033232 0ustar zuulzuul2025-12-08T17:44:04.091197470+00:00 stdout F 2025-12-08T17:44:04+00:00 [cnibincopy] Successfully copied files in /usr/src/plugins/rhel9/bin/ to /host/opt/cni/bin/upgrade_4355f6de-9b97-486b-8464-ec6f95eac29b 2025-12-08T17:44:04.103390593+00:00 stdout F 2025-12-08T17:44:04+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4355f6de-9b97-486b-8464-ec6f95eac29b to /host/opt/cni/bin/ ././@LongLink0000644000000000000000000000030100000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni-bincopy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611520033224 5ustar zuulzuul././@LongLink0000644000000000000000000000030600000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni-bincopy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000644000175000017500000000063015115611513033227 0ustar zuulzuul2025-12-08T17:44:07.090758609+00:00 stdout F 2025-12-08T17:44:07+00:00 [cnibincopy] Successfully copied files in /usr/src/whereabouts/rhel9/bin/ to /host/opt/cni/bin/upgrade_446066d3-0c91-4f2d-a9c9-a8c5a6732faa 2025-12-08T17:44:07.095450007+00:00 stdout F 2025-12-08T17:44:07+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_446066d3-0c91-4f2d-a9c9-a8c5a6732faa to /host/opt/cni/bin/ ././@LongLink0000644000000000000000000000030300000000000011577 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/egress-router-binary-copy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611520033224 5ustar zuulzuul././@LongLink0000644000000000000000000000031000000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/egress-router-binary-copy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000644000175000017500000000063615115611513033235 0ustar zuulzuul2025-12-08T17:44:02.434861251+00:00 stdout F 2025-12-08T17:44:02+00:00 [cnibincopy] Successfully copied files in /usr/src/egress-router-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_df24a4f9-95fa-4c35-abff-22b29ca1b92f 2025-12-08T17:44:02.444937025+00:00 stdout F 2025-12-08T17:44:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_df24a4f9-95fa-4c35-abff-22b29ca1b92f to /host/opt/cni/bin/ ././@LongLink0000644000000000000000000000027300000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/routeoverride-cni/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611520033224 5ustar zuulzuul././@LongLink0000644000000000000000000000030000000000000011574 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/routeoverride-cni/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000644000175000017500000000063315115611513033232 0ustar zuulzuul2025-12-08T17:44:05.714956731+00:00 stdout F 2025-12-08T17:44:05+00:00 [cnibincopy] Successfully copied files in /usr/src/route-override/rhel9/bin/ to /host/opt/cni/bin/upgrade_cfca5beb-98d4-4779-8d01-f650d0e5ab14 2025-12-08T17:44:05.720776250+00:00 stdout F 2025-12-08T17:44:05+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_cfca5beb-98d4-4779-8d01-f650d0e5ab14 to /host/opt/cni/bin/ ././@LongLink0000644000000000000000000000031400000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/kube-multus-additional-cni-plugins/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000755000175000017500000000000015115611520033224 5ustar zuulzuul././@LongLink0000644000000000000000000000032100000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/kube-multus-additional-cni-plugins/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multu0000644000175000017500000000000015115611513033216 0ustar zuulzuul././@LongLink0000644000000000000000000000027100000000000011603 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-config-opera0000755000175000017500000000000015115611514033021 5ustar zuulzuul././@LongLink0000644000000000000000000000032300000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-config-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-config-opera0000755000175000017500000000000015115611521033017 5ustar zuulzuul././@LongLink0000644000000000000000000000033000000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-config-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-config-opera0000644000175000017500000010260315115611514033025 0ustar zuulzuul2025-12-08T17:44:25.452850261+00:00 stderr F I1208 17:44:25.446784 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:44:25.470644407+00:00 stderr F I1208 17:44:25.466770 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:25.477020071+00:00 stderr F I1208 17:44:25.475760 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:25.609970557+00:00 stderr F I1208 17:44:25.604937 1 builder.go:304] config-operator version 4.20.0-202510211040.p2.g657754e.assembly.stream.el9-657754e-657754e2beaed6295ff28bb0f7813cb1c9ce35b2 2025-12-08T17:44:26.431633710+00:00 stderr F I1208 17:44:26.430684 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:26.435097674+00:00 stderr F I1208 17:44:26.432624 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:26.435097674+00:00 stderr F W1208 17:44:26.432653 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:26.435097674+00:00 stderr F W1208 17:44:26.432659 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:26.435097674+00:00 stderr F W1208 17:44:26.432663 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:26.435097674+00:00 stderr F W1208 17:44:26.432666 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:26.435097674+00:00 stderr F W1208 17:44:26.432669 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:26.435097674+00:00 stderr F W1208 17:44:26.432671 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:44:26.435163966+00:00 stderr F I1208 17:44:26.435147 1 leaderelection.go:257] attempting to acquire leader lease openshift-config-operator/config-operator-lock... 
2025-12-08T17:44:26.439733391+00:00 stderr F I1208 17:44:26.439351 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:26.439733391+00:00 stderr F I1208 17:44:26.439652 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:26.440004008+00:00 stderr F I1208 17:44:26.439973 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:26.440378648+00:00 stderr F I1208 17:44:26.440213 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:26.442496316+00:00 stderr F I1208 17:44:26.440809 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:26.443470143+00:00 stderr F I1208 17:44:26.443450 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:26.443517354+00:00 stderr F I1208 17:44:26.443478 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:26.443526544+00:00 stderr F I1208 17:44:26.441443 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:26.443575965+00:00 stderr F I1208 17:44:26.443554 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:26.448475229+00:00 stderr F I1208 17:44:26.447981 1 leaderelection.go:271] successfully acquired lease openshift-config-operator/config-operator-lock 2025-12-08T17:44:26.448475229+00:00 stderr F I1208 17:44:26.448163 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-config-operator", Name:"config-operator-lock", UID:"eaca86ba-59a0-4dc0-8a34-50e701093a5b", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37692", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' openshift-config-operator-5777786469-v69x6_e14dbae2-6ff5-4d60-82ef-889df7e8c952 became leader 2025-12-08T17:44:26.510097510+00:00 stderr F I1208 17:44:26.510015 1 base_controller.go:76] Waiting for caches to sync for FeatureUpgradeableController 2025-12-08T17:44:26.510231404+00:00 stderr F I1208 17:44:26.510193 1 base_controller.go:76] Waiting for caches to sync for MigrationPlatformStatusController 2025-12-08T17:44:26.510274345+00:00 stderr F I1208 17:44:26.510254 1 base_controller.go:76] Waiting for caches to sync for StaleConditionController-RemoveStaleConditions 2025-12-08T17:44:26.510282735+00:00 stderr F I1208 17:44:26.510276 1 base_controller.go:76] Waiting for caches to sync for FeatureGateController 2025-12-08T17:44:26.510318316+00:00 stderr F I1208 17:44:26.510289 1 base_controller.go:76] Waiting for caches to sync for LatencySensitiveRemovalController 2025-12-08T17:44:26.510318316+00:00 stderr F I1208 17:44:26.510303 1 base_controller.go:82] Caches are synced for LatencySensitiveRemovalController 2025-12-08T17:44:26.510318316+00:00 stderr F I1208 17:44:26.510312 1 base_controller.go:119] Starting #1 worker of LatencySensitiveRemovalController controller ... 
2025-12-08T17:44:26.510373498+00:00 stderr F I1208 17:44:26.510081 1 base_controller.go:76] Waiting for caches to sync for ConfigOperatorController 2025-12-08T17:44:26.510373498+00:00 stderr F I1208 17:44:26.510365 1 base_controller.go:82] Caches are synced for ConfigOperatorController 2025-12-08T17:44:26.510373498+00:00 stderr F I1208 17:44:26.510369 1 base_controller.go:119] Starting #1 worker of ConfigOperatorController controller ... 2025-12-08T17:44:26.510615084+00:00 stderr F I1208 17:44:26.510369 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_config-operator 2025-12-08T17:44:26.510944033+00:00 stderr F E1208 17:44:26.510914 1 base_controller.go:279] "Unhandled Error" err="ConfigOperatorController reconciliation failed: configs.operator.openshift.io \"cluster\" not found" 2025-12-08T17:44:26.511416906+00:00 stderr F I1208 17:44:26.511372 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-config-operator", Name:"openshift-config-operator", UID:"dc451fc9-e781-493f-8e7d-55e9072cc784", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'FastControllerResync' Controller "ConfigOperatorController" resync interval is set to 10s which might lead to client request throttling 2025-12-08T17:44:26.511416906+00:00 stderr F I1208 17:44:26.510097 1 base_controller.go:76] Waiting for caches to sync for AWSPlatformServiceLocationController 2025-12-08T17:44:26.511416906+00:00 stderr F I1208 17:44:26.510116 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:26.511429036+00:00 stderr F I1208 17:44:26.510105 1 base_controller.go:76] Waiting for caches to sync for KubeCloudConfigController 2025-12-08T17:44:26.543203612+00:00 stderr F E1208 17:44:26.542173 1 base_controller.go:279] "Unhandled Error" err="ConfigOperatorController reconciliation failed: configs.operator.openshift.io \"cluster\" not found" 2025-12-08T17:44:26.575919995+00:00 stderr F I1208 17:44:26.575830 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:26.575959276+00:00 stderr F I1208 17:44:26.575920 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:26.575959276+00:00 stderr F I1208 17:44:26.575937 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:26.611692721+00:00 stderr F I1208 17:44:26.610753 1 base_controller.go:82] Caches are synced for FeatureUpgradeableController 2025-12-08T17:44:26.611692721+00:00 stderr F I1208 17:44:26.610783 1 base_controller.go:119] Starting #1 worker of FeatureUpgradeableController controller ... 2025-12-08T17:44:26.611692721+00:00 stderr F I1208 17:44:26.610975 1 base_controller.go:82] Caches are synced for FeatureGateController 2025-12-08T17:44:26.611692721+00:00 stderr F I1208 17:44:26.610985 1 base_controller.go:119] Starting #1 worker of FeatureGateController controller ... 2025-12-08T17:44:26.611692721+00:00 stderr F I1208 17:44:26.611022 1 base_controller.go:82] Caches are synced for MigrationPlatformStatusController 2025-12-08T17:44:26.611692721+00:00 stderr F I1208 17:44:26.611028 1 base_controller.go:119] Starting #1 worker of MigrationPlatformStatusController controller ... 
2025-12-08T17:44:26.611692721+00:00 stderr F I1208 17:44:26.611205 1 base_controller.go:82] Caches are synced for StaleConditionController-RemoveStaleConditions 2025-12-08T17:44:26.611692721+00:00 stderr F I1208 17:44:26.611214 1 base_controller.go:119] Starting #1 worker of StaleConditionController-RemoveStaleConditions controller ... 2025-12-08T17:44:26.611692721+00:00 stderr F I1208 17:44:26.611267 1 base_controller.go:82] Caches are synced for StatusSyncer_config-operator 2025-12-08T17:44:26.611692721+00:00 stderr F I1208 17:44:26.611296 1 base_controller.go:119] Starting #1 worker of StatusSyncer_config-operator controller ... 2025-12-08T17:44:26.612065731+00:00 stderr F I1208 17:44:26.611975 1 base_controller.go:82] Caches are synced for KubeCloudConfigController 2025-12-08T17:44:26.612065731+00:00 stderr F I1208 17:44:26.612014 1 base_controller.go:119] Starting #1 worker of KubeCloudConfigController controller ... 2025-12-08T17:44:26.612080232+00:00 stderr F I1208 17:44:26.612072 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:26.612087862+00:00 stderr F I1208 17:44:26.612079 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 2025-12-08T17:44:26.614633981+00:00 stderr F I1208 17:44:26.612269 1 base_controller.go:82] Caches are synced for AWSPlatformServiceLocationController 2025-12-08T17:44:26.614633981+00:00 stderr F I1208 17:44:26.612280 1 base_controller.go:119] Starting #1 worker of AWSPlatformServiceLocationController controller ... 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.619066 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.618836923 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.619974 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.619947184 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.620010 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.619984735 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.620037 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.620018296 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.620061 1 tlsconfig.go:181] "Loaded client CA" index=4 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.620048577 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.620079 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.620066497 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.620095 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.620084228 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.620111 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.620100798 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.620128 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.620117129 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.620147 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.620136369 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.620478 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-config-operator.svc\" [serving] validServingFor=[metrics.openshift-config-operator.svc,metrics.openshift-config-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:44:30.620460978 +0000 UTC))" 2025-12-08T17:44:30.621250159+00:00 stderr F I1208 17:44:30.620760 
1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215866\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:44:30.620738175 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046424 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.046394178 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046818 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.04679977 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046851 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.046827391 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046865 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.046858062 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046894 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.046869822 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046907 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.046898523 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046919 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] 
issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.046910874 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046932 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.046923694 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046944 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.046936544 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046971 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.046950815 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.046985 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.046977145 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.047217 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-config-operator.svc\" [serving] validServingFor=[metrics.openshift-config-operator.svc,metrics.openshift-config-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:45:16.047205702 +0000 UTC))" 2025-12-08T17:45:16.048052185+00:00 stderr F I1208 17:45:16.047356 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215866\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215866\" (2025-12-08 16:44:25 +0000 UTC to 2028-12-08 16:44:25 +0000 UTC (now=2025-12-08 17:45:16.047346256 +0000 UTC))" 2025-12-08T17:46:26.461420412+00:00 stderr F E1208 17:46:26.460908 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-config-operator/leases/config-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:26.461975438+00:00 stderr F E1208 
17:46:26.461893 1 leaderelection.go:436] error retrieving resource lock openshift-config-operator/config-operator-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-config-operator/leases/config-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.617024383+00:00 stderr F W1208 17:46:26.616866 1 base_controller.go:242] Updating status of "KubeCloudConfigController" failed: unable to ApplyStatus for operator using fieldManager "KubeCloudConfigController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/configs/cluster/status?fieldManager=KubeCloudConfigController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.617024383+00:00 stderr F E1208 17:46:26.617007 1 base_controller.go:279] "Unhandled Error" err="KubeCloudConfigController reconciliation failed: Delete \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/kube-cloud-config\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.626658242+00:00 stderr F W1208 17:46:26.626571 1 base_controller.go:242] Updating status of "KubeCloudConfigController" failed: unable to ApplyStatus for operator using fieldManager "KubeCloudConfigController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/configs/cluster/status?fieldManager=KubeCloudConfigController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.626791566+00:00 stderr F E1208 17:46:26.626763 1 base_controller.go:279] "Unhandled Error" err="KubeCloudConfigController reconciliation failed: Delete \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/kube-cloud-config\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.641629742+00:00 stderr F W1208 17:46:26.641564 1 base_controller.go:242] Updating status of "KubeCloudConfigController" failed: unable to ApplyStatus for operator using fieldManager "KubeCloudConfigController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/configs/cluster/status?fieldManager=KubeCloudConfigController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.641670333+00:00 stderr F E1208 17:46:26.641627 1 base_controller.go:279] "Unhandled Error" err="KubeCloudConfigController reconciliation failed: Delete \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/kube-cloud-config\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.667427386+00:00 stderr F W1208 17:46:26.667351 1 base_controller.go:242] Updating status of "KubeCloudConfigController" failed: unable to ApplyStatus for operator using fieldManager "KubeCloudConfigController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/configs/cluster/status?fieldManager=KubeCloudConfigController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.667427386+00:00 stderr F E1208 17:46:26.667407 1 base_controller.go:279] "Unhandled Error" err="KubeCloudConfigController reconciliation failed: Delete \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/kube-cloud-config\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.712362685+00:00 stderr F W1208 17:46:26.712251 1 base_controller.go:242] Updating status of "KubeCloudConfigController" 
failed: unable to ApplyStatus for operator using fieldManager "KubeCloudConfigController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/configs/cluster/status?fieldManager=KubeCloudConfigController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.712362685+00:00 stderr F E1208 17:46:26.712309 1 base_controller.go:279] "Unhandled Error" err="KubeCloudConfigController reconciliation failed: Delete \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/kube-cloud-config\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.796311694+00:00 stderr F W1208 17:46:26.796245 1 base_controller.go:242] Updating status of "KubeCloudConfigController" failed: unable to ApplyStatus for operator using fieldManager "KubeCloudConfigController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/configs/cluster/status?fieldManager=KubeCloudConfigController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.796311694+00:00 stderr F E1208 17:46:26.796290 1 base_controller.go:279] "Unhandled Error" err="KubeCloudConfigController reconciliation failed: Delete \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/kube-cloud-config\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.961614656+00:00 stderr F W1208 17:46:26.961543 1 base_controller.go:242] Updating status of "KubeCloudConfigController" failed: unable to ApplyStatus for operator using fieldManager "KubeCloudConfigController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/configs/cluster/status?fieldManager=KubeCloudConfigController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:26.961614656+00:00 stderr F E1208 17:46:26.961589 1 base_controller.go:279] "Unhandled Error" err="KubeCloudConfigController reconciliation failed: Delete \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/kube-cloud-config\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.288505777+00:00 stderr F W1208 17:46:27.288438 1 base_controller.go:242] Updating status of "KubeCloudConfigController" failed: unable to ApplyStatus for operator using fieldManager "KubeCloudConfigController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/configs/cluster/status?fieldManager=KubeCloudConfigController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:27.288505777+00:00 stderr F E1208 17:46:27.288490 1 base_controller.go:279] "Unhandled Error" err="KubeCloudConfigController reconciliation failed: Delete \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/kube-cloud-config\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.932603850+00:00 stderr F W1208 17:46:27.932527 1 base_controller.go:242] Updating status of "KubeCloudConfigController" failed: unable to ApplyStatus for operator using fieldManager "KubeCloudConfigController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/configs/cluster/status?fieldManager=KubeCloudConfigController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:27.932650492+00:00 stderr F E1208 17:46:27.932606 1 base_controller.go:279] "Unhandled Error" err="KubeCloudConfigController 
reconciliation failed: Delete \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/kube-cloud-config\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:29.218376814+00:00 stderr F W1208 17:46:29.217666 1 base_controller.go:242] Updating status of "KubeCloudConfigController" failed: unable to ApplyStatus for operator using fieldManager "KubeCloudConfigController-reportDegraded": Patch "https://10.217.4.1:443/apis/operator.openshift.io/v1/configs/cluster/status?fieldManager=KubeCloudConfigController-reportDegraded&force=true": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:29.218428926+00:00 stderr F E1208 17:46:29.218359 1 base_controller.go:279] "Unhandled Error" err="KubeCloudConfigController reconciliation failed: Delete \"https://10.217.4.1:443/api/v1/namespaces/openshift-config-managed/configmaps/kube-cloud-config\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:01.142653455+00:00 stderr F I1208 17:47:01.141708 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:02.977590027+00:00 stderr F I1208 17:47:02.977293 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:08.243707480+00:00 stderr F I1208 17:47:08.243367 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:09.086475110+00:00 stderr F I1208 17:47:09.086020 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:09.131068834+00:00 stderr F I1208 17:47:09.129326 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:13.596752820+00:00 stderr F I1208 17:47:13.596430 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:14.346793711+00:00 stderr F I1208 17:47:14.346727 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:20.644498397+00:00 stderr F I1208 17:47:20.643675 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:20.987336129+00:00 stderr F I1208 17:47:20.986911 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:47:59.857497607+00:00 stderr F I1208 17:47:59.856947 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285" 2025-12-08T17:48:06.797873843+00:00 stderr F I1208 17:48:06.796834 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=configs" reflector="k8s.io/client-go@v0.33.3/tools/cache/reflector.go:285"
Remaining archive members (the final member is gzip-compressed binary data and is not reproducible as text):
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-api/0.log (empty)
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_smart-gateway-operator-5cd794ff55-w8r45_88186169-23e9-44fb-a70c-0f6fe06b2800/operator/0.log.gz (gzip-compressed operator log)
GÂ;(gÂ/0¸2ð$ó–[™_þ5¼üðÓðýË_^WeàîlŽÞDá^Ž#0‘+R †»¹A¥ zÀ˧5ϧ²üÄDƒj@eYT"iqê3åaÝTb¯+vR3©²Å1mýœ’ýæÞúŸ:VzСª™KLB'¤ÔeŸ_]ªõh¯—fÏ~iöŒ÷ˆò¥Båý·v”±*é²ÝM#éõ¢¾ÒrK2¶ùY¤—é®÷¹†±ŸäÚw¹¦ý¡7e;^òl™íW±:Ì«ûKè’Ú·M½‚àLZ¿÷ÇrþaQŸÆ˜¦³äþ'/ÚÏI¸«À—9Ýq–Dº•Ø®;¹£´Ú0@î‘ÕšÈz´éMŠëÔš¥Uy kqjž-8*l9áÐ`€lï5ÜÀ×4²c³_ÓÈŽÉ}M#;4÷5ÐŒ×4²#Š]Ó(†ÏzM#;:ë5ˆ\×4²ã \ÓÈÎwM#;.üØàº¦‘–ãšFvTžk9P¹¯iäÀ–м×4²C \ÓÈ5õŠPñ à¥âCg% àön@©‡Ä¤ Rñ¹^€}Á’«ÎßâÔCü²˜©‡D E¨‡øå°R - x¨‡Äãßü´%Âü0,ǰãn¡Ë=ÔCÛ°zF»Õî<.QÑæwA‰Š„DdÚÍATTJ 'Q‘¬|ñˆŠ¸å›™¨H¬AXýqma#*Æ´¸åäULTT²°0S„p!'aXœ?ñAÀNTTª¯å 1æä€ÒRÒän¢" ¹%‰ŠK!NTTx¢" ‚•¥Qx{œ(QQév¸–Û̈ŠÊ—‰›¨¨¼H¦Óê Jý ]‚Øiu8ÑìDE 2'=þxNŒŸ’þí%*ªR˽ƒÑÓóçÛ·ÔCð½ËqRå Ž4t‰ ø|VgŒÔC0ÒL=Õd ÔCP¢˜©‡`ªÖ^ê!A ÔCP5b ‚UQ}˜¨‡`„qRÁé`\ÉÚK=tzkfaê!¸µ2 õP½ZÊo•àòå„Ôþkí¿î mïO°š¦Ä©‡ŽÞÂ3P’y㦂ÍL=&2=tÏH=*uéÍ}6áå RdQ‰¤EZ.ã¦2SÁ9©LÔCõfÎ1næ°RÁI桪G{½4{K³g¼GÄF=$¡ÿ8¨‡à¤sP}ß‹z)È’Œmyê!qÑŒÔCÇ£*_6–˜@ õŒLXê¡ÒeZÔAÁ§1&fê!ÀX¾Ì1õû%k[QûÝ–Ù}\^…‡¨h3Ä¡‰ŠJ—jO?—…!**%Tb}*j)¢¢òR§På"**¯óò´O ò58*l9áÐ`€l]' h{.Ú³™ÏÄjÁ9À€Iš†©Î"ïå`Ç-dx––0ñ°’»°£Þپ炗5G’Ô«û«M‰ öKñAn7ç3.î0ö"S4ñ¦?BHL½4n9SRÒ„¨3 “jÁcÌé]èÎ|ãS=h ±·zRâ3¾!°“sT}O¦›#ªó¼Ìv¢«J³}`5g;ÁSkw…1ËSt ¤K‘+A²&àžwƒ¸²I<õž;Jª;7Èû(z_ §é^Î<ò’ûWx¹¾%U -’þBfu \ðDßF¶ƒ.éUKÌ)ŠEXÊŸ¡*›•E¶ùªœ¤+œ›b#«:ñU žtÖ¨&+¾FÒõðlüÊÃ:¡Ymj;PÛïÒ¬_>g;ÀL¥ZÌ2« F`yŽF`ÉØüœ ;ö‰êÜ`–4t9È𨰈À˜±ÑåV?ÌPðòògímÊzF*°¬p¶bÓS3"!`„ms++ ˜(˜.½†©; û:ó…Í|Rã™ídìÞ.„‡’ò”ôî X/ÂNWè‹MÞ —Û5ÒÛíîLhÏé;?‘oA{™]Ç Î=Z'fÀç:®óC'­jwpƒ@­¬‰ Ü Cïkºíö´v ¹ÚÈ2Ý–môuÇ6òM 0o©,ä?ìÕßÎuhé:X.© 0È<†T‘‹¬…` »Øö½c"*±¼3Ϋ®øe<æçÚa Ì- ƒ‘h’O1¸h%EÜÛ%¿£ÚÑÿ¢‚{Ï©8€+®š16ª×ü¨4Œ•œ‘Óô0R1ò÷þ&>ò½wð‹ñå˜ÍªLgj<¶ò+ò\¥MœÛñÄ÷­DJ!`ãa#ÔÛZ™"±âð>ä.mž–¶åŽÞê·;ÆãR즙Ͳ/wÕn .+fzÛµf».ñ`/h–ÇÎQV^âǹ%æâqŽ9ù½áD‰té·è~³pü…TáNÞz(ÛñÖÆž.žÒ:4³ñ!Ãýfe ΰºñƉ–‡]ìô(Ö»/[†ù »ƒÈž^E4›~èØþM'DfEõwñZ…ÔV›G~|ñp¥6¯ˆÅºZ§ÈßWOÃSWê9þ8ŸŠ²‡ ›8}`…óÙÓo‹ñœôÁ;Òç&ôTÄB•Rÿk9ͪæù‡êŸsR»°¹lâfy/Ð râ¢Å²T\}š˜¨¾ôI—Șû!…*Ê•g!Ê›¤!¯ÏÞY*Í.”+2ï:ã©’…²åH–¤*MÚâ¤ì{ú[ ![“ÝÔ¦‹îš…¯5?œ¨Õ•bi‡Þà¹_–ÜG n'k!Š9-O‰ÔŠlEÉ;ÅKíóçÓwk>nU½Ç~iîQÚ`pT %G‘ªXøjöôÏÙûû2—D^ÔäfÄ °éÀ®O€œ¤ñd-шãÏcüPƒ®ƒ«Ûj«3y¾—Ü73CĶ=^º‰F¸·rEKy‡3š€#S¥Ë­F4rd 0:º.¹iÞ—D’Ë?ù:œÞ7™BHZq“ù €NÅR‡D§ÓêœxxÆOÒã0ñDKo»y®a…´vj½2«WfÇäÝc›X™?¿<&^ûòµ/¿¶³Böu¥NýEâœì¿ "–Þ6Z³hʰz²æ'·âTÙˆ•´¼¨4i£À¼)]Ú†#Ç{.g”KkšO%ùùšíaéXH•°¨#þ,.;±£ÏÕaϪWïßà Ûr­Þ¬7ëåf½Ü<èróíñåË£ÊúgíT¯l©"Í$š£:Š<ϨEbBõUº§öÆ‹À½Lõ1%¡ßR}LC¸<™°ü\"' ¡ˆGœƒªÌs§êFºA¹"÷1 ¶t`+^Á/§ä9k#ÉUb"Ì*A\'Åi›ºÄhå9Õá1-¾ã¨73Ù D A¢£z:-ÎʯuBãX ³MÒH®&T–»òß±o9»xtßå~ümp†¡wºææûØ8‡×f(±irÍÔ8ÎŽ bŽ ‹x †S®ò(‹òÚ¤’‹óx¥Ú~°’eQ¿ÚQàû÷Ök5CN#Ï›oпV¶_ÌëÝnø¢_wN¹H¤ÏjF¡âµÃ’MòhÜœâ†jÜOý¡·»°=vv‘“`ýW~ „e’0HüBÉJÐÍ=?иíÅJ*·óŠì4Å zx,(ÞÂKzÇŸ»Èe­óÈŽÑ0ëRweFÊ2ÓU¹ø›òƒ‡5êâáŒLÿgål«Çwv®œQÃO}Y=¿ÚÐÊ8Ë6Ôèwt Š<÷ùÕ%y ±£ J†…ç׋‡TØOiëЖ٪¡~ªQØÇ70„Õ–l÷à~oç3eåÝ[„Ìμð¬öÔoüa*v¬òÓBíéÿ]ªÿߔן>¿üñÝÏŸþ®¼ûðêå;åÕ‡÷ï_¿úüó‡÷Ê›•_?½þHx©tÝêât™ÿ|ýJi޼ ß(š£œ!ç&Tþw%Eùë_•ØGh¦ègr$þ Ì§D»]"kz‹ûCÑfŠúo…–eM5ù/Ét¦ü[]>¿÷áüC ÿ®]«c–Õo5ZØ«7´vÏЬ–ÕiuûFW7~ ƒ“ÒPPÎW/¤æHGXÛ¥ƒ=•H‡í–6ÂVš¦a_*ο?[•©I·Úfþ|âáϳº4q³{u½üõ3CGÒ4£×º±þöýpF¾ýç«Þw¾*Ÿ?ÀéBó%yÞöý߇€µ|2(Ü–Êü¿¿–\b#È·RÍy¥¬ôÓÙ}r­“®R4U´±¢E]ü7…ž)!¬]Šù·¿µhM;˜7ó˲:×Ä[|(·rÌ`³ì’ËP/¸ÃeËȘÊ/DSS6ÌN?€ÈùÖ!¡÷ÙO«Æ zÇ~j–x†wxcÇäpÅØöc‡LO0@b:ö:Á¶­ã{(Häƒ+ƒ³Ü&õ‚9†ÁEåR…ìºUèl¨ 'çÃ^’T§ë!HhF‚Gþ@ªjA˜P#ƒƒÏ64F¸ %é9oM¿ &”t-烢·à.ë6šÏÆh)ؤ܌–¢]Byø¾(Çì.(pÇÅp›€&† S %˜R„´óAÊÆ÷9|qI3ÆÜ‚ð—$& ÙR%Ž}óx´3ÒÉׯ~*·o¶rÁ`¶µ"ø™ ë+*†Šjì§›('`<2ø{ReE‹$©¦¸–Œæ÷Ø;+µá²5â‘!?¹Î¿(úˆÆ(BAFIy½~;x¸Rçž{¥®TŒ£ë–1ÖÚ†íjmÝii}Óhkmkܶ9jøÁ…Ä^G•ÐvD‰YÈ%tŒ^ýØþçÈ=gÉ÷ O­óèõÉ+3©Öfk¾ZÆ…©~~u™©¯´2TRѬ˖ӥtIYü¾ I«8~ÒÖÂù’lš»&wŽ+†ùeJ¡¬9/Çôüƒdû–’ÐɰˆÀ--@IOª°ÿýòògímzú/Œ W_ØÁòä>õ×™»?žÇ-‚Ä>·FSÀDÁté5P ¡ìž>— {ù@Rž:Ð')rm4èÍôjŠ;ZÍ—ab"ò-x8­RµZmdy´N,«Œç:”óC­jwÚ­6ÔØÊšØ¶‘aºHsº®‰Öîi#Óµ5Û6;ãNÏ2Ç-T¾‰¦*t%¥f÷áþ|IÁЦÑmtFG‡jߥX³µ^Ö£ÒØmKÑœ'ªe½Ù{êùð„ þ|y×>²Mkz¿jÊO^0ñѧÄÞw2»¶ÃhœZ¹ŸqK`>tÆÌuØŒßÇå:dvº“!óa2®_ïDnkíÇ«mõ}@-ÐÔl}O ?_‰±ä_…©¦Úî¶oã•KN(‡ þ¶À›|O_¾{ù/åãëW//·“2òüÕrI…DjûÔÖÒçÌix{a´…>µÎ§ta*Ùçó B6þtä£ ¼=¶=?@ÏÈ>ã[o6ßùKØãwæùCÞ$£ì™MÏ÷´¶üÙø^¯kôÚýv§¿zE~ïAõѳ 
vÇ¡z®cË9ZUWÍPÈ»‹ziSïKiç׋ůî÷;ý–Ù5-&5Úú´¶ýGy™2¤)Ÿ …å§Äý€ßÐvÿÈ,êFnÙWô¢â„á­‡”´Í®qÓ´Uzi2äVo«í–ß—"?[„[ºÙîö{0v…ü‚×QCüj ±ívé=W™ù2óíûîZ…6;½†ë›.Cûp„ES£@Ù#»†ôäî6«›ƒƒçÊ?¼À½Xå©i9S·IÍKG¸»H‡´€ø©?BbZ­^«k™Ý®Õn÷zzÛléø»×á'rrÑ^ìVhu“ è4ô–Ùé³Å 6;ž–oÀŠšÙ€ËÈ %§HqÑh>QˆÊESº‹±ðsÊ\úÌûÁÒM¢ã[×úl ¨¨£ÊoväggûÊœ™Ç\@ÈEž;A(ò=ýs6áN»Í2@ç‘? ¿ šÍÜÑÀ›vOìN#¾sY4²Auiбº-<‰Ü;vœ4äù!]…63›FíM#¶§3¬ð( úá¦Ô P ±â£ùxŒ¢!½|o ]×eàÇÞ†·Z½6|z¿V®+ CÖ²ñpFo(–‡Îò–Þ·aÖ™“³ë6^Nô;쥇1‹j)ˆká·üo‚ÍÉ@®Ü÷è>ìZA¬ÝH‘ßU‘]$ºÍ§àÕ’+ˆŽñLÔÎêõÛKtÑZAPGqÇâO½•.¢9v"²toWK†vKï讳¡wZSïµÖ– d½¤´Ùêö5Á!Z÷»[Ÿí2Ûº¦…Ym½ Ó5’Br1þgï ̹ôNŸ)n«¸¹©Ÿ³»+ m·'L¯ÇéH!ÊÆê eܯc.ý2r·ŽËøþæ8^§af«ÛÁJÈd¹·>]Å, TTöYø#žÄ£[ ¾;¢vH/= [ÖÖYX~_‚ÍÂXx»Õ²ÚÝ#œ'ÈTœe;†yÂ4ðDÑÃ}¥[} ÏȺÑ' ½e’0:VOÇ8›#·õéJÌLQ7›ƒäÜÒ›âÐ7/¦w:±†øù¤—ëëÛÍô¾„3Xx»ßÖ;GjŽÉmÜã£o1{z‹90-½Û6{=ƒI…¶>]…9*jfò;#ï‰ÜÚß*k{hÿée¹Ë—ßÎx¶x±{`ŸÔŒ´#½ÎÃÙðΦDôŽàîêþ§Ì;ŒhL@±aq\ÅÈq”(ÛPÜÀÌ@ñ#ᅧ“í(^Pæ  Ä€Á•í(P®  8ñ™ˆŠù07dÙd1 Ñ±–MóÓÒôB¢3p‚ð£îÊÐÙW€—ÎÏìÊ/lØ{ÒI[b…“ÅñOÈv}/@ŸI>¥ó±t¸Õ#¬£žcpúÎ#£ìïØk £ûwÞ”êÐU`8×-†œ%´b÷ûÝ>Æqü‡ÈKÊÚw |5ÜUð,@Њ”KJ&÷r /Èñ`ždyÊÒTijû4&ÔŠvô¿ÈÒ",æ×À¾³=ŸÉÈ…Á+ |ñ—©×:ä¨5³Àô*ÃÄÉò(Ø ËáWmCSk1ÑÒ[E´|sFcp ÈKÊÞœÚ3-éšÝ¤c%Wjahôá ¿,Ë Å[[/ÙØ‹â‡'Ëöµ¼!–³4ôñEª”ô'MÓÔl×%Îð¥X’8FVr?¦<M”8MüWsywxìßN”TVŠ[t¿¹$BPE!ÒHoN˜8ö|”–‡ná¯r\ÒÄaž*RAÁ–äŽZ¾T´Ó# UM«²ÌgØsDöô‚(ì Ù\d‰ÊÍŠÛƒ\Hj¯Í#?¾x¸R›WĦ]­Øäï«§Kì+õœOfÙC…œ>@/®{úmq·"}šLŸ› äJ],T©íp-·™Uoj§^õŸsRÛ°¹lòfy/ÐB{žÜ¤$Ñâ­Å²‡Â2bS,[ä¾L8¥~.a}Z¾ÜŸÀ,:ï¾”@ºÌÅI¿»ÐŸOÑ/ÄÆ?+ý›’¥ÇØõñd¯V¬‰ÛW.{¹ïõôüùöíšãTuï>JÐ=uƒ# ]¢>ŸÕ™–§’Uáß‚$«k†_póµýTƒ íES´ªER·²Ì­*îÌ ¬V4rª´/ý°Fé©û DUTŸÉ×!ÉÄ«BÉïkJÜÞªƒq%:HsDŸ×‚™FÒãŒñDË2¦«](§BÕz©àÎîί•ãÀÒ_Íõ¢Úy­×Rqm²ÿVÉ4EV}Ù¶_þßOãž¶…GÁÝs2o¹•ùå_ÃË? ß¿üåuUîÎöçèMN¥î‘?½§€Ü_ö+•ºá&èùKöšçSY~¢A5 ²‚,*‘´¨ãÊeÜTb¯+vR3©õNÎéíä0ÐKK~LýÝöKÁ¥Ù6=«×³ž–î)›ßƒ) Drp‚9r ‚çϳۊ ‹ÊûoícUÒ#d»š#FØ?äkŽT ×ÒÐå”ûZN/ç‘2 š÷¹†±ãÎrM»\ÛÀµs/w˜f¼ÐûÖ’ #3¯î/¡Kjß6uù&(«ß{ £¥Ë$Ï:Iœ1%4%÷?y»o’yUø2§Û#©‡ð‹=«Æ.¤`H¥UiáºGVk"ë)¨g4)®OP;/g8Õ™IŽg Ž [N84$  ’T… mßg¢Žàg„="f¡½`LJ”‡´¼“ôŠd4Ìl`qo<×EÁîw·IqEô¦3Åa@š‚æ™Äòðç1½R =ãý€DœP6¡ƒâÒ|ÜaNœÜÈSMPFI‰ ?6Š”#°á0MÅEÙqü5Œ€5ŒŒ/N†2´LF#Èá ²]­¶yâÛwˆkêáâÀË;ćÎÊÀíÝ€ò‰Iäâs½û‚%Q¿%Äy‡øe1ó‰@‹ðñËaåZ@ðð‰Ç¿ù93J„ùa((Ža9­HŒ¼CbŠÄÅ;$Þ½\¼C¥Äpò‰) |ñx‡®™¬¼C‚&N–G±¾Î‰¥ߪ´¡©µ¨–w¨dahôåÄ eX)îÜÓcq'½ÏÍ’Eçç’[’w°â¼C@…€ç*ïPYJñ·Ç‰ò•?Æ%yžwn+•™w¨¼H¦Óê§t˜“û´:ü)Îý¼C§pŒR²âñ$óžöabfÞ!xM<øaºç~PœƒwHÂiñ Ïžœô¹íózu¶×ãdä‚‘vHÞ!¨öbà‚ÅÌ;#pïTµöòÁbà‚ªïÐ&Q\¹v[¯4ëw:-ëÉÝ¡Ê~–¢Í/Jb)‚ÆÉRUý,E0‚ö³•–s-·€,EåËÂÃRT^ÏÂD]¤KIn…Ì™"x!wðIC_Èë¹jÆäê™q–ìZ iJœ¥èè-<KÑ)™7n–"8ÑÌ,E`"Ó÷Œ,E R7ÙÜwZ^¾ KhA•HZœúLyX7•™¥ÎIeb)*/Žißç”ì7÷¾üÔÁÊR'™‡¥¨íõÒìY,ÍΟg·±óIè?Þ!8é¼Cß÷¢^ ²$c[žwH\4#ïÐ1‡AÀx‡Ê…b&PÂÇ;#–w¨t™uPðiŒ‰™wpV/sº=ÂÌ;3F¡y‡ÊÒjÃ,¼Cß©&ŠðÖ¤Ä;tÌ3“϶œph0H@6€zþ(Æž‹†ölæ3‘TpÀG0`’æ`ª³Å{iØq 逥%Ä:¬\-ì¨w¶ï¹àeÍQ‡$ï*†ÅþjSÞýžì ñd±ÚÁ¹vE-$¤cQ€×,1ÙgÌÙTHëêQMãØ¡¤MñÛð7È¿beüˆÆ(Bƒb©+=µ!9°¥Þ®Ô¹ç^©ƒ+U7û³ÕêivÛuµ¶5ÒµêØšÓëwº½žîX–u¥.h™¤ UBÛa`!Ž(£cÛÁd)Z “*e#K˜D‰E¾0‰RÉ¤Ê WÜ~<´RåÃY*"ëØp¯X¬ª‘¶Nfõp’{+¸&·•,s,m´°\•méAAéLä·Ša™U+åÙªHØv’­ª ðˆ8ëaQïé:@Œ‰Œ[ ÝèzoCcZª5Æ­]eÛÄ«.Ôy&úI½ç4{,ó¥ g4€Â°)HSp*”˜¿®ZäåÜ÷/CìßW+œÉØJÌílÅ› hê4¦ó ^è&OÏ9¶ èòGgŽ«±•[•½\j¥F“F®Òd¬ò¹CZ8Ò{ñKûR™àj$É—RA_=ò' $oµSQ;§äTäÉcÏ~2[K*ã–&•]-Ó®è8þüêò{šáçÙr–«\r!ßîùͱõš¤^“|k’'Äõ å9¯PŽ@Û °µ®Õ«áz5üd5œå»Ô+aH‰)EÓ³ŸÞ—JTànúžÌìáÖC¬ÌUðÂÖÞYÍ‹äY‹J P¯;ëÀ`¬ƒu`° ÖÁ:0XAC59W§ùÌvì;ŠHÖgrê(äID!¥J¬ ¸çÝ ®lO½§ÄŽ’*EÆÎ rç>ŠÞWcÂiº—3¼äþ^n oIUB‹œ†‡Y]<Ñ·‘í KzïkŠ"HV<Žœò¯jVz«fà|Λ!ÕǦ˜˜»6J¼_ÕlÓµzzÿqÅdã{Â7@ Öy7+¾Æo¶Ç?‚-_Jmøxït ŽÅj@µ&“ÕÆl5%2Z БѨí@mÄÏPd)¶ÏÙ°²ÐÖ³”ð Ú!Yý4òÀ’¾ú9fbðSÕ©ä¡ËA†G…EÖÀŒ»Ž(·úa†‚——?koS޼0Re…³÷žúë~jJ„¨t+‡ ˜(ÀN€µ™ Bþ„¥ìeø”ì„>⦄œÃe&û,g ¥Ï‰Ô=ÜÏ/úp2Fù\†J–ïy,S¡–Ìѧ®OÔ“$œcÔ“|™3¨UáHVâ@R¯8ùÙAìå·ÕÉMg×êdNÓƒDÕ‹gæFº¬Õ–/‹…‚¯¤Ï\Ùˆ¾ÌxG½`RézL×cúÔÖÁç²\˜pD/oqß©ØäÉc&#.%e¾býX@j Ü5aßo사HÈfDBHQ¤å_^#žò³éô*®ÔVf˜é¦î,÷ûÆV–{c9¥Æ3;ëÌôœ•– a‘˜”§7 |9U±É^0ŽìÆW;qnšw&´ÎçÄ|""ß‚ö2»ŽA œ{´N,×-<×q[Ë‚Vµ;-C‡[Y;®k·ûVWÓõ±©µ»-[³ ÛÖz­^ [ö¸g´Ê71À©20*sTû–®ƒuáÒ‰1À 7^a@¹xØÀ ¶}ïXØß…,ïŒïztïåéùKxdæh¥l·wp*Ï]BžôòÒ 
µ£ÿEwÔÝ’A…ãvœš­×Õk ~TÆxã¯é‘µ¶ñ’·}·^ ˆñå˜ÍªLgj<¶^ZÁs-¹–tÍnÒ±’‰+¥°ñ°ÁHƒêm­ÆLñþ‰/à}(i2£ú"o“A»I’Y¬Ù®K|Ò J†¡JÜÔ ¹1¹Yøâ1¹ù½áD‰té·è~³pü…TáNÞz(;ê¯=]<å³lfÃìôºÌ…3Ä7Þ8Ñò@Šf¬]È6Ì«2ÌgØÁCöô‚(â ÙôCÇöoÂ8!ªh4+ª¿‹W¤¶Ú<ò㋇+µyElÐÕ*ÜDþ¾zpºRÏñÇùä’=TˆÁÄé“(œÏž~[ŒÐ¤Þ¡h”>7¡é UJý¯%…Ñsš&õÏ9©]Ø\6q3Œ¼‰hêüqºbY*®>åoR_ú¤Kd Ì=‹Be‰ÊÉšäÍ ’·—3Q&¹"óî¡3Ç\*ÿgÛãFÛìèÆã²q­@AD¨§vŽùÍ ö6…‘w[aX•ƒSH\ÈV!µé¢»fákÍ'ju¥XZ­7ØS%÷kÐiš÷ 9]mˆBžð6Kj<ްe^¸Ä¡}þ|únÍ#®ª÷ÖSqO΃£JP(9ŠÄ´L.Õeª¦ÙÓ?gC¼:¨*‘—@¹ñl:°£ 'i'„XRÙWÀ©»@P(ÓØO(Q®¹â´n¯kµ;n·ß¾R/Vf+ò[‰ClUÒ´ÁN™ïºnpŽ©·tÃ|\äâ8üÍ÷ ÃÍ,ðÀ+¬‚IX€ ]ÃTYÝHj3Sm±Cn¦>CÂsj”.×ßâo ¼É÷´û³Yx×2,lðúlEÝöô±ÊK<ñ`mR>Ûñ­ò)q?à7XÔÏ/?ýCùR´øÊ@I-%‹¼ÇJ„þœ£kª2ºW’< P¢I%cš¼VþkùÃ'ýÿ|1þ§¥OÒ3;¹(Íp–4í´šQè£x-u£I›SÛ ÷S`èííz'½3!õÎlwõVa CQ„…?¨>ºC¾:P½`ªç*9 n€Ànƒê‡JLÁ“È¿$Ã;péN“:'øÛ/3ß¾…á­BÛn„]ãoÓÆ.~Ï"ãë&ÂWurwK¾ÙN|z®üà ܋užR5-g: ªyé†a0$Òâ§þGøk£ÕkVË4ô~·Ýmm ÷š üDN!¦QµœF«‹ÍÝÔïõZý®Þ©»Ij7}JˬŒÃˆµ_ú–Þ7{l#}ÛÓ•˜m,œü¯Ï\ÔOï2Û[[¯h§ýðI5Û#;FÃL!Só½Ýx÷ûºÞouXÛiËÓUï~×lØ€›0V!#«É¬ÀGd»J8ú9‰2ŽÂ©âØÎ ^¨+"õõçøcö×ÇÔ®©rØå\Mú¨M{æ5ïŒæÒ.j«Gf!ÿ$nî²,¿¡ˆŒî ‰m«//¾ŒÐØû†?ÁÒOÞz%üAú×òx:=®¾ßc˜>.+•ãO?iãÕ f ¼´é1Û/EœíÏ_/6«Ñjµt« Ñ®µÖº6šAÁÂg6EK?VÐ7LHñÜÁÅÇsß¿ßcNKO[kmv;½¾Î0öv<½Ñp’u´B§ÜŠþÀ7+[«ûT©_(Û†-X-ùž¾|÷ò_ÊÇׯ^^n·Åå`÷d‚Ü <1"„ÍY§Œñ9ÿ’¾lMã[o6ßJ†€‡°3Ïò&AeH×Á:Ð5°ëm²JÙüôº¦ÿ„Fó‰‚MVÒCj7DÍhg¶§ÙÌ®-]Ò/dßW1F·ÑÙ¶;µVQÒc…0* =š{ÙÎxRZÀëš2 ɉ;¼°#ç†ú-Xä—³æ<Æm{cGK‘+¦¼³2uËДÙ}r¹x2øHÜ‹T—H¦ƒ±é{£fú\«a˜Í϶ڷöOÈ@Q2B"¾XŽ¢óÖÈY=6´QáëCß3Oˆ!ÙÒ0#/h>Ö¾ò²¾¸KÝ,Š4w££ü@" çxè:ŠÒR0vû\¡øÿ^(_Þ¾z¥F£ÓÐéwz×è+?|Ä:õw;ɾÐÌ×ÊJžué‹%þà þ°× Üh—ÀÚFr_1Òçíç¡÷ýJ¢Ô[¬bÇÅ¡-,%FIBäÌgЧ!Œî•™?ÇËfq­{Ú.- †¼è£¹çc©§-,€L­Cß‹lÌr¯„‹­M”6á"Éõ9½q)Œš©ØÜ1U4‹“Äç}û¬±l·&)VLzÆÃó\º}:³còwLÌßIoýðBÁ¯Þ„®x·9‘7ûnkoÏ“ð{­;ÙZÌJ¤,ÝL·ÇšÏ)Wt1nÏ÷ãÊ¥kR嫇] /ð2Ë$\Cì¤x±HX]~ a¶Aæ4Ä/[¨Æm/’¥'iÛ•Քà ÿÒB>’‘ZȽŽCþå°øÙ ªI ªj®--U‹¯Å‹Û÷GØMÏFþÒô…c…*{ï\¹3±ç˜>Ÿ¿™9;Í\Rn§³{ðÒÛ_¿ ]/&n6\ †™i΀‡ËŠ,)ˆ-S«O$à°V­³òÙ9™ÿ¾"ÅöIJà}zpÌÎQòwâ³®:„hz"Îö!: ñ|䉿{šDÜ~üðá¥o"ÓµÆcÓ¶Œv§ïô;Ž®¶áºhÔw;¥TÄm·ï¦§Íl_±£ÉœaŒpefQHvšûšBXÈ ¤®÷E|‘šñ¬âö(q|#Œ3BN8EÃÔ¥ÅPsWÜ%Jì î±ΰڞ‹¯ç—¾#Á: +[¦è[ÙÃ;쎓Jü¿êk‚»&-)C™šŒÃèWB\Ý …Ì-dIð¬3¿…U¾,x׊¤= ˆ²nÜ7‹­d½MÓ¤="'7HÉ8·W‰WkJwjA:Ì~ÞÎgøËd'i°¦!Ûzª{?XJ}–y—cÛIv倰ÁåljCz9Юüv<йõ¨0'.[„&½€«(錆¡ï`TâÄ¢‡„bèÎñh Ï‘R{ FÞ$a‡1V{r¤rD&(0rý›7&튠û ›Ž¯x~%y°À³”´-«†L w4ã1´b¹S&FJ5 v³P@Ä@kGF€¯NFnFÞ­Ñ¡ Ý®;!^4ÝãŽýð+!êÃ.Ew#Mºˆ$ÌåV¹4ƒ,Cqu\)È”¶R 2½ÊN r¶‚ýçÁ³@Óº ÙªûÈ©iøWw:"—î¯4&^r3A ™ÙÓåÕß4ØYÒ'qbÇ7Ò¬õ,–íM¤„¬R~‰œHÊJEعj¸è6ž)Ï—Â*âܦÓb&JNŸ~½ÁÝ;“âëÐmSìÎdNnq’4¦FÈ–e]= ÆÁ‡—óä¯ÎÂo÷L“ÀÆÁ3Ÿ è ™!åÏ>¹Àvwi†‘7ñ>Þ¥S‚Ò£àÕ rn)åúæÅIš=Þz„o…\ÜÀÇ='s´tÁ¦­8A{WAþïr(þMyýéóËßýüéïÊ»¯^¾S^}xÿþõ«Ï?x¯¼ùðQùõÓëx‰†€Vÿ£ÃÈüçëW e8‰oÍQÎs*ÿ»’¢üõ¯Jì#4Sô39PæS¢Ý.‘5½Å=£h3Eý·B˲95/¡”«Ëç÷>¼LèÁ¿kF×ꘆeõú üo_ïö4³oiF»Ý1[V«ÓîmÃÄø¤@—ÿí YEzÒ#G3Rú™ŒR‰²Gp,Ó­³Z5ó‚îKLfUãË_?3t-}A3;mÛÖ;£‘uC¾µûãÛî|ÖS>ÕŽ&å-³}ÿ÷!tmŸ Zç7¬2ÿïo°5¨ =ä´ÍÔQÏ¢jÑTÑÆŠ÷úßz!x0÷}ÅüÛ_ €ŠHñtgÞpÎ7»ƒÖO,‰æwΡM`œúe Þ"rO/éës8лœžV¡ü´0ÀŽ=$§SaKëø!ß– Þ¼Îòb@б‡|w#2‡ÍÓ¼‚¾ñ\C*!†-51$°ˆÞ”C ¼ÂY¥¬JÂ'9°è·µ…ê§Ý—$ò¢ÎGÙáOØÒúöùrt.`¹ñPVF­lâÞ’¦ñØ. \u_ÃÈF%Æ?N†2”FF#Ð¸Ö ²]w2Ä3]D½³}ÜoD'-ൽÆZƒ./· šxS„!WG™ziÜÅA# 9É rþÎ÷üM†—ˆG† æ!g§Ëé¼1\Þ¬þòÿ_?ÿóý?G¯Ä™¾ÿ§óùæM¿]~|^¨ ‚Gíà©Í†og\B;¿:2NìélóMŽÆï@ÕÈÎb“T7÷Mîž~ƒ†+UŒ›§Ëøsv}bÚ6¿I”„±Ú*•0×Ô'2~q.OÀxðÄ=,¤ˆ“T \)Fcƒœðk€¢hŒ"¤sž_€´­Ý1;:äøË›Üh£^×v5ü®¥µ{†¡Ù®ÞÒ:vßì8Èè™㸖¨ù-íföŸs¤8Örj'æ_‘E3b>vÈóa=Á¦Íî¼ó‚ ز•mù¢ &×åõ#´´ø‰~èÆŽ•B’î])î‘Ã6ä rªÿb'ÊYýp’n=Ÿ)_ñ;ñ 9^Ѻô¼½ý8¢·Õ s ò£uX]?Óž]¦ûÒ¡ >Û4¶bíUà!Þ þ’ëàâ6äžÕ2:ÖãŠñsÜŒ#󘣸ÄÃsìë £ß³t£«µÚ†Öi÷{º¥÷qÅÛݽ§w¾|!©@/@ºCþÙ)¾Â“{Žù1+m‰Cwݨ{ûçÆü‡wjÃúA7¨z‚gÜ]v© !ßbœc<¶J ž`ÜÓÑ¢wÔƒÿãN0î‡ûÑØOîÅâ=qÈÈ{â”ãÄ!ð î¯n`:©ˆìßcrœäå;É ® ç#ù@½`ކa0DQFÀ]碱x`cWp³˜{u´ ¸’ƒ”ÀºcÈ…º= Xt–Ø;˜°ÒL•}ì±>F(0öˆ;¾ÇaD» ²¼“t» YO²CsŸ©æ€f=O >EÑ ³P= .üØ(Z]HXŽØì¨<°9P¹`s`KhÞØìÐùÆÛ°v°jë™8XéµÃØs±—•-#‡L9À€ 9K4 ãÅL—¾À––'eƒ5OكʞÂŽÍœÂÉ—‰͖ ʜ„[Δ”4!jzc3‹s†~ûñ²Ýmp£„™4½íäkYšþ°÷P³jz›Oº„¡2rÆíî(¸…™ËÊÏc×"Í­wgT;ÂÄ΀½^d+oæ·VhùÅ<Ú ëlv8®J? 
¢¥0‰<«Sž$®Žv‹%.„wD¥ìOÚáFÎNêo0Û‹s’¤-€ 2dÙM©/¼Våa!¥8ð¨rº?^關ާY;d†§i;;M8K¾ŽXÓ"wD1Ïe]{‰ بºñ0_êÛ©ë eWùûβó—n-+æ%]lKØžcþÁ8¯gÄSž«˜§8ó&ËÍU IsºÜ|ÉcšOŪsf/–´Ñ¿BÄ5à²f¾†Ý™“Á/•–È.…3%ñx¦AáTD~Áüiˆâ›A‚)ˆü¡ÒOxønN9´ZPc+kb§52Ú£nG롱‰›¸ßÕFc«¥uŒv¿mŽÇ:rÌòM 0=©q† —ÛÁ~[àM¾§Ùs×>†>ÃE¼µ,™»fÂ'ôßl? oç3eNÓdöÊÎö^crã ù¾qoOýƦbÇ4¹æT³„$]p™q—Çžg4LËjµÚ=­Õµ4³Û2õn¯eöÚ~ËØ›·óí YEÚÖý¶…Ÿ|ÜåÌ©t›_¯lì<©t;‹ ”J×›ØhúÍê ¤ÒíT®\:æŠÂåÒí.¼Ü¦%dÓ[­DÓéöôµh>ÝŽŠðçÓíãÎ§ÛÆžO·‹7ŸŽ 7ŸŽ”#ŸŽ˜/ŸŽ ™1ŸŽ“#ŸŽ”/ŸŽ\8òéø@ùóéØñÙ{3cnÓE#ÛiÛîÂÈûµÄ­"»—`Cf-F÷*½J›,?!áY7—¸PwÄÁd,!"ç¾àw À¶g®‚oó,ª/àð©¬ÒØ–xNQ§UpY×çð­ÃLu.Þ:Ix‹‚Ýyèk|Bn%—Ø ô¸’Рˆ‹ózd«›&«z`ç;óÑÈIlK‰ë~r ¦\ 0_–+¡ž˜#¡ž›=¡ž“;¡žš;¡žš1¡žQ,¡^ Ÿ5¡ž5¡ž‘+¡žW ¡žœ/¡ž~lp%ÔsÀr$Ô³£ò$Ôs r'Ôs`KhÞ„zvh„ú:ÂRGXêKa©#,u„¥Ž°Ô–:ÂRGXŽ+Â"ĨÃÏÆ¨ÃÈŨÎËèÎÊèÎÊèÃÊΨÎĮ́ÃÉÇ¨Ã‰ÍÆ¨Ã Ę̂Ä[Î(•4!œŒ:ûñØuØUº”–´„†_:ópºð-pÓDÄ0Òb÷¶9‹<¼$¿×r¯jc¢[S0# °\Z~ýV1¬±-M=ªy›‘gÆüj‚äâ™áê$à°ÐA¢hÂùö¢RöçÛ‹9ûìùöâ?/Œ°Ö\ûr(ñÌNÓwHñÓÅ¿ÂE€G•ÐV¸òð+p3¶s¿,næ~Âô.|az¤kØ-Hæþ ¨õ¼æ® …Ÿå5Ûz¿cµÛºÖs ¤µmÔÖúVÛÕì®Þu ÃÑ-£U¾ÿð¶ ìNž¤`«ìí“kÈ]/iQUIÛ$•—5–U4òtF ¼mŽ€r·3Žb ^® ·¼ÖÖn§¯?.‰Ð°Ý‚%0lÙ‘¥QÊ•C\¾À›|Oó1büè®LvcðÖ²,#†%Ø´$Øï/‡…‚C& E ‰d¡0†Ù6­^Ok›m­ÓíuMÝhõ̾¡·ö’PìzùBR^€tÇuþhH%Ø•¶©ÄtØvƒiÛ •Ø¥ <œ%§0§ÄβKmù †QâÈ*%J(±»£ù$v»Cœ|û=3> 4f> 6¯“ƒO‚Ýæà“àeç“àæâ“àCfã“àÀdç“àåâ“àWv> NPn> |æl¾,Ô>-§Ô G5²Å¤Æ=T>ÔUbV Ž›üFÁ­Åmï„ÓYcv\{`òù·SÀDψÄp¼ñìŠ6ŽpÁ¾†Ñ­ Z”lÍø3dødW%!é½,°m‚w`•â>ö¼û6‹ï Âò7‚(pC› ¤Êl›øU±ìØ"Ü`N?0;63&/4/4¢ >#:#".?8.üØà!"àe'"à@å "àAå%"àÁ–МDÐüDõÒ¼^š×Kózi^/Íë¥y½4¯—æÏii.’ÁÎÏ”ÁÎÈ“ÁÎË‘ÁÎÊ‘ÁÎÊ‘Á.€ÊœÁÎÍšÁÎ É•Á΋͔ÁÎ ÊšÁΆ[Î(•4!|ì xLìŒG*]ƒÉ]{Á¯¹8Ù9¢ÊÙ!Ë—È^ýô͘ÈÞú]…Œ(ÆÐ“ƒ,ϰ,?ðrJëxØ ø~\}(N”Í@XÊ^6Áõ3›>7›¸F6ƒ’ʲÈÏÄf ^I5õ’®R7vÏr• ϦºÊ宿;YŠŽÊßD—,´«À0°¿§·»Æã"Oùß_œB,sOÇð ØÓ|¼pâD²x‰>Dl'úhý®‚‰‚éÒk˜º …ˆ™Á7}ô:PøÑG™‘Ýîk-£Ó×Úú¸¯õÚý±Ö9ŽmtZ–Þë–ï?€ÁüÅÜ[OÌÂø·œJªáQôóÖkeÀL‰ØV—™ö¡ôÖ« ò&÷º‚¸|7ùžÞÈð%È^ÑÃJ¿Ø3åupçEa0ÅÎò&šÞZ–äèu›¶=}Cò“ý0¼Ï”9M\Þ+;Û™ÂO4’˜¬¥g¸¦ö¬qoOýƦbÇ4ñY¸Ã7d@þàaÁgd¤œ ”³­ÂÏ/ögLîÓU5OÈÛ™ `B ­8\žúZ–n:DÁ؆°ÕÞà6S8‡Û‰Í´£AZ—*Ý¢¥ ô?yÑ@¹RÓ$godµs=¾R¯?œ¼CwÈ'OxÁ8$ŸÝØë£×$/(Ÿ£9º FX9o_ßááÿãø†l$^I„ÕfFI<¸ ESˆ²”?‹ò‰¢¤Ýë{FF:Qdšz\Ô³Eúh*7J±ÈOŽ—‘¸d_"òœø* ;凨ºèY’m14‹'‹ˆ7ÌnCÇÿ3ÎΕ3RvüaOïø¯¯^rCùŸH ÿ,­:.à"àÞ XÚhH%eŽ;r+jîYlÝvÒž:è_¾Ü’¯3ƒï™"HD>f\fpNÞsUÀˆì >ì°< >̨\ >ì¨ü >ìØ›Á‡û€ç°öàk¾öàk¾öàe}°Ã3}°ò}0ãr}°çð}0£r}p£r}0c³}pAr}ða3}ð²}ÿªÓ9œT²¢œŒ&{ñM6à\nb4Ù`´ú½vëqA8<ØÍKÀâ0!Õë!=V¦.Zè §‡ÊÊ꙲+"#…Jçw€âó³g0ÃîÍñâBXóã‹pdˆJÙÏ‘Áœ%Nƒj£18øx°É ïNÿ’&¡ì$e(ü è#£éUÐrDIïìÔ$®Hþò•:¸Ru³ß1[­žf·]Wk[#]¡Ž­9½~§ÛëéŽeYWêBŽFìõjÅô ¸'JðËâ&.౸ ó» & ¦K¯ænððŸèa+ )Oí݉¸'Ûøúšw&´â/óß |Ð2ö*´ÀŒÕ‚eRx®ƒ{ qHjleM<î;zgl[Zß´t­Ý™Z¿c·pc·Í±ÑÙ]ËXÁ•B[ôÍrYL.Mó"ô<éK;‹³!ãá‘·1#Û–]噓¼%·cã³åÎ7Ù)^JÊ“¸Ä¦<µfËjõ¬=§o˜–ëÐê›f¿eõ÷¦<í|ûBV‘^€ôÈUðxRž˜õ¶DÊÓ·oÓpü5ú@ÊÓNuàJy Â)O» /·)äÛ-˜”§c«•hÊÓž¾MyÚé¸0¤<=õ2Xrö:[‚¹Nûqyrö£±ç:1†òÙsØ}lŽ\'>Ð; ‘pî\'.dÆ\'Žö\'P¾\'n`peàÈuâåÏubÇg?)ɵ~ƒÁJ7A÷‰–±–Ÿ–¦]ÚK °°’•=<âÆ¿ì‹x†ø~˜E᯳ãŸíú^€>‘SútÛÕÒuàVÝã'à8ôGFÙß½8 £ûwÞ”ž³2 «'ÈÙ,’€ì;&:wñ=;l„8©CÅe­ Ù®ýÎöºâ„dIMîå(R„Û¯ù³}-iª4µ¿}šGjE;ú_diók`ßÙžoãU@.ì$6áó«“>®uÈQkfá° 'ýL‚/×~VmCW‡IL|u[‰ÀN«–tÍnÒ±’‰+µ°4ú’ŽMH° Rܹµõ’ݹv‡ZîŽõÓa–†>¾H•B%i7I2‹5Ûu‰3|1èé½–Ä1²’›ø±FÖöM”8Mü¹—èò{É’ÊJq‹î7‚Dª(„†·ÒbäD(ÑÈ>EZš)ÙL?ŽñcâèÓ¿+)X8CA|ã-_*Ú)¥ÿEUÀª,óö‘=½ ;h6—Y¢²F³âö ì¤öÚ<ò㋇+µyElÚÕjMþ¾zºÄ¾RÏñÇùd–=TXðÇé4sþé·Åó+éƒw(¥ÏMPr¥.ªÔv¸–Û̪7µS¯úÏ9©mØ\6y3Œ¼‰h!¹RD£Cc@·øe…eĦ Y¶HšˆPÉð ]Âú´|Òp.™fΫwèTé2'=þîB>E¿¤úw)¤SR£K;!»)*ž\·J웫ý¸Nk®Áž±¿ ŒÞn_çPSD\Uz‰ò+ýPM³>Ö„GnVÕ½›yu™óWAGJ•p- ]¢ÊU<¶µDתšfOÿœ ±KZ«DöÃsóäØ$ái>@NÒxâö6â;§á¤µ5¨ë>èXÝV~òÊmfÖõØ:P{Ñ\ÐjDÚ‚á E^èV!ÐÒ«©V4rªdtt½¢1ÜY#ª¢úL¾ç÷­ aOò¡«ÒÁ¸´Z½¶ÜúZn5ž.¯iu­ô”CU3–˜„NH‰Ë>¿ºTëÑ^/ÍžýÒìoåK…ÊûoícUÒ#d»š#–DsT/ê+-·$c›Dz™žßzŸkÇ1.aÑ©}—kÚªqYöâ%Ï–ÙFq«Ã¼º¿„.©}ÛÔ+Τõ{,‡uPðiŒi:Kîò"RB¸X¾Ìéöˆ³dÒ­Ä.ˆÝÉ¥Õ†r¬ÖDÖsMÏhR\Ÿ Ö<(­ÊÓ\‹SólÁQaË ‡ƒdxohä渡‘›ý†FvLîÙ¡¹ohä€f¼¡‘Qì†F1|ÖÙÑYohä@亡‘Wà†Fvp¾ÙqáÇ× °74²£òÜÐÈÊ}C#¶„Fཡ‘Zà†F®©WˆwˆK7ï:3K¯wË;$$]”wˆËõì ¦Duî–(Á;Ä-‹wHZˆwˆ[3ïÈ‚‹wH8þ-À™!梠8‚åw =Í 
Üö¶Ùi·LëqaÖÚ‘‘¥h3(K‘ˆL»9XŠJ‰ád)’µ€/¾K·àb3³‰5«ÿ!®-l,EÂø‚·œ¼ŠYŠJfŠ.Äâ$,‹ó'>ØYŠJõµ¼!ÆœPZJšÀÍR$·$K`)ÄYŠ€ ÏRT0–" ²”b)oe)*Ý×r›YKQù2q³•ÉtZD©¤K;­'š¥Dæâ¤ÇωñSÒ¿§G™y‡à5±ìÑ;==¾}ËÁ;ß»'UNÁàHC—¨€ÏguÆÈ;#í¼CPíÅÀ;%Š™wFà>Þ!¨jíå‚ÄÀ;U#Þ!QÕ‡‰wF'ïœÆ•èà^Þ¡Ó[0 óÁ-”Yx‡ê¥ Pr«–-!¤v^kçu_\{v-Ð4%Î;tôžwè”Ì7ïœhfÞ!0‘é‰{FÞ!P©Žlî;­ /_w´ ‹J$-ê¸r7•™wÎIeâªwrŽq'‡•wN2ïP=Úë¥Ù³Xš=ã "6Þ! ýÇÁ;'ƒwèû^ÔKA–dlËó‰‹fä:æ0ïPù¢°Q,ÀJøx‡`dÂò•.Ó¢ >11ó.À*ðeŽ©Gö\¯¶ ª¥÷úý*…‡¥h3Ú¡YŠJ—jw?—†a)*%Tb}*j)–¢òRÙ¦Påb)*¯óò´O ò58*l9áÐ`€l]' h{.Ú³™ÏDiÁ9À€Išƒ©Î"ï%`Ç-¤w––Ðð°2»°£Þپ炗5G’¼«û«MY ö³JñAb7'3.î0ö"S4ñ¦?BL½4n9SRÒ„¨3 “jÁc̹]èÎ|ãS=h ±·zRÖ™yš¡^›,ìô™á0/Ñ*k‚;ûx²ƒ Ä“AÆçÚµP”E^³ÄdW2ç^!­c¨G5c‡’6Ågl_ð9¥yøOPuëw¨ù2#õK[ÃÚ=AÎï X§ÂzþÒßÓóñçŒ*#m”ß$IÁ¸2–Gêx>ð7È¿beüˆÆ(Bƒb©+=µ±? VNÀíàáJ{î•:¸Ru³ß1[­žf·]Wk[#]¡Ž­9½~§ÛëéŽeYWê‚–IJ‘àQ%´Öâˆ2:¶?L–2`¡"1©R6rŠI”Xd“(…‘LªÜpÅíÇC+U>œ%Ûå†{ÅbU´u2«‡Å)ÆOqM2n+YæXÚha%¸*Û>Ò‚‚,6Ò+˜ÈoÃ2«VʳU‘°í$[Uàq–DË 9r.ÕîHu€ rÖ8´*WÔ&Gýá4{,óx¥ ¯t`çƒ;MÁ©Pb~üºj‘—sß¿ ±ƒ{_­ð Í×ÒÃN-gÅ› hê4Jó ^º&OÏ9¶ èòGgŽ«±•[•½\j¥F“F®Òd¬ò¹‹Y8Ò{ñKûR™àj$É—R…C¹îOhÛj§¢v*NɩȓǞýd¶–T†G-M*»Z¦]ÑqüùÕå÷4Ã=γ;ä,W¹äB¾Ýó›cë5I½&ù.Ö$O¨ëÊs^¡¶H]k]«WÃõjøÉj8Ë`©WÂSЦg?½/•¨ÀÝ$¨H»’á¶âX-«Óß 'ÚX¡+Y=IÌÌs.ü`íÕ¼Hµµ¨´ÕI«FREýÇF„ÉF^±]`#Bêhå6<Œn–4 ñ†ŸÕDÈF¬ 4LP¬pQn0cp…ŸßØÉ »¾'ÓÍÕyVf»ÀWV¥Ù>°š3žà©µ»Â˜å ºҥȕ YpÏ»A\Ù$žzO‰%UŠŒäÎ}½¯Æ„Ót/gyÉý+¼Ü@ß’ª„!³º.x¢o#ÛA—ôÞ)öE€"¬XåÏP•ÍJ"Û ì„UNÒ•NΉM12wöªO:k¼› _c,{x6~HåÆaÝ­¶µ¨íÀwiÖX@Ÿ³`ç•­f áÕ#0'R÷p?cèÃÉås*Ex»ç±L…ZrAŸº>QO’°ˆQOòeΉV…#Y‰Iy¹âäsd±—“ÖV'7]«“9MUµŸhÇ™¹‘¿ÙµÒlp1‘ê•ò™+Ñ—“¨Lê1]ézLŸÚ:ø\– Žèu,îÛ"¹úžžh­–¡Ë¦.#e¾âuýX@j Ü5aà²`£» +²‘šbiùWÀ¹#ü"âù(?mN/×Jm°á0 —(í¹†©;Ë}„ƒ•éÞX>©ñÌÎ:3=9¥%ÈGXd&åéÝ_ÀzñVõÖ#TÅ&oxÁ8²_íĹiÞ™Ð:Ÿ_ùò‰ˆ| ÚËì:-pîÑ:±\ ð\Çun- ZÕîàw¡ÆVÖÄŽëÚí¾ÕÕt}ljínËÖlöµ^«×Bã–=î­òM 0Eª,ÉìÕß~%„¥ë`]¸tb 0È×=PE.^î6°°Gƒmß;&>wË;ã¼]À{yDc¾Ã™9Z@)ã}|ŠÁuû†ˆ'½¼CíèQÁõG÷^P!p W\5[¯«×ü¨4Œõ NÓøÆßû›®mÛ{%¿_ŽÙ¬Êt¦Æcë5<kI×ì&+™¸R yŒ€4 ÞÖjÌïŸøÞ‡r¢RíÔ)èT‚v“$³X³]—ø¤”ÞB=—(/ñcÜ|ñ˜\üÞp¢Dºô[t¿Y8þBªp' o=”õׯž.žòY6³Œ€!Ë-÷% ÎPßxãDË)všƒv!Û0¯Ê0ŸaÙÓ ¢ˆƒfÓÛ¿ ㄨ¢Ñ¬¨þ.^}ÚjóÈ/®Ôæ±AW«pùûêiÀéJ=Çç“KöP!§L¢p>{úm1B“>x‡¢QúÜ„¦ƒ,T)õ¿–Fω—Ô?ç¤vasÙÄÍ0ò&^ ¨óÄéŠe©¸ú”‘I}é“.‘%0÷, U”%*§_’73'k!ŠdO˜˜ÕŠlCî¶Ä¡}þ|únÍÇ­ª÷Ö“kOÎß>8¯¾Kç_³§ΆØß—¹Ä 2ð¢&7#^€Mv}ä$'k‰F|ç4ã‡t4èXÝV“Xü‚Û̬ôn÷–öá"ÜÊAc g”vD¦ K—[häÈ`tt]r R¶‰"$—òu8¼o2…2µ¦„-[u*–ªSV«×>ñØ ž¤§[≖Þñû\c iíÔzYV/ˎɵÇ6±2g~™_;òµ#¿¶­B¶i¥NýEºàì¿ "¶qoOýÓšEÓ{eNÖüäÖ p]ŒlÄ|õIiQ«4íì¾éÒ6œ Þ—Þ'—Ö4ŸJòã2 ÚÃÒ °*aQ‡ûY\vbGŸ«ÃžU¯Þ<¾ À¶åÚw¾ xÉ‚¼°kãqQ÷‘ï@/N……€.N!KÁ³8–˵8•²ß¨”AZœ–È»8-#pqþ<º¥¸˜ª¬ÖŽôÊ–Z¸l$‰æHžFHA¾G…-'ð<£/oP_¥Ûoo¼h¯OÊ-èñµ lž(·˜ÇW5,1,¿—È{SƒˆˆG÷2på”—7¤T‘ûnchéÀWw/ÀÏ÷r&®c9’\%6RñRf]]'n›º¼ùJ]#ð.ÞI´,ðS™wI’V³â–92ÕÓiqfòÓÇim’F²l ^wå¿c-Þṟžø&¾5×Nã98zD3ì2 ¦\åQF¶ÊÆHË\)u·£êF–ÆÍdWçð’×x+‘ºbjTZ²4+nˆb%対C ^¬yÓùTÉh</€ð!ƒŒÅQý%—ÓÓé ×¾¹Š«ol?‡Ï !–Åpæpê®ÔÊ4öJ”+F®8­ÛëZíŽÛí·¯TÅ‹•ÙŠüVâ[•4m0WÞÈúÍ¥ ¬S°zds8PèVÝHyj³“g1Cn&3CÂ\j”÷•J!ˆ¿-ð&ßÓŸ_~ú‡ò¥hm”’NòJõ•ý9G1nSet¯$7ØQ’C%c9¼VþkùÃ[KãZú4±ã[ef'7¥Î’&™°n4£ÐGñZ’A“<7§¶4î§þÀÐÛ‚mÛžFÈõ"ä$Ø0(?k1P¦!Ùˆ_(Y £¹çã·½XIB¥p1=ôˆ?oaÉñç.rYë<²c4̺„Ô]‡‘²¤‡P.þ¦üà%hzñpFÂgålëìrv®œÑ3ø©/«çWAòβƒ(ô;ztƒ<÷ùÕ%y ±£ J†…ç׋‡TØOiëЖ٪¡~ªQØÇ70„Õ–|ÀýÞÎgÊœÌjû‹z2•¬öÔoüa*v¬Þ•ÓBíéÿ]ªÿߔן>¿üñÝÏŸþ®¼ûðêå;åÕ‡÷ï_¿úüó‡÷Ê›•_?½þHèuÝêât™ÿ|ýJi޼ ß(š£œ!ç&Tþw%Eùë_•ØGh¦ègr$þ Ì§D»]"kz‹ûCÑfŠúo…–eM5ù/Ét¦ü[]>¿÷áüC ÿ®]«c–Õïà’m½«YzO3»Ãêé­N§Ó3[m OÊCa¹_¾T  Ýq`ÿ•ùT"¼[Z)Ë’%¡”üËañ³U™šôèÉÌŸO<üyV—&.ccv¢´—¿~fèJú‚fvÚ¶­wF#ë†|û­3ñîì(T>€Ô†æKò†íû¿ëùdp:7¸5•ù-»Ô†o±šó8Ji­è§³ûä& Z'^©hªhcE‹`;úo =mIÈ,óo5êaNÃ[ìô-ërMÇrÑÕ¹±ƒ ",Öc!+·ŒS½à.¿·£|ˆ:5hÃ,ó (Nmϼ!áÇcaå½ãÈ<áž¡ÀÞØñ L'‘é¹>HLÇFDضu|‰<`pep–‡Ç€A½`ކa0DQDéÅ!»¯8½À»¢)Y\ØË{Òµ$43ç1wp“ãâO¡½ÓüÄ_c„«ÐX²’œ%úM0¡¬¥9ygp¨óR ðí&’g¡& yë: þäC÷EÙËN„î¸Kç÷åØè¡¹[‚-îÏþ0ç² Kš±æç‰Kx”%'+¹Š†o 醼b 9sÛ^ë˜ýn»õ¸tÇu6 gEpÃ1ßÇÂ,85ˆÉš¸…Ãõ- Ò¦mU“൥ï39ÒÜÀá,wñö.رÇaz÷dy'éÎ3f†°¸7žë¢`HÙ€[‚@`½é ¯S€4%9åáÏcz7% z¶‚DœPv2—úéÃÜ(7òá™q˜Ä…E6¦tí ¨3;Ž¿†°†‘ñàÅÉP†–Éh9¼A¶‹ Õ6'²Ga¸¦^Î( 6W†š5 ÃïrDaýµƒDaøÝ;`g›- 
S»ÚUôEÉ(Œ˜@Á( ßJÐİDaø[‚) #¤ðãJ( #.‰5 SBOF\ WFHLW©ã*§W¡ÇÙ£aì¹h˜o1zÊqä&é}©Øj í£ŽgÇ-\Å XZ8€_‹Þپ炗5G¥;÷À ¦¯6½³2F ‡Ë$FøâéñŒÛM¼)ÂL S/[Δ”4!dµwÒ¥dšRr(’U56ä Ó“Œ´„…3ü‚YÚBy¹@¶ýÙÍβ7 «èäüôžÏüYúXôÿŒ®eêV¿o¶!ãòó(¤h{•›±éæð ù ÝßUȰu •“êãVîÛBuîÔì rßäûFp«hè(¢¼°êŠW<þœ¥Ã¦íñ›$)WÊBn<Å-ÈÏ]Šjìç¶*'`<šmRŠ$©¦¸–#ÅUvVjÃekÄ#C~r~ÅÎÄG4F 2þëîõÛÁÕ:÷Ü+up¥êf¿c¶Z=Ín»®Ö¶Fº6B[szýN·×Ó˲®Ô…Ä^ƒ;žË˜¤DÚ øôJÊÀö?Göxì9KrÚyj‡ùO^™IE°6Ó üÕ2€Nõûó«ËL}¥•¡’Šf]¶œ.¥KÊ6:ª´Úð¨@ÚÚ¾‡$›&Ç®ÉãŠû!2¥PB¿—czPD²}ËRIèäÅQ PÒ“ê쿼üYËzBðMsìà/‰Ô_!ŸOw*·òeu?Ê]ùz™^/Ó¡VÑ<ñå#Yد•èTÖ §bÐó@·ck©é­Mºä£×°‡°¥ŸgÛwŽ HÊӘȉĹ‹ U zb#½ñ΄Vóå)ùVÊ) êIÎ=Z'–ÀÑsÊù÷‚Vµ;­”þäM<êÚ¶Ñ6úÚ¨«·´¶3êi=»ÓÕ\·=²úÙ]€&˜¬`¡ªNz÷ÓÏ—¤ ½aÝF»aA¹¤+|X«µVÔ£Ò×m±Åü–-¨†õfoì©ç{bÓÁúU˜[ßìÝn{Gf¯úóåÝžíþüíÏ “·Ëýªá?awÙGŸÛ¹U„°Úf…Û? ó ‰Òæ–À|8›™ëP67<ßaløÅQ æC×J?ÛžŸ™qhÇ·Þl†?3” / œyþ7 Â(ûCf{ó=­-6¿×6Œ6^»µØ¤l{ZÛþ£¼L™(•Ï„8øSâ~Àoh»d5£ô¾Œ°¬Äá).Í'ŠŒÃhJC`×{™S¦æ°4FOv; ¨¬P~³# „®+²Æñ!oyî  È?öôÏÙp„;í6KÝžGþ€þ6h6sÿ‚®Ûœ0°ÓÔxÉkÄwN#[Ë6¨. :V·ÕŒQÏÓ{š™‡Âýã)2 ˆV¸é½ÚP xN†£ùxŒ¢!½ïw ]×eàÇÞÐ@±:V >½Ò3×Ȇ!.M<œÑ‹ˆq¡u8Üp–·4è<ð¾ ³¶Èn*H¦3(ô;ÂŽ@µĵðÛ þ7Áfb‘Ÿ­Âû†etVÂQaáªî^/«d6UÏU›P7@üŽ¿óà ÝÞ¢¿AŠ_ ’!vÊ\zKz:¹©_f¾}? ÃÛtž¾Æ_¤1…Ç 2bóàwq;dÓnˆ:¹»%ßlß8WþáîÅz_M‹˜®ÔhŽmtDÈmÃ[üÝáØíu»}£×7ûí–Þ²Ú]³…¿{MÞûÉNìñ‚^RrhU]llÝv»×²¬®Éæøm}º — ¨¨/Yyuƒœ[z· úæÅôþk‡”ô9ï%+[¥—¾¨µ=‰Q~_‚ j,¼ÝéYz¯ÚAM›OuQrD#;/Û¤€CZ@æ1ž^k«r*ô6sÐmu»†Ñ1ÙThÛÓ•˜˜¢föà-JÖ¯UR^å‡+¯ƒ;/ r1¨èbm«øí‹µÇ7^=¹Ó;¿éèlñb÷ânŸð,5½Gel;{Nl³À­‚š™J— ^«;É!„PA(¸ã=ǽ÷/bÚÖ©v4HëRÅ Û ôÞ›Ÿ¼h \©éÝ$ÞÈj7³‡¯Ô«[ÎwÄà’'ˆÅ%Ÿ¥öó5áÐ ¹¿ò* Kjt~œãéu±WABî9¥»Bƒ«@Q4…(ë [¦O%ín\ß32ÝE¦7†uãl‘>šÙí‹üäxòü0M &†×sâ«€Pgxé]ëÂÉÞJnÐ<~\2´H ³ÛÐñÿŒõ«à ü×W/¹!³ŸHÆìYZy\:¬³€š”ŸýXZ*H5e:ì(¤ªÛŽ“àNÎ/ˆ}lê+VЇ‹»·AÊ£”C(©³Ë©ÅÃÙðΦ‡ ÉäV²WÙïæaB£%™÷rß:c—S0ñÎxÌïƒ[:ô ³ÝêXŒ«œmOWâ¢Âuçµ ŒWî_±n“^zÅÚïlW;é} ªvÝŽivŽoźš9cÅj­^«g¶ººÕÇKü–ntV¬›zÛ*µguº=]7aú"¥BÎÛþ#²]%ýœDc_Rqlçá¯óŒ:xPŽ?f}LïV$®^â‚AšxѼ3šËÞXÞO»êŒüÛ¡ññl­N—¨x Bšs‚HÓ¾¼üù2Bcïþƒ§Ÿ¼%4ì$¸GÿZ.YèŠE}¿G>.ë£>* þòÓ|´ª¯š¡åV(ã¥M„}Ù±þèõbs_v¬ŽÙï÷ÚÝSïKUyÔ‡Zv<&c¢¨¸KiX»òiQ =›#í|c[[F ÛÍnKg²ñ[Ÿ®b¾*êæ3ácBbLñä¹Òæ“^z¾¶¶ßß-¿/Áæk"¼g™-ãçkr˜Ç|ÝÂÓu×2»]«Ýîõô¶ÙÒY"ÌÛzˤmu;ý¶ÕéuÙtgÛӕؘ¢î²¬»KûíÀ6éåwšzÛí€ô¾„³Xx·ÓµzGê·“h·ôŽná&0ôN«cb×ݰì.uu³§w­§wºñ=]… **»+qœ¾LýçmOziÐÙî ÈïK0@„ã ®kÔ&@ª Ø¥Ð-B·aâÅŸÞ5̾h×tbõ·ŒðÆM®] ׃«"îXò~¹B\ÃæÁؼfì6Z¦®[=³Û9mµ¨ƒ[;¸ß5­n»Ï²Üñ´|O¬¨™+ ¶ç”%ÿPÉ~áb;Œ¸Œ;Œh^p:öž;åØ°Ò­óÐãJÛôà%`~$ex‹ö]"zÇtÐ…x†;Gñ L'‘é•)˜ŽM¯à€m[Ç÷ˆß% \° HзÔ æˆø›ˆœ(î:¦û ù0ùN€ `ï9VÃXŸ;Ø 0þÎZjû 0p†`‚ª y¬DøO€ ô6ÒÌZ²Þé.Ìv§;'6Ó|íÃ×>|íÃ÷><ïýÁ¼ð{ïæd½?˜—ñþ`NTÆûƒ9QïDeº?˜›åþ`HæûƒE°÷Þ,Êr0;n9S²(»Ñ±;ʼnc‹C8Å©šŠ2_”̈·÷¢dަ“à•Âz£µzä^(€'!Åë„÷6Yo1îýåU°Þ¬ËïEûß·"Wö”’²óÊ1äìB)kÆùJ‚›ìðÃ\š„ °]–TºØ/M-'Jzg?½4ÕpÆú¨‡zÚ¸ãZZ»;îj#Ãíký~»ÕõG¦œ=—¦–+’=Eî ±íeñ\$(bûõG½ß-2r »_q€”è…HœRØ/D:2÷DäB$AÁ\"ÉŠ}B Ìnëa™žëàÞr!RjleMlu]sŒÐXÓûNKkÛ-S³­–«!×ìuœqÇÔ±ÈC‡„ßxì@6P:O·ÑîôM³Û3 ¦¢n}ºŠ³ã@E­ ‘t‡ty‰¤Uô%¤ÚµIz”““RS– '’â¾°zF§Ûj›lj³íéJLLQù8%~Ĉ•‹µx‡ôòœÖv ½/áLÞÇò[Çgž9§D¦ÐÛÌAÏ–‹cS¡mOWb`ŠZA^ùéòòÊ«èK8s€…÷ô~¯U{R=¼rÜ1Ëhw¬SO þîâºnWowÚÝb2ø‹°õé*¬;PQ+ Û!]X} f݉ð~Çêázﳆl%ë5̶Åtû¤½*lzŒÿ™Å{-»KyÈ•;)C -¶ÇÜÇëöž¢¬Í/”QŒ3d½ôˉ`—ñýͳC¯Ñ6zÓjõY,ÊŽ§åÏ`Eg’Ø!‹IbÕ©Œû¥ŠQH0â2RH0¢1QH°aqPH°rPH€²QHp3SHð#ï§àÄd£àe¦W6 P. 
N|¦ô3>Ì [à™W4:Ö¶ºZš^Ht†\ü¨¬ç4Ö<xéüYCü€}Ïߨž`—.þ û¾¾ O$õ™žzµt¸Õ#DXpúÎ#£ìï^œ„Ñý;oJSW è*äÙÛà‘?ë±w$M<–" Ÿù–Çq49ãkýö·¥§eÀ °E\@+RB¨'&÷r) }<˜'Ù±Biª4µ¿}šGjE;ú_diók`ßÙžOnÏ…Á+ |ñÕŒAý¸Ö!G­™…œJ&N–G±N³!Ï~VmCWÙX个–‡_4ƒ®ZûvÚ·Ç·ÎÔÂÐèÃ~Y–AŠ;·¶^²±;í: \¾¯å ±t˜¥¡/R¥PIÚM’ÌbÍv]â _ z4|]ÜÄ5²¶¿h¢Äi⿚³È»Ãc‡üÞp¢¤²RÜ¢ûÍ… ‚* ‘úÊXƒµ±ç£´<4H¿âQNÉ…‡9q g(ˆo¼q¢åKÅlÿò¢ª `U–ù {ŽÈž^…4›Ë€,QY£Yq{Ê7R{mùñÅÕڼ"6íjµÀ&_=]b_©çøã|2Ë*,øãôJGöôÛâæUúàŠFés”\©‹…*µ®å6³êMíÔ«þsNj6—MÞ #oâZhÏ“ñÖbÙCa±)H–-’f‚W2-_†4œK¦™ójDçÝG':UºÌÅI¿»ÐŸOÑ/é•g¤SR£t£ZQOöjÅš¸}åB qJ*ÐÓóçÛ·kŽSÕ½ûèˆS78ÒÐ%*àóYiöôÏÙ;™Uø·Dö¬sóäØ$á‰;@NÒxâÈ6â;§áøó?Ô Îø cu[M;¸wì8i®¨fš™£xÁ¤ÛÓ™*ñØ5J·S(Âõ6œá*†n-½šjE#§ AFG×+ªQzvºQÕgòu8¼oU{B9U•Æ•è Õ굟ÙšF ÒÓ®ñDEž;A/œS¡j½tpoñø«Ü¡¥$tšëEµ3[;³¥âÜ”õ½ŠiЬ³mÀæS&±“¶ð(¸{Næ-·2¿ükxùá§áû—¿¼®ÊÀÝÙþ½‰Â©Ô=óÍ´ѸR©|´‰’äÓšçSY~ ¢A5 ²‚,*‘´¨ãÌeÜTb¯+vR3©õÎÎéíì¢âôCU3–˜„NH¹¡?¿ºüþv•.á·aZf§gôž$wq/ä6ã@Ž " 98Áœ 9Á‹óçÙmÅ…Eåý·v®±*é²ÝÍ#ù¾ò5Gª„kièrÊ}-§—óƒI‘Èû\ÃØu‰‹Ní»\Ó.×6píä˦j¶Í<¨ÀæÕý%tIíÛ¦.ßeõ{tt¢t™äY'‰3¦äÑ€¦³äþ'/ÚÉ<òªðeN·GœåÅ&•Øîq¹£´*-\÷ÈjMd=õŒ&Åõ j̓Ò8΂óÌ$dzG…-'  Èo«·}Ÿ‰J‚8œÑ«ÄXh0رÇ!e^-ï$½á ®¼ä„¾ñ\Ãq~‘$t'°ˆÅkêiÞI,ŸÜ0 ‹žñ€@"ÎGˆé‚rN\šŸ;̉€yŠ¢ Êë qáÇF‘‚6²Üω:³ãøkk^Lïƒ×2@ ‡7Èv´Úæ‰pCv"®©W„‡ˆO/:+k·wÊC$&]‡ˆÏõì –Äuþ–ç!â—ÅÌC$-ÂCÄ/‡•‡HhÁÃC$ÿæçÐ(懡¤8†å´"1ò‰)‘x÷rñ•ÃÉC$¦0ðÅà!:¸f²ò š8YÅú:'–|«Ò†¦Ö¢Z¢’…= Ñ—/”a¤¸sOÅô>7KrLŸ‡HnI"ÀRˆóž‡¨` ¯Wg{=NF"iÇÄCÕ~ çÍêcSlÌ]›$^n—¸ ¦§›†iòÝÕÅÒ›q…o€¬ónV,|ßl·&Z¾” ÚðñÞéƒÕkÍJU–Ùj@Jd³°#£¶µ(q†"K±}Îv€™…¶˜e„ÕÉê§‘çh–ôÕÏÙ°ƒŸ¨ÞH• ]2<*,"°fÜuD¹Õ3¼¼üY{›rä…‘ ,+œ­¸÷Ô_¨)ùE¢Ò­‚`¢;ÖfŠ0üñ aø•²ŸáOhPrú›r—ì³”-”>'R÷p?¿èÃÉås*EX¾ç±L…Z2GŸº>QO’pŽQOòeΠV…#Y‰IY¼âäsd±—SÜV'7]«“9MU#,Bvœ™é²VCZ¾,& ¾r>se#ú2ãõ‚I=¦ë1]éS[ŸËra½¼Å}[¤b'Œ¸Œ”ùŠöc5©5pׄ}¿± ²"!› !5F‘–uzŒx>ÊϦӫ¸R[uša¤k˜º³ÜïG[™îå“Ïì¬3ÓsVZ‚|„EF`RžÞ$ðå4"TÅ&oxÁ8²_íĹiÞ™Ð:Ÿ_ó‰ˆ| ÚËì:-pîÑ:±\·ð\Çun- ZÕî´:Pú“7ñ¨gÚzÏÔÆcÜ£m 9Zµð¤ƒÌn¿Ó»Vo\¾‰¦H•…Q™½úÛ/°t¬ —NŒ¹ñrªÈÅ« Àöh°í{ÇÄþ.bygœ×£ x/HÏwXÂ#3G (Å`¼½ƒO1¸îêñ¤——f¨ý/*¸£þè– *ä·ãÔl½®~\kð£Ò0Ö/8M¬í°—¼í½õ’_Œ/ÇlVe:Sã±õÒ žkɵöí´7no)…<€‡ F@ Pok5fŠ÷O|ïCIû“íÔy› ÚM’ÌbÍv]â“^P2 Uâ¦ÉÑÈÍÂÉõÈï 'J¤K¿E÷›…ã/¤ wÂðÖCÙQmìùèâ)Ÿe3Ëf§×e(œ¡ ¾ñƉ–Rì4cíB¶a^•a>ò§Dͦ:¶Æ QE£YQý]¼ú µÕæ‘_<\©Í+bƒ®Vá&ò÷ÕÓ€Ó•zŽ?Î'—ì¡B &N˜Dá|öôÛb„&}ðE£ô¹ MY¨Rê-)ŒžÓ4©ÎIíÂæ²‰›aäM¼@+PçˆÓËRqõ)“úÒ']"K`îYª(KTNÖ$of¼½ü˜qˆ’0ñмd¹Ï2»f{k–ß²3éü¸§7Bñ´F‚ˆPO©Ò¤-¤ _Ëi•ƒSH\ÈV!µé¢»fákÍ'ju¥XZ­7ØS%÷kÐiš÷ w‰Bžð6˾û—”˼p‰CûüùôÝšG\Uï­§âžœ G• Pr‰m™\¦ËTM³§Άxu QU"/r3âØt`G)@NÒx²òhÄwNÃñç1~¨AWMƒŽÕm5íàÞ±ã¤I¬OH­O3³Gô/˜4b{:óeZEÜ^#\¤[¹"ÈQŽáŒRœÈdér«™ŒŽ®K®Aʬ#Q„äòO¾ç÷M¦BÜÖä8SZ§b©:eµzmõ¤ÖO"4ô’ž‰'ZzC0[D¢\"Âs×&­Z/ÓêeÚ1¹úØ&VæÜ/SëkǾvì×6eÈ&¯Ô©¿HMœý·AÄ6îí©Z³hz+ÍÉšŸÜ.›‘m€˜¯Y)-j•äÝm"]Ú†óÇ{“ãÁäÒšæSI~ئA{XzR%,êð?‹ËNìèsuسêÉö}xĆœûlî˜a[®Õ»‚õ®`½Ü¬—›]n>£ ¿|yTYÿ¬ñ•-µpUIÍQ€EžgÔâÕê«tƒí{™êãKäø–êã‹–Ç–_€Kä½çADÄ£[¸r JŠËRªÈ}w9´t`«›¾œJ|çà$W‰‘d¼TqH¸m꣕k„Þ‡;Ñ´øŽ£Þ¹å ÕÓiqvNò“Çin’Fr5¡²Ü•ÿ޵xËAÆzâ˜øÖ\;­Š£¡‹c_f¡Á”«< ÀÈVjY+¥®ñÑÂùá°cTÝÈÚ¸™üê^òoc%RWÌêOKÖfÅ Q¬a¢ÜØwHÁ‹5o:Ÿ*-ƒçãp>d±:ª¿¤ârºº"ôÚ7WqõíÇàð9AÄ’²μNÝõ‚@™Æ~B‰rÅȧuÚQÇj»ýñ•ªx±2[‘á ±KÆ!†Kº ¶ßưÖãÈê•6ïž3雨ÆáçhŽ ÑóaXàVÁ$,@®aª¬n$L5€À™©·Ø!7S¡‚¡?á=5J—ëoñ·Þä{ZƒýÙ,ÜìuôN¯g²uÛÓ;Ä*/ñăµIùlǷʧÄý€ß8`Q?¿üôåKÑâ+%u´”,ò+úsŽb¬©Êè^Inð,@‰'•ŒyòZù¯åŸôÿóÅøŸ–>MH[Ììäf 4ÃYÒ´Ó6jF¡âµÔ&y4nNm/hÜOý¡··ëôÎÕ»V»ßk­„£(ÂÂTÝ!_¨^0Õs• P7@`·Aõà ¥¦¿àIdˆ_ ’á¸t§IÆüí—™oßÂðV¡íN7®ñ·i cßg‘v¬:¹»%ßl'B=WþáîÅ:o©š–3Õ¼tÃ0’iñS„#Rw£ÕkõÌVW·ú–ÞkéF÷š üDN!¦QµœF«‹ÍÝÔi[íN§ÕgÓ¨mOWb`ŠºË<|JS‡Ñv{ðè‡Oº¨yÙ1f}š‰íFBz— ,¼kôõ~m$¤‰­z½Í*tÍn·ÛkwÙThÛÓ•X˜¢ffá-^G®5ò*ßÌQ^w^$X³Ãd?œâ33¼(_–œU×ÊÅß”¼M/Έ ž ”³'û,4ï£ñ‡y¶xAžßZØ#<=ŸvËpl;Éž@$\ºÒ,¨ä0?n9\íA)¤“À€q'#ðbï'óäDLÛúIB9cìϱYüÉ‹XsGMßYíü˜Ü•z`ËùŽ\ò±¸ä³Ô~¾Ž¢0Š ‰\4ŸçøC» {¢§ÀW¢h QÖ’î“O%ín\ß32ëE~’Ã|¶HÍìvŠE~r¥UÇ'~üÊ÷p^¡(¡5HÁ›„€0ÆíŠ"Â8xV|ôè~Ë“·ø›ìI{3 càˆOÏ\‘7|ùd¾ó´ñI‚Õ.*<@>šÙqü5Œ\òñoÞÊ–ýþ*{áG;öœ—sÚ¤Qp‡©`ú»\ùeKã 9rÙ¨xEFï¶]öÐ÷5ªüvÙÏá‹°ãB ¸ÞA*R¡¤Î.g[? 
gÃ;›Þ•Cæû’½ª:؆Ncb¡K¢Ñ¸›OYœ˜r &Þ™o‚¹Âp«©®Õê˜k«©EÝòt5^;HQw®å×ïû×òÛ¤—õõ;;ÔNv_‚ª]×Ô-ýñË™ó8ñ-¼†ïZØDXív¯§·Í–αˆgX¸wZºeõûÅ=Ê2}‘±ÎfmÿÙ®Žþ@N¢Œ£pª8¶sƒð׫ÑÔŸãÙ_Ó€¤: Y*çjš¡£6ñ¢ªyg4—½A8ªuFþIÆ ¢ÕÑx¯ÊHsNÈy4õååÏ—{ßð'<ýä-¡HƤ-Wqt§¾ß£ —õQ•ù©xœš¡åV(ã¥M³b¿l€Xôz±¹/-ËèuÚ†Ñf2[Ÿ®Âœu£9Eî§@¢2±mÝoÓù¤—6çÖö˜¬ü¾3çXxß0û-ëøÌyŒ‚ø˜b²Ý^·Û7z}³ßné- ;-s¾U¡·Øt«ÛÆØzÍÝút%v¦¨|vàGÜ‹XÃx·h¶J/o¬ív@z_ÂÙn»­·Ì~ïH÷fŽÉ­Ãÿ£[¸ ÜmHo=·™Bo4ý†aa²mõ Ñ~é“‹7wí»Uîçírðvô±³Gp¶?¿ÙãÃ]ÛíõÉdҗѵ¯HO*S/ޱA»32­_FHÏ•§}ćÜR£že¶;-«+£F?DOñHQè»J”ß°»¥0-Â3jš‹ñÝñ´ü‰¬¨ÙL*á”E#i­iz”¶¹¿b‘UFÜUd•š¼r`^p:ù‘ÑrΘ0Hþ0ã9,˜o‚É]V»o½cÚZåž¡ÀÅþE|³/úÍL97 1{Höã`ÛÖ¡;ò€Á•ÁY;ƒzÁ— ‘=là®Ãs£x`c—ÿÌöž]K~ÄúÌA}æàxÎðëïpp±Ÿ9g8s 8z!Ï”(ÿ™ÞAZ€M >^ mߟ™(p8ËgÕqHWŠ変Sh˜©"`qo<×EÁ’û·1í°ˆÞt†g 0 MAoøŒåáK ‹ž™?HÄùe4 ¸ô&òaŒG‰“„p#OQ4AYØ~l­.$l8L£4 ¨¹›ŒJí8ÊÐ2@ ‡7Èv´Ú.e×ËšzYS/kêeM½¬©—5B3 %Cˆ†±çâµM2œ¨âÈLÈ•¾”‰ÅhÇŃœ¸ šÎü°´ÄÔÀ»‚w¶ï¹àeÍQi$Ø_ùj{ ð2”@—„YðÅÆ>B$Û­ šxS„!<¦^·œ)Y”ÝÛgÀ±&œgPMEñpÒYÕ¡g@ÊnJbË…ç8˜¦“à¨Ã:èµc^;æ§æ˜8WRqx<µh ö­¦³¤pô\PÄÍì rßä1Þ/`Ž5ð’Dbdu…]ü9c^KÛã7IR0.ô’-EHY.ñ;YÐK‚›¦¦‡¸4 À…—¶/È õ>æçôbi¢¤wvj’WêÜs¯ÔÁ•Úéµ:ÝžŽ4Ór]­ívGZ¿Ý±µ¾5êõFvG×]ëJ]ÈѸ8€¾~õò’×C Q|‡ôŒ3f™ßB†èÓ)`°"õ»0ZŠBŸÊÒG.tòÄ<ˆ?Áýš~0¶=?ÿ’¾lMã[o6ßJ†€­ª3Ïò&vŽÒ?¤ëàN0-<¨Z¬R6?½®é?¡Ñ|¢|¤¡íCj7DÍhg¶§YR’¶LvýB|[ÅlÝFçz«¥ß›G€•1VåB¡wàæéÊÙÎxRZÀëš’&)i´•&Hc‘_ÎhÌ=¾±£¥È&^4ñ7NÎÊÔ-CSf÷ÉMäâý,qŠT—FûÉ`$1ÿfú\«a˜Í϶ڷxçÅ( qIæ!_,G1K¼‘ÿ²z6lh£Â×% †¾!gžC²¥aF^Ð|¬}%äe}‘epaQ¤¹å’އ®£(-c·ÏŠÿï…òåí«WŠa4: ~§w¾òÃG¬S·“ì Í|q­üð¨äY—¾(Qâ?¼à{½Àv <¬i$¾‡‘>o¿ƒy?ЯäfŒ­V±ãâЖ£„¦ÉÎgЧ!Œî•tÇL\ëÞ…6ÉR/†¼è£¹çci¹°2µ}/N°1w|/Àz2³£´ÁȾZ®Ïil&Œš©ØÜ1U4‹“DsÂójÖX¶[“+&=ãá‚y.½²‰ì*á¿c¢`Þø~Hzë‡JºÏ+ÞmNä;ÛÚÛó$ü^ëN®3ÊJ¤,ÝL·ÇšÏ)}‰·çáÛqe‰Ò0òÕî„x™e®!vR¼(#øœÄdNCüâ±…jÜöâ!Yz’¶%ùQ€Ë7è&Nþ¤…|$#µ{‡üËañ³T“@UÕ\[Zª_‹ ¶ï°›žü¥é Ç )TöÞ¹rgbÏ12|>3svš¹¤ÜN5f÷ॷ¿~º^LÜl¸ 3Óœ—YþR[¦VŸHÀa­Zgä³s2ÿ}EŠíGÈvïÓË*í%§)>ëªCˆ¦·pÚþ!D‡"ž<Ñ|O“ˆÛ>üc 8VÏq ™N¿×6õ^¿c·Ì–ÙE½^ß±û-¥TÄm·ï¦7\Ú¾bG“9¹‰ƒUîÊÌ¢05÷5…°>`grDêz?PÄ©xå¤ÎÂ@‰ãa’…9EÃÔ¥ÅPsWÜ%Jì î±ΰڞ‹¯ç—¾#Á: +[¦è[Ù„šˆ6Èÿ«¾(¸kÒB2”©É8Œnq%:†Bæ²$xVƒ™ß®‘fIÚÓ€(ëF‚NrwY/EÓ4iÈÃÉ Rã¦Þ^%^­)IÐÙ‚t˜ý0¼Ï*ð—7^õ ÆuÝÜ’ÚkI_ð¥4ÅÜqd€ —-B/NÒÃ\¤3^‡Žë „•æ@pŽGcdyÀÃÈû$ì0ÆjOXQ Gdâ€# Þ˜´+‚î3l:¾âù•ÜM < }Ϲ‡B‹ð*¤!Sbìà1´b¹„—VªYð°›…"Z;bç¹s^;œ0Œ\/ÑAèBw£ëÅNˆM÷À¸c?üJˆ±KÑÀݘæ6K˜5Ë­riY†âê¸R½)ë2CÒ,R³…„ì?çabKAÆžZ"§¥cäÌ#/¹—ž§×K§É‹RT$‹ê6ò•mº(ɦp¦›œ3x²b˱WÎ5ˆ'¥e™°spf¼¥”|6¥“B–gŠ;S‚ã+¥ä’š«`ŒWáRм–í+©Ùñ" E³ëŒT1y(KšO§Ñ̵½oH½¸ðŠc¹BHë8‡ÿ߯wØGŠÀ–—·Þ*Iz3µ/3¼2ìüÀ§<çqÖ¢ôðs`\rÙ£›p/öå"™¾Glc· I¶KÇÒ#f:°}9“úÏLër$d«Vì#§vB¦áÇí5"—î¯4&^r3A ™ÙÓF¶Ðø¦ÁÎ’~8‰;¾‘f­g±,ho"%d…Mö–í‘çËZ¿êUl'\tÏ”çKaqnÓi1%§O¿Þ`îIñuè¶)vçN2'L’ÆÔÙ²Œç ò§’,ÙrGLi¶ìX«¥á¯K!,ÊÕïñóÂ` Ù»êwª/±ç–(q8EŠK³Ì ÛÖ{öÝy¤_nÙ©Þ†Ñ1Œ^÷ÉÞûÚÝ]­¹]pgy'X:5‘ÜÜÝû¿{A(Pñ`‡ò›y$ÍiÇÍ†Ì“× !oyî  È?öôÏÙ’Õe£xùúÛ ¹<*¿~zý/ÑðÐêât™ÿ|ýJ¡Œ'ñ¢9ÊrnBåWR”¿þU¡×H(ú™‰?(ó)Ñn—ÈšÞâžQ´™¢þ[¡eÙœªŽ—PÊ¿Õåó{^&øàß5£kuLòúF£ÛéwzFOkë}Í4ÚzOﵬV§ÓÓ-ŒO Dqùß¾U¤ =r4 ¥£É(–(›7Á2ý:«U3/èžDef5¾üõ3C×Ò4JþEöŸÿ É·í †¡þ§òù¨v4)™íû¿¡kûdÐ:7¸a•ùƒ­Aí!ß m¦’zU‹¦Š6V´¸×ÿ¦4]t×$×)æßþjTDЧ;ó†ãpN¹ƒI<ÖO,‰æwŒžΡM`œúe Þ¢ý´qƒÞ1^sÁìØô"1ØÒ:ôyÀàÍK¢èpaék7"¹‚æ»›šån.D¸…ñ™nàæB_ò¨Ó•!԰㸅› ÷é-Ü`:·¼ý,Wµl½ªÅˆ&úi1hó_#À'…õRm.TæKµùPù.ÕæÃ–Ð\—jsA3_ÖÉ…Êy­&6ÛµšÜ×jòc3\«ÉÊv­&+îâ Ðü†ä<œïù› /qïVS¾âwâr<¼¢uéy½äÆ‹Ée}Øè s ò£uX]?Óþ)õE^:4Ág›ÆV¬¢£ª\ÄÀ›Ä_î:¸¸¨Û·úÆãz@sÜ,õ˜£°Äs4½ž¡“Ù¹ÓÕz=«Õ×ñwÝV›áãŽw/äçH_T0¸¸½wñŸ4Ü‹ÆqÒp÷IC@\ <' 9€g(p‡xpÓIEdÿ“ç\$;(ç¹H^`peà9Éês4 ƒ!Š¢0î:½À»¢›Ä\Ø«#ŤæBÔíáwÀ¢³ÄÜÁ„-@`ªìc¿xˆð1B±›@\pØñ=#ÚMå¤ÛthÈ|òšÿ,5;4ëYjfDÁ³ÔBøÌg©™Ñ3+‰ÈwŽš÷é9j@ð)Š&h˜…èqáÇFÑêBÂò¼fFå:xÍŽÊðš[B#p¼f†Î7܆µƒU;XÏÄÁJ¯¯Æž‹½¬l 8dâÈLÈ¢ç(F` ¼¼ø¶´\©̨yª†TŽflö.HÎ>lÆ>PöÜr¦¤¤ QÓ››ÉXœ³œìÛ‹—íjƒ„%̤é­'—XËÒ´‡ý‡ÑXUЃØ|Ð%Ì”‘3žhwDÁ-Ì\V~»èin ¼;£ÚA&vpìõ"ky3¿½BË/èÑfXg³S øÃqUú-Jàa_r%ïðt°[,q¹ ž°#(…!Y‡9;¡¿Ál?,ÎeH’¶*ÈeC6¥¼ðZ•‡…”âÀ£Êéþx¥_rzŸf랦ëì4áLy:BMŠÜÅü–uí%6`£êÆÃ|©o§®ƒ–]qäï=ÃÎ]ºµl˜—t± ,a{"Œù;`㼞OyF¬bžâÍ—,5W5$Íé’ó$hB<«Î›µXÎFÿ ×ÌV¬>Ì| »3'%‚_.‘Y o*âÑLƒâ)ˆÜ‚Ò…7ƒDS¹‚¥žîðÝœjhµ ÆVÖÄNkd´GÝŽÖCc7q¿«ÆVKëí~Ûuä˜å›`zRã: 
.·ƒ!ü¶À›|O³ç¬} }† (xkY2gÍ2„è¿!‰0~ÞÎgÊœfÈì•í½Æä¦ò}ãÞžú?LÅŽi^Í©&É"þ?ŠŒ¸Ëcψk7ô¶IþÒðßšÑï¶[ÝžÙÂ_´þÞœ¸o_i[»[m³Õ3÷ô²ÌYt›q*$;ÅOÝÎbeÑ9vìx³a(E·S#¹òè˜+ —G·»ðr›d”TKwlµͦÛÓ×¢ùt;*ŸO·Œ;Ÿn?{>Ý^,Þ|:&@Þ|:>PŽ|:`¾|:.äÿŸ½·onÛHÖÅ?Èý…ª³JÎH‚ïä¹ÚºŽcgSëÄ*ÛÉoO,)D$À l­Š÷³ÿfðB‘Itz@R†Ï½["Ÿž—žžžžé§ùtpLD>—O‡&WD>ŸOLJ?÷cn ÓùCË®ˆ*wžïü;²­·—dKf#F÷:.©-ŽŸ”ðÐË%êž2™Œ%á øÂãÌ ¡G€v|é×>|é×>¼:š8<戣ùã¢h>àš0*пРùcÃi>PHš6æ §ù8þçS§ó0)gG‘|&™x@>“-8qúà®ot[f£Ñ}*y7ûI–»ïéäARé³ÖgMÒ’@c>* ê›ÂUH¡Òýƒ ùxö 0lfŽ I•ÉÍ‘!+%›#œ$N“j£ 1<ø¸¿ÍïOÿ–’¦ v’<ä}q™ÿ™ÏܸȳQÊ';6Éý‡+‘¿|¥÷¯tÓ׆]Ö5Æ­QÛhvÆchŽzF¯×lt†½aÝdö•¾T£™~­œž‘"ÏDâ¼,4q^Änâ‚î:™(š)½&Ú»ÉX8Ñ{WDRžÛ»qOvñõUïêÔŠ¿Ê_ã ¤–‘©hÔV ȦðR÷VâF£Cµ¶’!nwFõ1cc£Ö³FÓjÔ «ÝlTï¶ìq«^ktNp¹ä¿-ûÍ|9L£(ÉKÐ`’—ö6gK"Çã+okGr5û˜g.ðŽäŽ­Ÿ¥l'qÂÉ^ñJžä%0á©Si¶ëµf³½Ö7[µF»]ëq™ð´ïËŠô=ÉtXÿŽ'á ¬´9žºá¿ÝñשLÂÓ>m@%<É/Né„§½mW:ê-MÂÓ‘uJ6áiÿDË&<íuX OϽ H¦S¦“%™é”‹ÉtÊFƒg:CøðL'¸oÈtÂÞ)ˆ€£3PÈÀL'Ä <Ó ŠËtB“+"Ó ŠÏt‚ãÃ_I¢ÎíOb/\é‚*éýÐ*ÆòãÊôR¢+»9€×N°*¢F’Ûpq¹æÌ¦Ÿ‡¹ïMøù:ø‘Y£©ã²â…~tÝÚ®ÕˆGÝgÑݾ7É¡ï±Êþá¡çß¿sfÑ +“º i’šKÒ™ľѸËßÕq#„¤ •—µ)do˜ö»ã B‘!5¹W£H>?°óã~rŸ¥L•fÖ× YÑVí?Tió›kÝYÎÔâ§€TØI\¾§%“>lLÈQkæÚÓ_&Nù[„©ZûY´ }|(báUJ$nXæí¬;nŽoí¥= ÑWô\BePâÎmœ—,îÎù´7ÓjoªŸ/³8ôñY©”H’q†óÀ°F#á _ô»µnCáy”NCœí/ª,´«ü_¢Ñ_;âïÛ kÅ-»ßÞ!(¢¶çÝ:̘í³ÐWq{¢,Éjüã€ÿ7Žþ þw! óæÌ nœqh¤GE+¦ò¿(jxlËbÎ=GfÍ.„Âö«ÕU@V¨¬Y-x<3è½±ð§ÁÅÕ^½6íêñ€-þ}õüˆ}¥Ÿó§›Yò¡µ ÊšþÛõw+ñï˜?Œ?7aᕾ\êJÇáZí0ëÎÌŠ½ê¿¢·^u5äUÏw&Žkx¢”ˆ-~tµ¯z)¬"6k’U‹ŒR Ù”KØÜ–/½(œ+¶™óbD§Ómtºr™Ë“^wÞt1c¿¤úw)¥3Ñ£Kk— ^¯Qo›O»Š»ý©k°íoGÊ£·»Ï9‘).Œ®¼EKõ~(fXMX›Û 7«èÙM¼ºÄù+`"•J¸V†®PÕ*è,G2µºaX³¿æî’ ÅºÅýðÔ<9.7I|›w™Vž¹½•àήØq¶Jäº÷[íN£j¹÷¶„ÕÇŒÈjbç¢8î¤X³ù´«ËÇ/Ê -F” 0Ìy½QÛµbºåí"™­Z­ j×ш*¨?“/ƒ…ë|-BسÌè¢t0(DÛnSíŽ}­¶ÏÛQ|!N‹ &ÆÐwF:fçoKêÅBUKÃtN×½åë¯p‡6âJ0FŽ_:³¥3›+*Q±M‰S`riX}žð~Ò>.=ùbÌ[je~ùïÁåû¿¾úåMQîΚ.Ø[ßËä:"ùÈN(ÎM*U‚݃^~Ôót+KŸOT" (¬!ËB$-O}§<¬›*ìuÁNj"Uµ8Ð=Ð)Ùoô=ýÖ!ø´âWEí\bèÙ^Daöéõ¥^®öòhöâf/øÂ(=*>ï‹’î3kôÞrÆBÁÊC}¡íVdlÓ‡I¯â÷\¿¦Ö%/:¶ïjMûC1n än^ñn™\q:L»û‹7½oÖkgâþýz,!–ePðyŒi6ïtülrBÂX¾ÌéΈ½bÔ-Ä.ÈÞ©]¥Å†R¬ÔDè;§´)nnP”Qäë®å©y¶ä¨´í¤C£A"²Øjh`DµF86¼Z#]­®Öˆ€Vk„#ÊUk”ÇVk„£C«5"QÕá¸Õáà¸jp\úµªÖˆ€ETk„£bª5"PÑÕØ [­-Q­µõÊðá`yˆpèPÖ´wCÊC$']’‡çzÎ$q?ò1Ayˆ(`ø2ø¾@ФíÂ3›õv¯õ,£*sþ¶OkQ~Q@ã‰b-Êߪýóœ|ñ¬Eù„*ìÏCA#g-" ÛBi”Ÿ†µˆ@çÕiŸäkrTÚvÒ¡Ñ Ù€èœÀüAàŒØÀšÏ§Š ¼o†qN¦>÷YE À]K÷$l­ å2½ P﬩3"okŠ:yX-ö+b-Èd™BB ¢—QJnBÜÜA0eLxL-JÐЙ1þÁàP¯åÆÍgJršÝák†dS]óS®—èf¾òg]?h¹·zãE,4‹8c='7YÜ!¦3<I¼D…&¼Ãדåºß N<:×n] e™ïò3K n%S.1:¦~TÛ8w(£¡øÄí ß"gQ^þ3ÔZ÷ªý2!ù‹GÃ$Y®5a£·)½ßg²I¥õü•-¾çï#‚O uF<(¿+’ÂqUôqŸ|áo‘á}áÊø™Ï\›JOzz%; –OÀmÿáJ_8£+½¥›ö¸6ì²®1nÚF³3îCsÔ3z½f£3ì ë&³¯ôeÔ&%M¢GU0v\ Dytl7Ÿ˜*eàB׈ŔJÙÊ1¦Pâ:Û˜B)@r©|Ë•†f*8KµJ ÷#«U1Ò6É­–§?å=I¸®T™ce«Jx•w|”;k²`$X4‘ß"–eÒ­˜w« a»I·ŠjÀ"-…–Aqä\©ÝQê­ÉÙàÔ*`]Eî°Ð¸È޳ÇW¹ðBvº¸ãœ%¦Ï¯‹y¹˜N/=îàÞ+¼@óµò°cËYðĆ̟9n¥ù…]ÄÇïÛ†Lù“7ÇÅXŽÂ­Ç£½\=jŒ¦xŒ\¤ÉHl ÈÜuÚ§¾élðn,|ƒ±ìáÅø!…‡Mv·Ò”v ´ߤØ`}ÉvÎ+[.Ì‹ F`y‰F`E_ý’ œüDõFm0KºdzTZDb LØè„rëïçÌ}uù³ñSÌzçù:±,oþȦ§ÿ!›D‹Ô£;YÉDN­Í”âìC ‘âì“”àì“Y”ŠYS"Þá"è;óØBå{bäf3†>œŒQ>W¡R‚·{¨T¨ô©ëSäI ±È“|•r¢áHâ@F¼\AøÉ·ÜÀIIk‹“ï®ÅÉœÅOƒŠºO´‚Äܨ¿ìZ-é.¸@¤z¹|æÂVôeÂ$긓rM—kº\Ó§vFÌÞ̪]@õf¯]ïn­Ÿ7ŒŠ·Œ~Z§bϘ·í ÔdÄy¤,Y`?#0²£ aä²h‰c=Ò!Î/âê"¤ÆÌ7Ò_KDÇ;ð"‚Å0}›•âŠ-±é  Jö\ÓôRßO0¶‚ªÌâ¤s+™Ìø•²)ã"}2)Ï+ |&›ÅZÕÛŒg­yÅqǾUùb…öMõ®N­ói˜BäO¤³ ×1j 'ê¤ÜÂK]שµ\Óªf«Ñ¢ÒŸtˆ‡Ýö°YëÖñ˜Ïh³Íl£Ç|ÓaõN¯ÕÚÝqþ!&Ø"u£2¼û» H´k5²)\91&äÖâ&U“×KA-,îÑpÛ÷Äþ.cyçÈbêÞËÒó=–ðÈÌÑ’J1€Õ;pŠªÕ!ãI¯Šfè­ÚèäŽú“*‘: k®žœîõ~T­x4=ÀË3üìo+ò–Yõ/fªÆle:cã±³h¦,¹Ñ¼uÇÍñ­}£¤‘°ñ´ÁJƒAêm=®™õjŸÉçðAMT*!©ú¬=’`Ü„á<0¬ÑHø¤†~®P^8 QYøâ)Ÿø{ÅöCåÒoÙýváüJ…Ûžwë°$1À;Svñœý²šä ’·î*äÍ™Ü8ãÐH)Vœ±v¡Ú0?¶a1ç³fBûÕêÔ³­é„BÍjAýñӇ譱ð§ÁÅÕ^½6èê1Ü$þ}õ<àt¥Ÿó§›Kò¡µL`â{‹ùóß®GhâÞ1n%,u%ý¿VFOišô¿¢w^u5ÄUÏw&Žk¬í÷…Ó¨Rqý9“þj*¦D•ÀÔ³Xë¢*Q)Y“ºáAòæ~œ0E$LjE¦Óíxºbak¤B‚LJW&myRöÁ$/d''^±»êÚ¯©7Ñ‹kÅʽå{¿*¹Ox€NÖB¬S<ãmÖ ²€Lo…KûüåÌ݆[Ôìm¦âžœ ¦}p^|WοaÍþš¸¿¯òˆ!dðCMjF—›îú¸Ì+ÏΕàήØÓEÀ?T‰ÎAýV»Ó¨Zî½maUX/²>ÕÄEÿpÜI%°fó)SzX2†¼I·jEˆÇƒyDZ¢RP»¦¶þÐV)ÀlÕjŠ{så(¡¸ý“/ƒ…ë|U)DP±U¼pÙ©SRj7ºÍ5DÁ”øµK01â Á/5Æ÷N/iå1í˜\}n 
sîW©õ¥c_:ö×,âÚVéÖ¿N6œü·"ÄVî­Ùô´vѸ*ÍÉšŸÔ¬ŠÍüòFµNÉ-ê1É;©V¢\Ú–ÅYÉñtr£ž¦[Iú|¦Ͱò,•JX–áˆË.ìèKuؓÇw!8ç¶Ü8‰[ÁÝ „»PµN£ÛÛ•=1"ÛriXÌáT^åá”´ˆÃ©¼\ÌáTZJ¦QÉ…,s8Í/y8Í%pyþ2¦eý0UØül<ñU-u­TIè/˜:PcÇÉQiÛI¼Ïèë¥ô×ñuÜ[ÇÏòIñ‚ž}y¢x1O =¬5¬~A.[çAFÄ“ª˜\‚¼âÒT*2«–C£F,ð±rý~¯fCA=ÓQä*Á(És™u}“v¸Y¯©Û¯ô úïÜÛB¿•)p—i5œN7ÿÊQ騞ΈƒÌOgK¤¹)Zɪ5xÓ•ÿ†µxdzÇrã“Øø6\;ñô˜v<ÚcM»ò£¬lÆg Á^Kç‡Ó®Q}+Çãvò«szÉ,…H}äyÔ\qË )žôYrêhø”>9‹ŸÈ ÜS[çïå@tÑø‘¥ÓãÛh.›¼¤90?/‡„'Yrh1ir;¿ØîµM3#3ní6- ö®%'îmÔVì\Ïu°ˆWkX8p}©®Ël Rr¨¶¬pº¹¥AZÒŒ­.x³C~¶œNaŽ4Ø›§.^ö!Œ=öâZ€”íÄ7ÏlbÂæÞ8£s;ñHˆ-¢3›ósŠçŠ¡ˆHÏuø‹ ªUIˆž…(C–¼L&ÅüôAj”‰yÆø^7HÞâÒ¯u•ÖÄôí¤¨s+¾x>±†‰õàá@…–©9¸aÖˆQ«mJ 0@Da0[/6 ƒÂÆEaPÐà( Ú)ÄDaäüµÃDaÐî±³ ŒÂ”®vs‘7 #%P6 ƒ:IšP=°(ŒŒvÒ¯+¹(Œ´$pF^* #-¦Œ«”q•2®ò¼²ÇüAàŒØ ½b&ô”ß& ãú©Üj°€eQÇÃq×J³¶VèÏ¢wÖÔ‘·5Enî‰L_¬¨†7eL@VIŒôÍDÏ8n‹4tfŒDdJÔk¹qó™’œ&Dœön¼è(çä\ŠâTÍ 9Éö¤â­ààL`VvP^­éüfï Ù™¸ld £ÍùyÝÏô³ÑÇüÿkvÚõZ»×«7)ãêóh¤ìx•›±­’ø–|ÐÞ:eØ: ŽÊ)õq ÷m©&wf¹Ö„Þ¦÷Ft§hê(¢º°ê#¯xð)I‡ÇãwER8®’ƒÜ¸¯,Š»&CusþëžõÛþÕ¾pFWzÿJ7íqmØe]cܵfgÜ1†æ¨gôzÍFgØÖMf_éK…³Fw¨Së4ÌòÊ,:µ9 Ê_­è‘~z}™¨¯²6ÒÑdÊVÛ¥rIÉEG’/< ¶qï¡È¦©±kj÷¸õû•R"B¿Wã衈bû–<¤R0ÉË£¾9 b&õ÷Üÿ~uù³‘ôxä—æÜÁ_ÿè¿Q>ŸßTîäËêýq”·òå1½<¦S¢1ñå#9Øo´èTÎ §bÐÓ@·m±é-Mºâ§×´°•¿gËzÇF$åyLäDâÜë U•¨b%.„xW§VóÕ©ù“’W@Å?“\8QŸ £—º”ÓïkZÕl5ÚT—¶«!ntLÖêðµM£9®5 «6ª=›Ç½f™Ívþ!&جhŸ¡êv\ûéçËhj•ºÙ©4+fƒj}>  5[›m=Æ^nOßÚ]Lëlé»D4½z§ñ´¥{[µý+[[5kÍœ©“½}€1#ÜŸ/ïö_øƒá®i{zÿ8ð¹ÃxSö?Îs¯ Íé½Kznúv{÷zV>—2v výQ8ó}.üAŸ²;Æ=.~|{ú¹.¼[} ÄüwSo…H£¿ðYð¯ºá€/ëQTiWŸþÛÏó©u?ô¼[-ö¨ð5ÿmìš>Ý׿ie=®¶-¬¦OînÅov‡—εr ºØŒéq;ã­TO['xEQù§þô†ü×.+³Û«÷šZ£ÝìÔüwoÂVhUâ*j:R¡õåÖyi×Ú]®µ.H…v~ºs@ÔÔÄüÄÂ'ƒõ:­S©½qïßsEªëÌ-)~7OîÓ ÏJH¦ÄúgËï÷óêf O²&bÚî±eg<‚À=zЉJV„%0)„D‚˜{G†½~Ecg^Xb㱎µ£"F7RŒèÌѬÿèø}íJ©°a»YM>|¥_¹Ür¾W|BX\ñ³Ø~¾”­A_å’®Ü!WÎÛÈèü°à?Œª“]¹¡(«… úW®¦šPÖ¾º?Ñ´xºyÏÄv'9"¨^׳eüÑÄnÇXâOŠ™ËàÊÙ™NL×¼)‰J[Ž0Ë·ožÊ+ê·ï„è(þЯV7>o° Lô{õZMÔYìÓ»gq×Eí‘ið:"Þ}Í¢ê#gÕ¼¥ VE†cÅæ¿Yÿè?ÙýŽOÞòß$Ÿ´¶ÚÖÜÇ(;õ×Älmøê“iQÍ­ŸØIïüµDÕW²ñãßG[öÇëä ?Xc¿ZDó'…O˜N¦¿KÂŕ޾¬Œ7åÊ=7Z½û.t"åßR@øœ¾ 2LèÙ&AÊ’!§Î®vÛ©çÍwVtÍ/öûœ³ gÇ¡E-"q¶^sbò)˜üdJ|“Ì&;MµÍZ­Õì™°ƒßÎOâµÓ4uoa.`1®ÌCüNé¹ñ½ÖnµS>—¤jשw[æñâwÎã8Ä7øÙ½Ukó!0k­F«^ëBñÛzÛÁ½^«ÔÚ-³×­×›’sQ¯mÌELF˜Žýk˾E—îùs<¨Þ™Éמ mõé¤xÓ•;³ÖÑ=êvºf§£¢G?»&Uµ¢7MšŸ>jÚјF½nÖ{Ív3{Uîû´r K×ÔÄÄÊÙT¤¬h/nÌç%32Z),-å”Aq²P(óA*±à‹€ðŠE2  ŠEx`hÅ" äÌŠEXLPÅ"4(´b‘$0¹2€*É€b*añ!\¹HÌ-áÊd‡&Za•Çíž=;®"º7®²Ã9§—Ž©H[ÎÅþW²#Á¯‰Ï‚àGf¦ŽË> žöˆ(¥]«: „¼<ô#VÙ?œ ôüûwÎ,ât3©»ý`J9É,á§šw\9 _gå••ùRkµÜuò,I—ÔŠŠ:“{5ŠäóqåG¨$]H™*ͬ¯þ$²¢­Ú¨Ò".æ7׺³œ©x” £Wúæ¯Þä}ؘ£ÖÌ5²Q&N•G&Wˉ_´ }¼Cqy#½¬7ÔÓˆ¾1›Œ‡7ÍVÇ*mì>½áWe”¸sç%‹»sþž ‚¹V·Äâe‡>>+•I2 kö×|°ð§ WÆc¿¸¬~u„ÒøŒ¹Üµ¬<;T‚;»’äHU¢ð[¿Õî4ªbñ/ŒªÉ²r½ÐßÑ|Èޜ#J<Ì™ïx£"¶kÅtËÚE2[µZA=²½iða§¨‚ú3ù2X¸Î×"„={wU”…è`»ÑmêJå\«í†î̬ø”ó×B\AzÕè2¾„änÖÐwFÖ^s z ¬"g±PÕÒî¼ébÆ~k»˜­øA¹„är…÷èÒ Å½B´þ X ›ïm¢—˜ÆÈñuå‚—jŸ2ôåyé¹fšñ¤âaÛ íô…rõù³æ“¶ðñCõcÞR+óË.ßÿ8øõÕ/oŠ2pwÖtÁÞúÞLif;£Ù6.T**%A•ü¨çéV–Æ×*‘Öe!’–§¾SÖMöº`'5‘Zz©¥—ªÐÛ;è´­{M…Ï߯@QÒ}fÞ»Ñû*ñ®®ÔIÍYÏ=+Zs’$4#^Q m÷µª÷ñåÂ+; Öþš‰"ž‚Ä[ôI_G±Ù<¼ÿÑÙKÊNî@°Ÿn˜Å^%“qêÓ%îÜÕZÎe±n|êF”šÜ6OO\µ«ÝV9ìkÝÜÌaW¬ë¥Öí^Ÿ1³\RÆíåªÝ†§b<éõ©jŸòØßÓbÑ.w¡íá0vûi“öÊÝþ•-r÷[ 0 w$¦,dk:å³ ½yZ#3Ž=öü(q‚²½“¸"d¤ÇBß8£sã´,%´„´ˆÎlÎüÀsÅPL|o1Ôá ºZô$‰q1d‰ÓIŠ=¤Ùă½áWe”¸sɹæZÝ?)Ï-• I#í`ÉTƒH†¤N†¤˜• IÕ­ÌdHA€dHª’!iDÔP2$0d2$…è`f2dn9×j»A˜ ™¿-˜dÈüÒ0ÏÌIÔåA¹™gæt‚‘¯ÜH/Õ.>eèËóÒs%I†$Ú¦ä“!ÞÂ’!Oɼ¡“!éDƒ“!ÉD¢’!I¥J$CÒË—L†$mȲIËSß)릂“!éœTP2d饖^ªZoï ÓO†T0ˆdH:éˆdÈRsH’!é5ž ùmQ• _«z‘7R^40ò˜õðdHBª€ øtÃ,àdHšu2dîV-‹uã!Éߨ&J$Cæ ËJ£Ñý¼Éùu½Ô:’dÈSV;édÈcÖ>,ò¶´Àßïôêíge`‘þñVŒ\¯o±ˆth4H ùãÌΈ ÒòlÙé-pxß& ㇡úÜgAv® wíÍ)akE¶8 ŒzgMy[SÔAì8b±¢d@ò* RäÒœ)âæ‚)c«oQ‚†ÎŒñˆÄz-7n>S’Ó„d”CGtcíTó´’©~ÐÎXxãEÉm|{ .'7YüÐF3f2y} `t>ü꼞¢)áÝÿÄ—ßfÑ{øíei&éòñˆ›D 3˵&lô6M”ÿLæg<ú?ÊôíùErð)IY‰åwER8nŸxbô>¹®o‘Õéþ–锉ŠÄUñÆ\nûWúÂ]éý+½Õm´:Ý3êíÑÈhŽ:C£×lYF¯=ìv‡V«Vµ¯ôeÔ&%M¢GU0v\@ 9tlwf®*eàB×Rt•JÙš­«PâzÞ®B)ФÎ\Ë•*½3°B Pj¸³I‹‘¶™TªÈœ)2@k=IrLU™ce«œhšs|”;k²€É§y%¶,“nÅù® ÛìZTž$°*´ 
jžbìŽRhMÎF.kë*r‡…ÆEþpœf“x¼Ê…º°ÓÅç*(1}§Z´ÈËÅtzéq÷¾Xá𝕇[΂'6dþÌq£(Í/üè"†<~ËvØ6`ÊŸ¼H.Ærn=íåêábd4ÅSå"MæÁ:Ÿº˜kÏ6 ~e_ \Œ$õRŠp(7ý‰ä)UéPPJŒ³÷^¼‘])ÑZZß·df ö£Ö·spR#µðƒwÒóõ¼Êe¡ xq»Yy¾*ÏWåùª<_œ¯Ö3ÃʃÖK>hXÑÖ“WKEû<ݵÜÝoHÍËxU¯:‰x•Ú›Hå¯.FnP˜·»é¡å‡EŠ ì6ZL™ÿk1&m# ¸´¥(íÀ7i6(J^²“Þ” 3p·Ø%Yü6òÀŠ-ë%8Ù‰êR êÐÕ Ó£Ò"k`’“-”[?gî«ËŸŸâÜoÏ׉eyóÇœr=I§#!8'væÆ“‰¢™Òkš¾“÷ÅIÝSÔ—HÊóÔïÏd³H»]=! XòJD ^‰™ÁïêÔ:Ÿ’X|""e¸ŽQ \8QŸ ùñ/u]§¯žÖ´ªÙjvzTk+âN³[³Ç–ÑlØM£Ùb¦Ñ³»|ÄY¯7î5ëq³“ˆ ö-PGx÷åkqc¦Xƒ)S{Ž­¹ZX˜ZÛ2–[/ƒ¨ÌêÌÑ’J1€t 8Å@‘+ȸ·ˆšÙRðÈZÙ8 k®DmìÂ5 JQ€4=@BüìocåÊdæÃ‹™ª1›E™ÎØx[R[²‘°ñ´JƒAêm=®p©l©9T‚ÌÌ—ÄÎ'å`¥°ó v^àÒ×ùe•¼ÎÛÌR×ùJ\çí ´u>ŠÛ*eO²„u~ ”êTfÉjiük5Í&,Q³ I€ú«)×Us…ª…-/‘ì'/dgjŸÎ7Ç»êÚ¯©7Ñ‹kÅj*ß:SeCŒ)à˜ki«½ÆlÌ/Y'êï7¯éï7ÏK/~µUB*?æÜúåË‚Ý. (~Ìæ]ö;¿H03Í{h™oº—/¸òÞ„/näÊz—/p^ŠË.×}‚;¨.xé±—{é±ç²‡çÇ3-—9¦åY õ]‚›µZ¯a>íÒ ÷q‚ ©{Ç=¿TDõ\–êúñ v„d ©Ói¼€z¾)R‚|MŽJÛÎkÚIÔ×y ô×ñ Ó[ÇÏò¼ð‚ž2€ü-¼˜§,«‹ãÕ/È%bIdD<¡À¼È+nk Zj‘YD±ÀGZÏä+^…×áÙ/ò2>šœAµî‡žw«Eãv®ùoã}{_fêz®ò¶Te}rw+~³;e÷\û§ãŽ.63lõ¸qbž¶nà¹ÑÀAÔ@þ©?½!ÿu£ÑmtÚõN§Ýlv»µf½Qã¿{#~7ÍñΩçÓh}¹ušš\d«Ýh¶i¦iî{_ïWÓòÚ²o˜6s‚€¯‚;3©×é!ö\{6àÕ§SÅœÝÇÀ=j™Íz³QSÒ£ŸÝ?™rEÔ¢ìs>èIúùŽÆ´Z­F«Uë4@ vç§‹°¾DMMÌïO,ÜÔ?m5íÚ÷Îñ=wÆguÝÎnÿƒmÝõ™wÛ×>Gy 7^À¥\ü]ûÎ ÙìâáL,ø³¾vöLŸ¢GR•?ëgËïÅçw;ßYÂãÇ*ñ´ Æ–Q÷˜ƒ”€Az±>x\BÖÞ^Ñ€a_î ±3ó÷°ˆIi¯çxÃçÓß„~tü¾v¥W_:Ãv³ºª$vår»ñNXñ afÄÏâÝêï{>7„Ÿü»r£¤•ÈÄÿ°à?|kMþÓÐçj3÷ü0è_¹šfhBYûZü?Ñ´xºyÏ„w!ùÙƒÿ³eüÑd—Œ±ÄŸsƒÅЙ:á½!Ì®cW®5òÑEÄ{S:·€üC7lÙ±IÅÀÎ(ªÞh›£f«=6ìáÐ2šívÛèöê£k5;½Fͬ7zݤJøRI“è)"ñLÈn–’…+Ü,%B®póa‚E×´·ZˆÒÉWoFIÁTo>*÷D®z³”`dõf5á`jIiaȦðR÷öêͼ¿T±Ð´@öˆµÍVÇèÔÚC£Ù¨YF¯×lÚ¸Öh¶m»Óè臎ŒI[â›dÙ dI0íf³Ö®·j-š$áºkY/IÎ…ÿXc_A,lnpƒñb:½Ïà ÈÍŸ±ÜÕëz»c¶a4&;?½5±DiQŒ*Í2ù—›s®íìnõY¿×öÎ-E/qŸ¾|÷꿵o^¿º„¥®I¥¶í”ž,ÍU~‹X¢Ï·€þ#KÃ…ijZô©Í$™ þãøç ×gÿ)Ÿâ‹šøöØr¦üÑߣ/µgÁ­3ŸóŸÕÓ/qk/Ò9—ûIñ?”«ã^uh6-³ •²ýÓ›Jÿ#.&Ú‡(Ê}HE§èY4™ÍY’Ÿd¬2ö> 7W«WÌN¥µ›*- 6 kc®>Ú…1Ù§9—É+öx’[À‚ëšçiqà5Êòä"?ŸEá÷àÆòW"«üüä7Î8<ËÓ·M›ß‡7ž›ŠŸ&9T¢»Qà_¬Kþ¯ÆŸkTÌz5à›‚Á ø-?2i³š’”‚â×Û±žêZIÿòøÙ ¿eŒÖ~£aì+³¡0$;fè¸Õ§Ú—C^2I2%†»ÒÒ¾‰¼ç|éÚšÖÐ8vó\‹0øÿû^ûüÓëךiVZ•Zô»ZÇìiß}à:õ+L~aÔ¿¿Ö¾{ÒòdJ¿ÏÑâ?÷Ok³Á•f<®i"ÔÇ‘>í«ô[ —v-XÍ Ö—¶´”€…±Ób®9®0‚ž¯Å—gòZ÷γFtm1¤M.œ)¨}/Úýäˆ]v0u‚s{ê¸\Oæ–˜¸bKõ9Óx~5ö«{¶Šêú&QͦèzÜ5VãVÍ ÄÌ8¼aÎHs½PLüßP0g|?³õÝ÷Z|å+?m¶ïÌ¿ÙÞ[‹ÐûVûþ‘…©£­ª×ºËì±f¯ÝìÖ†»ÖèXÝfݲÆ]-Wðm¿ïâÑ;]kªYþd!XVýná®ÌÜ÷Wn5k(¤õ;“CÑ×û¾&Hå'G7vúZÜH㈄ÌÄ.-‡ZŒä]¢ÐšðûíÙ¹üy~å; ¬º²yúÀ¾†¾%XŠ¢ù¿Å÷‚¹wÕ¨¢ yz2öü[Þ‰–4€©‰½E ^ÔbÆ[Ø þ,E×mÝÊ2(x±ÅyÉŸÅHk(>Þ0ÍŽkîîVkr² 6(æ©çÝ.æøË[©¿¤ÁTÞ¸Ë׊Éàsn¼sÏëo›Ï&NÆïºÄdT}PaÅ)Ô \ðÕè†IŠpÏwþ­Û ¸ÚóeG9˜È=pÆb\õœqÓñ…ﯼçÄÀó¸:,šÏO!•*p?€ˆ©k$JxðÖ*5 w³˜+ÄPkGRi—^;lÏóGI•XjöFÔÓ8rÛ㇦{bÜñÔû"8¹KQáÓ§9+Ø5*««reY…áꌔ ;3±ÖU {bX” ' %Ø-¼ÐR‚Ì=µPÍH§Åº•€§™öJÀ£xâ?3:Ó-^ ¨YŸE›B’rÊ'S㫤劆›«`ÀOáJ𼑸«hØù!‰ùsŸëŒR1i(K™m£‰k{_Qºzyã}j…ˆÑ±]‡ÿ{L ëÝqÉ';^Þ:C¾ªéÍ̲oWÍòJ°ÓŸêœÇ™ç:\O„ÒÓïkàŠÛîßxÀø°¯v©ô=‹»eL±X9–Ž0Ó®5U³©Oø.PÀ¶®FBrjå>rl'T~> É]#E÷+•‰Þ,†”BæÖ¬’\#T¾´»äÔ›¡Ü(³Öó@´3Q²â&{ËV\GQÍIˆùܹªŒØmˆxë’UH ¼ð§ýèoýêêñ̺«WyöƦÜÙ«ý$Ò¥~«Ýi¤ÙGÕäíë…Îøžª±Ìú0ŠK³QŠ“Ý`¸™?°½…ö5³U«ÕTàο‡o7ºM*ø˜Ÿ0Õ ÊĹÁ€;oŽx Ø®Ñázót¤É@®óuŒEb}ÃÙœ =~£Éמž âZúÛKü7qŸÞº ‹§â”Z”…,%7á]½Ìù\ÌlѾŠz±Ó;1î‚9äæ)šhÁ¦•¿Âsõª‘ó¿¶Lè™úâþ-óií2ߺÁ8¢'±b° ^uóe¯xÜ‘« qèÊu@±Þ þ]~4ó¾Þƒ6­ƒ¾DÏcâbNñX×þUÐ¥êùÎÄqèÓFôérä^¯o˜}«½ˆÎ*ãÔaïÖd+‚úzÏ‚(x4j’C[lvöÞ†üŸÕRü»öæã§W?¼ûùã?´wï_¿z§½~ÿë¯o^úùý¯ÚÛ÷´ß>¾ùÀhüØîðÿ©ÑÈü×›×ZDoÜh†­1ûÆÓþߣíoÓ¢òZíLÄï´ÅLè@§#dÍnùÌhÆ\ÓÿG‹Ú²=/¡´ÿÑWŸÏüð*›‡ÿÝ0;íVÝl·{J·Ñh5šFˬ&ïZ·Õk·šÝv£‹æD¨Øï^¨iÎ÷$sq4ª³Î$LJi Á*Ë:éU5mhF>2X/û˜Øè Gýz?ëþÕ¬¿m ï{ó°Í´Oï u£‘•YÓéê¾>[¬ö Vmñ¿¿R¶_ùX¨7bÛ¹¢^@Çü™fŒ5Ã'ï¿kÕ»«Š2GZýï3 º¡Ä³;ƒ±·ˆ8‚Eü’Ö/̉æ¸w ÉÏ›ËåOhœøÕÞ²ìBlhÐ;`9 °mEÃh[kG•jÔ“¯ˆ>°¯ÄjwlT>þ|M«´ƒ†TÚF!JTÚ–ÆUÚF¡¯øÒ£“ Õ²CTÛFá>¯¶M¦s«*oâxj$çS#`QŸt¹œhñl*¸x6W<‡­`PųQÐ࢜(TdùL6¬|&Q> (Ÿ‰…•Ï„â.ñLë-$mè|ÏßUx‰™…ª0hÉSòhß$ûoí/ÿŸùåÓ¿~ý×ðÚ³_ÿeº¹e³¯—~ò.tÁË£v @ÁÐã +X3ÿ ê²H šn­±nò Ì))£h ,:´p`|P=¹ÜÆýgFèa©Dœ¢^@«Œ¦’`^q…Ìþ󊂵z#6º†ÕŒf{X3†¬ev·×êt»5»ÝngVÌÛ¬SÓ¿06g FDA›ekJËÃÕ1”³³–¡ØÖIÅÑMó5¡»Q`d¢¸èº áQºRr… ¥…#‹JËÙUàP[r%`J¬IR²O} 
«~kÁÃz«F¹þÒ!7›¬Û±FF½]oÍ®iÖ¨Ö0ZV¯Þ²™Ù­·Ìã:¢&û2ßæ¬¿L?p¬åÔ^È¿‡f~vÈ÷`]É¡M Ü9î„ì¹\ù27µñ„ú Zþ‰9ÿÐhCÆ\-¾»ÒF &žØˆOˆW”ü/V¨¹Þ`êMâ«ç3í ÿN0g¶ÃO´£è}^xã¢7ºÒë¼Mùd±}XÝ|Ãþ1öE^ÙQBÏ.-XE[fQï¶$X·‰¿ÌûPq§°^‡ïÐOûšõ¬që—”>k”–xÀg­J­[ïtjm£Õîf·Õ6[f«Ûh5ù26îýö…ª&}O2#,’}â |Ò˜õª·9^3ÖØô‹çº¯÷ªÃæS7ªŽÒ=eÜßxµC¡ÞnѼd<¶^É>c̘k里»;"ñqþåa&âåaúå!ýòŠyyˆž3w4à炚IZGžÞcbÞIÂA‘ï$±ÀäÊ€y'‰uÜxî€ù¾çO݈×![»²—Æ(ìÇ'ë':J à›C êîpýE\ƒ0{±ø0ó5íÍœ’~¾ôD°ljâÑlƒò)‰hÁéˆÒ—A²©ˆhdiˆ§»|·§¶Tk+b»14›ÃNËè²qq¯c Çí†Ñ2›½f}<®1»žˆ ¶'=(CÀ4!à|7Òß–ø&îÓð¶ÞP€ÛËœ9lmSú‰þ[‘3õ¼ÛÅ\[D¹2™²“»×@T:¿¯Ü[³iåϺfQ†Í©¦ ©"þ?Š ¹ËcÏkWøÿtÛf×hó=Ĭ›f«Óéò¤ÞífgÈíýö…ª&­eÈíš‹v§ÓkÖž%Uî¿íß)l‘ì<ùt{›I”O×vÛþ_÷5S"Ÿn¯ò òéÀ¥Ë§ÛßxµCµròÃB“Owl½’ͧ˘kÙ|º=ÁçÓíCçÓe£Áóé2±°ùt @l>‘O‡ÆåÓ¡ùtpLD>—O‡&WD>ŸOLJ?÷cn ÓùCË®ˆ*wžïü;²­·—dKf#F÷:®Ÿ-ŽŸ”ðÐË%êž2™Œ%á øÂãÌ ¡G€v¹¦¼õRUUtMBÙy5QSòhéQE#Ogª»æ8‘¨ö:ã(âåVÈ]V§Þ«?mbÙnY¶`¤en”|í—/ñMܧqŒ?8îÈq'{ˆ1°½ÌˈіÚ"ì÷ŠWÄB—IÅB!/ñ€,J³×èÔëFǬ¦ÉÏF£×2ë½Z6 ž/_(jÐ÷$Óq`?R °Òæ •0‡Ã?{AÐ •ا (N ùÅ)Í)±·íJB½Å¢a”8²NÉJìŸhY>‰½î–O"Ó3CñId£Áù$@^'†Oì cø$p > 0ŽO… ä“€c"ø$ 8> 40¹2 ø$p x> 8><Ûu‚%»§ÅIÝòT#9¼QJÞ¡¢P³rÜtáWÖÜZ>ö¶7›{®xf‡»Ó ’/qB%z.$zã­oWŒ±ÏöÅóouÒ¦,i{&‘!ƒ’AØ\]„4>ÄuYhÇ ï52Ä*…ö¢ç6‰gaу ÀÊTh““T˜mËQ*oÁŽ-ÂM案ÐÀ"86œˆŽ‰&"€C£‰Ð@"8¢>”ˆŽ%"@ ¢ˆà¸Dpp—~m ˆ°"8*†ˆŠ&"@`+,Z‚ˆ <š—Góòh^ÍË£yy4/æåÑüÍ¥2Øð° v *ƒŽ‹É`‡£b2Øá¨˜ v<*<ƒŽ Î`ÇAâ2ؑذ v$(8ƒ„›Ï(å4!È ölKOø– Èï.øâáL¬”³¾v¶SøÙòûÌŒÉL]Õӄ챕ø &tm«W_«öÓm‡Ì½#Û¶Ê Ná°!á¼›hGEŒn¤Ñm”@ÿ£ã÷µ+=Nrv†ífªÇWú•;õ&ïØ›ŠO8îØ?»±ÜÑ”½ÉxA_ûä/Ø•;äÊyûæŽ/ÿü‡oÅEâ•ú\mæžý+WÓ M(k_ øgY(~¢iñtóþž‰•.9J=^׳eüÑX®c‰?)rƒ+WÜ'Ìa›ÙÔ BǘåÛ7OåŠõÛ‡wBôM΃~µºñyƒ†øE¿W¯ÕÎε³EÀ>½ûxwÿ;œ¯£”Ê×Ì£ÄàÕ袶*î˜+6ÿÍúGÿÉîw|ò–ÿ&ù¤µжÖà>Fï~MŒÀÖ†¯>iû,ÜùIôÎ_û€øQúâUüøwçѪÿñ:ùÂVàØ¯Ñü‰AáFx2:§÷ŒVåÊ…cñ«7u·¶ÝÅEÊo<³þÔ§à"2ºsp~”|9uvµÛrGd>¸³¢XŠØïsÎ*Š÷ -j‰+°:OìrbôoÊÓþ‘ͧÞý¦³½‡}뀞v¯õ¢<í"¸’öŠWÄ—IÅ&/ñ€ü`ÝJ«Ñk4›-£Ónõf¯ÙéŠØ~¯eš­L‚°½ß¾PÕ¤ïIfäÀj<a`½ÍAÜ7º››‘EØ^u@q„É/PiްýW;êí KرõJ–&,c®eyÂöt¤1K¼Ûƒ…ÊŠœ÷ÆSç=‡|`pβL,,gËY†½#‰Þ5òq–¡œepLgÇY†&WgÏYLJ'Fƒ1iÃÚ ò°v£ k—aík7èÂÚ •a톒°vãðaíF‘aíYX»1S 3X¾'40‚ï Ž ç{‚c¢ùžàÐh¾'4ï Ž(Ç÷$‡å{‚£Cùžˆ(¾'8®ßÇ÷Ç¥_(¾',‚ï ŽŠá{B ¢ùžØ Ë÷‡–à{*5屦<֔ǚCk.skvÉéÕ:½nçië%AÛ‚P–$HKšÞË1ë àaÌ:@³ìGÅ0ëÀQ1Ì:xT8³̬ƒƒÄ1ë ±aÌ:HP0³7Ÿ)ÉiB@ß@ÝÈùðMyG)„’Ÿ9ñ`B°¡Ã¹õ0LˆC€°Ø¥_ºñGãÆÃì*Á"Bºí0P¨»_0j&³÷•£…båA9Ö´ç–57÷ެ”lî4rBÈ@ª2„'òàãþ¶­éaI.MA¬GyÈûâ2ÿ3Ÿ¹q‰y5¢”Ovl’ûW‚áJï_é­n£ÕéÖ˜QoFFsÔ½fË2zía·;´ZµÚ¨}¥/Õh]@"ñLä DÁËB¢àEì$D{å‘‹®‰önò§ 8Ñ{¢sDRžÛ»qOvñ€VïêÔŠ¿âÕXã!U‹Ý«hÔ¶ȦðR÷VB¢f›JÒ!6Çݑٵz†Ù÷ŒfÇÃVkhÔ›Ö¨nÕk½F—野ŒI[ö›ùGQ¦¤ ÁdîmΖT(Ž2k&TrÿÈ_!ðŽô¨­Ÿ¥l'qÊÖ^ñJ²å%0k°Wi¶Ìz·×ŒR]L³Ó«×Úf·ÑnÖ;Í̬Á½ß¾PÕ¤ïIfäÀ*x<]îg#Òq w ¢áètA20]q[OD€âÒÑÀäÊ€HÄâÓáøðwµ¨3ü“8 Wº JzW´Š·ü¸²¼”èÊn ÁµÓìɾæ#œ‹`Îlúy˜ûÞ„Ÿµƒ™5š:.û(r:¢«×v­F<ê>‹žAp“úΫìNzþý;g=)3©»¦¥¨¹0‰ ì;P©ù{;n„´Äò²6…ì Ù~c÷]A(rê&÷jÉçãÊÏýÉÝ–2UšY_?.üIdE[µÿP¥E\Ìo®ug9S‹Ra'qŸ–eû°1!G­™koU˜8åï¦jígÑ6ôñ•¤ˆ‹?VB’¸m5f“ñð¦ÙêØC¥= ÑWôtBePâÎmœ—,îÎù´·Ôjo­Ÿ/³8ôñY©”H’aX³¿æƒ…?U¸2ûÅeõ««²¡?0û|Æ\îZVžF*Á]±ãÂ!•(üÖoµ;4b?ª&ËÊõBg|_Dó #zU]Œ(‘Ò3˜3ßñFEl×ðR¢T·_èôZÝÞÓf!ÁÚÍÚ*Ûy³U«ÕŠd€r+4¢ êÏäË`á:_‹ö,¡˜úàl7ºM]©œkµÝЙŸ‰þZˆ÷V^5zr¿¸âNÙÐwFÖg¨^«8[,Tµ´;oº˜±_ÄÚÎÞ¸)ÔåA¹„ä*†÷èÒ Å-D´þ X ›¹oQf’1r|]¹à¥Úŧ }©nNÔªÌÏ¥1ã —JÛ í4c¯ú<—â¤-|\-éŘ·ÔÊüò߃Ë÷?~}õË›¢ Ü5]°·¾—+L%ò1ñ PO’TªLâ¹ü¨çéV–Fã*‘Öe!’–§¾SÖMöº`'5‘Zz©¥—ªÐÛ;è´­{M…Ï߯AQÒ}fÞ»Ñk,ñ ¯ÔIÍYçb(ZsR#^Q m·"G!}•ÔÆþ_ÏZ^t¼E«ÝêÙlÞÿèø€¼u:ª€ øtÃ,öŠl¥ˆSŸT>´ÒV-‹uãS7¢ÔDà¶©xz‚ˆg©ÝVœNkÝÜätR¬ë¥Öí^Ÿ,ñ'#Qˆ«vžŠñ¤×§ª}j÷YÀÈp¬¿9%l­Èö&!P﬩3"okŠ:ˆ]Rì/V” ‘™¼Š„ùc£4gЏ¹ƒ¨~'ÇmQ‚†ÎŒñˆÄz-7n>S’ӄĵ@)6ÕµsÊÓJ¦úA{8cá%·ñ [,¸œxÜdñc͘áóú€ÀÈ|> *ðÕ=b=ES»ÿ‰/)¾+Ì¢÷ð[Pÿ Ú"’tùxÄM"ЙåZ6z›&ÊÓ›h]eúöüj8ø”¤¬Äƒò»")Wʼn@÷Éu}‹ ï WÆlÌ|æÚ,Pz¸Ñ+Š#ˆúmÿáJ_8£+½¥·ºV§[cF½=ÍQghôš-Ë赇ÝîÐjÕj£ö•¾ŒÚ¤¤Iô¨ ÆŽë #—ŽíÎÌU¥ \èZŠ®R)[³uJ\ÏÛU(–Ô™s¹òñC¤wDp*E\apÃý˜MZŒ´Í¤Ò‡“ òž$9¦ªÌ±²ÕL4Í=>Ê‚5Y äS¢`gË2éVœïZ°ÝÉ®E5àIëò¼ª(ÚZ“³‘ËŠ–v‰S€]ЦÙl´ëϳcçYègä=Çi6‰¼¯©ÛñdžàÌ0n â\…%¦ïT‹y¹˜N/=îß+fì(%¦v¶à‰ ™?sÜ(¦ó ?èˆ!ß²¶ ˜ò'/’‹±…[Ç`Ãêábd4ÅSå"MæÁ:Ÿ:¤kÏ6 ~e_ \Œ$õR 
˜«'þDò”ªt((%ÆÙ{/ÞÈ®”h-­ï[2³ûQëÛ94©‘\øÁÆ;éùz^å²Ð¼¸Ý¬<_•ç«ò|Už¯ÎWë™aåAë%´¬hëÉ«¥¢}žîZîî7¤æe¼ªŒWD¼J©ÅšÀg~ä…y‹±›Z~X¤ÈÀ¾a£Å”ù¿c£§PöÂwÂûמ²¯aQB×SÉ!³¸^;òüä[6»ŒèhÁÏ÷(šð˜>¯~‡*lWzÕÜÈ}Éòâã°”Í?(óí%NÌÿűâ…o¤ª>”7dÒ7di½¥(í@i¾á›òäéèK¶`›raæ^lØáÛÈK4+þ«—làÌb'ª7jÃVÊÐÕ Ó£Ò"k`’“-”[?gî«ËŸŸâÜoÏ׉eyóÇœr=I§#!8'væÆ“‰¢™Òkš¾S÷EJÝSÔ—HÊóÔïÏd³H»]=! XòJDõ]‰¹¾ïêÔ:Ÿ’X|""e¸ŽáîM òQéQ@~üþuÍ¿Ýj>í*îÓRVŠ”a 0«âêk:Øl5;=?™N³[³Ç–ÑlØM£Ùb¦Ñ³»|~X¯7î5ëq³£ç–¸ÌßhHÉGD÷¥+w£¦V±)Q©¬Ð[XˆÊÜ8àÄN#«ÉJÈ )Êzs´¤R 9N1PT øiÃTØ–ƒÇUÖF*]sñ•´‹×0(¡Òôé ð³¿Ã+“Ç/fªÆle:cãQhnÙFÀÆÓÙyjƒAêm=®ham¹9¤_"ñ2”F$ hç”r¨ÂÙ¹'»`vnàÒ×ùe•¼ÎÛÌR×ùJ\çí ´u>ŠÛ*eO²„u~ ”êTfÉjiük5Í&,Q³ I€ú«)×Us…ª…-/“ì'-dgjŸÎ7Ç»êÚ¯©7Ñ‹kÅj*ß:SeCŒ)à˜ki?(CŽv¸‚ù"ëDå¸TcÉQ,ÑÓõâ!•snýòeÁn”?fóƒ.û_$œ#¯¨øÕ °Ì7‰´-w®Yítr%Ëz“4`©TÂòÔv¨Ã¸ìàrÝ'调ꂗ{é±—{.{xþ2¦^Bp~¥ÓóKE”L/5UN#à%Ñ¿c6)"±“¥¯3 è¯ã;£·ŽOîKéO9ÔxPúS^ÕUðêäÑ´"ž(~2µ?@­È,ê€FXà#QÀÉÜe#²_ò<z]§w„'æŠ nfû5ë5…QH‰Ç;j¶ðe1GçÔ/ý†µx»_¦hø™ó'³^6 «'C0Ë·o Dc©h}”û„â¢TY•ÝpÛŒ'#z*šK{„:¶wþÇ‘£Zá"ÈÎsΓÿöRuòاWÿ©}^}¥õµ¸Þ´–œyÍg-øQ•´á½Þ8%óiI6ßµöŸ«?Ø^šÿÕ¨ÍB+¸ÕæVxÓתÞ<¬Znà §¬ê{Sl< «Šՙ帕ûÙ´oÖš’cÛž·Î|î¸~܉„ÞxAx­]ü]ÚæµãÒâöåN˜HG[Ó€çC‹0âÌ礹ɫyáÁ¨ã2ùGž‰1ŸYA,áu*Κjq¥tí‹hoE[tiIÊ•ùòÝ«ÿÖ>¼yýêrM#süÁH_å‹®tIôö¹¥íÇŸ«Ï¼Û ÓÔ´èSÉW͹à?޾pùœðŸòÅpQß[Δ ¶s`ê­ŽYo?/M·¡ô¡žŠàÓk/"HþÇ™¸ü|ÿ'Aå§ÕŸ-ß3+n­nÖzµZÌ÷ù÷ô)?¾Mõ¾î¸cO?×ÅuÓ–Ü{óþ»©7‰ˆD*å×{þƒY0áÿz͇Ÿi3'úZRiý\û§ãŽ.b#9ãÇÃsíÙsóêÓ³$s¶f»/wô¨×nŠ †*zô³û'³Cn÷´({+@’¾~ˆÆ|`ÖHó†¢EÚØ÷fš-Æ›ÿ:½†ÕûúÏÁ‡ä_âHï‹‹Žs=éëUkîðy©®rþùžñlBVG÷àéÜÌ þ!æo9çŸë¿3È…L„³­¿ºüù’˜ó•ÿ„Kò“ï-æüñ¿V/>¢ú¯DÛxô«ŽêMä?ÿ¸>Žžñ¿ái‰smÿó&6ãz¹]-ZÝf«Å—k°¶÷|ÚØýG{»Ú'á| Gïù7ŒýT65ñ•çh§´ýRÖæ†ðÌCùÎ ÙìâáLhüY_;­lQôp®ògýlùý>W&[tü€)žÁÔóæƒ;+âa¢wº@ÜG)ZÈùÀ÷ŽNâåëpr[(îDi)²Ü€éS°Á-»ÏHª”½=0CÏ™;ÜXÁM–‹GŽnª)1mkÅœHÇÖž:|9©&W{u×F ê¸ 6ðÜßõ#&Ê©‹Ž/ÙÚÝñò2q܈VǫͣH‰HóÆ£î½ëØáŸžÓKßÃÊD&lI8©Ã’#!EØ") BÜ" $p‘” r‘CFºÈ À³䔕™ü¿Zî:y–¤ˆKjEÊ&~É¡HP˜œÓ %‚É/N“Caè›#ˆ9ÍÆä1qª< 8³K>ü¢m(–éeÍzÆô2ìÚõa»uö”6ö€FŸÞ𫲠JÜ9#Lþ¹V·Ä ¹¥4R L1„ÒHc,÷žûÒaU,'/Šš¦Ü1ÑÚª·ã¬ˆîd1ÉŠ‚0Ê ÜÃ,CÚ­} 3„‚ö3Íöh?ã ¡¨‚ú“Å@C( ÎDC¬ƒA!:¸™†FεÚnÐ0ÕµH C$ ˜3I§.Ê% s(‰Ã_õÓ ^ª]|ÊЗç¥'›—-…r›’bM9 ¿ŸEåä̆U…X4„]…V$”e…^*ŽmE‘|<ë }C–…HZžúNyX7ÂÎBì¤f‘¦”^jé¥àítÚ@L ªæÆB,Æ Rj cˆ"Í1‡”GT5í¾Võ>#›DNÑÙ$G¨‘>P;PlÀ§fB@$îàÕZÎe±n|÷¬‰8b"¡™Ùò„ºŸƒ|H×K­ËKÎpòj'C pôڧƃ<ö÷µX´Užó®/µ›ÝŽÙÚ’ —»S¬Ã‰) ÙÀšNAù-h`o.R\Hn{ìE¹¡¤íÄÅÕÙ ±h„ͽqF#æ¢(9ñHˆ¼HZDg6g~à¹b(&"½7P‡¿¢÷„èIr%âbȧ“7z4eèËóÒ“%IŽ$Ú¦ä“#ÞÂ’#Oɼ¡“#éDƒ“#ÉD¢’#I¥J$GÒË—LŽ$mȲIËSß)릂“#éœTPrd饖^ªZoï ÓOŽT0ˆäH:éˆäÈRsH’#é5žùmQ• _«zŸ‘79R^409ò˜õðäHBª€ øtÃ,àäHšurdîV-‹uã!ɑߨ&J$Gæ ËR£Ñý¼É‘ùu½Ô:’äÈSV;éäÈâµï2}ZÇ4;fgG–ßÞvlÿfÞTJ)TÚvÒ¡Ñ í·`˜Ègþ pFl–oËLwAÀû61`?Õç> 2sOà¸koP [+²ÿ  epÔ;kêŒÈÛš¢bÇû‹%Gd'³â 7*g7wL^}‹4tfŒD$ŠÔk¹qó™’œ&$®š±ƒº±vªyZéT?hg,¼ñ¢d7¾½‹—/®¶N3fy~0`l~ ú ¾ž¢)áÝÿÄ—ßfÑûøíªi&éóñˆ›D 3˵&lô6MœÿLæg<ú?ÊôíùErð)Ia‰åwER8nŸxbô>¹®o‘•wÿVw”‰ŠÄU²ãùÜö®ô…3ºÒûWz£mŽš­öذ‡CËh¶Ûm£Û«wŒ®Õìô5³Þèu¯ôeÔ&%M¢GU0v\ ytlw¦®*eàB×Rv•JÙš½«Pâz¯B)À$Ï|Ë•&Ý3—°B Pj¸³K‹‘¶™dªÈœ)2@k=IrNU™ce«šxšw|”;k²`ɨ¹%¶,“nÅù¯ ÛüZTž$´*´ êBºêíŽRhMÎFnkë*r‡…ÆEþpœf“x¼Ê…º°ÓÅç*(1}§Z´ÈËÅtzéq÷¾Xá𝕇[΂'6dþÌq£(Í/üè"†<~ËvØ6`ÊŸ¼H.Ærn=íåêábd4ÅSå"MæÁ:Ÿº˜kÏ6 ~e_ \Œ$õRŠp(7ý‰ä)UéPPJŒ³÷^¼‘])ÑZZß·df ö£Ö·shR#¹ðƒwÒóõ¼Êe¡ xq»Yy¾*ÏWåùª<_œ¯Ö3ÃʃÖK>hXÑÖ“WKEû<ݵÜÝoHÍËxU¯:‰x•Ú›Hå¯.FnP˜·»é¡å‡EŠ ì6ZL™ÿk1&AY%°óv#³ôu>€’×y{(uO„âöƒJ[ç‚,i_§¥:•YÂZÿZM³ KVçlÃFR þjÊuAÕ\¡jcËKÁ$ÿI Ù™ê§óÍò®ºökcêMôâZ±šÊ·Ž²=UÐ1×ÒV{CŒ-à˜_ ²nÔÞw^Óßwž—^ýj«„T‚̹õË— ?º]Pü˜Íº x~‘p¦ ’÷?вßt/apå¾ _àÈ•ù._ä¼—\¾ûvPðÒc/=öÒcÏeÏgZöÈÛÔ­·[­ZVö töíðSKE\Ï%l©®jGH¶°:FÀ ªç›"%È×ä¨´í¼¦D}§@ß8½uü,Ï /è)CÈß‹yÊJ°ºH^ý‚\"–”@FÄ Ì«¼â¶–¤¥™E<Ш |¤øL¾âÕXxž #/é£ÉÄóSqpb®LßÌþkÖkê6j]âéš-\¹oú¥ß°o÷Ë ?,“þtÖËfYödfùöÁ‚h,­RcŸP^”*+£²n›ñdDOEsiPdh4íÊB°²£Š÷‹ 3ï9LþÛßÄ}Ú ý³]x¯S«uš&¬©»>½G¬öÊ œá”iŸ¬àVûŽÞóo°©Ÿ^}ü§öyýœÖ×âªÞZr’ 4Ÿýµ`AÈFÚð^ oœ@‹R&µ$gòZûÏÕœôÿõÙü¯Fmб˜[áM_«zó°jÅcTõ½) 6žçUÅGƒêÌrÜÊýlÚ7kÍÝz§|2Iõ®Ñæ¿xÎ|Ÿ ЧܯŸê}ÝqÇž~®‹[‹Ìõ©7‰2Ò£¿8îd 
7–;Š"'ú,˜ðß~žO­û¡çÝjѸGkþÛxßÞ—©ºž»¼-uYŸÜÝŠßìNá=×þ鸣‹ÍŒ[=ngœh£§­xî@4p5êOoÈÝh6j­Z›Yk5ZõZ·Á÷F ü(nšãSϧÑúrë4µ[f“+‰ ³d;?]„y jjb’áÛ¹ê·ÿAÊŠ6’úÌ»íkŸ£Wó7^^k×¾sB6»x8êwÖ×Ξr£';•?ëgËïÅçw»‚YÂã§ñ ¦ž7ÜYǃhÁ„kó•8a"wxlM–Íqïø %¬ùzœ\=ˆë ­Š”0}…2¸e÷™ù]hÐ;àÛ$ðœ¹#n0ƒšIZGŽ®½(1mk`IÇÖž:Â+&W{¸'uÜ{߈£¤nÊ©ã{¬ã:dkWæÑ;3‹ø˜ß¸ñð’7|>]pÿåGÇïkWzuøÕ©3l7««¢tW.wqÞ ÏH|B¸Fâg±£óFÌWÐ×>ù våFùN‘wðÂÿð­˜Ä+7ô¹Ÿ{~ô¯\M34±³ôµ8n.~¢iñÞÂû{&S±ë<Ë9[ÆM¬KüYá17X ©ÞÂErìàʦÀ‰ ÷¦ô¹/’ÂnØ"xÚ±Š˜õN¥ÆÿÏ<;×ÎDóù»µ®ÉÿõÅ oD:íG‘N{÷ž7O'›­%¡*¥Ä+¯WzBpPÚ²”®îˆ¾‹Y6¢t¾-Ñ÷sú&ì!¢›m¤%™¹œòáXÓ)¹Ž€ù™S¬FÚ=dìùÑ4Q¶w3Œ±A¢Š„ͽqF#æ¢ç»Ä#!Ì-¢3›s{ë¹b(&¾·˜êðAÄòFˆž˜?JÄÅ%We¤¸ƒÂ e¶!äó',9õSâÒ¯u«K ë øžÿ•Ø!ž[AðÅó‰5L¬'àZ¦bäà†Y#F­¶é#íAéÄ—N|éÄ—N|éăì¦è(ó3âž|#4Ëo†1£ßKX°Ÿ¨…»FÖEØZá¥Ò;>wÖÔ‘·5EÄo™H±¿X«#å¡K@ ¿g”îõÄÍSÆÄ#Å%¨ tæ —õZnÜ|¦d™÷ªC\¾\rl_¸vƒ¤´£Âuð¢]u‘Åò Âã–‹ïq4C§À-¥uGK7ôØÝPWB‰ÛIïnn¥¸|þnìÿAåVÄõFoÓøÝg27’ØWx¼}$ >%¬­ñxü®H Ç¥> Äè}%‡Ÿ~V%˜\àãþ6Cü°$—¦ ¢è´ª£çšŒçÕÔˆR>Ù±IÅÎ(ªmÔ¬q°¶96š¦52š5»aôêfÓh¶Ç¡iÕ‡Ms”T=Z*i’=#E$ž ÙB4R²p…h¤Dì,D#öÊ# \ÓÞX &%_%Sæ¨Ü¹j4R‚‘ÕhÔ?©&¥R ›ÂK]Ü[«Ñ4zMªµ• ±ÝöÚvÃ4†ÛhÖ­†a5º]£Ñözf­ÛªµõCǤ¿-ñM²7Ùd¯ü»ÝN­Öåòi^ùû ×]{ÖŸ¼7âkì«#ž· ›Ü`¼˜Nï3ÞÐç~ÿ¿ÜÕë:×w³ š ŸÞú<^$=iQD&}+ÿ.ùà\ÛÙÝê³~¯í[Š^â>}ùîÕkÞ¼~u™ùô'=IüZ=ÕKôùÐ|×ašš}*ùjò þ‚ÿ8þùÂõ™Åʧø¢&¾=¶œ)ÿ@ô÷èKíYpëÌçügõôKÜÀÚ‹ôCÎÄå~Rüåê¸WšµF§]ƒJÙþéM¥ÿ‘ íCÓ=¤¢Sô,šÌæ,Iµ0V)IŸ…›«Õ+f§ÒÚê™›<µ1WíB‹¸µÒ¬²ä¿{<É-`ÁuM‹3'´8¯<Êcã"?ŸEÁæàÆòW"«üüä7Î8<ËÓ·M›ß‡7ž›ŠŸ&Ù ¢»Q˜[¬Kì®ÆŸkTÌz5à›‚Á ø-?2i³šb{SñFLˆ_oÇz2_%ýËãgƒþ–1Zûuކ±¯Ì^„Â옡ãVŸj_yÉ\$i)\”îJKûN¤*žó¥kkZCãØÍs-Âàÿï{íóO¯_k¦YiUjÑï¸gÒÓ¾ûÀuêV˜ü¨­}÷¤åÉ”~Ÿ£Å:îŸÖfƒ+Íx\ÓD¨#}ÚÇí–ô[ณ V³‚õ¥--%`a(ä,æšã #èù÷Z|U$¯uï|<?Ž–(Žhâ"–ÿÔI,“t¹“âøÂ"quùN<¸î'NCðýS U¹íq c+ž•û. YP‰îsÒßQZÈ'2b ™é8¤¿¬ÿìª* Š®#UŠ?Eñ+ µ¦Ó!÷““¥·²=ÞXJ¾w®ÝÕ¹ëë,Îé®&ÞF5•”ŠÊüž¼õÖ—¯ƒ‘?—®ƒÄ6&ÀƒUGVY›§Wʼn£[gkÈgçbúÂ4k*ø…ïµëŽÿ#EI¿S¡ŸLÕ!DÏø~0³¦‡í¹L¸êDã>-¢_?¼ÿϾÖîXÝz·W«w¬vÓ™½N¯Ñf½Ñ°n›£Ú¸«åŠ~íw¼ zjM5ËŸ,f|úð%æ¾÷'·šÕ¬¡ÖîÍ E_ïûšü)1)™ÉÇ«¯Á4ŽÈv›±AìSr¨ÅHÞ' ­ Ÿ±ïθڞ˨WΛÀ: /™§ìkè[‚ñ$ÿ[|/˜{W!Ú§'cÏ¿åhI˜šØ[„Oþ¢3ÞÂnpñ(º_ hëV"³Ÿø©QXüY´†âÃá Ó’ò¼»»„ÕšœDf J‡yêy·‹yþòV¶¦i° ö¥±e‡û_ÆAàV„A«4ñϹñ"Ì=×÷(Þ6ŸMœ ŒV‰É¨8ú< ÂŠlS@ÝÀ_n˜ŸE›BRó‚O¦ÇWIË 7WÁ€ŸÂ•4y£°ˆ¢aç‡$æÏ}®3JŤ¡,e>~´&®í}Eéêå÷Y¨"FÇvþÿí11¬wÇ}$Ÿìxyë ùªR¤73˾q\5Ë+ÁN_\ªsgžëp=JO¿®+n»ã âþÚE¤Ò÷,î–1ÅV`åX:ÂL»ÖTͦ>á»@Ûº É©•ûȱPiøù$$wlݯT&Nx³R ™[³JrPùjÐî’So„Vp£ÌZÏUÐÎDIÈŠ›4î-[1Šš“ó¹sU±;Úx¦:_Š«ˆ}o‹‰(5súå†{lÑÝ™_'º6åîà‚5BÑš2K•ñ¼aÓ™"K¶ºQ&Sš7Öznøë\Ë|ý{Z]El©úV}ëMõ%÷ÜB-ðfLEi^k×Ö÷îßTwzW|ÉÁv~¾Sk÷̧ÂרÕDžlÆýïVˆ§÷Éë;´ß-ßyFY7“cL!oè;£ ëS@‰ÿ±fÍMZ² Iþ´ý­_]=žYwõ*ÏÞØT‚;{µŸDš×oµ; ®í÷6ߪâÍT“W8Ñ%w%>3P5ž¹B?F}oqd â¤7.Æcæ¢Jï}ÍlÕj5øóoÆáÛn“ >¦ÇKu…r`ok0˜G%èûZ»F‡ëÍÓ‘&]¸Î×A2‰5gs*ôøÍ&#\‹z.ˆkéo/ñßÄ}zë¦,’Ž6jÞiQŠJ”ݔwô2o̶ôТwå¬YF¾àéwÁÁòsƒÍ@´`õÉ_ áÉzÕÈ™__&|I}q„ù´v™oÝ ÓSX±ÏX¯ºù²×@<îÈU8xå: Xï_-Â~Tó¾Þƒ6­ƒ¾DÏeâÊ9ñX×þU¦êùÎÄqèÓFôérä^¯o˜}«½ˆ_*ã\^ïÖì'¶Ïö¥¼ 5É¡-8]z_CþÏj)þ]{óñÓ«ÞýüñÚ»÷¯_½Ó^¿ÿõ×7¯?ýüþWííûÚoß|àG4~lwøÿÔhdþëÍk-â n4ÃÖΘ}ãiÿïQŠö·¿iQõ­v¦FâwÚb&t Ó²f·|f4c®éÿ£EmÙž(ÎPÚÿè«Ïg~x•ÝÃÿn˜v«n¶Û½V…ÿµiš=£Õ3 ³Ùêö:N£]k6ZMŽ/áâ¿}¡ªIß“ÌÈÑ,€˜ &!8ЏœH˜V¹×I¯ªiC³²”¡j|ùÛ'ÀÔF_0Zµn½=þs>ýZ¿ýú§†-o }zOªÕˆEÌšNÿP÷öÙ¢µoøÀj‹ÿý•¶Œ‡zƒ¶ÈéEtÍŸiÆX3|âYÿ»V±»ª¨º£Õÿþ7“ #J<ݹ3{‹ˆÄWÄ3iýÄœhŽ{—° Ñøà±áXþ„Æ©_ ¤><ôXol[Qý*ÚÖÚS‡¹¡:`òáÑö•¸±Qq²³Ÿ¯‰ qeŽqÐ2Ç(D‰2ÇÒø 2Ç(ô¡yt2¤ZvˆRÇ(Ü祎ÉtnUtLWä¼j,Êò3Òá‘æóÇIV.F¡‚+ãPq•‹qØ U¹ ®‰BEVsDaê9¢!ÕñØ€jŽxPX5G(îò д B@ò‡Î÷ü]…—˜YI ƒ–<5öA²oðÖþòÿ™_>ýë× ߨ¡=ûõ_ö§›[6ûzùá'ïB'¼õ-¬ú­ ë­åúK‡Ül²nÇõv½m4»¦iX£ZÃhY½zËff·Þ2눚ìË|›³þZ0ýÀ±–S{1ÿZšøMØ!߇u%‡6©@縲ävråËÜ@¯ê'8jù'æüC7V  sµøîJ-˜xl#>!^Uò¿X¡væzƒ©7‰¯žÏ´/ü;ÁœÙ?ÑŽ¢÷záˆRyÜèJ¯ó6åÆöaUtóMûÇØyeG >»4¶`m™E½à’a>’‡‹—˜‡‹»€[ ³Wk<ËÅ>sÜsäÏÛ•N½Ó¨7[F»k­z½fv¹›Tã=ïe¾rÜ÷å E úžd:Ô¯½â |Ù˜ñ̬´95†£æm¯Û %5îӆ͇nTý¤{θ·íJB½Å¢yÇxd’}Á¸¢e0îéþã^0ô‹Ãl4ø‹ÃL,ì‹C öÅ!ñâê¸ 6ðÜó}Ï'žº;®C¶v%/‹qØO ÖOr” 7†(ÔÝax¦Cbïd–$HK2UžrçxÀ½ò5s?Aøá´ë{ìùÑ4Q¶w_×±ô ý¦ |S G”{S-‡}S GO¬%"ê=5÷ù{jBðó'l„ê qéׯºÕ¥„E<À†£b`#PÑ°Ø û^¼ J«t°^ˆƒ×°Έ{YÉ1’pɾM Š·D}±ÎYÀÈxUý…¶µ˜” 8jš²¡žÇ'‚à q‰ 
HlX"œÂÍgJrš=.ß,ÖâðÂ//¹Ý& 7*ØIãÒ'—\Ëâô‡ÌGi`Õ ô ¶¿ †JßOŒ»?}÷–f/Ë¿]LŠ2·†ÞÑ-×õB+ 8Îú:uy5-aa¤UzŒ9×Ùä5(ÿá¸(ý Š–Ò$ò N§˜$ÔD»Å Ò‰;²R²“vÐÈÉKý-fûay®B’²К U6d[ê Öª<,•4‡UÍôú¥fö£¬±ÃGi;{M8$_GnèH‰'b=ÏeS{… تºÁ =ê[±ë`$uަYoÙñ­ÛÈŠy¶‰%ìNˆ©ÿAÁ8/wÄSދاy“ùöªŠ¢=]m¾ä1mˆ§bÕ‘Ù‹9môoq º¬Å„™¯ioæ”Dðs¥%Â¥ Sg”NEÄ Æ§!Ê_I¦ âR¥žðòÝžrØnP­­dˆíÆÐl;-£ËÆu>ĽŽ1·FËlöšõñ¸Æìzþ!&Øžô  Ó„€óÝ`H[⛸OÃs×>xS@! l/s段MéúoE6ÌÔónsm¥ÉdÊNî^QñDü¾roͦ•?ëšDÉ5§š%¤(N>AB¶ÀIfÆu*Ýz³[3›F§Y3êM¾‡´ùÿšÍf£™·÷Ûªš”+9îRJíwIh·zÍ^ûéœK-’íPG—J··™D©tãZcÜwG©t{••Kî(].ÝþÆ« èš’šlºcë•l:]Æ\ËæÓíé>Ÿn/:Ÿ. žO—‰…ͧbóép ˆ|: 0.Ÿ… ̧ƒc"òé ¸|:40¹2 òép ø|:8>ü¹7sK˜ÎZvET»ó|çß‘¥n•¸½$[21º×q]mqü¤„‡^.¡P÷ÄÉd, ÁgVp=´ã™ªàOiÕgrøXWinK{]§urY×çô£¦:—лe®Ïîö%8AUÉHôÜ) RÄåy¹²õm›U¹°Ó‚ÅðOf‡–Í¥å?¹N†FÓ®%™/‹J¨G#êáØð„z8&:¡N¨G@êáˆr õrøÐ„z8:4¡ˆJ¨‡ãJ$ÔÃÁq õp\úµJ¨GÀ"êᨘ„z*:¡­`° õph‰„ú2ÂRFXÊKa)#,e„¥Œ°”–2ÂRFXŽ+Â"Ũƒ€‡1ê QŒ:p\ £èGÅ0êàQáŒ:pl0£ǨƒÄ†1ê AÁŒ: Ü|F)§ A2êdãÁu`Ž =J+:BÓ1œ.¸nœˆèùF0º­Î}‡ÉïÔ«ÚšèV•̈"l—‘–ßZO«ìJ@ÓjßòÌÔÿ Ú Q<3¨I" $Š&o/+%;ß^ÎÙ‡çÛË{ü(^i1Ð\û|"♽¦ïâ Pá_é&У*+>Fiø•xˆ›ƒ9/ Í€!Mï‚ Ó“ ]Ó^Ab˜ðP›yÍ:~’×lÕz­v³Y3º#“M‹5^»92¬N­32M»Ö6ùç`ñ_«ÐÞä) ¶ª¾>¹¦¼õRUUtMBÙy5QSòhéQE#Ogª»æ8‘¨ö:£\ˆÙH—+¤]ßêÔ;F÷©|¨ï´ýÛFŒLyùßÄ}LjñƒãŽw²‡Û˼ŒmÉ¡-‚…b¯xE,p™T,òÈBÑ­tjÝf­mtzM£Þ®5Zf³g6›ÍF;“„bß—/5è{’é8°Î©XisJLÙ¿{ÓÑŸ2¤û´Å)!¿8¥9%ö¶]é@¨·X4ŒGÖ)YB‰ý-Ë'±×ÂòIdzf(>‰l48ŸÈëÄðI€±|8PŸÇ'BòIÀ1|PŸ˜\|8P<ŸžíÆ$½§ÅIÝòT#9¼QJÞ¡¢P³rÜtáWÖÜZ>ö¶7›{®xf‡»Ó ’/qB%z.$zã­oWŒ±ÏöÅó÷3“¢›²¤í™D† JasuÒø×e¡ƒ4¼×È«þAØ‹žÛ$Vœ„E‚(S¡MNRa¶-G©X¼#C»&AZ’9A("40‚ˆŽ '"€c¢‰àÐh"4ˆŽ(GD ‡%"€£C‰ˆ("8®GDÇ¥_(",‚ˆŽŠ!"@ ¢‰Ø KD‡– "(æåѼ<š—Góòh^ÍË£yy4AGs© v<,ƒˆÊ`‡ãb2Øá¨˜ v8*&ƒ Ï`‡cƒ3Øq¸ v$6,ƒ Î`áæ3J9M2ƒ=–Á{âPèLíÙ‹þÌ…IdÇ9DÅ%²¶‹0‘½ðí˜ÈÞøC§Œ(Ô“ƒÏp,?ðqœJëPl¨¢?#Š“f3•’Íf w¾‚³HáãÙ ¤Å@Ù ò ÈÍf T<ŒÍ@º ŠzÆ{¥02\¤nìßå žlu…Ë}ÜïT)"9ª‚!´a5Ö¢“ø¡¾|"`çw»=³QÛ“%™9|[Ô}àe¡‰>ð"v}4ö} D-I®iú."ƒo'ú趨𢫷†V³g4ÌVÏhÖÆ=£ÛìÆÐ¶-³Õh׺üóG°xÀWPà ¸z Ã_9åTã˜3ðÕ´³d¦Dî* %A¥}È}u”ßä^çB—/ñMܧ·òüÄÂ'䯣ÇJ¿Xsí{çøž;ãÎò6šl/sòt;’CÛœ½ùÉSÏ»]̵E”¸œ);¹™â?ž"1ÙˆßpͬyåÞšM+Ö5+ˆŸ¥'|Käw|ñp&VÊY_;Û)ülù}vÆd–®êiBöØJyË“|ÒÚh[kp£÷¿&F`kÃWŸ´}îü¤ÀNzç¯}@ü(}ñ*~ü»óhÕÿx|á+pìW‹hþÄ ð #<Ó{F«‹r傃±èÕ›º[Oïâ˜35"å7žYêSpÝ98?J>„œ:»Úm¹#2ÜYQ,Eì÷9gÇ{‰µˆÄX'v91ú7åiÿÈæSï~ÓÙÞþu@O»×zQžv!\IûÄ«âË$ã“–x@~°^¥Ýítku£Ûä}jó.4;õ^»+ho2ùÁö}ùBQƒ¾'™ŽëüñƒA•6?Xgî.¼ÅÂ’àÛ§ 8~0éÅ)Ͷ·íJB½Å¢á;²NÉòƒíŸhi~°Ýýh̯ö`!²†"§½ñÔiEÀ!‚«, ÍUDs•¡@ïH¢vœ\ed(WÃUEr•aÉ•ÃU†•à*ã#¢¡˜´áìy8»Q†³Ëpö ‡³táì†ÊpvCI8»qøpv£Èpvƒ,œÝ˜)Ð4ÏÃóÆFð<1ñ?¯óÓ~r—¥L•fÖ× YÑVí?Tió›kÝYÎÔ⇀TØI\¼§å×>lLÈQkæÚÛf&Nù;„©ZûY´ }|)âà$nWa×®Û­Û°§´±4úŠžJ(° Jܹó’ÅÝ9ŸöVZí-õóe‡>>+•I2 kö×|°ð§ WÆc¿¸¬~uUÔáfŸÏ˜Ë]ËʳÓH%¸³+v\ ¤…ßú­v§QµÜ{îK‡U±œ¼¨ªh5Y`ÑÚª·ã¬ˆîFôªºQ"¥g0g¾ãŠØ®Ó-hã­%´íúZ¯n¶š½5¬v7ËlÕjýߎï? 
Ì ¨‚ú3ù2X¸Î×"„=ËC(¦‡þ0(BÙnt›ºR9×j»¡;3+>#ýµï­¼jôä*~qÅ´¡ïŒ&¬=ÎP½Vq·X¨jiwÞt1c¿ˆµ¹‘“¨Ëƒr ÉÕ ïÑ¥Š[‰hý°6sߢÌ$cäøºrÁKµ‹OúRÝœ¨U3ßKdÆ.•"¶)ÚiÆ^õy.ÅI[ø¸JÒ‹1o©•ùå¿—ïüúê—7E¸;kº`o}/3W˜Läcâ Ž$©T‰Ä1zùQÏÓ­,ÎU" (¬!ËB$-O}§<¬›*ìuÁNj"µôRK/U; ·wÐi[÷š Ÿ¿„¢¤û̽w£×YâU^©9’š³ÎÅP´æ$¤ F,¼<¢ÚnEŽBúP*©‰ý+¾Žµ¼èx‹V»;+>Ô³Ù<¼ÿÑñ³óÖ ¨6àÓ ³Ø+²•"N}RùÐJ[µ,ÖO݈RÛ¦âé "ž¥bt?Xq:­us“ÓI±®—Z·{}²@ÄŸŒD!^¬Úmx*Æ“^Ÿªö©ñ ÉQiÛI‡FƒDd°EFÐÀˆ"#plx‘8&ºÈ]d ,2G”+2"‡-2G‡A ¢ŠŒÀq%ŠŒÀÁqEFà¸ôkUd‹(2GÅA ¢‹Œ ° ¶ÈZ¢Èjë•IŽÄ À&GâС© hï†49RNºdr$Îõ"œ ÈkzüHÈ'Gâe“#e e’#ñr É‘RLr¤|œŸØ“#œN“'s Ç1jE&GÊ)*9R~zQÉ‘¹Ä “#冾ùÉ‘×Lhr¤¤‰SåQ “#sE¤Š´¡‡HŽÌÙØ}5ñB–A‰;'‘Y>Ç$GÒH;šäHªÁ$Gnu™39r‡¬n­cvj§ÝÚ×¼_!N¥$’’JI$(;9’¬GÙÉ‘D¢ ê$9’H.9’PƒBt0+92¿œkµÝ KŽ$h "9’@âÙ9º<(— ñìœP0îÕà¥Úŧ }©nNÔªÄï¥2ã€wïTÛ”träñ[øìäÈ“2oØäHBÑÐäH:‘˜äHZ©øäHòå’#i²,DÒòÔwÊú©ÐäHB'’Yz©¥—ªØÛ;è´“#UÌ<9’P:<9²Ô’äHšNŽüƨJ9 ¹“#sˆ†%Gõ¡œIé@°Ÿn˜šI!NŽÌߪe±n< 9ò[ÕD|r$PP–‘îçLŽ$ÐõRë(’#OZíd“#ZûÔx䨴í¤C£A"²‘‡ÍüAàŒØ -ß–•î‚÷mbÀ0~(ªÏ}0²ëoP [+²ÿ€ eÔ;kêŒÈÛš¢bW€û‹%Gd&³"!E>Ù(Í¡"nî ªïÉq[” ¡3cü#"Q¤^˛ϔä4!q­PŠMuíœò´Ò©~ÐÎXxãEÉn|à .'7YüF3føq“tf¹Ö„Þ¦‰ótÁ&ZgW™¾=¿>%),ñ ü®H ÇUq"ÐÇ}r]ß"Ãû•ñ3Ÿ¹6 ”nôŠâ¢~Û¸ÒÎèJï_é¶9j¶ÚcÃ-£Ùn·n¯Þ1ºV³ÓkÔÌz£×½Ò—Q›”4‰UÁØq$täұݙºª” ]KÙU*ekö®B‰ëy¼ ¥À’~ˆtO‚N¥ˆ+ n¸³K‹‘¶™dúp’!CÞ“$çT•9V¶Z€‰§¹ÇG¹C°& ”ŒJì,bY&ÝŠó_ ¶;ùµ¨ÜĹ JLß©-òr1^zܾ/V8ÌØQJLílÁ2æ¸QLç~ÐC¿e;l0åO^$?"·pëñh]W#+ž* ,i;ÒùÔ!]{¶yñ+ûR˜àb$©—RÀ\=ñ>’§TE®Ž—ïPÄÙ{/ÞÈ®”h-­ï[2³ûQëÛ94©‘\øÁÆ;éùz^å²Ð¼¸Ý¬<_•ç«ò|Už¯ÎWë™aåAë%´¬hëÉ«¥¢}žîZîî7¤æe¼ªŒWD¼J©ÅšÀg~ä…y‹±›Z~X¤ÈÀ¾a£Å”ù¿c£§PöÂwÂûמ²¯aQB×SÉ!³¸^;òüä[6»ŒÈkÁÏ÷(šð˜>¯~‡*lWzÕÜÈ}Éòâã°”Í?(óí%NÌÿűâ…o¤ª>”7dÒ7di½¥(í@i¾á›òäéèK¶`›raæ^lØáÛÈK4+þ«—làÌb'ª7jÃVÊÐÕ Ó£Ò"k`’“-”[?gî«ËŸŸâÜoÏ׉eyóÇœr=I§#!8'væÆ“‰¢™Òkš¾SûEJÝSä—HÊóÔïÏd³H»]=! XòJDõ]‰¹¾ïêÔ:Ÿ’X|""e¸ŽQ \8QŸ ùñ\×[K¬íè6;­ÚÓ¦dX­ßÙWl}M›­–ÙÔ‰ð“ é4[¬m·l£ÓmÖ¦e5n×îÃz§>lt»½Æ¸¡ç–¸ÌßhH HD÷¥+y£¦VÁ)Q¹¬Ø GLœh¥npb§‘Õe%dÐiÅ ^’ -©H΀S ~Ú0·åàq•¶‘ @×\|eíâ5 Jh€4=@úüìoãðÊäñË™ª1›E™ÎØxZ[¶‘°ñtvžÚ`z[kZh[né—H¼L¥Ñ# À‚Ú9¥K!í܃•]@;·ˆõÂÙ e•ÀÎÛÌÒ×ùJ^çí Ôu>ŠÛ*mO²¤u~ ”êTf kiük5Í&,Y³ Iú«)×Us…ª-/“ü'-dgªŸÎ7Ë»êÚ¯©7Ñ‹kÅj*ß:ÊöTTAÇ\KûAr|ÔÃpÌ/Y7*—À¥ƒHŽª`‰ž®W©™së—/~t»( ,ø1›tðü"áLyEůX€e¿I¤m¹ƒÍâl§“+Y曤K¥–§¶CÆe—ï>A‡T'¼ôØK½ôØsÙÃó—1-ð’ê„óƒ(¥ž_*¢„z©¨Rét/‘þm³I‰,}y@ß!½u|r_JÊ9 ÆƒÒŸò ¬®†W¿ —ˆ¦ñ„T@ñªm|jEfQ 4jĉNæn‘ “ÿà¡Ðë:¸#<1Wdp3û¯Y¯)ŒBJ<æQ³…/‹9:§~é7¬ÅÛý2EÃ̤?™õ²Yh=€Y¾}c° KEë£ÔØ'”¥Êʨì†Ûf<ÑSÑ\Ú#Ô±½û?ŽœÐ AvÞs˜ü·%¾‰ûô§Wÿ©}^}¥õµ¸þ´–œyÍg-øQ•´á½Þ8%÷iIvßµöŸ«?Ø^šÿÕ¨ÍB+¸ÕæVxÓתÞ<¬Znà §¬ê{Sl< «Šՙ帕ûÙ´oÖš’cÛž·Î|î¸~܉ˆÞxAx­]ü]ÚæµãRãöåN˜HO[Ó€çC‹0âÌ礹ʫyáÁ¨ã2ùGž‰1ŸYA,áu*Κjqåtí‹hoE[tiIÊ•ùòÝ«ÿÖ>¼yýêrM#süÁH_å^®tIôö¹¥íÇŸ«Ï¼Û ÓÔ´èSÉW͹à?޾pùœðŸòÅpQß[Δ úûºóŸÕÓ/ñ ³ü»Z_ov5óéØEÎÄåçƒ÷u•Ÿ6V¶|¯^1Í^£YçßHÙóic÷íUl´OÂP} Gïù7ŒýT65±Ú—¾ã†ZàÍ7ÃÅDsܱçÏ¢ˆÑuæ @Ê\i-Ø\fƒÅÁù`²'≀Öεß-ßËf÷¡l þPÈK^ÜR@‰ÿ‰’3¢¤ƒdK"^øÓ¾F’›a{S‘¾;J32\/tÆ÷Te®Ð‡Q|IFÊWàd0\ŒÇÌD‘á¾¶?© ~àü›qøè}9|ÒLuƒr`„ë$(")×›§#M*2ÉX$nè¾Û|$úó‡^Àמž âZúÛKü7Éö`™?;…w»NûQ8ó}.üAŸ²;6ÕûºØGõs]¼NÙBÝSÿƒÿnêM"ž¢è/b‘FâwàFÑ}Z¼­éŸçSë~èy·ñ}Írž›¹íK©KÖÉl¶qÙè“»[ñ›Ýœ.çÚ?wt±IÁ¢ÇMŒs©uÁ­³?ðÜwË÷§7ä?l7›µnÏlóÿv›½Z¯ÕâkL#¾÷£xB(üŸWÑÃc]_nÝzËìš5"ìütÎQS·ž _ß0ûVãޘƾ:AÈD³¹8L‹£L{Ü3œôÜ'èFmç¢V?—d‹Zï6ê¦Y좎†=ºÃ?¢•¶N¬mÑÀAÔ@ðƒ¤:R¡w˜ƒ¦YoõÚµV“f^æ¾÷õ~5˜5Ò¼áŸÜgÔÆ¾7Ól~Væìê §ÞÐ>$ÿúG±ô¾ðsÎõø=^µæNÀÿgTïÌêj~ø¢y:=Ü»a±¨ úd’æF”Nü„ÝLÿïþ\ÌDÄêõW—?_úlì|å?rãýä{‹yô“yò“Õ«ñèѸþk†Ö|XõW_k#ÿÅÇÅðq,ô鹊îný¥%îÅúŸŸà¿½Ü®ÜÛ¯µºÝvdXv~ºˆM‚ 7ͦÙéB›ºýÓÉ.‘VwFO1'u.«^¯÷ž‡KvŸÔ¿sB6»x8Z~Ö×Î'5Ê·©üY?[~¿ÿHŸ%5Î{ˆ0õ¸³~gEtŽBôß„ Œ¬Ñ÷ŽNTÍ×ãä•¡xK5HKçLSH·ì>ƒœEô”˜‚ž3wÄ7Ìà†f’Ö‘£®”˜¶5ˆîªIÇÖž:b/VL® öê1¨ã.˜ðAø†1²QN]tíá­Ý[‰W@´:6Ø1\™^Jt]uß©ž½ô=ì®d–„s‘AA$9RÄ’² ÒÐH"HI9BH9d1¤¼<ËYNYY$bË]'oÀ’qI­HÙ’9 J$™sz¡„’ùÅÀ‰%s( }óqD“Ç¡™âÉ<&N•GgˆÌ‡_´ E2F®;PÏ#'öíxb{íPichôé ¿*Ë ÄÃ1KæŸkuK ÂIC#%~‘q†óÀ°F#á _ô»µnCáy”NãîQb1ÿWuî;w|툿Wl?,¬·ì~{#D„ ˆFÄñÿäÙ´1v¦,nO¤¯Æ?aôèuõ ã½:eü9·X7Î84Ò£¢§í]µ<¶e1çž#³fBaûÕê* 
+TÖ¬<#~½7þ4¸x¸Ò«W¦]=°Å¿¯ž±¯ôsþãt3K>´vàâLÄMÆóß®ßaÅo"âÏM¢ºK]é8\«æçt,«!¯z¾3q\óá-,b¢6¥ÇšdÕ"çž³<(—°¹-_zQ8Wl3çňN§/Úètå2—'½þ€Œ2'§Ï©LÖ6{½`MÜ}r‰Œ‹pJ ÐÓó—;·ŽSѳ Ë‚;ƒ£ ]¡¾œÓYv=Bi$oÏ…5[ ©Þ§Ïϳ«’Ùþª¤¢ Õî}ýMØ­}Õe=ð'ìÑþê„¢ êOV5BaðªÄ:²¤Z­Fë…™eªŸ•3Š”§">LU>l&CSé¿–þ+(´½—¸“r›’bÀ? ¿Ÿÿä̆!ŸX4„)ŸVdÌ=‘͘O/uË;LÒ'˜ðž£ôé²,DÒ² -çqS!LûÄNj~y™s´—9snÂã§ Eí\bèÙÞTHÿôú²¼H:õ£ÙeG³F­Ùm6 )oÛ?]ÀAŽDðò\¹ˆåvÜ(e>P5°ÄÒa…5G©„keèjÚ}­f–s1¯çMˆž2ÔÚÔå½ÚešÍ)K·2áܲ„279fó¾–ÈÝ&uÖIᎩx5€Ê¯¼"|™ÓH9Â5*‘…¡x•¥… êÿoY‘ë'¸)æ'A?úIgKŽJÛN:4$" ‹l… ¬éÄöæ‚@"€0_À±Ç^D¶DÚÞ s™H?$6а¹7ÎhÄÜAtßA<"Ë…љ͙x®Š(Õ$P‡¿ã%zBýA‰¸²dC'ÅRr)wñ Ϙ?a W%.ýÚXg¡„õq6)êÜ ‚/žO¬ab=8A8P¡e*A@n˜5bÔj›æ¾ àÔC¨­W†z'K=„C‡ ½Rê!9é’ÔC8׋p. ¹êø‘§ÂËSÉ@ËPáå@©‡¤ê!ùø7ž6#G˜Ÿ†…âŽcÔŠ¤’S$õüô¢¨‡r‰ARÉ) }ó%¨‡®™Pê!I§Ê£Ø<çJƒoEÚÐØZK=”³±4újâ…*,ƒwîù³¸“¾ç†$ÐDÑñÔCDrsR¶Bžzˆ¨ôÔCD #¡"jK.ê!òñ8Qê¡üϸ_ ÒSÑ]¥‚©‡ò‹½V?¥Çœè×êô¯8³©‡Ná¥bÅü?íÇÄ`ê!zM<øcº—þPA=¤àµxoONúÝöyy:Ëô8ÔC4ÒL=D5dê!*Q`ê!YÔCTÝʤ¢ ¢ê€zˆFTAýQmv™zh§¨N«×xVØú=DE4‚²‰Šr˹VÛ B¢¢ümÁå—†:ÛP¨Ëƒr RÙ°d‚±9!‚—jŸ2ô¥º9Q«f0o—ÆŒƒlIDå *:z  *:%ó†&*¢ &*"?º‘JÝòj3óÁ&¹|I¢"Ò†, ‘´<õò°n*˜¨ˆÎIå»ú9!û¿ú!ß: DEt’1DEåj/f/âhvþ2§ A=D?ê!2éê¡oúP¯Y‘±% ’ ¥:â0õPî¦YH%Hê!™ÄÔCyÛ´,ƒ‚ÏcLpê!ºX¾ÌéΜzˆd’Så^¥Å†@ÔCߦ&JQÔ¦HE=tÄ;“Ï–•¶th4HD6 òü™?œXóùÆS‡÷mbÀ0NÃÔç> ²™À¸kž„­Ü:`º0ê5uFämMQ"õ* ÅþbEÔª(¤`k¥ %ÄÍSÆ„ÇÔ¢ ã4 õZnÜ|¦$§ ѾfH6Õ51%l‰îÚ+Öõƒö{«7^D%³ˆ“Ôsâq“Åbš1“aÑ£Ùs@¨àwðz²\×ã›ABlGçÚ­k¡àó]~f Ä=cJ¨"FÇÔjçe4Ÿ¸}á[ä,JņjÖÿ Ú/¦¾x4L"ЙåZ6z›rôÑEi=e‹ïù‹‡àS–Êp\Ç#}Ü'_ø[dx_¸2~`cæ3×fÒ“ž^QØÒoûWúÂ]éý+½YkÖjmsl4Mkd4kvÃèÕͦÑlCÓª›&ÿà2j“’&Ñ£*;® îˆ:¶›L•2p¡kì`J¥l% S(q2L¡(ŸT®åÊÇÅ,•;œU)"²Î ÷#‘U1Ò6ù¬Nòn…÷$¡·ReŽ•­0ÇUÎñQî¬Éò^‘D~‹X–I·bª­‚„íæÙ*ªO¸³–ånÑКœ ­ÖUä ‹üá8,ñx±Â32wA5[f»Ö~Úy3°Oæ57q NÓç×E‹¼\L§—w‡ï‹3v”S;[ðĆ̟9nÓù…tÄÇïÛ†Lù“7Ç…È-ÜzuHמô@üʾ&¸Iê¥0WO¼5ž·"WHéT”NE^‰iòØ‹ßÌ6’Êøª’Ê®ViWÑ:þôúò[ÚážæÙr—+\òZ¾ÝËÛcË3Iy&ù&Î$ϸËÊK>¡¶­±À–ºVž†ËÓð³Óp’ïRž„)%ÆM/~{_)ÑwÓ·dfw3W‘ ?Øx'=_'ÏZÚ€òÜYËÀ`,ƒe`° –Á20HªI)¸Ê8Í7`¶×Ⱦ¡ˆdù&§ŒBžDR©ÅšÀg~ä…mâ±÷Z~X¤ÈÀ¾a£Å”ù¿c£t/{á;áýk~Ü`_â„®sBfq¼æ‰þä[6»ŒêNS)šðÈã¨~‡*lWz«ð ¾äËâcS0æ®?ÕðMgƒISB8¶t×.ì–٩׺;3]V3ò+Ä?ÚFíµHÙ?â$"žÞnpÁ¬ÙhBœNÔjPJ„Y 2‰…ªŒÕ ^ÚÒ7IŠíK¶`Úraæî»$‹ßF^¢XÑW¿d'?Q½Q*AºdzTZDb L¸ë„rëïçÌ}uù³ñSÌ‘çù:±,oþȽ§ÿ ¦Ä‹D¥;9ÉDN­Í”aøÃ ‘aø“•’Íð'µ(„>Ò¦D¼Ã…“}æ²…Ê÷ÄÈ=Ìæ}8£|®B¥Ë÷"P©P+æèS×§È“œc‘'ù*eP+‘,ÄŒX¼‚ð“o¹“RÜ'7Þ]‹“9‹#ÌgV˜å²—´zY ¾|>sa+ú2áuÜI¹¦Ë5]®éS;Ÿ«ra¼aT¼eôÓ:ûƒ:yp2â™”ç•>ŸF„j}È+Ž;ö­Ê+´oªwujO Ä|""e¸ŽQ \8QŸ å^êºN­åšV5[M³Mµ¶’!6ù€¶ÌZϨY£®Ñl°‘1¬MƒÕG ËìÕl«nçb‚-R‡0*û¿»€D»V#›Â•c’An-aR5y½ÙÂâ ·}ï@ìï2–wŽ,.á½éED†¡+¼Ô¹1†¨,|ñ”\Oü½bû¡ré·ì~»pþ ¥ÂmÏ»uXòÔß;SvñœÏ²šd ’×ë*ä͹e¹qÆ¡‘R¬8cíBµa~lÃbÎa ú¬DJ¶=ɃŸ¶×ˆBžñ6+<ÄMØ*/\WÖ’åùË™» ¸¨ÙÛLÅU8QJ¯ÉQ(”E‚“óL™nÖì¯ù€Ÿj£.dð#PjF—›î(¹Ì+ÏN•àήØÓEÀ?T‰NM}Qî²*¬ÎbèLð¾š"èõxÎ!òVܪ!^o 櫉JAíšÚnøC[¥³U«)îAL¦£P„âöO¾ ®óU¥ÁÕVE<É­SÒ%Ñj5Zj6EULJgÁˆ(Ú?‡ &F\B„È׉ „´ÀÔ›‰{§—'³òdvLÞ=·‰…ùó«lúÒ—/}ù{q¯«të_g#Nþ[b+÷ÖlzZ»h\ˆædÍOj Öê˨6@àÊ*¹E=æu'åL”KÛòä83žLnÔÓt+Iß×T¢VÞ€¥R ËSÛ¡ã² ;úRö¤{ªÄÀn Øão ÉöŽ9·åäÚ0·DÔµá·cÊãfyÜ,›/ëŽ/=6?¯zUK]«Nú V H‰÷}½Úƒþ:¾S{ëøä^¦þ´ÎƒßRZÛaõ2aõ r‰ØÒ2"žr@¥ä—¤R‘Yå5bÅ>ŸJ|óÖF‘«äÏAÜänÖk £•Þ‡{Ä´ü†£ÞNÜü…ŽêéŒ8œ†üdÖ±Df›¢•\L¨,uå¿a-Þñv±Üø$6¾ ×Î(â5èòØYdh4íÊB°²u )-´Sú-N»Fõ­DÛiÁÏé%oP5"õ‘¬QÿqEÔ¬<h®j7ÖÓøaÍ™-fZÂĽ¹­Ð‡ "Gý—X\ÊP·ÎA}öMU\kMrø”bÅRLg^ ·îoj5T. 
Dc?²P»ÒÅíö¨5ìØ­±}¥kN Íùo.±Ç–Æ6R·²>ù e k,X?²=º½Ôþ×®·ÛÝ^·þTê6>¿m©û©±¶cçbÛ‚Cng?%CFujæ^æBÿöÞovjµ¦ÙxúMܧ Ú?Û…wšf­VëvaMÝõé=bµW|ãáÚ¤}²‚[íc8zÏ¿qÀ¦~zõñŸÚçu‹¯õµØÑÒ’È{ ùì¯ ¸¦jÃ{-¼á»@Ä5©%d“×Ú®þà¤ÿ¯Ïæ5j³PŒÅÜ oúZÕ›‡U+£ªïMY°‘ºQ ª3Ëq+÷³i߬5wëòÉ$Õ»z¯Ûé= g¾Ï…?èSvǦz_wܱ§ŸëâQ€¾‚» úÔ›D¬ÁÑ_ø–3à_uÃÁ厢›&}Løo?ϧÖýÐónµhÜ£‹°kþÛ8†±âsôuç«>¹»¿ÙÍ}z®ýÓqG›T¥zÜÎxÓÔÓÖ ÃìÛ+ltwØû"æ’ÈÞGÂÛ³ÖîÒØJ{o{Ó©‡½o7›µnÏlóÿv›½Z¯Õªìý6…ÞaãëºÙª™µLmv}º@ÓÔ}&`cWL÷ËäÖ.‹€“žÛ´ÌÝ&@ù\Ò™€N½nv» ³4JMÀ>…Þauq0»[ç.&{f…9ÅΊ¿p]æ‹9ñnWsÑívøð×zf«ÉûÝ«7ëí¹&îU”_¬ïÝf³×ìÁÖèÎOao‰ššÜKßqC-ðf,žjM(‘?‹BåÙÎRfºõnûÚçßëµvñ÷wnÙ`ñ“ÐÚwè0h]õ´ß-ß‘øÝJÈÛL!/a9¡€ÿÑ`EDO}m¼÷nXxáOû –åÞÛVV…aò¢AJ†™‘J`ÍæS²Æ3WèÇ(Îõ õ…].Æcn碡¯í'vÊ8ÿf¾Ýè6©àãg¦©®PŒ¸C 0Q5‡×›§#M*X¦ÉX$徤4$º ‹öF¸õ\×Òß^â¿I¶'“ùÍÍF³Óê5uPSw~º÷¨©Ûk7̾â˜ì«„Ü΀£78éù#6µj§~.)Õ®[«uš#<®}0È8®ev×2zÇi¢Ù2[f³Ûnz„>¾½¬î»})2>7gO€~_C×¢ó+œÝŸß›ïvÛõn§Aµäâãìjn#oÄ?J'®Œƒ…Í›ŒÓé}Æ•Sîë²½nuÜ ÙÄŸÞº“‰wqZ´Ómí;œ%8×vv÷¹R¯í6§D½Ä}úòÝ«ÿÖ>¼yýêvµ!uõ±Szâ„­ãbW|þ¾°ÿxr¿0š}ÊæÄ„.j±·ë3n†Äi(þÁØr¦é/£/·gÁ­3ŸóŸ™Z‚À—°½H?äL\ÏOþ¡\÷ê@§Ý鬛îýR¶zSÓŒ"+Ü”q%=¤vSô,šÌæ,qµŒ•SðYdfiõŠÙ©´vŸ²`5-ÎíÒD•$íB‹#R·.ùoÅOr Xp]ÓfžÈ‰ç{¹åÛ7‘#ÉE~>«.>¶7–¿ùXÎæ,Oß4m~Þxn*^,>nÝ’£ÅX:Ãjü¹FŬW¾ÜjßZ¾!'@MauîIñëíX÷¦+é_?ô·ŒÑÚ¯s4Œ}eö"†dÇÀ ·úTûrÈKæâ.vK¸(1Ü•–ö8+œó¥kkZCãØÍs-Âàÿï{íóO¯_k¦YiUjÑïj³§}÷ëÔ?¬0ù…QÿþZûîIË“)ý>G‹ÿtÜ?­ÍWš9𸦠vJŽôi7cI6ÐoâÕùΫYÁúÒ––°0rÃsÍq…Óàù÷Ú|ºày­{çY£Ø·_-†´éÃ…3åcO[Z€ØZS~‚àÆÜÅŸGÜÚøñ€ ¶ðTŸ¹™ñ-Þ¥jìV÷lÕõMb‹ó¾{×X[U4+3ãð†9£(ÁinâßP0g|?³õÝ÷ÿê7’Ÿ6Ûwæßlï­Eè}«}É?I‹´•›©ññØð9¥ ù‰.àãyøq|´Dñ™TûâpWÂqÄ2I÷;)Ž/,W—ïD £Ÿ8 Á÷O-Tå¶ ÄÑSŒííbÈ|—·5¨DÄéï(-ä±…ÌtÒ_ÖöUPE ׎‘*Å—âåˆ5¹›ž¬ü•éóÆšhTò½sí®Î=ÇxÉà|þjâìTSI©ªÌïÉ[o}ù:9p³éz0HLsNí¶R”ô;JñÉTBt”³nM!Ús™ð|Ô‰Æ}ZDÜ~xÿþŸ}­Ý¬Û½áÈm³Ù5º–Õi­Z½×êŒêc-WÄm¿ïâÅùàÖT³üÉB ý¸2sß7 Õ¬¡Ö‡ø¦Ú ïûšü!5yÂÇ«¯Á4ÎÙÞŒ b—–C-Fò.QhMøŒ}wÆÕöì\þ<¿òÖ]Ù<}`_CßÜqw\tâÿß æÞU£Fˆ6äéÉØóoy'ZÒ¦&öq$xQ‹oa7^ø)ºÓ hëÖ‡ ?ñCëÚCHÍŠ‡7LKžœíîVkò>d t˜§žw»˜à/#߀fÅÅIïrlÙá>–&\Zïd0·×ÁÞ×¥P¼øÕ9 o›Ï&NÆ´b2*ŽG‡>¨°¢gu|5º¡c+é=÷|çߊ°½€«½ h!‚Šƒ Ìf~èŒÅ¸2ê9ã¦ã ß_“-ð<¦Õ&Bóù)¤¢Rîð1µbfN”^ªÔ,8ÜÍâ‡ÛQJLQN¯¶çù£„^›Z£½õ4ŽœÀöø¡éžw<õ¾ˆR:Ü¥¨ðiŒh‘|»FeuU®Ì « \‘䨰”dO ‹ää ¡û¯…ZJ¹§ªé´ÊðÍæ¢¶™ð_èjT$‰êVÒ“m|(ðÄft¦[¼P³x’f«±WöŒU„'eÌ,—’O»'Æ[IËç³hS`‚<Ò±ùd*p|•´\Ñps ø)\I““AŽßŸ)v&Òèæ>×¥bÒP–2?ÚF×ö¾¢tõòÆ MµBÄèØ®Ãÿ¿=&†õî¸ä“/o!_UŠôffÙ7Ž«fy%ØéƒOuÎãÌs®'Béé÷À5pÅm÷o¼a@|ØW»ˆTúq¦°b+°r,£lgךªÙÔ'"õYý¶®FBrjå>rl'TþÇ”s6ŠîW*'¼Y )…Ì­Ù*sý«A»KN½IZÁ2k=TA;%!«˜:%éWrb>w®*#vGÏTçKq±oãm1¥fN¿Üp-º;SâëDצÜ\ØáÂgÊÖÔYªŒç ›ÎY²Õ2™Ò츱ÖsÃ_çBXæëŸžä±é —‚4ØRõ­úÖ›êü=é—;nªwat[ Žò´ˆ{íξÑÜŽ.y³¼ Ä.‘c#± åe³ ¡°ìBx`:v!ñÖf»)­ÐZ²:M“¡œB(P N!iü˜S¨Ýj5ZTð(N!2œS‰ ãB¢9…Pè`N!0ªž âZúÛKü7qŸÞº‹L§MbÇ(/& \JnÅ»z™·ŠEûˆvWÔ»-˜Þ‰qˆ°ï07HÑ D FÔHúk!üW¯¹°ñ›Ë¤®h_ÜÂa>­]æ[7wô$Ö@ì)VAÁk n¾ì5;r$n]¹(ÖÁûW‹ð†м¯÷ Mà@ëà…ïÑ#™AD¡/uí_…^ªžïL׈>¼/WAîU@ͨnìáÜ?´ÅæhïmÈÿY-Å¿ko>~zõûŸ?þC{÷þõ«wÚë÷¿þúæõ§Ÿßÿª½}ÿAûíã›üÈ€íÿŸÌ½y­E$'ÁfØÚ³o<íÿ=JÑþö7-˜26×jgj$~§-fB:!kvËgF3æšþ?ZÔ–íÙéü¥ý¾ú|æ‡W9=üï†Ùi·êf»ÝkWšíN£Öhífͨ׻M³S¯·ÍV·Ñäø¢A.þÛªšô=Ɍ͈hV¥ˆ@Š„Ž`•qôªš64#7¬Æ—¿}Lmô£ÕnFÍÀ¿õþ¼«×mSûôžT;ªu™5þ1 îí³EkßðÕÿû+m õm;{Ô‹èš?ÓŒ±føÄ³þw­:bwUw1jõ¿ÿÍ$èˆOwî ÆÞ®]ÄÌJê'æDsÜ»„zÆ çÀŠJp®Fð–Ýs@1×çt w)C­QÔÒÛÖ@»sØ—à4ET#W8 Ñ3?¦`(H—çåÊÖ·mVåÂNoÃ?™Z6—” üä8M»–d¾,&‘ O¤G`ƒé˜ØDz46‘ K¤G J%ÒKâéèÀDz "&‘‹O¤G€£é¸ôk“H…'Ò#P‰ôTl"=[Á éÐøDú2ÂRFXÊKa)#,e„¥Œ°”–2ÂRFXŽ,Â"äƒ1é`1L:\“Á¤ƒ@E0éH ‚™tØP&$$ŠI‹ bÒÁ‚B™t`¸ùŒRN‚cÒà˜t€Ž =J+:BÓ\.Ènœ€èùF0º­Î}‡ÉïÔ«ÚšàV•Ë„¢l—‘–ÝZOüªìJ<ÓjßòËÔÿ Ú 1ü2¸I" $Š&›g/-%3Ï^ÒÙçÙçðø1|0òb€9ö9D„3{Mß!Å‚¿òM GU0V|ŒÒð+ñ07Vž1@B–1@B„,­ 2LO‚tM{‰` ¸€ÚÌgîÔ©ð“|f«Ökµ›ÍšÑ™ÌhZ¬iôÚÍ‘auj‘iÚµ¶ÙÈ?‹‡øZ…ö&OQ°UõõÉ5å­—²¨ª¢kÊΫ‰š’GK*y:+PÝ5lj,@µ×G±/7!w}Õ4ÛõfóY¢3zÙnÇ‘Y¶`¤en”|í—/ñMܧqL?8îÈq'{1°½Ì˄іÚ"rë÷ŠWÃ>IÄ>‘CâÙ'z•6ï 7†F—ÌšiÖëµf·Õä¯gÒOìýö…ª&}O2#Vûã¡”ëmJ‰AÓ¼ýrûU‚Rb¯: 8%ä¨4§ÄþÆ« õv‹†UâØz%K+‘1ײ¼{Ý",¯D¦‡†â•ÈFƒóJ€¼O ¯Ø)ÆðJà@¼`¯ È+ÇDðJ @q¼h`re@ðJà@ñ¼p|xÖê$Kv_‹“ºåÉFrˆ£”½KE¡>&hä¸é¯¬ù¶|ìmo6÷\ñÜw·A%_âZ…Jô\HôÆ[ß°cŸ7ì‹çßê¤MYÒöL"S%ƒ°¹ºm|ˆë²ÐŽAÞkdˆU ÿ0ìEÏm3ΠƢA ”¨Ð&)©0Û&_*V‚[¤›Ì B 
„pl8!MH‡F  „pD9B9|(!JH€@DÀq% àà8B8.ýÚ@ `„pT !MH€ÀV0XB8´!Ay4/æåѼ<š—Góòh^ÍË£ù :šKe²#àa™ì@T&;“ÉGÅd²ÃQ1™ìxTx&;œÉŽƒÄe²#±a™ìHPp&;7ŸQÊiB™ìÙx°Lv؇BÏ`jÏ^ôg.LB;Î!*.¡°]„ í…oßÀ„öÆ:eD1 ˜äx~€cùãTZ‡b5@ ý¹øPœ4«¬”lV¹óœÕ@ Ïj -ÊjO@nV¥âa¬ÒMPÔ3Þ+…‘á"ucÿ.W¸ðd«+\îã~§JÉQñCt ¡×x /wb÷šf¯ý´¹û›³õ+ÙÍY‹efL VÀ’¶½9?ð²Ð„x» ?û ?P¢h¦ôš¦ïR!b0øvÂn‹ ?!üè±zkh5{FÃlõŒfmÜ3ºÍÞØh mÛ2[v­ÛÉ?‹|Š«'°0ü•SN5<Š9_-A;û@fJä®’PTÚ‡ÜWGPAùMîu.yùßÄ}z+OÀO,|Bð:z¬ô‹5×Þ¸wŽï¹3î,o£ Àö2'O@·#9´ÍÙ[‘¤<õ¼ÛÅ\[DÙË™²“›)þã‰!²“ø ×ÌšWî­Ù´òg]³‚(ûYz·d@~çpÁgb¥œõµ³ÂÏ–ßggLf骞fe­Ä%0¡k£8X½úZµŸn;dîØ–°Ufp … ç ØD;*bt#ň®h£,ú¿¯]éqš³3l7S=¾Ò¯Ü©7yÇîØT|ÂqÇžøÙ厦ìHÆ úÚ'Á®Ü!WÎÛ7w|ùÿ°à?|+.¯ÜÐçj3÷ü0è_¹šfhBYûZÀ?ËBñM‹§›÷÷L¬t¡ÈQîñºnœ-ãÆrýKüYá1—oÇÎÔ ïÅÕ„ïØÁ•+.Ë:±Mésß7cl„œ:»Ú[ø¶;ÜYQä@ìn9g—埉µˆdã[yÏ»¶lý›ò+dó©w¿éZîáœ: _Ùk½(¿²z }âU±be’±bIK<+ï_¥ÝltZF·S7êí^½ÑëöÚµv«UÏâÄÚûÝ 5Íùžd.¬ðGćÕØ|Xùuþï¿Z-4Ö^eÀ±aI/LI6¬ýMW9 êmÖ±õIŽ+c–¥Y°N#tä~:œ ä­cظ074`&肸ž ƒ eãcbظà H6.,0¹2`ظP l\`|DÊo°-¶eÀ¶ ØždÀ–Ì\âx{°ÀÞ06‚·Œ‰çíCãy{àÐPÞ0¢$o>˜·û’ˆÇÛÆ•áíƒ#y{°¯©ˆá¼=pX oÅÛGÅóöÀ± š·ý¬sP:ñ¥_:ñ¥_:ñmu px ÃÇðÆE1|À“0 `TÃÁðƆ3|  ‘ 8l ÃÎðqü§NçIRÎŽ"©L2ñ€T&[p.wP™ì¨·;f»û´-P'vû·Ÿ¶â€J§õÀN+h––*ƒtRa P箋0ú³þAóñÌ`ØÌü.’Äa/Ã!+%›œ$M“j£ )<ø¸¿ÍïOý–’¦ f’<ä}q™ÿ™Ïܸ´QÊ';6Éý‡+‘»|¥÷¯ôf­Y«µÍ±Ñ4­‘Ѭ٠£W7›F³=n M«>lšüƒK5‘éØÊé)"ñLä -ÀËB“àEì$-{%™(š)½&Ú»É#X8Ñ{"WDRžÛ»qOvqõUïêÔŠ¿Ê}_ã ¤–‘©hÔF ȦðR÷VÒF¯Iµ¶’!¶»Ã^Ûn˜ÆcͺÕ0¬F·k4šÃ^¯Ó¬u[µ6Á .‚ü·e¿™/}iåw jLÞÒÞælIãàx|åmÍâH.gsÌÅÞ‘Ú±õ³”í$N7Ù+^I®“¼Äæ:™•N³Q7ëñƒýv­ÖkñµÜkwº­ff²Ó¾/_(jÐ÷$Óq`ý;žt'°ÒæHw²šƒawÞêI¤;íÓT¾“üâ”ÎwÚÛv¥¡ÞbÑd<Y§dSžöO´lÎÓ^‡óôÜ»€$;e:Y’ÉNÙ¸˜d§l4x²0„Ov‚ûÖˆd'è‚8:Ù … LvBÜÀ“ ¸d'40¹2 ’p ød'8>ü$êÜþ$ö•.¨’Þ­b,?®L/%º²›H€pí«"j$ùº ·!œ‹`Îlúy˜ûÞ„Ÿ¯ƒ™5š:.û(ÞèG×­Ü© uŸEwûÜ$‡¾sÄ*û‡„žÿΙEO¬Lê.¤ij.Ig"ûDá.WÇ’.T^Ö¦½aÚoìŽ+EŽÔä^"ùüÀÎûÉ}–2UšY_?.üIdE[µÿP¥E\Ìo®ug9S‹ŸRa'qùž–Kú°1!G­™koU˜8åo¦jígÑ6ôñ ˆ…?V(‘¸a5&öíxb{íPichô=—P`”¸sç%‹»s>íÍ´Ú›êçË,}|V*%’dÜ„á<0¬ÑH8Ãýn­ÛP¸Få†ÓÀgû‹* í*ÿ—¨EtÇ׎ø{ÅöÃÂZqËî·7BDŠh„íy·ãæÁöYhˆ+Џ=Qžd5þqÀÿGÿ»†ysn±nœqh¤GE+¦ñ¿(jxlËbÎ=GfÍ.„Âö«ÕU@V¨¬Y-x<7è½±ð§ÁÅÕ^½6íêñ€-þ}õüˆ}¥Ÿó§›Yò¡µ Ê›þÛõw+ñï˜?Œ?7aᕾ\êJÇáZí0ëÎÌŠ½ê¿¢·^u5äUÏw&Žkx¢Œˆ-~tµ¯z)¬"6k’U‹Œr Ù”KØÜ–/½(œ+¶™óbD§Ómtºr™Ë“^wÞt1c¿¤úw)¥3Ñ£K+·)úÚf¿Kr£ÞmtzOû¹û×þö/æÑÛÝçœÈ FWÞ¢¥úN?3¬Ï5aÃÍ*zv¯.qþ ˜H¥®•¡+T@µŠ:Ë‘L­nÖì¯ù€»¤h±.dq?<5OŽËMßæ]f‡•gno%¸³+v\œ­¹îýV»Ó¨®¥BV|±N5dQ&h1¢iÁ`Î|Ç!°]+¦[þÐ.BÙªÕ ê T¨‚ú3ù2X¸Î×"„=ˆ.JƒB–T«Õh©Ý¤¯Õvãù ; )Ä™0ÁÄúÎhÂ@'ëümI X¨ji˜³Íéz´|ýîÃFüÆÈñKÿµô_sÂ#þÂ"¶)qðKî «ÏsÜOÚÂÇ•&_ŒyK­Ì/ÿ=¸|ÿãà×W¿¼)ÊÀÝYÓ{ë{™üFd" )µ¸I¥JzÐËzžne鋉J¤…5dYˆ¤å©ï”‡uS…½.ØIM¤ªºú9%û¾ú¡ß:…VüС¨ƒK =Û‹hË>½¾ÔËÕ^Í^üÑìߥG…Âçoã)cQÒ}fÞ»QšXè/Xy¨/´ÝŠŒmúéUü„ë×TÃà/¹äEÇö]­i(Æ „\Ç+Þ-“»â"N‡iwñF¢÷Íz­€àLÜ¿_åýò >1ÍæáýŽŸÍGHx+À—9ݱW$º…Øž;µ«´Ø0@ê‘•š}Úô‚6ÅÍ jÃ2Š|е<5Ï–•¶th4HD6[¢ Œ(Ñdž—h„c¢K4¡Ñ%ÐÀpD¹røÐpth‰F"ªD#W¢D#W¢ŽK¿6P%°ˆpTL‰F*ºD#[Á `K4¡%J4¢¶^ê!œ,õJ€önH©‡ä¤KRá\/¹€äªãGBžz/ L=$-C=„—¥’:@`¨‡äãßxÚŒa~Šc8Ž¡GhGràNˆ^Ïì˜O[”Ž#¨h;)Q‘”ˆD»DE¹Ä ‰Š¤d-é›/AT„–CÜl0Q‘Ü€€ýimIãËZÜ\ò &*ÊÙX¢-B¶Ë“° çOzÀ‰Šr͵º%OÈ+%N@ÉÍITDØ y¢"¢FÐ5Œ„¨ˆ¨-¹ˆŠÈÇãD‰ŠrõÚaV@T”¿Mh¢¢ü"a¯Õ)”úA¹É×êd¢áDE$2—'½þP/ÆOHÿvA¨‡È51÷Ó;==¹s‹¡"Ÿ]ÌK•08ÊÐ*àË9A©‡H¤šzˆhÈ ÔCD¢àÔC$3©‡ˆº•M=D"B=DÔ#õ‰¨‚ú£"†¥"ÓÁ %•M=trgfyê!²³2ˆz¨<­å·Òû°ÀœÒ-ý׌Ð6 Á–f›ÊA=tìB=tBæ O=D&N=D%2~t¥¢”ºåÕfæƒMrù²ÔC” Y"iY†–ó¸©pê!2'F=T^æáe˜zˆL2Šz¨\íåÑì%Í^ðzˆ~þ0ÔCdÒ1ÔCßô¡^ ²"cK@=$-J=tÄa:ê¡ÜM²,JÔC$2‰©‡ò¶iYŸÇ˜àÔCt°|™cš‘Ì"k»Àšm³×klϨz<”ÁˆŠ¶cš¨(w«öÏsnx¢¢\Böç¡ ‘Bå— ÛBi”Ÿˆ¨(¿Î«Ó>%È×䨴í¤C£A"²Ñ9ùƒÀ±5ŸOA¬xß& ã4L}î³ “Ž»–áIØZÁÄ%w£ÞYSgDÞÖu R¯Zì/VDTM,…ƒÜ.£”Ï„¸¹ƒ`ʘð˜Z” ¡3cü#‚´¡^˛ϔä4!ºÃ× É¦ºæ1¦ô.ÑÍ|åϺ~ÐroõÆ‹ˆgq’zNùÂß"Ãû•ñ3Ÿ¹6 ”žôôJv,Ÿ€ÛþÕ¾pFWzÿJoÖšµZÛMÓÍšÝ0zu³i4ÛãÆÐ´êæÉ?¸ŒÚ¤¤Iô¨ ÆŽë„;"Ží¦S¥ \è—˜R)[iÅJ\'S(È'•o¹òñÃ0Kåg©6@©á~$²*FÚ&ŸÕÃòã§¼' ½•*s¬lµ@9®òŽr‡`MŒ÷Š&ò[IJLºSm$l7ÏVQ xÂ¥Ð2(Žœ+µ;J 594Z¬«ÈùÃqöXâñ*^èÂNwœ‚S ÄôùuÑ"/Óé¥ÇÜûb…h¾Vvl9 žØù3Ç¢4¿ð£‹òøãaÛp€)òæ¸ËQ¸õx´—«G­‘Ñ‘‹4™ë|êb®=é=€ø•})Lp1’ÔK)¡Üô'Ö˜ÛJ§¢t*NÉ©H“Ç^üf¶‘TÆWm”TvµJ»ŠÖñ§×—ßÒ÷4Ïî»\á’×òí^Þ[žIÊ3É7q&yÆÆXžP^ò 
å´m×µÔµò4\ž†Ÿ†“ –ò$L)1¦hzñÛûJ‰Ö¸›$i2Ü.¤V£Þ¬5÷eÂIÖvà"NO*$ƒy®È…l¼“ž¯Sm- m@qÒŠ‘TÐüÁÈä"1aDJáÛh‘TG ·ÔaDz³| ]F|Y!8ŒXhš0"¥XÙ0¢â6`ÇÀ„_‚ߨI »¾%Óˆê¼,³½ÆWV¤Ù>°šÃ^ð”Ú]`ÌòÝåRÔJP¬ |æGnPØ&{O¡å‡EŠ ì6ZL™ÿk1&©E‰ è‘6%â.œ¾3—-T¾'Fîa6cèÃÉås*%x»J…ZqAŸº>Ež¤`‹<ÉW)'ZŽd!dÄË„Ÿ|Ë œ”´¶8¹ñîZœÌYü4¨¨ûD+HÌúˮՒ.à‚ Bª—Ïg.lE_&L¢Ž;)×t¹¦Ë5}jçàsU.Œ7ŒÊ±Œ~Z'W—˜ ‘?µóó½f«Û~Ú>Z2âí2`c°xdýPŒÀÈvŒ6„‘Ë¢E$ŒõH‡8¿ˆ«‹P3ßHE,ï@‹ÃômzTŠ+¶lÄBvUAŒ(í¹¦é;¤¾Ÿ`lU™ÅI æV2™ñ;+#dSÆEúdRžWøL6‹´ª·ÏZòŠãŽ}«òÅ í›ê]ZçÓ1…ÈŸHg®cÔNÔ'H¹…—º®Sk¹¦UÍVÓlS­­dˆM> -³Ö3jÖ¨k4ld ëCÓ`õQÃ2{5ÛªÛù‡˜`‹Ô!ŒÊðîï. Ñ®ÕȦpåĘd[‹C˜TM^/A¶°¸GÃmß;û»Œå#‹©Kx/OHÏ÷XÂ#3GK*ÅVïÀ)ªV‡Œ'½*š¡·jÿ¡“;êOªdDB耮¹zrº×?l øQi´âÒô@/Ïг¿­È[fÕK¼˜©³Y”éŒÇ΢˜²äÆÄ¾Oìq¯*iäl+A$7a8 k4>éED†¡Ÿ+”NCT¾xJÅ'þ^±ýP¹ô[v¿]8ÿ…Rá¶çÝ:,I 0ÆÎ”]Η¬ÐJ &N Œ¢p>}þÛÕMZpáEý´Ü(y<²”…´ÿVP=§i’ÿœÓÖ…Í¢‹›aäü@Y!ÚïR§+¥âòsþ&ùlL‡D”ÀܳXi¢(Q9Y“¸•á›0äõõ8cJH˜ÄŠÌ‡'YñdÁÂVH…(™”,LÚò¨ì„%ˆ[ÈVN ¹9ðÍ•_+ãp$WW‹Â½%k¿(¹Ox€ŽÖB¬R<ãm–+² /½NíÓïgìÖ|ܪFoý)îÑÙ`üÛ§õÆ·pþgòç´Gü}‘[ *ƒljr3âÄt×'ðÜYãÙ^¢/܆;žÇ¤P#Ùu-»e4©Õ™÷ý±?{hf†ûÀ{KõI-îÅŠ ÷1zÓ„§D¤ [ÛŒ¨ïŠ Yª*¸)=Ž@‚ë?úÒ›þW‘B(ûZSÀ¥–­: –eXG^Hâ'é—x¤¤I¿×°BÚ:¹Þ™Õ;³Còî‰M¬ÌŸ/^Ó×¾|í˯¬Ð“Z¡Kÿ*¿pöÿÛxp&ããZEÓD4Gk~rk°’_F´bÏ•RVÔã»î,A‰pi.ï}&7ii¾”ä7fÉ ¯ÀR¨„eñgqÙ©ý^ö¬yõàáN‰-Wþâ ÈÛðl[7Ôö³k»Èæ/!oNù…`nNQkØœòË…lN¹¥ì5*¥y6§å7§¥.O¿aYÝLU6>k·zEK]ÉN2‹æž8‚|‹ŽŠ[OäuF^Íö Ÿ§'poýhŸO ô4Ï“' ó4·Cq¡øºDhjO9@ž”—w¤P‘ûÒ7*²ÀÇd øë½˜t3G«ÄÆB^ʬËëL殊[¯ä5ÆïÒWž¸ë‚¿” p—i5;ƒnù™#ÒQ=žg&-?žyÌñ²MÐL­Áë®ü_X‹·Üt¬>Ž…o͵SwGiÅÃÝf¡¡áÔ«< ÂÌ–Ù(l™%¯Öâùá¸sTÞH븙Dü_ò±c%R©å×­³4½X ™tç,<‰lÖüÉ|"eL É Ý~È £}”KÅå u« Ø{ß\Åå·Î8F‡Ï9! Nc<óЏtÿ¥&!re{åͤFº8ŶV¿åZC÷F–üXš>²å œb5M;l nf]Gsak…ZX>°5)t+oäHÕÀ™ù³Ø!7󙢡?#/ÕÊûJ¥ø¿ÍñMXé볫_¥ßW­Ô•ÒE^Ê¢¾±yν˜ô©ÔfwÄ%<‡RFtx+ýGñm¥ö_†:™9ñ½4ufw]©NgMº0ÝhFáØ‹×4iѸ9qü ñ0w5Õäì[sy?òÜ1 ÒߨµèJ“Ä?JY ý¹?&÷íXš…ÒJ&‹äÒ#ù˜S¼Mü%w<xÖ6÷ØëeCBÛ. ÃH*"¤W?IógÞäÕ·º>éJ'[W—“Sé$¹1CJýþXþñ"H^à$»ˆ’ü.¹ºAË]Ÿ_Ð3'y³ÞJ¹åíòÇ—TØ«´w’žÙª¡O~ªQاƒ×Õ¸Õ–^| ãÞϧҜ®jû«qzQ>•¬ÎdÜøC—œX¢Ô+Ç9…ÌÉêÿ“ôæêúìç÷ï®þ.½ÿx~ö^:ÿøáÛóëw?Ho?^JŸ®Þ\R†FUµ[äGæÿ¾9—š}?hÆw’âJ'ž{Jÿ÷(Eúá){ÞTROÄHü›4ŸPelµ¨¬É=I™Jò¿¤¤.kªÑÈÿ2›L¥ÉEù½…óòwEkÙ–®‘öé Û4,C3”Ži*ZÛ64M·5]ïXªFði…\ø·_‰ªÒ(#òÂjÿ)™õ©Ädþné&—Ò•»I4%ÿeoõ³Ç:5“Û'Óñ|ä“ϳ¶4IÓ½½øtÍ0–É‹lw_3véoïâIÿ—ø×QÕ¡yF¿âŒÇŸ{ˆ }6AÝ;ÒÒü?¿âV^lWˆ·[Íy¥üVɧӇÙ]ÇÞªh")CI‰Çú')¹uIy-%ý§4„†è“ðž8Ecn©Y.Ê#»wN0ò(¡õFÊÊmçd?XÊå ?ʇªS«ÖË^`!Å«©ß£Ty ¤£0Ðû ðÔ ½;'¾Ã¤Uää~&¦ëô(9"nߺcß fâ€Ñ•Á-.‘!ƒúÁÜë…AÏ‹¢„isèÈÎÓ|´¹Ëù4 †]¤pH÷H˜Ð¬ôÇð ' c(×j~ó¯Ñ'Mh†ôíRò›`”˜æ<žÁKÝ›Z¢°ð=óu)œï™sèp¨”_|,Jæ=á¸#­Î!žÏ11EÃ{‚éYÝ!Þb}ÓÆ/iÊøN¯„„'¯åD=²J¸+Ïw‰iíV[żˆFÜ}h†cN¶ÂÕtcÏ'ÉØƒ…ã-Ò§oeJ‚=#»Åñ˜É5‡ÓÜiÛëÖ³cÃ4±f}G陲×ËL+buïüÁÀ z ïrOÐ.¢?™’GЮHÌcqøó8I<‰ˆžmn0ç}/»sŒŠ›xÞ½Ü(#wòÄ#k]/»‚ˆ‹?7V}PLذ—r±£¢N8þFÈFçƒÏz"´LD'PÈÞç âQ¿ï~»‘çþàFîÞȦjªª­ SsŠ©º†ÒÑ5S1í¡Ñ×½oj¤àRà¨áÇOE,B"í+øI¾ÉÀ_GÎpè»íì7qj‡ù^™iCˆ6'Aù›"€žè÷õùE¦¾ÂêPIC³!+–Ká’²ƒŽ*$=xT míÜCMc×Ä®q«ç!"¥$T}gÃ䢈`û–]¤0È˃>9O7 t$åÄÿ>»x§dÔ;!ú¡9qð JùFàóùIåV&,óóAžÊ×Ûôz›Žµ‹†Ä—dc¿V£cÙ3‹AÏÝ®£¤¦·6邯Üâ^Â~Ÿmß=6$)Ïc"Gç^åžj$iŠÃ…Ž­æÅ)ù‹[@Õ_“œûI›XGßëTÎ/¼¯h•i™fkne]ì8Z_ÓÄØ·:éX§­ôõ£8Žn ­¶­ ¯|#,V¸×Pe7Íêôî‚v‚¦6t­Õ°š¥bõo!×l­×õ 4v[t1Ï …Õ³þô­3ñÇ>ß‚p!ýÛ¶âÍ65ói¶ŒÃ»‹ÅîÿÍhàþg†Éûåá±ã¯ˆÃ<ö®fŽ{/# aº¶Í ·w!!q]Ó†K`½žÍ ¹– ‡]ÇÁ/j"°^»5‘ã* é* ³Ð%Zo? 
·ÊÂÀÃ289‡ùy±`—ÆEh9cÖöfŽCgð³3vÈ>'ÚÔ`ke)þos|VúâýÙ?¥Ë7çgÛ)!?\­,XikŸÛÚî#Åà+ͤ¤TFøJ¥%æAä9ä“þØK?:þ8ÿeÆßûÓ)ùL“2²)pçy!„Qö‘ý +­?¾g6L­c›–e­ð´xQD¾÷M{ ,}Ćò©LÝ yÄgò»q8JbUÙ#æSyÈ¿.=g …ý?ˆõ—†dý—\ÒÃÄ›)¹ûM~_fÿºL9Ýå.MÉ{*§éˆå&ÙÊ7Z³qœÑŽfz³(#™Þ–$êT&›·>%ÿ8»xwyCÿ+ù„ÈH?ù…ò(Ò›½$/xŒ²l ²È—E³²ÜUÄב¯æýÇæÊ -±»ªNâØý¾‚´ó·ËåæÖu[m«ªyìœ|â¤é^Ÿ tÅ#»VÖ^Ò§ëE·¥e©–nlØ–ÁJ+Û¤³” Vº¦ ÞW³ÁGò e÷Ȫn¤Ö?§7©=É Ã{ß“Rý¿•x×–ÒKç‚0Ú[ß*Æ’çg«p[×ÛŽ ¡!áùj0ë‘5x$¼ÎLÊïÓ±óÐ'C+%Ýžäã¾%¿M÷‘Op×óÇJ”Ù5‰G‹û|VnŽŸJ¿úÁàÕzèVNë™ú½r^;JSJ+ØK*HJýRûC–aÒ[jG³LÒ ÝÔmò»7áµ3s«–a·BËÛL€¡éfKk3ªÍ–ÒÕ˜”ªî2k5¤K2÷Û˜ôÒ&ÀÖv˜Ñc‰h ¢ÒZ«s€& ” °MSm_œü¿mvÔŽEcج&à¹Bo³mòŸ­š›îl+]‰À©*»È^'ž¥îÕ6‹“^ÚX;ì€ð±Ä³m«£µ;-«v„º»z£E°f«cšjrUoôc£g5lÕ"¾~Çb±;J‹·îhU­ÀËÛ!]œ—WÅX"Y÷DxÛд’ ©­{9/ ˆ¦·4«¥2êζҕØœªÂìÀÏd‰†Ár)î^ÞØÛí€ð±Ä³D¸eš†y€vàûÞíe ½ÑØ Ë¶h"Ç6KÀ`Giñæ­ª™=øÅ›=é¬ó„÷7g*½ ~/`ÅoÏKö4±m1:?Ïɇo)ÉØM0‹ˆÚ$ĺ7$)UÖ®“²ÞŒ~"Iép“öžÐåŽ*r’pU7N–iÑÌn§Xô'ÇKÌe|Pîãó$ÑÙ¹—d}>i¦à̈́ĭIùç.ùÍjÑ_½‡-%ïÉo²’Îf@×Y»J¸?dF`cÅ‹’4ª¿µ$ÅÎZ­H²^gTæôãø¶ìóyö…ŸØwÏæÉøÑN!&£éïqråwã ã9sƒqÎÞ-×í]å™õGlX žbŽÑFA*R¡¤Î«í8 §½…“<¢ë}ÉQeÍFʈ–ÔÅÈ|«SNÁø“ã›h®0ÒnŠo©ªi[†ÍVÕm¥+ñÚqªºqÿÚ›ŽÃ‡u7žÿÖÆé¥7ñk»Ú KTµkY–ÞâÝÄÛõ&~ç&~“BoÙ¸ÛMkëjûÇâû¬î¨qÑ ÛÖÙV‘­¥«0ÍT¸eÛZ‡¹ªKW_Ý!]\|5J£–ì´±´xÓÜ"¦Ù°;†eÙ¼æ uGµ…›7A[£ Ïp«¸ãøVùâpw ‚±ùÔ·Õ°[ŽÞj[:ƒï(-Þ¡U53G|®!P–øXî~á|»KF\ÆÝ%#š,B×Ù“× + Ï„ƒ9i´bÀ<Ü»÷ö%ìà]0Å—ÁÀS/-¾Ã¤Uä$m &¦ë$i0pûÖM¯â€Ñ•Ø€™÷u†êsºÀ ä#SNF&ìà…{OèŽX¼Ô/‡sð×ß%âäb?xág8x᜽˜/%ª?xám¤%Úšjž˜-Õ<›)Õ<’j I5…ÞŸjˆN5_Ÿ!Õ<!Õ<‘5Õ<–jΜjˆ‹?7XSÍCaÙRÍQSÍCQ!©æ¡Ø:j K5_okêmM½­©·5õ¶¦ÞÖ”Íô …ß›é ÈšéˆË˜éˆÊ˜éˆÊ˜é™•)Ó3›%Ó3$s¦gì½™žy@Y2=³ã–3%˲Ça»/[¸/[VÓPæ”ÖŒx{SZºN€£Žë ×Žy혛cŽà\ qÄñpÖÜÏXŽkZh¸c¼%áÉ7UJÊÎ|S|ÈY6$!ÛAÆt’%Á‡ÝMKÓ·%º4 `ËôUºƒØ3þ–%|°ŸgüµÚ¥ñÝ sÐê+Ór”ŽÝo·ûŽ¥ª{OÆßrU g¨ˆÈ#Á—Ž‘S$w§ˆí¹»:Ÿ-Xt‹{ªõQ:Þl^@)ìÙ¼Ì=áÉæÅ)”ÍKT8[`–jŠeQø^'÷æl^6–þä]¬ Û­ítÍv³¥ •¾eõÝtº£«£íÉ/ãþ6Ç7Ñ^2 ½Åi©µÕ2ViwTuké*] Uµ Nâ«x‡±O8ˆS‡ ®$§»N4ÆÞ»qƒ"Ö¡Ö:ÔzH¡V¨þ.'$ô g »pÍÞmNoñ´»â®Ïðh£ •G)‡PRgy^=2á2¿zdBÛ{ÌËì ”8æeîos|ÍÆóÚ5Ã6 £c°Uu[éJ¼vœªVÀ©³Cº8N*ÆUí¬¶f™8Ä 5 7§N»A`–j´¹SÒ´Dc§/tè$»+ÏF¢± c3‰Q »¥¶Éªd´ZÄä2âû”cL†ÙI±²2‡ö(F¼®´eµc¥Ž|+ëµ/4c—ñûÛ´Â2-³­i˳£´x­ªèÔ*;d‰éíηÉ`ÄeÜd0¢1Q«°a¨UØÔ* lÔ*``fj8ò~j &µ ”™Z…]بU8@AÔ*@|¦7ˆ0LXü{OŽXÇßëøûáÄßáú»Dœ\ìñwp†ø;çìÅŒ¿—¨<þÎ1Ú(HK´„•Z…˜ZˆÍD­Ä„P«¡!Ô*PèýÔ*@D0µJ |j :µ ‘•Zˆ £V‚3S«qñç+µ –ZˆÊH­E…P«@±t€Z £V©·5õ¶¦ÞÖÔÛšz[SokÊR«@á÷R«@Y©U€¸ŒÔ*@TFj *#µ '*µ ›…Z…’™Z…{/µ ( µ ;n9S²,{¶ûÎà ŒûÎ]5 e¦VaÄÛK­è:Ž:®ƒ^;æµc~lŽ9‚s%ÄÇwÀ©Utõ3–£ÅJ­w¬‘·$C <Ô*¥¤ì¤VáCf£VáÇf`Û( ¤Vá—& lÔ*¥;ˆZ¥œ(áƒýœZ۵iÙCÅí÷Å´m[iwô–ÒvÌVÇP5Ýè´÷P«”«’=CED >jNYjN[©UèZy`Á¢[ÜS­ˆÒñR«¥°S«˜{ÂC­Â)D­"*Œ-0ãý`Y¾×ɽ™Z…´+š³× <[³ZJKµûŠi¨ŽÒ阆b¨CÕ0m×m-ù¥#cÜßæø&ÚK¤×rí†m©f[o6SU·–®âÑnš®·X«º¹´€GT–m[v›ÿÑÅãKæ×û¤ò¾¶`Âe~mÁ„ÆøÚ‚ ôÚ‚ôÚ ÊúÚ xmEfymÂd}m¼¶àFWÖ×`Pàk >ãµ$æ§7{Љ4;ÖœÛ×…éÅDg Ù@QYI/מkâKç¹:¶D‹xê¹øã0Âq¤ãמ3ûwE¯Ä&qn[U‘{=ò’3' ®¡C/|:Ëþîdz0zxïO’ó{ » ù­^ôÈ"Ndüž^Ž…ÈW¾b®ˆ™_ë4u…Ô£èX¢".±iFŸ$ŒÄ(RDúÕFY Q˜*Mœ¯Wóh”XQKýwQZDÄ| œ…ã² È…á+ ~õå좾|¹6 ­™+ËD˜8QÅúó qö³júx%…¨•<è¥p°†+“ѰgZ-·/´²/hôñ ¿(Ë Ä[Û/9Ä‹v ”kqS,fièãw¡RIŠâLþœöæÑXàÌxl‘ÕmD;>Ù0GdÄâZ6žíFñÂm¸ãyL 5’ø[ײ[F“N#ò…A3›VA8ó‡UT_Q’+lÕˆ¢÷§{S/òÃAmµšfE}· Aš¥ªµÈ ç¨Á‡­¢*jÏèKoø_«öìgU:W¢ƒ¶Ñ6e¡rnÅ6Cö'NºËùsN«ÃfrbX7«ùƒ‘×¥^s,z ‘³T¨hi‹p<Ÿx¿Ñ¹]ÍRüM¸„ìp…´(åÉKç_3aýé@r±[ø‘,\ðRì䆾<­=×½f<{\^Å2åÍÜüÁCóùUÔ£¶ðiž‹ïƼåVæ·ö.>¾î}8ûíMUnáŒçÞÛ(œ Ál¾7~é +• ½w/D~Òò|)ËãkD*«È²IËc_)_ÖM¥öºb'5“Z{©µ—*½ ·÷¢Ã¶ê5U>~kgUI-Þ{¤%ÚCvÖu`VÖõ-Ø[X×·aØF‡üù´~Lí[¿]ž£ ãh‡A³p´ƒ98Ú¹ñ™8ÚAèLí0DvŽv.”£àháâÏ vŽv,+G;•™£† ãh‡a èG;ÊÑç} } Cg}ºBE É'ó1$HØq,XnÏÃ{‚ÿ1$\ócHhžÇp9¬!ÁÈÐÇ\8òðËBzÃU%*âÿ³w­ÍmKö‡ì—)TmlßH‚orשÕulßTœHe+Ùld. 
ID Àà!Y×Åýí;3(âctƒ’LW+"ØgÝ==ƒ>= hE’$CS¤\dÈâÓ›‹ Y &'²˜ÂÀ7¿òàš)K†,èâ°"ŠœdÈÂò«ö¡‡ C–lì>¼ãÇò (á\2d©¹Æ31é”òÒ(¹È0h#CB – Jš ¸ Õ­½dH 2$T$È0PõGŠ –“ §ƒA%:¸— Yç ·€dÈòmÉC†,–'ÍD]¾¢#I3‡Ιå¼À5>4é‹“cä B†Z¦Š“!½‡— C>%÷–› -M†ƒÌE†E-@†„Ç/H†mÈ¢¤ÅS_)¦J“!á‚T)2ä1J=F©¸ÑÞA§Mž ‰09Èpè9ÈGÍ!CÂkŽ<òÛÞ¢¢H¾ÂÊÇ(K†,-I†|Ì›zy2$`UÁütY¤É0 ÐdÈÒ­ZTÆË!¿QM,@†,*ÇJƒÑý²dÈòº~Ô:2äSV»ÂdÈǬ}8äcϧ…˱Ï+é|r›žÖìvšëÍÙºùùMüS_l‹êéõl{é-9Äû&°À0N Uæ> örMäåfrN[ËÙ~²2y©7†c[àmM¥êqà*ûÖdˆýäÕ|"9ÌJ9SÀÍÕ‡RÕw …òûÀÙ#œÒl”–»(%aQ®cñe {–`©ndv5ëW™*íጆSOÛ¢}·ÃKÉc.‹mÚ`Ƭ¯ONp^>ŸœTÙ¬{y{2\× ”˜#TÎh!gBû. iÀße¥/>:š¸r.,üCqÁü ["g‚ðPjã¨õ2©†$tf¸Æ„ZïÒª—`“ §¨Æ÷ð­zp‘ðwâAù …ÉR,}nø0¼[¦Œé˜úÔ5i€%àjû_Ë\¿~V"Ûú¬ ?+~«Óë7¨ÚìZ–Ú¶z#uÐîê ;ê÷GF§Ñ°ºŸ•…hJ“à¥"ŒÓ6KÛNSÆRšá+£¢l¤.#"fÏ(’ ×ræÊÆ/×µX%(uÜ÷ÔÚjÐV¶Hî Éez’n±Ü1šµÈ²nËŽz@Á’câ–FªÌ,“nÅäߊÀ¶3«jÀ›Ñ3à<ÕøÔ(ƒ³Bì­À®D8Ì5NÄÃ1ç(‰xÑÁ+5ìÔ¸câF…ˆiÒnÕç‘ãœ{,À½«¼B÷µŒ°cÏYñĆԟٮ8¥ù™m]øÇ‰}‡mæ|-=»ÏQ¹÷¸÷—Ë,Ná4yÞv•.ó`OCÌLëà—þ¥2àjðQª(Wã‰$¯ìP@"ÆTÆgïd—J”á8~Kn¶â8*»œË2<ÁÁ6ÞIϳ$ÓE¥ xv«ÙquÜ_÷WÇýÀþ*K“;n´žóFëÀŠ–eòíˆt3DæoHÍçUÇóª'q^…û&=ëÂrƒÊ¢Å8L ?¬20§ÔŠêÿR ÉMfäÛáÝÏ é—°*Ð,¯þ˜Õ pfËóÞ7Lz.jóâ'ä=Øï<îÌ’ó¢Gã[AºýN³µÞ›ä t…v,³xo†uœ& "ʱ]Á+Ôt1ŠküìY´zð–ïžH ¥‚1\Ÿ¶Âˆ>ú£8úoÒ¬ÔkyÎ~@ºÐÑ0Ë€»ÕšdõËÈstËÒaÏÙÈe{¢zƒŠ€'G2¼TX‰À˜p²¹r+gsêžžÿ¨¾¹ßž¯cyó{N¹ò«LɅܼÇVn<à$ÀúÌBÌõÜ …˜ëQ$˜ëEŒ2Q­¨+á¹™9ŠX”ñ…èk¢÷×Íøúdœò †JñêUQ€©PËŠHO]ŸD$ɹ´"’ÚôѦŸÚ>ø+„ñF¢(©õ>[bì+^Ž";%P¢ûê&«ÞÀZûvÏ.øŽ„¿Œy±ê«éGOï#ˆF)ÝA¤Ž}ÕÓ<&‘tÓw™»¤/H·Ìd2ׯ^‡ByX!ïòiœPe‡¼&.“«Å·ÉÝ4¡u>-|ú‰C¾eyƒŒlÑ'™2‚ÏÕ®So™Ñªv§=€ÒŸtˆ{í~÷:j»e¶Õv‡jêÀì³§ƒÁxÐn¶Æí^ù!X"©JAÒÝß^±Ûh€Má2ˆÑÀDn,z¨A59[â̰XDÃ|ß¹ªf<ï<çe\¢—µb^;<á#sG (Å­J™K1òÕ ,I/‹A*Æ¿+àúZõGò_Ç)É~]ù¸2àJä+9æs=X¯Ã6/ß›CnÇmVå:cç±µcž+áÔÙd<š¶;=s„ÒÈøxØÃH‡mÝÛL¶ ã%ø"½ŸLJ]â½dPUcö×\|GA|•Á1†õzª×¶RŸÍˆË"¾Úƒ=|-¸1k¦졚㙆3ìt{­:7ö«ž˜…ë…öø³Ùª:bø×¸ü@JŸ J&P·Û dbhlË܃˜ ˆÜþÉ­¹öLN4¯#œêmÕ©U§º­~[A‘Ÿÿ,FŠ}¨¤¥‘”¿"~¾çÕÅ_|ÂÇ–¸Úï³Á6ð~Këµ;ë}Ú£ø›¿$Ýâ•ÊJÊ©Ã4'ÀLã»x,°PrTP*²µ^’–қzæcÕñ&Ju­XNå;ÛAâµI—(({ƒ·R’ã`¦Z…ð HöÀ$o"/¸@‘|.ÁDqÔF*æ/5eJz)8ž®^Jþ®qXqQ!Ž:^!ù¡¸:ë“u?©7È]Åv@ÒåFKCÝ'¶'5>ÑÑ6¼sÝG€Ã=M—’ô€±&f½ T„ÅS[¡0Ãåí>÷£qxÿüö¤{Ljý±#vĈýäyLK6¬l~VÞ a£fjY†~D±G#²U…«Òˆ•2^Çm6l;ƒ,%[¾Qy¿azgûà±”²^¸'‚RÖ‹5._/?GÌ[«±ÄZeÆý+¢³2º#á”­¢I$mÓbÛ4 Zƒ†6Ô¨-OïÐrÊ|fôä‚OͧÐ:cß(¨QƒnOë7¤›ºñé‹ÓO?‘ËrƒyEþ¶ü“ýß.µÿl5fBMçF8’º7ëF O‡ésÞõkM#D±„Äï²È˜©yMDºWºÈ$×Ìñ¤4@ÄtÌ<ž-AâWbYc—/êQà׃©á/!ÙZGÝ`jÃeú–H#ó»pê¹)<7>¾ âÝåÈÂëŽ=ªÇϵjZ³°•@e^ûšm̃´YMIØÕ>ÛŽìÚ^K¸6n£ÌÇ%F¿P3 ¹#Ù20#Û­¯k_ ¼d.nâ¼~Ň»Ö!/yärÂL×$¤E˜ìö 2Ø?¯Èåû7oˆ¦Õ:µ†ø¬ÑÓäåG¦Sÿ0Âäµùꊼ\ky2¥¯J´øOÛýÓXmp­]BÓ4ÎÎa’.v¥îô+?´Øj°Ä²¦]% aÈq¢9±]î=ÿŽÌˆm²ŠkÝϰ¸ÐŒ1¤ME¶ÃÉØ÷f¤0_ZuÇBæÌM^ýËbÞÆŒs«S}Žëbz~=ë;–Šzv‘x¸Ô·¯Ëq«óf|flÖ0ÛÇãs#àÿp³Çw:Ÿ­—¯:õ¬âÓfúöü›í½…Þ·Úw~tœ´ˆ,ÃLÂÆc%æ,$úœ $ÏÃã½'й&äÖf¡„íÚ‰g*ÜC¤Ø>÷HL]^òC™a4¯Ö=Tíºè|ëÉÇ6s±ˆ `¦ŸAzÈ5ŒØCî ÒõìïîEÕ¹¨ª†kËHáðÅ Äpœ ÓË_º>oLx£’ï›&‹c“Éóד`§ž"¥~ª6¿o½qûE·ì€‡Ùp=ÐלÖ—Yþ-Ó«OüÀa¥[/2’_œðõï–Ãጛ»81ÀH¥¤ß©AÂ'Suh‘ñ`8‡€ö\Ê#<è|Oó·¿Ÿý4$žÕèô4³A{Ýv³oõ»½^»70hßw5«OJ¸íŽ]¼8›ÀpˆáO"ž¢ ÊÌ}ïOæ5ëû†¢°>°`rÄûz7$Å7©IÑ86^CÓÂrFÔôfTCZ&*²Š‡D¡1a3öòSÛ'Å÷óËØ‘Ë:`([¦ôKèú Çy'þ»ú^P÷¦.ÁÛP¦'cÏ¿fè ¾¶ð-Á³2æü–\.7xWéD[7¾V}Ï6­|¿äÏâHcħ”$*·w)¯Ö”|­Ú‚ ˜Ï»ŽæÄËüMÒpEC^¿#koŸ°¸8K]Ž 3Üã+#.­÷¢O|/šïÎï••'dîÈË%‡µÍ§[”Ig*Ê'£f{pÒç”,á›èFÌÝÐ6QzÏ„{¾ý/$Ù^ÀÔžgø‰ñ…JgûØc>®zΘë¸eë+O…< ×@Ò|¶ ©aª¿Iƒmˆ¡ËšÙ‚ø‚êlfQ—Ã@kGB^‡×Óó|+!^Ck´gAO£e¦Ç6MwÀrÇŽw›\ìVcÓ(rp}„U£¶|UŽæ1x¨c¡H…µP${|XP$' ÙE^h Hf‘Zˆ3Òiý áé=$(Â#fè8*’œêÖÒm¼)ðø_38×Í3pŒ'i6Ž¿2g´Æ#©ôMØ58qÞ(-ŸÏÄ¢°ÀÙd"¾(-Gn¦‚Û…£4y…j4ì”_º1÷™Î Â¤GYh1¾XF“Ðö®†j½¬ñœƒ… ÂGÇtmö¯9ëݰÉÛ^^Û#fUHz33Ì©íâ˜W";MøÄ gžk3=áJ¿f„#·ÝŸz£x³kD˜±G`°°Œ"{e`)îFr gQŸ°U ‚e!Ùµ²9ö˜Žÿþ‚*j‰÷+µ‰N£$Èܘ-ï¹ú¢Â®’Ž7 B#˜¢yëy€%Úž YÅtã´ÄÊNˆú,¸ªYôö</–b*b^ÇËb…3§·S±‰wg(±ŽxmÊÂÁÈ #~Ï’M¨å<§Ô™!y²åe0¥ÙòÆZ)-þª”„E¹þ) M’1¯˜RXØû­úÆ7Õç,r IàÍ(±Ë,óÚzÏ{÷<èçÛÞTo2èôzÍõ.È¿×îíÍ‹¾YÞ%,^š87wÏûß}B„ lbùÍðmNsÚqKIÎÃk†ÀKnœƒÅÿ#.#—n&V*8ò!9Ì]¤¹K]®V\öJ(ßÙé£h<¦¾.jɾK5 ËìQ&^ܯ$>.Ò›êäÀð‚>Arëtpr½y:Ò`Bù Ÿz2‰÷Ý}?E.éqŽ&´=¥”ˆ«Âß^äÿf¾§7.œã´RgƒFŒ8²,ºoéeÙ*ÝG´®æËØ’Ò;>îúòÒo]Ì€0¹»•J5`QÎnr¢OÁâA *¶¦ö¼m ÷œV½Bøh%íàì4 
§lkæ}¹“ZdÏ|-é1úœÏ‚þà~½å¡KÝóí‰íªâiU<}´‚ÒVðfJÍk2öD « Œ©Ã޵͋­ðªÜ; ¢b+h5 mÅìì] ù¯¥)~OÞ~º8ýû‡?ýƒ|8{sú¼9ûå—·o.~<û…¼;ûH~ýôö#Û¢± `·ÇþÓ€Áüýí"Ê›S¢šä5§ù¿{òÝw$p(“Æ Ä—$šqèõ8ÖìšÍ QçDù'mÙÌKg[(òOeùüÞ‡—lö³ªõº¦Æú׬5;fK4{j³1è5ûšÖt]­Å¤óæ©y¿û§9¯@æâѨ~\u&©¤$ŠF” X²¬“^ÕÓ†îã#Ë*ðù¯+¾ cd7FÀ?¥#{<è´»äâ P7ê¢X™á8èÐ}}`¬æ” +‰þã dûÑÇ߉m®õ :æÏˆ:&ª:ßßq¹®9i~ÿÐ ”Èvnëc/r­ä>Oظ°¤4Û½IЬÁÄܱ»Ô Ä/Gðšò ù\ŸÀ ½‘¼Ì=—`ÓÐÅI ­5›×eF >¼æò¦H@¡c›:–P¾j{¾ e^‰žÚ–E]] °­æŽV¢=ã9'žËv4÷üT$ùœ@+ýÚ¾Pù´ïÆ«|R£Q’é ÛZÇQGç\™ë«±’ªÏdS½x />o^˜îÖó-`©Üù¡Ž¡4ƒ α¦Ô°¨lîÜÄ]”zc86¿ÛBïb•}kØ!L´"R_^èß\]„¦LnRhhÏ({„_Òl”–»8艧Oãê‰H² \ìùF”È,Ë“Tr±nèË«•ŸÿG»½øý—ßGoÍМýò»y1½¦³/çß{¯àÅ£ ØÒfÀ3k¡‘Þ„Æl¾ñZ®†öP7’ÄkÎk³Þ¥áé%˜h8[©Ânn‚‹äê¬xl~CDb²‡c# Á5õFmÏ­ê¥ÆÃNè멬(NcŽwëRÿ#SŸºñš‡WÉìÇŽ|øõ³ÙÖgeøYi4f«ÕW¶e©íŽhÇPÍ>Ãé÷f·Ûý¬,ð´¤|LS½þ%7"ŒB›“u‰(gsêžžÿ¨&÷úy>†#Žé_ɪßôˆn»m“/ë ppÓ|nTx2QÝéD¬@Üî ¥vŽÄ°„ed¾rM Ý€g´­Ýivö—¹Ö¦ýža©Ín³«¶ûš¦V£¥vŒA³cR­ßìhk‹šÞÐ{67þЍrà³–§–!ÿ†oš©tØ!óÁú‡6¹àÎv'`o»É+_êün<®~¼-{bñ‹ì2¢Ô%ñ»+bE”§Øð'x%ûÁÉ ×Óo¿z~AnÙw‚95m¶£µD~ž¸$ÔWÓ¶ó.dÊb÷°*ºšÃþ)ŽENMAèÙ¦±«hG«*o«HÕß ðçe·uº­ö ½Þ×=i›¿„™ÖXñ€i­Zsд»š:è6ÕV£ßé·ƒ–Öl5{ów}ù5Rƒ^L¾…섯0ŸqOzŸ´Ò–Hew§v'0쩌»´a5Í ªŸpiŒ;ÛŽ:ø &‡ñ‘uªhþâî‰.šÀ¸£ùw Ëq¸_š|Æá^Yy3¥æÍ8Ì'4GÆaÁsêZ:ÛLa&)+Ù¹–™#?2‡Ð|ù‘¹ƒ+CŽüÈ|Bm7¢ºçêÔ÷=xê,:¶]Ìv ¾,Î'û>µ »“ƒD}c˜KêöcxÀ¦Ëœ½ƒ-@$-ÀTÙaÁ±Î¢p‚YœÀãpXû{¾˜&ÈöNâ×uT—Í”;§:‡hÉœjy‰Årª‹É—Í©–—žx9H‰¹ò©åå>̧>£þ„êÉQ= \xÛÈz]H±9°å¥æIÀÎ!5wvÙƒ7[^túâM?XÇë™Xñz`[,ÊJ¶‘€&ø&°Àç ¹Ó€‚)ðòvØÖæ¡lÈKM)8Rå‰ ò²¥‰ ùDæ#‚ä”-GÉ)Tš"%·œ+)éB”øzfn‹‘D†ß~yÉÛmãF„•4¾ÚäœiYLØ›”&­€ÄæŒ@‹W¤ôÍñD½ùÓw¯aÖ²òëØÀ¤ …5ðáŒb¸®éà¬gK“×Ó+*ÔôuÎt6Ée¿W¥P§¥0Dž»Ó<$ž\#n wŠ¢ì'íä–œdêopÛ_'Hh  –ÙD}ÉëU¾.Pš/gúƒ{ý™}ÁÚá+¼ íìtá2|bC*x"²<—Uíå>`£êzºÕ7âÐAMî1röå²çoÝ +æTl¶¶bšž`œWħ¼"þ?{ïߤ6²¤ ýG¡ˆ]Ïìm¿öúÄõñÌx'Î̸ÃöœwwÜ\‚Ö4HŒ$Úîíà~ö·J?hÑ (³”% -ïÙslO–ª²²²²2ŸªbBÖM–[«’Öt¹õ’§´ ž‹UGV/–´Ñ¿SÄ5èªf¾¦=™“Á/U–—‚,IÁTšÙwœ×i•\Öõ}ÎÅ{'òo/pî\çKxÂo!—Ø!qºŸ#¡+H×õÌVw-VõÄÎNV£?qd™”°žàg7ÁÉÐhÚµ&óeQõh`DA=^PÇDÔáÑõh`A=Q¬ ^ ZPG‡Ô#Qõp\‚z88® ŽK?7PõXDA=SP@EÔ#°%t¶ -PP_GXêKa©#,u„¥Ž°Ô–:ÂRGXêËiEX„uð0F ŠQŽ‹aÔ£buà¨F<*œQŽ fÔÁAâuØ0F$(˜Q„[Î(•4!HFb<£,á¨Ò­´¤-4ýÖÃé‚Ûà&…ˆ~ …“Ûæ2pÙ–ü^˼ª…nMÁŠ(ÂviÙõ[ù°Æ¾4õ¤Öm ÏLëªÅ3ƒ$â°ÐQ¢hÂõö¢RŠëíÅœ}x½½¸Çâ…­µ/' &ž9húŽ)^]ü+ÜzT }Åú( ¿wqcK0àe¡™ð"„é]paz¤kÚ#H sþj»®¹Û¢ÂOëšm½ouLS×zÃÑLÛ1µ~ÇœhvWïN c¬wŒvùñ#˜<ÄÇ*´'y’‚­²O®)O½¤EU%“P¾¼œ¨)y´ô¤¢‘ç3åsœÉ”{œqñò`ìCê²ÞÓÆ¦íîŠL[0Òº4J¹vˆËø%îi#Æß]oâz³ÄØ·,ˈÑìÚ* 슗ÄB—IÅB!.ñˆ,V£cVGïi†ÞÖ5«Õ7­¾©^[o²PüõkYMúždDެö§Ã+ÖÛ¼];Z…w퉯ÄAu@ñJˆOPa^‰Ã—Ûòí ¯Ä©½•(¯DÁX‹òJt‹°¼…ŠW¢ Î+ò>1¼`§Ã+EðJ`€q¼(d ¯Á+ÅñJ É•Á+ÅóJÀñáU¨,Ùy-NꎔtG)z–ŠB},Ð Éq³‰ßÈù¶¬ïÇþbé{<Ýw¶A%_àX…Jô’Kô§;sX´iÀöÅnUÒ¦¬ißL R%ƒ°¹*m|Hîg¡íƒ, |ÐÈ«>1ìEm3. 
Æ¢;A‚ ¹©MLRe¶­Ä•±x vj‘n2'EH€FÀ±á„pL4!MH€€ÀÅ Äð¡„pt(!EHÇ $€ƒã à¸ôsEH€€EÀQ1„T4![B'` àЄõÖ¼Þš×[ózk^oÍë­y½5¯·æ/hk.TÉŽ€‡U²#Q•ìp\L%;SÉGÅT²ãQá•ìplp%;WɎĆU²#AÁ•ì ÜrF©¤ AV²ãÁ*Ùa)•îÁäî½è÷\˜‚vœCT]A;a» Ú+_¾íí?TʈbH09ÊöüÛò#oÇ©´Åj€ê ú=rõ¡8aVQ)Ŭbû+8«>žÕ@X ”Õ œ€Ò¬RÅÃX „› éÍØ[IŒ W©‡W¹Ê…§K]år×;YŠHŽ*¡‹8½Æc`/à±Êrï¯:­n·ÿ´Yû›“‹e ÌN`ñà?®á^šð/b?áGû0áJÍ^Ó¼»Pˆ ¾›ð£gQá§„}§el³¯µ «¯™ú´¯õÌþTkÆcÛ°Ú½×-?~“|'°0ü‘SI5<‰1-A_öÌ”ˆ%¡$È´¥Ž ‚Ê›ÜëRâò~‰{z'OÀ;'zBð6NVúÕ^*?zwnà{ æ,ï¢ À¾eIž€^W°kÍÅO¼Hyîû·«¥²Š«— e§'Sìã™Æ«“µ$‡ka/÷öbÞø³¥Øa\ý,<à;* ¿s™àׯøLy5P^íþjý}qÅd‘®ªYUöÔN}PšëÅá&ëkÓ~ºåÐñîÈÀv„­ ƒS(lP8íĦÚÑà½+F|DWÑÿàåJMÊœÝQÇÌôøJ½òæþìçΙó'\oêóÏnlo2w~äÅxá@ù¬œ+oÄ”óöÇ;6ýÿ¾bþį¼(`j³ôƒ(\yŠ¢)\YJÈžu"þ‰¢$ÃÍÞ÷Ÿé\‘ãÚã¼n¼Z'&rƒ‹ÿÉð.7¼òøùxÊ ¶-ЙÛaäŽCÇÆ7Oåòõû‡_¸è›(Z†ƒfsëyÍ 5þÅ ßÒõWÊ«Uè|úåã«äÕÙ¿£yø6.©|ëQü x3>¨mò3æÆ˜}“ôÎýž'oÙ7é“önÀ±ƒûçü– ß<9œhï“;}» ÷ÿ(ËxåÿÓ}´ê¼Mðw;tÇoVñøñNaF¸3º ÷Œ6 åÌcѳ7s·vÅÅʯ=³þÔ»à*2º}py”r%uv³Ú2Gd9¼³ãX _ïKŽ*Ž÷ -n‰+°ÙOìsbÔoÊÓþÁYÎýûmgû ×=í¾õ¢<íJ“‰—Å–IÆ&,ñˆ¡ñœOph(çQóIÌù„ÍB£DÄq>qE8ŸÀàHÎ'l&1"œó ‹á|£¢8Ÿà¨xÎ'8¶„N@s>¡S‚‡õ¶¦ÞÖÔÛšz[súÛšKÁmÍ>½V»×1ž¶\p´LÒ&%lM‚´¦y{1v<Œ]ˆb×ãbØuà¨v8*†] g×cƒÙup8v$6Œ] f×á–3%%M(ù ô%“ߤ¿(ŽF¨F#ë:œ[Ä8‹]»ñµ2n<Ì®L"¤Û…ºëðÙ £g2úP9Z(f”cý@»`YKóïˆJ)æßA#§¤ ¤Ú(Bz">ìZšÖäÒ$¼‚ù¨Lù_<'øàLÀñ’ëæåˆ’>؉I<\qn„+up¥Z½¶ÕíéŽÖêL&š9鎴¾iÙZ¿3êõF¶¥ë“Εº–£tqyˆÄ#Q‚/ MŠ‚±—…¯•',º&Z»ÉSp¢D爤<·wgâžìãmÞµ¨í‘ã"•‹=¨hÔSÆÈ¢ðR'÷NR"³C¥?YÓÞÄèÙ}ÍèLûšÙ5¦ÚȲFZË´'-»¥÷Û=G=vdLø×¢¿,W 8‰«%9õ¦ ð`svÔB1¼È v–B¥ø|ï©Úù,e;‰k¶Š—R9(.ñˆ•ƒÝF×ÒûþB¼Î¥×2ûz·Û²:f«cüõkYMúždDެ‚§S6ÖÛeƒádhοš_ʪªjP|‚ W n¼Ü®o·hjOí­DK ÆZ´bð ã¨|îe@J‹ÃÄJ‹q1¥‚€ÓNp© 0”/„û؈RAè„H8ºT… ,Dœ”ÀK ¸RA40¹2 Jq øRA8><§µƒaJ6Iω6±–6¦—]Ú $P˜ÛÉžm&áX„KgL?ËÀŸ±}vøƒcOæ®ç|äõñ±kG׉{=pânCß¹|–ý§F~pÿ‹»ˆÓÉ êWÈJRä–.x@öÐUâgvÌ!i‰Åem 9®ýÆÎºÂˆ×ÓÍîå(RÀú•íùÓs-iª´°¿~\³ØŠZú¿ÊÒ"&æwϾ³Ý¹Ív™°³8„Ï®eû°5 '­™¹fHò˜øãMH'­Úb6ݘVw<’ÚØ#}Ii,ƒwnk¿d3w. =¡–{bý|š%¡ÏR¥Ä’4Í^üµ®‚¹Ä™ñø^LÖ ¹¹6Ôe怘Ç\ËÆ³ÝH#¼7ÆÉÅ!8þ6°:Ýv­Ÿ4Óiåù‘;½¯¢ùšgTW#Š—ó —Nàú“*vt¼”¸ÌmßúF·g¶Ÿ6 Ù Áh|¸Y»¥`_Þ°t]¯B¦®[¡UÑû̾ Wžûµ aÏjªyÃ`V¢ƒvÏT¥Ê¹–ûª»°“=Ñ_+žkå7ãt«$ÛŠ9e£ÀÌœAœ˜!{ lâl‰PÙÒîüùjáüÊçváÂM¢.Ò%¤G1ì.툟BÄ󯂙°]÷W%i7P¥ ^Ë|ÒÐ×òÆD®šü\"3žò¨T±L9Ñ8«Ök>¯£8k ŸÜ–ôbÌ[fe~ýïáåû†¿½ùõǪ Ü=_9?~a0™ÈÇ¢'À}’¤RŠÆèåÇož-eY4®k@e YW"i}î+åqÝTn¯+vRS©µ—Z{©Ê½½£[Þkª|ü¶N ª’8öä½gcEÁÊ©5GPsò< UkNJÈ %Âë-j¥í–ä(d‰QéÝØ¿áï³,ÑrWgÉ›zg±ŒîpƒâšuBª‚ø|Ã,ã ÑJ»>¡Zh©­ZWëÆgnD­‰ÀeSòð„1ÇR5ºnøœr¯¹Íç$Y×k­Û??ÇŸ´T!^¬Úmy*Ú“·>Wí“ãA’£Ò¶“‰È`/A#.cÃ/c¢/C£/A@/#Š]0"†½`޽`ˆº`Ž+pÁwÁ—~n .AÀ".£b.A ¢/A`Kèì#ph FPK¯H1$N¶‡-]@{7¤ÅbÒ‹!q®áX@²çñ=!^ ‰—.†)†ÄËC m 0Åâqf|!O‰p:M]Ì)lǨ X )¦H¨bHñáEC–ƒ,†Súæ C]3¡Å‚&N–G,†,‘ªÒ†£²dchôåÄ eX)îœ@1d$Ž)†¤‘v´bHªÎCîµu9ÞÞö¬¶i=m#uéän)Ô¥“4R Å$‚ ÅDo)†$UÑûÀŠ!I„a‹!Ét0¬D‹‹!Ëʹ–û”Å¥Û‚*†,- •fN¡.Ò%¥™“ Æf¹Q^Ë|ÒÐ×òÆD®šÁü\3Ês§Y¦JCžº…‡Cž‘yÃC’‰†CR‰ÄCRJ)†$—/Z IÙu%’Öç¾R×M…C’9©°bÈÚK­½T©ÞÞQ‡ Q I?~˜bH2é˜bÈZsHŠ!É5Q ùMoQ¥ KrŠ!…EC‹!OxS(†¤s *X€Ï7Ì/†$ €C–mÕºZ7T ùmj¢H1di¡Àª4Ý/] YZ×k­£)†ç”°µ¼Ú\@F½³ç­ê0qH±¿Øq1 xÉëÇ&YÍqs‡ñÝ ×¢Ü…Ãá…!-½4n9SRÒ„$—R,ª¹}ÊÓ«LÕ£¾á‰nü¸¸m•¤Í—Äc&‹mÃhúL¤®Œ®ç¡‚³îÁóÉöñwÈð¿0eüàLÀñÆN(u§§6$‡SÕÛÁÕºr'WêàJµzm«ÛÓ­Õ™L4sÒi}Ó²µ~gÔëlK×'+u·IJ“èQ%ôÓP5K Û_¦,K˜Ð\½²T);K—%JÌ1K”­p-5]Yÿ¡j]K‡³Uœç0ÃýXZ[´í Û‡³ŒŸ²7I ne™ci³\u[²¤;9YÀJ\’ÈoÓ2}­¤ø·"aû+«jÀ“jÞ‡õ7snsY½«§›¦Ùêì­`„¹K{Pp3p« X®´Gç™ëgì='5G©,]8Ð  c¦ )ܨPb–´[µÈËÕ|~é3wø¾Zá cG*1³³lä ׋c:¿²ïò$±ï¸m8Â?IÏ®ÆrTn=íå&‹36š^ª_=÷2[^–‚_ý‰S½ð­ºÝ‡ú„Lø„l«Æ¹¶µ¨íÀ7|Rž¦Ž¾d;åô©'f)áÕ†ްŒ¼D#°!{ÉL³v®z#7l% ]2=*-"±¦5Ù\¹Õ÷KÇ{sù³ö.©ýö•X–¿|¬)W/¦\Á 8öÖÆ“‰"Z›)P¹. D r]XJaåºØ¤„ª‰›žm &±(g E×ÄH{úf»ÓƒÜ›;“Å,‹ÛNq¥M¸jAÇŠ@ÆõËv´*Ö ÑnHö. 
’<)`í+% ö;yåmìw¾Éêˆ!ng¹•¼\:V<¾þ)°½Ð͈^ª“›¬ÅÕÉ\$¹iÕ ;LÍtYSZ¾,H!z9AÕÍèË”}Ãõfõœ®çt=§åÍi9.×…,ÆÅ¦“wyB²yòÀ”<¥¤¬¹P>T#0¶“-aôn2)"qgä#|GÂ."NíãZö±D|¼-"\²r‡˜:±UÄBă*Q4ÚsMóî» $#¥†K;̧µSIyΧ÷™li·'ñ¬|—7âËäÉmrw-jÏhR?r‘ïHG®cÔWnüNÒÁ—:¯3k™Ó*Ó2ûTú“uq×ìéãiÛÒÌöØÔLË1´þ¸ÇzÜé÷§}³ÕžšÝò]L°Dª ¦ ðëï§Qìè:Ùnœƒ r'E¢AÕ䆒Î'S*¢Ïò4Í^üµ®‚¹*ñ(ƒË4›™^»^älD<æñ5žíááݸ1ž¯BöPcîíùÀêtÛM> Ø&ÍtZx~äNïe6[ÓFLþ­\< 5\Æ%Y2ut¹¯ŒÆ2lË-ù ’J@‰"$·öe¸òܯ2…ðB󦄨Þ^ ¥êT§Ý3U)ø×’Ž)2j$õ¯ïùÍ8Ä—Dø˜Û’°ý¸3ÊReõ9W’úfÎtAÖXm‚€ÉÛÉ’‚áD²—Ie‹ã]3÷µ6÷gju­Ø åOî\Z?a=’çÖHÎÈóOÄ6Q•|ì»[ü¿¯Ï!ãð|½øôšo™KžJ)ýß_Tx^«h·z¶æ'³9UÙN ZVÔcb{ÊÚ)]ÚŽSÔ‚2¹ñ›fKI2lÄ#,½k©Î-AëH.;·£/ÕaO_¯öØk½öØÕsLB¯tXòfeã³uÖ#[jŽ2 VN­‘ç ®J#¶h¼^ü6û±¼ϰÌ^¯ÿ´•ûåï~þ€üÃ.NÍÓ7ªo“¦ŸÜ Èó zJÜò·ðbž’5nŽ7_KÄr5ŠˆxÂ̈É(+.ëH©"‹øÛ:±ÀGöÅϤÀ…ëb¢SŠˆË@úhB‚$´]RƒYœ„E$6x‹BÉléòd ¤ú¨RZ#]ƒ·½ØoX‹w{q’ºFGx>óe‹ú/ëÂбƒñæ„q_JšµÆ>á ­UVDe·Ü6íIž‹æ’"^“¡Ñ´«< ÁÌVa¼2à—R·Xdè|lÚ9ªîäZØ]²zA/y‹m¡©| ê®%eâ;¡âù‘rcß9 Ûˆ¹‹ÕBI‹)Ü9ÛÜ6èÃ)ƒúk".+2ÏqPïk3W²ç!9|VÖ±!¢3¯„Kw=¨&Pª±H¹V|k“¾Õ™³w¥*n¨,l$ΰdž&ý5‘7±>+ió*G÷£žØN•Uwò–Dàà X8änF2ôgô#FyW©‚ø¯~‰{úÓ›ÿP>ç2P’5^Iº¡8­œõ©2ºW¢fb¦%¥*¸Vþ}óû–Æ´õEd‡·ÊÒŽnJÓ_FM¾.0ÝhþÜ ·²ä›üѰ¹°]¯q¿˜ ÝìÛÎ"¼u—Kf˜Ÿׯܸat­¼þ›ðZ×Y$.ìíÍ^™?åþÂE9´c¸q‹s\!_ Øâz%Ls*…÷ÅðÑô¿ÍÄÙs6ø![¡”/v¨$þ°$éÊ|ùË›ÿV>üøöÍeN#Küš²]âoû|]dÏù·¯ CQâ§R•y­ó'Vö ›ÉSf+³/óºË>k))¥ñ*{Èy~þCfãžÖ6vü®ßhu;­^·o¿s‚€ýîA3—h®T×›úê…Êhvxm}浩só.qrˆ¯÷ìƒE8cÿúàØÅýéŒ#eø¬ÇY3—C}$4zP?¤ÿúF“÷ìÚá?{…÷Ûýn ÜÔO§Krºï]hwÿÙ+«oõº-sÉòoŸ­ß¹‘³xýðŠkù«ò*G±Ì³Ô¶^­¿?´XKM²…“ÎÎ}9¼³cÆ4.zï‚Ä.Â@4×»c½“–ÂJssxNÁ0-g(˜%^oû Ð;P:7xéx“áÞÐ R9Î £ÄÛÃøÌ†´oÇs—Í'yÀäÊ0Þ䪃ºÞÊúÞ-ø1oåÐŲK6w÷Ô9Ý~ c…{Œ¸R¢HUð¨söyôÒ°’ [ŽEQ‡`OÑ£ Ê‚‰„¡‘ti‚r´ibÈú4qx. ’² ©v6Ó]%oÀšqM­HÅ4k% J·Vrx¡´kåÅÀé×J( }óqtl§¡™z¶2&N–GçQ+‡_µ Åòªå¨g¼j£Þ¸5êX·Q_jchôé ¿,Ë ÅÃñ¯•kyS Âä@#ÊËF(„ŸÍöî™/5ùtòÙ“N1µÅs«ÚÌŽ;U¼No©(¡Àièë‹Ú“-4ãÔ ”Ë”GÙyXøÃœeggÞ0fÄ¢!\f´"“”¸bN3z©;ÎüHûàoŽâ8£oȺIës_)ë¦B¸ÐˆÔ"вÚK­½Ô ¼½£ˆwKÖøÁø·ˆ¥Ãx¸jÍ)Ò?—$ÍñtÕ[T9í¾–•ŸQ‚©¤èb’¤“ßÔƒH“¨¨ àó ³@H• gðr-çºZ7¾€é[ÖD1‘ÐB¶BÝ/A^D¤ëµÖퟟ0r£³W;¢“×>9ä©ç×ÒåÜ“ ­É KæNä íùœ¼b%ö—’PµMÆG¼ç7m½Ó±ZݧíÙnÉÔËA‹ßnÚ®·›%$Î0µhE/‡€¾q'ÇÆQòÚ#ZDw±t‚Ð÷xWÌxYo(òšhJô´8‰q5rR§“7NfÕļp‚™3LSÁ qéçF¾.ŠÖ&•ö¤¨K; ¿ø±†ñùà†ÑP†–Éè9¼qì‰C­¶Yªù\ ,ŽD @G"Ñ¥ 8TêâHAébÅ‘8ak±dÓ ô„pq¤€,hq¤´@q¤€`q$Y)&@¬°§„,š:±¬I×ÔŠ+ŽT$Lqd‰áÅG–ƒ+ŽTúæã‹#¯™ÀâHQ'Ë£ÀGŠãWmCPY¶±G4úô†_–eâÎá‹#˵¼)M)//SI$íTŠ#É:¯¸8’L´8’H`Aq$ÙkG *.Ž${£ââH"Q½¤8’H®8’PÃJt°¨8²¼œk¹¯AWIÐDq$4DÚ9º÷•ò¸n*´8’ÐI…GÖ^jí¥JööŽ:làâHã/Ž$”/ެ5‡¤8R‚怋#¿ñ-ªäkYù%‹#Kˆ†Gžô¦\Ié@U°Ÿo˜ZI!.Ž,ߪuµn< 8ò[ÕD|q$PP•‘î—,Ž$ÐõZë(Š#ÏZíD‹#OZûäx§ž_‹E»Ü¶ÂèzïÙuˆrwÿ[J ‡‰ýq'†îÄf×·–» àƒ11`”$ŠªËÀ kOูTÂÖòê?hAõΞ»ò¶f¨ÃÄq ÅþbÇÅÅŬ8È­Ë™‰›; çŽÃ½z‹4r{„Š´ôÒ¸åLII’\Z°ƒ^#·«yzÕ©zÔ7\8Ñ»%7Û—46jr¡7MŸ ÔùÁ€±õ}0Th>|>ÙžçGvV¨Oç´WFž9!?ËÊJ¾xïêI-ãÌýŒ»â³/l‰\ÄÅÏQ?¨ÖË”K é ƒta{öÌ™ü”±|&T:õ:ùžŸª‡ŸÒzž¤Sþ)I ÃwR‚> Ÿø;dø_˜2~p¦Nàxc'”&*×(¾–p;x¸RWîäJ\©íŽ11­ÎTF¶fv:­×ouµžmvûmÝhµû½+u·IJ“èQ%ôÓHuKÛ_¶,K˜Ð\ý²T);K™%JÌ5K”¬x-7]Yÿaj_K «Äe†û±Ô¶iÛ·’Ì™$”{“´W–9–6[ U¸eûGºC“«Ì--©²i™¾VR \‘°ý•ÀU5àIu¯DË Çá©ÆîHu€rr¶ }+˜W±;Ì5.ö‡“š£Ôã•.¼Ò‰Mî¤p£B‰YÒnÕ"/Wóù¥ÏÜûj…Wh¾6vb9+ØÈ ®Gi~e[ÞåIbßqÛp„!’ž]å¨Üz<ÚËMgl4yÞv•&óh/Ÿ¹˜¹Ö#ˆßØ—ÊW#I¾”*Êm"Í+« J‰I)ã‹7²%ÊÕ8~Kf¶b?*¿œC+<É…­¿Ó7Ï™®+mÀ‹[ÍêýU½¿ª÷WõþŠ`•/“«7Z/y£udEËWòÖŠö xº¹BæoHÍëxU¯:‹x•Ü“HéY/¬Ì[LÜôÈ¢*E†ãg²š;ÁoÕ˜ð8¹i¼ Üèþ­ïEÎר*¡ùºúcȬ®ƒs[žw=v.cn^ù yÏö;§Yr¹OÐ>V¯Å¾y^±Šn•ïÝÂ$…Óé„A}AJ‰°jW2‰•wj¶%œ¿ú§zá[U¾‡=!2Ñò¥TЇOÏÓ¶*¢k;PÛÚ|“v`‹¯å%Û0P=1Ë÷ª’Õ/#/Ñl¨Ã^²€“²©ÞH• ]2=*-"±¦5Ù\¹Õ÷KÇ{sù³ö.©ýö•X–¿|¬)WP.àEp޽µñd¢ÖfŠT®ã…ˆT®‹J)®\š”ˆB5aSÂs3á$¥l¡ô51v‹y3ÎÆ(_ÈP)Î^µ e*Ô†éÜõ)ö$y-mìI¾É*ƒ«p$+q ãêÔ0úØ^èfÔ-ÕÉMV×êd.’Ü´j„ަæFº¬Ç)-_¨´¼œÏ\ÙŒ¾Lù4\oVÏézN×súÜöÁ²\“’NÞå)ÆäɃ“씑²zd7ùPÀØL¶„}»± ¾#á‡'ëq-ûêü"áj”•;Ąԉ­:Ï0 Ò5Í»CîV_Œ“.ít0Ÿ^ÅN%å9CÞçóˆP廼_&×Hn“»kQë|F|ú‘‹|G:Êp£¸rãw‚оÔyYËœV™–ÕêRÍ­´‹»¦åtÆÖXëöÌ–fÚ¶©õzã¾6ju[£v¯×oOÛ廘`‰T!LAð×ßOŒØÑu²!Ü81äNÒCƒªÉyŠC²‰Å<fû~±š‰XÞ%ò2.ïå ™×KxbæhM¥@VJœb 8(E<é 
¤jéÿª’;êOØc!§x§¦ûuõÃV‡Ÿ”†A™‘¦GÖqØNòòÂÛðbærÌfU¦31{É1WÂi£Þ¸5êX·Q_J#`ãiƒ”ƒÔÛzœ3yÆÏäc(é|2¥"ú,ïAÓìÅ_Ëá*˜«2¸ŒA³™éµëENÀFÄc_ãÙ¾Þãù*d5æþØž¬N·Ý´½{æÒFM>|ö¤4Ó ÏFÈ,áÜ‘ùš6bí¹•+‚¨†Ë¸DK¦ Ž.÷5‚ÑX¦ÃÒuÉoTJ!¹ý³/Õç~•)„ž7%DùöêT(U§:íž©JÁÇÇf.‹`÷µ{í®iíªJT3b%õ¯úÍ8@˜Ä™Ó“p¸+(þn!%í9Ó’úfÎ4'”%0ó÷’·“%Á¨$.d/’Ê–Ö»fîkmîÏÔêZ±ʟܹ´.~™ôYŠ”Bg®r²1̱WÄTR‡=›À›ÉK \KA¾&G•0Eå¨ hPjÈÔì’pyj¸MÄ”þoƒ‹/.”£Ž×’ìPÂÖz¶æ'³9VÙL?ZZÔc¢{Êù)]ÚŽ3Ø¢:¹ñ›fKIplÄ#,½k©Öç¶BÉt—÷»ìÜŽ¾T‡=}½Úc¯=öÚc—è±_¼ŒaÉ{˜•ÏÖI‘l©9nË(X9µFhDže¸*Ø¢õª·Ù´í$v²Ô<£ú69qúÉ È})õ)‘£J}JÞ¸9HÞ|A.ËÝ("â S#&k ¬¸¬#¥Š,âglëÄÙ?ŸK NB°ñèuOÜÌÓT.2¸M’d¶t‰QHÔ9Køºš­sæ—~ÃZ¼Û/“Ôý0ÂÁó™/[ä~Y†ŽŒo4'ŒûRÒü¨5ö 3h­²"*»å¶iOzô\4—v E†FÓ®ò(3[…ñÌ€_JÝb•¡ó±i稺“{aw ë½ä-ö…J¤>ò/¨?l¸—”‰ï„ŠçGÊ}ç(l#æ.V %-®pçlsÛ ¤Ü ꯉ¸¬è<_ÔA½¯ÍT\ýÉž‡äðY™Ç†xˆÎ¼.Ýõ¢š@©Æ~t"å X®u¬ñÔšš£QïJUÜPY>RÚHœb-M:l"of} VÒ&VŽÿG=±5œ(,«î$21ˆÀÁ%±pÈÝ%dèÏøHŒò¾R)ñ_ü¥ÙíwÚݧ¿Ä=­ÑþÙ-¼×¶Ú¦iæ„;AÀ„?¨sfíæê@u½©¯^¨ü´{‡Aî3ƒ¬ÎýYL±ÿ…Mæal(‡7¶7‰ãóê"œ±o?/çöýÈ÷o•Èo•øøàš}›ìñ!ä2vd¨³»[þÍ~¢ˆ å®7y½Íë &íLÌ‘šµnè{CÞÀaÜ@öÔŸþˆ}Ýëu-K×û†Åv{­~ËluØw?r„x†R²?Ûjƒ2P§LIÛ*ó×Ê ÙPF÷JtÃVŒ¸IJÖ¦õÞabòõ>L£ö=}@;”7lÅg“^ùćæc4yÏ~!¨Q£cvuhSw?ýéÍÇ(ŸËuæµòï›?8éÿòÙø¶¾ˆÕtiG7¥é/£¦ôQ3ðçN¸U#Òä†Í…ízûÅ|`èæ~óÀ¤šíNÞC;Ÿ®Âó£(¥Õ0º kø±VQ’Ã,eÊÔGy­Äù^Ù"“þoc<•°bº¦,|ž.¡$gñ²ÆD~~Õ\…A3¼±ƒH¶Ö9^xãN£WeÞ-ES–÷ÑïeâùäãÛ þº\r<›swÔLžk7ŒV3d+Ƭö-Û™‡Y³š2æÄãX|¾ùµ½‘ýåñÙp°£r_—h˜óÕ¯"nHötÌÈõšOµ¯„¼t,î’Ä~&ŠwwÃR¾ãžË›ºcEi+ Û¼Pb öŸï•ÏïÞ¾U £a5ôø;½kô•ï>0úO;J¿ÐZß_+ß=iy:¤ß—hñŸ®÷§½Ýà†Yi/ÏaHŸåý΃{'¬b‡ù©-,%t¢ˆËY-×ãFÐî•å|Å6YâZ÷‹oO8hn2dM­Ü9¨L¡ àKëpî†3æcN6aÖ&H:ŒWgúœeúA3q›–Šf~‘x¾4÷¯›~kòf…|d\Ö0wÇÇ—vÈÿrs§÷C>Zß}¯0Ð">lãÀ]~³oo¯"ÿ[}w;N[¤lÜL…õÇ–Ï)}É:’õçñûñÑ%Å&Ê—¹®ç¦–Iø ™“âÜ"1uùŽe©Ó~ÿÔB5n{áo=yßæn‰k0³ï(-䉅,t²/‡ùÏ¡šªªîÚÓSµøZ¼ø±çósÓÓ™¿1}þTáJw¡Üµ˜ç˜LœÏßLf&)³Så=yëí/_‡7än6Ý SÓœ7/²ùKNl™·úÈ[¯õ*‡üꂯ_Åžó’›û$3ÀÎP²ß4(ŧCu Ñqʃ=?†hßs¸ç#O4îiqûûû÷ÿ(ötdLzŽ1íŽ8ãs¯Õ¶&m§?™ŒLÛšv•2·ßÅOÒ ì¹b³ÏQ Gpe–ÿ'³šÍ¢®ÖæLŽø»ÞñMjÊ"Çúk „á0ÎÈû g˜¸´ j5w‰"{ÆFì»WLm_]ˆïç7¾#Ç:¢+[术Q`ï˜;Î_âÿTÿŽw׌ÁÛPæM¦~pË^Â0¾¶ð-Á‹šÌx «|Þlð®9g$mÝy¬úŽmZù~)X$H{ÄŽn%e¬ÜÿJX­)y¬Ú¦t˜ç¾»ZVà/ó“¤Á–†¼þÛ´½"°„%õ.§ö8:œä Ë_†³À_-'øBñbÌ™c(Ö¶À™¹1o:SQ> ×§C_†TX±m ©¸b³Ñ‹Ü±”·gà~àþ$l?djÏ3üˆ G|á ãå>î”÷«C=fÌt|aë+O¥^&×DhÛ…4dª¿Zƒmˆ©k²pãÊ©fÁen–ãq1ÔÚ‘V¯ÓkÇØ÷ƒIZyM­Ñþ„z'n8öÙ¦éžw:÷¿¤7½5Ø0Æ9¸„U£±9*—feHà®ÎD r̬%ÙçÝ"9ÝHHÁþkåG¶dæ©Erz:#Àž]L"|Å&ºI£ºlg›l |þ? :ÓÍ3äLž´ÙrìÕxá4¸'•ݨI»§Æ[JË—‹xQHËÀÙ`Jp|¥´\Rw3 Ù.\J“·jí%u»ÃoáXLg¤ŠÉBYÒ|üxM]Ûû†ÔÙËÏk°ä á½3ö\öÿã)1¬Ç|¤€l{yëŽØ¬’¤7 {|ãzr¦WŠ%|Ês¾ç2=áJO¿æÀ%·=¸ñG!ñf_î$’é{$ I¶Ç2¾,ɳçrõ¿9Iþ².GBºke>rb'dþÇ«œI|¾Ò˜¹ÑÍjD)di/6_}ÕhWɹ? 
#;¼‘f­—¡,hw&%d•”gRvBNÀœ«ÆÄ¹£gÊ󥘊Œo“e1%gL¿Ü0->;“âëÄǦÌ\£¿hBÒœ9¶,ãyãÌ’,ÙæD™LiöœX«¥á¯K!¬Ë½ŸšÖ±©eÊ)S„ÁÖ²OÕwžT_2Ï-RBá(“¸Ê,wl]p~¹ï¤z/HÇêµ{O_~®}8Ga'¸èÉò!°diâµ¹ç¿E 1P>±Cù§¸¼Ì©Èã!cêš)䥗ÈQ@ñÿŠo'oÝLg!)ð*˜”Ó¸œÕxÇãú1Ixà©@ùNo8ZM§N0ŒÉJÑ-›Âø¡û?ƒ/\$‚OX{3]¡ìNð¦W²”ŽN‡ë/³ž&åW~Ó¾H­ñá +PèIΦC8ÕR׿^ã‰{zç¢Ìkž¶x7”¸B&aŠ.Ê{Þ²,+Gç„ÖY\Hïx¿7·‚ãˆ' ì²¥R X—›7(Çôæ@â3fAÅs e¼ì9ô;rlß\σRóàý›Utöjþ×{Ð"p¤yðÂׂ8]f¸ä£0|váÞ&ÓôwæzZü´?]ςҳàí3¾U¦~ÌiFI)±ëròNÓ}`BT< Úº`×V\­}¨!ÿ{3ÿ¦üøñÓ›¿ÿòóÇÿT~yÿöÍ/ÊÛ÷¿ýöãÛO?¿ÿMùéýå÷?~`[4¶ìtÙé42ÿëÇ·JLwÞ(ÚXyåŒo|åÿ=JQþíß”pî8KE%GâwÊjÁu Ûå²·ldm©¨ÿW‰Û²»Nm¡”ÿ«nž/|xSÝÃþ®ÝŽÕ2Øûµ¦Õ2ûý¾ÖïZZ«¥ëfÇÐ[f¯Óêv>oPŒ‹ÿõkYMúždDNf$\4)¿RL%EBL°©½Nߪ™5´¨JªÆ—¿ mü­ß2ÿtœ°×3úüÛpht¾¶olåÓ{RíhÆ$fö|þÇúmŸMÚñ ëXeõ¿¾Ò¾Aý!ß íæ‘z¯,mªhñ¨ÿM‰oßõVó¹ÒúÛ¿/"ÅÓ]ºÃ©¿ò&é…Ÿ´~bI4×»KIØh|ðÄpí`FãÔozðÖá7.ò±¾ ½ÞöŽÛÃøJ%ÒÖŽç.çm–LÞ½ãÍU’„ S×™O†¡Ã×o?ˆ‹6¯‰ oÜÉÄñ†±„¶ÕÜÐ"º ž“â{l‡óX¿* ŸТߺ±-T?]‰…C]ÒLPÚÖÎí‘3—£sä~+BY)+´²‹OžHŠŸÄ_h‡€×}ñƒ 1*7þa4”¡42:!ŽkÝ8öÄ ˆ§;Ÿâ©.¢ÞÙs—ß}1L.k$Åþb»´9ÜÜøEßÜaìš2\‹4r{„_+ÒÒKã® œ„]1$IÆ¡ó=ÿ)ÃKd3Ã&óÓTóxÝnî‚TýÿŒ/Ÿþë·ÿý8ŽÆ‹ßþküéæÖY|½üðέ’^Ÿ´KÀ–6›¾ŸY í좰0²Ë÷véïíB MêÞ&?eîég2hº¹Rży¾?¥Wk%}óO‰’ö@B_%äšúLF£àÚõÒ¦ƒgFèa-Eœ¤·`o Åhìãñœàƒ3uÇKÖœV¥TÑRšÂÄîSÔƒ(b·XâvA¸GTJq9ÍÜßa¶Ö2$IÛådȲ!»Ja°Våa-¥9ô¨r†?|Ô/9£Wñð>.ã9hÂ!õ;b]GŠH<ùº—míå6`§ê†Ãl«o'®ƒ–Þ{4/ÊmÇ·n«JæM¼Ù&–°¿@¦õaã¢^ÏyE¬bBÖQ–[«’Öt¹õ“§´ ž‹UGV3–´Ñ¿SÄ5èªf¾¦=™“Á/U¦—‚,QÁTšÙwœ×i•\Öõ}ÏÅ{'òo/pî\çKxÂo)—Ø!qºŸ#¡+H×õÌVw-VõÄÎNV£?qd™”°žàg7ÁÉÐhÚµ&óeqõX`LA=QPÆÄÔƒ¡ñõphhA=Q° ^\PFÔÃqõ`\‘‚z08² ŒK?7põpXLA=UPGÅÔñ%tº  -RP_GXêKa©#,u„¥Ž°Ô–:ÂRGXêËIEXÄuàð@F8 ŽQŒ‹bÔ£¢uÀ¨(F4*‚QŒ gÔAA"upØ@F(œQ‚[Î(•4!XFB< £(á¨Ò­´¤-4ýÖÅé‚Úà&…ˆ~ …“Ûæ2pÙ–ü^˼ª…nMÑŠ(ºviÙu\ù°Æ¾4õ¤Öm ÏLëªÇ3ƒ$â°ÐQ¢hâõö‚RõöBÎ>¢Þ^ØãÇñˆŠ×Ú—Ï4}ǯ.n=ª„¾b}”…_‰»¸±e˜вðÌhâô.¨0= Ò5í$Š9}µ]×ÜmQá§uͶ޷:¦©k½‰áh¦í˜Z¿cN4»«w'†1Ö;F»üøLâcÚ“mwÿ¾ä´E^“ ­K£”k‡¸|_âžÆ1büÝõ&®7;@Œ}˲ŒÁ®­¢Àþ xI,p™T,âÈBÑit ½m°·1ôn[k™í³¶z—m3Ì®UHCqøç¯¥5ê{’A9²æŸµXuKPK´ïWý¹5µ¨%ëŠ[B|’ sK´^rgÈ·^4ì'÷Z¢ôEÃ-Ê/qÐ=ÂòKzj(~‰b48¿È ÅðK€c ¿Á/ÆñK üpL¿Ç/&W¿Ï/LJW? 
v´dç¶8©;R7ÒÍ¥è™*>°j…ä¸ÙÄoä\Ö÷c±ô=žv‡;ã ’/p¼B%zÉ%úÓ¹,Ú4` ûâ·*iSÖ´o&P1ƒ’AØ\•‡8>$÷´ÐöA>hdˆU Ÿ ö¢Ç6Ó‡Y%Ø@‘ÛÄ$UfÛJ\‹·`§ñ&s‚PÄh`1NLÇDÀ¡ÑÄh 1QŒ˜@ JLG‡ QÄp\b88Ž˜ŽK?7PÄX1CL€@E °%t–˜-@LPoÍë­y½5¯·æõÖ¼Þš×[ózkþ‚¶æBíxXE;UÑÇÅT´ÃQ1ípTLE;^ÑÇW´ã qíHlXE;\ÑÂ-g”JšdE{1¬¢–âPéLîÞ‹~Ï…)lÇ9DÕ¶¶‹°°½òåXØÞþC¥Œ(†Ô“£lϰ-?òvœJëP쨢ß#WŠf7•RÌn ¶¿‚³áãÙ „Å@Ù Ê (Ín U<ŒÝ@¸ ’ÞŒ½•ÄÈp•ºqx•«\xºÔU.÷q½“¥ˆä¨ø.Jë+÷ý¤ÝÖuÝzڊäaäÃÍÙ]lês±ÌÃ@ÜÞÄxYh⼈ýÄíÃÄ(Q4CzMóî"!b8ønâžE…Ÿô–5²Í¾Ö6¬¾fêÓ¾Ö3ûS­=mÃjwô^·üøL輎žàÂÐGNeÕð$Æ z´~Ù2S"t”„“ Ó>”=: *or¯K!ˆËø%îé|ïœè YÀÛ8YéW{©üèݹï-˜³¼‹6û–%ùz]Á®5?ñJå¹ïß®–Ê*.a.”žL±g/QÖ’®…½lÜÛ‹yãÏ–b‡q ´ð€ï¨€üÎe‚_?¼â3åÕ@yµWø«õ÷…“…ºªf¥ÙS;õA Lh®‡›¬¯Mûé–CÇ»#Û¶* Ná°!á¼›jGƒ÷n¬ñm\Jÿƒ ”+5)tvG3Óã+õÊ›û³_œ;gΟp½©Ï?»±½ÉÜù‘ã…åS°r®¼SÎÛïØôÿûŠ}ø?H¼ò¢€©ÍÒ¢ppå)Цpe(!{Ö‰ø'Š’ 7{ßW|¦sEŽ‹óºñj<šÈ ,þ'Ãs¸ÜðÊãçã)“ض@gn‡‘;;ß<•ËgÔï~á¢o¢hšÍ­ç5'Ôøƒ~K×_](¯V¡óé—¯’WgÿŽæáÛ¸¤ò­Dñ$àÍø ¶ÉϘcöMþÑ8÷{ž¼eߤOÚ»Çvîcœð[jv6|óä8p¢½Orìôí‚Üü£,ã•üO÷Ѫÿñ6ýÁßíпYÅãÇ;… áÎè‚Þ3Ú,\”3ŒÅÏÞÌÝzzç¸s-V~í™õ§ÞWáÑíƒË£”C(©³›Õ–9"ËáÇRøz_rTQ¼Åhq‹H\Í~bŸ£~SžöÎrîßo;ÛظŽèi÷­åiWÁštP¼$¾0¸L*¾0q‰Gä ë6ºý®nô»š¡÷ ­ß5ûºÕiuuž,¤ ;øëײšô=ɈYíO‡, ¬·%Ⱦ~éû÷n&@vPP\aâT˜+ìpãåv…|»EÃvjo%JV0Ö¢8S'p¼äºy9¢¤vb’WœáJ\©íŽ11­ÎTF¶fv:­×ouµžmvûmÝhµû½+u-G#èâò‰G¢  Zž -b/ _+O,XtM´v/JW#’òÜÞ‰{²´y×¢Vü §FŽƒTF,ö ¢Q L™r ‹ÂKÜ;ɈLö¾T±Ð¤‹­ÞÄéVWëê‘f¶u[ë÷ͶÖÖ§zÛìŒÇÝvW=vdLø×¢¿,W8‰«$9åªúïPsv”A1¼È vVA¥øÜ|ï)Úù,e;©Ëµ‰—S1(,ñˆƒ½†Õë¶t£—”¹ºÕj·»ýV¯ÛîµÛ…%ƒ‡þZZ£¾'”#ká  BU·DÑ`ëÖŠ¼Ù×¥@Ñàa}ÀU ORáªÁ‚ÖKî ùÖ‹¦nðä^K´p°h¸…+90€ÊÁçÞ¨d°ðXL°d°U2X|ê /„…ô%ƒ`_S2ˆ½“Ç— b¡%ƒðDÉ Y2ˆ&WLÉ T dŒÈ­ÅìãŸÄb˜Ò…MÒó¢MÌå‡é¥D—v’ æv´g›ÑG8áÒÓÃ2ðgl¿þàØ“¹ë9y]G|üÚÑuâ^œ8‚ƒäÐw.Ÿeÿ醑Üÿâ.â´2ƒú²Ò9‡¦ ˜ýtU„øÙ3BXZbaYÛB†m¿±3¯0âuu³{9а­;Ûø§ç[ÒTiaý¸ f±µô•¥ELÌïž}g»s›í2agqŸ]Ëöak@NZ3sùÎ2LœôÜ„¹\ûYµ }Ì”ä±ñÇ›N\µQoÜu¬Û¨/µ±G4ú’Ò'$X)îÜÖ~Éfî\@{R-÷äúù4KBŸ¥J‰%iš½øk9\s‰3ãñ½˜¬Assm¨Ë6Ì1¹–g»‘Fx7nŒ“‹Cqüm`uºí¦íÝ3_:jòéäÇ·6Ó Ï­Fh3;îTñ:šgZãE] ˆâe>Ã¥ì“cš­n§÷ô•\r¸y»†m^G¯BЦ£q‚ K×+z#Àõ+4¢*zŸÙ—áÊs¿V!ìYmBU:V¢ƒvÏT¥Ê¹–ûª»°“=Ò_+žƒå7ã4¬$ ‹9i£ÀÌœAœ°!{ lân‰PÙÒîüùjáüÊçváBN¢.Ò%¤G3ì.툟JÄ󯂙°]W+i7P¥ ^Ë|ÒÐ×òÆD®šü^"3žò«T±L9Ñ8«âk>¯¯8k ŸÜžôbÌ[fe~ýïáåû†¿½ùõǪ Ü=_9?~aý0™ÈÇb(Àý’¤RŠÉèåÇož-eYt®k@e YW"i}î+åqÝTn¯+vRS©µ—Z{©Ê½½£[Þkª|ü¶Nª’8öä½ggEÁÊ©5GPsòü UkNJÔ %Âë-j¥í–ä(d‰Ré]Ù¿áï·,ÑrWgÉ›zg±ŒîpƒâZvBª‚ø|Ã,ã K»>¡i©­ZWëÆgnD­‰ÀeSòð„1÷R5ºnxžr¯¹Íó$Y×k­Û??ÇŸ´T!^¬Úmy*Ú“·>Wí“ãA’£Ò¶“‰È`/A#.cÃ/c¢/C£/A@/#Š]<"†½x޽xˆºxŽ+pñwñ—~n .AÀ".£b.A ¢/A`KèìÅ#ph‹GPK¯Hq$N¶8‡-e@{7¤Å‘bÒ‹#q®áX@²éñ=!^‰—.Ž)ŽÄËG m 0Å‘âqf|aO‰p:MÌ)lǨ X)¦H¨âHñáEG–ƒ,ŽSúæ G]3¡Å‘‚&N–G,Ž,‘ªÒ†£8²dchôåÄ eX)îœ@qd$Ž)ŽÜ%íò4‹#÷½‚et[–þ´Ã ¥”»)¥”’JTVJYXI#°¨8’êµ ‹#iŠ#©ÞPI#ª¢÷GÒCGÒé`X‰G––s-÷5‹#Ë·SY^&íœD]¤KI;§ŒÌz#¼–;ù¤¡¯å‰\5ù½Df’÷N´L‰Gž¼…Gž“yCGÒ‰G’‰DG’J(ޤ—/XIÚu%’Öç¾R×MGÒ9© âÈÚK­½T¹ÞÞQ‡ ^)aüÅ‘tÒÅ‘µæGÒk¼8òÛÞ¢JA–ä(”/Ž ,Ž<åM=¼8’Ъ`>ß0 ¸8’&B]YºUëjÝxHqä7ª‰ő兪Ôht¿lqdy]¯µŽ¤8òœÕN¸8ò”µOŽIŽJÛN:4$"{ØN0 ݉3Ì®o+,wAÀcbÀ(IU—:d ùTÂÖòê?hAõΞ»ò¶f¨ÃÄ ÅþbÇÅÅŬ8H^O6Éj¨ˆ›;Œïöd¸%hä.ö/ié¥qË™’’&$¹,”bQÍíSž^uªõ NtãÇÅn«$¾$3YlFÓgu~0`l} š…ŸO¶çù‘•ÞÓ¹vy-ÌÝîúͬä‹÷Ž¡žÔ2Îʸ+>1û–ÈE\,ðÕøƒj½L¹’Þ0ˆ@¶gÏœÉO‹]äÖó—6ùžŸ“‡ŸÒzž¤Sþ)I Õ±=R§ò‰¿C†ÿ…)ãgêŽ7vB©;=µ!9œªÞ®Ô•;¹RWj»cLL«3ÕÆ£‘­™NGëõ[]­g›Ý~[7Zí~ïJ]Çm’Ò$zT }ÇtRÝRFÇö—-ËR&4W¿,UÊÎRf‰óEÍ¥+^ËMWÖ˜Ú×òá¬Fç9Ìp?–ÚV#m»âöá,ã§ìMÒ\YæXÚlVá–íéAN¬2—&ò[Å´L_+)®HØþJàªð¤º?³Àõ’{A{zO7ž¾J±{´óg4Vª 4Ô]*##•³U,WÚ£óÌõ3öž“š£Ô?–.lÈ„1SnT(1KÚ­Zäåj>¿ô™;|_­p ±#”˜ÙÙŠ6r‚…ëÅ1_ÙF‡wy’ØwÜ6aÈŸ¤gWc9*·ör“ÅMž·]¥É<ÚËgi.‡õâ7ö¥2ÁÕH’/¥‚±zâO¤yeµCA)1)e|ñFv£D¹ÇoÉÌVìGå—sp…'µð£õwúæù"Óu¥ xq«Y½¿ª÷WõþªÞ_ì¯òerõFë%o´Ž¬hùJÞZѾO7WÈü ©y¯ªãUg¯’*A²&°‘ŸxaeÞbâ¦GvU)2ß8“ÕÜ ~«Æ„Ç©PãUàF÷o}/r¾FU Í×ÕCfuœÛò¼ ì±ssóÂÓ÷šðÈ%PÁ xU«Ò“¨æV!ñKW¯Ö¯ž÷F™-/ KÁ¯þÄ©^øVÝîC}B&|B¶Uã\ÛÚÔvà>)OSG_²€súÔ³„ðjÃGXF^¢ؽd€ Y;O½‘¶’—@/™•‘XÓšl®Üêû¥ã½¹üY{—Ô~ûJ,Ë_>Ö”«¿C(Ð"8ÇÞÚx2Q„ƒ@k3…*×ÑB„*×¥*×E&%¦Pmþ%°è.&±ØÜ1»m½ý´á0ʋݿ%¬q/'€;“Å,‡7áУJ誸~ÙŽVòF:Ù»$üIç®O±ßÉ+oc¿óMVG p;KÉ­äåÒ±âñõOí…nFôRÜd-®Næ"ÉM«FXàØajn¤ËzœÒòe ÑK 
ªnF_¦ì®7«çt=§ë9-oNËq¹.d¹0þ(¦0¼Ë’=È“§ä)#eõÈ…ò¡±5˜l £w“I‰;#éà;~tqj'в¯ˆ%¢ãxáj”•;Ąԉ­""TA‰¢Ñžkšw‡Ü­¾ '5\Úé`>½¸JÊs>½Ïd£Hë¸=‰g廼_&×Hn“»kQë|F“ú‘‹|G:Êp£¸rãw‚¾ÔyYËœV™–ÕêRÍ­´‹»¦åtÆÖXëöÌ–fÚ¶©õzã¾6ju[£v¯×oOÛ廘`‰T!LAð×ßO£ØÑu²!Ü81äNŠDƒªÉyBD²‰Å<fû~q ‰XÞ%ò2.ïå õ×KxbæhM¥@Kœb +E<é u¤jéÿª’;êO¸"c!t @×\5ݯ«¶:ü¤4 Êûˆ4=ÀÃ3üèï¢:/¼û/f.ÇlVe:ã±—ºs%œ6ê[£Žuõ¥4ò6ž6Ai0H½­Ç9“gaüL>†’Î'S*¢Ïò4Í^üµ®‚¹*ñ(ƒË4›™^»^älD<æñ5žíááݸ1ž¯BöPcîíùÀêtÛMÛ»g.mÔäÓÁgO:A3 ñÜh„ÌΙ¯¡i#Öž[¹"x€j¸ŒK´d êèr_#e 0,]—üIe D’Û?û2\yîW™BxáySB”o¯N…RuªÓükIÇU’ú׊ÇûüfòK"~ÌIØܹ e©²úœ;I}3gº k¬6AÁäídIAp$‰ Ùˈ¤²Åò®™ûZ›û3µºVl†ò'WÚšª>aA’çæHΕÈóQÄ6Q•| ޼küïÏ!ñ|½úôÚo™KžZ)ýß_\x^«h¿z¶æ'³9ZUÙL(ZZÔc¢{Êâ)]ÚŽSÕ¢:¹ñ›fKIBlÄ#,½k©Î-aëH.;·£/ÕaO_¯öØk½öØÕsLJ¯tXòfeã³uö#[jŽ­2 VN­‘ç ®J#¶h½Ð“vWä¾ßwõ^ϲvݱ†syvã”Ý” ¡Ò¶ó°K††SótŽêÛäÄé'7(ò¼ð‚ž9‚ü-¼˜§ä›ƒäÍä±Ü""ž05b²ÊŠË:RªÈ"~ƶN,ð‘ñ3ùŒb™T8ň¸ ¤&f/Î¥ÇÁ¬NÂ"¼E©d¶ô y²RT)­‘®ÁÛ^ì7¬Å»½8IÝ£'<Ÿù²E˜uaèØÁøFs¸/%ÍZcŸðˆÖ*+¢²[n›ö¤GÏEsi·Pdh4í*B0³UÏ ø¥Ô-V:›vŽª;¹v—°^ÐKÞb_¨Dê#ÿ‚úÆ{I™øN¨x~¤ÜØwŽÂ6bîbµPÒâ wÎ6· úp@ÊÍ þšˆËŠÎóEÔûÚLÅÕŸìyHŸ•ylˆ‡èÌ+áÒ]O ª ”jìG'R®€àZÇO­©9õ®TÅ •å#¥Ä)öØÒ¤Ã&òfÖ§`%mbåøÔ[ɲêN"ƒ\ ‡ÜMQB†þŒÄ(ï+•BÿµÀ/qOzóñÊç¼µQJ²È+iD7T篕²>UF÷JtÃ,PL] ¤Ü×Ê¿oþ`ßÒø¶¾ˆìðVYÚÑÍ@iú˨ɦÍÀŸ;áVš|“?6¶ë5îó¡›‚}ÛY„·îrɬs4ãb™?Œ®•×^ì:‹Ä‡½±½™ÃKõ§Üa¸(‡c 7~q޼#äK[A\¯„iN¥ð¾>šþ·™8{Î?dK”òŕĖ$]™/yóßʇß¾¹Ìid‰?BSv£KümŸ¯Kƒì9ÿöµa(JüTª2¯uþÄÊcÁ>a3 ù`Êleöe^wÙg-%E`£4^e¹3ÏÒÈìoÜÓÚæÏóßµ­F§g¶¬^§Ý/–rèimÿåMbW”OÜÞ|Œ&ïÙ/´Ãd655¾—ËvS¡¿pØl­fŠëMý`¯ã×…ŠŒ”¹Q>¨Õ€%.n8Û¿e‚Ä@yVåŸvàò‰°7òŒAÖ(ä¥EDPü¿âzÓ¸Ž2]YHWÁ| ”›Ž9ñÙ8šdE¦…SØö:W‰I’úCÊ&ál8ZM§N0ŒO°™Ûx¨T² ~èþÃàãª9"øäè5SÊŽá»´0­«åT;t¸þ2ëi2P^·9Lû"u(ä(bÑïœ`ä‡áôSKA\ ÿzÿ%Ù2,ògŸð¶nô{ú£p'˜ðuîÜ9su ò¥T½PyέºâöÝÜŸÅÔñ_ø$0Cæ‘M⼟deS?/çöýÈ÷o“Eúš}‘8= â0ó§ÅUöé#)éÞ.Î=uvwË¿ÙÏ=w¡üÃõ&¯·©âÔ¤‰I„C VÌ@Cßú·ì»?ýûÙ––Õéõz}ËêwzºiôØw?òßýÀ #¸ ô&.§RÕõÎÞe¿1û­~«R„½OWá5uç^øí3¾U˜C¦8_Ý0b ¢Œ™¸Ž’œ†ðÐpÒKï…ÛúÞI-,É&5nYÝV¯Wí¤Ž»=Î5<¡™µŽÏmÞÀaÜ@ðOsU¤Bï4F·Ã¤-Cx\:[ã² ü¯÷›qøàØÅýÉÜFeølCÌ6ÀlU hÔŸÃé¿>$ñ(uÀýœ 5ÉrV›öÒmÞÍÍȰéòt`šÉ;¦iß÷…úO¶Ò3ÈYü7—?_ÎÔýÊ>a2’OÞþjÉ­gü¯MÉ[\ñ¦þV 6¯•ž»³ýú1Ï·«¦(Ùøíoê¥ÍsŸsHq½Þ=À½®ÑéwL²ðxZ¾½'kên{Ï9°/°÷¤—·÷½=ö¾Š±$²÷±ð®Õïwt»RÛ{ Ð{l|¿¥¦ev Úì}º @ÔÔC&`ëŒCù¸•Þ¾Ï"ग6–±×ÈK2À„·LKoµk ÕRè¡Ûè²¶Õ6Ÿ„áž–oÈš*-(@&>(_ ÊÃ@D‚òd’ ]…i'jªÄÝö^™"»í"0Ðn"¶Û#í¶ÁòŠwÛ`(ìnL·ÛÞýUü^D}ø,lû¡[o¨ÀÖ[v‚‚Gm½qÈð­7¶õF¢·Þ(tðÖŒª–‚¸þõÿK²e™ÌKï÷ÚºÑ5û½Ó: á$Gßz·;F»oêF»Õ7zºÕ³Z&rëÝï·Zìç–~z{ Ê£{ ¢8鈋ÙÓõ6p‚î{º¿¹ßêv{Fׂ6u÷Ó•„DöJ—aB{z«ÛwÐî§åÛ^CgÞÑï÷!ùŽž–¯vdM…ç®~`:P¼sÃI/­v}¹«UŒ%©ÚuÛV«%º õêÜU|î*WèËO¯a´Z}†Üëœà€Tíy…F |0B·€K[oýn4§÷>]…}æÂMæ1Á›ºóé Ü‚Ò幉Ð^Û•xº ûÜk3kÓ1;ÛgÈìèiF‡Óôt ƒu’n¶M¶”¡ö`qïvZ]Ý2Aа÷é*&5nvúmxSw>--FžÈìXݽÄRˆy1 F‰‘#IbäyE1r.F.\EŒ<6#Erdãar$(:@^ GÂ#äXdh€ #A‘r$:0@Ž@UKA\ ÿz}ð—½~Ë´À ÝΧ«pÎú-½ß±ZF—Æ9«9%N‹S‚ °iv;¦aXõ¿Ôf.}§ÝiŸh´å„ÑŠv[¤Ñ6.ÝV¿×퀽OW±1cÂ{L~»mêî§+‰¶ì•.3Ú„öV·k@;h÷Ó•,è]½Ûm†kê¾§+Q;š¦âaþÎlSB´Úí“^þ¦³_í¤%¥ÚõZº™?ªa*9„Izç*Ôoè-S×yƵè¸ôk÷ï„Ý?>À½^«ÝÏÁé×u4ä]ÉXXÝvâ&xZþÚ›ï·Ø7à¦î|ºþ¾Òåñ÷ÅBû-£ßoC;h÷Óò×^&Ü2­~¯S›É&MÞM§×ïö€ÓlßÓ•XÎ=×6Í6´©»Ÿ®€Îï€tyt~‰PË`N.¸ƒv>]‰EèÄA?½kê¾§+Q;š¦V³í“.u!’>–¤j××»þé-D/$á‹r>QÃ2:õXH §€¹Ší~ÛêÃfô¾§+±ÎLx‡ûÞà¦î|º§`Ÿt©Nwû;FßwÐΧ«°ÎÌa²L΂r‚Û„—‘– ·Îl,:V×ìÃæÕÞ§«0\x¯ßîà¦î|ºm¯t™ÚmY}£í ÝOWb:V¯Ýk›­ÚH5Nš~«o¶a»Ë½OWb˜p«ÛÖ-pSw>]‰S°WºL§€ íöA·µzº ‹ÐkY­^·£Ã¬ûÞ§«P;&¼m0¦mêî§S½{çDO´ì­ïMÝÙ¯öRùÑ»sß[8^$–I~@üþLòïÜÈY¼~xÅß«ò*œic?pØñv-ìeãÞ^̶^­¿?”y^, ¤Ãž°å‚´µI_'ÚÑà½+oør¾b òn0P®Ôæ* šswÔ1›éÃWê•ÇÖì_øRÏŸàk=ÿ,Y¹ ?ʧ`å\yqt¼Üý}Å>Œï¿ò¢€©ÍÒ¢ppå)Цpe¤™ÓüEI†›½ï+nV¹"óôá-Súj<šz ÿ“áeTùÜ!ÜqxåÙËåÜÇÅÛ¢—O–¿qVáSñ|bqñF«ÛÐÙÿ¯.”W¼íìÞÞ3Ø¿¾¸ÑÍ'wá|ŒìÅòUòê¬m*ÙP­ õè–y4éÍð‰¢TRÖ‡¶EMÜ=eV†öÌоئ ©;©ìð'éF›©ªÐé{lâ×ø7¸…ÁØç&vzý^ß0:}IÙûtËÞ·˜YÑ¡MÝýtº>ˆe_ì—ÅûŨ<3 \( Š Kà‚¢¹ÞŸ$„îOãb%þdÅ^ÚŽÙÖ ³¼âá­sϽÕ|~AzÉVÆ/oÂVÌð†fòÈó{ḇ=;li$íÛñÜ勱4`re`6 r¾Fô ®·r¸âð´pâ¡c‹‘ë¹dsÆ/‚}8;Z±Nã?N¿ÀP­ õœÆ/^œÆ/ª¨„iüeš€Nãm¤5™­œ³îÚó9¹Žýe<i©ïaHÛ;s<'`–g˜ª"asoÜÉÄñ†S×™Oˆ{‚›/ZDw±dÆÖ÷xWÌø64”‡¿ ùž=5”ˆ«‘“ØZܹ=ræÃÐáK[QiÁN0sÒ˜%.ýÜÈ[]JX˜„…HQ—v~ñb ãóÁ £¡ -“Ñ 
rxãØ‡Zm7E½µ_{ðµ_{ðµ_l4ù‹Æ7–M˜ŸÂmrŒ‰#fb+ÆÌ}œÕDƒ9‹å<&l-wQ齞;{îNÈÛš¡ÆñEâ¥ù‹íFÄ;.ÉžI¶Ð7w·ӂX” [GØ# Öhé¥qË™’uÙCŽƒUê˜ã Ñ*õŠ^”»~¼ª®–ñ¼+‰Ç,[ãhºN‚OJë‹Ö>èIû ~„Ÿ“Þ×TÇ<Ñ„ ïÇ÷#oó3TÝúƒÊ§XØž=s&?e‘»Ïd>$±÷-qc›D\ãø””Ô¤?þ)I Ã¥Þ$è);Ÿ ~ƒC¯/ä€O»¬ðÚ\š„`—¶ïÌÉð¿xNðÁ™:ãPš(郘äÁÕºr'WêàJÕ[}«Õn÷4ÛœL4³3Òµ‘cÙڸ׷º½ž>ît:WêZŽFÐmyå!Djøãܤ÷KÇ{sù³ö.9 òj«ç/9njº§ðOŸ‰à›–½kå‰ÅE®iÏ*Ž* DIynïÎÄ=9ÂL­ø™c»]qä“Z [¸<È¢ðR'wvR”Ó*ÓjõzTs+íâi¬[S»£õ[]3û£–Ö·ì6ël³55ú#»Û1Ôc„}à—}ݰ:ºÕå7ï{ºŠê޾iXÝn[˜¨.=BѾÙí[–©ëuvÅUØ®ùÇ¥§›ºÕnÌÁÞ§«¨ÌàÂ;=Ëhƒ›ºóiù·~’.íÖïTh¿Õ³Lpí|ºÓÞëö ÖCz]À[qo9`ãÒéuZ@s°ïéJÌA­(f«kB›ºûéjÌÁ>éRÍÛ(èý¶ÞvО§å›ƒs´fϾsÐ4j.Â:Þx@Z­¶ÙÜzèiùv nš}³nêΧ+¨ç? ]^="´Ónuá´óéJì@Ÿ+R·Ý>Q;ðrëù¹ñ¸´ ~+,L…ö=]‰9`ÂMËhuÀMÝùtnÁéòÜ‚Dhß4@Äœž®Â˜ív»×ÞâË-c$€Bö_˰0 4‰©øÕ`I%rƒZ ·ƒC¼eD¹6бJl·~5ÚÆþ~w(Éhtt½ÇÂ0ßdïÓU£Žn°½[¯Ó6uÏÓäd±,fwtË'“xT ‹D±T1 .Eˆb‘€a!X$à€ P‹Ì"G.f‘@bÂX$° ` 1`re€±H€¢X$ø 4掳ðÔ+ š[gÞ?lL/%: “ MØØòè¥ã‹‡ðÂÖ„c.1ý80xÆ\ºðæûÎ]ÏùÈëŸãôWæ&÷zàĹÖÜ ‡¾sù,ûO7ŒüàþwW°Ô¯•p“Dšˆo~áµâ¡ÙÊ·ÉËÑä̯í[|7B¦Í7`MЏ¦V¤ˆóOÌîå(RÀúÕõfi~¡4UZØ_?®‚YlE-ý_eió»gßÙîÜf»€L½ÂÐ7_MYÔ[rÒš™+­”aâdyÛ\òìgÕ6ô±(‹ç­jYøEÈxÕ¢n«Yh6‘ÚØ#}zÃ/Ë2Hqç¶öK6sç‚C™ÂåÇZÞK¦Yúø,UJ,I»‰¢e¨Ù“ w†_zz¯-qŽ<Êæ¡Æ÷ö¯›N4n²5—{Çæÿ{cD•µâÖ¹ßÝ!¨¢É¥zéŸÚÔ;I{â ýã•§É= ÃìæÐ æ//¼q§‘–mÓdÇ×U-mY-™çèØ‹×\aÍæ& ËUÖhVÜœ÷¿½¶ æáë‡+µyÅmÚÕã›ÿûêùûJ½`g‹YúPnÃ&ÄœdÏ¿Í&Þ9Á(ynæDWêz­Jí‡k¹Ý¬º ;ñªÿZñ·õ››.oú;s=Í·WÑO÷ÖBÙSa±ÉI–-2.¯dx.a{Y¾ôãp._f.ª _¼Ð©Òe®ÏzþÝùóÕÂù5IhAú·ào”T+êÓÅ^­X÷ï\bãÂ’ ôôâåŽí–ãTõè>¹¯ýÜ Ž4t‰ ørvgš½øk9dNfþ-—Å<ëÌ<¹3IláöœqÔxæÈ6»qc<_…ì¡F쌬N·Ý̘fš©u+.&í¯˜e§Qœâm¸tןT!°£WóZÁh\… ÃÒõŠÞ(¹U¹Q½ÏìËpå¹_«öŒiª* +ÑÁN»g¾° s!Hr«Ã™6 ÜÉÌ©x£œUë­ ;Ëæ_ålÌ=§MÜ v^kçµT\;¦z¯b™â»¾ôدùœBì¬-¼ãݽ$ó–Y™_ÿ{xùþ‡áoo~ý±*wgÏWÎO¿zF¾›ïïƒ3­T*Ž/Q’üøÍ³¥,K€hÄPYCÖ•HZ×qå2n*·×;©©Ôú$çüNr8Cq’·PÕÊÁ$FþØ)¡?½½¬O‘ê­™àÖì’ Þ÷|«¥›gÕ¥Ò7r»Å’'‘ˆx.aÏqR¶±P/ªiÁîkª’8öä½׈ñú^ùš#Uµ4t9í¾–3ÊY"Ò›$ë·LÃài\⢓ՠp!(5rmêä^î4UÓcåAæ0{Ý_ý {³¥Ë7AéûýF”*QºMò¬“ÄSòlpËèþ78HO<óªðeÎwDÆ›M*± "¤ãrgiUZ¸í‘՚͂zA‹âöµåAiˆÜ¯S^™äx¶ä¨´í¤C£A"²Ùõö|¢Ž@ûËø1í{êÇLK¤í%W›8CÀM—Hèw2q¼á4»A‹Ú#ZÄüÝôqI(Ÿ_,J‹žò~P"®FèVr$n\;̈ˆ;yá3'åG¤Ä¥ŸyÊJX¹ö‰º´Ãð‹kŸn_K®e2:Co{âP«mVø6„ó¡–^Þ!œ,ïÊ€önHy‡Ä¤ òá\/±€ªã{Bœw/ Ì;$-Â;„—åÚ@`x‡ÄãßxÎŒa~ ŠSØŽQ+wHL‘P¼CâËâ*%É;$¦0ôÍà:ºfBy‡Mœ,b{ŸJ ¾UiCkQ-ïPÉÆÑèˉʰ Rܹçiqg}Î ) ‰¢ãy‡ˆä–ä"l…8ïQ#èy‡ˆFÂ;DÔ–R¼Cäýq¦¼CåÓ¸$ ÒóÑ¥‚y‡Ê‹e«ŸS2':[>‹³˜wèÒ(%+&cü¼“‰Á¼Côšxôdº—ž(Žà’-^aîÉYçm_Ô»³BÈ;D#혼CTýà¢æ¢XÄ;DõZ…¼C4‚¼CToà¢UÑû€x‡v »¤àÚ+°oõõ·Èhâ¾_P³Ñ*f)*-çZîk²•o †¥¨¼4ÔÆ†B]¤K)…¥Œ-¡¼–;ù¤¡¯å‰\5ƒ¹º4fT]K"ªKÑÉ[xKÑ9™74Kh0K™È$ãÈRD*uGÊfa¶&¹|A–"Ò†¬+‘´>÷•ò¸n*˜¥ˆÎI±•;÷9#û?÷!_: ,Et’1,Eõl¯·f/bkvñ2‡ Á;D?~Þ!2éÞ¡ozS/Y’±%à å:á0ïPé¦)H%HÞ!™Ä¼CeÛ´®ƒ‚ÏcLpÞ!º X¾ÌùŽœwˆdŽ’ó•ž¥Õ†@¼Cߦ& ñÕ¢HÅ;tÂ+“Ï–•¶th4HD6 öü`ºgh/—sI>FI ¦º œ°˜ÖŒ›+ï$l-'Ösµ€Qïì¹;!ok†:äuW!-ö;æðD¡ 9UË$£'!nî0œ;÷˜,JÐÈ]8ìÎÁÐÒKã–3%%Mˆê²9C²¨æ<ÆŒ­%>koüÙRú†Ì[½ñc™UR¡^™,æÓô™…MB¸ƒç“íy>[ RV;:×.¯…œt,ðØž%ä猛 ïC=©eœ9”qW|bö…-‘‹¸ÿªÞùƒj½Liú’Þ0ˆ@¶gÏœÉOA]DÖó—6ùžg<„ŸRªŒ¤Sþ)I Õ±=R§ò‰¿C†ÿ…)ãgêŽ7vB©;=µ!9°¥Þ®Ô•;¹RWªÎ°ZívO³ÍÉD3;#]9–­{}«ÛëéãN§s¥®ã6Ii=ª„¾c:"Ž(¡cûÁd)š£“*e'K˜D‰y¾0‰R dR¥¦+ë?­TépV£ŠÈ:3Ü,VÕHÛ&³z8˳ö&)·•,s,m¶€ ®Jöt‡ ' HzEù­bZ¦¯•ðlU$l?ÉVU xBœõ°®Ït«v€rr¶8´*˜W±;Ì5.ö‡“z°ÔãÅ U8îlwõ^Ëxú6"f`7’H63I N…³ôëªE^®æóKŸ¹Ã÷Õ ‡;J‰™­x`#'X¸^Óù•mtx—'yŽÇmÆüIÎñC%r+·Öu“Ô›XžŒ 0°¤í8ÊËgi.¥÷â7ö¥2ÁÕH’/¥‚±zâ}äHÞªœ!µSQ;e%fÅc/~1Û**c³6.*»Ú”]ÅóøÓÛËoi…{ZgwÌU®rɹz»—·ÆÖ{’zOòMìIž7Ö;”—¼C9mËQÀÖºVï†ëÝð³ÝpZïRï„)%&M/~yß(QŽ»é[2³ÇÛ™«È…­¿Ó7Ï“g­+m@½ï¬ƒu`° ÖÁ:0XëÀ`$ Õd\uœæ0Û9²o("YçäÔQȳˆBJ• YØÈO¼°²E<ñž";ˆªŽoœÉjî¿UcÂãr¯ñ*p£û·l»á|ªšç4<†Ìê:8牾 ì±sß;.Q¤hÂ#£üª²UéI¬À7ø’CªMÁ˜»Î|WÃ-&Má—BÂ÷I0Ûf§ÕÞQé²‘ß þÑnj¯…DÊá'ñôìt‹ `5Èz¢àt VƒR"ÌjI¬¼SE¬µðÚæP¤%¶/Ù€Yhë‰YF¸W픬~y‰F`C_ý’ œüLõFªyèréQi‰50å®ãÊ­¾_:ޛ˟µw Gž¨Ä²üå#÷žú;€š/‚•îå$E8´6S„á/D„áOTJ1ßФDú›ž‡ 'û,e ¥¯‰±{XÌ/úp6FùB†Jq–ïU(S¡6ÌÑç®O±'É9ÇbOòMÆ V…#Y‰³x…ѧÀöB7£¸­Nn²ºV's‘$U#,pì057Òe=Niù²@|å|æÊfôeÊ;êz³zN×sºžÓç¶¾åÂø£øò–É»<ûƒéë˜ C•x¨Ákc4~³ðë§äzüïqI—~ëÜïξ*|ìû·®“¦úkSwî¼~ÎgÙL+†iöºÌùKÇ 
oÜi¤e;©X{-Û0?¶aµdžc/^sE4›slÏoü0âªh4+zÿ Û}ð·ÕVÁ<|ýp¥6¯¸ ºz 7ñ_=8]©ìãlqIÊÅ`ÂäY௖ϿÍGh’ïœ`”<7‹ËAÖª”÷¿–FÏhšÔ¿Vüíü榋›~àÎ\OËQç¸ÓÊRqõ9“úf·D–À̳Ƚ¢,QY“¼•AòñòSÆ¡˜„I®ÈlxâO•,,G*ÄɤÔÓ9­„ÔîóôžÞµžU¶&»Ñr§¸½ BjsâÜ5s_ks¦V׊Õú‰y ²ä>a ú,EJ¡=)…œì¶sD!Ïx›%uâ$lS®JkÉúâåŒÝ–G\Õèm—âJ()È×ä¨JŽ"¶É¥†LÕ4{ñ×rÈvµQå2Ø(3#®ÇLs”ˆI´‘ä*ÁHÅËE·‰ƒÍ–.1Z¹Eà}¼ ¦õ7õ†â$:ªçÓã`òó™Çem’fr5¡²Ì•ÿ†µxOâb½ð ,|[®VE*èúÔ·Ydh4í*B0³U#-ø¥Ô-þY:?œvŽª;Yws‚_ÐKÞâiÜA±e’K}djTS>¸N¤\©®´n¯Û1­I·o^©Ê*ájmJAèwçovÒûí¢L5ˆÀÁä[pÈÝd¨dèϘOÒÀëRâ¿>øËŽÕ²¬ÞÓ_âžÖhÿìÞk·VË6uÏÓÄ*oØò´Iùd‡·ÊÇhòžýâˆMýôæã?”Ïy'J(Éj ¤±øP œ¿VNÈ4UÝ+Ñ*1õ¤’rO^+ÿ¾ùƒ“þ/Ÿÿh니÷ÅÒŽnJÓ_FM;é£fàÏp«˜£É › Ûõ÷‹ùÀÐÍz'{0IõÎêYzçQ¸Løƒ:w:P]oê«*OPw@0ç@û³˜D8þ óÙ†ì§^4¼±½I|ö¤.Âûöórnß|ÿV‰û=>»fß&QCŒŸyØ]°êìî–³Ÿ õBù‡ëM^o3—ªI;“%SÍZ7ô½!oà0n {êOľî´{-«Óëõú–ÕïôtÓè±ï~ä?ð¼Ä$® –Óhu½{˜Ì–®3ëÔª‡Iê0}LÚ¬Lý<.n¿gÂfú¾§+1ÛLx_ïv-pSw>}Èlïí½¼~ò']ÔlìЦ ™˜ïýÆÛl†ÑÒÁý´ûé*Œw¯Ýiõz¦Ñ5uïÓUhQSSõ{çDÛ ¦¼ÍŽw”½;7ð=¾£< v餸ÔãôoÊç kÕµòúoÊwnä,^?¼âðÕ@yõìä%®iüÙzµþž?¿?€P$<ÉÈO†e8µÇÑáÐ$.Ù–æfÍ0KÀ>žQɘЀaËÐØ…tžXĤ¯Ÿ•ñ¬ãùŠ-Ê?¸§k®Â 9wG3Kœ»R¯<¶nÿ—{þ_ïùgÉêýcøA8PxPîÊ‹+óã%ïï+öag¸ò"aŠóÂWž¢h WÖ’œóO%nö¾¯¸uåŠü¬ªùÕ:y4õ,þgƒçxá*‰×p¿ pÇá•g/ãföÞ’Îs­èÆY…O[ÀçoÑê6töÆ« åoþ«8Ü`ÿúâF7¦Ü•D‹[D²ðíÎ2È-ÙåL|0~IæøÑù¨Ì•g[ ;½÷éJ|Tš¦Ü!wE…;¤½ÒK¶úÖ~µ“>–”jgè-£}‚q­Ç•ó4&ÝŽ©[ºÞ·Úí~·ËþcZˆ€ HÒo³ýq_7O1Æè¸s-:±0:½žeu £×fÖ6 KŒÅ[~;€ódGüÍô¿³æð#ô½ãÒ±xð4÷>]…iæÂ»ì xSw>½Ó4õ6xµWziÓÜéì5Í\h¿ÛÖMpí|º ÓÜ7-™¶kê¾§+Q;š¦VµÚ+¾Š¨U‘pdÔ W:jŠZaÀ°Q+4va$‹øò£Vñ*}(PåðT)w:v0¾Ù«úýÃ/\tÌ0h6·ž×œ0&ô[ºÎãX«ÐùôËÇ4dÅþÍ÷s—5á­¸^5ð&¿8#dýêü¦ŒWùGÿáÜïyò–}“>iïÛ98~n“Ôòw6|ód–A½óI޾]{ ÜÙaøÅ&ü㺶ì·éþn‡îøÍ*¿Â8Vׄ“ ÇCƒƒâxB³÷PµH¬ü¨8žpDâxèÑ&A*R¡¤ÎŠÄñ@¸à8­0ŽvJÄñ@Ã!ük_’¹Â„^{¿gêí|@å`S÷<]×NÒÔJâx{¥ËŒãÉKRµëtúÖ©æ§Pì¨Ýëë½N¯Ýj[&ù›F›2Ž×jè–ÑÑõN§ P›OË7dM­bã~@¼ü{±pÔÆWrãظãÀpwì×øò7î<„ï'‰§•m‚¬5¡&Áw©à€]ª ªîÛ¥òsl¶I‰&àw©£M‚T¥BIÅïR¸À]*­`—ŠX÷„w©ÀáþµÀ/Éü>¢íÞïëz´³9ðt5.*IS+Ø¥.o—ZÅX’ªÙíµ»¢»Ô–Ü ‡Ê6iwŒvßÔv«oôt«gµLÚ]*ßý2ó“/ã,3é¾ißpì‰âþtÆ‘2e¾¤2¶Ç7û:ÛÁ¨ƒõçðCú¯I±—:àœ j‡ª6Ù¢yg47£Áo2Ù'aóP¹Û‡ð>q õÍåÏ—3u¿²O˜„ä“wüjöAò¯Í¾%Þ¶¨¿¨Ã‡ÍKe_‡ìÓ«ÑãÛª)LA]Ø¥³‘Îãìþz½{h­~ÛìõÛzÿ¼‡v³ä?ÜÃIÕ÷cá^Ðø lac0ö©EÇìY¼ØÏx©jqøŒîÄÕ¢ ñ µ8ˆ±W-ú­n¿ÝOÝV‹`åy¹U8uµäcÅùêòšÞ<ëOAEjéjÚ½oÍ|‘ŽÙyM{ŸÞéërÞ%^ª3Ç÷;œ¯p¡ì}Ýçâ{e¿ÃEô–¸§/yóßʇß¾¹„¹ÒB®ö^éé6mæ~óóèÅà1hüÚh+JüTº~­ó'V^à0ƉE’¦¶;ϾŒÜY„·îrÉ>3”Môñ*{Èyl&ÿ®ƒ‡t mðDËPÊž§·5ýg´š)Ìä1%=¢v“¼Y<˜æ"ÝŒi›mÃgn@•VÃè6¬ë½Û¯"Ø,xªL™ú(¯•˜ä?Ûø¥ÿÛOg¥¬˜®) ŸÓ˜+IªQ¼Õd"?¿ŠãÖálD²ý'ó!nÜiôªÌ»¥hÊò>ºñ½L<Ÿ|<¨Ì_7Ž˜óÉÈãæÍä¹vÃh5C¶hÌjßÚ3¶Î§@MIoyæâóíÈï·Ù_Ÿ ;ú(÷u‰†9_ñ*â†dOÇŒ\¯ùTûJÈKÇâ.q_˜(ÞÝ KùŽG.ØÔ+J[aØæ…c°ÿ|¯|~÷ö­b «¡Çßé]£¯|÷éÔÚQú…ÖúþZùîIËÓ!ý¾D‹ÿt½?íí7ÌxLÓx”!}ÚÉD1ÐïœWoï„Uì0?µ…¥„Nq9«¥âzÜiðƒ{%9u׺_|›çäç'CÖôÑÊ3‰—.,€/­Ã¹F̘ç®ÇôdiI‡ñ³©LŸ™™ löJÍÄlX*šùEb‡ã¿ÕØô[“7+ä#㲆¹“˜&§T²‡\ÁÜéýÖwß+ü€ËŸˆÛ8p—ßìÛÛ«ÈÿVßž¦-R6n¦ÂúcËç‚f»¼õçñûñÑ%;W…ê²OÝÔ2 ¿!sRÜ€[$¦.ßñPè uÂïŸZ¨Æm/ò­'ïÛÛÕÈ <ÖÖ°ŸÑgßQZÈ'2 Yè8d_óŸ=B59TUݵ§§jñµxñ bÏç#榧3cúü©Â•þîB¹k1Ï1™28Ÿ¿™:;ÍLRf§Ë{òÖÛ_¾'nÈÝlº7¦¦9n^dó—œØ2oõ‘¶^ëUùÕ_ÿ¾8Š=ç÷¬Ý'Áv†’ý¦A)>ªcˆŽ™íù1DûžÃ=y¢qOóˆÛßß¿ÿÇ@ÑMǰúf¯ÕÑû¦31Ù_ǺÞ7ºöÈhO[¥LÄ­ÀwñZn{®ØÁlÅ3=ÃÁ\™eàóSˆfQWës&Gü]ïŠø&•í½ÄY(ax#Œ3rÆþÂ&.-ƒZMÄ]¢Èž±ûîSÛWâûùïȱŽèÊ–yçkØ<Ï*îÿSý[8Þ]3noC™7™úÁ-{ KÀPøÚ·/j2ã-ìVr¹œ3 ’¶îLuâ¹ù|¿,’¤=âG7Ž2NîõÜÿJX­)™êÔ¦t˜ç¾»ZVà/ï,?xC–@ತüጟrºTŽcHAEá°¶ÎÌ £äÞ> ×§C?pM=+)‚§nàŠÍF/Jóñ%€ûû?’°ý©=gÐ!‚ñ…ƒ ŒWÛ»SÞ¯õ˜1Óñ…­¯ì͉—ÉMÈDhÛ…4dª@Èü¶!¦V¬ÉÂo›“j\æf9C­é­ÒôÚ1öý`’ÞˆL­Ñþ„z'n8öÙ¦éžw:÷¿°mTÄ\ŠÆøÞš@ÂªÑØ•K3È2$pWg"Ù]ð¹.ÙçÝ"9ÝHHÁþkåG¶dæ©Erz:»˜^ xä,ØžQRËcæ)*’FuÙÎ6Ùøüt¦›g È™rb'd~6éY£3‰ÏW37ºY(…,íE#=Fh|ÕhWɹ? #;¼‘f­—¡,hw&%d•\Ñ›]õ,e'ä̹jLœ;Úx¦<_Š©Èø6YSQrÆôË óØâ³3)¾N|lÊÜÁÕ8Zq""Isj䨲Œç3_H²d›e2¥Ùsb­–†¿.…°.÷~O9]„ÁÖ²OÕwžT_2Ï-RBá(“¸Ê,wl]p~¹ç¤zFËlu;Ïî¼FœkwõæntÁ“åƒ`ÉÒÄksŸÿ‚Ä@ùÄåŸvàò2§†Œ!¡7 ÜÉÌP@ñÿ²-‡1á[: IWÁ|ÿmÐÜ$Ïä]½Æ³›Fx7Þ¬'±. 
¬N·%²OšiîM®¢¦½ŽÇUb2Px¥?(ßÜ G«éÔ †cåEŰt]—ºÿã0øN»gRÁ'Ü~™zPv óƒ¢pÈü7—gvt:\™õ4èÊs¿Ó¾H p´XR¡'išáôSKA\ ÿzÿ%îéëðÇgl®qQLµ\‡÷½eÉŒ1£sBK+*i ¦w¼ß9åkÄ^˜¤xâ à òþZqçÕoÆþk’p™r ø\•ÓÚu¹yƒñEÏb$n`T<ZÆËžI¿#gAêÓÕó€b¼ç—ΰݙÿõ´i¼ðµ ÎÆÜ[ÉÈkÿ&îÒôwæzZü´?]ςҳàí3¾U¦~LcFIõ°ër¾~ßîD™³ ­ vmµÚò¿7SñoÊ?½ùû/?üOå—÷oßü¢¼}ÿÛo?¾ýôóûß”ŸÞP~ÿøã¶EcÀN—ý—N#ó¿~|«Ä 'ᢕWÎøÆWþߣåßþM 玳TôWr$~§¬\º].kqËFFÑ–Šú•¸-»KÓÙJù¿êæù‡7=ìïšÑíX-£Ó5¬F¯ÅšÞíh†aZü¿º¦eô-½Û²º-&€·(øùkiúždPNf$ 4)«RL EBG°©¸Nߪ™5´ 6¬É—¿Œmü6Šm6|ÃY¸üŸÿú¦;üszË?½§ÕfL^fÏç ©ß÷ÙÌß°®UVÿë+ñ+TÑ#òíÚn©—ñnÁBѦŠPüß”æÄ¹kz«ù\iýíß ‚7‘âò.ÝáÔ_Å—jÄÜ®¤cI4×»K ØhœñÄ|í`FãÝozðÖ¹g€|¬/è@ñ €Çö§©Ò¶vßÛ*˜¼{yXÂùJÜØ©ëÌ'ÃÐ᫸Ä›×DÐ7îdâxÃXBHÛjnHhÝÏGñ=¶Õy¬]•„Ï‹ hѳ †’+©¦wè’tÚÖÎí‘3—£s›]ø¾UK7®ZèÄ~ZHÚ=wQIñ“@ íd7@£rãFCJ#£â×cOœ€xºó)žê"!ê=w'Ü‚Ä÷†Ób±ÝˆÆÚ‚äº0qSoˆ¸¹ÃØ5e¸%hä.öƒ5ZziÜõQC¡Ù!I"ïùO^bá%x´4Í<^7†éºÁZûëÿg|ùô_¿ý×èÇq4^üö_ãO7·Îâëå‡wþk•Dðú¤]Ðeƒè~f-Œ7LüBÇ_èÈþ Yç×Ó MjÞ&?eîég2hº¹Rży¾?Å·G)jÒ7ÿ”(‰a$ôU"a@®©Ïd48üúBž€éà™zXK'é-ØH1;äø_<'øàLÀñ’5Ož¸JF?1䃇+uåN®ÔÁ•ª·úV«Ýîi¶9™hfg¤k#Dzµq¯ou{=}Üét®Ôµ<-)ïÓT¯Éex2zDB›Óu)¾{õýÒñÞ\þ¬½s¼¤|K†!NJÃÒUå÷%ß_ÈÃÝê½Ë:©8ºa¾&t7*ŒLTHè™Ù=3WꩤŒ™°ñÌ·.³ïúîÜ­fôBÙBÅeBÖ©oaÖg𜶙VËÒ)ç_Öå†éôºöDkuZÍì†fOô¶fÙý–5vŒ^Ë2Nk‹š®Ël™³ÿZ9ê‘c-ç–:ÿ–ošprØ1Åz‚]›^~çz3²äNzäëx!¿7«ç§eO,ÙC7v¨ŒÇS’³+e²rxÊ ‚§W²¿Ø‘òÊó‡s–=¿R¾°ß„Kgì²í$NÜ‹nÜßÒÇŒ®ð<ïPæ2vŽ«¢ÛÉí_äÍ8®ôÙ§±«¨eT•Ç%À¼Kü% ƒq^[ïê†þôuÊä;îF”™ï(.ñˆùŽF¿Ó6¬¾f°i†Ù¶Z}ÓèYf¯Õ3 Óþúµ¬&}O2"ò'ÉAñ¦8dûõ¶Lvc{Úº5ôÅX »ñ >l§»Q½)]^ãáÆËí ù†‹&¡ñÔÞJ4•±`¬E3¼>“ñ :õ° žzXˆ…M=bSq ˆÔC ðÒñ&C¶1¸¡¤<òüž‘(‰Å%J¢É•‘(‰u½•3ô½¡~@ÏÐ!SàÍ0´­ÅÔnÀQ³Ú 9¨ðŠ86¸"‰«AbÃ*B àŠn9SRÒ„¨ÉÎ|.®©~Åxé17I¸QÂJšÜrÉ´,©ƒ(ÌN«¡±;5pÂ9+ƒñt¦Ýýx·4kYùuìš`P¤¹5ôîŒj{žÙYÀpÔóüåÍì -»ªG[2MÓBÙ‡Óªôƒ*ZJSуØbªyPEìKÜ.WðˆJ)®ÞA#§)û;ÌöÃúB†$i œ Y6dW Öª<¬¥4‡UÎð‡ú%gôãò¾ÂÇõ;M8¤pG¬ëH‰"_ð²­½ÜìTÝp˜mõíÄuÐÒËŽæEIíøÖm•Ǽ‰7ÛÄöWÆ´þ Œ`\Ô+â9¯ˆU¬SÈÊrkUCÒš.·pò”Äs±êÈ2Æ’6úwЏ]ùâÂÌ×´'sR"ø¥êáRµ‰§³ ×$âãëŃkñ©êÏxúî®=ì´©æVÚÅãöÈ0G]Kë9Óëâ~WM;mÍ2̾ٚNugÜ*ßÅË“Ö!`šp¹ á_ ü÷4¼ˆíƒ?\M}Ë’ElC8Eÿ'^3÷ýÛÕRYÅÅ2…²Ó³×ß¿oÜÛ‹yãÏ–b‡q‰Í¹Ö Iºà4Jä.O½D®×Ðû†ÕîuÙ –Öê°Å´º{Ó² käÿü5A£ööv·ctZOÇw÷+Àkêv‚V7O‰?¡šºCͤª©“q4\ÞÔÔVJ\QôUéŠê Z/¹3HfJeu'÷Z¢uuEÃ-\X·ÿM ëá ë Ñ…uEXèÂ: º°Š)¬C# ë0ÈÐÂ:0&¦°Š,¬Ã“+¦°*PXÆGä}C1wÄë‚‘=nð‹ðüÀýŸØRpçŠc’M™­`ÝÛä–m¾¥„Ÿ2aP“Éd¬ ;!à~ga”Û´ý™©à»¬œê39|"‚©4³%î8¯Ó*¹¬ë úÞ“Ÿ‹÷Näß:^àܹΗð<:…_X.±Câ¼?GBW"®/ê™­îZ¬ê‰¬F:ãÈ3)a=ÁÏn‚“¡Ñ´kMæËâ*ë±À˜Êz06¢²Œ‰¯¬Cã+ëáÐÐÊz0¢`e½>¸²Œ®¬‡#â*ëÁ¸"•õ`pde=—~nà*ëá°˜Êz0*ª²ŽŠ¯¬‡cKète=Z¤²¾Ž°Ô–:ÂRGXêKa©#,u„¥Ž°Ô–“аˆQëÀáÔ:p@µE­FEQë€QQÔ:hTµN­ƒ‚DRëà°Ô:8P8µ·œQ*iB°Ô:…x@jPÂQ¥[iI[hú­3ŠÜµÁM*ý@ '·Íeà²-ù½–yU;+Þš¢¥QtíÒ² ¹ò•`}•hêI­Û@™ÖT $Žp3HÄa¡£DÑÄ ï¥ ï…œ}Dá½°Ç#ˆ.º/% f 9húŽ)^],ÜzT }Åú( ¿wqcËP eá)Ð"Äy^Paz¤kÚ#H…új»À¹Û¢ÂO œm½ouLS×zÃÑLÛ1µ~ÇœhvWïN c¬wŒvùñ#˜<ÄÇ*´'y’‚­²O®)O½¤EU%“P¾¼œ¨)y´ô¤¢‘ç3åsœÉ”{œï„ËPûL³côÛÏn„…NÛÝ?/9mQ ×$HëÒ(åÚ!._à—¸§qÔw½‰ëÍ0d`ß²,5FG°k«(³?(^\&…¸Ä£ÒQôûín»Ýb/Ôé±ÿ2-«ËÞÄ2ûÝ–  £8ôó×Òõ=É YóO‡`¬ºe&æ«¿nç-ïNˆ`âB &Ägi ‚‰ƒ­—ÜòÍÁĉ½–8ÁÄáá%˜8èa & ]5ÁD1œ`ä†b&ÀÞ1†`Š ˜Àã&PÈ@‚ 8&‚`Š#˜@“+‚`Š'˜€ãÃËP[Z²ƒ[œÔ¹énŽR ôPYˆ+µBrÜlâ7r.ëû±¿XúÏ»ÃrPÉ8_¡½äýéÎdm°†}ñƒ[•´)kÚ7(™AÉ l®Êc’û ‹ 42Ä*…Ï{Ñc›éã¬l ÈÝmb’*³m%.‘Å[°S y“9A(f40‚™Ž g&€c¢™ àÐhf4™Ž(ÆL †e&€£C™ ˆ(f8®3ÇLÇ¥Ÿ(f,‚™ŽŠa&@ ¢™ Ø:ËL‡`&¨·æõÖ¼Þš×[ózk^oÍë­y½5A[s¡’v<¬¤ˆ*i‡ãbJÚᨘ’v8*¦¤ /i‡cƒKÚq¸’v$6¬¤ .iá–3J%M²¤½VÒKq¨t&wïE¿çÂT¶ã¢ê*Û ÛEXÙ^ùò ¬loÿ¡RFCê€ÉQ¶çGØ–y;N¥u(zTÑï‘«Å ÓˆJ)¦7Û_Áé „ðñôÂb ô唦7*Fo ÜIoÆÞJbd¸JÝ8¼ÊU.<]ê*—û¸ÞÉRDrT|¥–û~b†nYO[q˜•ã1Œ|¸9»±‹M}.–yx`ЈÛ[‚ù/ Íü±Ÿù£}˜ù%ŠfH¯iÞ]$D ßÍüѳ¨ðSæ¾Ó²F¶Ù×Ú†Õ×L}Ú×zfªµGã±mXíŽÞë–?‚É=‚‚÷ÁÑ\úÈ©¬žÄ˜A–À/û@fJ„Ž’pdÚ‡²GG`AåMîu)qù¿Ä=½“0à=a x'+ýj/•½;7ð½s–wñ`ß²$a@¯+صæâ'^ª<÷ýÛÕRYÅ5Ì…²Ó“)öñLã5ÊZ’õ°—{{1oüÙRì0®ðß¹Lðë‡W|¦¼(¯ö µþ¾°b²PWÕ¬6{j§>( Íõâp“õµi?Ýrèxwd`;ÂVEÁ)6$œƒwbSíhðÞ#>¢képƒr¥&…Îî¨cfz|¥^ysö‹sçÌù®7õùg7¶7™;?òb¼p | VΕ7bÊyûã›þ_±â‰W^0µYúA®pl8…Má‡FSø  >pD1 1|(…:©ˆEáÇ ðƒã(|ЉUĈ` ,‚ÂŽŠ¡ðA ¢)|Ø:KáƒÏðÖ|íÁ×|íÁ×¼$¦<Œéˆbú€ãb˜>E¦8*†é gú€cƒ™>p8¦$6Œé fú8ƒªóÉN*ù¢8J“]x—Ê¿ì}¼ÛnwºOÅ£ PvÃ&@)ô`A˜ 
‡‚T{¬GõX!C´&Ь‡ {¦`Er¨X4OŸ†-.ò É2"¥I2D¥“d ‘ÓÊiRma&ŸvYá‚úoi^AOR¦ƒü/ž|p¦NàxÉåÐrDIìÄ$®xó•:¸RõVßjµÛ=Í6'ÍìŒtmäX¶6îõ­n¯§;Εº–£Å^­ž‘"Dæ´,u ÏýÝòÊèíþ}NlЏ £JoÑZþK?TÓ­ÏÇvËͪztS¯.uþ*H©®¥¡KT@¹ŠÚË‘ ­ªiöâ¯å¹¤h±Êe1?<3O®ÇL[æ=g5ž¹½ðnÜ'Wµ5b×}`uºí,õjÒL­0oª¿âbÐjDqÞ‚áÒ \R…ÀŽ^Ík£q‚ K×+z#À­u4¢*zŸÙ—áÊs¿V!ìYAtU:V¢ƒvÏ”»B_Ë}çÛë8žÔÄ3m¸“™ÚV—oKæ%BeKÃllÎ×eó¯r6¦HÐ&nP;¯µóZ* óV±Lñ]_zHØ|^é~Ö>¹tòŘ·ÌÊüúßÃË÷? {óëU¸;{¾r~ üBŠ#2‘´€k¹I¥ ÐzÐËß<[ʲt‰F¬•5d]‰¤õ¹¯”ÇuS¹½®ØIM¥Ê:÷9'û>÷¡_:8‘V’åPÕÊÁ$FþØ™Ë>½½TëÙ^oÍ^üÖìe[…ÊÇo+±*écOÞ{qX¬œzS_i»%Û,éM’¿õ[¦að4.qщ}—kÚªq!gñ’WËô ¸ŠÝaöº¿úþöfK¯ 8“¼ßo§’ü°®ƒ‚ÏcL‹etÿƒ³nÀ*ðeÎwDÆ*ÝJì‚ÛÜYZm óÈjM„æ5½ Eq{Úò ´*³¹ÖçæÙ’£Ò¶“‰È`¯hD#®h„cïh„c¢¯h„C£¯hD@¯h„#Š]Ñ(†½¢Ž½¢ˆº¢Ž+pE#wE#—~n ®hDÀ"®h„£b®hD ¢¯hD`Kèìph+QK¯ïN–w‡e @{7¤¼CbÒy‡p®áX@ Õñ=!Î;„—æáÂËò m 0¼Câñoœ%Ûe†û‘ŪiÛdVësŒŸ²7I¹­d™ci³JpU¶¤;9Y0Ò+šÈoÓ2}­„g«"aûI¶ªjÀâ,‰–Arä\ªÝ‘êåälqhU0¯bw˜k\ì'Õc©Ç+]x¥;›ÜI N…³ôëªE^®æóKŸ9¸÷Õ ¯Ð|m<ìÄrV<°‘,\/ŽÒüʶ.¼Ë“<Çã¶áCþ$ç¸ËQ¹õx´—›¤ÖØhòdä*MæÑ^>s1s)½G¿±/• ®F’|)U8”ÛþD޶­v*j§âœœŠ¬xìÅ/f[EelÖÆEeW›²«xz{ù-­pOë쎹ÊU.9Wo÷òÖØzORïI¾‰=É3*Æz‡ò’w(' m9R×Z×êÝp½~¶N+Xê0¥Ä„¢éô—÷Ërw£D¿þ÷pŸX¶s3Ûí§ï™1=Ôn7€d£L/º{’!ÌsE.ühý¾yžjk]iª“V¤ŠÆF$‰ #R ¯Ø.@Ȥ:Z¹  #¾˜UF|Y !8ŒXhš0"¥XÑ0¢ä6aÅÀ„_‚ߨɻ¾%Óˆê¼,³ã+«ÒlYÍa<µvW³²é©¿È&ñ"8õè^V@2Q„ƒ@k3E8ûðBD8ûD¥sö MJE°)áy¸púÎR¶Púš»‡ÅŒ¡gc”/d¨çí^…2jÃ}îú{’œE,ö$ßdœhU8’•81/W} l/t3ÒÚêä&«ku2IjPUç‰v˜šù‡]›)]Á„T¯œÏ\ÙŒ¾L™D]oVÏézN×súÜöÁèžÙSYµ¢cóÀ}aIÿù£øò–É»<{Ѹ턦&#.#eõÈû¡±í˜l #—E‹HÜùH߿𣋈“;–}E,ï@‹W£,7=¾Š+±lÄBöUÚÝ‚  Fö\Ó¼;ä~?ÎØ ºe'5\Úé`&yVZäÌ&2 “òü&Ïd£ø@«zÛñ¬|—7\oØ/v4¾iÞµ¨u>» æ#ùŽt”á:F-påÆï¹ná¥ÎëÌZæ´Ê´º-*ýɺx<™Øf¿ÓÕt}ÚÒÌnÛÖlöµ^»×v¦m{Ú3Ú廘`‰T!ŒÊð×ßDG×ɆpãÄd;/‡0¨šœ¿ ‚lb1†Ù¾_@ìï"–w‰¼L]À{yBz~Àž˜9ZS)ðöœb îêñ¤7—f¨–þ¯*¹£þä–ŒXÐ5WMw÷ꇭ?) ƒÞx4=ÐÃ3ôèïºä­ðÖK¼˜¹³Y•éLŒÇÞK+0×’kQ·Õ¬N4›HiälKA%h7Q´ 5{2á>éë˜ C½(/š‡¿YøõS*>þ÷Æ8ˆ¤K¿uîw g_H>öý[×I ´©;w^?g¿l¦õÃ4×]fƒü¥ã…7î4Ò²@ŠT¬½–m˜Û°Z2ϱ¯¹"šÍ¹?¶ç7~qU4š½ÿ„í>øÛj«`¾~¸R›WÜ]=†›ø¿¯žœ®Ô öq¶¸¤åb0aòÀ,ðWËçßæ#4ɃwN0Jž›ÅÅ#kUÊû_K £g4Mê_+þv~sÓÅM?pg®§åˆöÜé e©¸úœ¿I}3çC"K`æYä^Q–¨Œ¬IÞÊð y{=N‡b&¹"³á‰WíÞÓ?#Ö„[¹"x2Æp“”ÈÔÑå¾F0Ë`Xº.ù n‰"$·öe¸òܯ2…p굦„Œ–½:JÕ©N»gžyl!ž$Ù-áLKn~©1…äíÔz[VoËNɵg6±2g~SJ_;òµ#¿u¬Âi¥.ýyráô\lãÞ^ÌÏkMn¡9[ó“Yƒ_ÿ{˜]#ÛÁ/J)+걨;½Dº´Ä…Åðdrã7Í–’,]¦°ô¬¥JX×á~ˆËÎíèKuØÓ׫OïpÉl¹v§€«Øöà™=£gtôÕƒ‡ŒÈžß•5"â°ˆÍi !„›SÚVÀ7§%ä"6§âRŠŒJ9dÍ)@Üæ´œÀõÅË–üfª²ñÙJé•-5w5I¬y!ÇŽ“£Ò¶“xQóW=¨o“ã·ŸÜ À'ôô’ˆ'* æéÅ›$†Íä‘÷:‰xr‹¢v ´¸¬#¥Š,º»¡­ |¼©~½—³  Òr$¹J òrf]ݦ6[º¼õJÝ¢û.›ï$Þú¥L‚»$I«Áô¹3G¦£z>=e,?£y,PÖ&i&ËÖàmWþÖâ=iŽõÂ'°ðm¹v½§Uˆæþ{>ÿcHøªÏféø†u¨²ú__‰[/¹3䛯æ* Ö«øÓå}tã{í³­`¡hSE ¨‡ûoJœÉù.•ÖßþÍ x“Ö¿e^àæm®¹'Y.¨£Žoloæp¢ë)ß×—Ûש®wÇ—]íQ>ŠØ¶aZœEʶ—îSè“‘"AïÀÅ)(à¥ãM†7vxC3Hyä8õsl9i"mߎç®ãEò€É•a¼É/#u½•3ô½¡19åб-¨ë¹dsW¬j ‰½¹Ú!Ù,QBi‘bð›DÅŽW³¤Àƈ½BcClÈËšâo¼YLlšñ{zÇJ©Z“Ÿ x »Í-:t$ËÇ‹r÷¡ nå¾-Õà.lÏž9“Ÿ²s#º]4uQ^Xõ‘z<ü”VÇ&ýñOIR®”Üt -Š›“!;ž»ÕÐ_•0­6k)M’ô¦ì-%FŠ«¬Ä†ËÖˆ'†üìÛÿœ‰ÎÔ /¥È>ãQ¿<\©+wr¥®T½Õ·ZívO³ÍÉD3;#]9–­{}«ÛëéãN§s¥®%Ž}ÜñBÆ2 %ÒžÃo­ôìù§ÀžNÝñ†¿öAžZgaþ³Wfþ"L›ã üÕ&€ë÷§·—©úJkC%/šÙf¹”.)=è¨BÒãGÒ¶Î=$Ù49vMî—?‘)%æü{3E$Û·4‘J ¯Oúä<Ù€ò‘Tß3ÿûÍåÏZÊØã“š3äþNø|~R¹— ¯ûÇIžÊ×Ûôz›Nµ‹ÆÄ—Odc¿Õ¢sÙ3œ‹AÏÝc[KLomÒ%§\Ó&aKÏg+Êc#’ò<&r&qî·µƒG:Â×F[Qâ§R"Á×:båŽÍ>Í䃩íγ/SRíðÖ].Ùg†’"°-Áx•=äÎeSvƒ\Æ¢ýjýýaÎÌ"áiFtÂÎ;÷ýåðÎŽƒŠ¼V.˜‹„äâ„`¡¸8¡€(.N4(”‹ ŒàâÄ"C¸8Q˜P.N(‚‹S˜\ \œhP$' ÈÁÄžƒ¢± O±ˆ‰+š˜ò7å±·¿1õùn0P®Ô„—ÚuÌfúð•zåÍýÙ/Î3çO¸ÞÔçŸ1ó:™;?òñ  ¿ ìʱ•äöÇ;¦z_±ãË®¼ˆ_kGWž¢h _YJr)-ÿDQ’µ…½ï+~¹_ub®è|hüÕ:y4‘$XüÏÏñÂUrIßüî8¼ò¸)pý-oúèÆY…O[ÀBÞ£ÕmèìÿŒíKA ö¯/ntÃK%>òR‰WÉÛ³æ©d£µ&T¥,èÿvsý-!8è”[HW÷œ#ðQÞ{µí}DУM‚´&3—p*7`(• Hå†ÂÄQ¹¡ qTn8h• Q€ÊMDå†BQ¹ááTn(\,• Aå†Â¥Ÿp*7,”Ê … ¦ráâ¨ÜpØ:E冂ÆR¹ÕN|íÄ×N|íÄëN<ž7à ÂÂyƒP¸`Þ *˜7… æ Bò¡°a¼AhHoÀ„…ñAqË™’uÙ£~øBrÈ‘Yÿ½'HêQ_AÂ$»N‚[JëŽÖnè©»¡®„·“ÞÝÒ­?¨Ü 8¥Ö$vÀ%noÅjõJH)¨ÕAN+ɤl~À¥ø¥À§ƒ]†øaM.M @«$Kv†-¥Œ(éƒýœ-ÅÔù­¹ÆT3 {¢™ú¸­õ[†©™i{dØ­‘iL ÙRÊ4I‚ž‘"„h)»,\Ý£ˆ½u|­<±ÐÈ5í‰ÅbRâ•()˜JÈ“rOÄ*!…#+!å?©¦ezEá¥NîÝ•}“jn¥]<îúqÛÐF [3[v[³Û½žÖ6Gý~×Ô{–ÞQþµÀ/Ér²EþìÞiõu£ 
LRßût%éã4M•‘>¾OV%éãÂ…ÓÇ!¸ðôq4}€…KâÒDZ àôq0&}‰ JÇ`‚ÓÇQ ˜ôq`re§cA±éã|hæ šÃb‡Tˆ/?äïp¹‡¢üÎÜ#w:v0¾ÙèÿýÃ/\t\I>h6·ž×œ0.1ô[ºÎV¡óé—i¼Ÿý;š‡oãY÷Ö‰O^5ðf|lÛä'Î1û&ÿè?œû=O²™›=iïÛ9¸q¶Ào銽³á›'9KöÞ'9vúvAîøÔ#Í”ãÿÓ}t<þx›þàïvèŽß¬âñ+>Aêïšpr¡E°à°€¨Èì=DÌ+?.î!Ú¡\ìh“ ­ÉDB½08¡ƒ M¨Ç`"ê1ÐÈ„z4(¡ƒ(’P/ŠK¨Ç ÃêQˆˆ„z .:¡ŽI¨ÇàÒÏ DB= œPA…'Ô£P‘ õ(l €K¨Ç@£êëmM½­©·5õ¶¦ÞÖÔÛš²%(xH‰ Qb€Á…—`Pá%Tx‰*´Äƒ ,1ÀBbJ ÐØ4(°Äˆ[Δ¬Ë‡–@ÂÊ”T𢘤ÄÚuuZ½vÌkÇüÜsçJŠ#Nï€C‹.úP9Zˆ¢ ¤cM¼%‘,º—RTt!€ .ºĆåá—Ç]J“ðࢋr„*º(!Jú`?/º°zm«ÛÓ­Õá7Lº#­oZ¶ÖïŒz½‘méú¤øŠÚ2M’ g¤ˆÄ#!\t!" Yt!"bÑEÿS ]Óžj!JW¢è#UtqJî‰`Ñ…ˆ`lÑ…”p0µÀ´"²(¼Ôɽ³èÂìPéOÖÅÆ´71zv_3:Ó¾fv©6²¬‘Ö2íIËnéývÏQþµÀ/É*èŠ.z6 ˜øGáN0áêœGdÔAQ/T'Vw@°Ý¢:÷gñrŸ­_¨‹pÆþõÁ±'Š?úÓGÊ4ðÊØß°Õy£êàAý9üþëƒó×Êá©;x¨—vtÃ@šÌú5ïŒæfU›ÏÖ„æf ›OíˆãÎ÷W ªlŒ˜™ÃÌŒÊü”ËÀ™º_Ù'LjòÉ;žïÃ>Hþµ™1ñ.Qýíq©ÚµR©6/ª>6‘}þq5zì5bÃ5þÒŽ£÷Ÿ·±1×ëõNµèµ;&û¼×iðÞ§«¨Å!jjZ‹óΉ¶ q”MPGùÑ»sß[°5I´òÄèøL¬£ëĽ8ñ,7È¡ï\>ËþÓ #?¸ÿÅ]ĉ-õ+d¹Ýä‘F§£ñÍ/<‰<”" [ù6Á:MÎüÚ&ÛÚ94ÝUò¬I×ÔŠñ”ٽE ØÊõf顃4UZØ_?®‚YlE-ý_eió»gßÙîÜf»€L½ÂÐ7_MË5Ô[rÒš™Ë¸”aâdyÛE8òìgÕ6ô1W‹fiYøE8POÁ´Ùøv:OûHjchôé ¿,Ë ÅÛÚ/ÙÌ –kyS,™fIèã³T)±¤˜²0ÔìÉ„;ï=½×–8GåFó„ñuÓ‰ÆMö¯æ2pïØÜá猋•µâÖ¹ßÝ!¨¢cß¿uf8c¤6uçNÒž8HßL>æaô;úÃäß•4Ì_2‹uãN#-Û*Ú㱿ò¢×U-mY-™çèØ‹×19h³¹ Èr•5š÷/ço¯­‚yøúáJm^q›võ¸Áæÿ¾z¾Å¾R/ØÇÙb–>”Ûð‡Éq±òóoóÉɃwN0Jž›9Ñ•º^«RûáZn7«îÂN¼ê¿Vümýæ¦Ë›~àÎ\OóíUt£ÅScÀ½µPöTØDlr’e‹ŒóÄ+Y¤KØ^–/ý8œË—™‹jDgÃ/tªt™ë³žwþ|µp~å6þEéß‚¿QrP­¨O{µbMÜ¿s‰ wJ*ÐÓ‹—;¶[ŽSÕ£›úi©;wöGºD|9»3Í^üµ2'³ ÿ–Ëbžufž\™$¶p{Î8j/->k ŸÜ¨öbÌ[fe~ýïáåû†¿½ùõǪ Ü=_9?þBê1ùn€δR©8Iòã7Ï–²,¢k@e YW"i]‡–˸©Ü^W줦RëÜó;ÌáÌEIêBU+“ùc?&‹úôöòÛ;HºÙší`¼Êµû´ýÜžŸÎ 9BÁ¸àõÅ˶üÆ¢òñÛJe¬JzàØ“÷^\&ÆK|åkŽT ×ÒÐå´ûZÎ(g¹Ho’®ß2 gr•¬rM»\Û€:¼—;MÕôdyP9Ì^÷WÂßÞléòMPú~¿ÑdK”o“<ë$qÅ”<œÅ2ºÿÁ ’Æϼ*|™ó‘ñ†é´» P…!y–V¥…ÛY­‰ÐD¨´(n/P[”Oÿ:é•IŽgKŽJÛN:4$"€¸O v/t/s/s/ºø^<$"ú^¼ø€{ñè€{ñ°ˆÐ{ñ¸¸{ñàà{ñ¸ôsz/v/x/s/[B' îÅCBãîÅÃ/½ÔCHHê!$:(ïÝPR J£Bº^„c¨Uè aê!YPê!!hê!9@ê!± ‚z¨DüM›Q&ÌOÂBqÛ1jE‚Q *†z¨Äðb¨‡Ê‰ÁQ * }óñÔCÇ×L õ¨‰“åQlïsB©Á·*mhb-*¥*ÛØ#}9ñB–AŠ;÷<-î¬Ï¹ÅDQt4õ•ÜrÔC”­¦¢j9õUÃ(¨‡¨ÚR†zˆ¾?Γzˆ Kò"9õáQ*”zˆ@$$[ý¬’9±Ùê²8 ©‡Î"R²â!ŠyÏ<™J=$AžL÷ÒÅáÔC2²Å+Ì=9ë¼í‹zwVèq¨‡ˆ¤—zˆ¬ËŠ©‡ÈDA©‡ˆP‘½Võ‘ bê!²7*¦Ú) Zn·³­ëV×xv}Ú¦ao¿ûçrˆŠˆ„ሊÈÞ°ˆ¨ˆHP!QQy9×r_ƒŽ¨ˆ -¢"i˜½ ‰ºkoüÙRú†Ì[½ñc*™UR¤^™,æÓô™‹ ËžC…Ö¸Ãç“íy>[ Rb;:×.¯…œw,ðØž%ä猡 ïC=©eœ9”qW|bö…-‘‹¸ÿªÑúƒj½L™ú’Þ0ˆ@¶gÏœÉOG]DÖó—6ùžg<„ŸR¶Œ¤Sþ)I Õ±=R§ò‰¿C†ÿ…)ãgêŽ7vB©;=µ!9°¥Þ®Ô•;¹RWª©›ºÞ1¦šiØÍÔÇm­ß2LÍìLÛ#ÃnLƒ=¸ŽÛ$¥Iô¨úŽé„;¢ŒŽí'“¥ LhŽLª”Da%æ)Ã$JòI•›®¬ÿ0ÌRåÃY*"ëÌp?YU#m›Ïêá,ÏVØ›¤ôV²Ì±´Ùå¸*Û?Ò‚œ,ïMä·Ši™¾VBµU‘°ý<[U5à wÖú>Ó%t€ ÷™VÏèvöV0&­Ú"Ý:ضÝpXu‰g®Ÿ±÷œT¥þ±táP3@'Œ™‚¤§B‰YúuÕ"/Wóù¥ÏÜáûj…ÃŒ¥ÄÌÎV<°‘,\/ŽéüÊ6:¼Ë“<Çã¶áCþ$ç¸ËQ¹õx´—›¤ÖØhòdä*MæÑ^>sHs)½G¿±/• ®F’|)ŒÕ"ÇóV;µSqNNEV<öâ³­¢26k㢲«MÙUw}Š=IÎ9{’o2µ*ÉJȘÅ+Œ>¶ºÅmur“Õµ:™‹$‘¨ac‡©¹‘.ëqJË—¢à+ç3W6£/SÞQ×›ÕsºžÓõœ>·}ð…,ÆÅ—·LÞ婨äɃ“—‘²zdýPÀØL¶„}»± ¾#á‡'5v-ûêü"áj”å¦ÇWq%¶ªª0I»K&!Aº¦ywÈý~œ±to,Nj¸´ÓÁLò¬´È™;Ld@&åùMŸÏ#B•ïò†ëM»ñÅŽÆ7Í»µÎgÄ|ä"ß‘Ž2\Ǩ®Üø ×-¼ÔyYËœV™V·Õ£š[i¬C-Cïkº=éifÛ™h£ÖÈМ֤m}}l·Æå»˜`‰T!ŒÊð×ßDG×ɆpãÄd;/‡0¨šœ¿ ‚lb1†Ù¾_@ìï"–w‰¼]À{yBz~Àž˜9ZS)ðöœb îêñ¤7—f¨–þ¯*¹£þä–ŒXÈ)Ç©é~]ý°Õá'¥aÐ/¦GÖqØÎKÞ o½Ä‹™Ë1›U™ÎÄxì½´s-¹6ßNgãi¿Iiäl™ÒN}–wÈ ÝDÑ2ÔìÉ„û¤¯c2 Uâ¡¯ÑøÍ¯Ÿ’ëñ¿7ÆA$]ú­s¿[8ûBªð±ïߺNšê¯Mݹóú9Ÿe3­¦Ùë2ä/™e¹q§‘–Rì¤bíµlÃü؆Ւ9x޽xÍqÐlÎý±=¿ñÈ«¢Ñ¬èý'l÷ÁßV[óðõÕڼâ6èê1ÜÄÿ}õ<àt¥^°³Å%}(ƒ “f¿Z>ÿ6¡I¼s‚QòÜ,.Y«RÞÿZR=£iRÿZñ·ó››.nú;s=-G?àNW(KÅÕçüMê›9Y3Ï"÷вDedMòVÉÇËO‡b&¬HTqßfxâ Ì>I]‹}Ñ~úrš¶û˜¦å(ˆ8õÔÁ9_JÚZ òak" «"8…Ä…ìeR›ç®™ûZ›û3µºVl¬ÖOÌS%÷ kÐgiš÷ð/RgPž(äo³¤ÎCœ„mêÂ%Ní‹—3v[qU£·]Š{v6˜U‚BÉQ$Ð6¹Ô©šf/þZÙî@¢6ª\ÛefÄõ˜é`Ž’çŒ£Æ³G#¼7ÆóUÈjÄ»¦Õé¶›Üê¬FîÜ!—í¢kÅ­\<{c¸ŒYMd êèr_#e 0,]—ü ™ŽD’Û?û2\yîW™B8W[žS^§B©S²ږzVÛ‡gÁˆ8Ú’¤Ã„3-¹„(Ù|B\`æÍ$o§Ö;³zgvJÞ=³‰•ùó›júÚ—¯}ù­s~®+uéϳ§ÿÛàb÷öb~^«hrÍÙšŸÌäî—‘m€À7«”õX×^g"]ÚŽ”ã¢zx:¹ñ›fKI–_ÓˆGXzÖR%¬ëˆ?Äeçvô¥:ìéëÉ:11¤[;–Ì–kÙ±a}XÖÛÍz»y”íæ :ã˶G•ÏVV¯l©¹ÛI¢`åÔRDâuFÍßö ¾MÎÔ~rr/S}zσßR}z·Ã&3aó¹DìÕ""ž\ä€)(+.ëH©"‹®ohëÄ/kø|.ñAL®$W Æ+^.‚¸Íl¶t‰ÑÊ-ïã%1­¿á¨7œ— Ð 
ÑQ=ŸÓŸÏ<¨l“4“« •e®ü7¬Å{rë…O`áÛrí´*²A×§¾Í"C£iWy‚™­ÂHiÁ/¥nQÐÒùá´sTÝIÔ¸›ü‚^òUã–­¹ÔG²F5¥„ûèDÊíJët&Ö¨;¶¦ã+U¹±C%\Ç rºšÏÆ™4è# )ï£ú›óå±åB`òmr6ÔOÁŠ=cȱŸÒ™bÂeþ8““Ëa²ueN¶6¬èÊÄwBÅó#6îeázîbµPRÚ“8Á]`ö\gϯ‰¸Œ2O·rPfßì[ú³ÒHÜÓ°ùö“=…™pö‚ M‚tMóÂêNÖTƒÌ¿‡Ü͇J†þŒüÔ( \n¼Å½–==5Ú?»…w{zW·ú=XS÷=}@¬ò†-/L›”Ovx«|Œ&ïÙ/ŽØÔOo>þCùœw¢”’¬J‹•Àùkå„LS•ѽݸ¡³O*)ýäµòï›?8éÿòÙø¶¾ˆx_,íèf 4ýeÔ´“>jþÜ ·Š9šüѰ¹°]¯q¿˜ ÝܯwÒ“Rï £Û6õGáN0áêܹsæê@u½©¯^¨ò(?¨?‡Ò}HÞ@ðƒî 59äW›öÒ ›é…IÍÍpq~›'£Õ|¼'l>ó±ãÎw’ «ÿt>3íRß\þ|8S÷+û„ËM>zÇIWâO–é'›¬Õ8iUý­@‰>lÞW͵‘}ñ1Ï)­¦HÏ5vë/í8ïó\àï¯×{´¢oz»g´NZ+ õaÞ­ñÝÊ>¬Û*QV›ˆÔ†ýߨÄ6cŸZtZV·ÝjYhùÙût¾QSSg"µµ{]„ݲR¯Ó¿(Ÿ7äU×Êë¿)ß¹‘³xýðŠ+û«ò*wá/iüÙzµþž?¸?zP$5IÇO:8÷ýåðÎŽùû¹è{C×3‡“5OùV³$šëݱÞÉ®§(…•f”ñD–aZ/T0«lr¸"BM4è°^ ¼t¼ s«ÂšAÊ#ǹk”˜c{ȉÿhûv÷•ò¸n*Œ\ÔI-æü«½ÔÚK•îíuØ€\`rÆÊ F*Ê VkÎaÍIF­És¿Âª5Çá…’îXK„×[ÔJÛ}-+£åW)Ñ&®ßÔ9Œh¨ àó ³À8ŽÈ gîr-çºZ7¾èÛÕÄÝ˦äáБéþ6•Púš¡cãÍIn¼—¬ëµÖퟟPv¬3W;1«×>9ä©çÓbÑ.±nìêwúzËxÚ¬îñ]<]g†9s'r†ö|ªgAûËŒv«°Ž=õã:PÒöÎ’g˜Z4ÂæÞ¸“‰ã ã(9qOðBHZDw±t‚Ð÷xWÌx)o(ò"hJô´‰q5rR§“7NfÕļp‚™“r,PâÒÏ|%¬?LJëIQ—v~ñb ãóÁ £¡ -“Ñ rxãØ‡Zm³Ôò!¼ .Z ‰€-†Ä¡CKP¨äÅbÒ‹!QÂÖ„cÉžÇ÷„x1$^¸RZ¤/Z ‰FÆC ,ä—ET#Ô€5)âšZ‘€ÅbŠ„*†^T1d)1ÈbH1…¡o¾@1äÑ5Z )hâdyÈbHaüªmè1Š!K6öˆFŸÞð˲ RÜ9bÈRc-oŠSÊKKACÒH;Z1$UgŠ!©D‹!iCR½Va1$ @1$ÕŠ!iDUô> bHaÈbH: +ÑÁÂbÈÒr®å¾a1dù¶`Š!ËK䙓¨Ëƒt "iæt‚‘Yn$‚×r'Ÿ4ôõEí¹’C-SâÅ'oáÅçdÞÐÅt¢ÁÅd"QŤRŠ!éå C’6d]‰¤õ¹¯”ÇuSÁÅtN*¨²öRk/U®·wÔaƒCJ?D1$tD1d­9$Åôš/†ü¶·¨R¯eåc”-† ,†<åM=¼’Ъ`>ß0 ¸’&B] YºUëjÝxH1ä7ª‰Åå…ªÒht¿l1dy]¯µŽ¤òœÕN¸ò”µ‹¼¯,pFGïw:†ùì:=¨¼ç÷%²oñˆth4H-&öÇ`ºg˜]ÏVTÞ‚ÆÄ€Q’ª.',ª5AàærN [Ë«ý€dÔ;{îNÈÛš¡Çû‹C¯"!yýØ$«™"nî0œ;÷ê-JÐÈ]8ì^ÒÒKã–3%%MHrèá%ö¹]ÍÓ«LÕ£¾á‰nü¸¸m•¤Í—Äc&‹mÚhú _×FÖóQY÷ˆùd{žÙY©ýÙúœ×B^ xNä„ü,++ñâ½c¨'µŒ3÷3îŠO̾°%rÖ”«¿(ð"8ÇÞÚx2Q„ƒ@k3E*×ñBD*×E¥W® MJD¡š°)á¹™p‹R¶Púš»‡Å¼gc”/d¨g¯Z…2jÈtîú{’¼–6ö$ßd•ÁU8’•8quj} l/t3ê–êä&«ku2InZ5ÂÇSs#]Öã”–/ TZ^Îg®lF_¦|®7«çt=§ë9}nûà Y.Œ?ŠII'ïòcòäÁIvÊHY=²›|¨F`l &[Â¾ÝØß‘ðȈ“õ8–}u~Œp5ÊÊbBêÄVU&iw)Ã$$H×4ï¹[zA2Rj¸´ÓÁ|zõ:•”ç yŸÏ#B•ïòF|™\#¹Mî®E­óñéG.òé(ÃuŒZàÊß B#øRçuf-sZeZ]£M5·Ò.îš=}Å«c9c' ØxôºŽçÜ‘™—©ÜÉà*)’ÑRžBõ³„/ªÙ:g~é¬Å›ý2AÝÏF0x<óe…Ì/ë™5UPHûRÐü¨5ö h­²ETvÅmSžôè±h.ì ¦]åQf¶ÌÆ+ÃüRò ‹ œ ;Gå\ ›™½Ná%¯°-lH”í€K}ä[Ó¬îÏ(’®V»ßŽÑ»–¥©JalYq»îƒ”eq!»x27ÈÐýc×”>à;àl2È—A ŽžåäøKà,1à*˜¹FÒÃ0׺"çÚ›%¯™dû(”aJØ zgº¡°)ôhžÙ¢tN+od2Ñ€À™sbÙ!7s”€¡¯’hå§RÅ]à—|O+°6 ×Ûº¡ªý.[S·=½C¬ô/X›¤K3¼•>GöGü‹6õòõç_¥«¼W$ ¤Ä¾KŸ“ƒsi쥀Rˆé?¶ýá“þ—+í?uu‘¾˜›Ñt 5ýyÔ4“>j¾‹Â•l„&y4lŽÌ ÓsýÆÃÌì˜7‡Tûúº®õ…£ À¿Ë.ö^]y ;ÞØ—Oe|!o€À‹¾ìúÊðCÿ‚}±!u|‡SÓ³éu‘< 'øÛ«¹k>Œ|ÿV¢½Oo³nð·ÉaÅS:D^¨ÌPDr-›øYäÉÝ-ùf;OÉ©ô«ãÙg«´"rÒÎd)”³Ö }oH8¤ ÄO}ñGøënÇPÛxœÚºÞïvñŒ6þî-AxCæ’ã™S¯åÅæq1tCëvú*› m{º«ÓÔ]V!ñP Ð×…xý’FR4uÂíF‚OzQ«03/±ª±ÝL8{€…·ûFW{~ö OÏóìÞ뫽NOoaß¡§wZ!³aµ¬½Ådæ”Ñ5HÿpÊJÝKÿv ]ÑTá©F7ÒÙߥŸœÍξŸõ;H'k7}4¡ñ¥u²xEžß¾ÿß'<‰OÆ`èúþ|xgR=Ò‚{A&\ ÏÄ "„Ic²µ,‰æxw¸“2ŠÈRXiü‰2¦)+å³àúá-zØKjÁ zÇ²Ï ª÷+ þ&ÿè¯èaË“xæfOš›-3G¶^I0#ypc×Of×¾Ÿ$ØéÛ¹ÈGs3 ïýÀ&ÿá<:ž§?øÙ ëuLÇt 0L€“+ãß<_n‚3±Wš½Û‚²³=(ë¾ ;8dáFi¶€¸(ÂΚ낯LïÂÓpÀUuì'¼Íí$× h˜ª"`s§Žm#o˜”Z†Æ¦Ñ™Íñ ä{¤+&ÏCqøÄâ§æ1¡4‚—é 3‚SàNž¡`‚ÒsH\ø¹‘·º°þpøß€·™›ŒJío•h™ˆN Ã)2m­¶Y6î°ÞÖÔÛšz[SokêmM½­)´’С`:6ÞÛ¤G¦€ UXÀ€QB5WWî®`Á…›c±l-15ð®àé:6x[3Ôa’ôŠ}oÒrÛP9\FŽÃ7wº‘l¾6$(©t„!Ñh-µ4n9S²({F.è@.Â6'âån僾(^¦>]Uã}5¶˜ð°åÂkL× pÔaôÚ1¯ócsÌœ+!Ž8¼¾±oR߉-…h¿ËÎx¯Àkà-‰À#G6Âð2ÍNHúãAR0.ô–-AÙ.ñ÷•Q.>lZš¾/À¥ xZYÐf<'c½£QÂ;1ɤ$xìØ´"¸ÞÑl£Ý+Öhd*F§ÓQzýVWé™F·¯«ZKï÷ÒRá !M g ˆÀ#‘/ûq޼×ÿTÒL%?€¶z|…q ‰ˆv%?³Ã¢Ø[­œÒ/\Ë%…§pí³rOŠ®-$˜³p­˜ã`hiUU–Eá¥Nî…k ü¾Pg¡Ym`u´vW骑b誩ôû†®èêXÕŽeuõ®|蓱¿.ðK°L°Ü˜n×P ¶¦n{º’¤‹®¡©ºÖo³6uóÓ"’.°¬v“âIö’ €=ÛbÔÂÙ,¸ìÙ,h¬Ù X|ÙŒ€|Ù¼ ÌÙ|À<ÙœÈLÙ<˜ÌÙ\ <Ù€Á•9Û‚”7Û‚Ÿ5,‰sƒÓ‹•.l‚ÍŽçö‘…íȆu¯ÃLSËƺƒ…¥ à¶‹½…r õDFlõ™¶ëxè‘–¸£ªÀ½ÎFÉRúÎ!³ìNùÁÃ{gFïï5èWÈ¢zÁ"=qÂ;à÷ûëp­|<”×¥dí-u½œî2x ˆ hEŠHJÂäAŒ"¾‹'ó$=H¦J3óÛç8HhÀÚê_Ei“góJ…Á+ |ó—\^ŸVäYkf.°L„‰åQ¬¦_ˆ³ŸUÛÐÇr@­d‡^ ƒõôh[1ng½±1¾µ¦B{@£oøEY!îÜÊ~ÉÄî\°çJ äX‹›bÉ4ÛWB •DƒBÅ´mâ Ÿ zjO8GåFnNtF \á5çs‡çù;‰]ª¬·èas#È 
A°|ÿÖA)}·2v\”´‡¦%5“ÃfÊò=ÜË›×0޼pêŒ#%Û*šIù˜³ª€Ç¶Äsì9"svFÃìšÍå,QY­YqT`òöJ¸áÙ÷k¹yMlÚõã›üûz}‹}-Ÿâ³Å,}(·á“hšêú·ù ÃäÁ;Œ’ç&(º– Yh?܈íæõ² Ë.oú3q<Å7ãhªÐ©±¿@(H›2‡#'Y´H[Éð]¸„Õeù§ǹd™9­Ft6|t¡“…Ë\õüc®lzdú·^R3·ØËkâö 5.Ä)©@OO_îØ®8NU.k5–#18ÂÐ*àËÙ)æìë|ˆÌ*ü[" {Ö™yr¾~xýÛÛª ÜéÆè]àτޙoNôý„Æ•JåN”!Ÿ¾y¶”e ª•5dQ‰¤E}Î\ÆM%öºb'5•ZßìßÍΛð$Ž¡ª•KŒ|˧9—çõ­Ò±oÍ.ÊnͶ ké½^»¿–ÅÆ°‘ÛüË 6r ‚âßí{5½·~½”m,*¿•¸Æª¤È´?z4g, b$^s„J¸†.¦Ý7bF9 LzÄs}È4Œ=¬«¸èd5Ø»” ±¶ë&_ì4•ÓkæAæ0{Ýß|›¼½ÑRÅ› ôý>…N”n“8ë$pÅ<Ðl=¼q‚}¬P3¯ _æxGÄZRVb ¤dž¥UiáªGVk"kTÔ ZW¨Jáˆ{Î+“϶ph0H@6€¯ã!*ß6ʘƒ>"™°w™H+8à 0Jr2åy€Â½4츹tOÀÖ¢VîvÔ;Óulð¶f¨C’‡Âbß›”‡`?o$¡n±3ºàæC!â1µ!A#g†ð#„“¡¥–Æ-gJJšÙÁsdQÍyŒ{ ½ko|iÉ}Cì­N}Ê+'ë%ñ°ÉÂ1LŸ Ôaæ¥ÒaCeMxgŸO¦çùx1HYîà\»¼²ÀÃ{–Ü3fì*¤w4ùY-ãØ¡¤]q‰í ^"g4/ Uíý µ^¦´}Ioh@ 3Ó3'È~—öÁÂzþÂ&ßzÄCx™Rg$ò‡ )WÄöHÀ'þþ=VÆOhŒäY(ºÓ“‚¶äÛÁ÷k9vìkyp-kÖXõPO·íŽbtÇ]e¤Ù}¥ß7ôî¨?jiȺ–´MBš* ï°°I”ѱí a¢” ÍQ… •²‘5L Ä<˜@)ŒäRå¦+î?š©òÇY*NÖ±á~dµªFÚ*¹Õ÷£¼[Áo’r]‰2ÇÂf +áUÙþîäd±‘`ÁœüV1-Ó×Jx·*¶t«ª¬ëƒÁú`°>¬ëƒAÐ£šŒ‚«>§ùÌvŽì:‘¬crêSÈ£8…*A°&à‘·½°²E<ñž"3ˆªZSdÇ. >TcÂiº—NôpŽ·è[T•Ð<§á!dV×Á9Oô—À´Ð­;Åž¢ЄGGñ+Te«Ò“³j¾Á—|RýÙ#s×qïjð¢³Â¤Y@øæÒ]ÛŒ¶Ñé»juí‘,þÑf1Ð^ ˆ”Ý="âéÝé ƒÕëM‡Æj5 %²Y 0‰•wj«-¼¶ 1iŠíK¶Ì,´õÄ,#Ü«vJV¿Œ¼D#°¤¯~É€üHõF¨qèbáQa50å®#Ê-œ#ïõÅ?•_ŽåÂø#Z¼Åþ%OÅþ]œ©áÜL3‰³R"ä",2“²^Iàê8N¨ò]Þp¼q`6îÍÈš6ïZÐ:ŸˆùLDþ:Êì:-0vè;±”[x©ó:³–9­2Ú]M…š[iz‘¡öZÊxŒGÔè Ké#/:¨Õí·õ±ÝéËw1À)³0*³¿þöUÂ¥£An,¡A59_ lbaÛ¾÷LìïE,Ëfš0L£×E6ÈŸ#/œ:ãHÉRÌ$cíL´a~lC<Ç2ggDͦë[¦;õȨ¢Ö¬èým¼û o«Äž}¿–›×Ä]?7‘_¯8]˧øãlqIÊÁ„É“ÀçëßæOh’ïP0Jž›Ðt…,äýo£g4Mòט¼ß\vqÓœ‰ã)9êüqºBQ*.¯ó7ɯ]2$¢fžEîE‰ÊȚĭ ‚¯—Ÿ2Q&±"³á¡+ž,XXŽTˆIÉÏç¶’än{¾ÝjkzçiKJZ“Í ,ZÎÁ)T\ÈV!¹i£»fîkÅõ'ru­XZ­wØS%÷ kЕ){íI)äd·# YãmÔy7a˼pYXK§/gìV<âªFo5Wà@ A¾G Pb‰i›\jÈdE1g_çC¼;¨2‘·@™q¶‰•9÷ËÔúÚ±¯û•KrÉ+téÏS§ÿÛ bæÌ=®U4©Js´æ'³¹b3¢ s™•Ò¢“¼ÓÚ&Â¥mˆ?Þ—'—¾i¶”dÁ6 :°*aql+Ôa\vbG_ªÃž¾ž(1LׇÏÙs_­slˆ;Äòyî ³Po7ëíf½Ý|Y~Ùö¨²ñY ñ-5Wª$ bT@€"¯3r¾ôƒ|ž\°½sp/S~ZôAŒo)?-ô° SX~.‘·ÎCOª:ðä”—u¤P‘ûj9è*°ÀÇÊ WÇr>Èx#ÈUb#/w‚¸J$l´T§•+„Þ‡‹hZüÀ§Þì¹ ÕãéqfNòã™ÇÒÜÍäjŽÊ2WþÖâ-ŒõÂW`á[qí”*BCÏ}›†Ó®ò(3[fc¨e~)y…·£òFÖÆÍá§ð’Wx7PnuÀ¥>27Ê)?ÜgI׌ÔWJÛhÚÃî¯eij†R[†Ç®û e„0ÈnÀŸ0¤$òtÿØò%;0ø69› òeƒ£gT9*T8S ¸Ìf²¦¹L¶®ÈÉöfI‘.Ù> %Ïðü¹CÒÌñœY<“RÇu¢³ç·D\Æ ™ç^6ƒÞ™n(l =Z^ü é/Ûïh†¦÷ž6§€°ˆ›òŒFÞH¡ª3“q±Cn&GC_cBÕJ—ïâ¿ÞýKCïwûOÉ÷´ûg³ðžÑ¢»¹L^XøwÙEwÈ•²ã}ùT&×µòl¤eןPrWú¼vñO½h85=›Þȳp‚¿½š»æÃÈ÷o¥È o%zEqƒ¿Mv—»˜óÜœ›¨9åÉÝ-ùf;Eå©ô«ãÙg«Œ’rÒÎÄtÉY놾7$ Òâ§¾ø#üµÞÑô¾¡jz«¯õÔv¯Ý2ðwo –ìïVÚ ¤d‘Ò6‡R€¾Æ(Äs@=HÑÔ %Ú$)kÓbÛ0éíNWë°iÔ¶§wh‡ô{xÒK—dh>GöGü‹‚…wÓšÞemêæ§/_þUº*×™7Ò,ÿðIÿË•öŸº:£j:7£é@júó¨i&}Ô |…+97MòhØœ™Ž×x˜¹M5¶›,Õèöû=æÚøt桇—év·§©LMÝútzÔÔ]z÷9Ñ;iìÛíÉ>éEõnd†h˜N‹Dÿ¶jŸø!…Ô¾–Žÿ¯U/NB§­z½e5êõ°ÑÄ_ô`Æ%ˆ=/7©AP’%ôÍ!>®ñøÖ½¾Úëô°Ž´žÞiaomûˆù®«Ð.Ý;b[ß¿v_×Ù&ζ§7Z=²3“èhe&ð'>u9•¶¾nsí_I;æÌ[ò=}ñþõKŸÞž¿¾ØnHyþðIOýù%=1ëÜAöœ{¦i’DŸ²°™ ûL%OÄÞÖãOð¸&ŒñF*û’þ¸3 oùÖ’R„…Vœ=äLÕ¾òÒ±¸K’­°(ÒݶôÙÏœâ©kI’.alãT¢ø?¯¤«_ÎÏ%Mk´*ýNíj}é§OX§þaFéJëÕôÓ“–§CúªD‹¿8ÞsµÁ £Ö4’2‰‘.w…ÑîúÜl°’æ§va)!Š""'žKŽGŒ ûÒB>‘‘XȽŽCöå0ÿÙ#T“@UÕ][zª_‹/>AL×a7=ùKÓç%Ò¨ôw§Ò] {ŽÉ”áóù›©³ÓÌ$evª1o½yÿmh;!q³áÞ`˜šæx¸|‘å_rb˼Õgrà°òZ'9ä“S²þÝ#ÉtI&ÙCaf(ÙoâÓ¡:„hÛaº‡í{ˆx>âDó=MNÜ~þøñ×4ÖFªÖ·GoáºíVOµjw:#Ô¶õvÛ”J¸íö]ü$ðÈt%3˜Ä$'À•™þl5›ûº¢°>ê}ò®©ø&5¥ëÄý5ÂpZg„,††‰K‹¡b»¸K™rb'DþÇҀȦ÷+‰M㤹9[Vü¦À®’®? 
#3œ ³ÖóP´3rd•ddBvB(ÀÎUÃFw°ç™â|)¬"Öm²,¦¢ÄŒéý{lôîLˆ¯C¯M±;[QLŠÿšS#dŠ2žSäÎY²å2˜Òl¹±–KÃß”BX”{?9Íc“Ò˜pÃ[ˆ¾UßxS}=·H ý’lše–»¶ÞsïÎ#ýbÛMõ6£­uUíé+°ßkwwõæfð‚7Ë;Á’¥‰äæî¾ÿÝ BòÒfà4§=72O¾9„¼´°'ù/ZšÖ6Ng!(p¸ ¤ t´ngµŸ=?rÆPEÑ;)gJvvÃQ<£`HiÖÒ¾ÚÅ…ñCçßÃÓ2¶@ð “r¦C8Á´Ðõ@ê¨p¸þ<ëi0PRHy˜öEj}w×]áBOb4àÜ“KAÜþõ‚ÿ—|Oo\„IŽÓ y„D3bè‘eÁExÛ[–åæé<£u•+b‹MïH¿—ĕtè„a+xWª‹ró†Ç=Š9ø ³ â9ÐÒ^öHús¬Vj¯çA©yðñuMñÖÌÿöÀ´h¼ðµ€†Ç iE•áZÑÓå¡KÓœ‰ã)ôé¤þJ= JÏ‚ó)²n)©úæ„Q’:ìß:„l…ýs³Æ ›ºZ°k«ÍÎÞÙÿ»œŠ—Þ~¾|ýóû~þ‡ôþãùë÷ÒùÇÞž_þóãéÝÇOÒïŸß~Â[4¼ìtñ©02ÿõö\¢ô&áTR,éYS_úÿR¤¿ýM ]„æ’z"FâOR<#:ÐíY³[<2’2—äÿ‘h[6ç¥ã-”ô?òòù½/³yðß­Ûi·´NWë6z}£ßoЦõ»J«Ýjwºm]ïhíN_ÃH‹(pŸŸ kÔ+Ay6s ¡ŸI)•({Á2Ý:}«fÖÐ=‰ÉÌš|ñû%ÃØÒà¡ìhv?è~½ûª’¯õQÔmÇséò#¬~4)s™éº¡ßwmæZSܵRü¾¿B="Þ®m¦zïÌ$e,)ôÀÿ]¢Õ¤½Øu¥Ößÿ¦¼‰—wî Ç~ìÙi[X‡±$šãÝ¥ìk0Îxb>‡f0ñî—=x‹H‘W2Ö§p )¹Ù0Üi2°eIŒ*lk-×!L¹Â€Á»×ZV¯;ȵ‡!"«¸ÐlÍ è©cÛÈR !l«‰!Etf$Å÷ðVç1qU>É,€E¿u¨-”?﫰LJÒPØÖºæ¹btÎc)—(+¥ƒ–61xIñ“ƒØ! Œu÷~`£ãFCJ#¢è×™6 €§;™â©.¢Þ™®CJáÐE ¸Å÷¦Áx@+ÃeAøæ©kŠqÛ ‘3CøRe¨¥–Æ]ô(4@ ­b…ç{þ!ÂKÄ3ÃóÓsºn —¥eåßþK»¿üׇÞZ‘5ûð/ërz‹fß.>ýâŸÉ ‚ÏÚ%ÀK› ßϸ…fVK0ŒÌÙ|sõNíO ×H#²I›ý.sO¯À áæJóf};^¦Uù’¾ùC $Œ=ÐW‰„¸¦®ÉhøÅ©8ãÁšú¾"NÐ[à7b46Èñï=|Bc /Yóĉ«dôC>ø~-ÇŽ}-®eµÕo·t½§˜†m+Fg¤*#Ô6«×ow{=Õêt:×òBœ–”÷iª×¿´È¨€Ðæt]"òÇ9ò^_üSIË€úCœä…¥«JR]W„˜hkQn¼¬ƒŠƒæ@w£Â“‰êN'Z3»GæJí`”1–žùJá6x9™Æmb½L‹ÐÁ Å ‘ɲNý³>Û€ç´T±V!ç_Öåšz]ÓVZVG1zš¦˜¶ª+m³ßj[H#EŸ×5+þýqn~‘|à³–c ?'›fÄvÈ@±^Á®M+ß9Þ칓^ù"/$EóˆúrZüÄ?45Ci„'%wW’#rCž á•ø/f$xþÐõ'ÉÕó‰tΑåà­M÷hMá€Ö¬+<Ï;±ŒÃªèjp{Z7ôµE3}¶ilÅ*ÚÖªŠã*@¼Iücã6̶ÖëwÖJ<wÜŒ&2Þ±¸ÄÆ;öj¯Ûo«]üB}]iµ{mµ­w Mow:­½ñŽ»~&¬Q¯@Eü<Ù)¾Â(Ç=̪[&ÀÑî©nh´&w+ÄjÈÔ«ÂÅ6îi½àÎo¾`ŸÝkhÜ7ÜE#w¼ DãN0îÄýhì!ˆ{±xC™yCù@9By€çȳ‡xƒ0…¤<²ûŒÉ0ÉÊ0É ® “| Ž£¡ï QøðÐÙhìxØÜ-x{̇ýkßÚAJ`½BäBÝ~.Øt–Ãx0a ¤˜*»ØOb?|ŽP`ì)—v~ý€d{'Éý²†²CsYs@3Y³# ².†ÏdÍŽžZ9HD®kvÜõk@ð &h˜žÝâÂϼՅ„åˆÈfGå‰Èæ@åŽÈæÀÐ ¼ÙìÐÙMܰv°jë…8XI5‹aèØØËJ·‘€S& ,`Àˆ ÈÄÞ(5fÕô 6‡Ú„»2°ÆåîKàݬeå×±€AæÖÀ»3²éy~df€£ž'1ofÅ,”¬^2Ç:›†‡âÇUéÔi)LfÇî”'«‡k €Ýbۅ™§ñX¬:g:cIý;Ĺ\㎙o`o愜à—ÊSd—™£ø|–Á¹‰ü‚ùó‹_ÌIä•xÄÓwsbG‡š[i[úH3FݶÒCãîâ~W;ºÒÖŒ¾ÑUdµÊw1Àò$‡õ0Ìp¹ŒÂ¿.ðK¾§Ù“Ù>ù.C‰ Þ·,™ÌÖÑ Çè¿#‰1®ï߯s)¦3{e§w¯!©…B¾o<˜3·ñ¥%™!ͳ9Ö„!A¥žGªÜÅ3O•k© MÓµN[S4½«â·êtº$²‹_~oe€Ý¿>+ݤ­=Ýkëªñtl·¼{VÝFÔê&É.ñÏ(«nW3¡²ê,õß3ÇÄëwVÝn•äKªc}S°¤º=Û ÓD|Jݳ{«‚uûƺpBÝö)P· Œ?¡n/GBÝ>,î„:@î„:.Pž„:`΄:dÖ„:fLž„:vP΄:^`peàI¨ã-PÇŒÏïÍŠ¹áœ.™VƒÂóçßÔR¿Š\_‚M™•Cºó¤Ä6ÙBÂ3ß.ñ î8D“±ì„€¸œ{O‡x{¶?3ü%K£º‡OD`•ƶıò:-ƒËº9…ïfòóâ½ù·È ЃîÃãèR°\`‡Ðx?$ +@§õÌ–7-VõÄήâÑdE¦…¥„õ?º †Ó®˜/Ë—QÏ Ì“Qό͑QόɟQÏ ÍŸQÏÍšQÏŒX0£¾>sF=3:sF=;"_F=3n‘ŒzfpÎŒzf\ø¹Á—QÏË“QόʕQÏŽÊŸQÏŽ- ¸3ꙡ‹dÔ×',õ K}ÂRŸ°Ô',õ K}ÂRŸ°Ô',õ ˳:a)F©ÃÏH©ÃÈG©ÃŒËE©ÃŒÊE©ÃŒÊE©ÃÊA©ÃŒÍN©ÃÉI©Ã‡ÍH©ÃÊN©Ã‚[Î(•4!¼”:{ñ)u˜Ž*ÝJ ÚBÃo¹H]¸6¸I&¢(¡}ۜޒ?(™Wµ1Ó­Y4% ®]JV+ŸÖØ–&?«u›‘h¦õ'ÔÉG4Ã3HÀÇB9E+žp_P CÂ}!gŸ#á¾°ÇÏG ST s²})”yf§é;¤x…©pá&À£ è+ÜGÙñ+p77¶ u·,~ênÅù]¸ŽéAn`¯ ¹¨¸/ V›»-(ü4±ÙTûíŽa¨JÏÖb˜ÈPúÃV̮ڵ5ÍR;š^~ü&ðµ ìMž ÃVÑ×'7·^ÂNU]“@¾¼˜SSðÓÒguy<3PÜ5Ç‘L@±×üp±µ¶¡tÚªÑíP‚ `>‚ .dF‚ vL‚ P>‚ n`peà ˜àå'˜`ÇgOàÚÒ‚]ÜòIÝ»‘îæ ¥°^ªòŸ,ÐL­7›øœ‡‹ûÞògsß#qw|—Pò ܯ@‰ž‰þxc0‹2pÃîýàVmÊöÍ ¤ÌpÉl®LÎ8>%•Z`û ;Þid€UŠ?BìEmzx Î*À©ÙVLRe¶­DñX~ öÜŽ¼Áœ .fn`fvlvfvLnfvhnfhFfvÄbÌÅðY™ ØÑY™ 8¹˜ Øq 0°ƒó1°ãÂÏ .fXfvTfTnflÀËLÀ]€™ Þš×[ózk^oÍë­y½5¯·æõÖümÍ ¥´sÀ³¥´sr¥´³ãò¤´³£ò¤´³£ò¤´ó£²§´³c3§´óAò¥´sb³¥´s‚2§´3á–3J%MgJû~<¶”v¶‡J÷`b÷^ð{.žÌv>‡¨ºÌvÀvf¶W¾|3f¶ëÊ'Š!ôÉA¶çØ–x;¥u\ô\¿G®þ(®0½AQ)ûé Ší¯Øé áóÓÃJoPN@iz¡âÙè 7AЛá·x2\¥nì^å*ž.u•Ë}\ïD)"8*å,·ý¬«½nïiK¶³r<#ïnÎfÜý¦>w–¹{`¸··ó¿,næ~Û™?ôÝÌ\¢`†ôæÝ‹³ƒofþ赡ðSæ>jµG¦ÑWt­ÝW uÜWzF¬è#Ë2µ¶ÞQ{Ýòã0yX¯ Ø{àê‰]÷•SY5|cÆzµÄü²ßÁLI¡«$> "íCÙ«#fAåMîM)„âò ü’ïé„¿ è [À9 VúÍœKo½;'ð½v–7ñð¾eI€^·`׳w$UÙõýÛx.Å4‡y¯ìôf I^ÿ;rÃsšRyŽ‚ˆ¾AÞ¤µMrÇܰð7ùGE[ž¼ÅߤOš›-3÷™Æ|HÀƆ/Ÿ´m}’`§oä e¯äã?œG«þçyúƒŸÍб^ÇtüH§àÜÂ{FË… ræ²ÆòÏÞÌÝÚtG•_Y³þлà*2¸}py”r%uv¹ÚbGd>¼3éY YïKŽ*ïÁ~4Ú"W`¹ŸØæÄÈ?”§ýÍ]ÿaÕÙÞAÇu@O»ß~Qžv´I;Å " c— EV\â ÃZ µej»«hm]S4­×oëz_×5Ýh÷÷†íþù™°F½”kþó! cVÝ2„aúÿ²÷®Ím#ÉšðÙ/D̸½+/àe×ëqÛžŽqÛ Û=< P¤Ð6JÖ(¸¿ý­Â…%^* Y )ÃsÎŒ-‘OÖ%++++ó©? 
sª·,Â°Ý " _¥Â„a{Z/y0ä›/°£ë–(aؾé% ;‘pÙ‘;ðÜÄe|n<€¸ t™ÅËU½‘—9‰Ëø1ÄePq]Äe0P8q?>utÛ®cÛul»Žm×±íçÛFÛ@@¤O``é?6?é?&˜ô‰Lú€æ$}âG#}Ãç%}§¡a"‚HŸøqHŸøÁa¤OàT°% ”ô ž<ª5õ±¦>ÖÔÇšã?Öœ+ÿcëÇÍ^×ÄÆ¤ï}k æd uzÆDw»c­Õ±œ–ÕÒí>Qþ¶è7ËU:I¹$ã•îjΆb(Š“pc-Tvÿ@bÁð–©ŸÅl'vÑÖ.ñrJ…%´t°ßjµÛ-3«séпº½g:ý.Géய¿’Ö¨—(“r`-<¢ÒA^Õ-S:8_|³7*Ü¥°ÒAáUZ¢tpgë%†|ó…U:xdÝ/Ü=ÝÂ¥ƒ»<ŽÒÁ§îWÍàÞ{1ÁšÁ½¸ šÁýמü5ƒ|1}@Í ·³ ©ÞH‰Ãk!ȼ5ƒüW&€šA~P`Í ] 5ƒ PšAn|@r-ä ÿ(C•.j¢^­‚.?¯L/&º´«žˆaáH{²)}ˆs͉?ó0˜Òwô3±ÏõÉVؑܿšºŽ<ê!Ir!¸}ã²Uö7Šƒðîƒ;KòÊ ì.äµ)rnMg,2ûëÑñË;j„ ŲօìŒÛþ`—^QÌ ë¦wr)¤ãJOþÙ—4UšYß¿,ÂibE»ú_dió›oÝX®gÑS@.ì$nãóÚ>¯MÈQkf!áY†‰“žœàɵŸUÛЇTIxIàÊU›M'ã«N·g¥6ö€F_Rþ„Ë Å[;/YÔ q¯ªå^]?]fièã›T)‰$M³fÎG‹Ð“¸2úEe ›«D]z`éŒùÔµl<94¢»a§Oˆ4’øÛ°köÚyÌÞifËÊbwrWEó5-I­†‹:ÅêzFsº³[`¿Õéš/çìæOC›gê"m‚B8¶«dtu½¢q<¼‚#ª¢þLoG ßý^…°'ÅUé`T‰ší~G•*çRn7Twf¥g¢?,é*h&yWiÚuÊÆ¡ëLÉ0ÉнVq¶T¨li7·˜‘_ÙÚÞ»q£¨Ë½t ÙU íѹ³[ˆdýU°Ö à’ò$ÍqCUºà¥ÜÅ' })oN䪗Ÿ‹dÆ3B•*¶)ÛyÙ^óiAÅI[øôݤgcÞr+óëÎ?ý<úøú×·U¸Ë[wa°·`MäCõÇË’¨RªÇðå'=Ï·²<×H4 ²†,+‘´<õò°n*³×;©™ÔÚK­½Tå€ÞÞA§­è5U>k7UI‰å|ò“l¬8\Zs5§HÈPµædÌ Z*¼>¢VÚnIŽBž•½’ýþ²µ¸èt‹–»;K>Ô“Ù<¾ûÙ ÷¯#:PlÀ§f±WŒ+Uœú„Š¢¥¶jY­Ÿ»µ&rn›’§'JÈ–ªÑýhEìTèæ:±“d]¯µnûú$‹?i™B<[µ[óT´G½>Uí“ãA¢£â¶  É@_^áÇæi„üÒ?4ø¥4çK#üˆb/ˆáó¾4ÂÎûÒôÒ?®ÀK#üà°—Føqñ×è¥,à¥~TÈK#TðK#l ƒ}i„Zà¥ÐÖ+R -†„¡ó–.€½ÔbH1é‚Å0× q.x²çá#!^ —Å] )-R —Ã[ )t€€CŠÇ™á…<%Âé8u1ÇpÃV$ÎbH1ECŠO/¨²”`1¤˜Âà7_ òàšÉ[ )hâdyÀbÈR©*mè!Š!K6ö€F_N¼P†eâÎ CÖIâbHi+†ÜÔüõ Åmßè­žÑ~Üq´ÒÉÍðK'qî+†ÄêÖÞbHAÅX=â(†ÄUQ¸Š!q„‹!ñt0ªD÷C––s)·ˆÅåÛ)†,/ ’fŽ¢.÷Ò%ˆ¤™ã f¹¡^Ê]|ÒЗòæD®šqù¹Hfœ'Ïi›/†б©Î)Ÿ2UÚɯ‚¤¸m‘¦Í—Ä£&‹ÃpÆL ®ZÏLJʛuÏ¿ž,ßb+/µÇsíŠZXxÜ šy‰C=ªmœ:”ÉP|¥ö…n‘³¤8à jKÿk¿Ì¸ÒÑ0@g–oM‰ó.g À‹¼ázþÒßÓ{òèkV¿“Ê¿$I¡¸2ŽGêdˆ¾ð7Èn©2~&ß&‘Ô“žÚNU¯‡÷êÂu.Ôá…Úí·»½¾N´–é8ZÇéµA§kisÜï­®®;æ…ºLÚ$¥Iø¨ÆŽêO5KÛ^¦,K¨ÐB½²T)K—%J,1K”ÂYáZn¹ÒñƒÔº–g5ª¸Ï¡†û¡´¶ië¶÷'?¥=É ne™ci«…·ê¶ìøHw ²ø*qq"¿U,ˬ[iñoE¶WþVÕ€GÕ¼÷Ë㊜?y`që—û½Á Ë[Á¸gp7bñ¯B.w©ŒŒLÎZ°\iÎ3ÓÏÄ{NkŽ2ÿXºpn3€&Œš‚´p£B‰yÒnÕ"ÏžwPwø®ZáœÆQbng+žØ˜„3×Ob:¿Òƒò4±ï°m8À”?JÏ®ÆrTn=ìå*‹31š,o»J“y°Îçi!‡õâWö¥2ÁÕH’/¥‚¹zäOdyeµC)1-e|öFv¥D…ÇÉÌVìG·sî Oláï¬çÅ"Óe¥ xv»Y}¾ªÏWõùª>_!œ¯ŠerõAë9´¬hÅJÞZÑ~O·PÈü©y¯ªãU'¯’*A²&Йwü¨2o1uÓc+Œ«ÙWÄYx$üX OR¡ìEèÆwo?&ß㪄ëê!³º.yÞ‡–MÎn^þô=„&

õÂ,!¼Ú°Ã¶‘çhVd`ÏÙhÖNSo䆭¤¡ËAÆGÅEDÖÀ¬&›)·úiNü×ç¿hïÓÚï T‘eó‡šrõ7ʰFÀ±µ6Mâ$àÚL¡Êu°¡ÊuA)•ë"‹R¨&jJX¶%€Äbƒ˜béÑ–ïµõv¯ß7Aï¦qì [`ù xX6vnnâ-ÀG•0TIý²/öé…ø0¤g—”?Iž”jô)ñ;Yåmâw¾Îëˆ÷»åäVÒ¹l®X|ýkhù‘›½T'7Ý‹«“9KsÓª+ÊÌtYKZ¾,žBôr‚ª[Ñçû†ëOë5]¯ézMË[Ór\®3Y.L0N(L÷EB²{yò¸)yJIYZ„ž*ñ*ƒÉ6›¹^»~LB:#>õøOÎðèÆnØÞ"¢jxmyîÙk7Ù2 _pšÙ²ðƒØÜÉl¶¦©ük¹"X@j4OJ²d 2u¹ÝǶL=rKîAZ (Q„äöOoG ßý.S+4oJˆêmÕ©HªN™í~G•‚)éš"§FRÿ\°ø^ÐLB|i„º-)Ûï9³‘,UVŸr%©¯=ª ²æjL{'K ?'R ![Tº9Þ4 ¿Ö¼`ªV׊ÕT¾s=iCüˆõHž[#97¢È?‘ØDUòµ7ìmñc¼þ¾<…ŒÃÓõâ³g¾enýE*¥ìLlòPáií¢)ßêÉšŸÜhTe ^Ñò¢Û3ÖNéÒ6Ü¢î)@”›ô4ßJòa#™aé XJ•pj ZrÙ™}®{Ö½Úc¯=öÚcWO1 ½Òi)z˜•ÍÏÚ]l©vÊ8\Z#öhD‘'¸*X£ñ:¢cöãGÖ¶~w`vz}®jÇ݇ò8¥ò›¡ˆ{\2(œZ¤oTߤ7LïÜp¯çô˜¸‘Ïß‹yLÖ¸º8^ý]"«QHÄ#fFP–@Iqù@J¹±­# |`_ü†¾âïÑÓ /¥H PMÈ žÊˆó²8‰‹Hmð…R§¥ŸÉ“%ê£Jit ^÷b`-ÞìÅI~.:ÂZ/kÔùFÄ í+DÉXJZµÆ>â ­UVDe×Ü6íшžŠæâ¡ÐÐpÚUae«œ¼2¼R×Xdð|lÜ5ªnäZØÌv†/ymaC¡¬‰.õoAͪº¿X¹à,XÕœA×wÌNÿBU®¬H‰¶M' Ï»Sò*.â4ðƒsƒú‘Ü>4|Eéƒ~΃ú5\ £çõþå–zr©•–$]™Ï?¼þ/åóÛ7¯Ï YâÐ’]éëíÓ]i˜.¸~eŠ’|*S™W:ûħABW@úƒ µ•ù/‹ºKÖR2:Kö"ÿ;õƒ0û‡Ìñ†}Z[ýÙø½a´Ì^G×¹¤lý´¶ýò:µ+ÊWfo¾ÄÎ'ú m÷™MÍŒïyèR—/ f„®æñbª¸þ$gÉ>~¹W‘2WʰzûÀRG6šî<Ãó€$@EZå_Vè²…°+ͬþ`È˪Š0 Ø%§Iae¶³ /Bo¨H©7å!™µ—øL%œ4 ”.Âéh¼˜LH8J®´©Û¸§vR?rÿC(|RF‡ŸÞÅæê90ìe…¶Œk7˜ç#Ê 9GÙXdåî¤Eú ÇAD—ŸZ âRøÛKø7Ѷa‘?Û„·;ý¾ÞzN ¿W=rC-ßCkêÆ£ð›+b_+ÔSÈw7Š©~(6U—(éíØ &½ôQ¸­oYÓUÌ%ÒšN…÷{]Ó]Ó¡5 {’{xD ;o[Ú¬£¤¼K<½ÛUú¼Å´©‘7;íNgZæaðýn5 Ÿ‰å(Áøê4*“0 Çazü¥Û«úÀ?{¯þ}Îþõ9F©Cæåœ©iÒ³Ú´ænóÆh®&†®–Çó’ÿ$j>šÚˆøÑjjÿEwy 8eÉêëó_ÎC2q¿ÓŸP éOÞ‡ÁbÎLgò¯Uý[Rþ¦~Ü£ŸWZrèO¿ÙvÕ æ©®5ôÜb9:ÃoEœíŸ¿\nžÚŽiv[-Ýx¦Sk×;‰©]k(ÇÔ>¿uj{mzÈ0ŒÁQOmÔÌ“Ú=ÁÏþlžãMÔÉ;'R}DaüäúÈ:U¢ƒ»Èöqä\Êíù>R[89ñ‘¤qÒ@â©Ë½t @ZHdÁüÔCx‚—rŸ4ôåYíÉî5ã{ø+1·)!"øÓ°ð»‰áOμAˆâ‘EóÆãŠL« ÷ÇãKÝpç‡zÝÇßs‘<~C–•HZžúNyX7•‡pÙIÝÇ_{©µ—Z·wÐiã"7—5|$çÈÒùÈÎkÍÙ§9\$è’4‡‹ ½>¢Êi÷¥¬üŒ”×%Eïg¢>úC=35¶UÁ|ºaæjĈÀ¼\˹¬ÖßÃ2ý#k"Œ}Iè^J_DÝ/Á¤ëµÖm_Ÿ| Ò'¯v",ÏG¯}r<Ècϯ…¢­=‚´å‹†ntõ®¹ë¤]r·|}—õ u$<“‘åy<õ-pà`žSSï«Í`O‚¤µ½Ó”÷“Œ2‹†ØÜ+×qˆ?J¢äÈ#Á #qÝÙœ„Qೡ˜²²ÞHþ"b5јèYq&âbL2§7IåÕȃ<#á”d˜¸øk£X… ŒÒJ{TÔ¹E·Aˆ¬al=¸Q<’¡e2AŽ®ˆålµÍSÍGÜÅ‘üà‚Å‘@ÀâH :g) »8RPºXq$LØq.8²éFB¸8R@oq¤´@q¤€ÎâH82°8RL€XaO Y8u2b X¢".±‰¯8RP‘ Å‘%¦RYN ¬8RPað›/Ž<¼frGŠš8Y¬8R¿jz€âȲ= ÑÇ7ü²,ƒw^Yn®å-1Þ”òòR Å‘HÒŽ¥8mðöG¢‰â-ŽD¸§8­[ûŠ#‘í/ŽDëÑþâH$Qõ‡§8I¬8Q£Jtp_qdy9—r»W‰Ð@q$‚4@Ú9ŽºÜK— vŽ(–õ†#x)wñIC_žÕž,Fq$Ö6%\yü~qäI™7hq$¢hÞâH<‘âH\©ðâH òÅŠ#q²¬DÒòÔwÊú©¼Å‘ˆN*Oqdí¥Ö^ªdoï ÓÆ])cþø‹#¥óGÖšƒR)As¸‹#ð#ªäKYù%‹#Kˆæ+Ž<êC=wq$¦UÁ|ºaÞâH¤rqdùV-«uã9Š#TM„G"åªRCÒý’Å‘º^kFqäI«hqä´ïœ }+b¯ÝÓûªüv·cã·J–RŠ¡â¶ iÅà…IüqŽ"×!£üù¶ýå.üð¡ §‰¢ê<$ÑþÚnÜB*bkYõwA7êå¹z[sÔQê8 bßZIqG1+’Õ“9y rsG‘Gó께 ±;#ô#¬P¤¥—Æ-gJJšô±Ð}[0O7 §šÇOªíáŒÄWARì¶HÓèKâQ“Em8c&RçÇ ®ïãBåÎÂç^O–ﱕ—ÞߣíÏE-d•Ñ¡Ob±»¬¼ä‹Ž¡Õ6NÝÏd(¾RûB·ÈYR,ðµeü޵_f\éhH 3Ë·¦Äy—³|C›Ô{TgPÚâ{z«}ÍêyÒAù—$)wˆhXÑŠ•¼µ¢ýžn¡ùRó:^UÇ«N"^%÷&RzÖ…ãG•y‹©›[a\¥ÈȾ"ÎÂ#áÇjLx’Üd/B7¾{ø1ùW%´XW™Õ páÈó>´lržpó–JÈcµrÛ>o´½îàq“! ~`عŸm$!GÐz t­ì˜cóFiC¥ù‚˜ùª]Ñ$V>¨ùf”rü8¤zákU¾»=!4Ñò¥ToÖ+¢k;PÛÚüv`¯å9Ûn za–îW»$«ßFž£XQ‡=gÀOÊv¢z#U‚%ž$«¥M<É×yepŽd%dRÅ_CËÜœº¥:¹éîZÌYš›V°XQfn¤ËzXÒòeq•–—ó™+[Ñ矆ëOë5]¯ézMŸÚ9øL– ŒRRç}‘bì^ž<~’2Rì&Ÿ«˜XgMØ»`'v3²jù¯N/‚-Æy¹CBHÚªªÂ$í>f˜é§ïÆ%à½<"óÚa Ì-±ƒ“•¦ JOzE©võ¿¨èŽú#öÇDÈ1^Ç©Ùy]ý¼6àG¥a¼LŽ@Ó#ë:l#yùÞ×àb<9f³*Ó™­dŒ'á´qßnÍîu<ÒÈØxÜ`¦Á@õ¶ÖL‘…ñúJºŸÌ¨ˆ¾É»dÐ4köç|´=UâU“1l6s½vý˜„tF|êñ5žœáÑݰ½ED?ÔðÛò†]³×nZþuiã&[ý$ ›ÙIÖF#¢–Ð#2»¡icÚžk¹"X€j4OJ´d 2u¹ÝǶLFW×%÷ ­ ”(Brû§·£…ï~—)„ž7ËEùx^ÝÛŠ7h·{Ɔ7÷2 ÜæÝøun 4ÛýŽ*ÿRN³ÕœXIýsÁ¢ƒA3 ¦ñAêô¤\ÁCæ G±*³ kLKêkjެ¹Z…ÓÞÉ’`T²•?I¥[ëM³ðkÍ ¦ju­XMå;ד6Ä8“¾I[Ú÷ÒÓƒa½"± ­ÔÚ à|™¼”À¥ƒˆŽ*a‰ÊQ¾3@©,$\âf¹FÄ”ýoƒ‰M.<­]4ek=Yó“[ƒ «lÄM?ZZÔC¢{Æù)]Ú†;ؽhr“žæ[Ipl$3,½K©–§¶CÆegvô¹:ìY÷j½öØk]¢Ç~ö<¦¥èaV6?k7E²¥¸-ãpAjØ£E–áª4bÖ«>fã¶ÙÉR‹tŽê›ôÆé¢ûRêc"G9”ú˜¼qu‘¼úºD(w£ˆˆGL ¬’âò”*r?c[GøÀ¯øíT¢`Òò‰^×éÄùyšJE×I’:-]bR õGξ¬æèœû¥?°oöË$ ?'áàɬ—5r¿|#b…ö•F¢d,%­Zc1ƒÖ*+¢²kn›öhDOEsqPhh8í*‚°²UNžÞN©k¬2x>6îU7r/lfú:׼ƾ°¡p¶‡.õAͪ¼¿X¹à,`ÕÌ®=éN:ãqÿBU®¬H‰¶M!' 
Ï»Sò².â4𣕃ú‘Ü>´|Åñƒ~ÎWƒú5\ £ç!B‹ÙXÌ­øj¨4ƒyÜ´Ò1j†G¢µr„&ûhÔœY®ß¸›yCCïl×;铉ªwTñúƒp†Tø½êQÇÕS‡ªëOõLeyêºÝ«^0MÈ~’¿P/l”ø¼£+Ëw’›"uMéo¿Í=ën×J2îÉEÖ%ým§ØÅÌQäjÙDÕ¢No®Ùo¶S–œ)ÿt}çÕ:Èš¶3ÝÕ¼u£À±Ž’ÒOýŒé¯ ³ßïv{†ÑoôN»ctuú»· ág–+—F Ôr­.7NS¯Ûnë}CoáLS¸ðý¼d–AK¬ï.k_ñL²„žÙÑ»º> íôzôÿ:Ý­?Ғ꺽¸µ×¦Þi™|ö{ë§7š?æU)Éäå¶ð'˜öœ)[»Û|ÒÁ—Êö%ˆÔKاÏ?¼þ/åóÛ7¯Ï &´Ä˜ôlçN*2¯‚(f–ô©s:Ì?\¿2ÚŠ’|ʦFeJœW:ûħ.9ý ×ôê2å¿L¾l΢kw>§?3” žíEþ!wêaöé:¸Szæ 5èòJÙüéuMÿ™ŒSå3‰¨’R»1z–Lfg–íÏÚj#ùÆ.§”VÃè5ºÛ’û`%½ÞR&T}”WJ’–ûÙÿ6ìÉ´´€Õ5e° %½ýH¼*òÛ‹æ"¢c{e…+‘Ô%¡£~åNâeú–¡)ó»ø*ðsñlñ±cë.“œ,Ʀ玛éçÚ £ÕŒèN Q«}MÏßQÞ,„¦ØŒ ÐNÄÛQtÁù_> 7ŒQá×%F¾{3C²e`Æ®ß|¬}%äesq“¦úSQl¸]å'æ`žÑ¥k+J[¡Ø3%Á ÿ÷RùöþÍÅ0݆žüNïå§ÏT§þaÅÙ/´ÖËKå§G-Ϧôe‰ÿáúXë ntJàQMc;éë® Ä}@¿±¨ßÖ«XQqi K‰H39‹¹âúÌiÂ;eî-¨³+®uËa …Å7}¼p=*P™„ÁLÀ¶Ö‘çF15æ6#s¨µ ÓcåÖ¹>§Ô™AØLÀ掭¢YÜ$žîÍí»ÆjÜš¬Y›—6Ìu’ ÞÜŠØ¿#¦`îänÄfë§— ýêUàˆO›ºó¶÷Ö"~Ô¾³ë˜¬EÊÊÍTèx¬ùœBÐçt éx~,QZ~¢ÜºÔ•p}7³LÂ=¤NŠ2‹DÕå'v8fNCôò±…j\÷£;z²±-¼=’Teæ¿Ã´d¤r¯ãÿrTüÙT“AU5\[Fª_‹_ –ç©›ž­ü•é & kTö½3å¦E=ÇtÉÀ|þfæì4sI¹jÌïÐ[oÝ~9nÄÜl¼Œ2ÓœVYý¥ ¶L¯¾°€ÃZ·^_œ±ýï–(–ÇŠpîÒëK+GÉ¿ÓÀŸMÕ!D'÷²–wÑO˜ç#O4ìÓ,âö÷OŸþ9TºcÇž˜öÄÐ{ãNÇé[ãn¿Û™ô&ý±ÕtÆJ©ˆÛnß%H“,O±Âé‚]¤Gø2ó0øƒZÍæ¾¡ÖêLŽY_ø!5ã•£ã5T¢èJgLì`FF©KK¡ޏK[S:c?½ jûâLü<¿òÖ]Ù2} ßãÐÝPwœuâÿVß âß4“F°6”éÉ$¯i'ºÂ†Âöv$xV‹na•o«Þ¥"éN£­o¿ßÓC+;/…³4iÙ‡ã+¢d–Û»Õš’·ßmL‡Ù ‚ëż™Ý$ ×4äÕßv$çíKùZ2ïrbÙñî´_¸œf4 ƒÅ|wÊ//^‚¹#o „CÛ’©›0©Se“Ñp<ôy„…•\íGØ \ÐÕèÇ®-¥÷<ÝÿHÂ"ªö,E rÌ6,0VäNظì9£¦ã–çÈÀó´ -¤§†L`mÐ1¶b937©…‘j\êfQ‡ƒ…í°G%­gÇ×;B'«ÅÆÖèÀÁžFÇ쀚îq'^p›½ýÖ Ó˜äà†vÆêª\šA–!¹:Žä„kK rÀ†E rv‚ýç"ˆ-)ÈÔS‹åŒtN‰!<ªD ø‚.t9*’EuùÉ6=ìfx¦›e ÈYÓ 'åƒÇeç»Ñx1™p” •}¯m ãGîÈPa\ ],ø”½7×Ìa¬>Qö4+m´Ž‡Ìó‘FeO޲±Èlðî‡+@èi¦&A\j)ˆKáo/áß„}zãVÌ*Ö($”¤.& \ nÅÛzY–2Å<¢Ý”·Å§wlÜG«×ÁGÉ $ †ïÑ¥R X–[7wô$Ö@ê)p¬‚Š×@ËxÞk wà*X-¸^¥ÖÁ§×‹øŠЂïw\›ÀÖÁ3ß ’$™ÑœÍÂèÉÃ{«ÐK3Ý©ëkɧµäÓõ*(½ Þ\ûZ™ “U§ÄÁµË(W]÷ŽQñ*hë‚C[möΆüŸÕRü›òöË××ÿðË—(>½yýAyóéãÇ·o¾þòé£òîÓgå·/o?Ó =š=ú_:ŽÌ¿}£$$'Ñ•¢ÙÊ b_Êÿ{¢üõ¯Jä2Wôr$þ¤,fLz=&kvMgFÑæŠúßJÒ–ÍÕéô¥ü·ºúüÞ¯jzèß5£gv[†Ù3ú Ýèš½íahF·ÝêÐ#0#ì™=ŠÏ”à¿ýJV“^¢ÌÈÑ,€”&cUJ¤PèV×Y¯šyC÷Ô&s«ñùo_9¦6ù‚ff{Ü›’Aל°_ßþ1ï¶âÑHùú U=š w™åy¿°»ûdÕÚWtd•ÅÿúŽÛƒ ÆC¾EÛLõ,ºÎm¢h!ò¬ÿMIžáõž§´þöW¡#R\ݹ;š ßÉ^þÄuK¢¹þMƽ†ã„§–sd…S¯~5‚ׄ=½Èæú ô†óÙw°m’·•P[k{.£Í•Œ>¼öêMIDЉKI+a噯½â†/'׸Ç/Òž—?h†/”nTL&Ï>õ#¬úü^жN·ÕÕ1×_>äF‡ô{–£µÌ–©uú†¡YŽÞÖºÖ Õµ‰Ñouã:¢æÏö~š[.ˆzàXË©¥Ì¿a‡fÂvȱ¾àÐfïÞ¹þíÙÌ®|‰±'ó˜ú1jZú‰ù"N^±â+éÝ•â,˶aŸ`i•ô/V¬¼ðƒ‘LÓ«çÊ-ýN4'¶KO´N’°—<ˆ&/Ö ¯s3‡Ñ<¬Š®'µI}‘×vRá³Mc+VÑ®QU —ð&ñçÜ™‹ÛPݶÑ×wJ,Ïq3–ÌßÌÖâ‚#Õo?^vÍn”°“¦OŸœS-Kë öf§q«¢±95Ða\•¡=™j7„þ5Î^V~»D˜in ¾;£Z¾ÄVp@œõ"uy3ÂBË_éÑæTg³´PúÃIUú-Å©èœN!Õ< ‰Bv‹%„+xD¥ì¯Þ#g)ûÌöýòL†$i ‚ Y6dS ÔªÜ/¥4UÎôGú%gö“ò¶Ã'õ;;M8OáŽØÐ¡""OD±àe]{™ بºÑ(?ê[©ë eïyû’Úá­[+y¶‘%l¯ŒiýŽÁ8«wÄSޫا€”åöª†¤=]náä1mˆ§bÕeŒ%môoq ¼òÅ„™/qoæ¤DðKÕ'òKÖ&Ï6(\“ ¯G¿ ¬E„ ĪC<á廹öÐlc­­lˆíöØèŒ{]­O&-:ăž6ž˜m­ktÖd¢»U~ˆ¶'5ªCÀ8!àr7Âßø&ìÓüElŸãI h/K±™†pŠþ;VãÁõb®,’b™½²³»×ˆ½}Â~߸³f^ã–bEI‰Í©Ö Iz à8JäμD®¥7Zý¿¶f´û]­Õé¶»}}Ðí´ Sì«‘ÛóõWhÚ2æÝè÷ºÆãY~ÜÞšº-pU­“Ý⦦nw3±jêú½ÛVWnà5u{”RTÇßU´¢º}­—<(+E~YÝñuK°®nït Öíê ¸°n7´°Ž»°n?°°ŽXXå/¬ƒ ë`È|…uLþÂ:(¨°Œ® ü…u@PpaŸ;ï›sC¼.[vƒ=€„îKÁœ+v‰¶dÖ‚uoÒ¶Ù9žó– †º#˜Œ&c‰8!ó;÷D‰à#€;ž¹ ¾ÏË©¾¡Ã§"¨JS[âÚEVÑe]žá/ùy‰Ñ‰ƒkâ‡äÆ%·Ñi {¨\â€$yDÂP ".Ïê•­nÚ¬ê…_,Æ;¶l*%ªøÉ-p44œv-Ñ|YHe=˜¿²€Í]YÀ„VÖ ¡•õh¾Êz¢Pe½ >ge=³²‚©¬àÂ+ëà Êz.þÚ€TÖC`ù+먀Êz*´²‚-a€•õhxe}a©#,u„¥Ž°Ô–:ÂRGXêKa©#,Ga¡ÖÀsQë@!Ô:\µ@­@Pë rSë°y©u€ j(6µ”—Z‡·œQ*iB`Ô:x\Ô:œ G•¥%¡ñÎrà7­H B-r®›óÐ¥Gò;-÷ª6V¼5ÅJ£0Û¥år+ÁÛ*ÑÔ£Ú·9 gZ¿cmÂØ$!‡…E-¼–²·ð^ÐÙç.¼/áñCbÄÅpÝ—0Ðì4}‡¯ñ<,Þ|T cEÇ(¿"rcÅ)dA)Dˆò¼Ãô(H—¸W  ¨õç^ ?+p¶ôA×ìtt­ïDëX¤£ ÌŽ£Y=½ç†­›F»üü!,äkÜ› £Æø»ë;®?ÝÁíeYj Sph+)³ß%^@&E ‰¤£0º¡÷»mÚ¡¤$ºoöM³ÓÕz»ÓÛûd󞯿’Ö¨—(“r`Í?"‚ ^Õ-C0ѺX^?¼ ˜Ø­0‚ áU*L0±§õ’C¾ùÂ!˜8ºn‰Lì›na‚‰]þ˜`bŸ«#˜Ø‹ ˜àqCA¼Þ1ˆ` !˜ & ȼܘ‚ ~P Á]  P‚ n|@ùäH‹wq ’º!w#;ÍaJá¾TG’J­7_ø‚‡KÇÞfóÀgywÀK$ù"÷+H¢çLb0̢٘MBÚ°Û ¼VQ›²Äí™HÉ DbsUãøœ¾Ø‚;ydx§‘AV) ±ç<·Yð?Î*Á ¼Ý&(©2Û&þˆ¬€;¶7šc&€C˜ ¸±Ìܘpfnh83?4/37¢ 3>737:73?"Œ™€W„™€ÈLÀ‹¿6`Ìü°fnT3?*œ™€[ €™ ¸¡E˜ 
ê£y}4¯æõѼ>š×Góúh^ÍŸÏÑ\¬¤ž³¤VÒÎ *içF•´s£‚JÚÁ¨€’vnlþ’v$°¤†ÍYÒå/içÁ-g”JšhIû^<Î’v®‡JÏ`rÏ^øg.Pe;È!ª°²¯]˜•íUoßœ•ííßÜb„09ÈñüÇòDZ´Fo ü3rõ¡8qzA)ôBç+½¾½¨nzƒRÊÓÈÏIo ÚI=£½’®R7vïr• ϶ºÊå>ìw²>Dk–Û¾htÚ]½û¸-ÛX9ÂÈ»›³u¿©/Ä2wO Xr{K0Àe™?à"¶3´w3€DáLé%NßEBÄüà›™?ú],üŒùc@ZݱÕhm£;Ð:úd õ;ƒ‰ÖÛ¶etÛ¦Þ?„ÅÃ{Å?WOüÂÀWNeÕð(æŒ÷j‰»³÷h¦Dè* &A¦}({uÄ-¨¼É½,… ._à›°Oo$ xOâGlo’d¥_­¹òÖ¿qÃÀŸQgyo´—% ú=Á¡íÌÞ±Re/®se‘Ô0ï•ÝLÑO5V£¬¥9\3kÞ¸³f^ã–bEI ´ð„o¨€üÉ¥‚_Ý¿`+åÅPy±Uø‹å˽“{uUÍk³'Væƒ"˜ÐÂ(ŽVY_«öãm‡Ä¿AÛ¶Úœ‚aó„sàNl¦ 6º‰b$W´I-ýÏn8T.Ô´ÐÙ›\/Ô ß ¦È ñØ'\°Ÿ]Y¾ã‘·¬/*_ùðÇT9¯ßÞÐåÿ÷ýá;v‘xáÇ!U›yÆÑðÂWMaÊ:T"úY³Ÿ(J:Ý´¿/ØJgŠœuãÅ2ýh*7L±ØŸñévìzn|Ç®&B׎.|vYžñŠ­KŸ‡»#‹èq ØÚb-0Z½†Nÿc¼8S^°æÓöõ¾AÿuëÆW,–ÿ…Åò_¤½§ÍC<œáû+3©§¼¡G¸®æÎÅ£›'6ËéÍ“öÄÖaŸùªp?ðN}åQÊ!”ÔÙÕÞB·ÝùèÆJ"lw+9« *ÿýhI‹P6¾•÷¼mËV(¿òg2÷‚»u×rùÔýÊA÷Yù•Uí/‰‹_&=–¸ÄƒÒc ŒN§? êºÖÖ{ýn{ÐîëƒN·ÏÁ޵ãÛ¯d5é%ÊŒX퇋[oËpcýa]ÅW­Gˆk‡>€¨±ÄWh j¬]—;ò 1ÖqõJœkç\‹ÒbHPèÈwnz.>÷@Ϻ²áed‚ÞHTé¹@Èœô\ü˜z.(Œž Œ® z.(œž‹Ÿ¿¸ŽàÖÜ:‚[GpO3‚‹f.AD>``‘?6?‘?&˜È‡L䀿$òáG#òÃç%ò§a"‚ˆ|øqˆ|øÁaD>àô*dDn",€È‡Bä@ù°% ”Èžç9ªøÚ‰¯øÚ‰¯ø®4Ê<åDùÁ ¡üT(?øQ!”pT~Ê~lnÊ$ŒòˆÍGùå¦ü8LªÓÉQwô¼·‰1èm“;uiã§áL({qDœX.L.‡€©vZí´òÌÒAe N*(·sÊ­‹||*Fëw„æÃ©4¸a÷|AdÙ‘Ò„¢Röf€‘³*jTma)Ÿ 7â=µà"Ò$t@URf€‚[Ÿ„ŸÉ„„ÄOŠ–#Júd§&yxÁŠ™/Ôá…ÚÑ;ºn­cXŽÖÑí¶6h­cNÚcÃj;ýàRŽFìwl…ô y&ʰ€eÁY À"¶²°½MΔ^"íÝø,è‘+$)Oí݉¸'ÛÈûš7-lÅ_ÃȱeìU4lÅϦð\÷F‘ö ƒµ¶²!¶ûãi· mL±µNËjkV»ß×Úñ`Ðëèý®n"œàJ!ˆ[ô›åꙜ¤à‹q€ ™v5gCEÅ£+ocAGv9ûPtÎð–*ŸÅl'våÉ.ñrŠŸ„%°ø©E[ÞÖ{]3MØ7Úf¿Õ6Ív·Û6zúÞê§Ý_%­Q/Q&åÀZxDõO¼ª[¦þé?Ýë?[ÿ±ZõO»V%¼J…  ö´^ò`È7_8%PG×-Ѩ}Ó-\µËƒá(‚zênpU?í½¬~Ú‹ ª~Ú‹¨~â‹éªŸ¸mHõôFBH^ýAæ­~â¿2T?ñƒ«Ÿ ÀèÊ©~ T?qã'!ùGÁªtQõÂhtùyez1Ñ¥]%ðD GZa$Ñt7P q.¢9±ñçaSzàŽ~&–ã¹>ùÂ’ö“ûWSבG=$Ée?7Сo\¶ÊþáFqÞ}pgIΕ݅¼î@έéŒEf?p‘¼‹_ÞQ#%–µ.dgÜö»ôŠbV45½“£H!=»Ó“vÁ%M•fÖ÷/‹pšXÑ®þYZDÅüæ[7–ëYô ;‰ÛøüA¥ÏkrÔšYH–aâ¤''xrígÕ6ô!1ÇÞ0¸rÕ¦öõdjOf,µ±4ú’ò'$X)îÜÚyÉ¢î\ˆ{U-÷êúé2KCߤJI$iWq<4Ëq˜3üjØ×ûm‰käAnìE;Û¿j’Ønұ׊nèÚaoØa\Y+®ÉÝæF°A°ƒàÚ%Ô<Ø!‰5v[‘¶')œl¦?ŽèÿFÌÑ¥ÿ®¤aÁœZ¬+wkùQÑJ‰þ_Uµ<´e1§ž#±f¯˜Â›ÍU@–©¬Ñ¬x<Yë½¶½èÕý…Ú¼`6íâá€Íþ}ñôˆ}¡žÑç›Yö¡Â?J?Ò?ým1‘%ýà Çéç¦$¾P—KUê8\ÊfÕY©Wýç‚õ6h®†¼„îÔõµ€=4¢%Kc˜ÜõË^ «ˆMA²l‘IQB%À½t ëÛòy„sÙ6sVè|ú’N•.syÒëï&ð3ò+³ñGª«"­m_juÍ~¯Ïó¦Úö³ëÿ¹³»õ±k°{ío–^Fo·ŸsSÄ\Uz‹–ò;}_Ͱ>Û57«êÙͼºÌù«`"¥J¸”†.Qå*×YejUM³fÎGÔ%­@‹U&‹úá¹yr}j’è6ï;nìš½v³PÙÌ g ;Ö%¥¡Õˆb,£9 ÝÀ©B ©WÓ­plW!ÈèêzE=âxÌGTEý™Þ޾û½ aOÊ£«ÒÁ¨’%Õí¶»r7éK¹ÝxzÂNB iiL4ÕÆ¡ëL ×ɺ|[r'(*[älsº-]•û° a‚æ¸aí¿Öþk©@xBhXÅ6Å~Ù=aóiÑûI[øô-ÊgcÞr+óëÎ?ý<úøú×·U¸Ë[wa°—ðMäCÇkݨR>ðå'=Ï·²QRÃPˆŠÚRЍ}ê!¬ní¥ÂÄA=„Õ#ê!Qõ‡‹zGzO£J–Ô^ê¡Ó;3 Sá•y¨‡êÓ R}«–¯&¤ö_kÿu_h{-Ò6%N=tôžƒzè”̘zO47õšÈ4éž“zUꆬÍ} ›øò©‡P²¬DÒ²-—qS¹©‡ðœT.ê¡ú2ç/sx©‡ð$C¨‡êÕ^ÍžÅÑìßñQI˜?õžtõÐ}¨—‚,ÉØ–§ÍI=tÌa4ê¡òMácYÀ ”À¨‡pdâR•nÓ² >1qS!À*ðeà}y(ÇÚú­~ÛlÀ·ÐvÏßFD)DE¥EqOQQéVí™çÒã+@TTJ¨ÄþÜW4R¢¢òR9·PåG"**¯óò´O ò%:*n;ñÐpl@rN á(r2²æs‹ÕÚÈ€qZ†©ÎCíåàÇ-Tx"¶–1ñð’»ð£ÞXžë ·5G±Ò«ûÖJˆ öKÁ ·‹“ó™ 7wy„0©‹ »3B?ÂHZziÜr¦¤¤ Q]ºfP6Ղǘӻ$7ó?ZêA{H½Õ« !žY¤Eê%ñ¨É¢1Θ pîðC¹vøP¹kܹדåûÝ 2<<×®¨…Œ¥,ôé™%b·’9ý C=ªmœ:”ÉP|¥ö…n‘³¤ÿ ªÑúk¿ÌxýÒÑ0@g–oM‰ó.gôû†6©¸ž¿´Å÷4?"úš±e¤ƒò/IR(®Œã‘:¢/ü 2‚[ªŒŸÉ„„Ä·I$õ¤§68Â`¥\ï/Ô…ë\¨Ã µ£wtÝ4&Zǰ­£ÛmmÐ2:ZÇœ´Ç†Õw úÁeÒ&)MÂG•0vT¸¸#JèØv 1YÊ@…¸Ä¤JÙH+&Qb‘`L¢^>©RË•ŽˆYªt8K¶Ê ÷‘U5ÒÖù¬î—§?¥=Éè­d™ci«…›ãªäøHw ²8y¯P"¿U,ˬ[)ÕVE¶ólUÕ€GÜY-ƒäȹT»#Õ*ÈY£Ñª`]%î0Ó¸ÄN«Ç2WºðJv¾¸Óœ %æé×U‹<_xÞy@Ü»j…Wh¾Vvj9+žØ˜„3×O¢4¿Ò£ ò4Ïñ°m8À”?Ê9®ÆrTn=ìå*©51š,¹J“y°Îç.f!¥÷âWö¥2ÁÕH’/¥ ‡rÝŸ(0·ÕNEíTœ’S‘=ûÍl­¨Œ®Ú¤¨ìbUv•¬ã¯oΤîqÝ!w¹Ê%êížß[ŸIê3Éq&yÂÆXŸPžó å´­ÀëZëZ}®OÃONÃYK}Æ”˜R4ÿö~.&p›ˆNgÐo·ŸVÂ=R¹Ó‡Úm•l”ñ…òžždHææ¹B~°ñÎz^¤ÚZVÚ€ê¤U#©¢ùã#¢‹„„1…WlxȨ:Z¹ À#>›]F|^!w±6Ð8aDL±¢aDÉm8ÀŽ #>3¾!°“výH¦Õy^f»ÀWV¥Ù>°šóeðÔÚ]aÌòÝéRäJ¬ tæ?ªlO½§Ø ã*EFöq ?Vc“r/{ºñÝzÜ ß㪄!³º.x¢ïCË&çÉ»SÜ%ŠMx`}”¿CU¶+=ŠUs°V¹IWº9$6ÅÇÜu⧺é¬ñnV,|±ìþÙø!•‡uv·ÚÔv ¶?¤Xc}Îv€›W¶^˜e„WŒ8À6òÀоú9~bðÕ¹Á,ièrñQq‘50c£cÊ­~šÿõù/Úû”õ.UdYÁüMOýƒl.‚QneD…8 ¸6S„³.D„³OTÊ~Î>¡E  è6%,—Ÿ¾³”-”¾'&îá~ÆÐû“1Êg2TŠñv/"™ µâ‚>u}JϘD]Z¯ézM×kúÔÎÁà‘9WþÇÖƒ^çq+øX¯ÇÉã-Îû"û¾yÛ(›Œ¸Œ”Å ìçj&¶ÃY†. 
y0Š‘v~aW1#5&¡–ÿ Y"<Þ-ÆynzòWjÙ…l ª´{{‚*Q8Ús‰Ówž÷ýc+×+³0©ÑÜÊ&3ͳÒbâ*2D“òô%oh³x«zëñ¬â7\Z[+¶¯š7-lψùÂD¾Ge~øp“>ñ<·ð\×un- ZÕéöZ}¬µ• ±A´kèM·œ¾ÖiG·Æ†FZNÛ2ºmµìòCŒ°Eª<ŒÊüÝßþ€„©ëhS¸rb 4ÈCXM.>¶°¨GCmß.öwË;>¦.à½<"=ßa Ì-±ƒóõ˜b€Þêñ¤Wf¨]ý/*º£þ蕌Džà5WÍN÷êçµ?* ã}ñhzx/ÏÀ³¿é‘·½¯^ÂÅxrÌfU¦35[­€ö÷†ÆÒ¥_“»ÍÂé/¤ ·ƒàÚ%Ya€6q=òê)ûe3«e¹î2Ì©e¹r'±–R¬´bí•lÃüІŜ:xÄš½bŠ8l6½À¶¼« Š™*ÍŠúïÐÓë­¶½èÕý…Ú¼`6èâ!ÜÄþ}ñ4àt¡žÑç›Kö¡B &J?0 ƒÅüéo‹šôƒ7$§Ÿ›&Å#KUJÿ/%…Ñsš&õÏë]Ð\ q3Ý©ëk¢ý!sº"Y*®>åoR_{lJd Ì=‹Be‰ÊÉšäí ÷Ò×÷ãŒq(!a’+2ŸždÇS% + 12)Uš´åIÙK°­œ@jÓ!7ͯ5/˜ªÕµbe‡Þѽ_–ÜG<@'k!ŠÔOx›ÕŠlG¥·Ä¥}ö|ænÍÇ­jöÖKqOÎãgœÕß•ó¯Y³?ç#êïË>±ãÆ“³D#º±¶·ˆè‡É9hØ5{í&³:‹±ë¹ñ]33DØÞ[†hL[q-WËÇÍž™‚L]n7±-S€ÑÕuÉ=Héq$ŠÜþéíhá»ße aìkM I-[u*’º$ºÝv÷Äà Iü$Mp‰¦Zú(ðs +¤½Së“Y}2;&ïžÚÄÊüùU5}íË×¾üÚÍ »©•ºõù…³ÿm0±;kæÖ.š>Ds²æ'·…cd þ·RÊŠz¨ëÎ(‘.mCñÞzx4¹IOó­$Ϙi$3,½K©–uÄŸÇegvô¹:ìY÷ê;À㻜S[®ÄEàÚklÛ¾Ø5õþÀ|úÆš Ù WÚˆÃB§âB0§¨­NÅåB§ÂRö•RÈ"‡Óò‡ÓR—gÏcZЇ©Êæg-«W¶ÔÂë$q¸ ò4BŽGGÅm'ò>£_{Pߤ7pïÜpŸO ôø.O.æñÛ«<†Õ/Ð%BŸvñè!Hù@Yqù@J¹ïù†¶Ž,ðá±üý^ΆÊÌ‘ä*ñ±—2ëê:Óp§¥ËÛ¯Ô5ÆïÒ)OÂmÁßÊ$¸K’´šŸA·üʑ騞Έs“–ŸÎ:¨l“´’ekðº+ÿkñ–LÇzãØøÖ\; ;zL;î1 §]åQV¶ÊGaËÝ)u°ÏÇ]£êFZÇÍ$âgø’׈7°l™èR¨ÕŒî ‰• N¶+Í4î¸gw'ö…ª\Y‘-l›BNžw§ä0ÄiàG2–Hõ#¹}hùŠ>ý˜œ¯õk¸@GÏÙ# \©x¦q›?ÌbcärXl=™‹í燺â$Rü ¦ëç†(3×wg‹™’Ñž$éðWϯ©¸œ²H·"m½³¼HÚz°G¶«#sÕ<¨87£?äf†S4ô't¦Fyï©‚ø·¾ ûô××_þ©|+nàÊPI-‘’Å#%$.HDÇTß)ñ•) ó¡’Q^*ÿsõÚKã·õYlE×ÊÜŠ¯†J3˜ÇMfþ©n4ÃÀ#ÑZÙA“}4jÎ,×oÜͼ¡¡wǶ3 ‰ã†ÄŽéΪüÄlÅP™ìŠ z©d-hŒ®G?иîGJ(…·-’4HúcAñ&5ê¶·pˆÃÛ籑Q6%¬ïÊ$•g„òêoÊOnLf¯î_°£ñ‹¡òb«ÃöâLy‘äÐÐO}{øüCjHþYjJò»$™ƒ}îë›söØ §$>·¼\¾<¤Â~IG'™­úèO5 ûxò††°Ú²T:ïÁõb®,˜[¸¿ ËcXÉZ`ͼÆ-…z猌å4—PgöVêÿ7åí—¯¯ÿþá—/ÿP>|zóúƒòæÓÇoß|ýåÓGåݧÏÊo_Þ~fœºnöèé82ÿýöÒ»~3ºR4[yAì«@ùR”¿þU‰kP‚ ÿö+YMz‰2#Vûß’UŸJLÖï–a²¹ÄWò_ŽŠ?{hS3ÉG™{‹©Kžõ¥IÛØ˜ß¡èíùo_9æ2ù‚ff{Ü›’Aל°_Ϧæüú?n_ùú Uš¯ÙW,Ïû}„ØÓ'+Ô¾¢ã©,þ×wÜÆË ù†«¹ˆÂ”ò*ùéü.¾ üö©÷*œ)ÚDÑBä¹þ›’$b2ªK¥õ·¿iÍ‚kêý­:sÉ<ÈrÁÕ¾²ü)a×vž/wžS]ÿ†6.Õ£|ô:5k£¬( )„mÍÝcÏãà!…Þð¥@€çÄwFWVt…3IEä$åÓ¶FŒ/wlmÏ%~,]ìU^2¨ë/È(ðG$ òqÌ©£GO×wÑÖ®`µ {õªCzH„æeD†Ç8ˆ ]«æÉ€1íBcÅiÈÊ™’ßøÓ„Ó4§öô•JµD¿ à¡€R8´àÔá°+|.J>…"&pÇK;Çx ÇE  ®J»cL â-s—4ç,ÝÛ$á|„mX¦Ñíu{«Ýž”ÛílÔf ®*­$ê[¨[D³DE¼Äî×Z„º.°5ˆÉAÚÀÂñæi‰3¶*cÑŽéÙÒó¸i0p0Ï]¼½‡~ìI¾ ˆÙÞizMF™!Flî•ë8Ä%Ä È#Á ¸ˆîlNÏ)φ"¡@äá/¢äåJDôì(„‰¸“,i7ñÓG¹QFä¡{Ý(K#AÄÅ_E6¥ds+ŠnƒYÃØzp£x$CËd ƒ]Ë!Øj›sŒø£0 ­…aƒ¢00hÞ( Ü)DaýµƒDaàͅ©]í*æ¢dFL `v’@410ÝZɋޘq09ZÕ7â7w”¤ÿPÜ.&hìÎý+™hé¥qË™’’&„ö®‚ä(™„”\ŠìTM 9Êö$ã-áàŒ`–vP^-o~µó„ìN}âhãds~ú hþÙäcáÿ5zfK7ƒV3!ï0€FŠŽçQ¹›ÞßPáÝù]Å [GØQ9©>nå¾-ÖäÎ,ßšç]~o„wŠÆŽ"Ê «>PŽG_³ªØt<þ%I Å•r› ¥Eq 2dÇsW¢ûi¯Ê ˜ …v›û¥”&Iê)í¥ÄHq•“•ÚpÙñÈŸÜd·Ô™øL&$$~F}³~=¼¿P®s¡/ÔŽÞÑuÓ˜hÃr´Žn·µAËèhsÒVkÜ1è—g ?îx&ci/à'Vú–÷5´&×^ñÖÞËSë<ÌòÊÌ:Bµ9 Ê_¬è‰~}sž©¯´6TÒÑlÊVÛ¥tIÙEG’.<*¶vï!ɦɱkr÷¸â}ˆL) ×ßëI’("Ù¾e‰T&yyÔ7çé”ͤú‰ú߯ÏÑ2¦žýÒœ:ø+ õ7ŒÀçӛʭ xßòV¾>¦×Çt¬S4$¾|$ûµÊ™áT zè¶--5½µI—œZp‰›„-=Ÿm_’”§1‘‰s©ªÉ‰ôÄ›¶š¯®H™È÷R²€ªO“\¸IŸxGÏu)ç ï­êt;>ÖÚʆزŒ±Ñr¨±ï9-:°V_·K³¬VwÒí›­I›”b„Í 7 UµÓg¡~9gƒ`è–ÑktFWÇß•\³µÞÖiì£Rß­_íõz­Áæw,Õm±ÈüÁ®=ó°zcQÙü5s=—cûàÅLp9¿ÙsáÏ w‰ÛÓ»‡¡üBf|‰-ûk8ùÒ¶!#y†$–¦ –Àž G¥eƒáa騸åQ-î´kЂHEác¥¢ð ]¢öãp«ú1ð –ÁÉ Ï߬6ìÒ¸=ç|6„¿›^`9·<‹žsÂýA ®^–Bÿ¶À7aŸ>ÿðú¿”Ïoß¼>ß΀ ù#ÔËÇ ëíS[;| $|e´%ùTF%øJgŸXø!±èOÆI0±\/ÿeF§]»ó9ý™¡dôP`/ò¹S?³ÈoاµÕŸßë·û½N¯Õ*¼‹NÂ~ï^õÈ ¡[õ 'z¦27CÝñ;ýL“X•.|ŸùŠê,šÒ¾Îx(Ó+ä»ËÞ‹µÐþŒéGÛ¦Ñtt£Ý}½Ûï²|ØÌ{xlzmâzùA·úØùXnéõ€ 2ž¾=ût2¬JÖIå+ãHþ’Ø!åí {bãKì|¢üiGøáLù§ë;¯ŠÑ‚3ekw›O:øRÙ5·(½„}ºk°Uú YLÜ©íŽÑÖu^)›?½®é?“ñbª|NŠ©Ý=ËwsÆÜ¹g݃àZùÆèª•Vƒí·¿Ô¶VQRªˆ”Bû•’¼Íž3õæ4ÞödZZÀ‚êZN×+´¯’·¨Èo) rte…+‘ÍÕU΋2}ËД”X9ïeŒº¬»Ìž;Î ˜F«Ñ@£VûÚš’s8BS¸Ç×Ú±»|¸aŒ ¿.Ñ0òØ‹˜’-Øk_ yÙ\dÔ¾TîFWù‰½grF—®­(m…bwΔƒþßKåÛû7oÃhtzò;½g ”Ÿ>Sú‡g¿ÐZ//•Ÿ6sj¿,Ñâ?\ÿk½ÁN <ªiìí ŠÄÞLJð·-Xö¬Eai K‰HœPü/æŠë3§!$_ô=¼0ÐÂbxôd€2 ƒ™",€m­#ÏbjÌmÏõ©žÌ­0°"ÿx~]ÙLÀ掭¢YÜ$žîÍí»ÆjÜš¬Y›—6Ìu’×ÍýwÄÌÜØlýôRI+÷ħÍÝùÛ{k?jßÙ;•Y‹ /#ÑñXó9… Ïé@Òñ<ü8>X¢ôÎK¹u©+áúnf™„{|ÔdÄŽž[^6I~‡i!ÉH-$Ê£'U ô ˜Z|-~ï±¶ÜÆt˜Ól+ð—ÝíËhŒRïrbÙ{*vyàVo¾­ˆâ¿•ÆK0wä•€phÛB2u£8­C`“Ñp<ôyg ,Âv˜» 
ºýص¥ôž‚¡ûIØADÕž.;,È1Û8°ÀX}’;aãJ°çŒšŽ[º¿Òž#ÏÓŒQ$´žB2U ¢~=c+–3s“L6©fÁ¥nñ™líˆì+â,<|í°ƒ t\_Æxøƒ=ŽÙ=4Ý!ãN¼à–½0I]ŠÆ”ÄU®±Võ(Ç ËÀ\G ²;ck]rÀ†E rv‚ýç"xT‹f¸é¯äŒtDìEèÆwRÀsa)àŒNXŽŠdQÝF~²MûŸžéfrOÖl9öÊž‘FRé•%ãîÁ™ñ–Òòù,ÙˆgEÔ»¦“)Áñ•ÒrIÃMU0¢§p)MÎ9Í?“4ì„Õ©ÍCª3RÅä¡,i>~²f®í]Cêê¥IÉÂFÇö]úÿö6¸¡>Rˆv¼¼vÇtUIÒ›™e_¹¾œå•aç ŸòœÇYà»TO˜ÒãïpÉm¯‚q„|Ø—»ˆdú‘EÝ2"Ù ¬˼œXΦ>¥»@Ûº Ù©•úÈ©iøé$dwÄIîWS7¾ZŒ1…Ì­Y#»Fh|×pwI/˜F±]I³ÖóH´;•²¢&zËÖØõdߨ#—ÔN8ä7ž)Ï—ŠXE}º-f¢äÌéíõØ’»3)¾NrmJÝÁ…/B"mM‰%Ëx^o&É’­n”Ñ”f˵Zþ²²\ÿÔ¬Ž-kHl)ûV}ãMõ9õÜb% fDq’*³Âµõž{wˆôóm7ÕÛ@:í~«ÓzÜþ{íÞ®ÑÜ .x³¼,ÝšXmîîûß½ P1±Cù—º¬ÌiÇ͇¬þ`ȇ®3%C (ö_ÖìÏùhL'í:[…¨À‹Ð&6WÉ3EW¯ñ$ǦÝØ«ý$Ñ¥a×ìµ›–gÓ= Érn‚4ç&ËÂIþÁvêôØ€Õ~â3q† ÝåÐ@Y;GãÅdB‘,üx¨]]×eàGî…7Ûý|DՄĹº` #ňFÔŸsYv ©ãáó|¤Ñ@¾û}”EfãÙ =MÛ$ˆËQ-q)üí%ü›°OoÜ—YÙSbî•ÜÞ'E2ISp_ÞÖË’d†yD[-(‰‹OïØ¸³WcÚaj’H cåùsÁœÙ ™ø³if4ÕØW†ìJ.ŠËií²Üºø¦'±R·cT¼ZÆó^é¸WAæãÕëc|z½ˆ¯èi-ø~ǵ h<ó½ ɘ%os¦K ¨ý«8L3Ý©ëkɧӗ<ëUPz¼¹"öµ2 Z«(N«‰ƒk—ñ¯°gîv,ˆŠWA[Új ¶w6äÿ¬–âß”·_¾¾þû‡_¾üCùðéÍëÊ›O?¾}óõ—O•wŸ>+¿}yû™ÑèÐìÑÿÒqdþûí%a<‰®ÍV^û*Pþ߃å¯U’÷²ý…‰?)‹Ó^Éš]Ó™Q´¹¢þ·’´es©:=B)ÿ­®>¿÷ëúwÍè™Ý–aöŒ~ÃèöŒVˤj›Z«Å˜ÚZô—½^·GñYƒ\ø·_ÉjÒK”9šÒÑdK › 7Áªü:ëU3oèžBen5>ÿí+ÇÔ&_Ð ÃlÃëñ4¸±Ù¯Çsßiý1w•¯ŸPÕ£™™Yž÷û»»OV­}EGVYü¯ï¸=¨`<ä[´Í\RÏ¢káLÑ&Š"Ïúß”¦CnšþÂó”Ößþj tDŠ«;wG“`‘¼„Áš¸ŽbI4׿ɈØpœðÔrެpŠãÕ¯Fðš0öf6×gx 7œÏŠ€mkÄÒUq[k{.ñcyÀèÃËÂä;rc“7ÔF9…zR¸y‰}å:ñG“üÑzÄV3C‚‹èÎX^JàÓ#ÎC «$|Vd€‹¾z(9b-;æË¥i/¸­M:—£sùskìúZÆYŸ=¯"Êz &%H0¸SÀÈënƒÐAFeÆ?ŠG2”FÆ $­+b9$D^îl‰gºˆˆzcy.{;/Ù´[|k¹1Ž´ÉtÁq3o¹¹£Ä5¥¸]LPö ý…5ZziÜåAC ù«bJBžïù/^âÞ§Z!hÅ·NFÙ¾A[ûëÿgÜ~ý÷ÇßÚ±=ûøoûëÕ5™}?ÿü>x¥¢^µKÀõ$.xœi “ÓWºô¢ØšÍ7>ä©¿#u£ðªó.wO¿¡Aã­•*ÖM¹‡›KJÚóx³(zö¼0º¦>‘ÑØóFri«÷íŒÐýRŠ8I½à}Ga¤ž¾+O\%³Ÿòáý{¾óB^¨zkÐmµÛ}Íê8ŽÖ1Ǻ6&]K³ûƒn¯ß×mÓ4/Ô¥<-)ïÓT¯Ù›bFDB›‹/sšÿõù/Ú{âï}—[Xìmna1[ßçfÛ:ª8¼i¾Dt7*ŒLT?Û}”®”ØóÝÂÂOx ËÙòŒ7»­ÝûŒ·°Ðìiž}êGXõŸõnuuÌõ—¹Ñ!ýžåh-³ej¾ah–£·µ®5humbô[]㸎¨ù[ŸŸæÖŸ ¢8Örj)óoØ¡™p'…2A¬/8´Ù#x®?E»A6³+_âGìý<¦~Œ§–~bN?teEʘ_Iï®gAX¶ ûK«¤±bå…Œ¼`š^=¿PnéwØcõ.=Ñ:IÂ^|åFìµ>jt…×¹‰™ÃhVEדڿ¤¾Èk;©ðÙ¦±«hר*…K€x“øó]™‹Û€ºí®it÷!Ïq3°Ìݘ½^§Ý¡=é öæ9îüö+YMz‰2#òÉNñf7îÉôãÖÛ2‰S/œÍïO ±q§>¬g»aõ/§qwãå…|Ã…“Îxl½ÍdÜ3×¢™Œ;:ÏdÜ N=ÜÆŸz¸ šzÈM=„R!Àsâ;#z0¸Â™¤"²w‡Œ H”€Â%ÁÀèÊH”„ºþ‚ŒDÂ0‘§Î!×wÑÖ®à­1 û!Ç x¤Ã”À{uBÝGl:OMØi‰¦Êu‘GÔ@_# 0u˜7Ž»¾'A˜Lf{§é½ñ¦òCƒ“«МÉÕüˆbÉÕbø¼ÉÕü處ÃD%Vóã>M¬FŸ‘pJFYÌm­.&, ›’‰ @gb°% 4›:¿ÕVí`=+}Ðb¹õ²²c$â’‰B0fIEC¶ÎIDÐxõ nk!µü¨yí†TþŠ~lîŠ$¬"ˆÍWå®áÂ-gJJš5}Ë™­ÅGªß~¼ìš%Ü(a'MßA9§Z–ÖAìÍNãV Dbsj Ã¸*C{2Õnþýkœ½¬ü>v‰0)ÒÜ|wFµ|?ˆ­<à€8ëEófþž…–?٣ͩÎfi¡ô‡“ªô+ZŠSÑ8Bªy@…ìK<.WðˆJÙ_½FÎRö7˜íûå™ IÒ@²lȦ¨U¹_Ji>ªœéôKÎì'å;l‡OêwvšpžÂ±¡CEDžˆbÁ˺ö2°Qu£Q~Ô·R×AË=òö%µÃ[·Vó:9l#KØ^Óú1‚qV¼#V±O (ËíU I{ºÜÂÉcÚOŪËKÚèß0âxå‹3_âÞÌI‰à—ªOä—¬M<žmP¸&.^(~$X‹ˆU‡xÂËwsí¡ÙÆZ[ÙÛí±Ñ÷ºZŸLZtˆ=m<1ÛZ×è :­ÉD'v«ü#lOjT‡€qBÀån0„¿-ðMاù‹Ø>Ç“Ð^–,b3 áýw¬&Æ ‚ëÅ\Y$Å2{egw¯{û„ý¾qgͼÆ-ÅŠ’›S­’ôÀq”Èy‰\Ko´f¯¯í®µz=Ã0õŽ©·i÷;û*äv~ùUÙmf³m ڽǻ¥ùÜÕt›Q+[!;ÅO5ÝÎf¢UÓ±OÕ?àÕt;TLÇÝQ´bºÝm—:(kD~)ݱuJ°’nÏD‹Òíè¼n'¸n?!Ý^,h! ´ (¤ƒÃ é@Èœ…tü˜€B:(¬ Œ® €B:(¼ŽŸ?Ï›sC|.[vƒ=x„îKÁ\*vm‰¶dÖ‚soÒ×µÙ¹ž÷V „º#xŒ&c‰8!ó6÷E…À#€;ž¹ ¾Ï˧¾¡Ã§"¨JS[âÚEVÑe]žá7Ù¹øèÄÁ5ñCrã’Ûè4…=L.q@’š×Góúh^Íë£y}4FGs¡Rv<_);TÊÎ )eçG…”²ó£BJÙá¨ü¥ìüØÜ¥ì0HX);›¯”Ê]ÊÎ…[Î(•4!ÀRöýx|¥ì|)•žÁäž½ðÏ\Šv˜CT]E;b»+Ú+ß¾9+ÚÛ¿«˜Å;`rãùŽå>ŽciˆÖ4@øgäêCq´¢RöÓˆ¯øi „ðá´Âbxi Ê (Mk U<­p$õŒöJbd¸JÝØ½ËU.<Ûê*—û°ßÉRDtTøm+¬Ü†Ño ÝÖãf­±q<„‘w7g3Ô~S_ˆeîž°äö–`ü€Ë3~ÀElgühïfü‰Â™ÒKœ¾‹„ˆùÁ73~ô»XøãÇ€´ºc«3ÐÚFw uôÉ@ëw­=¶mËè¶M½ß+?‹‡÷ Š®žø…¯œÊªáQÌïÕwgïÑL‰ÐUL‚LûPöêˆ[Py“{Y A\¾À7aŸÞHðžÄXÞ$ÉJ¿Zså­ã†?£Îò&¾h/Kô{‚CÛ™½cUÊ^\/æÊ")_Þ+;»™¢?žj¬7ÀÒº¹á%f‚ÞHˆWYº@Èœ,]ü˜–.(Œ¥ Œ® –.(œ¥‹Ÿ¿¸äÖÜ:[rO2‹f-At>``?6??&˜Î‡Lç€æ¤óáG£óÃç¥ó'a"‚è|øqè|øÁat>à$+dDn:,€Î‡Bç@Óù°% ”Ξí9ª}øÚ‡¯}øÚ‡¯}xi¼x>Þ ˆ÷ƒÂû(ð~ð£Bx?à¨ü¼üØÜ¼0Hï›÷ÊÍûqùT§“©T²£0‚“Mxià¶o :­V«û¸0:”Í»éPöù°|˜<Rí³Ög嚤%‚Æ}T>P^ß”_99Uú¿#4N§Á »·è „$ËŒ”&Í•²Ÿ4ŒœUR£j£S8ød¸Éï®’&¡º’2Üú$üL&$$~úH´QÒ';5ÉÃû VÐ|¡/TÞèã>ék“®cjÞ¤§ g QíÞx0nľP—r4b¯_+¦g¨ˆÈ3Q‚É. 
Ìd±É ÿ»Š& gJ/‘önôLôŽÀ’”§öîDÜ“m~Í›¶â¯ â ‚Ø2ö*¶ÀŒæ‚gSx®‹{#“H»ÝÃZ[Ù›=§5!d¢é»­u¬vK³Ì¶£§ÕïÚ“nKo÷Np¥Ä¿-úÍrEMNRõÅø ÕL;›³¡¦ƒâÑ•·±¤#»š}(É¿“ (y‚ÞH…ƒKž@Èœ%O€«þ’'(¬ä Œ® €’'(¼ä‰Ÿ?]t€„¡J5Q/ŠVÁ–ŸW¦]ÚO¤°p”•>Ìrƒpç"šæa0¥íègb9žë“/,U?¹w5uyÔC’\ò3púÆe«ìnáÝw–¤ZØ]È« äÜ–ÎXDöÁ»ø¥5B@2QqYëBvÆk°Ë®(f¥RÓ;9ŠÒc;=ôg[ÒTif}ÿ²§‰íê‘¥ETÌo¾uc¹žEO¹°“¸…ÏSú¼6!G­™…`&NzR‚'×~VmCòYPüáý«V­s=ëO:“kûJjchô%åMH° Rܹµó’Eݹ÷ŠZî•õÓe–†>¾I•’HÒ®âxi–ã0gøÕ°¯÷Û×ÈƒÜØ‹4v¶Õ$±Ý¤ÿb/ÝеÃþްø²V\“»Í`‚*aÁµK´ˆØ!‰5vQ‘¶')—l¦?ŽèÿFÌÑ¥ÿ®¤aÁœøÑ•;‰µü¨h¥$ÿ¯ªÚÚ²˜SÏ‘X³WLa‡Íæ* ËTÖhV<Œ¢€õ^[„^ôêþBm^0›vñpÀfÿ¾xzľPÏèóÍ,ûPáÀ¥HÊçŸþ¶˜À’~ð†„ãôsS_¨Ë¥*u.å³êάԫþsÁz4WCÞ BwêúZÀÑ’¥1Lîøe/…UĦ Y¶È¤¡’ à^º„õmù}ÉF§J—¹<éõwx‹ù•Ùø#Õ?Ø›j]7ú}½·õMµÍg5Öÿs+fw/êc×`×Úß&®ŒÞn?ç$¦ˆ¹0ªô-åwú¾ša}:·knVÕ³›yu™óWÁDJ•p) ]¢ÊU<޳ÒÔªšfÍþœ¨KZ«LõÃsóäúÔ$ÑmÞ'vÜxâö6¢»a§O·5×}Ø5{í¦åßÙV7J#›™KþáúÓFd±Ô¦*z¤iIyh5¢“ÁhN»8U4õjºŽí*%dÇÕôhï«vX¢*êÏôv´ðÝïU{R"]•F•è ÙîwäîØ—r»ñô¸ÄÒú˜hªC×™Žc6F[r(*[ÿAç”Ý[ºþ*whÒÍqÃÚ™­ÙRQñ„Ó°ŠmгKÃæÓÊ÷“¶ðé£”ÏÆ¼åVæ×ÿúyôñõ¯o«2p7–· ïÂ`é¢ÈšŠ½Ïv#KÓ|ÈŸô<ßÊòô‰F¢•5dY‰¤å©ï”‡uS™½®ØIͤÊÇqtZöx$cë`ÄZiÖCU;•vp™}}s®Ö«½>š=û£Ù3¾0Ê •ÏßZ^cUÒCb9Ÿü¤f,¤>ÔWÚnIÆ6OLzæs}Ì5Œ7­«ŒèÔ¾Ë5í÷Õ¸–BÔX¾ÌéΈ½¢Ö­Ä.ˆ°ßÉ]¥Õ†r¬ÖDÞ<§g´)®oPk”Vev×òÔ<[tTÜvâ¡á !ÙØ³ÀÜÏ6B°yŸm„`Ÿm„@ŸmAs=ÛAy¶QŸïÙF:ß³ DÀ³\ð³pȳ\üµx¶Ëýl#•ÿÙF*ðÙF¶„A€=Û?ÛÜzá§3N"iÇÄC„5~Ž2eóCkQiQœÆÆZTºU{t¶ôø °•*±?÷€µ¨¼TÎ-Eù‘X‹Êë¼<í“‚|‰ŽŠÛN<4$$œH8Š\‡Œ¬ùÜ㢸À‡62`œÖdªóD{I!øq 垈­e´<¼L/ü¨7–ç:èmÍQG¬+žµÖ‚ý,S0HFôâää&ÈÍE!Ìcêb‚ÆîŒÐ0‡–^·œ))iBT—®”Mµà1æ\/ÉÍ|ã–zÐRoõ*HXhiÅzITî‚wîõdù~@7ƒŒϵ+j!£, }zf‰Ø­dÎÅÂFÇPj§e2_©}¡[ä,©Ë‚ª÷ÇÚ/3’¿t4 $Йå[Sâ¼Ëéý¾¡M*®ç/mñ=͈¾fÔé üK’Š+ãx¤N†è ƒŒà–*ãg2!!ñmI=é© Ž0X)×Ãû uá:êðB5ì‰>6é:¦ÖéMzÚØpÚ`Ði÷ƃqË ö…ºLÚ$¥Iø¨ÆŽê‘D ÛÎ'&K¨Ð±˜T)9Æ$J,²I”ÂK.Uj¹ÒñÑL•gÉ6@¹á~`µªFÚ:¹Õýòã§´'ו,s,mµp^•éAA' Jä·Še™u+åݪHØvÒ­ªðˆHK¢e9—jw¤:@9kœZ¬«Äf—øÃiõXæñJ^éÂÎwZ‚S¡Ä<ýºj‘ç Ï;¨ƒ{W­ð Í×ÊÃN-gÅ“pæúI”æWztaCžæ9¶ ˜òG9ÇÕXŽÊ­Çƒ½\%µ&F“%#Wi2ÖùÜÅ,¤ô@üʾT&¸Iò¥TáP®û·Ú©¨ŠSr*òâ±g¿™­•ÑU›•]¬Ê®’uüõÍù´Ã=®³;ä.W¹äB½ÝóÛcë3I}&ù!Î$O¨ëÊs>¡¶H^k]«OÃõiøÉi8«`©O˜SЦãßÞÏ·á¶zz»ÛÙôÙ6•+0=q¨Ýf|ÉF_(ïéI†dnž+táï¬çEª­e¥ ¨NZ5’*š?þ0"ºHHSxÅv7Œˆª£•Ûì0â³Ù…€aÄçµr‡kFÄ+F”܆ì0âs0ã;9a×dºQçe¶ |eUší«9_O­ÝÆ,OÑ-.E®Éš@gÞñ£Ê6ñÔ{Š­0®Rdd_gá‘ðc5&<)÷²¡ß½¡Ç ò=®Jh‘ñ2«à‚'ú>´lrž¼;Å]¢ˆÑ„ÖGù;Te»Ò£X5;a•›t¥›óAbS|Ì]'~ª¡›ÎïfÅÂ×ËîŸR¹qXgw«í@mj;ðCÚ5Ðçl¸yeë…YFxµÁˆl#ÏѬ諟³à'?Q½‘Ì’†.Y36:¦Üê§9ñ_Ÿÿ¢½OYï‚PE–ÌØôÔß8È&á"õèVV@4Qˆ“€k3E8ûàBD8ûD¥ìçìZ”ŠaSÂòpùé;KÙBé{bâîg ½?£|&C¥o÷"’©P+.èS×§Ä“d,b‰'ù:çD«Â‘¬ÄLx¹¢økhù‘›“ÖV'7Ý]«“9KSƒªºO´¢ÌÜÈ¿ìZ-é .¸xHõÊùÌ•­èóŒIÔõ§õš®×t½¦çÌQµ ®Ý5[-óiåÓîqÜü-NÖëqòx‹ó¾HÅ~/O?q)‹ØÏÕLl‡³& ]."ò`#ìü®.bFjLB-ÿ²Dp¼."ZŒóÜôä)®Ô²! ÙTi÷vU@¢p´ç§ï<ïû1ÆV®WfaR£¹•Mfšg¥ÅÄ#Tdˆ&åéKßÐfñWõÖãYÅ!o¸þ$´·Vl_5oZØ:Ÿ?ó…‰|:Ëü:†-pá&}âyná¹®ëÜZ´ªÓí:ÖÚʆxÜ7ǽßÒ&:£“ØÚ€´é¦CZ½A·=qÌþ¤ü#l‘*£2÷·? 
aê:Ú®œ rããV“‹OA -,êÑPÛ÷‹ý]ÄòΩ x/HÏwXÂ#3GK,Åà|½¦ ·:D<éÕ£jWÿ‹Šî¨?z%#‚§xÍU³Ó½úymÀJÃx_¼šÎË3øìozämï«—p1ž³Y•éLÇÖG+ Ï’këYÒ™\ÛWRyŒÀ4¨ÞÖÚ)¾Vñ }ïåD¥2’ªoRÐ ÚUÏ#Ír擾JÈ0Ô3‰òb/ÒØË¯Sñ±¿7ì0–.ýšÜmN!U¸×.É ´‰ë‘WOÙ/›YýÀ(Ëu—Ù `NüèÊÄZH±ÒеW² óCsêàköŠ)â°ÙôÛò®‚(fªh4+ê¿CO¬·Ú"ô¢W÷jó‚Ù ‹‡pû÷ÅÓ€Ó…zFœo.Ù‡ 1˜(ýÀ4 ó§¿-FhÒÞpœ~nš,U)ý¿”FÏišÔ?¬wAs5ÄÍ t§®¯ˆö‡ÌéŠd©¸ú”¿I}í±)‘%0÷, ]”%*'k’·3ÜKC^ß3Æ¡„„I®È|z’O•,¬@*ÄȤTiÒ–'e,AâB¶r©M‡Ü4 ¿Ö¼`ªV׊•zG÷~Yrñ¬…(R<ámV+²•Þ—öÙó™»5·ªÙ[/Å=9ŒŸ}pV|WοfÍþœ¨¿/óˆÁdÐCMnF\ŸšêúøÄŽOÎèÆnØÞ"¢j$ç a×ìµ›–g[QÜdÖ'H¬O3³GÉ?\Úˆ¬ÙÜ#RKÚ˜6éZ®–œ1š'¤%2™ºÜn„c[¦£«ë’{råH!¹ýÓÛÑÂw¿Ë¨ؚ2\¶êT$U§Ìv¿sⱆ$˜’f»DS-}!ø¹ÆÒÞ©õ1­>¦“«OmbeÎýª´¾vìkÇ~íš…]ÛJÝú‹dÃÙÿ6˜ØÆ5óNkM_¥9Yó“[ƒÂó1² ÷Ã)¥E=yg¯•H—¶!£x_q<žÜ¤§ùV’§Ï4’–Þ€¥T Ë:üÏã²3;ú\ö¬{õ…àñ]Ω-×oÏÅn·‰ètZ†1xÜ)Ž;ÄÍ_Ä0"ÂÈ»ˆ0,äp*.ópŠÚ ÀáT\.äpZJóî¥!‹NË NË­Ú³ç1-ÅÃTeó³–â+[já©’8\S³ã訸íDÞgÔâÓê›ô:îîóIá‚?úÀå‰ÂÅ<~èa•Ô°úºDè;""½ê©%(+.H©"÷½åÐÖ‘>¼Ü€¿ßËÙP@i:’\%>JòRf]]§î´tyû•ºFÿ]:ÿI¸-ø[™wI’VóÓé–_92ÕÓqnóÓYÇen’V²l ^wå`-Þ’öXo|ßšk§IiÇÃ=f¡¡á´«< ÂÊVùøl¹;¥®±×âùá¸kTÝÈñ¸™Qü _òËãÊ-]êÏ£šñÃ}!±rÁI}¥u;Ýq×ì8ƒÉ…ª\Y‘-l›BNžw§ä„0ÄiàG2ÊHõ#¹}hùŠKý˜œ¯õk¸@GÏ©$ Ä©x¦q›?ÌbcLsXl=™‹í硺â$Rü ¦ëç†(3×wg‹™’q ¸žßI\=¿¦ârnÈ"÷Š´ôÎò"iKèÁÙ®ŽÌU7’¢HàÜôZü›éNÑПp›å½§Râßø&ìÓ__ù§ò­¸+C%µDJŽ”ü¹ Se|§ÄWn¤$4ˆJƃx©üÏÕh/ÿÝÖg±]+s+¾*Í`7™ù§ºÑ DkeMöѨ9³\¿q7ó††ÞÛÎ,$Ž;¦;«ò³Ce°+‚襒µ 1^¸ý@ãº)q ºHÒ éśԨÛÞÂ!oŸÇVDFÙ”°¾+“ TVÊ«¿)?¹1™½ºÁŽÆ/†Ê‹­Û‹3åE’CC?õíáó©!ù^d©)Éï’dö¹¯oÎÙb+œ’xTøÜòrùò û%dd¶jè£?Õ(ìãÉÂjËR!è¼׋¹²`náþ&d”_Œn%k5ó´ê3f–Ó\BÙÿY©ÿß”·_¾¾þû‡_¾üCùðéÍëÊ›O?¾}óõ—O•wŸ>+¿}yû™8êºÙ£ÿ¥ãÈü÷Û7JsìúÍèJÑl屯åÿ=HQþúW%ò™+ú 9R3¦Œ½“5»¦ó¡hsEýo%iËšj4ò¿Ä³¹òßêêó{?œÿP£׌žÙmf¯ÕnôÍA¯ÓÓè? ú_ý¶Ù ZíA»=èP|Ö þíW²šôeF¬ö¿%«>•˜¬ß-Ãd36s;‰¯ä¿öЦf’2÷S—þ<ëK“¶±1¿CÑÛóß¾rÌeòÍ0ÌÖ8¼Oƒ›ýºk]ÿçÖ¾(_?¡êCó5ûŠåy¿{úd…ÚWt<•ÅÿúŽÛx¹C!ßp5Q˜ò_%?ßÅWß>õ^…3E›(Zˆ<×S’DLÆ{©´þöW¡#­YpM½¿Ug.™Y.˜£ÚW–?%ŒðzÂÎóåÎsªëßÐÆåO|”^§fm”e!…°­¹;bTz¤¤0Ðþ¢ðœøÎèÊŠ®p&©ˆœ¤üabÚÖˆ‘'⎭í¹Äå£+ƒ½Ê+Cuýþˆ„aÂDŽ9uôèéú.ÚÚ¬Ö‚a¯žxHI˜Ð¼ôÈð'àEQ¡kÕ<°1¦]h¬Y9Sòšœæ<Ÿþ¡R©–è·<|ÐbC 烜:ªåƒÏEÉwQÄîxvç¯á¸˜¤á#ÁUiwŒiA¼enâ’æœ¥{›$ä…tÛ¾Ómm³õ¸U3¾r»¬€ng£6 àªÒJ¢¾…ºEt1KTÄKÜqà~ºE¨ë[ƒ˜¤­,onq–8c«2J혞-=Ë‘óÜÅÛ{àÇžé3˜í¦WÐd”bÄæ^¹ŽCüQBœ€<,‚‹èÎæôœøl(>ôHþ"Jž±DDÏŽB˜ˆ‹1É’–Qq?}”eäAžº×²4D\üµQôX1aƒQÊ쎊:·¢è6‘5Œ­7ŠG2´LÆ 0Èѱ‚­¶9gÀˆ? ÚzQ6( ƒæÂÀB@FÐ_;HîÞ!;Û|Q˜ÚÕ®b.JFaÄ Fa`' DÃ…WFH;ïå  QqI¼Q˜ê¸JW©ã*‡‰«$yíá(r2ʯ˜=å(´‘ãôiUj5HDö±Êóã^mEl- àŸEo,ÏuÐÛš£&7÷Ȧ[+yÞ3Æ G«úFü排ôŠÛÅÝ¡a%-½4n9SRÒ„°ÓÞU%Ó‚’K‘ª©!GÙžd %œñÌÒÊ«²åͯvžÝ©OmœlÎOŸÍ?›|,ü¿FÏléæ`Ðê`Æ!äæÐHÑñ<*7cÓ#ã*¼¿«˜aë;*'ÕǭܷŚܙå[Sâ¼ËïðNÑØQDyaÕÊñèkV›ŽÇ¿$I¡¸Rr“¡´(nA†ìxîJTc?íU9“¡Ðns¿”Ò$I=¥½”)®r²R.[#ò“›ìà–:ŸÉ„„ÄϨ±OxÖ¯‡÷êÂu.Ôá…jØ}Ü'}mÒuL­Ó›ô´±á ´Á Óîã–Aì u)qÖðãŽg2¶)‘ö~òz¥oy_Ck2qíoí½<µÎÃü'¯Ì¬#T›“ üÅ*€žè÷×7ç™úJkC%ͦlµ]J—”]tT!éá£ik÷’lš»&w+އȔ’pý½ž$‰"’í[–H%a’—G}sž@ÙLªŸ¨ÿýúü-cê Ð/Í©ƒ¿bRÃ|>½©ÜÊ€7øý(oåëcz}LÇ:ECâËGr°_kÑ©œNÅ çnÛÒRÓ[›tÉ©—¸IØÒóÙöå±!Iy9‘8w‘ªª‘<‘ØHßH¼ia«ùꊔ‰|/% ¨ú4É…›ô‰'pô\—ržð^ЪN·mb]Ú®†¸Ý3H·GÖnw´ÎDok–î´´M&“Ag@ŒŽY~ˆ6+Ü4TÕNŸ…úå<½Ñ2zNÃhc­Ï¸fk½­Pا]nû¶ÙêôÌÎãFíÕïÍ_ÛXìµ%™?Ø¥bÉ™¿³f®çîß>¸1Ü_Îov_øsÃ]âöôîa(¿P‡Ù#_b˾ÆN®´mÐHÞ£! 
¥iÃ%ð¦g CÒ²áð tlüò¨oÚ5lÁÃSQ`øH©(ÜB—h£ý8ܪ~ |‚eprÂó7« »4.BÏ9Ÿ áï¦XÎß-Ï¢çœpoPƒ¯—¥Ä¿-ðMاÏ?¼þ/åóÛ7¯Ï·3 CþõrÅ1ÈzûÔÖ _mEI>•Q ¾ÒÙ'~H,ú“±GÒL,×Ë™ÑiG×î|Nf(=Ø‹üCîÔÂì2Çöimõgã÷F¯Ó5;m>Æì­ŸN(¯SÚIå+c þ’¬Håí {lâKì|¢üiÇAüLù'=:¿*ž›Ï”M6—0Äæ“ ÇKEz/aŸ®d]l•.°.Œ²ë¢%¶.0u°bMÿ™ŒSås’¶êÚqÏæÜ±sϺÁµò7+­=åv·¿Y¶VQRÒ„”Lú•’¼RžsÖæ„ÖödZZÀ‚êZN\+´¯}*ò[J]YáJdsu©ñ¢Lß24%¥ÎÅ{·,ëî±çŽs*â†ÑjFnL› ­)Yqh#4å…{­ûX¼‡ƨðë #߉½ˆ™!Ù20Œãø±ö•—ÍEFrKE±ánt•ŸØËgtéÚŠÒV(vçLI0èÿ½T¾½óF1ŒF·¡'¿Ó{Æ@ùé3Õ©Xqö ­õòRùi3»ôË-þÃõÿ°ÖÜè”À£šÆ^a Hìõ0a ” ~Û‚e<–¶°”ˆÄ Ùýb®¸>3‚Ax§dtñ ¨Ï@ ‹áy¾2 ƒ™",€m­#ÏbjÌmÏõ©žÌ­0°"w~q× ¾OÂæŽ­¢YÜ$žîÍí»ÆjÜš¬Y›—6Ìu’w¾%ýwÄÌÜØlýôRIkØÄ§ÍÝùÛ{k?jßÙ‹Y‹ oÑñXó9… Ïé@Òñ<ü8>X¢ôöG¹u©+áúnf™„{|ÞcD;lyã#ù¦…|$#µ(ÏT5\Ð×Pjñµø½ Äò¼1uÓ³•¿2}ÁDaʾw¦Ü´¨ç˜.˜Ï¿z'—”Û©]ÏÆˆ¶Þºý>r܈¹Ùx=e¦9­:²úKAl™^}a‡µn½( ¿8cûß-Q,/$–s—¾pjå(ùw˜â³©:„èäéVË;„èÀ'Ìó‘'qûû§Oÿ*Nß²úºÑŸôF§Kȸ;h‘îxb÷lB:zW‘q£¾K¾+lyŠNì­ÝhxWfP«ÙÜ7Âú@É1ëëÝP?¤Ò“£Ÿ: C%Š®„qó댌R—–B-q—(¶¦tÆ~zAÕöÅ™øy~å;2¬º²eú@¾Ç¡5º¡î8ëÄÿ­¾Ä¿i&`m(Ó“I^ÓNˆ«»¡°½… žÕb¸Ó(>èUéìÓ_q}O­ì¼ÎÒ¤5fޝˆ’%ˆmïTkJ>;ÜÆt˜Ó·\+ð—O¾íË}RïrbÙ{jWyàV¯Ÿ­(Ó¿•ÆK0wdX€phÛB2u£8ÍÈg“Ñp<ôX ¬Ä6EØ \ÐÕèÇ®-¥÷<ÝÿHÂ"ªötÙaAŽÙÆÆ*uÜ W‚=gÔtÜÒý•öxžæN"¡…ôÒ©õè[±œ™›ätI5 .u³ˆÏÄ`kGd_gáák‡¡ãú2ÆÃìitÜÈè¡éwâ·ì­EêR4è4¦t¦vµú?9Y†æê8RÝ[ë26,R³ƒ„ì?Á£*Q4ÃM%g¤#b/B7¾“ž3êJgĺrT$‹ê6ò“mz(ØÿÌðL7˳x²f˱WöŒ4’šç¬@wÎŒ·”–Ïgɦ@<+¢Þ5L ޝ”–Knª‚=…Kir6Èiþ™¤a'¬bkR‘*&eIóñ“m4smïRW/m|H¢H®6:¶ïÒÿ·'ȰÁ õ‘B´ãåµ;¦«J’ÞÌ,ûÊõå,¯ ;Oø”ç<Îߥz”,€Kn{xŒ#äþÜE$Ó÷ˆ,ê–ÉV`åXæ…µr6õ)Ý*ØÖåHÈN­ÔGNí„LÃO'!»k$Nr¿Ò˜ºñÕbŒ)dnÍÙ5B㻆»KzÁ4Š­èJšµžG² Ý©”5iÔ[¶Æ®'ëüÆž{¤vÂ!7¸ñLy¾TÄjËÓm1%gNo¯¨Ç–Ü•ôuη,×IH›.ìx’µ5µ Ül™ÝNûœ„!mû½ê‘â©CI›§² $uÄïôwt‰%ŒKjz“K2‹¦ôŸY±–þX!ß]jC”haÛÔ—œ,<ïŽ~ô`L?j˜ý~·Û3Œ~{ wÚ£«Óߥ5ðêÖëÝì#ÏþžåÖ^w[Þ“š¿ýµù{ÃgÄ‚m0üÐWÄ›Ö-?páÖ rÛ­¾Zþ²²\ÿÔ¬Ö/™Hl ÿ&ìÓoóÏ©w+Q0#Š“Tâ®ö÷ä&@{Yò6¿'<²ÐûôýsžXµÝ·Þ\š»Æq¥üË ]VÜ!,9UüÁ7]gJ†P쿬ٟóјNÚu¶®P¡7Lþ6l®R†ŠnãÉÒˆnìÕ.šèÒ°köÚTïljÕ›l+ .‹f¶9%ûR#=)a5žøL?œ¡B7v4Pv¾“ Gv°ðã¡B7]]—¹ÿ!Þl÷;XðÕ纂90Œ#QÖe ‘¦Ž‡Ìó‘F]øî÷Q6™}gs,ô4S• ®ÅrÛþå‰m³¬Ò+±õJn쓺 $p{\Û¬aÑ> Ê[ãÓ;6îìÉǘv˜¤d’Ã(yþ\0ß4h&îišsM5ö•!»…Œbµv5×@ê3p¬‚Š×@ËxÞk wà*ȼz`¬ƒO¯ñ=|ßï¸6­ƒg¾$IB£äaÎt µVi¡;u}-ùtúŒg½ J¯‚7WľV&A Œâ´€:¸vå {ãnÇ‚¨x´uÁ¡­¶F}gCþÏj)þMyûåëë¿øåË?”ŸÞ¼þ ¼ùôñãÛ7_ùôQy÷é³òÛ—·ŸéÍý/Gæ¿ß¾Q’—èJÑl屯åÿ=HQþúW%y,[Ñ_È‘ø“²˜1èõ˜¬Ù5E›+ê+I[6WçÓ#”òßêêó{?¼ªi¢×’ÖfÏè7½n»3hÑ™¦Ö2Ûö Ûë·z]³gR¬E °À×_IkÔK”I9š5’ðdÄR ‡ #êè<ëU3oèžòlnM>ÿí+ÇÜ&_Ð cн¢ ªÿç÷€ýÚ÷îÚsv¥|ý„«ÍäJÇò¼ßGØý}²rí+:´Êâ}GîB#"ß®m&Ñz} gŠ6Q´{âÿ¦4rÓôž§´þöW¡'R\Þ¹;š‹ä9 ØÄuK¢¹þMÆA‡ãŒ§æsd…Sï~5‚ׄQ8³¹>ýá|[ l[#–©‹ÛZÛs‰ËF^– ß‘›<¤6ÊyÔ“šÕK¬Kx×qˆ?šä/×#¶š\DwÆRrŸuÊw%á³ú \ôÕÓ@ÉkÙ1‡.ÍøÁmmòÚ¹Ëß„X£Ø×2âúì•xQ–ÐËX0)AˆÁÆÛw„2*3þQ<’¡42! p]Ë!!òrgK<ÓEDÔËsÙzɦ…Üâ[Ëq< 5H¦ Ž›yCÈÍ%®)Åíb‚²×éG(¬ÑÒKã. 
ÍŸ‹P²rð|ÏÉð÷¾× A+>x2Êö ÚÚ_ÿ?ãöë¿?þ{üÖŽíÙÇÛ_¯®Éìûùç÷Á+Eðò¨]®wqÁãL[˜˜¾Ò¥ÅÖl¾ù5Oãw¤nžAuÞåîé74h¼µRź)÷zsII{^pEÏÞF×Ô'2{J.-`õÈýƒº_J'©¼i#ŒÔÓGd剫döSC>¼¿`ox^¨Ã Uo º­v»¯YÇÑ:æXׯ¤kivŸ…ñúºmšæ…º”§%å}šêõ/{XLˆHhsñyîOsâ¿>ÿE{Oü½s ˃=Ð-,fû#ÝÆï*ª8¼i¾Dt7*ŒLT¿Ý}”®”ØÞÂÂïx ËÙö–wàíË[XhöØ4Ï>õ#¬úo{·º:æúˇÜè~Ïr´–Ù2µNß04ËÑÛZ×´º61ú­®q\GÔüÁÏOsëÏQk9µÔù7ìÐL¸“Ù(ÖÚìý?ן–¿A>Ï!3â?bO2õc½ôsú¡++RÆ„lÃíºÑï?nª¯¤7]г ,A‡á±dLú+V^øÁÈ ¦éEõ å–J`ïÛ»ôüë$i~ñ•±g ©‰Þm6K_ëæãN°%ü›°OïRèõTø/©çòÚNê‚¶é7´—%ºkmOŽì/)ß‘_&V¾£¸ÄÃå;¶ô†ž¤Üô5£Ýh†ÞéwMsÐj™}0Ø—ï¸ç믤5ê%ʤXó+ÌrÜ“ðÇ­ºe¯œvßðF6<ÁqB¬§¼au-·q_ë%†|ó…’Öx|ÝÌhÜ;Ý¢;zÏhÜ NAÜÆŸ‚¸ š‚ÈMA„R!Àsâ;#z@¸Â™¤"²w‡Œ H˜€Â&ÁÀèÊH˜„ºþ‚ŒDÂ0‘§Î!×wÑÖ®àí1 û!× xXÔÀ{…BÝ—Gl:O0MØi‰¦Êõ“GÔ@_# 0õ˜KŽ»¾'A˜Lf{§éýñ¦òCƒ“¬МIÖüˆbIÖbø¼IÖü處ÃD%Xóã>M°FŸ‘pJFYìm­.&, #›’‘ @gd°% 4#›:¿‰ÕVí`=+}Óc¹õ²²c$â’‰B0fÉEC¶ÎIDÐxõnk!5ü¨y ‡TþÊ~lîÊ$¬2ˆÍWå® áÂ-gJJš5}Κ­ÅGÊß~¼ì%Ü(a'MŸ‚9§Z–ÖCìÍRãV DbsŠ Ã¸+C{2Õnþýkœ½¬ü>v‰0)ÒÜ|wFµ|?ˆ­<à€8ëEšòfþ¤‡–¿Z¤Í©Îfé¡ô‡“ªô+ZŠSÙ8Bªz@…ìK<.WòˆJÙ_ÅFÎR÷7˜íûå™ IÒ@²lȦZ¨U¹_Ji>ªœéôKÎì'e@`A”¿  *¨ƒ!óÔ0ù ê   ‚:80º2ðÔAÁu|î|o~Ì qºplÙ ö^ºÿI,ó«Øõ%Ú’Y Ò½IŸÜfçOLxÎÛ%êŽ 2šŒ%â „ÌåÜ‚îxæ*ø>/£ú†ŸŠ *Mm‰kuZE—uy†?:¼äç%F'®‰’—ÜF§1(ìÁr‰’äû CЏ<«W¶ºi³ªv~E°ÿAìØ²©”¨^à'·ÀÑÐpÚµDóe!õp`þŠz6wE=ZQ€†VÔC ù*êˆBõ‚øœõtΊz"¤¢€ ¯¨€ƒ*ê¸økRQ寨 *ê!¨ÐŠz¶„AVÔ áõu„¥Ž°Ô–:ÂRGXêKa©#,u„¥Ž°Y„E„RÏE©„Pêp”:T¥@©#€ÊM©Àæ¥ÔB‚(u Ø\”:PP^J>ÜrF©¤ QêpàqQêp&Uz”–t„Æ?:H]€Ü´1µÈ¹nÎC—Éï´Ü«ÚXéÖ+‰Âl—–?ÈU¬kl«@Sjßæ$šiý޵ABˆf`“„:HM´à^XÊÞ‚{AgŸ»à¾„Ç!†ÃYl_R@Â<³ÓôR¼Æó°xðQ%Œ£<üŠ< ȧ¥!Êï Ó£ ]â^A¨. Ö ›{-,ü¬°ÙÒ]³Óѵ¾c­c‘Ž60;Žfõôžc¶níòó‡°x¯Upoò$[e_Ÿ\bÞzI‹ªJº&Áì¼pÔ”änû¼Ñ¡ÿ17aû0qÄV7CŠÄ.¹‘v/}n˜{´nñ¯W¬Ø¥lBÎð{¹Â|ùq°AàZ¶ð8Ò…X¥\;Äå |öi%Æß]ßqýéf h/ËRb˜‚C[E…ýNñr^lÈÄ"–—xPb‰Ùn :´CÝ–©µZm½×í Zf‡ zÌ»¾þJZ£^¢LÊ5ÿx¸%¸U· ·ÄnþÓúƒLÚBÜ»D.!¾JKKìl½äÁo¾°è%ެ[âü»§[”`b§%˜Øëª&ö£ñLp¹¡‚ nïB0L@€a dN‚ ~LÁF0FWÁ N0ÁÏ_þ \ܤnÈÝÈNs˜Rx/Uá‘…¤R+BÇÍ~£àáÒ±·ƒÙ<ðYÞì’K¾Àý –è9“L6&³h“6ì6¯UÔ¦,q{&P2’±ÄŒFÒ…ø9}©9 ›Åzwd•‚gˆ=ë¹ÍÂÁøqV 6PàÍ6AI•Ù6ñÇc,Øó¼{‚2€ÌüØüÌü˜`f~h03𓙀QŒ™@ Ÿ—™€—™€b&àÇ`&à‡1ðã⯠3ÀLÀ a& ‚™ ØÊLÀ-ÀLPÍë£y}4¯æõѼ>š×GóúhþŒŽæB%íx¾’v ¨¤RÒÎ )içG…”´ÃQùKÚù±¹KÚa°’v 6_I;”»¤ ·œQ*iB€%íûñøJÚùR*=ƒÉ={៹ •í0‡¨ºÊvÄv!V¶W¾}sV¶·W1#ŠvÀä ÇóË|ÇÒ:½h€ðÏÈÕ‡â„é D¥ì§7;_ñÓáÃé „ÅðÒ”PšÞ@ªx>zá&À{v¾v3RËÐÛƒžñ¤°ôa x6¨m(hšTV];÷ÄÊ…gcårvÇ]j[B2>ª„!b¬ad9 ±Lä¡Fn¯0ó‡ˆ, 󇈈íÌí]Ì@Q8Sz‰Ówxˆ¾™ù£ßÅÂϘ?¤Õ[Ö6º­£OZ¿3˜hí±m[F·mêý^ùùCX<|WP(}õ¼r*¯†G1g|WK€Îâ¹CWIP 2íC¹«#€ ò&÷²‚¸|oÂ>½‘0à=‰±¼I’•~µæÊ[ÿÆ FÝßM¼Ð^–$ è÷‡¶3{ÇJ•½ ¸^Ì•ERüWvv3E<ÕX²–æpͬyãΚy?ZŠ%5о¡ò'— ~uÿ‚­”CåÅVá/–/÷TLr誚×fO¬ÌE0¡…Q­²¾VíÇÛ‰ƒzÞ|¶Úœ‚bïçˆ8±™v4Øè&Š‘\Ñ&µô?»áP¹PÓBgwlvr=¾P/|/˜~ 7ÄcŸpýIÀ~veùŽGÞ²b¼h¨| äÂSå¼~{C—ÿßô‡ïØEâ…‡TmæAGà _Q4…)ëP‰ègIÌ~¢(étÓþ¾`+)rR|\ÔËô£©Ü0Åbr<ÂäF>»ϨÄÖÏŠb׎ˆÚWå²õÛçLôUÏ£a³¹öyDûÅpÐÒõgÊ‹ED¾~øò"í:ýwìEo’’Ê7$Œ“¤àÍ䢶Éî˜6ýMñ£ÿ$w[>yM“}ÒÚ h[¸/I~ÀÇÌlløê“vHâ­ŸdØYïÂÂØòŒWöã¹Vý÷7ÙþnE®ýz‘Ì:aˆ'£3|Ïhµqa®\¾`¬ÈêÍÝ­ÇwqÄõ´Dùµ'Öû\…C†w.R¡¤Î®v[êˆÌG7VKaû}ÉYðð %-BqVç‰mNŒúCyÚ?“¹Ü­;Û;è¸èiºÏÊÓ–O›´G¼Â0ˆL°2HÖj hO»†©¦®kƒÖÀè·ZÙ能|a;¿ýJV“^¢ÌÈÕþXØÂz[†-lÖ¾#Ó` Û©²°2+T˜,lwãå…|Ã…Cvl½e Û3×bDa'&;rÇ“°Œ×}ç&,^bñqTAAo$„e@d.Â2&7aBX&Œ® Ü„ePP(aŸ·*ºŽi×1í:¦]Ç´ë˜ös‰i£m ²'`n²'6/ÙHö’= ¹Èž ˆ"dO¢ø|dOég˜ˆ²'.˜ì !{HÁCFä${Ár“=APùÉž@¨@²'¶„A€‘=‰ä~¤cÍy}¬9¦cͶ©3º=]RjU‚vhûæ!:  ‡ 8ï!„*ç$ÖÁCl¶QtF„VÏC«Ðê@pùiu ¨ü´:T~ZT^Z6'­B«Ææ¡ÕƒrÒêpâ–3%%MWÖW7Jf½Iï(„?ˆ‡?ˆwè`n=&ê_»ñõíÄ©9æΕGßçäejé¿c9ZJ c|$‘`YKïˆKÙG¼#€œ±1H9BøKÄÁ'ÃM[Óý]š„pS• àÖ'ág2!!ñÓ'ä划>Ù©IÞ_0R„ ux¡¶MÃét͉fÇ–Ö1MSëZ=­ouzƒ¶n´Úƒþ…º”£xqyˆÈ3Q‚ . 
̆±• …í•G,ºDÚ»¥ÛC’òÔÞˆ{²´yÓÂVü©F„TF,v§¢a ̨rx6…纸7²uh±b¡éwû1é·´žn޵N[·´Á ÓÖÚúDowLÛîµ{ê¡#cÂßýf¹*@')“dœ#ò¿ÍÙPEñbn,ƒÊ.ðÈ+ØÞRµñ³˜íD®×Ú)^JÉ ¸Ä– ¶=sÐ1Ûý´Ì¥Õ¡ÿi·z«ßï½½5ƒ»¿þJZ£^¢Lʵðxª¹U·LÕà(ЦþÌ»¨Ü­ ²AñU*\6¸§õ’C¾ùÂ)<ºn‰Vî›nÑÒÁ GéàSwƒ§fpÿ½˜XÍà~\HÍ Çµ'wÍ gLŸ¿fßÙÔ Â@o$„ÄÁ5ƒ dΚAÀ• Í V3FW@Í ^3ÈÏŸ\ KŒ[ÆP¥‹š¨F« ËÏ+Óûlòú GÚ“MéCœ‹hNlüy˜‡Á”¸£Ÿ‰åx®O¾°ÂŽäþ•zÈ£’$‚èÐ7.[eÿp£8ï>¸³$¯ÌÀîB^›"çÖtÆ"³¸ž¿¼£FHL,.k]ÈθívéŬ°nz'G‘Bzv§'ÿì‚Kš*ͬï_á4±¢]ý/²´ˆŠùÍ·n,׳è) v·ñùÃlŸ×&ä¨5³ð,ÃÄIONðäÚϪmèCª$ Ž?¼…$påªûvklv¯ãÔÆÐèKÊŸ`¤¸skç%‹ºs!îUué«ë¼°oÛwZF·5è=nÍÞv¤‹2 ”ììíf|ÎÞ®$iš5ûs>Z„Þîu„#É6WÏŒºôxÒùõ©#ÚxrviD7vÃNi$Ѻa×ìµ›–G=ï¸É_¼NÚÌ–c²‘E­>©¢;š–$fW#ŠUæ$t§ ¦^M·Â±]… £«ëõˆãQõgz;Zøî÷*„=)N¨J£JtÐl÷;ªT9—r»¡º3+=#ý¹`IXA3ÉÃJÓ°¨“6]gJ†IƆì%°Š»¥BeK» ¼ÅŒüÊÖv5[ó½t ÙÕ íѹ³[‰dýU°Ö â’r%ÍqCUºà¥ÜÅ' })oNäªY•žlF°RÅ6Eb;/ãk>-°8i Ÿ¾ŸôlÌ[ne~ý¯Ñù§ŸG_ÿú¶*wcy ò. ö£‰|¨†âxaUª@5¾ü¤çùV–Gç‰TÖe%’–§¾SÖMeöºb'5“Z{©µ—ªÐÛ;è´½¦Êçoí¡*é!±œO~’‡ RkŽ æ ªÖœŒ©AK…×GÔJÛ-ÉQÈ¥²×²?Â_¸nÑrwgɇz2›Çw?»áþbvDª‚ øtÃ,öŠ¥ŠSŸP‘´ÔV-«uãs7¢ÖDÎmSòôD ùR5º­ˆž Ý\'z’¬ëµÖm_Ÿ$bñ'-Sˆg«vkžŠö¨×§ª}rª„±£: ·^e2Ü^¶,K¨ÐBý²T)K™%J,5K”Â[ñZj¹ÒñÕ¾–g5ª¸Ï¡†û¡Ô¶ië·÷'?¥=É pe™ci«…» wƒŒsÀøÌÈ6ävGïõŒÇ­çs6¦ƒ|u¼¥%ñ.bA“aZ:\‘°íuÃU5àQ-ðîuXZª_Õç+„óU±L®>h=çƒÖ­XÉ[+Úàé ™ 5¯ãUu¼ê$âUro"Ïdßå9~T™·˜ºé±ÆUŠŒì+â,<~¬Æ„'©Pö"tã»7“ïqUB‹uõ‡YÝŽ<ïCË&ç 7/wúF¸žSfÉzTs­ø9È«WðÕ¯žøA™n/)KÁ¯Cª¾V·{_ß ß­Õ8×v ¶µøoʳdÐçl¸9}ê…YFxµa‡l#ÏѬÈÀž³à§Y;Q½9Õz)Èø¨¸ˆÈ˜Õd3åV?͉ÿúüí}Zû„*XÖÎB!5˜SÜmpV»Ûn?©Êj°ã hØŒ°óeB7%¦ØXI&j‰×ê{Üþ ԹňԹ‹JÙ_çFÎoe8ëÜ…ðóÜL~Ê a1¼;h9Ì™Üϲ±{sn>ª„¡Jê—­xÉT¨Ò©ëSâw²ÊÛÄï|×s¸¥äVÒ¹l®X|ýkhù‘›½T'7Ý]«“9KsÓª+ÊÌtYKZ¾,®BôR‚ª[Ñçû†ëOë5]¯ézMË[Ór\®3Y.L0N(L÷EB²{yòø)yÊHYf˜é§ïÆ%à½<"óÚa Ì-±ƒ“æ ÆJOzE©võ¿¨èŽú#®ÈDÈ1^Ç©Ùy]ý¼6àG¥a¼¼@Ó#ë:l#ÕùÞ·àb<9f³*Ó™­dŒ'á´qßnÍîu<ÒÈØxÜ`¦Á@õ¶ÖL‘…ñúJºŸÌ¨ˆ¾É»dÐ4köç|´=UâU“1l6s½vý˜„tF|êñ5žœáÑݰ½ED?ÔðÛò†]³×nZþuiã&[ý$ ›ÙIÖF#¢–Ð#2»¡icÚžk¹"X€j4OJ´d 2u¹ÝǶLFW×%÷ ­ ”(Brû§·£…ï~—)„ž7%Dù¶êT$U§Ìv¿£JÁ¿”tm‘S%©.X¼/h&!¿4âGݘ”ýwÈœÛH–*«O¹“Ô×ÕYsµ ¦½“%À‘$.d+#’J7Ë›fáךLÕêZ±šÊw®´=U}Ä‚$ÏÍ‘œ+Qä£Hl¢*ùøÖø^‡_žBâézõÙ³ß2·þ"µRö¿ &6y¸ð´vÑ”õdÍOn ´ª² 7¡hiQ©ë‹§tinUÑSþw÷4ßJòb#™aé XJ•pj [rÙ™}®{Ö½Úc¯=öÚcWO1)<-ç%¦¥èa~Çlu÷ öi¡Ù\»)i#DjÛ2Dš°¥¼~ÜË¡uý)² Ëž›\#ÖHÀTyS$ù·—¸“¨éÕ7éÓ;7Üëy=&räó·Àb“7®.’W¿@—ånñˆ©”5PR\>REîãglëÈØ¿¡¯x9^å§—õÑ„ âÙ©Œ87«S©L]§Tê´tyµ*ú#g —®Áë~é¬Å›ý2IÃÏGOx:ëe 0ˆX¡}¥‘(KIë£ÖØG<¢µÊЍìšÛ¦=ÑSÑ\Ü#N»Ê£ ¬l•“g†·Sê« ž»FÕÜ ›™¾Îð%¯±/l(œí¡K}à_P³*ï/$V.8 X5³kOº“ÎxÜ¿P•++R¢…mSÈÉÂó¬‹8 üèAFå ~$·-_qü óÕ ~ èèyAHÐÏ#nó‡Yl¬^ü‹­/s±ý¼":Sœ€DŠÄtýÜeæúîl1S²J&×sã;‰«ç×T\ÎðP¬ ’¶‚ÞY^$m =X€#ÛÕ‘µêFj œ¿H–r3i ú†£¼÷T AüÛKÙ×y__ù§ò­¸+C%µDJã”ü¹ Se|§ÄWn¤$dJÆfp©üÏÕh/ÿÝÖg±]+s+¾*Í`7™ù§ºÑ Dk‰óMöѨ9³\¿q7ó††Þ[s]»ó9ÝV©ë™”Ï\Q|©¼ú›ð–fÎR¯öÊò§„ïO˜U;+‡–`ŒVžrÎ#RèDP§Ìõ‰£–”ÂÆbô°¼ÉÅYüˆz}Ê-õäR+-,Iº2Ÿxý_Êç·o^Ÿ4²Ä¡%»Ò%ÖÛ§»Ò0ÿ\pýÊ0%ùT¦2¯tö‰…O'‚þ„®€ôj+ó_u—þ¬¥dt–ìEþ!wêaö™ã û´¶ú³ù{ÝN¿Ûju ß#aH¿w¯zô˜á©CÕõ'z¦²œuuÎT/˜&ÔLŒ?âûýÁ,šÒ}&–£ã?ˆ+“0 #NG˜:êçѽúKô9û×çÔà©C–@p¦¦WåjÓš»Í£¹âÖ¡¶éqU^3šdVk[LåLý Çršüãõù/ç!]ÈßéO¨Œô'ïÃ`1§?HÿµÊ²L’,Õô>›Ø}ÔÏ«ne¡žˆþðK‘âIÍPØ'v7õÜb±àá·ÒÎo\.—&¸ÛÐ;ƒ^»×Ó»j´ãÓÚö?ÊëtãP¾² åKì|¢ßÐvÿ‘ÙÔ›ëV–N;®]¢¤cv©ˆÚ£ÒKo®íþƒ]Å\ŠüÙ*¼ßé馨]é®Ùöê4Œ’ÈLjÚm'ÉÈìÌ·¹gÝéÔ*ɰ'é —ô·i´úI%ð<-)ÑÍ>²kIOo®óµº™¸êLù§ë;¯Öy¦Ô´éQHÍ[7 ükà(i ýÔ³FÝÑjõÛ=³ßî´õÙîÐ_½e?³ôꢹحÏê `ö»F·ÝÓù´fÛ§+±8MÝeÖ|håËZBÅ6ƒ“^Útí@ú\âY*¼30FmdZ€]ú¼Å mÓè½~›K‰¶~º ƒ€ÔÔÌ"œ‡®+Q0#ô7^L¦€á,‰Ýìw€2Wî“î~°4xÉ4~ëÅ HTÔXå_Vè²ÃÏöü2Äx`ÈËJÉ1 Ø%¬# ›FM@^„ÞPA!avŒ~ÁÉ©Fü v'wX%>Ó'ÍçÆ ™ /&Ž5Tv³e”ÀÜÿ Ÿ' Á§Ùw¹n` ‹ÊGµ c[Äà æùH£2êŽQ6™‹³«Lˆ~COéADמZ âRøÛKø7Ñö`4GŽ oµÚ=ñ‘˜#—ìÐ{|¸Œe·.\Pë2.¸~ðÜú†ÑêzË0Œv»otZÝ5×ù?¯“Šzu‹3Ö2[tr;Æ€K¶~º gŒ ïèºÑÖÔ£¯Röyô{ôb‹KEìk…º¤ ùîF1®>›Cowu^Úüéͧæ=MÚá6ä—›é[-ÒUal]³ßéô;­£2¶ñ£JÌmk0hƒŽÑ7 Eúº4¶ttíN¯Ã@Ýúé*Œ-N=×S7~ZâÉ—Éìw{ã仌ëäË"vòåFF:ùrËÛò冂ž|áÀx'_fiNV~øÝÿ̰ɼç_¨ÀùW?=ÿšÝn»‹:ÿÂùÏ¿@\¾ó/|þ¡sŸ¹QÕR—Âß^îøfž$Ú­çö¶åÓò]2³¡ÝA¯Õk _d˜G’"‘üÄJ¯¥JTœ±ÖÞ‰õ¤ˆÇëÝœ Aç²×îÚ½6ÏI`ǧå;€hM•æî w÷ƒq8€| " ÅÈÛç ` 0ž¸o=ùõ‚öQ®ÛÏç AÁÞ` |žÛ <À„"ózƒ`\o ôèœÞ 
U-q)üí%ü›hÛ2ž7Øk÷M*¼‡ã b݆°²ÀƒèÚƒ^§ÝÑN×èšôÔÙ @::º}£Û1âýÆiúÚu:ò–  :FÇÐ9Wú¶OWâ€ã4µ‚k•Òå]«T1—xV› §~cϨÖjó\]V~·²çêrß- âÕ¥Ù0:=º_ô;.Úúé*ÌRSùs“?Óµ¹ÿh“^Ú˜ÆVs .ÑÌÞÑ[Åë²c1Ï77™éó63ÀN¥F[ïó©Î¶OWbpšZ‰W°UºL¯@þ\â™ve®SßàÍ@Õº=f`ßÑ×+è ôv×èsªÐ¶OWbpšZAÍâéòj«˜Ký£ß-ÝÁk–Lêaz·ovÚm½Ýï· ·t-½ßêu;½ÚQ–ë(ƒkû“©iF·Ã·F·~º ¿©©}ì.죊¹D³·­Á`@­íÀäkê¶OW¢v8M­$€»UºÌ®ü¹DS»6õÂÛÆ ×?ÒîmDûv}Ôî ×íº­A‹K…¶~º s€ÔTصîßédÒ±„šƒ­ÒË_ëš[Íü¹D3T¸Ùê÷Œv}­[íµn¦Ï­A¯¡ë=ê„xMs¯Nê;â¤>:Á­î Ó3:ƒz‚Ÿé÷LÝèv{õ?Ó 6 S×[Ô5äØäw|Z¾Ã†ÖÔ nÜwH—wã^Å\"9l©pꃴ:8v¥NÀ¾q§sѦ;¸n˜>µÙöéJLNS«1Û¤K5ÒçÏPáýŽ®ëÇgžIf>Àttj^ÝŸÚlût%&§©\ì.ïò Š¹Ä3Txßlwúµ Õ _'²©1º½¶ÑêúÁï‡ç>¡sÙ3ôV¿ÕkñY„mŸ®Äºã4µo›t©žô¹Ä³îT¸©Sû~¤Öýôïè^_oíÑåS›mŸ®Äà4µo›t©žô¹Ä3T¸9h÷zõOêOÀÁké¦Ñ5ÚfŸK‹¶~º ‹€ÔÔJ,ÂVé2-‚ü¹D³Lø ³–\;œ‹@-uW7|ûÊÖOWbpšZ%Ãéò(ª˜K<‹@…÷[¦8%C¯Î)Gädè5z£×éZ|GÌ­Ÿ®Â 553ïIüh”ÞþÄþjÍ•·þþŒø±X1æñÛ‹1rc2{uÿ‚éÞ‹¡ò"šR ý/Ö®™5oÜY3¯ñGëÅòå®âÍý“ʣÌôŒ&–ÍÖZI¸ô¥ó‚.Žòp×hÕ~!‰ âß ¥ƒ1wWÑ>% ÷áa;tµ¢¶6ëT;ltÅ` Ÿ{ jvár¡6QØôܱÙif¾P/|j2?0KË>ÁL-ûYj8߆aFCåk¸ ~RH˜X›¿/è“çé/ü8¤j3Â8^øŠ¢)LY‡Yñ!û‰¢¤ÓMûû‚ísL‘YÞÚÞöb™~43Ø)û“ãåOÞ±3[èÚÑ…oÍçžk'õÑë¢ç!«7½"‹è±x¶°˜x£Õkèô?Æ‹3åk;ýa_ïô_·n|õÕ‘/±5›¿H»NÛ¦¢MÕQ®é†Â&ye§0•”Ž¡%GQÓÝVÙž*«=1tˆ[5!ÛÍ• Û9Þl£ •G)‡PRgW‹óѲAg[[ÉYUmj.¦„-¤ú¶$ZÒ"”]/Wð­ûu9ŸLo¢y}h'†Þ``˜ú ßçlê–OWã ¢4uãAõg2÷‚»uµÌ}ÖV饪ƒîµ“=—¨j70ºƒÁ‘TO½Èd“>o9¢úzGï›NpâÙ ý†ÞéuÚÝ–.ÌÐÛ¯çòhæ²ßiµºÝOáúŽOËߙњZAy‡ty!ä*æigN…w{í6’ ©óË…û £«· ³%~ËWÛô£±éF¯70{Áe¶~º ›ŽÔÔJlúVé2mºü¹D³éF¯¯ëf§Ý:>›þŒS‡vØôV—žæŒö€ÏØúé*ìRS+ öØ!]µGs‰f˜ðžnêÇèÛ=ïô€]ÜýF[ïô© ®,ÔŸ®Â 5µ·`«t™nü¹D3Lø Õ)²ŽÕQïyæîp ºf¯O]ÄNï´z«º¨ º•®ð øÐÄÇ?XãW‡Ã5lƶS£Ùoµ[­v¿ÃeR¶~ºŠí©©Ùþ v+”%?cl¿p±‹}N\΋}N4׿ ÒD¡×û|XifLà,h§­„+0Ï7]“; è/<ï ô†+‹ <'¾CwÌè g’ŠÈÞ2¦mlB·FÔ±µ=—mÆÒ€Ñ•Ú€˜|ñA]A˜BXº òÔÑÍÈõ]´µ OïÀÞ“5G¬Ó;”Þ Ÿª%¢ñ§w €s¤w **fzg‰&ÀÓ;fi‰f+=:¼#ËóÐp̓Ո»L‚ä ƒÚÞ)ñIH-Ï(SEÄæ^¹ŽCüÑÄ%žƒ<Ì|á"º³95¶Ï†bÊŽ¡‘<üEÄð˜è™ùÃD\ŒIjspq=kL¼QDØFwT\ð §$‹ù`â⯢ÕÅ„ FiXunEÑm"k[ndh™ŒA`£+b9[mWÅ^µ_{ðµ_{ðµ¿ßh²Ž&A:ÔÏaˆ69 mdÀ˜Z‡ÄŠQsŸÜ|áàÆd6÷RhÄÖ2ßë¹±<×AokŽšÄ‘·æ[Ë‘O\ ’9=N¾Ñ#7wy„°rñ.&hL÷ú k´ôÒ¸åLɲì%ÇîêEÀõ†põb5e®C쪋y²îJâQËE÷8œ¡“à“âú¢µzÔ>(‚!ÅçÄ÷5U›%šÐiaã±qdm~‚ªwÇò)f–oM‰ó.Ü}Có!‘½o‰Û4⚌À×4 ¤¦ãñ/IR(.öé$EJ9ù¬ð zy&|2Üd…ï—èÒ$t€6^Ú¹³ #¸õIø™LHH|›DÒDIŸìÔ$ï/Ô…ë\¨Ã Uo º­v»¯YÇÑ:æXׯ¤kivÐíõûºmšæ…º”£xG^yˆÈ3‘þ$7éÓœø¯ÏÑÞ§DAˆmõ‚9ÃÍL÷oþéìвu¯<²¸È%î]ÅRûQHRžÚ»qOv¤0c+~îØ®çDWùÄHw&gSx®‹;¿)*hU§Ûê÷±ÖV6Ä“­w'–© Z¦®uã–6èZm:ØÖÄŒ­ži¨‡ [à›h©ØhÕƒ®nöZ­~«©[?]EÖ8RS+©1Ü*]f¡ü¹ÄT»žÞÒõ^ÍQ1Ä®ÃACï Ú}º3ò¼f¸ãÓòÍZS+0;¤Ë3UÌ%’9H„wÚf¯#Ì^2¨©d˜£gêýn§ÓãR¡­Ÿ®Â 5µs°UºLs .ÑÌn´{í~ÿøÌÁ3/9ÞmZ­^«Ý3Í.— mýtæ©©U¼S°C¼üªÓýÂAïðÁ•|§€_Ç;00Øm¡öž[8âóOAHÌå®ÄâYQìÚ±BûjSîÁoŸ?0ÑWq<†ÍæÚç5iìÃ=®²¼„ED¾~ø’¥ ÐÇ^ô&)}C’„…³A3I#k² ¸†MSüè?ÉÝ–O^ÓßdŸ´6ÚVîK’½ø13¾ú${¬rë'vÖ»°ð$#KÛg?þ—û`Ë~“}áïVäÚ¯ÉüíÉË€ëïqqñçi€sÜÑ ®Þ]òDùW1%šÏ ˜m¤ò(åJê,œã—“ãmOú#ÀNäœáo |ÍF;MµÚôdÐéõM¾¦nût%^;NS+x¼a‡ty7T1—¨j×´ÍÞ⟠÷ë ƒF»×íwõΠ3’(Ã"ú_óh/q˜“ô›·w2‡kiX´ÎÆQ–9¬ÐF1ê°õÖ¯XÃÖq9¿¿™1lÐèvŒÖ §wÚ\Ædë§«ØšŠÎ–Èj·æ cØÃ¤rmöIu#¹p¹ÝH.4.ª0>,U? 
€*Lô†3æ¦ ƒ#ï§ bòQ…AA¹©ÂĀѕ*LDÄç"€anˆ°f^ÒêX‹ý¼2½˜è\¡ (*oVîšç€/]$ ¶Dœ‹hNlüy ð”ºtÑÏÔ÷õ\Ÿ|a$7I“©ëÈ£’$®ÍÀ tè—­²¸Q„wÜYR¦l`w!çéAŸˆ¬Ú€Ä>0B HŠ€|ç[%_krÖ×úÕØJÈÎÜhô,Q—ØŠ3’±éE 鸺þ4+"‘¦J3ëû—E8M¬hWÿ‹,-¢b~ó­Ëõ,z È…á+ ~óÕŒzKý¼6!G­™þ &N–G±N¨&Ï~VmC®½ÙU‚–‡_4²&-îµzq׌§ŽÔÆÐèã~Y–AŠ;·v^²¨;î*+?×ò–XºÌÒÐÇ7©RII‚A¤YŽÃœáWþÞoK\#rc/MYxÕ$±Ý¤ÿjÎC÷†®öw–QY+®ÉÝæF°A°ƒàÚ%Z”äwh×#i{’ }3ý1 £GÌÑ¥ÿ®¤aÁœøÑ•;‰µü¨˜½„øªª à¡-‹9õ‰5{•¤ò4›«€,SY£Yñx0r_Ö{mzÑ«û µyÁlÚÅÛýûâéûB=£?Î7³ìC…”~ !ž}úÛâ½UúÁŽÓÏMI|¡.—ªÔq¸”;̪;³R¯úÏëmÐ\ y3Ý©ëkµˆ¯´di ™·É^ «ˆMA²l‘IÊ]%À½t ëÛòy„sÙ6sVè|ú’N•.syÒëï&ð3òkúÚí3Ò¿ëQzQ­¨7{µbMÜ~rIŒ sJ*ÐÓ³ç;·kŽSÕ³›ùi™;wòGºD|>§3Íšý9Q'³ ÿ–É¢žunž\Ÿš$ºqûÄŽOÙFtc7loÑ5g|Ø5{ífN'ØÌ¬Û~ÖÔñJêªÅx|GsºS…@S¯¦[áØ®BÑÕõŠz”Y«UQ¦·£…ï~¯BØ“Zžªt0ªDÍv¿óÌÌI„ Ml¦Ú8t)©ø œ Uë£ ‚;K×_ålRݧ9nX;¯µóZ*®¼çSÅ6ÅN}Ùµ_ói©ìI[ø´øùÙ˜·ÜÊüú_£óO?>¾þõmUîÆòä]̤ޑo&uþL&•J…–¹K‘Ÿô<ßÊòˆF¢•5dY‰¤eW.ã¦2{]±“šI­orNï&‡=C‘æ-TµsP‰q` éÆ×7çõ-R}4<šSÁÛ>ßÑ;þ¤ÒWúAn³XÔë$÷Ò%l¹NÊêY5-ØL¦P•ôXÎ'?©cõ½ò5Gª„KièrÚ})g–óD¤×iþÖÇ\ÃøÓ¸ÄE§»ÁÞ Ôdȵ  ›{¹ËTÍ®•‡˜Ã¼»¿ë}§¥Ë7AYÿ>"¥J”n“<ë$qÇ”¼Èlßýì†;_B^yUø2§;#öŠ­» ò²ŒÜUZ•®{dµ&òfA=£Mq}ƒZó 4@î×1ïLr<[tTÜvâ¡á !Ù••*Äddyu8˜'|­<´üØ“ aZBmï4}¿ŽŒ8ž3B_¹ŽCüÑ$&:ˆb\Dw6'aøl(’:“H>#£ÅEÏx?0c’m訸I=î('N@ä §$#§ÃÄÅ_EÊLØ`”–â ¢æLÉȨì6ŠG2´LÆ 0Èѱ‚­¶+–x~Þ!ÐÖ+Â;å‚¡ó²€½TÞ!1邼C0× q.x Õá#!Î;—ÅÍ;$-Â;—ÃË;$t€€ð‰Ç¿áœ%Âü8ÇpÃV$NÞ!1Eñ‰O/ˆw¨” ï˜Âà7_€wèàšÉË;$hâdyëçœHjð­JšZ‹jy‡J6ö€F_N¼P†eâÎ=M‹;é{nžâœ(:œwInIÞ!ÄVˆó!5Ÿw©a(¼CHm)Å;„>'Ê;T>Kò">ïÞU*7ïPy‘\Ùê§”Ì ÎVÇÏâÜÏ;t i”’’1~ÚÉÄܼCøšxðdºçž(à’-^aîÉIçmŸÕ§³½''Còaï–(nÞ!ûx‡°ºµ—wGïV8x‡pDUÔ.Þ¡MÂÎ1x‡¶ 4M½øDåªw3ð7°YŠpíg)*-çRn7YŠÊ·ÂRT^è`ƒ¡.÷Ò%ˆ”Ââ †„`^Ê]|ÒЗòæD®šñ¹º8fœ«ºET –¢£·ð,E§dÞÀ,Ex¢¹YŠÐD¦÷œ,E¨R7¤lîÍÖD—/ÈR„Úe%’–§¾SÖMåf)ÂsR¹XŠÊ‹ã»÷9!û ¿÷Aß:xYŠð$CXŠêÕ^ÍžÅÑììyN€wþ ¼ChÒ!¼C?ô¡^ ²$c‹À;$,š—wèˆÃ x¼C¥›ÂI±€(ò¡ÈDæ*Û¦e|câçÂ;€UàËœîŒðó¡¬QtÞ¡Ò«´Ú0ïЩ‰B¼C'µ)bññÎ$dzEGÅm'’ H<Ž"×!#k>÷øH*øáC0Nk0ÕyH¢ý´ܸ…òNÄÖ2bn®nÔËsô¶æ¨#VwábßZ ïO’Qµ89= rsG‘G󘺘 ±;#ô#Œƒ¡¥—Æ-gJJšÕ¥keS-xŒ9[Kr×Þø£¥´‡Ô[½ ™EZ¡^š,ê㌙…0˜:‡ •»À{=Y¾ÐÍ cµÃsíŠZÈHÇBŸžY"vϘ³©°Ñ1Ô£ÚÆ©C™ ÅWj_è9KêðŸ êæïXûeFӗކ:³|kJœw9A^D×ó—¶øžfc‡)1·³OlL™ë'1_éA‡ yšçxØ6`ÊåßW"·rëñ`]WI­‰‰eÉȵé|îRz ~e_*\$ùR*˜«GÞGä­ÊR;µSQVb^<öì7³µ¢2ºj“¢²‹UÙU²Ž¿¾9ÿ‘v¸Çuv‡Üå*—\¨·{~{l}&©Ï$?Ä™ä qc}ByÎ'”#жl­kõi¸> ?9 gõ.õISbJÑôì·÷•¸›~$3{¸ó7sºðƒwÖó"yÖ²ÒÔçÎ:0XëÀ`¬ƒu`° ÖAÔPMNÁUÇi~³]` û"’uNN…<‰(¤T ’5μãG•mâ©÷[a\¥ÈȾ"ÎÂ#áÇjLxRîe/B7¾{Cä{\•Ð"§á!dV7ÀOô}hÙä“¡RŒå{ÉT¨sô©ëSâI2αē|3¨UáHVâ@&,^Qü5´üÈÍ)n«“›î®ÕÉœ¥‰DÕ ‰eæFº¬‡%-__9Ÿ¹²}žñŽºþ´^Óõš®×ô©ƒÏd¹0Á8y¼Åy_¤b¿—'ŸŒ¸Œ”Å ìçj&ÖÀYöãÆ.؉„]FČԘ„Zþ«Ó‹`D‹qž›ž<ŕڪªÂ$íf˜é§ï<ïû1ÆV®wcaR£¹•Mfšg¥ÅÄ#Tdˆ&åéKßN#BUò†ëOB«qkÅöUó¦…­óù1_˜È÷¨³Ì¯cØnÒ'žçžëºÎ­eA«:Ý^ Kò!¶Çê Ìž¦ë“–Öéµ-Í2,Kë·ûm2i[“¾Ñ.?Ä[¤ÊèÌßýíH˜ºŽ6…+'Æ@ƒÜø8„ÕäâSh ‹z4Ôö}àb±¼sàóèÞË#Òó–ðÈÌÑK18_ï€)è­Ozõh†ÚÕÿ¢¢;ê^ÉH„ãuœš×ÕÏk~TÆûâÐôȺÛøÈÛÞW/áb<9f³*Ó™­V@ž%×â^«wÍxêHiälé«„ C•x©Ájc4ö²ð«Çäzìï ;Œ¥K¿&w›…Ó_HnÁµK²TmâzäÕS>ËfV0ʲ×e6(˜?ºr'±–R¬´bí•lÃüІŜ:xÄš½bŠ8l6½À¶¼« Š™*ÍŠúïÐÓë­¶½èÕý…Ú¼`6èâ!ÜÄþ}ñ4àt¡žÑç›Kö¡B &J?0 ƒÅüéo‹šôƒ7$§Ÿ›&å KUJÿ/%…Ñsš&õÏë]Ð\ q3Ý©ëkêü!sº"Y*®>åoR_{lJd Ì=‹Be‰ÊÉšäí ’¯—3%$LrEæÓ“ìxªdaR!F&¥Ïm%Oà6<³c´º­'»­Éæ/ñh9€SH\ÈV!µé›fáךLÕêZ±²Z﨧 Kî#Ö oR¤ìµ'¥ÓÓv(ä o³¤ÁÜ„­êÂUi-Yž=Ÿ¹[󈫚½õR\‰%ùU‚BÉQ$®cr©)S5Íšý9ÑÓDmT™ zÊ͈ëSÓA%ŸØqãÉÉ£ÝØ Û[DôCäÔ4ìš½v“Yú§™Y!λñ²ã3¦M¸–+‚¥nŒæ ¥‰LA¦.·áØ–)Àèêºä¤L:EHnÿôv´ðÝï2…0¢¶&þKyФê”Ùîwä숲ÎO"I¨%Í…‰¦Zú~0W¢dàq¹+“öN­eõ±ì˜\{j+sæW¥ôµ#_;òk—0ìRWêÖ_¤"Îþ·ÁÄ6wZ»hú ÍÉšŸÜ—‘m€¸ŸU)-ꡨ;{ËDº´ ùÆûŠáñä&=Í·’<¹¦‘̰ô,¥JXžÚu—ÙÑçê°gÝ“%†ëºð˜ 9øºoï˜S[®qÜ–—¹3üÌB}ܬ›õqóy]ðåÇ£Êæg-¥W¶ÔÂÓ$q¸ uyŸQ‹O=¨oÒ µwnˆîeªyã[ªvX¥%¬~.ú®ƒˆˆG¯8@jÊŠËRªÈ}o7´ud/5|;•ø $ÑF’«ÄG*^.‚¸NÜi飕kÞ‡Ë`ZþÀQo~B\„@ƒDGõtFœ›ƒütÖ±@Y›¤•\M¨,wå`-Þ’¸Xo|ßšk§U‘ º<öcN»Ê£ ¬l•‘–»Sêÿ,žŽ»FÕ,›9ÁÏð%¯ñ4n Øê K}`jT3>¸/$V.8©®´^¿gvºNoйP•++R¢…mSÈÉÂó†8 üCFú¨~$·-_±£“óÕ ~ èè9uDúÏ#nó‡YlŒYî‹­'s±ý¼¢DWœ€DŠÄtýÜeæúîl1S2Î×sã;‰«ç×T\ÎYäZ‘¶‚ÞY^$m =X€Ãíê ·ôt£e´7ewèwów6Òûm¢L5À¹É·ø!7“¡¢¡?a>5J/K!ˆ[à›°Ok¸6 
ã½Zhz8Oéࣿí6¹ƒ×kŸrÌfY¦31;oǀܮEÝF7jw¢‰-¥‘G°ñ¸ÁLƒêm=Í™üµ_ÐÇðQNT*eÃú"=– M£hj¦m3Ÿô*fÝP/%Ê‹ÜPcW_=çüc¯YA$]úyØ.œþBªpË÷ï’V hcÇ%W›4›õ´Pa˜&ÕËl?'^8uÆ‘–R̤4îJ¶a~jÃbN±¢ÚÆ^¢Þ[5Ë]„ô¡Z¼´;ÝfYú»žZ!ìÓîý3¢M¸“+‚%c ç1ŠLA]îk#K¦£­ë’ß !á‘(Brû'_‡ Ïù&Sãx«KÈhÙ©S¡Tê4{­3-ÄÁ“$»%œhÉÕÃ/5¦¼Zm˪mÙ)¹öÔ&–æÌ¯jö+G¾rä׎UØ1­Ô¥?Ïbœþ·ÆÄÖÌ™{^«hrÝÍÙšŸÌän±‘m€¸od),ê©z<½Eº´-ćªîñäÆoš-%YºL-aé XJ•°¬Âý<.;³£/ÕaO_¯:<½À9µåÚYœfˆ;¾ÓÒõ~»»qO\!#²³¨‡lN AܜⶂsZ@.`s*.åQ)†,°9EÛœ¸¼|Ã’ßL•6>k)½²¥æî@‰‚‘§rì8:*n;‘×5§„ú&9~ûÉ ø¤‚žß&Á㉠ˆy~ƒÄ*‰aõ t‰À $„D<».P;PX\Ö‘REº$¢©# |º½—³ €Òr$¹J\\çÅ̺ºÎgÜjèòÖ+uW¼h¾“x[ð—2 î’$­ææéE˜92Õóéq^jô3šÇem’f²l ^wå¿c-Þ‘æX-| ßšk§ñ'ŽžÔŠ‡»ÍBCÃiWq„™­råò¿”º×ÀÑzuÏÇÇÿêV¦Èí4è—ø’׸"7¥6[èRŸØ"ÕtÌ?‘H¹á¤ÑÒº½n§Õ¶»ýÖªLÍP –E!Ç ×}P2rb×ð£)ñ¤úž|}jùŠ‘} žÍ4õs°@GÏh)rô«xfÑ…8Îdc¬uåO6 Rs“íÇ |P»ë…Jä+¹«8â|Jú± øµå–»°‰ÍûÎ#3$ÃtHØ»+c?PVÌÊÕ_•œˆÌ®/Øûb \ìôÎ..•‹8‡>õåéù§“ì‹4Ç%þ]œžûüæš=™Á„DÃÜsËÛå«c*짤wâžÙ©¡Ïþ”£°Ïo`«-Ë© ãîß-æÊ‚ù€‡›r…1ž–´æÌ­ýÑP¨OÁ(]Îs µfÿg¥þUÞ~úüúoï~ùôwå݇7¯ß)o>¼ÿöÍç_>¼W~úðQùíÓÛŒùQ×;]ú/Gæ¿Þ¾Qê#Ç«‡SE³” bM}åÿ=IQþò%t ™+ú…‰?(‹SÆn—ÉšÝÑñP´¹¢þ·eM5jÙ_¢Ù\ùuõüÁ‡³5úwÍèvÚ £ÓmöjzWoêý–Öhô_¾Ñ3ú½^«Ûhö›TkQ ,ðõ+iz…2(GÖüß≟HŒ§ðŽ~JËpY<%ûå0ÿÙS›êqnËÜ]Lúyú.uÚÆÚüEu¯ûÌ1˜ñ´†Ñ5Ü@ïGÿîì×ŽÃæ¿}åó\…¨¿fß1]÷÷!â«nÌRkJ;TYüïoÈ­—ÜòÍW} }Vüéü!šú^óì_+˜)ÚXÑìáþ«gv2âL¥ñ׿oÒ˜ùwÔ \½Í-ó$‹EpTkjz³Ǧ’bû:Õñîiã²ÛAЇÃÛ6L«¼°bâsgȸø³šA﹫\@ÀsâÙéNq)çbbZæ±/âö­å:Ä‹ä£+ƒµJTCu¼úÞALeŽ9tt êxÚÜ+ÿb¯îˆH6K˜ÐœüÊQNþ»OÅÎi³ìÂÚˆ¾BmÅÈê£âßx“˜!5# õŽ•›µD?à ”ìR0¡´èСp5,Š]¬"(pϽ=§xöÆCE-Ð<¥{'™gÄY7·UÒ5wU£:Ø…ktt½ÕݸWïPåàŽïqµçYýž$1Iô7W‰.f‰Šx‹Û¼w¿ˆ½ºÀÒ &ii Ç[¤%NߪŒ“;¢{K×år¤ÁÀþ¹GÞ0}5ãûÁ1ã` r¸*jÄoî0Nÿ¡¸mLÐÈ™ú+hè…q‹™’‚&„íö¦~¼•L C NE¶«¦†ey’±–°qÆß0KÛ(¯6Ȧ;ŸîÝ!;ØÚ(^œ7ïÍž þ¯Ñí4ôN¿ßhaÆ!ämæÐHÑþ<)7cÛ-å[ʺ»¿«˜aë;*'ÕÇ-Ý·ÅÜ™é™bÿ”áí¢±£ˆòªOæáç´.6éJ’Bq¥läÆiQÜœ ÙñÜ•¨Úa­bÆ¡Õæq)¥I’Þ”¾¥ÄHq™ƒ•ØpÙñÌŸÝ`û_©3ñ‘ŒI@¼”kûŒGýnðx£.ûFܨz£ßn4›=ÍlÙ¶ÖêŒtmDÚ¦fõúín¯§[NçF]J5ü¸ã¥Œe@J¤=‡_é™îçÀkE„û(O­³0ÿÙ+3{ªÍqPþf@õûó›ëT}¥µ¡”M‡lµ\J—”t”!ééÀ£ikç’lš»&wËŸ‡È”“¾lj"’í[šH%a—'}ržl@ÙHª¨ÿýúú-åêñÑÍ©ƒ¿âRÃ|nžTëþ~’§òÕ6½Ú¦cí¢!ñåÙØ¯µè\ö çbг@·ej‰é­LºäÔ‚[Ü$léùl‡òؤlÆDÎ$Χ¬ªÅw.Ö’KïØj¾:"e"–’T~šä‰߉'pôR§r–ðžÓªV»ÙÄÒŸ¬‹G]Ó4ZF_uõ¦Ö²F=­g¶»šm·F~Û$f¡‹+Ü4TÕJî™úåšu‚¡×F·Öªu°\Ò'|\«µÖÔ}¨MúqÏ(¬¯Ü0ê®Øbv£—Š%gþ“9s\çðrÀãþr}ß:±MÞôá©+?QØ%Ÿ"ÓºÃêN®4lPO>¢! ¥]Ã%ð¦[ CÒ¬áð ôjüò¤&o5lÂÃSK`øH©%ÜB—h½ý<|ª¾÷=‚ep2 ó7«¸0.›sÞ+Âÿš®oÚ3]“î[‚ƒA ¾·,„ þmož¾~÷ú¿•oß¼¾ÞÍl ù#ô–+Î@ö¶›¶vðD0xe4%~*¥¼ÒÙ / &ýdä’䃱é¸Ù/SšìðΙÏég†’"P'ßZd9ÏÒdö7ìimõgë÷šÝN¿Óí½§ï‘  ß{T]rOèÒG=ñ¯^ªÌÍØrçDÿwú;ןı§´(ùR…úÓGbÚŠ?úƒZeL×Å¢=L-Ñjs¤Õ_ÂéOjwuÀ.ý½T“ Õ:Ýš×ïú*d±¢Ž~ XÔWw²…õçfš8»/p»TéölD…Lؽ_êëë_®2v¾ÑO¨Ô䓟Sý ùiµ‹ƒìêû§8ʶ0Šúqõ¢êSé矣§>PS ú7Xã¯ÍØùû²Ž Á¸].·ªE«×hF¯ß?oµH?1“khS*ò'g¡ÄÑÖ’=*ð¼«AÞ„Xt×X¶^»×íµu.C²óim÷åuB «|fôߟ"ûý†¶ÿ̦¦Äü?'û~ú(k7qíó+6f_F}±|µŸèöð´Œ!¡Ôv}>¼7ã“Ö‚=Ž.7.'.ˆ@—D  å%Ðt¡È<º L^](€@W]x tÁ @]>'u š¼Æ>xÜELv›‰)¯1S[ñ8ÂßW𣠔5!“wFV=}øF½ñ¨Cô޹Oì æ?±Ï¨yµ]ò–W8PØ­c7Þˆ®$woï©êýmA?ü‰ ⱋçâ€ßàÆSMa+Ë@I®¤fŸ(J²¶Ð÷½`7°U'&xÏŸg],“G¹A‚ÅþdxÌaô‡‘92ŽÞxÌ8‰Ý^ÎÂz$š’Eø¼ld 0ÝšNÿ1Ö/ò1èO_hÊÊ›>±ò¦‹äåiëT´ÁZ"jRvP÷fu÷5"8WfŠªî:ûÛ»³Ào‚i x´Q–hÖ’Ÿ~Q˜—~„ÍI¿„Ñ/‚ aô‹0húE¢ý¢0>ý"‹~†ÈO¿Â…Ò/‚Àô‹ \ü¹ÁO¿ƒå¥_¡rÓ/ÂPaô‹0l ¢_ACé+¾òá+¾òá¿sNõƒç ú‚òS}p¹©¾@¨ÜT_ Tnª/!TNª/6Õ@õÇæ ú‚ƒòQ}ñâ3%Ë¢ìèåˆ#³þ;ÏÔ£¾(€ÓŒ ƒÓŒ»ë$x¥¸Þhå…ž¸ŠàIHñ:ñ½M^±ÞïX^? Ô‹Dö¿%nnŪk H9P]+‚œÖ~JÙûp“g¶ÙáÇ%º4 /À[×\°ƒ üFEDIìM~#Ãë£éiã¶ÝÑZÝqWv_ë÷[Íî¨?jÄ:ÈoT¤Iô y$DÉ'„dÁ*•…Dì®Tîý~j‘‘[ÜóŠ#„¤Äk—AR µË'垈Õ. 
Ö.ˉ}b L ky…—:¹wÔ.w±æVÚÅ®Ý2Öô¾ÕÔZf³¡™¦­»Ñk[ãvC§"þ¶À7ѲEþlÞi4ûÍ^³ÛåjêΧËÈGjj’:ž7¢Ê@yöÆd=“\ùè»äp:9LúÿúbüWSŸE¬/XÜe ÔýyTOs»ë®Å`êìѰ>3¯ö0sc§ÚÉKTµëvšXUIì/Ž7¦áªU9Ê—¹k>Œ|ÿN‰»=ŒhïÞÒßz»Krb ô‘} “û»¬¢cûŠ|©üƒ®¡Wë ¨š´3I2Q³Ö±¬^ÖÀaÜ@úÔ>«]i¶ºýf×èõúF·Ý¦ÿ§Ë¼Gü~¤[ÂZ¾ª„C¡Õí5$]Ýh·z¯mçÓeؤ¦¦†àg=ë¥UOyëÝ;ïÍhW‹–•ì_FYÉ!áke%cÓ:@Ë÷TYêâp•™ò´OÀ "Þ=4Æ>À"¾üó…ØNî;S ®FŽ3°¦ÛŽ~ûøŽ‰Žy)õúÚó cŠA¿¡ëìÈa’Ïï>¥§ ôçÈ ßÄõ!oH|q‘–¡Öã3â:;Þ®Yô7ùGÿAvin´Ìܧ85á}j¶6|õ$cÑßù$ÃNß.È=Ÿ±¤Yyìã:O¶ì÷7éþf†ŽõzßÁ#¨þ.'ä Î~š½ûˆ»båEY„› ’øm¤â(Å ê¬H'.w'ÚÁÜnW @n×p[à›h®0Ú6ª«w»z«Ûjà6jeÂNcÕéu:­f§×n4:ìïýV“cµ­Â|çªÛívô^‡Smv<]ΪÛ3:-£ËÛÔíOo¤ˆÕäì”^8’Òoï1Ý^«Ñhô¸;hëÓe˜€^ÏÐûÝV£ÅÕÔO—¡vHM…ðþFíµi{T&½x¯³Síä%¦Ú­V§Ù9½•'$^˜ÂÊcf³Ýïµ{½^‡3£o ॠ½¹ õé8ÔtQé·{Ͷи0ˆ2x}Bú¯yxÝÇŽ×~ÛËú”블?á:çkDQÞŸ\ÅhŸÖ[¿"ZÇåüþ† X+ZÔKlµºýƒ þþ§%/˜MÅeÚ/‹?”û4¨<1\.©»J~\ž]%?Úaj n,^j  /5(505òj 8&5(500º2pP‰òSÁñ—ƒ1·¸¤^ÒìX ÿ¸2½˜è‡"ÃB¨¼ùwkž¾t`PXHØq,öÝ? ÞÔžP—.ü‘ú¾®ã‘OŒÒ"®fèè:r¯$>æbà:ô½ÃfÙß0òƒ‡wÎ,.I4°_á¸0ršWL7eïöÞº\Hw¼¸¬ƒäOYè X¢".±)b”B“9Šø.Ì“4]\š*ÍÌoŸÁ$¶¢mý?dió›gÞ›ŽËØ­3aø ƒßüÛüǵ9iÍÌÕÊË0q²<Šuú$yö³lú”ÃNµ,ü¢ 0h­»YoÜßYS©=¢ÑÇ7ü²,ƒwnm¿dRw.ØYø2Öò¦X2Í’ÐÇ©RbIÉÍ>šiÛÌ¾Š¯Y—8GžäFn’ÁtU'‘U§?ÕçsOçû;K—*­wäa{#X„ ŒFX¾ç-ŒÓ½´±ã’¤=q¾ž|ÌÂèñõ0ÃäçR¶ºV˶Š)‡ýUY ÀS[sê9svgöÕë«€,SY£^r0*OööÚ"pëǵ~ÃlÚÍÓ›ý|³¹Å¾Q/éÇÙb–>”Ûð‡É1ÍäæoógXɃ÷$%ÏMHt£.—ªÔ~¸•Ûͪ33¯úÏ{[¿¾êòº8ÇÓ|sMµxj ˜·Êž «ˆMN²l‘{/‡CUêGéÖ—åëw¾á‹Î†/¹ÂNºÌåYÏ¿{ß]ÌȯÉ=%/Hÿfì’ƒjE}¾Ø«%kâîKl\˜SR‚ž^¾Ü±]sœÊÝÔOKݹ³78ÒÐ%*àËÙiæìÏù:™eø·Lõ¬3ó_,LnXQmÑ­…÷V-½l¹;ãƒv§Û¬›Þƒe†Qý‰B¬žÚ¹øÇ›ÔBs6wI)»—9•#Šqxçô}» ½œ× FV‚Œ¶®—ôFñ¶ Q%½Ïäëpá9ßʶQêW–†¥è`§Ùk½° t1H’^É6 {BJÞ8'BÕjë‚àÞÒùWºCÿj¶TÎlåÌŠsÇ·y”±L±]`z X߬¤?k Ÿp#¼ó–Y™_ÿ{xýáÇáû׿¾-ËÀÝ›î‚üø3©gæÛé\?’q©R,òäÇož-eYBD-Ö€Ò²,EÒ²Š3qS™½.ÙIM¥V';çw²Ãè“<†²V*1ò-?æäùüæúû;Uºæ„ß…ÙnµF£¸ ¼‘ÛŽƒ9SP$ˆläð7r(‚——/sØò‹ÒÇo-¯±,é1í^\3Æê}åkŽT ·ÒÐå´ûVÎ(g‰I¯“|®÷™†ñ§u‰‹Nì»\Ó.×6€NòåNS5=f”`³×ýÕ·ÙÛ·º|”¾ß{¤Ô‰Âm’g$®˜’g™Í£‡`÷Ýø3¯ _æ|GÄZ±%–bDî”;KËÒÂu¬ÒDÞ¬¨´(®/Pk”È;å•IŽg‹ŽŠÛN<4$$ ²Ò…ˆ M×墒ûó˜Î™‡ƒ{ìÇÌK¨í$7W‘á¡«ŒáÐSǶ‰7g$bBûa„‹èÌæ$}uE\wÊÃg\Õ¸è)&âbDÒ7®ÏfD È<#Á„¤Äu˜¸øs#OA‚ ë“ÒTÔŒH•†ÑP†–Éè9œÓ&Øj»ºD‚Ÿ‡´ôŠðÁ@yˆ`輬`HLº ÌõB žÂuxOˆóÁeqó‰@‹ðÁåðò m '”|+Ó†&Ö¢\¢‚=¢Ñ—/”a¤¸s›iqg}ÎÍS€E‡ó!É-ÈC„Ø q"¤Fàó!5 …‡©-…xˆÐûãLyˆŠ§qI>@Äç!Â;Jåæ!*.’+[ýœ’9ÁÙêøYœ‡yˆÎ!R²â}9à "|M'kªÔ-)œ‡²7ñå ²¡6dYФ幝”ÇuS¹Y‹ðœT.Ö¢ââ¸ÎÎÉ~ƒÏð—^Ö"<ÉÖ¢j¶W[³±5»|™ÃÆÏC$aüw¹H+ð… %5™ê< áAš~Ü\¹'bkÑ/w ?ê½é:6z[3Ô!«Ã q±¿š1ÁaÞ($£n±3ºäæC—æ1µ1A#gFè#Œ“¡¡Æ-fJ šÕ¡seQÍyŒ{K|Ö^û£¡õ ©·:õc^™ER±^š,êãô™¥0”J‡•·à>™žçÓÅ e¹ÃsíòZÈHÈîYBvΘ±«°Þ1Ô“ZÆ©CwÅgj_è9‹ëòŸ¡özïw¬õ2¥íKzÃ@™ž9!öOa^D×ó—6ù63ÂÏ)uFÒ)ÿ”$…âÊØ©ãúÄß"ÃÿJ•ñ#“€x ¥îôÔšäÀ–z7x¼QŽ}£nTÃë£éiã¶ÝÑZÝqWv_ë÷[Íî¨?jĺQ—q›¤4 UBßQà!’(¢c»Âd)𣠓*e+k˜D‰yþ0‰R8É¥ŠMWÚš©âá¬Z‘uj¸ŸX­Ê‘¶Nnõx–g+ôMR®+YæXÚlá%¼*Ú?Ò‚œ,>,œÈoÓ2}­„w«$a»I·ÊjÀ3"­Çeu¦‹è¥…Œ»¾ÒÕ½^óy³¸gà×Þ¶mU—Øyfú{ÏIõXêKÎkð„QS”à”(1K¿.[äõÂu¯}ê?”+œÏØaJÌìlÉ‘`æxqLçWºÑa]žä9· Gòg9ÇåXŽÒ­Ç“½\%µÆF“%#—i2öò™CšKé=‚ø•})Mp9’äK)a¬žù9Ò·Ê©¨œŠsr*²â±¿˜­•ÑY•ݬʮâyüùÍõ÷´Â=¯³;æ*Wºä\½ÝË[c«=Iµ'ù.ö$DŽÕå%ïPN@Ûr”°•®U»áj7¼±Në]ª0¦Ä„¢éÅ/ï+%Êq7}Oföxû!næ*táGëïôÍóäYËRPí;«À`¬ƒU`° VÁ*0XQC5W§ùÌvŽì;ŠHV99Uò,¢R%HÖ:ò¶–¶ˆ'ÞSdQ™"CkJì…K‚÷å˜ð¸ÜËZNôð†n7È·¨,¡yNÃcÈ,¯ƒsžèÏi‘ëøÞ)îEŒ&<ñ8–å_Öªô,VÍÁ7ø’CÊMñ1wm“x½[âN˜N£¡·awuq ÁV\á ë¼›% _ã7;àa‰–/¥„>|~vºÆÇc5°z³T•å·ˆ9­ę̂ì@e äP¤%¶/Ùp³ÐV³ˆp¯Ü)Yþ2òÀоú%~bð3Õ©ä¡ËAÆGÅEDÖÀ”»Ž)·úaN¼×׿h?'y~ "ËòçOÜ{êoÔ”pŒ¨t'‡ š(ÄAÀµ™" p!" 
¢R3ü MJ¡°)ay¸üdŸ…l¡ô51vó‹>žQ¾”¡RŒå{JU¨Œjì#7Å^q+¢êsWߨqeg±ãú:ëÅ2üÖRüÕ˜4,Œ>¦:£nyr“ż<™³$o©a1ÃÔºI—õdAäËâbü+梗6£¯SšSÇ›TsºšÓÕœ>·m·4ÆÅwÅØ?ç™ß%ºL,Ïï¡ï,(EJr‡„½öJßo„m}Ø©GÄØ“I e¿:¿PI¸eIðñ_‰•*+Ó60ã1(H·8ïÎs‘ £†åº &5œ›é`& ]ZD\BEhR6¯,ør¡°|—×o˜µ¯fdMë÷ lÏn¢ùÄDþŒ:Êü:†-páÄïÄs¯ÃK×™µÌiU«ÝÓ;Xs+íâQ¯3j齆6Ómuˆ¥õI“.:¤Ñí·›c»Óïb„%Rå¡næýÝ7UttmWNŒ¹õ «Éù;'Ð&õh¨í{ÇE3/byçÀ{ؼ—gìê{,቙£%–bp^S Ð¥ "žôêvµ­ÿ‡Šî¨?»Ž#rŠç~jºSW?®uøIiïÕ@Ó#ëÜmëmr¯×„‹qå˜Í²Lgb~l}ê©=Šp¼I-4gsW¦U¤ý5¢Mº“+‚¥r ç1—ŠLA]îk#K¦£­ë’ß ¡ð‘(Brû'_‡ Ïù&Scˆ«óçÃשPªNuš½–zV{‰ÈDzIrc‰–\EÌ‘(ØxDB\`æÚ$o§VÛ´j›vJ®>µ‰¥9÷«þʱ¯ûµCvÈ+uéÏs §ÿ­1±µsæž×*š\s¶æ'³¹[md îû\ ‹zª&O/Q‘.mKþñ¡*|<¹ñ›fKI–lS‹GXz–R%,«ð?ËÎìèKuØÓד%†ëøð” ùçÔ–kÕ©`u*Xm7«íæQ·›/èÀ/Û•6>k)¾²¥æîD‰‚©¨ˆÈ댚¿cB}“°ýäè^¦úüv 9¾¥úüF‰UšÂêè¡Jˆˆxv}¤– ¨¸¬#¥Š(Ù ±køÑ‹”ZR}O¾>µ|Å9Œ¾Ïfšú9X £g49‚U<3èBg²1»ò'›Ô2Ä]_îÓ/wõ}÷ æ§æ+Vøx’ÍÏ™-fJj®׉̰íòøfد‰¸lMàœcp‰9GÏæØSóÑð—(H·8/¬nåb5ÀŸ¯âá¢CnRª…‹´ø·¾ {ZÃý³Cx¿ßjë.gSw<½G¬òš®%tj+ŸÍðNùÙè7ŽØÔϯ?ýCù’÷˜”’s% ê‡J@þ\jª2zP¢©*1§¥’’ZÞ*ÿ¹ú“þ¿¾ÿÕÔg당MJÝŸGu3é£zà»$\« ©³GÃúÌt¼ÚÃÌzkÞÉLT½ëé½^ïI8 *üQuÉ=qÕêxc_½TY¾º‚zªëObvâø/ÔAÒ¯zÑpjzv|ˆ¥Î ýí—¹k>Œ|ÿN‰û=>c»¥¿MÂ#û¨Dóä²Û¸eÕÉýûÍnŽÕKåŽg_­S¢ªI;“åOÍZ7ô½!kà0n }êDÝÒÛz¯ÛîwõFß –¢ÙhÑß½e?²Ç$@¡Óhu¹u˜ÚzË NU£Å¥Q;Ÿ.Ã< 5u«yxÃøÕÈz)é\ý[ªwy{ðìLzaóÐéì´òÇÍ:0áÝV»Ñ9Që†uhRKÚì½^¿Ñè¶Ûôÿ ëpH¡wšƒ~³ßn7›8ã’â¤ã𑺱Š?úƒX‘2ü™b™Ö”n9Ô'²öGõ—ðcúÓÇÄ ©–@s©&ÉCjÝœ;õ{£¾FŸõl`ê«ó°þ|t}7Ý-‡!—ê?IÀú|Âbèêëë_®2v¾ÑO¨Ô䓟­ý ùi•gÁ«ï¨ËÇÕ‹ªOM¤ŸÊÖ«)ÐÕÜßøk3Nñý²Ž Á¸]îP Ãèt ÝhòY–]O—²Jà4uŸù)Ye컽F®Ub—tÑUbd†d˜*^âLî^,¤)ÞbA…÷õN¿U¹’R]Éz½c±`áºnôùThçÓeX¤¦¦f!ë/ιŸþÊJƒþÝ@ù²bH¼U®þªüàDdvõxÁôíb \lØñ¸Ê°öGãbùŠ=¿;€|Hxr>•ŒÁÐõýùðÞŒïŠa-Ø+äµèÔ›v1ÀØtCRÍñîi'e— ÂJ–Yžä0-G-˜Î åè!òf0è=g9xN<›ZÈpŠ3Hyä85Ó2‡Œd·o-×aÆW0º2X«ü[dPÇ[¶èЕ7¾±sèè¢êxÚÜ©Scäy‡"&§1µâ¬üÌ]P‡åG'`±õEÔ]gÔie7êG}šwÌbO0_ˆ}–x6oÙx……îÜx1ESìümA?ü‰ â±ãÀ¸@ppã)Ц°•e $‰‚ìEIÖú¾Ìe«Î½ÍÅ2y4õ¨,ö'Ë™ðÆc³ßIlõº@ÂN+$f`MŸËeËßoß1Ñq)Þ ^_{^#a\£7è7týâR¹X„äó»Oɫӟ#7|Ϻ7t6Ço€×Ùìi¿’€‘Q_äýyØñ$¹Ù“æv@ËÌÁ1ß*IÇgnmøêÉ,)ië“ ;}» ÷ûhn†áW?°ÙÇÿtžßߤ_ø›:ÖëE<~¬S耩hú»Dœ\Ù=SoVIšˆà\·4ÍÞ] ˜{·íøMØsWÞh£ -Ñ—îÁ†¦ë¢¯L10Ý|§9Tˆ«*ÝfÅÄÙÞIrüL†©*"6wêØ6ñ†1rO0ÓŽ‹èÌætò=Ö1{(ŸYb\ôÔüa".F$Í–FÅ/Œfy!wòŒ’>0qñçFÞêbÂúÃ$‚ŽŠš¹ ȨÌÑéVI‚–Éè9œÓ&Øj›Eò‡Õ¶¦ÚÖTÛšj[SmkªmÐJ§”ÃбéÞ& ™".Ta`!FÉ•Štu%!ÙË ÂÍÝÖˆØZfjð]Á{Óulô¶f¨q$Ù_ùjÆ×úbnCäpU{„ßÜaèÂ*wÛ˜ ‘3#ô–³ÜÐ ã3%Ë¢Çaì€å l{Ñmî”Q=ê‹Ò`êÇ«j’z^ôT’Z.ºÆátG×A¯óÊ1?7ÇÁ¹’âˆã;à[o}Þ¬™3ú¿c9Z3Ó3'Äþ)‹ñ~As¬‘·$C OLÃáç´z-éJ’Bq±·l ú@Êvp…_;@VS|<ض4=.Ñ¥IxÚxi›ñœ ÿ«G‚dLâ¥Ô¨RDIìÄ$oÔ…cߨƒµÝk¶»=hŽmk-»;Òú­¶©õ;£^od¶uÝîܨK9‡ˆ<©áSû>̉÷úú-­dõl«çÏŸ*dÕß0œöͲã]õål­<±`Ñ-î©Ö¢t‡¢sHR6íÝ™¸'{2¾±?sl×SÈKc ¤«“dz(¼ÔÉ)æ´ªÕnu°ô'ëbcܳžÙ׌θ¯µºÆXµÛ#­Ñ2í†ÙÐûÍQþ¶À7Ñ*Њaºz³Ñé÷9ëvv>]FÑRSÓ¢‹ŸIô¬œqµ{WÞz÷Nà{Œ«E´c§ø2ê0 _«Ã›V´ßÛçK¹•i¸Ê[xò0„ä.‘ƒÀØ7nPÄ—j ‰.N$V8Ö¾¸+»O…DS²·]Y ŒF·¦ÓŒ8ÎèÇáΞÞ3èO_hÊbŸX@Œ'°­%¢*A`p® ƒ®îpñØ(ïd’½Äo‚ȉ?x´QŠ£C(¨³"5~\¸Ü5~\h5¹¾‡š\Ã!ümo¢9~ˆ>j§ÝÕû}³©;ž.ÇGEiê^ºX™ðnº€Ò “ÊôÛ{ÔNöX¢ª]¯ßíè§Çð´ržO€a4šÍv¿×îõznÏ0ú€'à 7€Q3bØv[˜àÇ8 "™Œ]«¾÷¡<Ò˜¬9ʘ|CW19œÝÏo'ƒ1jÍn»a4zzƒÃ"ìyZ¾uO„wzkÌ!šºõé2"{ÄË@Š@ðÁŒ@ð áˆ@ÀÀ`ì»:8b•ìU%{N²\—ˆ“‹?&#Γœ½ûn÷žïh<�Ú(HÅQŠ!ÔYxL†—3&Év &p„c2œÃ!ümož–¿9fÂ{½Ûh4p6dãjÍ1‹f§oðî v<]Î ¥©%ÄÇöH—+c,ÑL@«Ñï=£Ù:é˜LHÿ5FfìÕí,{9~sƒz R®‡jX#Іkrm#ù]oý*|³ŽËùý]᜶Þ7Zn«qÞ‘ºÝ”ÏûO-Oœòù@ã¹(Ÿ÷bìT‹ž®wfÏÀQ‹`áy9!]ž´äc…|sØÅù{ÌPÚ¦ãÝùÖÍF¯ÝíqYóOo]ƒÙ=KJìEd ò07æRÙùº›╲{!@zKØÓ×ï^ÿ·òñí›××|K¼ °Szêʯ§l=ßÜážb­WFSQâ§ÒýҕΞXx¡&Œ]e•|067ûeüåÎ,¼sæsú™¡¤t¢[‹ì!gâјü ]÷ê@»Ùkê-^)ÛŸ^×ôÉh1Q>Æ%´ÇÔnŒ7‹³5KDmµ£ù ¨Ò¨ÝZûv§[x6 >*cª>Ê•R'‘µrHÓÿÖ¬ñ¤°€Õ5%!3V’@_ìS‘_.âpo85ƒ•HêÓÅbꌣ‹"ï–¢)ó‡hê{™x7%hf¯šÙddáæzò\³f4ê!] 
4jµïÌ ]çS „¦0·ˆz!L|¾ù}@-ûËÓ³á`Kå~] aä±3$;:fäxõçÚW@^:)S4ź»ÖV~`»œK:u-Ei*»u©Äô¯”/?¿y£¿¦Ç¿£>A_ùá#Õ©¿›Qú ­ñêVùáYËÓ!}U Å8Þæzƒk­xTÓX$‰"±ø´0Ðoì¦ÐV1ÃüÔ–’(brsÅñ˜ÓàJrX#®uï|“]þ’Ÿ YÓG Ç¥/]X[Z‡®FÔ˜[®ãQ=™›AÒaìH'Óç¤Ìê‰Xß³TÔó‹ÄÖà®UcÕouÖ¬ŒCæØŠçG ;Р?‡LÁœñÃÖ¯”„OB|جÀ™·oo."ÿ{}wv…sÚ"eåf*´?Ö|N!hºË i¿Ÿ,Q²sUX26ýÔI-“ðR'Å ˜E¢êò‹ÒR§!|õÜBÕîzám=Yß2^ÓÀ£m kñÑvö;L ùLFb!:Ù/‡ùÏž ê ª¬îÚÑS•øs¿ÒPÓuGÔON§ÞÊöøc…5*ýÞ¥rß ®[¢³0§»žzõLRf(jóôÖ›_¿ m'd~.Þ SÛ˜W/²úKNl‘·úÄvük¯u‘C¾¸d ÐW¢˜n|“³25ïéJö¦øt¨Ž!:¾(Ýt!Ú÷s=䉆=ÍB^ûðá¥k´û–1ÙfÇhéã‘Ù%ý~kÜ'z»Ý=¥PÈk¿óà‡1 Ÿé*f0Yħƒ#øóÀgÇõC]!¬Ô›±w}(â»Dºuó’Õz „áT‡Ñ­ÏÈ0ñ))ÔÂ÷I"sBGì‡ ª¶—âê•óÆ°ŽèKyò- L– wÈÿ-ÿ-ˆw_ÁÚPäMÆ~pG_¢- `(lma>ù‹šÌp »–-éP£­[s XN9Û°³$hŽØÃÑ”(–»iWï~%¨ÖÌhb:Ì®ïß-æ%øË[Óæ…Á€iðŸÅ‹BZcHS‚ã+¥å’º›ª`HwáRš¼Vé)©Ûé&‰ó€êŒT1Y(Kš/£©kûP“:{iã†r…°Þ±<‡þß#Ãú÷ÔG ж—wΈÎ*Iz33­©ãÉ™^)v–q)ÏyœùžCõ„)=þ˜—Üö`êBä;ÜI$Ó÷Mê–ÉV`åX:ÌL{¦+gQŸÐU „e]Ž„t×J}äÄNÈ4ütÒ³FbÇç+µ‰M#L!ssVKjß4ÜUÒõ'ad†SiÖzÊ‚v&RBVÔ¤QoÙLXPåì„H@«šMîqã™ò|)ª"Ö]²,¦¢äŒé×)õØâ³3)¾N|lJÝÁ…-ޤ95"¦,ã9%îL’%[(£)ÍŽkµ0üm!„e±÷{N¼! ¶”}ª¾õ¤úšzn‘ú3¢Øq™WîØúÀ¹{É'ÕÝ£ž_S°]ÏwF£ßy.±Cù§8¬Îè€Ç͇ a!À7 {BPì_æìÏù0&*Kg!*ð"pñßõUòLÞÕ«mäØÔÂ{kµžÄš7hwºMªí]ê,çÆOrnÒ,œø¶R'Û¬ö©ˆ=PXé=(kçp´I0´ü… £­ëº üÐù7¡ðìr,ø„£.SÌŽa7e‡CêÏ9,;°£ãáúó¬§Ñ@žóm˜öEj£Ù =IÛ$ˆÓQ-q+üí%ü›°§·®ËŸ6XIã*•8Š)¸.ïzË‚ë²ÑîZèÂ|p”aI\|zÇúQ—Fô…©AŠG ž0ŒÕìÏsfýzìÏ& ˜)ǀɅQ1­]›7ßô,æ@â6pÌ‚’ç@ÃxÙs éwà,H}¼j`̃Œƒ•îÖüo\‹À‘æÁ _ ⌙aL†•L¼ö¯â0u?p&ާÅOkñÓÕ,(< ÞL‰u§Œý˜W*Œ’r^ÿÎa(ŒþxÏ„(y4uÁ®-·bzoCþÏj*þUyûéó뿽ûåÓß•wÞ¼~§¼ùðþýÛ7Ÿùð^ùéÃGå·Oo?Ò-Ývºô_:ŽÌ½}£Ä”#áTÑ,å‚XS_ùOR”¿üE ]BæŠ~!GâÊbÆt Ûe²fwtdm®¨ÿ£ÄmÙ^+N·PÊÿ¨«ç>¼*ð¡׌n§Ý0:Ýf«Öhvº½žÖhôúšÑìô;}½Ýíö»ÝF‹â³Ÿðo_ÉjÒ+”9™ ðÁ¤G1 9Àªü:}«zÖÐ…ÊÜj|ýÛgŽ¡¿@‡²ÝŸÑW³m±_w§½  åóTõ¨ÇLb¦ëþ>Ä~ÝYkMiÏ*‹ÿý ÷ Jèùm;™Ó‹xµ`¦hcE Gý¯JÝ&÷uoáºJã¯1^DŠ«;w†c߀“¬¢:ŠÑï>eBÃqÂË94ƒ ŽW¿êÁ;ò@ÙX_âÞsÞ&¶Ì!KWÅm­__"½{Y8‚|CnìØ!®= [Àý .ܼE‚ž:¶M¼a,!Äm53$¸ˆÎŒå¥øÝâ<Õ°JÂgE¸èÙm0ÉÍ8XÓŽùrIÚ nk]sD\9:·º~ƒíWµtê…$®ôÓBÔî9p‘ –? ÀàAv2*3þa4”¡42:!lM‰i“yº³)žê""ê½é:6³ ñõY¸Ø_M'Âñ€Ö ™.ØNê !7w»¦· 93B¡°FC/Œ»®ãÙkÂy xĺy ò…/ëÚËlÀVÕ ‡ÙVßL\-½üÈ=”ÜoÝZ™Ìëx³,aw…LãwÄÆeµ"žóŠXÆ:,¤,¶VÕ$­ér (OiA<«,g,h£Èkà•1!Ì|‹{2'%‚_¨N‘_ °Fñt–AáÚD¸`x]¢øa`M"\ V=âOßí5ˆ&ÖÜJ»ØjŽŒÖ¨ÛÖzdÜ ]Üïj£q§©µV¿Õub5Šw1Âò¤†U'\ìCøÛß„=Í_ÌöÑw9®¦€¾eÁb¶Ž!œ£ÿ+Œq}ÿn1WqÅÌAÙéÙkÈî@a¿¯=˜3·öGC1Ã¸Îæ\ †$Ö‰—Hˆ^ p–¥rZ³ÑíuºZ£¥·´ŽÑnëÝè5Øu• åö}ùJRƒ É]sèüN¼>]`õçË5C¶‚Ÿ^Iݾf¢•ÔݵÚÝ~s$PR·Oy`u¼/ŠWP··íR;‚{F w N1݉½”h)Ýþ.¤Ûý…tûÀà…tÑ…t‡°À…t<€àB:(¤ ,¤ƒ óÒqcB éøA…tP`te€Ò@ é¸ñyÞ¼˜[âsÁÈ´jìâ;?pþ[ æR±cK´)³œ{“ܲÍö˜ðܧJÔ=Ác4KÄN˜·y0*íÜþÌTðç¬|ê :|"‚ª4µ%Ž•×i]Öí%~ïp“ž‹÷Näß/ ÷ùžG§° Ê%vHœçG$t*âò²šÙê¶ÅªšØÙÑÀbô±"Ó¢RÂj‚ŸÝGCÃi×Í—…UÒC!•ôÜØ€JznLx%=74¼’žš·’žQ°’^Ÿ»’ž»’žVIÏ+RIÏ ¬¤çÆÅŸ°Jz~XH%=7*¨’ž^IÏ-¡À•ôÜÐ"•ôU„¥Š°T–*ÂREXªKa©",U„¥Š°œT„EŒJ‡ž“J‡F¥Ã ¢ÒáFQép£‚¨tÀ¨*nl~*$J†ÍI¥å§ÒáÁ-f” š(•ÎA3PÞ1Ç™L@¹Ç/w"^ï|å](}1¸liÚ ü Êm!qùß„= £Âø›ãÙŽ7ÙÈ}Ë¢TÁ®-ƒ~b¯xIôü2±è'Ä%‘~¢[ë5ÛÝfCk´ºM­ÑnvÚz¯Ùëµ:ÍVë ÿÄÞo_ÉjÒ+”9²ÚŸ§·Þá”ø÷"ê5û¹§Ä^}‘JˆÏPaR‰ý—Ûò ­Ä©½•(¯Ä±%–ØëA‰%ºh b‰ÃhüÄ\î'„X‚Û+†KÀ@Ä`±™“X‚@,…K€Ñ•@,…Kðãó—=pc¢ØÂ¤nÉÙHwq˜RxSA¨OZ!:n6ñk9ç–ö½åÏæ¾Çòí`‡XòÎU°DÏ™D¼5‰E´a_ý`?7)¸)KÜ7(•É@l®Êb“›Ypû ‹ï52È*Ï {Ñc›FcÁ ÁŠÜÑ&&©4ÛVà²X¸CC»Å u£9A F00€‘€›Ÿ‘€ÌHÀ f$@s2ð#Š1ˆáó2ð£ó2AŒü¸Œüà0F~\ü¹b$À øQ!ŒT0#[B'@ ø¡ ª­yµ5¯¶æÕÖ¼ÚšW[ójk^mÍ_ÐÖ\¨”ÏWÊ•²óãBJÙùQ!¥ìü¨Rv8*);?6w); VÊÄæ+e‚r—²sá3JM°”ý0_);_ŠC©{0¹{/ü=¤¢æ•WÑŽØ.ÄŠöÒ—oΊöæï*fD1Ęe{~„mù‘·ãXZ¢5uþ¹üPœ0­¨”ôbû+~Z!|8­°^Zƒb ÓHÏGk ÜIoFßJbd¸LÝØ¿Ê•.<]êJ—û´ÞÉRDtT ]Äø5žÃpמ¢“;‘û-£ÕÞ¸ öÀÀlý’xð W€ñ. 
Ìø±›ñ£¹Ÿñ$ gHoqÞ](DÌ ¾ñ£×ÆÂO?ú¤Ñ™­¾Ö4Ú}­¥ûZ¯ÕkÍ‘e™F»ÙÑ{Ýâã‡0y¸ ¸{ãè‰[üÈ© žÄ˜q-ñ¾ì#š);JIi ñ *nro !ˆËø&ìé­D?“èKÀ›8YéWs®¼õîÀ÷fÔYÞÆ}Ë‚D½®`×¶f?±*e×÷ïse—/”žLÑ'+OÖ’®™9¯=˜3·öGC1øüYxÀ·T@þàPÁWl¦\ ”‹Â/–¯WLÒU5+Ë›‰Ñ{D0¡¹^®²¾VíÇ[‰w¶%lu08Âæ ç€ØT;j¬wcňhã2ú` Ü¨I™³3ê´2=¾Qo<ן¼#÷ÄeO8ÞØgŸMMÏvÉ[VŒ”ÏÁ‚Üx#ªœwoïéôÿÛ‚~ø;H¼ñ¢€ªÍÜ¢ppã)Ц0e(!}–DìEI†›¾ï›éL‘ãÚã¼n\,“G¹A‚Åþdxq\?>“`'c…7;+OéÄÖ…Ïöè”,Âç `S‹5Àhtk:ýǸ¸T.Xëé‡=½gП¾:Ñ”…ò?±PþEòò´uˆÛ€K|7`e¥1Õ”;òVÕÌ·x~ðD99xÒ6Lö–¯ ïoÓW¥BA]--tÕïÍ8pÀ·‚£ +ò?ˆ·eÝ[9Ï»Vlõ»r+$s×X÷,÷pNÑ­ì·_”[Y =Ð>ñ²X±¸e¢±b K<"+V¯ÖmµZNWk´[ÍèÓ—î6û–Ñm5i±öýJZ£^¡ Ê‘5ÿ„ˆ±xU·1Ö¾ÝàA€k¿BÀ˜±„g©03ÖÖKî ùæ ‡ëä^K”ëÐp ³cGlèÄx~–..7ÂÒ9¹á&fÞKˆWÁYº ȼ,]ܘ–.~P K] ,] P–.n|@)pÈ­¹U · äžc ÍZÂè| À:nl7&œÎ‡NçÃÍKçÃ(Hç#„ÏMçM0ÂD„ÑùpãŠÐùpƒé| IVȈüt>ü°:nT?*œÎ‡[B'€é|ÀÙžÃʇ¯|øÊ‡¯|øÊ‡—ÇûÁÏÉûÁãýàÆñ~ð—@x?¸QA¼`Tï76?ïÈûÃæäý€òó~œ~>Õùd*|Q ÁÉAl³Ã{ëÁŤIx~º’BäõHð‘ŒI@¼ä’h9¢¤vb’7¬ ùFܨ†5ÖG=ÒÓÆm»£µºã®62ì¾F½‡fwÔ5 bݨK9qȯÔ3TDä‘g2e2±›É ÷»Š& gHo‘ÖnìPôžÀ’”M{w&îÉ.¿ú}[ñWñ9Al [`JsÁ³(¼Ôɽ•I¤ÙìbÍ­´‹;]»1&d¬é}«©µÌfC3;M[#v£×¶Æí†Þì"ìà !ˆ[ô›ÅŠšì¸ê‹ñª™ö7gKMÅ£3okIGz4ûTxÎ&ðŽ:­Ïb¶·öd¿xP$±ª_k÷[ôïiº~£Ýíô;½^¯ÝíõºÍƒPû¿~%­Q¯PåÈZx2Püª[¤ªÑ^„óÖWS j¿B@*  ÌRá ¨­—ÜòÍNÔɽ–hԡᬀÚïÁpT@mº¥O‡½.±Ò'\@éwéoLŸ»ô àló—>Aï%„Ä¡¥O0d¾Ò'È‘ wéTúFWþÒ' (¸ô €Ï6 ÛÈ? ÆP¥ ë¨F« Ë+Ó‹‰.í('b˜ÛÒÊ#‰e»9ˆcΉ…?óÀŸÐ wø#1m×ñÈ'–²Ÿ¿vt¹×ö3púÞa³ìïNùÁÃ;g§\دUÈ95±Èì;¢÷‡wÔÁHE ÈZ²7nûz…+™š<ÈQ¤€îÝéÎ?=à’¦J3óÛ§E0‰­h[ÿYZDÅüæ™÷¦ãšt ;‹ÓøìR¥krÒš™Ë–aâ¤''¸rígÙ6ô)/ÇŸî18rÕZw³Þ¸5¾³¦R{D£/)B‚eâέí—LêθGÕr®7§Yúø"UJ,I›FÑ<ÔLÛfÎðÕ §÷šçÈ“ÜÈ 5¶·¿ª“ȪӟØE÷t׬ *­wäa{#X„ ŒFX¾ç-$V@"V$í‰Ë&ëÉÇ!ýoÈýaòs) óçÄ §Î8Ò²­¢™ý_•µ<µe1§ž#1gWLaõú* ËTÖ¨—ÜŒª€½½¶ÜðêñF­ß0›vó´Áf?ßln±oÔKúq¶˜¥å6üaò@\F¿ùÛ|"Kòà= FÉsݨ˥*µnåv³êÌÌÄ«þsÁÞÖ¯¯º¼îÎÄñ4Ÿ]6¢ÅScŸõËž «ˆMN²l‘qMB) À£t ëËòµ‡sÙ2sYŽèløâ…N•.syÖóïÞw3ò+³ñ/Jÿfì®Íˆ¦¨Ïûý³¹Ýìôú:¦&îÞ¹ÄÆ…9%ªô-å¿ôc9ݺ9¶kŽSÙ£›úi©;WÂ@J•p+ ]¢ÊU<®ÝÊЪšfÎþœ©“Y‚«Lõ¬3óäxÔ$Ñ…Û#VTÛpdká½U³’KÙj±3>hwºÍºé=XfÕŸŠë©‹p¼I-4gs· «Kû/.ü,Gã(Îé+úv;z9¯Œ¬2m]/éßW‡$ª¤÷™|.<ç[Â6ŠŸËÒÁ°ì4{-¹+ö­ÜרÜ@ǃ¤ò%œh£À±'„kã\¼-™G”•- ²u9_÷–οÒÚ˜A³ rf+g¶Pœ;f+,c™b»Àô°¾YÓ~Ö>¹nòŘ·ÌÊüúßÃë?ß¿þõmYîÞtä§À?Dg„'ò‰€âð…ܸR<ðåÇož-eYBD-Ö€Ò²,EÒòÜWÊ㺩Ì^—줦Re‹ã:Ù9'û >ÙÁ_:eV’ÇPÖÊA%F¾åÇ,eŸß\«Õl¯¶f/~kö‚Œ²­Béã·–©X–ô€˜ö/®‹‚©6õ¥¶[’±ÍR^'Zï3 ãOÔØw¹¦ý±7çl^òj™—±;Ì^÷Wßfoßjè%g’÷{*ÉË*(¸cšÍ£‡à0ÿ â¬_æ|GÄZ‘æ–bDxíäÎÒrÙGVi"ožÓ Zר5J+3»kynž-:*n;ñÐplðBF80ÿ…Œlî ˜Ð ÐÐ !Ð|2….dÄç¼€Îy!#r!#~!#t!#n@.d„Àò_È@\ÈA…^ÈÁ–Ð À Ðð aK¯³L”Y†ÎËönP™…Ĥ 2 Á\/ıà)E‡÷„8³\7³´³\/³ÐÂ,$ÿ†³bóãLœÂv ÜC×ÛávAtŒ^»ÑÜv{€‡h;*‘ˆT»" ãà!“à!ú¾7õR%Ûâ%õ€µ¨¸TÎ%Eù‘X‹Šë¼<í“‚|‹ŽŠÛN<4$$ïH0 › ÍùÜ墸À2`”Ôdªó€„I!øqs垈­e´<¼L/ü¨÷¦ëØèmÍP‡¬+ÄÅþjƬ‡Y¦`ŒèÅÎÈM›; ]B˜ÇÔÆœ¡0‡†^·˜))hBT‡Î”E5ç1f\/ñÉ|í†zÔ7¤ÞêÔYhIÅzATî‚wîùdzžOƒ”ϵËk!£, <ºg Ù©dÆÅÂzÇPOj§eÜŸ©}¡Kä,®Ë†Úè½ß±ÖË”ä/é  tfzæ„Ø?eô~_Ð×ó—6ù6ó#ÂÏ)uFÒ)ÿ”$…âÊØ©ãúÄß"ÃÿJ•ñ#“€x ¥îôÔG¬€»Áãºpìup£ÖXõHO·íŽÖꎻÚȰûZ¿ßjvGýQà ֺŒÛ$¥Iø¨úŽê‘DÛÍ'&K¨Ð±˜T)[9Æ$J̳I”ÂK.UhºÒþÑLgÉ6@™á~bµ*GÚ:¹Õãòã§ôMR®+YæXÚlá&¼*Ø?Ò‚œ,N,”ÈoÓ2}­„w«$a»I·ÊjÀ3"-‰–Arä\ªÝ‘êåä¬qj•0¯bw˜i\ì'Õc©Ç+]x©;›ÜI N‰³ôë²E^/\÷Ú§îC¹ÂK4_+;±œ%lD‚™ãÅQš_éÖ…uy’çxÜ6aÈŸå—c9J·Oör•ÔM–Œ\¦É<ÚËg.f.¥÷âWö¥4ÁåH’/¥ ‡rÝŸÈѸUNEåTœ“S‘½øÅl­¨ŒÎÚ¸¨ìfUvÏãÏo®¿§îyÝ1W¹Ò%çêí^Þ[íIª=Éw±'Ù f¬v(/y‡rÚ–#y­t­Ú W»áÝpZÁRí„1%&M/~y_)QŽ»IP‘6Šáv}¹Ûî¶šîJ8ÁÎÚ.¥ŒÝ“ ÉÜJlйëÌw5tÑYãÝ,YøcÙã‹ñCJ7ëìn•¨ì@e¾K;°Æú’í7¯l51‹/7q„eä%}õK6üÄàgª7rƒYÒÐå ã£â""k`ÊFÇ”[ý0'Þëë_´ŸÖ;?P‘eùó'6=õ7²I¸F=º“Mâ àÚLÎ>¸Î>Q)‡9û„&%€¢GØ”°<\~úÎB¶Pú𻇇CÏÆ(_ÊP)ÆÛ½¥*TFö‘›4¯ˆ¸õô¹«oì¸2Ò²Øq}õb~k)þjLFŸÓ Œ#·<¹Éb^žÌY’‰TÖñ¥¦ÖMþÙÚÊ‚”pžÆÃáWÌE/mF_§Ä¥Ž7©æt5§«9}:ÛîëÃÈ»àè?ýfo³òi?nÿ'Éö(¾+Æþ9Ïüþ(OËó{à÷劉*CJr‡„½öJè²p‘;#Ra%vF1ödhÙ¯%‚+páb”%ÁÇw~%6 YÈÎèMÛØ½‰ÂÑž[œwç¹HQÃr]g “ÎÍt0“„.-".¡"4)›W|AÅG\Õ[œå»¼æxãÀ¬}5#kZ¿o`ë|vÍ'&ògÔQæ×1l '~'ž{^ê¼Î¬eN«ZížÞÁš[izQKï5´ñ˜Žh«C,­OštÑ!n¿ÝÛÞ¸x#,‘*u3ÿëᄅ¢£ëhC¸rb 4È­·PXMÎß96±¨GCmß;.šyË;ÞÚ.à½~l}ê©=Šp¼I-4gs—HÝ,i#Ú¤;¹"XrÆp³£ÈÔÑå¾F0²d 0Úº.ù R‰"$·òu¸ðœo2…0ηº„ —:JÕ©N³×:óXCLI²]‰–\EüRc 
ÉÛ©Õ6­Ú¦’«OmbiÎýª†¿rì+Ç~혅ÛJ]úó¬ÆékLlíÁœ¹çµŠ&×ßœ­ùɬAîVÙˆû†–¢žªÉÓkQ¤KÛ’Q|¨ Onü¦ÙR’¥ÏÔâ–Þ€¥T Ë*üÏã²3;úRöôõªÁÓ;œS[® ž ^9Ü%¨ßlµšÆóWÛk·È#ï7"°ͩ¸ÌÍ)j+›Sq¹Íi!Í{”†,²9-.¸9-6k/_ưä7S¥ÏZНl©¹;Q¢`AÎÍŽ££â¶yQówL¨o’㸟œàO ôüv .O.æù«¤†Õ/Ð%B/”ñìúH-AQqYGJyèÒˆ¦Ž,ðéŠüõ^΂JÓ‘ä*ñqŸ2ëê:¿q«¡Ë[¯Ô5žñÂùOÂmÁ_Ê$¸K’´šŸ··øÌ‘騞OsS¥ŸÏ<(s“4“ekðº+ÿkñŽ´ÇjáXøÖ\; HzJ+î6 §]ÅQf¶ÊGœËýRê6š\GëÕ5N\<wþ«[™#·Ó¢_âK^ãŽÜ”Úì K}bTÓ1ÿD"冓VKk·Ú£v§e÷Ç7ª25C%\X…/\÷AÉÈfˆ]Ã^¤D”ê{òõ©å+†bô-x6ÓÔÏÁ=£©Èѱâ™yDâ8“±Ø•?Ù$HÍM¶W¬ðñ´™9ž3[Ì”Ô;®=Hœ3¿&â2+æ³æ©ù'æ$ ņխìªXëòs*U<\tÈM’T£¸VAüÛß„=ýùõ§(_ò«µ2P³£¤åP ÈŸ Ò>UFJ4uB%æSTRBÅ[å?W oiüWSŸEfx§ÌÍh:Pêþ<ª3[O'j=ð]®Õ/ÔÙ£a}f:^íaæ ½%Ø·­Y@l' VD—Qåf%ÊÌgg á+%mAm´p\ú@í®*‘¯ä®æˆó)éÇ‚â;Ô–[îÂ&6ï;Ì Ó!aﮌý@Y1Q(WU~p"2»z¼`{ì‹r±Ó;»¸T.âdúÔ—§çŸrL².Ò—øwqV{îó›kö@d sÏ-o—¯Ž©°Ÿ’Þ‰{f§†>ûSŽÂ>¼!¬¶,§‚Ž»·˜+ ænBÊÆx[Ò˜3·öGC¡>£x9Ï)ÔšýŸ•úÿUyûéó뿽ûåÓß•wÞ¼~§¼ùðþýÛ7Ÿùð^ùéÃGå·Oo?2&H]ïté¿t™ÿzûF©¯NÍR.ˆ5õ•ÿ÷$EùË_”Ð%d®èr$þ ,fL»]&kvGÇCÑæŠú?JÜ–5Õ¨e‰fsåÔÕóÎ>Ôèß5£Ûi7ŒN·¥×Úm£ÝÑZ£Ó1´v·×èè£ÓÔÛMⳟðo_ÉjÒ+”9²ÚÿÏúDb<wt“ÅhÑ­8˜’ýr˜ÿì©Mõ8±eî.&ý<}—:mcmþ€¢·×¿}æËø Z£ÑîO‡Îè«Ù¶Ø¯‡ý;'xè˜Êç¨úP;bºîïCÄ7ݘ¡Ö”ö§²øßßp/·+ä®ú" "­øÓùC4õ½æ¹¿U0S´±¢ÈcýW%ÎèdšJã¯1^¤1óï¨÷·z™[æA‹Ü¨ÖÔô&„1gM7$ÅösªãÝÓÆe·„ƒ'fm˜VwaÅÂçÎqòq°›Â@ïù«[ ÀsâÙéNq)çbbZæ±0âö­å:Ä‹ä£+ƒµJPCu¼úÞALiŽ9ttëéxÚÜ,û‚a¯îŠH6I˜Ð¼<Ëðè&àT¡óÙ,«°6¢¯P[1%²º¨ø7Þ$fJÍC½cåd-ÑCÿ<ÄÒb] '–:Îæ£EÁ VÄçÏܸ(©á=ÁU²wŠùE¼õrÛ$]óU3î€lèz¿Ñh>o<_ÅàŽ/sµç›§$1IÔ7W‰.f‰Šx‹Û¼wÀˆ½:|i”ƒ³4À…ã-Ò§oUÆÍѽ¥ëò8Òp`ž¹x‡6챟Ü7ˆÙÞIrøL†©!FlîÔ±mâ cäž`\Dg6§ûßc]«‡òða|&"zºÂD\ŒHšýŒŠûéÃÌ(#wòŒÐµn˜& ââϼNJ ëŠxTÔ¹†_ýYÃØ|pÂh(CËdtƒN‰ilµÍȆÜQØÒ ‹Â±!Q 4gFÀ)äˆúkLj¸wÈÎ6W¦rµK‹bQAbQàNÑÄpDaz‚' #¦r‚À(L‘¨GW©â*•±©q•8£=†ŽM†Ù3¢§2`”ÜÑJ­ ÉzznîúWÄÖ²Àþ^ôÞt½­j|r¼aújÆ÷„cÆÁäpÜßÜaœþCqÛ˜ ‘3#ôV2ÑÐ ã3%MÛíMýx+™„œŠlWM 9Êò$c-a㌿a–¶Q^mMw>Ý»Cv&±µQ¼8oÞ-š=?ü_£Ûiè~¿ÑÂŒCÈÛÌ¡‘¢ýyRnƶÛÊ·”s÷W1ÃÖ!vTNª[ºo‹5¸3Ó3'Äþ);7ÂÛEcGå…UŸ¸ËÃÏi=lÒÿ”$…âJÙÈÒ¢¸9²ã¹+QµƒüYŒB«ÍãRJ“$½)}K‰‘â2+±á²5â™!?»Áö¿Rgâ#“€x)ÇöúÝàñF]8ö:¸Q k¬z¤§ÛvGkuÇ]mdØ}­ßo5»£þ¨aëF]J5ü¸ã¥Œe@J¤=‡_ƒé™îçÀkE€û(O­³0ÿÙ+3{ªÍqPþf@õûó›ëT}¥µ¡”M‡lµ\J—”t”!ééÀ£ikç’lš»&wËŸ‡È”“¾lj"’í[šH%a—'}ržl@ÙHª¨ÿýúú-åèñÑÍ©ƒ¿âþQÃ|nžTëÿ~’§òÕ6½Ú¦cí¢!ñåÙØ¯µè\ö çbг@·ej‰é­LºäÔ‚[Ü$léùl‡òؤlÆDÎ$ΧªªÅw-Ö’ËïØj¾:"e"–’T~šä‰߉'pôR§r–ðžÓªV»ÙÁ:´]uq³kv—v¬Õli­±ÞÔLÝnh}‹ŒÇýVŸ­Nñ.FX¬pÓPU+¹_ê—ë¸ôZÃèÖZ5£‰5?Ÿ™­åºkmÝ…h0Ä6äË_ë77Œº+™Ýü¥bÉ™ÿdÎ×9¼|pcƸ¿\ß·N¬G“7}xêÊOÔavɧȴ“+mÔ“hHBiÚp ¼éÙÈ´l8<(¿<©‰À›v ›ððT>R* ·Ð%Zo?·ªï}`œŒêüÍjÁ.Œ‹ðæ|÷^ÓõMûo¦kÒ}Np0¨Á÷–…Ä¿-ðMØÓ×ï^ÿ·òñí›××»!„ÞrÅ1ÈÞvÓÖž ¯Œ¦¢ÄO¥T‚W:{báĤŸŒ\’|067ûeJ§Þ9ó9ýÌPRº)°ÙCÎÄóƒô™ý {Z[ýÙþ½>õ^»¾Î'e×ÓÚî?Ê넌RùÌ8„?Eöú mÿ™MMÙ½3:oN ïôPÖnÌMõ쎌8ûbùê]æ©i2tÂÉëúþ|xoÆñD&zßrÀƒËOÃɃÆKÃÉ£áä„ÑpBA¹i8aÀN 2 '“›† ¡áFWnN((”†‚ÏKÀÜÁ¥JÖÑfÇZ¤öé$Lt¾³I êÞè¯ïº¹§¯±v1â;«˜ ["ŽÅáP¥HOdwÊýHLÛu<òt%yGב{ó #1è{‡Í²¿;!Ý£?¼sfq¡¢ý |Qäô¸4²¦ï8´…ŠÈ:~XMw½KTÄ%¶"EŒümò G‘Ú¯Ž7IÏÀ¥©ÒÌüöi$7ðµõÿ¥ETÌoÞê.ÕL¾Âà7hú¸6 '­™¹ z&N–Gª,€_¶ M¬Å$½]ËNp5êù•ðÚl2M[í®5’ÚØ#}|Ã/Ë2HqçÖöK&uç‚ÇjÇZÞK¦Yúø"UJ,IÓÌÙŸóá"p%ÎŒ§÷¢²õz6/âSk:bu-k»‘ZxoÕÒƒÿZ´;Ýfvi•]O§•çGÎø¡ŒækÚˆ¶ã®QìLd8'ãÛeìèå¼V0²Êd´u½¤7²üjða§¨’Þgòu¸ðœoe‹¯»’ïØ©ƒa):ØiöZªT9·r_Cufé=ã.Xî¥_Ó/“ìKêfÇžA|u¨ì)°Šœ%BeK»÷ÝÅŒüÊæv9Kñ£t éá }£k3bç ñü+a&¬ _HWPi¶¨Ò/åN>ièËËÊs=hÆSï2–)YõtW™ý·ÆÄǧ½çmá‰wÿ’Ì[fe~ýïáõ‡‡ï_ÿú¶,woº òSàϤ†`¶—é}$ãR¥‚ËeÈß<[ʲøZ-Ö€Ò²,EÒòÜWÊ㺩Ì^—줦R+/µòR•#z{G¶¼×Túø­”%= ¦ýÁ‹ó«¢`A*ÍÔœdÔê ‘sÉšC\3ŒKK„W[ÔRÛ}++#9\xmÅÁÚ÷™‘(#$Y¢Ïú8ŠÌæÑÃÎA¦!Tª„ø|Ã,‰üÕœ—²ëSÎÜåZÎe¹n|æFTšÈ¹lJž„жÝOd½_͘5ÕH¿¯d]¯´n÷üLÊ%Snâ—«vkžŠöì­ÏUûäx§žO E»ÞW&¾ ¨Ñoô fìq`™3v¶0÷mØlÞÛ°!˜ÀÛ°!ÐÀÛ°AÐ\·aCEnÃÅç» ‚Îw6p6|6r6nnÃÁr߆ Aå¿ „ ¼ „-¡`·aC Á·aÀE‹!a Å0tÞÒ*z1¤˜tÁbH°%âXðdÏÃ{B¼.‹»RZ¤.‡·Œ -† XÈ#. 
©.F¨KTÄ%¶"qCŠ)¨R|xAÅ…Ä‹!Å¿ùÅG×LÞbHA'Ë£C ã—mCQ Y°±G4úø†_–eâÎ CkySŒ;¥¼°P1$Ž´£CbuG1$–(îbH‡Š!±^ë`1$Ž ŽbH¬7â(†ÄUÒûpCâCâé`XŠ,†,,çVîk Co ¤²¸4Hš9ŠºSûB—ÈY\ð µ7hè¿c­—)w@ÒèÌôÌ ±ÊX¾  ê#ª3(mòmžª‡ŸÓú¤Sþ)I Å wR‚>@Ÿø[dø_©2~$cÏ"¡4Q±¸Úáàk1wƒÇuáØ7êàFe@ÝžN´FǶµ–ÝiýVÛÔúQ¯72ÛºnwnÔeÜ&)MÂG•ÐwTxªYŠèØî2eYÊ@…æê•¥JÙZº,Qb¾ˆY¢Î ×bÓ•ö¤Öµ°R Pf¸ŸJkË‘¶^a+ÉœI2@¹7I ne™ci³…·ê¶hÿHwr²ø*q K*mZ¦¯•ÿ–$lwåoY xVÍ+Ñ2Èqxʱ;R œœµÂÞæUì3‹ýá¤æ(õx¥ /ubg“;)Ü(Qb–´[¶Èë…ë^ûÔÁ}(Wx‰ækåa'–³äH0s¼8Jó+ݺ°.OûŽÛ†# ù³ôìr,GéÖãÉ^®²8c£Éò¶Ë4™G{ùÌÅÌå°Aüʾ”&¸Iò¥”áP®ûi^YåP`JLJ_¼‘])Q®Æñ{2³%ûQù圷Â]øÑú;}ó|‘é²Ô¼¸Õ¬Ú_Uû«jUí¯öWù2¹j£õ’7ZGV´|%o¥hß§›+dþŽÔ¼ŠWUñª³ˆWÉ=‰”žua{aiÞbâ¦Gf•)2´¦Ä^¸$x_Ž “›¬EàDo|/"ߢ²„æëê!³¼Îmy~L‹\Çܼ…ò®9š° ¯Õ4ô~wguÛjwtpc´HB Ž õèZÙ1ÇâÒ†R_ä bJä«vE“Xz§f‹QÂið«o“ò…¯Uùî÷„ÐDË—R¾X¯ˆ®ì@e*;ð]Ú5¾–—l¸€ª‰YD¸Wî”,y‰F`Eö’ ?)Û™êT òÐå ã£â""k`Z“Í”[ý0'Þëë_´Ÿ“Úo?P‘eùó§šrõ7ʸFÀ±³6Mâ àÚL‘Êu¸‘ÊuQ)‡+×…&% PMØ”°ÜL~‹B¶Pú𻇇y3ÏÆ(_ÊP)Æ^µ¥*TVBû‘»t¼ˆ¸Ó¹«o츲ÒÝØq}õb~k)þj\ FŸÓ Œ)¦<¹Éb^žÌY’ Wް€˜ajݤËz² òeqU²sÑK›Ñ×)}‡ãMª9]ÍéjNŸÛ¶[š ãbTûç<£™D—‰eô=”ã¥HI¸íµWú~ƒ$lëÃN="Æ D-ûÕù…JÂÅ(««ˆ™¯+UV<¦ÝČǠ Ýâ¼;Ï%Ü71ä†s3Ìçw¼cIÙ¤âûr¡°|—×â[ëjɵu÷ lÏV?1‘?£Ž2¿Ža \8ñ;ñð¾ÔyYËœVµÚ½†Ž5·Ò.î¶zº5n¶µVÓji­61´¾Õ£=Núýq¿ÕhŽ[Ýâ]Œ°Dª<”Dü¯¿›±£ëhC¸rb 4È­ìŠV“ó\Šh‹z4Ôö½ã¢O±¼sà­_ÞË3Ö°=–ðÄÌÑK18é/aŠ"»ñ¤W¬“j[ÿÝQF3 9Ås?5Ý©«×:ü¤4Œ—2hzd»meI?xm\Œ+Çl–e:㱓õr÷œ6›ŒGÓV»k¤4ò67i0P½­§9“§{ü‚>†’BSΣ/òŽ4Íœý9.W•xˆÁd êõL¯/"z|µ=|-¼·j–»éC5×·LwÐît›u6 èìz:-gˆ”ãA©ÏY!WÇ«_ K„’BŠˆxF É(*.ëH©"?6udOÄ_Î% ÆÏF‚°ñèuOÜ‘›ªXdp}©ÕÐ%F!R}ä,áËr¶Î™_úkñv¿LR÷ó1žÏ|Yc ̺0$f`M5Æ})i~TûŒr´RY•]sÛ´g=z.š‹»…BCÃiWq„™­òØp¿”º®ÆÀÑzu›ÏÇÿêV‡íôd—ø’×86¥6;èRŸXÔtÌ?‘H¹á,†Õì~»3juZ½U™š¡.,‹"Ž®û dbÄ®á&R>õ=ùúÔðQúî:›hêç`ŽžÕ–äXQð¬<¢qœ¹ÆJÏËŸk¤ææÚ+r¶xÚÌÏ™-fJj׉$Ι_q™‘?óYóÔüó¾êVJ¬eù9ÿ .:ä&³‰QÜ +„ þmožÖpÿlÞÑõ~¿×ì7¸šºóé=b•×tu S[ùl†wʧÈþ@¿qĦ~~ýéÊ—¼ ¤ ”Ę+i>Tòç‚„TS•уMP‰©%””[âVùÏÕ˜ôÿõÅø¯¦>‹X_ÌÍh:Pêþ<ª›IÕß%áZYC=Ög¦ãÕfîÀÐ[;õNþ`"ê}‘~So= 'A@…?ª.uR]u :ÞØW/U–¿¡n k»êú“˜$(þ u¹†±;œšžŸ8©³pBûeîš#ß¿Sâ~Änéo“xÇ>F<ÇË6ŠurÇ~³›êäRù‡ãÙWëÌ$jÒÎdùS³Ö }oÈ8ŒHŸúÃÑ_wzN«Ùéµû{¿Õ¤¿{Ë~d9wIÄA-¦Ñêrû0µ:½^¯ßäÔ¨]O—bpššÚ‡´ûvÎú퀲҅Ŀ£»é¸ºpê‡Ñ­rõWå'"³«Ç ¦~åbãp N^®ýѸX¾bÏïvë O‚É ]ߟï͘v‹µ`¿Ç…kÑ™8!Œcelº!)ˆæx÷´“2>¹BXiÊ;˜¦Yîų|Üáy8X½çÌòωgSƒNq)§ abZæ0ü£ö­å:ÌKFWk•ð€ êx ÂÖ ºÇä7˜CG×XºcE›»"éï`샔PÄ'ˆµÚð¹» þËN0PnÔú" ê®3ê´²¬ÂõÆ£.Î;æ±'˜kÄ>K·l¼ÂÂvè7^\'{[ÐbƒxãE,H3÷ƒ(ÜxŠ¢)le(I¾ûDQ’µ…¾ïsLÙª³Qc{±LM¬‹ýYá/\$Á¹HŽÞxÌ8‰á^—>|úД,Âç-` !kÑèÖtúqq©\°æÓ{zÏ ?}u¢) ý|b´#ÉÛÓæ©h£µDT¥Œ îÍ*³œ‹ÞEHWwd-°QfÃK‚-Y —øMØC²ˆ7Ú(HK4séÒîš®‹n‡c`ºçLq×±Ÿ›b¶w’ÌÈ0UEÄæNÛ&Þ0¹ôšš/\Dg6§öÖ÷XWL1åá/˜x=5˜ˆ‹ISŒPqc¦©aƈÜÉ3LHºëÇÄÅŸy«‹ ëéšÿ Ù!ž›aøÕ5ŒÍ'¤ Z&£äpJL›`«mV®6¬œøÊ‰¯œøÊ‰¯œøppO‚aèØÔ“OÃaˆf9 ,dÀ(a¥k ÷šƒps¤¦ˆ­e^*¾ãsoºŽÞÖ u˜ä€£b5cökÌMƒ®’ýð›; ]BXqG”]|Aa9 ½0n1S²,zÔÁ_P9¶×eäNÔ£¾(süxU]ºr… Z.ºÆát·×­ÜÐSwC\ )n'¾»¹• |3'ÓhüŽåV$÷@Ù?eñ»/hn$².q{ûDŹº€”—C‹ 'è)›Ÿþ¡Û# Û ñã]š„བ»`mÞb%G”ôÁNL2» uáØñE¨-½¥ëc¬µ ÓÖZºÕÔú £¥µ:ãæÈ0£–a§7¤.¥4I‚ž¡""DþnÀsâ½¾þEKóªýÛêÁn±óÂ>¶VžXhä÷Äâ1)ñ[û@R ·ö”{"vkŸ`à­}r‚ŸØÓ+åx…—:¹·ÞÚ×ì·°æVÚÅVoÔïXMCQl­Õ0›šÙìõ´fkÔïw[z¯­wÔcÇ„¿-ðM´œl´,ÿ^»§·Œ~»‹“å,§Ÿ5”ZUk‘=äL<ê%?H×Á½:Ð1Œn£Á+eûÓëšþ#-&ÊÇ8{LíÆx³x0[³´¾B[Õ!}a¾­Ò¨ÝZ{7/Æ!Ø,Ê©Œ©ú(WJLDš•’¥ÿ­YãIa ªkJR.¡$$ñ’N]KQš Ån]*1ýß+åËÏoÞ(†Qk×ôøwz×è+?|¤:õw3J¡5^Ý*?é[þØn6úz×êv•B·ý¾‹ŸÌ™®b“#] GpeæÿµšõC]!¬Ô™±w} Ý- ’^gNûk „áT‡UØÍÈ0qi)ÔÂw‰"sBGì‡ ª¶—âûù•ïȰŽèÊyò- LƲwÈÿ-ÿ-ˆw_ÁÚPäMÆ~pG_¢- `(lma[‚5™ávÿGÒ™F[·’§ýL7­l¿Ì’¤9bGS¢XÉÅ6»_ ª5ÉÓš˜³ëûw‹y þòV†¨Ga°5Ƨ±iEû³ñxàV$E«Òô/…ñbÌ=) Ú¶€Lœ0J’¹Ø`Ô}baŶ)Änà‚ÎF/Jç%€ûóoIØ~HÕžÑ)#AŽØÂÆê¹œ1ëW‚=fÔt|¥ë+}sdàyrZ@w!5™*R?€nˆ±Ëž9ñ• RÍ‚CÝ,â11ØÚ‘^«†¯–ïvz%¶Fû6ö0ÚNhùtÓô€Œ;vý¯Œº5:ŒI%¯„U£¶:*—feH`®Ž-9¾òY ²ÏºE rº‘‚ýçÂL)ÈÔS‹äôtv3£ð¬˜\ 8«)—£"iT·–íl“MÏþ3Ã3Ý,c@ÎäI›-Ç^Y3Rcž”–Vvà®Á©ñ–Òòù,^ÒûÉè`Jp|¥´\RwS é.\J“×.“Ôít“D‚y@uFª˜,”%ÍÇ—ÑÔµ}¨I½´ñì¹BXïXžCÿo‘aý{ê#hÛË;gDg•$½™™ÖÔñäL¯;Kø”ç<Î|Ï¡z” 
ÌKn{0õG!òf_î$’é{„&uˈd+°r,f¦=Ó•³¨Oè*P².GBºk¥>rb'd~:éY#±ãó•Úĉ¦‹¦¹9«¥Çµoî*éú“02é4k=eA;)!«äžªìú2);!Pçªf“{Üx¦<_Šªˆu—,‹©(9cúuJ=¶øìLН›RwpaE ÆT!iNˆ)ËxN‰;“dÉV'ÊhJ³ãÄZ- [aYìýžßè" ¶”}ª¾õ¤úšzn‘ú3¢Øq•YîØúÀ¹;DúõŽ“êýn¯Ùzþ€síý9 [ÑEO–÷%K«Í=pþ{$Ê'v(ÿ4‡•9ò¸¹!uÍòFcOÈŠýËœý9ÆÔlé,D^î þÛ ¾JžÉ»zµ›Zxo­Ö“X—íN·™%²Ûõ4÷Æó#gü€ÕXâ1}°J´ÿbR(ÛÙ G‹ñ˜CË_xÑ@1Úº®ËÀ ßiöZXð _¦˜øaÃ!uÞ– ØÑñpýyÖÓh  Ïù6Lû"µ¾ÑlŽ…žähŧ‚¸þöþMØÓ[aVã´v¯žWÄÄ!KÑExÇ[½k³sBë*,c‹KïX¿3žßˆ¾05HñĆѫü¹`ž«_×$Û2åd°ó·0*¦µËbó䈞ÃH|ŽYPòh/{$ýœ©CWÍŒyðáõ"šÒ­™ÿík8ÒÀÕzÌ\fºîïCì÷ݘ¹Ö”v­²øßß_¡Œ‘o×¶ÓG½Œw fŠ6V´{àÿªÔmr_gWü(¿þÅ@x).ïÜŽýEÌÌ›¸cA4Ç»OÙ×pœñÄ|Í`‚ãݯzç2z0è=çå `ËŒ/ËÂm­å:Ä‹ä£w/ KoÈoòX»ãöË-4ìNe4ÏÊ D;•…ñ¹îT¡¯ØÓã-"Ö´Ü« ÂݼWMçV7œ±}«–n\µÄå}ZˆÚ=—À¤ð^“ Bå¾&† »&†-¡@×$ƒ ¹/¤¡¯Žaó] †\ Çæ¸:Êwu$/îò¨¡Ðìö…% Ï÷ü§ /ñàµU´4Ç<^7†éºA[ûëÿg|ýü¯÷ÿ½µ"köþ_Öçé™}»þø³¥¢^ž´KÀu=¸Ÿù® Œ/€ÀÊe o®”1oŠ]XPÒ+EÑy¯,‚Ïu»\aãÁ†z\J'é-xïDè)Ƚ‚EÅ•2ú›÷ ê~;Ý™-ÛÖZ‘®HÛÔ¬^¿Ýíõt«Óé¼_°h³ÎMÿ¢ÄœIè m½ÑPXìVCa1;o6dË:ª8¼a¾Et7JŒL”_ox’®”Ø5‡ÂÂW ËÙqÝaÌzyèºCa¡é}|<ëÔ÷0ë·^Øhë˜ó/ër£Ez]ÓÖFGkõ C3m½©µÍ~£m£×h§µEM×eºÌ™.ˆzäX˹¥Î¿a›fÂvÌD±ž`צ7ß9Þí¹“ù/d—æ1õcä´ô‰9}hj†ÊˆOIή{AXÊ {‚¥WÒ¿˜‘ráùCן$GÏÊWúpN,‡îhí8q/š:!»¢]áyÞÁÌeìWEדÛ?%¾Èk+®ôÙ¥±%«hÛ(+K„x‹øk¾ Æ]Ýn«Ùë<#á|Çíp2óÅ%1ß±Skth»®Ö`×ýf_o·ûô×~³}0ßqÿׯ¤5êÊ ÈŸ'{Å—˜åx á[u‹$8zÝÖÃlÑ& Žûb=å ëUñr´^rgÈ7_8i'÷Z¢‡†[4£qÏ›À3÷‚S£ñ§ Ä‚¦ rBSa €Dðœxön¦8ƒ”Gv1 “PXÂ$] “0PÇ[¡ï IøòÐÙdìxÚÜ<=†a?åä·v˜xA¨»ãòˆMç Æ£ [¢ -ÑTÙ¥~òúès$¦žsÉqç÷ØâaÂlï$9¿#CÞ”@~hp’5š3ÉšQ,ÉZ Ÿ7Éš=µr˜ˆ k~ÜÍkDð &d˜ÆîqñçFÞêbÂ2²ùQ!ÙTpF6[B'@3²ù¡³“¸aå`UÖ q°’Û,†¡cS/+ÝF"N™0°#–\4`óœ„MW÷Àà¶RÃÁšÕpÈAå¯ áÇæ® AÂ*C€Ø|•!@PîÊ.Üb¦¤  Q“‹œÙ\\p¤üÆK»QÂVÒä”kªeI=ÄÁ,5nÕ@ô ¶§ÚŒ»2°ÆíþÀ»ÃYËŠ¯c·ƒ"Í­ÁwgTÓóüÈ̈£ž'1¯g—YhÙ}=ÚœêlšJ?—¥XÑRœÊÀîôÿoï_›ÛF²-aøûûæ Ý®:#/àåŒ;Æí²Ýí²¶«žî²< ˜¤PJV+8¿ýÍÄ…)^r'v‚¤ Ïœ.["×ÎËÎ;/k%„Õê(ä´XârA˜É#jå0‹Œœ^Ýß¶2,I[ålÈŠ!Û¸0Шò¸R|T9Ý®üKNïÇ46ÃÇ<ž½!œ‡À#Öt¨ˆÈ‘'¾¬{/‹[]7dK}+I´ôÅ#÷ÐåvxéÖh2¯âÅ6²…Ý ™Æïˆ;ÕŒxÎ3bóHYl®ªIšÓå(OiB<—¨¤3ŒÑ¿bìkàѰÍ|{2'e¿O‘ß £x:Ó 07nÎK? ä$ bñÏxønç šM¬±•6±Ý­a§­uɸA›¸×цc³©µV¯Õub7Š71Âô¤†Õ0Îp± áo |öi~2Û'ßåx¢ZË‚d6Ó¾£ÿ–c\߿ϔy̘9h;={ Ù[(ì÷µkêÖþh(VólΕ0$éi€Ó Ê]ž:U®Sëöt£ÑëjV§¥ÑÙÛh˜f«ÙÐõnë Snï·_-Ò®†î¶z¦ÞÛìÚ]àfÕm‡-mì5:¬º½ÅÄbÕ ï¾ýÙ¸ëݰêöº$ˆTÇ]Sÿ}onÌ-ûtÁвkì!J;”›ÒÎ…[,( !@Jûa<>J;߇R×`r×^øk.³–•ÇlG,"³½ô雓ÙÞü]ÅÜQ ±7L޲¾`#åE_y±Óø‹Å‡“‡}U͸Ùc+ÍABh®Ë[_ËòãM‡Ä»CÛ²mu`s ˆÍ±#ĦÞQc­;F|Dsér‚¾r¥&Dggh¶2?¾R¯<ן¼'wÄeŸp¼±Ï~vcy#—¼ad¼°¯| æäÊRç¼}sG‡ÿßçô‡oÙAâ•Ômf~…ý+OQ4…9k_ égIÄ~¢(IwÓú¾`#9rL>ÎûÆ‹EòÑÄn`±?avÃ+§Rbë‰k…‘c‡Ä ì›M»lDýúé=3}E³°_¯¯}^#¡Æ~Ñï5týÅ…òb’/ï?¿HªNÿ¹áë˜RùšQ\ƒ¼ÔÖÙsͦ¿ÉôŸäaÇ'oéoÒOZÛm+÷9¾ð! 
[ ¾ü¤hç'vZ» ÷ö£ìÆ+ûñoÎ*ªÿþ:ýÂ߭б_ÍãþcB; qetŸ-'.̑˹+0z³tkÛY\ìüÚ“è½ .#!Ã[G)†PÐg—³-MDfƒ;+ÞKaó}Á^…èp Å%BI–ë‰]IŒú]eÚ?‘™ë?¬'Û{与˜i÷ÚÏ*Ó.A6i¿y9‚a›H‚a,Q0¬Wëôn[k˜–Ö4»m³Õè´ŒF¯uX.lß—_J*Ð(ÝqdŸ?©0~§-"6˜ŒFw¦1 Ûç¡°£SX(loÙ¥6„ü…#vb••ÛßÑ‚aç²=vâ ;¯PgÚÎ/T;¼âÔ¦‚ÞIزƒ •Áù„ʘüBePPÝø…Ê€ `¡2>7ºÚË®ö²«½ìj/»ÚË~&{ÙhDä Ì/òÀæy`BEžÐP‘'4ŸÈQHäIŸSä ~í "òÀ…‹<ÀA"Oð«wȈ¼"OX~‘'*@ä ‚ y‚`Kh È“ÀàAµ¬©–5Õ²¦ZÖœþ²æé£l;¿Ýif{wHÂ"h«IY‹ ˆ± Ò§öBr:x.9 DN€ Ó ät¨9Tn96¯œ$§Åæ’Ó‚òÊéðá %Cßm7žj½í&»¢ Ý <.Ý Î¦¦õ\˜\ Ä®Òø*?4ž+®" "hÚÎÊ®sN>=&£÷;V¢‘â%Ö¸ë ‘µ¨àް•ƒ‚;päT…ÕTN €ûÛ¦¦Çº5 à—:*Ô@þ½G‚OdLâ%ÍË1%½³“ܼbbWjÿJmw›íNW'Zô֨3Ôz­¶¥õÌa·;´Úº>2¯Ô…ÀÛ‡ˆÜETPÀ¶à*(`;UPØ\yb›E×Hs÷ñvéíÎ!YyïÎ$=Ù%þY¿k`;þRL#'>*c/v¯£aL%rx&…ç:¸·ªµL,ÿÉšØwGF×êi†9îi­Ž1Ö†íöPk´¬QÃjè½f—¨ÇÞþ¶è7‹±ÿF1=’i€hûг…Eñ"le@¥ø+Ñ 6€wТ¶~³œØT­}æåP…-*ØÒkf¯ÝÒVBs1tú£Ù4ônÃl5‘|ý¥´BýˆÒ)GöÂ" òºn w÷çÑŸÿ¹‡8Œ2(5u¹Õß…`à:ôÃFÙ?œ0òƒ‡÷Î4¾Wf`W!ã¦È95²Ù÷{íÓZ‡œ´gæ.<ËqÒ/'¸rãgÙ1tuU’mޝÞ@8rÕ¦“ñð¦ÕîØC©…=bЗtBBd’έ­—,šÎ¸GÕr®Ÿ³dëã«T+±%M³¦ÎóÀ•82Võ¢¶úõ僡]0´Ç<šZÖž¬Fjá]³“'Cjñþ[¿mvšÙžý¨ž+ÏœñCÅ×´øju9¦¯g0#ãà7™h»¾k´ôV«Ãù†U®x¦¾¿LÛqá í2 m]/©F­à˜*©>“ûÁÜs¾•aì ¡, KñA³Ùm©Rí\Ë­†êL­dMôçœ]ºòëñ½«äÚMʆ3š~|CCöXî³%Fe[»óÝù”üÂÆöÁ‰Å]¥[Hbh.­ˆBÄ㯄‘°N€‹éIÚÈ Té†rŸ4ô…¼>‘ëf\y.ROUʘ¦Hdg´½úSBÅYGøä¤gÞ²(óË¿—|xõË›²ÜåÎÉÛÀ?HF3¹b?q¼$‰jU€=†o?®y6•e»qµØJ+È¢K‹sŸ)›¦²x]r’šZ­²Ô*KUŽ˜íµÛòYSéý·vbP–õ€X£^|+ æ¤òAÏÉ 2”í9©2ƒ–¯–¨¥–[R¢]ŒJ_ÅþÉZÜt2EË%/êÉt=üä‡Éëˆ T ðùn³ØKÅ•2V}B¤h©¥Z”›ÆgiD剜Ӧäî c±¥r|?\ ;媹.ì$Ù×+¯Û=>IÈöŸ´Ô!ž­Û­e*ÚF­ÏÕûäd訸åÄCÃABŠЗFÀÀ€—Fø±ù_áÇ¿4 ~iÍùÒ?¢ØK#bø¼/ð£ó¾4@½4Â+ðÒ?8ì¥~\ü±zi xi„òÒüÒ[B#@_á‡xi4õŠ!a dH:/uœÝ ’!Ŭ ’!a©b_ðÜž‡·„8n‹› )-B†„Ûá%C - dHñ}f8‘§Àv:/æ–cØŽÄI†s$R¼{AdÈBf€dH1‡Á/¾òèžÉK† q²2 ²ÐŽT™1ôdÈ‚…=bЗ³_(#2HIçÈÕ%qÇÚÑÈÛŠ¹¯ø»€F«Õêñ°ù¨“Ûñ$R'q "CbUë Ç«FdHS%Õ‡‹ ‰c H†ÄóÁ°eªµ†SÝø1¹mž\›/ˆGC]†á´™¯ÊçãCå½uÏ?ž,Ïó#+£Úã¥vy/Ì= îøõŒâÅZÇPOj§ eÜ_h|¡Sä4&l vû ýw¬ù2ÕHZÃ@Zž5!£·™jÞÎnæ/mð=='¿¤ü¤Q~“d…âÊX©ã>úÀßbÿ§Îø‰ŒI@<›„RWzjMòvªzÛ¼RçÎèJí_©ín³ÝéêDk˜£‘Öu†Z¯Õ¶´ž9ìv‡V[×G敺ˆË$¥Hø¨ÚŽú›¥ˆí¦)Ërj4ÇW–je+uY¢Å<‰Y¢N†k±áJÛÂu-¾U+ã<‡îµ¶kë ÛdzÜ?¥5I ·²Â±´ÑÂ˺-Ú>Ò‚œ->&.ÎÎoÃ2­VBþ-ÉØnæoYØ`ó>.NkçüòúN,šÚ5õ Æ»õËü£+]*b#µ³F–km•<3ÿŒ³ç„s”æÇÒs‡4c4$Ä-f—vË6y9wÝKŸ¦Ãåç vˆ³8[rÇF$˜:^¼§ó ]è°&O.ö· GèòëÙåDŽÒ£Ç*^.oqÆA“ÝÛ.3d­òYBš»ÃzóËøRšár,É·RB_mäé½²*¡À´˜PŸ}]:QŽãø=…Ù’ó¨ütÎÍðÄ6~´öNkž'™.J-À³›ÍªõUµ¾ªÖWÕú a}•§ÉU ­ç¼Ð:²£å™¼•£}™nŽÈü¹yµ_UíWÅ~•T ’=öüÈ KË“4=²‚¨L“¡}CFs—Ê áñU({8ÑÃkߋȷ¨,£y^ý1l–×À¹%Ï»À²Ée¬ÍË}¡+-NÀËš•6v5׈ÄÏyƒ¼üý Nþêy/”éô’¨üâHùÆ×x»Õ ™ð ÙǹŠU¨âÀw|Rž^}Îq€_Ó§˜Œ—»íp„iä9¥Øs™µóô¹ÛVÒÐå ã£â""{`ÊÉfέ~œïÕåÏÚ»„ûí*²-¶â”«¿òH.€M0ŽÜx4Sˆ€3…˜ë`#BÌuA+Ìu‘A !ª‰†vÛ b±ÅÌêÑ.˜fËÐÆþwÓxfÐí8ü$•ý“›p ðQ%4UÌ_¶¢ùA¿næ~áö7Ѽˆ¹¥\“¼J•ã¾qšËˆ¾qšû*kEŽ,·ÝR*—öÛÎÿX^èdº2åÙM¦þòlN“«på ˆ¦ÑMº­U‘o‹‹÷^ÈPy#ú2ûp¼I5¦«1]iycZN†'-…ñ‡±bêè]^ÿLbÊÄnô=”“¥XI”GkUÂÏÇQ‘#¿¥Â–>ìŒ$bB$в_![o¬ÀM„óaÆ«ˆ•¯“(…ldçîM»¹÷d Ç{®qêÎóˆ÷KÌ0«áÌJ;sóEx,+O…û¾¢õ"nʶ±q–oòZüj]-y¶î®íó™ëgfòj/óû¶Á¹׉GÝð¹Žë,Z漪Õî6t¬±•6q§ÕÕíq³­µšvKkµ‰¡õì.mqÒë{­FsÜêob„)Rå‘$â¯þn½FS×Ѻp™Äh[µ ¬"ç•ÑÍhhì{Ï%¶&ygÀW¿²— ±=‘ðÄÂÑË18Å2aŽ’Æɤ—•j[ÿ‹Šž¨oˆRÆFð¯¸jºRW?­5øIy¯À$0ôpžÒÁ{›¦úÁG&àf\9a³¬Ð™‘·ç´éd<¼iµ;öPJ!ãq7#0j¶µ3y¹Ç¯è}(é 4Õ<ú*ïxAӬ音Áð>b~‚ß×çpµñ|³øô=q™S^³)ýo™_D<¯Y4v=Ûð“Eƒœ^«ìÄ­TZØÔê}**ÝÚ–SÔCÌ<»qM³©$Û2¬Å=,½ ©ÎíjÖ‘RvGŸkžV¯ÊØ«Œ½ÊØÕs¼~^j·ä3ÌÒúgí¬G¶Õœ fÌIå<"/H\–G¬é…í&ñq×w[z§Ùmíg;ò¦<Û±Š.Ê…Pq˹?%éyHõurÂôÖ e^pC› ‘\ùÜ̦*äòàxù t‹PQH[EÍe )Õä!áǦŽlp%óø}Ä?J‰L*¿v‰¸ `Ž&/ΥŹ墄M$1xM«©ÕÐ/äÙ¸ê£J)t^Ïb¿c/ÞžÅIj~>ÝÃó/kƒY†Ä ì„q[J•Çn”V.+â²ki›¶Ñ¢ç⹸K(44œrGAÙ*Ÿ€ w¥Ômr5Ž×«kÚ4xù;îøW·*8l3»À·¼¦áðÔjÓD·ºRqPÓ>ÿL"劓 «zmsØ2[Ý+U¹±B%œÛ6EÏ]÷AÉbdTÃߘHõ Ôä~Uð¥Púê:hê—`ŽŽžqKrª(xQ1ƒ8ÎXcÔóòÇš«¹±öÓRœ-6SÇs¦ó©’Æ_Çu¢‰cæ—Ä\äÏ|Ô¬Šb9Ò¶¯ºUkZÞÔ?ÁÃE‡|ªlbÏ !ˆ[à›°OyõùŸÊ×üd­ô•$ì(é^q¨äÏ9 i›*Ã%ºqB%APR„kå¿– µ4þ»©O#+¼UfVtÓWêþ,ª³XOj=ð]®]À¯³†õ©åxµ‡©Û7ô–`ÛšÓðÖ™ÍèJÓ̘vsã‡ÑµòòoÂó—9M2ØË›Fú[nH.Š¡ÅƒeVœ“ Ú4s<2R Zam1XEþ×™9Ë¥Ò 
O¹§óÏ[VUØ’tg¾|ÿêßʧ7¯_]æ<²À¡!»ô%VÛ§óQ?ûœûÒ0%þTê2/uö‰¹G;‚þ„Ž€äc:qe¿Ìû.ýYCIh/ÙóìCÎÄóƒô2ÛöimùgË÷5£ÕÔ›=]ï­¾G‚€~ïQué’ÂUûªã}õBewsÔ-4S]K:1݉oôÓpBÿõ‰N_Š?üƒØ‘2|Úâ´…iª¡®´’ÕŸÃOé¿>%Oí³k jr@¯Ö­™S¿3êKM›6Ù|õåÖzXß\g÷>ü…ú †ÔÈ„m†©¯.¾ èÐþFB­&?yøóýAò¯åýÍøú¦úa%´M'Hý´¬¨º*"ýùç¼^”šÑ¿Á i±}çþ×ulÆõb±Õ-íV¯Õi›]çÛóim÷åU2Ý(_Ø4ô9}¤ßÐöÿ‘YÔtNN'á3íö?@[ËXôdüÁ‰Èôåã æì/úÊ‹'=_„¯ýÑx±øqߤyØx²¥”ôÁÀõýÙàΊEÙX vNlœ¸œ“1'šãÝÑFÊÔ a¥×ص…Aʘ(˜ÝíÜ’‡*  w\7ÆÁÀ3âty{ƒÓIyäøê&¦m âc!Ô¶µ]‡ÆDyÀèÎ`/¯Ã ƒ:Þœ |o@'þX ³ëâLÙA»p*…öA8âJ%dÎD >sçÇûÉ úÊ•ZŸ‡AÝu†f+»¡z¥^y4·zÏ21ö –бŸÑð:rÉÖ_a_aû7W^¬"ð†ÍóŸÓÆ«†+/b[x3Ÿæ ý+OQ4…Í,}%¹Â~¢(ÉÜBëû‚-Ù¬ó„ýb‘|4±$XìO†çá•ÇF¿“Äêuƒkç¼›vÙô÷ë§÷ÌôMÍÂ~½þä\˜ý¢ßkèú‹ åÅ<$_Þ~‘Tþ;rÃ×ñ¨{MGs\ƒü¥NÚ®$¨Ùô7ùþ“<ìø$¹Ù'­í€¶•ƒûÌöâÛqìƒ[ ¾üdvޏõ“ ;­]ûûÑÌ Ã{?±ÿæ¬ß_§_ø»:ö«yܬQh‡©hþ»@\™ ãëåM Dp9$ÁÑ»ë–ÏÞ¿{DIñzi6¸$¢Éšë¢ÏL1°?ËŽ=gÕ±Ÿc–w’l0“AꊈŽqF#â ’'…¡ý9cq¦3:ùkŠ [›‡òðY$ÆEOÃ&â|HÒ+y¨¸±2Û SÌDnä) &d!ââ|ÔÅ„õÉ^*j–& £²D;Œ2¼LF#0ÈÁ ±FÛm³=»Aµ¬©–5Õ²¦ZÖTËšjY#4“Ä—F‚AèŒèÚ&Ý2Eœ¨ÂÀFŒíb:»’pß“@Üœ,2biY¨ÁOï,ס—5C$,Tì{+ÖÏÇ\†2ȵ‹ÈÅ„.!ŒÖÆeOçа[I ½0n±P²(zÆèP¶3»r§ŒêQ+J'€?žUçûmâÄKîÔà4„D7A¯ó*1?·Ä!¹’’ˆã'à[ŸW€1Y ;nñÛz£·ÙïW´ÄyI"q d%†~Iï§'íñ›$+{É– ÷¥,—øû_ä->îo›šèÖ$T ~ZWÒbOl³è÷Të»t¢/¡­ð¿„zbé‰ÈK¨‚†A/¡ÊÚÆ6˜>ÓÉ3)<×Á½õ%Ô­/Ö^höØìˆ˜F»£uts¨µšº¥õz­¦ÖÔÇz³eÚv§ÙQ½3&ümo¢1Dþl5ÞlèLðQoŸ4'¤ÿ3 2rFKõJNH¼P£Ÿ"ÁÆãÝ{Ù8á:‡¢(%'WF~NÎîÒ/é8븜ßßEÅiõzzËÔ ƒËw~º *RQ%PqvÚâ§â¬:•›ƒsȪ(‡ —›ƒÃ…ÆÉÁáÁqpxA0(/ àà@‘y88 L^ ÀÁFw^ÈÁás^Vƒ`nY ¥YÒèX[ò¬Y0ѹ6ò ¨û–Q;3|ë"j Æˆ}qà=^Á–È4®~¢¹¯ëxd¥~lê:r«óHªCß9l”ýà #?xxïLã[v²»Þè‘îCFöÍûCÏ}‹Èf>~eí‚¶½¨½î*z¨ˆ lGŠQeò Ç‘ߥƒy’n/Ks¥©õíó“—Ÿþ6Œœ|ðŽÃäs]©‹…*µ®å6óÓ·J—M^÷gâxšoÍ£-‡^-E*S–pä,Ë6_’.ex”na}Z¾ôãí\6Í\”c:ë¾x¢S¥Û\œõøã|nõìüïé;Ÿ¹É^-Ùw¯\âàÂ’’üôâùöíZâTvïò=úr6GºD|>«3Íšþ9Ð$³Œü–Ù¢™už†$:q{ÄŽjOÙZxg×ìä ÓZœŒ÷Ûf§YgÑlž<„POÏ•PÄ&‹Égå˜bÌçÁ,~=³ ƒ¦^Nµ‚¡]†!£­ë%Õ(yʵS%Õgr?˜{η2Œ=!`–åƒa)CªÝn¶ŸÙš9Þ$H®š‡m8£ )y­œU«Õ BFKÇ_é9ìÁ‰«üµÊ_¹¶¶cY¸2¦)¶ðKOþêOI¤gá‰w÷œÂ[e~ù÷àòãOƒ¯~ySV€»³Ü9yøS©ÇäÛߟȸT«PƼûqͳ©,»Q‹= ´‚,J±´¨¶–‹¤©,^—œ¤¦V«Ãœó;̙ў\](kæ #ßöc¥¤/¯/¿¿ƒ¤K‘¥Ù.N»Ùël–Ÿk!·ý«˜#Å‚ÈBÏ0p!‡bxqñ<»-¿°(½ÿÖ®2–e=-ûÑ‹ibŒâ+ßs¤Z¸–†.§Ü×rz9»‹ô*¹Âõ!ó0þ›\⦓Ù@nh—@‡÷r‡©šž,÷K‡YuñG¬ö­†.?¥õû€t[¢p™äE'‰3¦äÑ@¦³èá''8 †:òÊÈeηG쥦e)qA€…!y”–å…ëY剼¡žÑ¤¸>A­ePàú×)ÏLr2[tTÜrâ¡á !ÅÐ;y"À¼ïä°9ßÉaÂÞÉAÃÞɃAó¼“Bx'OŸë<:×;y0Dþwò@¸Ðwò@à€wò@¸øcƒÿ<,ï;y Tîwò`¨°wò`ØôNúNtê‘‚€JÁÐy…ÀÙ ªô˜uAé!Xê…Ø<\uxKˆKÁmqK‰@‹HÁíðJ - ÒCâûßpÙŒÛü8*§°Ãv$Né!1GI‰w/Hz¨ ô˜Ãà_@zèèžÉ+=$âdeëëœPêæ[™14‰åJ,샾œýB‘AJ:÷ôZÜYŸsópvÑáÒCHv J!–B\z©øÒCHC‘B*K!é!ôö8Sé¡â׸$ âKá¥rK7Éu[ýœ.s‚o«ãßâ<,=t×(%;„Ì{Þ—‰¹¥‡ð=ñè—éžûEq€ô„Ûâ%Þ=9ë{ÛÕêì`ÆÉ)=„cíÈÒCXMÆ!=„eŠ[zÇà!é!¬j”Â1Ä!=„U#é¡m¦xév»0›ÍŽÞî=y>mY°µßþuIBE8Æ€BEX5<(T„cè°PQa;×r«(TT¼,¡¢âÖ kwy”nA„ߊgÈ A1¼;ø¤¡/äõ‰\7ãÊv‘Â8Áiš*:ùÏ!TtNá ,T„gš[¨ÍdréžS¨Õê–[›‡.lâÛ*B-È¢K‹sŸ)›¦r á%©\BEÅÍqýœSüýàO¼BEx–!BEÕh¯–fÏbivñ<»_zHBÿ¤‡ð¬¤‡¾ïE½dIÁ¶¸ô¸iNé¡SÞA“*^>•œ˜ôŽM\é¡ÂeZT›‚O÷˜¸¥‡`%ä2çÛ#ÜÒC8c[z¨ø(-w€Gzè;õDé¡ó𑤇Nyf’“Ù¢£â–  )Ä™? 
¡3"k6s¹t*ð %4Luð ²?nŽá‰XZ¦­Ã+×Âzg¹Î½¬ê€Q¯B\ì{+–8,ƒdj-£L¡¹¸ƒÐ%„eLmLÐÈ™ú&ÃÐÐ ã %CˆêÐ1ƒ2©æ2ÆL°%>k¯ýÑPZCš­Þø±”Ì‘Óy«’Š*©8§¤"#=ûÉlTFGmL*»ZÒ®âqüåõå÷4ÃmòìŽ9Ë•n9Ç·{~slµ&©Ö$ßÅšä‰vcµByÎ+”𶜠låkÕj¸Z ?Y §|—j%Œi1‘hzöÓûÒ‰rÚMßS˜=Þzˆ_¹ ÛøÑÚ;­y^¡„ÝÃåû, ¥Ï‰qzxX_ôñl‚ò… —b*ßóPªCeRcŸ¸%öŠ˜[ UŸ»ûƉ+“8‹×WY+–‘·–’¯Æ¢aaô%°¼ÐÉu˳›LæåÙœ&÷–Ê1+L£›t[«"ß—â_±½´}™Êœ:Þ¤ÓÕ˜®Æô¹-»¥¥0þ0~+fô.¯ü.1eb÷üÊÉ΂R¬$oHŒÖªôýn’°¥;õˆ˜z2 ´ìWç·U·Ù%øøÍ¯$J•µÓnaîÇ  ]ãÔç!A& Ëõ@-Ìj8³ÒÎL.tiq 5 YyúdÁ×óØ Ë7yÍñÆU»·"û¦~×Àöùì%šÏÌä;Ô^æ÷1lƒs'®Ï»Ïu\gÑ2çU­v·‰µñ™5±A´mè=M·F]­Õ$#mØiŒš–ÑÓm«aob„)Rå‘næ¯þî—*L]GëÂec An}…ÂÀ*rþÍ ´E3ûÞsÉÌ‹DÞðvìeC]}O$<±p´Àr ÎgB`ŽzD$“^¾Î¡¶õ¿¨è‰úÆs±‘S<÷SÓ•ºúi­ÁOÊÃxŸÖ†Yçn[_“;ø¼&ÜŒ+'l–:“à±óu ÈûçÚľOìqÏŒ¤ò1w33` f[«1“èâ+zJ:Mõ­¾Ê;^Ðn¢hjÖhÄrÒ—±ê†*ñ8ƒ‘p4ö„ñËM?ö÷šDÒ­ß’‡íÆé/¤·}ÿÖ!)§@;.yùT8³žRé5y™òg4²Ü8ãHË6R¬„÷Rv`^•a>£ ±¦/™#öëu×·-÷Æ#æŠF½¤úèêƒÕV›nøòñJ­_±tµÚnbÿ¾zºát¥^Ðg“Kú¡ÜL˜|`øóÙÓßæwh’Þ‘`˜|nóNª”ú_KÚFÏô Ô?ç¬v~}ÙÄu?p&ާå4úû,é e¹¸úT(J}å².‘e0Ë,rU”e*S…’73€g4¥OÝ”6ŠÕžö™lë=]onVöiH³ÎŒçGU hc9­#¦q¥J³¶‚|-§yT€x‘¸‘REj}Dîê¹_k®?QË+Å2j½¥™‚,»òD_¥yޣܔW$y"-{ø^R. è‡öÅó黵Œ¸¬Þ[çüž] FG•àPr‰k™\¨ËTM³¦Îtu ÑUfƒ.²0âx4tÐDÉ#vT{²ò¨…wvÍvç!ýP-^5õÛf§YgQg>t\'z¨§ˆóx¼h i)nåš`·7³X>E¦!S—[`hË4`´u]r Õ‰&$—r?˜{Î7™F˜(\ÿ LqŸ ¥‰v»ÙVÏjùðd3"ÞmI®Ã„-y}˜k¢`à›â³l&©Z­Ìª•Ù)e÷4&––Ï/iûU._åòkç0ì\WêÔŸ—=Nÿ[cfkÖÔ=¯Y4yñælÃO rÙÈ@ÜO¸6µ"§ï¦H·¶åÊñ!â=žÝ¸¦ÙT’ݯ©Å=,½ ©ÕŽ?OÊÎâèsMØÓêÉ2ÃubxÊéÄPdî˜ÑX®UÕA`µÜ¬–›G]n>£3¾lyTZÿ¬Ýê•m5÷ JÌIµŠˆ<Ϩùg%Ô×É™Ú['@Ï2ÕÍ%ää–êæ#Ë› Ë_ [„¾!!bbãÅ} ¨¹¬!¥š<ôNDSG6¸zâë¹ìBîÚHJ•øÌ‹í ®‹·ºÄÝÊ5±ðã]bZ|Ç»Þüâ» Õóiqn½óóÇÌ6I#¹œ­²,•ÿ޽xÇÝÅjâ˜øÖR;­ŒÛ ‹S_f¡¡á”«8 ÂÈVùÔo¹+¥nÓº5p¼^]¶ÅËñqÇ¿ºUþq»¶ù¾å5ȧV›&ºÕ•¤šöùg)WœJZšiŽÚÃŽÝÛWªrc…J8·m 9ž»îƒ’éËQ ÷"U“T?ûUÉ—2ÃèKðl¤©_‚9:z¦L‘ÓTÅ óˆ)Äq®+° Y½äl?-¥Ýãa³Ó†Ù3íÍšMϙΧJ®ã ö‡FØV ¾öKb.›xÇØ"hŒÑ³1¶*>þé§ÂêVùU |Skò©ŠªQ¸XO‹[à›°Ok¸¶oëÝF·Õêñuç§÷˜U^ѹ„må‹Þ*Ÿ£ÑGú#õË«ÏÿT¾æ3&¥¯$¡_I7õC% ÎIH=U>(Ñ*±Œ¥’êX^+ÿµü³þ¿¾ÿÝÔ§k‹™Ýô•º?‹êVÒFõÀwI¸Æ ©³†õ©åxµ‡©Û7ôÖN¿“ß™˜~×l4: seœ5þ¨ºäޏj_u¼±¯^¨ì¾º‚fªëObAâø/4AЯzÑàÆòFñ!–: 'ô·_g®õ0ôý[%n÷øŒíšþ6ÙÙ§š×“Ý&'«NînÙov˪^(ÿt¼ÑËuT5)g2ý©Yé¾7`ĤŸúÃÒ_F£Ùl÷ºín·kvº†Ñ3èïÞ0„ŸØÇdƒB-æÑêb{7µô¶nÍŸGíút)᧨ûÂÃç¤1•±ìŽ`ÖEÃÃÐ É íë$LìÒ»/HPã-³ÓnWABjØé×;¢B¯côšf·‰Ô/©NÚŸh6«øÃ?ˆ)ãÀŸ*¶eßЕ‡º’iT?¥ÿú”Ä5µÏîÑ\¨É"µnÍœ°ž>üV_ö“ÏÚèžúêA¯°¾ÑÉ6q\-î… ­rõ7°VŸ°ÍtõÕåÏ—;ßèO˜ÝäGSü“Yú“奸øN¼úá€×|ZÖWÍ•‘þâs^²^M‘žºèîÒ_Zñe߯¸œß¿^lóŠfÍhê͆Þ4{eϧåÏhEM'‹lqÎé ­tÑáßö•¯K´kååß”œˆL_>¾`^þ¢¯¼È½PÇèDµ?/?²îÚ):l5ÙˆNàúþlpgÅï@0Ó;78qm‰'„‰~-7$ÑNöÀI!¬ôf"»5HygÅ3†Ì€É îf½ãâÝ€gÄÑ 3¼Áé¤áMDCû8Qk´_ˆÑTIõ9$̉hŒ_ ÙÃR|Ðlv[ªT;×r«#à‰TN]M$kœjxîò(ÝPÙ0?ùÏðBîà“†¾¸¨2Ùƒaü€Œæ4%$&y~¿¸äÙ…7ˆØ$²iÑI\“±YñI|«[ÎüPûøk£Ä/È¢K‹sŸ)›¦òˆV"'©‡´$«,µÊRKÈöŽÚm\s²úOkÙ:Ÿæ\å9‡<'éµ:» –í9„‘d[KŒWKÔRË}-ë~F)¹‚¦+¼ü¢žK ;*a>ßmí,Ä 3x¹‘sQn@çê{öÄíÓ¦äî9(ƒ…èûëUi5CböF¸¾’}½òºÝã“OuíìÝNDíä½ONyê÷k¡hkÚ:»¾ØÒ]ÃxBžãµ»ýëÛôß.P`h"á’ˆ ,×åâ·€ýY&¹v›Ã=öc:(jy'‰Z ¤ ±¸7ÎhD¼A¼KŽÜŒ‰‹èLg$}5Å„ÑzCyøóq¢1ÑSr&â|HÒ¤7¾4<ÈØÈ<%Á„¤lzL\ü±‘çEaÂúƒ„iŠ:³ÂðÞ=Œ'Œ2¼LF#0ÈÁ ±FÛm³«æ~r$7¸(9fJŽ„¡óR@¨èäH1ë‚äH±b_ðܦ‡·„89n‹›)-BŽ„Ûá%G‚‘¡äH!‚Äq[H<¡,PØŽÄIŽs$9R¼{AäÈBf€äH1‡Á/¾9òèžÉKŽ q²2 9R¿ìz rdÁÂ1èã~Y‘AJ:'@Ž,Ô×ò†÷•òÂV@äHk'CŽÄj<r$–)nr$ŽÁCäH¬j$Gââ GbÕˆƒ‰cª¤úp‘#qŒÉ‘x>–âƒÉ‘…í\Ë­"9²xY äÈâÖ ×ÎQÜåQº‘kçx†·ÞP /ä>iè‹‹*“E!G"MSâäÈ“ðäÈs o`r$žinr$šI9Õª9ß¾ 9µ ‹R,-Î}¦ßmnr$Î69²p©å¦ñ<äÈïÔÈ‘Åò±Ôp|¿(9²¸¯W^‡BŽ÷x²<ϬŒzÿˆ6?ç½1£D$dgY勵ޡžÔ4NÓϸ)¾ÐøB§ÈiLØ@íöÆïXóeª%´†:µßn´t£µYdHWÊ{ç³í†$ÜCÁ1´¾ºF;昼QÊPj…A¹ ¦E>¶+šÅÒ5›ŒMƒ_ü)ßøËw&„fZ¾•òÃÀ:#ºŠU¨âÀwÖôZžsàVªfã^¹C²üiä9¥tØsü¢lgê7R-ÈC—ƒŒŠ‹ˆì)'›9·úqF¼W—?kïȶüÙŠS®þÊ!¹7Á8vrãÑL!vnÌa®Ãˆ0×E­f® JQM8”°»™ü"…b¡ô91Nëf<žMP¾áRL½jJu¨ŒBû‰›:^ÄÜR€éÜÝ7N\u7N\_e­XFÞZJ¾“aÃèK`y¡“)Å”g7™Ì˳9M®Â•c, V˜F7é¶VD¾-.&{±½´}™Êw8Þ¤ÓÕ˜®Æô¹-»¥¥0þ0Ö@½Ë+šIL™Ø¾‡r²³ +‰6âh­Jßï& [ú°Sˆ©‘@Ë~u~[%á|˜ñ*båë$J•µÓncîÇ  ]ãÔçî—˜aVÙ•vææ›ïXVžJñ}=­°|“×âWëjɳuw lŸÏV?3“ïP{™ßǰ θN7ãÊ ›e…Î$xìT}„¼=§ »vch¶o£ž”B!ÆãnF` Ôlk5fòr_ÑûPÒAhªyôUÞñ‚¦YÓ?gƒyàª1˜~½žùµãE$ =âÑŒ¯öd _ ïìšíÎCú¡šëÛ–Ûo›fÝòhJÕÙpðé'IPOH<6j!„.‘Y MÒòÜÊ5Á6¨³˜ 
&Ó©Ë­F0´e0Úº.¹ Q¢ ÉåŸÜæžó nä’ÓÈNÌnÛh›OÞÜ‹ùðuÀžàVˆ†2 f³ÛR¥à_Ë)¶š)8©ÎÙî _7“ýAšô$¢Ä}– ‡‘*³ k’Nê+—ú‚¬¾Zn!&µ“e Ý$nd§P“J§Ö»zîךëOÔòJ±ìÊ·Ž+­‰7Ä™¾JÚÒ“…aN&#މ£ÔÚà|½Á…œ€ˆŽ*aˆÊq¾5@±©2}\âd¹¦ø”þ·ÆÌÆ/$ž×,šÈžmøÉ¢ANíUvâÖ9-lju£>•nmËìA&šÝ¸¦ÙT’m8Öâ–^€…T ‹s›¡Ž“²³8ú\ö´zUÆ^eìUÆ.1c¿xÝ’Ï0K럵“"ÙVs"šQ0'•Gðˆ¼œqY±¦V-³qˉœd©yÝHõurâôÖ Ðs)uS1RN¥nªD.’—¿@·‰1±! º5PÐ\ÖRM‚lêÈWBŽ_Ïe  NR|á!1ë:Ÿ}G~A¨B;ƒëjL­†.qRàêœ)|QÎÒ9ËK¿c/Þž—Ij~Neó/k*‚Y†Ä ì„q[J•ÇnHV.+â²ki›¶Ñ¢ç⹸K(44œrGAÙ*§  o¥Ômò5Ž×«kZ5xù;îøW·*:l—+»À·¼¦éðÔj³ƒnu¥ê ¦}þ™DÊ'9V3Ûö¸=n ‡Ý+U¹±B%œÛ6…Ï]÷AÉ(cdTÃß™H"Ôä~Uò¥rúò:iê—`ŽŽž‘Mr2)xa1…8Î`c\ôò›«¹ÁöÓR­-6SÇs¦ó©’`Çu¢‰cæ—Ä\åÏ|Ô¬ŠbIÒ¾¯ºU#k^ÞDÁÃE‡|*ubOà !ˆ[à›°Ok¸¶oµôV³mê|EÝõé=f•Wtv C[ùb…·Êçhô‘~ãˆEýòêó?•¯ùHé+I0WÒ-øP ÈŸsROU†Jtã„J¬5¡¤b×Ê-ÿÀ¬ÿ¯¯Æ7õiÄÚbfE7}¥îÏ¢º•´Q=ð]®1êì£a}j9^íaêö ½µÛï¤w&ªß™=£c¬Œ“  ÆU—f©®ÚWoì«*»À¡n s»êú“X5(þ M¹q‚;¸±¼Q|ä¤Nà ýí×™k= }ÿV‰Û=>»¦¿M6<öI|äE_¶i¾¨“»[ö›ÝÚ'Ê?oôr]ªDMÊ™LjVºï Xqé§þð‡ô×M&üÑ1ºÝ^£Ñi·éÿ5èïÞ0„ŸØ¥»dËA-æÑêbk75›žÑh:N7ÑåÄ·‡e·|¢ó–âÿ v¤ŒªØ–}Cs u¥šô¨þ~Jÿõ)©ÚgW.Ôä°]­[3§~gÔ—E‡Ïf?e? ë]/ÔbBµú XsOØ–—úêòçË€Œoô'ÔBò“w?ŸÑ$ÿZÞ»Œ¯]ªxʧe¥²_‡ô§ŸóQj óÔ'× zi±äþ×<ÎîÏ_/vtm§ÙiºÞÆéÚ`îy¹!—}-ù±B¾9ÌõòkËþ]xlîªuË0:Ín‹+DîüôÖ™eÊJ<.³iîX`¸PvV÷©Sÿ¨ìŽ®Hµ„}úòý«+ŸÞ¼~u™› üYO“²˜µ{㇛$Ÿ®8úÙçüÛ—†¡(ñ§l:_LÈè¥Î>1÷h:MBû5ùÁ˜.M²_Æ_6§á­3›ÑŸ5”a{ž}È™x~þCºîõ†©7]^+Û?½îé?‘á|¢ÐPFô˜ÞQ³¸3[Ó4õÒ–9ÂWv€©4jF§ÖÞ½i}VQ’#PeLÝGy©Ä·³4/ýoÍO ˜S_S¦>»d£$'dqbIM~}QŸ‡A=¼±‚¥IšmÒ©áÆG/ŠÔ-ESfÑïeæÙàck[V]f9Œu×Ö“ÏÑ´¯QéL Ñ¨}kM脜!Åfr‘vl>_Ž|v]Ëþ²úlØßÒF¹_(ùFìyÄÉŽ†:^}Óû ØKûâ.IK¨)Öܵ¶ò[;\Сk+JS¡Ø­ %Æ ÿÿGåë»×¯èµkzü;½cô”>QŸú‡¥¿Ð?^+?l”<íÒ ”øÇûÃZ/p­Uz#uQ¤/ûn©ú•íÞî°Š懶°•D³3Ÿ)ŽÇ‚ <(3wN×1â^÷Þ·F 47²¢çŽK &™¶°6µ\'Œh0·™ˆÜˆF› i0F²Ïü9‘Wõƒz’Ö÷Lõü$±%yß=k,ۭΊ²žqhÁœ‘âù-WÈþ2sÆÖ[?ü¨PÐ$Þmvà̾ÛÚ[óÈÿ^ëÎŽÕÒ)Ë4S¡í±–s AÓ]HÛóøí¸ŠDÉšT¹wh*áxN™„kH“'`‰ºËlߣŸ& ᛪvÛ léÉÚ6÷NÌÜÍ~‡!7l$ò`âýrÿÙ ªÎ Êj®-U™¯Ì‹Ëu‡4MOGþ2ôùc…*ýÞ…r× ™c2d`9=Mvꙥ,NÕfè¥·î¿ FNÈÒl¼ ÒМ–Yþ%g¶H­>³ ‡µj½È!¿¸`óß=Q,7> Tn¬;ú %ûN Ó|ÚUÇ0Ÿ´[î1Lûa™<Ó°O³·¿üøÏ¾B,ËlëN[o¶[†1l[£ÑÉÈh·;¦ÙP í¸íÏ]üäò‡å*V0™³ aÿ©Ì,ðÙIBýPSûM&‡¬®}E|‘šjÒöê+ax#Œ3$¶?%ƒ$¥¥Pó‘xJYÚc?¼ nûâB|=¿ÌÖSÙ"u ߢÀÜÑtœUâÿ–_ âÝÕãB°2©ÉØni%ÚÂ†Âæ¶$xVƒa•¯ËÞµ"éL£¬[/6¼£‹V¶^ ¦É¤5dŽnˆ’êœî®Ôk ^lhb&Ì®ïßÎg%äËì$©¿æ!/ÿ¶ç’å!°äwš]Ž-;Ú5œ.“ LØéõþ«Û¼x1æžË€ Z¶€LœXmŸº(댚ãã¡ÏB,¬86…ØœÓÑèEŽ-¥öÜœÿHÂöCêöìª3äMX`Œ$æŒY»ì>£¡ãžÎ¯´æÈÀ³„§„ÐUHM¦ °Yè‚Û±FS'æKI M³ˆÇÌ`{Gªy€ï¶ï£”¯íÑþ»GNhûtÑô€Œ;výûô}ÀíÆøbu aÖ¨-Ê¥dXª3’‚ë±IAöY³HANR°ÿœû‘%™fj‘œ–ÎdS¤€gÏÙHŸÓ.ÇEÒ]ÝZ¶²M>ûÏ/t³rOZl9ñÊž’ˤ²wXqçà4xK)ùlO ©xíL ‰¯”’Kjnê‚!]…K)òšBƒ¤f'ìí–Y@}Fª™l+KZŽO£ijûP“:ziáU®Ö:¶çÐÿ³ÇȰþÍ‘´åå­3¤£J’ßL-ûÆñä ¯;»ð)/yœúžCý„9=þ˜—\öàÆ†È‹}¹ƒHfî‘_©Mœèf>Ä42³¦ËçÒ¾i¸³¤ëOÂÈ o¤EëY( Ú™HÙ²J8ä™´€”• hrU‘;ÜýLy¹uû6™SSrúôþ†flñÙ™”\'>6¥éàÜŽæìyIcjH,YÁó†¸SI‘ly¢Œæ4;N¬ÕÂð×…Åꧦ<6µ¯Œ-7$Â` Ù§ê[Oª/iæ)¡?%Ê(f™åŽ­œ»C¬_î:©ÞÒj´L³¹YþsíξÖÜ.x²¼,™š7wÿùïA(±CùÍ Fs:qó!C”0ì¥Ob@±ÿ‰ß´ßjMG!*ðyõ÷÷?þ‡òþãëWï•×?|xóúËÏ?(o?~R~ýüæ]¢Ñ Ù¡ÿ£ãØü×›×J,wÞ(š­¼ ö¯ü¿•å¯UB—™¢¿cñe>e>Ðé0[Ó[Ú3Š6SÔÿQâ²lç©Ó%”ò?êòó?¼d÷пkFÇl7 ³ÓìÔŒ–©·Ú ­Ñj›´V=³Õd7ŒžÞ X‰b`¯¿”V¨Q:ådÆ@"G“J,ÅjR(ÚKúuZ«zVÐDenO¾üõ G߯_ÐÍnÛ¼ù#œ~3oÙ¯ï;Þ°Œþ£|ùˆëõXÉÌrÝߨõ}2ríÚ´ÊüC®B-"?®m—“zu ¦Š6V´»ãÿ¦Ä7{s×Uû«P))ïÌŒý¹7Jß‹ÅM ¢9Þ]ªÆ†“Œ'ás`œì~Ù‚·„=ØÉúúô.­UbÕZ`ÛÄ/r¡–Öv¦‘, ½yíåK¤ˆ c‡¸£AHØ,î1{ó úƈ7ˆ-„¸¥fÑ™²Ë)¾G—:+"«$|Æ4ÀE¿uâX¨~>ô¢ u>L¯„â–Öµ†Ä•ãsÏóhˆ¶Ryhe›"7’?ÙˆÁí¦`wï#dTüÃh Ãid4B¼ÁuC¬ ‡;â©/"¢ÞY®ÃÞ7$o}¢bß[N„“­A–ºáw§¦· 9SB?žŽiè…qGÝ Í¤ÿC”[9x¹ço2²D:2,´ 9½sσåS¢ê/ÿŸqÿå_þ5|cGöôÿì/7·dúíòÓ;ÿ¥ŠbxqÒ)Ú,üv¦%´²÷ÝÂȚζ>·¦ï}Ql4!ÀÞfééW4h¼±RƸyº¿¤Ï¦%mó›DK»/¡­ }tO}b£Æàò ŒûO‚ÐãBŠ9Iµ 54¶Øñï=|"c/™óä™+¥÷“@Þ¼RçÎèJí_©z£×n4›]ÍjFZËêÚ´-ÍîöÚnW·MÓ¼Rò¼¤xNS¾ÿ¥¯@Jh eNç%¶A ~œïÕåÏZúª£ÈÄ O,U’Oe˜‰v½¢Ê¦uTsxÝ|˜n”¸3QÞîDâ@OÂR{&eŒ„ef¾öd¾Ìã6U0}×ÍžzÃ7J'*f“gžúF}¶Ïy[«Ýhë˜ã/kr£Eºk¤5̆©µº†¡Y#½©µ­^£m£Ûh§µDÍ^gþ8³þœõÈ{-çvuþ5[4îËaǼ(ÖlÚô%<Ç›  ›é‘/ñBöˆs?&VK?1›'­ ñ”äìJÍ »rÃ>Á®WÒ¿X‘òÂó®?IŽž_(÷ô;áŒØ]ÑŽâ‹{ñë§Aü†ð871ï2šÇuÑõËퟓ\ä•3}vylÉ.Ú6ʺÇ% ¼Íü厌»0Ú­¶Þzò8Ö}Çíè2ï;Š[<â}ÇnÍ4ZFÃìhv³£õt½×2{]Ý 
•4^wÜûí—²Šô#JÈ${Í—xÅñÀm?n¿-r»qÜ ¾Í›£¶ÀíÆ½þ°~Ý «¦x÷÷^nSÈ\8O­V¢WôµèMÆ=ßdÜ ¾zxÿêáA,èÕC.@èÕC(àê!xF¼Ñ€. np:)ì> c.J@a%ÁÀèθ( u¼9øÞ€ w݈ŒÏA»‚§Æ0ìÕƒü’ÓïÑ!u÷~êbÂnbó£BnbPÁ7±Øz›:;T V•`=“+yÕb:#še¥ËHÄ!62`Ä.õÙ8'!Asàå{0¸¥…p7øQ3î†T~F?67# c„±ù!@PnFn±PR0„¨ÉƒÎl,Î9®úÆK¹Q¶%̤Éc(—ÔËÄÁÛiÜ®˜Al¿8bš•=žhwÞ-Î\V|»Fèii ~:£ZžçGV¶á€Øëy1ózö¨…–½Û£Í¨Ï¦×BéÇeùÖn)£°:…°y@…œK\.3xD­fÓ+û[ÂöãâB†%i  œ Y1dURŠƒ*§ûÕÉéý˜¾Ãfø˜¿³7„ówÄš¹#ò„—uïe1`«ë†ƒl©o%©ƒ–¾|äºÔ/Ý=æU¼ØF¶°›Óøq㢚ÏyF,cž(‹ÍU5Isº\âä)MˆçÕ4Æ‚1úWŒ} <úâ¶™¯qOæ¤ìàâ'ò[rOgæ$ Ãùˆâ‡A‚\D¸A,âßíÜC³‰5¶Ò&¶›C£5ì´µ.7h÷:Úpl6µ¶Ñêµã±NìFñ&F˜žÔ°ÚÆÙ.v‚!ümoÂ>ÍObûä»OS@kYÄfÂWôß2NŒëû·ó™2É2m§g¯!{…ý¾ö`MÝÚ Å cŠÍ¹r…$= p¹ËS§Èõjf×ìu ­aê=Íl½6#?šÍŽ©dÈíûòËÚÕÂf»Ûìô6ût_ñ¹ÙtÛ¡K!{ÍŸ›no1±ØtæÀ ÌIo,À¦Ûç 2wEñÈt{Ë.µ!PÆH Tº«”(“nG‹éöÔN¤Û &ÒFã'ÒÄ‚鸡D:(€H†é@ÈœD:~L‘ #ÒÑ@¤ƒÂ‰tüøü÷¼¹1·ìÏCË®±‡ïüÀùO)XJÅŽ-цÌÚæÜëä‰m¶îÄ„ç=U¡îÙ?Ñ^<ã ›á%Ù3+Îì }Ç4¯q=ý+\|T mEÛ(Û~Enä €ÛKÀMëºÀ¶éQ®q !’ð¨uBs§…Ÿš-½×6[-]ëŽ ¢µ,ÒÒzfk¤Y½32 [7fñþC<ÈÇ*¸'y’6[eŸ\cžzIÛU•tL‚Yy9»¦è»¥'µy>#PÞ1Ç™ @¹ÇðF¸ÜÙ»P:ݶÙklˆkØnÿjÁa ½FAZF)Vqûß„}&…ñwÇ9Þd"´–E¥0LÁ¦-ƒ\¿×¼$ù ~›Xòâ'?ÑÒkí¦ÞhêM­ašºÖh¶¦ÑÒõNO7»ÝC¾þRZ¡~Dé”#{þéÈJp»nY‰Žgtng6\Vâ€C€„%ÄG©¨°Ä¡ÒKn ùá E\âôª%(/q°»E&öæGP‰ƒ©H`â0¿ÀW ˜àÎŽ!0P€À&0Bæ˜àÇL@a``tgLÀ@áüøüôÐ’íàfuËÝt5‡i…÷P¾³3µBtÜlà×r.m{ÛŸÎ|Ý»ƒr`Ù8_Á2=cýñÖË,Ú8 »÷ƒ[µ( Üš Pf@6‹«²=ŽOÉ -¸mí ï 2È.¿!ö¬û6Ý<Æßg•EÞj³TZl+ðh,<‚Ú–7ZR&” ø±ù• ø1ÁÊüÐ`e4§2?¢˜2>¯2?:¯2¤LÀ+ LÀS&àÇÅ e,@™€¢L@+°%4T™€Z@™ ZšWKóji^-Í«¥yµ4¯–æÕÒü-Í…(íx>J;DiçÇ…PÚùQ!”v~T¥ŽÊOiçÇæ¦´Ã a”v 6¥ÊMiçÂ-” † ¥ý0¥ïŠC©k0¹k/ü5„ÙKˆÊc¶#– ‘Ù^úôÍÉloþ®bî(†Ø&GYžaY~äå8–×ä @ „¿F.+NXÞ@ÔÊay±õ¿¼>\Þ@Ø ¯¼A1…å ¤šç“7.‚¤šÑZIÜ.Ó7öÏr¥O§ºÒí®æ;YŽˆŽ o¢Ár××zM£Ý|òÄïnUŽÕ6òþâlÇ=ês{™û;l¹¼”?à¶ÀÊp»•?šû•?@¦pºô§î"[ÄüàÛ•?ºm,üTù£Gí¡Õêiô‹=­¥{Z·ÕkÍ¡m[ KïvŠ÷Âàá=‚âo„£'~cà#§¢nx}Æ{´Ä]ÙG´P"t”³ 3>=:â6T<ä^B·/ðMا· ¼#цZÀëø²Ò/ÖLyãÝ9ïMi²¼M7ZË‚‚ÝŽ`Ó¶¦oUÙõýÛùL™Ç惶ӓ)úã‰Æ8ÊZr‡kjÍjÖÔ­ýÑP¬0æ@ wøä5üòñ)/úÊ‹Æ_,~<Ș<è«jÆÍ[iŠBs­8XÞúZ–o:$ÞØ–m«C›S0lžíx›zGµnìñmÌ¥ÿÉ úÊ•š¡ÙÊüøJ½ò\òžÜ—}ÂñÆ>ûÙå\ò†‘ñ¾ò%˜“+oHóöÍþŸÓ¾e‰W^P·™ùAö¯žJ‰­$®FŽ+°o6í²õë§÷ÌôMÍÂ~½¾öy„ûE¿×ÐõÊ‹yH¾¼ÿü"©:ýw䆯cJåkDq ðz|P[ggÌ5›þ&ÿÑ’‡Ÿ¼¥¿I?im´­Üçø~À‡4l-øò“v@¢ŸdØií‚Üز¯ìÇ¿9«¨þûëô ·BÇ~5û5 í0Ä•Ñ~f´œ¸0G.ïf,|ôféÖæYq\-v~íIôÇ^—‘á­ƒ‹£C(è³ËÙ–&"³Á殺ù¾`¯‚t£Å%BI–ë‰]IŒú]eÚ?‘™ë?¬'Û{与˜i÷ÚÏ*Ó.C6i¯yI‚aü6±ÃÄ-Q0̨µÚ ³ÑhiNÃÐ:F¯Ûk™-½cÖA½°½ß~)«H?¢ôÈ‘ÝþtÔ¸ý¶ˆZØünv ¢¦€ZØ^‰…‰Pa±°ý…—ÛòŽTØ©ÕJT)ì@_‹ …É6Ù‰'îÜ‚e|é;@° tˆÅ«Q½“°u,!s –ñcË 0Á200º3Ë` pÁ2~|~Vtµ§]íiW{ÚÕžvµ§ý<ö´Ñ&Ø öÄÍ/öÄ {⇋= 9ÅžøÅÄžÄðyÅžÀ×Ï0AbOü¸bOüà0±'ðƒ —%ƒ¶NëÉ Y£ýÅÛþ%hñL½ +š í2 m]/©F‡`A2UR}&÷ƒ¹ç|+ÃØrBY>–âƒf³ÛR¥Ú¹–[ Õ™ZÉéÏ9»„å×ã{XÉ5,š¤ g4!ýøÆ†ì!°ÜwKŒÊ¶vç»ó)ù…íƒ9Š»iè y}"×͸ò^¤0ž ¬”1M‘ÈÎh|õ§‹³ŽðÉûIÏ&¼eQæ—.?þ4øðê—7e¸;Ë“·@Œfrņ:üÂ$®U6¾ý¸æÙT–íÎÕb(­ ‹R,-Î}¦zñí¬(˜“Ês='/ÐP¶ç¤J Zb¼Z¢–ZnI‰BvQ*}-ûø…릓)Zîì,yQO¦³èá''8LfGL J˜€Ïw›Å^*°”±ê"IK-Õ¢Ü4>K#*Oäœ6%wO‹/•ãûáRè)WÍu¡'ɾ^yÝîñIB¶ÿ¤¥ñlÝn-SÑ6j}®Þ''ƒDGÅ-'R ¾<æy€Íýòúòúòšïå¢ÐË#‚øœ/Ð9_ B^àÂ_€ƒ^àâ ÈË#Xþ—G¨€—G ¨Ð—G Øøòþòlê!G @É‘0t^*8»A%GŠY$GÂR/ľà¹Mo qr$Ü79RZ„ ·ÃKŽZ@@È‘âûÌpbOítžÌ),ǰ‰“)æH r¤x÷‚È‘…ÌÉ‘bƒ_|räÑ=“—)âde@rd¡©2cè1È‘ {Ä /g¿PFd’Î #«Kârä6k—§JŽÜU‰†ÙjšíÍ&;L¥Üþ=)TJ,S+*å¨ ƒ‡È‘XÕ:HŽÄ1ÄAŽÄª9ÇTIõá"Gâ’#ñ|0,Å’# Û¹–[ Drdñ²@ȑŭA®£¸Ë£t "×Îñ o½¡^È|ÒÐòúD®›qå½HaœçÞ;Ò4%NŽ<ùÏAŽ<§ð&Gâ™æ&G¢™‘#Q­ #ñí ’#Q ²(ÅÒâÜgÊ㦩ÜäH¼$•‹Ye©U–*7Û;j·ñ“#%ô€‰g@ެ<…‰ï9üäÈï{‰*YR¢Pœ)nš“yÊ‹z~r$bUÂ|¾Û,ÜäHœ lrdáR-ÊMãyȑߩ' #‹åc©áø~Qrdq_¯¼…yÎn'LŽð·Øðï©3~"cÏ&¡Ô•žZ“¼ªÞö¯Ô¹3ºRûWjÓ4F­¶9ÖìáÐÒZ¦ijÝ^££u­V§×ÔF³×½Rq™¤ UBÛQàa·ñ±Ý´eYÎ@æøËR­l¥2K´˜'5K´ÂÉx-6\iûA¸¯Å·³jeœçÐÀ½¢Ú–cmqûx–û§´&)WV8–6ZxY¸EÛGzB³ÅÇÌÅÙù-cX¦ÕJÈÀ%ÛÍ.«ì^øÈð%wÁ6MÓhu6+s =Úþ%”(Uš3]*d#µ³F –km•<3ÿŒ³ç„s”æÇÒó†þê™/”éô’¨üâHùÆ×x»Õ ™ð ÙǹŠU¨âÀw|Rž^}Îq€[Ó§˜EŒ—»íp„iä9¥Øsü2kgê7r·­ä] —‚ŒŠ‹ˆì)'›9·úqF¼W—?kïȶüÙŠS®þÊ!¹7Á8vrãÑL!vnÌa®Ãˆ0×E­f® JQmþ%'é.±ØÝj5›M}³è<’Û¿‰Èq/f€%“‡U6öOnÂ%ÀG•ÐT1ÙŠæòzš¹_F¸ýÄM4/bn)×tîî§¹Œè§¹¯²VäÈr Ù-¥ri_±íü/å…N¦+SžÝdê/Ïæ4¹ Wޱ€XaݤÛZEù¶¸xï… 
•7¢/S±Ç›TcºÓÕ˜–7¦ådxÒR+¦ŽÞåõÏ$¦LìFßC9ÙYPŠ•DIq´V%ü|¹1ò[*léÃÎH"¦!D-û²EðÆ ÜD8f¼ŠXù:‰RÈFvîÞ´Ûûwo@¦p¼ç§î<8p¿Ä ³ά´37_ˆÇ²òT¸ï+Z/â¦lgù&¯Å¯ÖÕ’gëîØ>Ÿé±~f&ß¡ö2¿aœ;qxÔ Ÿë¸Î¢eΫZín³‡5¶Ò&î´ÚÄ´Û¶Öé¶Z˲ZZ·k÷´a£Ó6»Ý^sÜ,ÞÄS¤Ê#IÄ_ýÝz¦®£uá2‰1Ð ·j1XEÎ+/¢ ,šÑÐØ÷žKlM$ò΀¯~ d/c{"ቅ£–cpŠeÂ$)’I/5*Õ¶þ=QߥŒà9^qÕt¥®~Zkð“ò0^I`èá<¥ƒ÷þ6MõƒLÀ͸rÂfY¡3 ;5"!oÏiîÝšíÛ¨'¥Gˆñ¸›˜5ÛZ™¼ÜãWô>”tšj}•w¼ iÖôÏÙ`¸ªÄC f£_¯g~íx hx4ã«=YÃ×Â;»f»ó~¨æú¶åöÛf§Y·¼šÒFu6|úIÔÓZH#¡KdVCÓ†´<·rM° ªÁ,æ‚É4dêr« m™Œ¶®K®ABA”hBrù'÷ƒ¹ç|“i„1ÜëvùvúT(Õ§Ìf·¥JÁ¿–tl‘i2©ÎÙ~Ÿ_·ü’?šÆ$2Ã}–܆²\Y}*Ò¤¾r©/Èê«å¦`R;YVbLâFvJ/©t²¼«ç~­¹þD-¯Ë®|ëH›SÕ ¹%yiŽä[yá‹8&ª’ÀšŸàAøõ9\u<߬>}_\æÔŸ×pJÿ[cfãÏkM„^Ï6üdÑ §ß*;q+—6µºQŸÊ…J·¶åTõÏn\Ól*ɶkqK/ÀBª…s»ªu¤”ÅÑçš°§Õ«2ö*c¯2võ¯£—Ú-ù ³´þY;û‘m5'‹sRyÄÈ Ãûæ ÷qX»©ëÆÓ7Ö¶øÏšÚØÞ!¾#åF¾FGÅ-çþ” §æu#Õ×ɉÓ['8”yÁ m*Frå[p3›*‘˃äå/Ð-BE"ELlHBBn 5—5¤T“‡„ ›:²Á•ìãWôÿ(%2©üZ&â6€9šX@¼8—ç–*4‘©ëÚM­†~!Ï–ÀÕ9S¸t^Ïb¿c/Þž—Ij~>Äó/kšƒY†Ä ì„q[J•Çn–V.+â²ki›¶Ñ¢ç⹸K(44œrGAÙ*Ÿ  w¥Ômò5Ž×«kZ5xù;îøW·*:l7»À·¼¦éðÔj³ƒnu¥ê ¦}þ™DÊ'9V3Ûö¸=n ‡Ý+U¹±B%œÛ6…Ï]÷AÉ(cdTÃß™H"Ôä~Uò¥rúò:iê—`ŽŽž‘Mr2)xa1…8Î`c\ôò›«¹ÁöÓR­-6SÇs¦ó©’`Çu¢‰cæ—Ä\åÏ|Ô¬ŠbIÒ¾¯ºU#k^ÞDÁÃE‡|*ubOà !ˆ[à›°OyõùŸÊ×ül­ô•$ì(éfq¨äÏ9 i›*Ã%ºqB%VEPRY„kå¿– µ4þ»©O#+¼UfVtÓWêþ,ª³XOj=ð]®ÝÀ¯³†õ©åxµ‡©Û7ô–`ÛšÓðÖ™ÍèJó̘‡sã‡ÑµòòoÂó—9MRØË›¦0¶Ü\C‹1Ë´8§ *´#hæxd¤´ÂÚb°Šü¯3s–K;?¤)žrO矷¬,ª°%éÎ|ùþÕ¿•Oo^¿ºÌyd?BCvéK¬¶Oç£~ö9ÿö¥a(Jü©Ôe^êìsvý ÉÆtâÊ~™÷]ú³†’"Ð^²çÙ‡œ‰çé?d¶7ìÓÚòÏÖï™­v¯cèÝÞê{$è÷U—®)\µ¯:ÞØW/Tv9GÝA31Õõ'±Æ“Ì=ªÓpBÿù* )Zòc…|sX8˯CèGÿð‡ô£†Ñh6Û½n»Ûíš®aô ú»dãjsÅ/Ôb¶^ú‘T¼g‹vÏbG­ÍF³Ýiw¸Úvç§ãfUÒJ*_X<ýgUʶ£ÿ}¤üa€Î…òOǽÌëÝ\(;«[RÁ•}}‹RKاK‰;­ DƒfÑh`ˆELÜëôKzCçµ²ýÓëžþÎ'ʧdŠ:¢wcÔ,îÌÖ4M~´™k= }ÿVùÊ»”FÍèÔÚ»78Á*Jr\¦Œ©û(/•øþY–h¥ÿ­ÙãIasêkÊÔg2”ä4%Ní¨É¯/êó¶í,MÒ|¶ú3Ž^©[ЦÌ¢ßË̳ÁDzV]f9Œu×Ö“Ï5kF£Ò™@£Qû–®¹Ã¬XE±™´ ›Ï—#ŸßÖ²¿¬>ö·´Qî× F¾{±@²£a†ŽWßô¾öÒ¾¸KˆÔkîZ[ùeïtèÚŠÒT(vëB‰1èÿÿQùúîõkÅ0jíšÿNï=å‡OÔ§þaEé/´Æ×Ê%O»ôÇ%þÃñþ°Ö \kÀ£žÆèBé˾û‡€~e;};¬BóÜж’(bvæ3ÅñXÒàÊÌOœ^÷Þ·F 47²¢çŽK *ãÀŸ*ÂØÔ:p0¢ÁÜf‚c#m‚¤Á};óçDŠÓêIXß3UÔó“ÄÓ™ ¾{ÖX¶[+d=ãЂ9#Åó#Z®ý;d挬·~øQ¡_½ñGâÝfÎì»­½5üïµîì&-‘²L3Úk9§ô%mHÚžÇoÇU$JÈ/ʽCS ÇsÒÈ$\Cš¤8‹HÔ]~`[Äý4iÜŒPµÛn8`KOÖ¶¹GSbNhö;̹a#‰‡ì—ƒüÏVPuUVsíh©âd¹î¦jiï/Ýß+¬Pé÷.”»Í’fƒå}õt«g–2_­ÍÐKoÝŒœ¥Zx5¤Ã3,+²üKÎl‘Z}N·OWÕz‘C~qÁbà=Q,7>:Pn¬;ú %ûN Ó|ÚUÇ0ŸÌYî1Lûa³Ÿ<Ó°O³]—¿üøÏ¾2lõÆÍñØ2tÝjF»7nÐÿè­æhlë¦i*…v]öÏ_~˜m_[ÁdÎPÃþ¦³YàÿA£fýPSûM(†¬®}E|¡’j•Ñöê+ax#Œ3$¶?%ƒ$­¡Pó‘ø´YÚc?¼ nûâB|M·ÌÖÓ™"u ߢÀÜÑ”ŒUâÿ–_ âÝÕãB°2©ÉØni%ÚÂ†Âæ–>«Á °kG–’öµ1ʺõxù]¸°œ9˜&›PÖ}8º!Jª‹¸»JP¯)x¼ÜÄL˜]ß¿ÏJÈ—ÙiàPûXrç3Í.Ç–í¿JÊ—‰ &?Ÿí¿êÉ‹cî9áвdâÄêÜÔEYgÔ}baÅ×-CìÎéhô"Ç–R{ îÎ$aû!u{v5 rÈ&,0F*qƬ] vŸÑÐqOçWZsdàYÂëEB è*¤&ÓØtAŒíX£©ó+¤†‡¦Y4á`[7Ø­’p¤ñ½Ãöý`”ò{±=ÚawãÈ mŸ.šqÇ®Ÿ¾'V£Ý_Ä $̵åq©´€,ÃKuFRcµ')È>k)ÈéBB öŸs?²¤ ÓL-’Óҙ̂ðìù )às:Ðå¸Hº«[ËV¶É¢Àgÿ™â…nvj,gð¤Å–¯ì)©±L*{·wNƒ·”’Ϧñ¤’igJH|¥”\RsS é*\J‘×Ý’š°·fõ©f²­,i9~<¦©íCMê襅gä5¹FXëØžCÿÏ#Ãúw4G Ж—·ÎŽ*I~3µìÇ“3¼RììÒŸ¼äqê{õæôøs`\rÙƒ"/öå"™¹Gòœä(°L,ã'y<Ë•3©OØû<ò§u9ÒU+Í‘“8!3ð¯ÞE"£ø|¥6q¢›ùÓÈÌš.ŸWú¦áÎ’®? 
#+¼‘­g¡,hg"eË*áœfTd)+!Ðäª6"w¸û™òr)ê"öm2-¦¦äôéý ÍØâ³3)¹N|lJÓÁ¹ÍÙã’ÆÔX²‚ç q§’"ÙòDÍivœX«…ᯠ!,ŠÕo“ + ¶}ª¾õ¤ú’fn‘úS¢Œb¦QîØúÀ¹{É'Õ:+Ž—'ºjþª†ò›8Œ¼‚‘C«à†½ô©2 (ö?ñ«–ñk鸂_îžn_IµÜ…Ø5=³¹YÔ‚O`²»6ó$•ÈÞ¾\Ñg÷Vp{q¶UẋF‰"9([ß †óñ˜ƒXX·¯zÁQ?tþCúŠÙn7ÛXð‰"læO˜ Ô]Âô¹OZhןe-Êž“¤m‘ÆàýO'€Ð“›š„k¼ò¡ª… ®…¿½€öé­S1c»¬©Ž(17"Þ¸œŠwÕ²¨&‰)ܴйø`/Ãîmñùk÷ÁòÅéAÜñ€á{ö§PÅÆ $=‹1䣠ä1Ð0ž÷HÚ8 Öß«­ÆA¡qðñÕ<º¡ 4ÿÛ×$p¤qðÌç‚ø’Ì`Æzaðäé·åÖKÝœ‰ãiñ§µøÓÕ((< ^ßûVû±šQ%$RÿÖa²LzÏ€(y4uÁ¦-—§»· ÿg9ÿ¦¼ùüåÕßßÿüùÊû¯_½W^üðáÍë/?ü ¼ýøIùõó›OtH€f‡þŽcó_o^+±ÐEx£h¶ò‚Ø7¾òÿVV”¿þU ]BfŠþBŽÅ”ù”ù@§ÃlMoiÏ(ÚLQÿG‰Ë²¡L—PÊÿ¨ËÏüð’ÓCÿ®³Ý0ÌNÓ¬5›¦Ù3{SÑ ½c˜Žit›†ÙîP¬D1°À×_J+Ô(r2c "IÅub!Vú’tÖªžô=™Û“/ýÂÑ·ñ´F³Ñ7§™Ïlöë[ý?΃ùm¬|ùˆëõXÃÊrÝߨõ}2ríÚ´ÊüC®B-"?®mzu ¦Š6V´»ãÿ¦ÄÂzs×Uû«P))ïÌŒý¹7Jß ÄM ¢9Þ]ªÃ…“Œ'ás`œì~Ù‚·„=ëÇúúôŽór°m âw{PKk»ñ"yÀèÍk/ß+Dßn„„Íâ~s6¯‘ oœÑˆxƒØBˆ[jHp)»’â{t©³¢¯JÂgü\ô['Ž…êçCï.ÁPçÃô"(ni]kH\9>çñ<¢„h+V¶(cYñ“Ü.`Úe÷~0BFeÁ?Œ2œFF#Ä\7Ä‘y¸³!žú""êå:ì„Aò" *ö½åD8ÐäšÆ=rqqjJqÛ˜ ‘3%ô#쉆^wqԭЀ${µ¸ê‚—{þ&#K¤#ÃBËÓ›æñ¼1X>8¨þòÿ÷_þõá_Ã7vdO?üËþrsK¦ß.?½ó_ª(†'ЩÍÂogZB+{*Œ¬élë£LúÞw×ÀFÚÛèm–ž~EƒÆ+eŒ›§ËðKú¸RÒ6¿I´D±ûÚ*±ÐG÷Ô'6jÞö.l`Ü„RÌIª­” ±ÅŽï‘à“€xÉœ'Ï\)½Ÿòþã•:wFWjÿJÕ½v£ÙìjVk4ÒZæP׆¤miv·×ît»ºmšæ•ºç%Åsšòý/}+NB‹H(s:/± õãŒx¯.ÖÒ·ßü@F NØa鬒¼‹(ÃL´ë­E6­£šÃëækÄt£Ä‰òv'zvÏ,•Ú£+)c$,3óB†Ìã6Ÿóõ]7{Ü ß(¨˜Mžyê{õÙ<çm­v£­cŽ¿¬Éév¬‘Ö0¦Öê†fô¦Ö¶z¶MŒn£mœÖ5{ÃõãÌúsNÔ#ﵜÛÕù×lÑL¸/‡ó¢XW°i…^ó<™ ßx!{>¹“¨¥Ÿ˜Í“瘇„xJrv¥Œæ„]¹aŸ`×+é_¬Hyáùן$GÏ/â'4ñº¢Å÷â·T“6…ǹ‰y—Ñ<®‹®_nÿœä"¯ì˜é³ËcKvѶQÖ=.Qàï÷ãå©ß`ìÔz½v‹N°Ô[šÑmÒÿ¶»MÝè´ÆÁŒÚ®Öèu»fÓÜlÿ}Æ^J«÷}Çí….mœì5_â-Çþöë‚c{~g†ö@à‚ã~‡X¿ò†UU¼»J/¹1PFJ ×O®Z¢7u·èÆ=5ßhÜ ¾‚xÿ âA,èD.@èD(à "xF¼Ñ€.np:)ì> c.L@a&ÁÀèθ0 u¼9øÞ€ w݈ŒÏA»‚§Ç0ìÕ]ƒüÒÓï"u÷¾êbÂndó£BndPÁ7²Øz#›:;‰T V•`=“+yÓb:#še¥ËHÄ!62`Ä.õÙ8'!Asàåk0¸¥…p8øQ3‡T~f?673 c†±ù˜!@Pnfn±PR0„¨ÉsÎl,Î9®üÆK»Q¶%̤ÉS(—ÔË>ÄÁ[jÜ®˜Al¿"8bÚ•=žhwÞ-Î\V|»Fèii ~:£ZžçGV¶á€Øëy)ózö¤…–½Ú£Í¨Ï¦×CéÇeùÖn)³°:…°z@…œK\.3yD­fñ€‘Ó«û[ÂöãâB†%i  œ Y1dURŠƒ*§ûÕÉéý˜ÆÃfø˜Ç³7„óxÄš¹#òÄ—uïe1`«ë†ƒl©o%©ƒ–¾{äºÜ/ÝMæU¼ØF¶°›!Óøq㢚ÏyF,cž)‹ÍU5Isº\å)MˆçÕtÆ‚1úWŒ} <ã¶™¯qOæ¤ìàâ)ò[rOgæ& Ãy‰â‡A‚œD¸A,>âßíD³‰5¶Ò&¶›C£5ì´µ.7h÷:Úpl6µ¶Ñêµã±NìFñ&F˜žÔ°ÚÆÙ.v‚!ümoÂ>ÍOfûä»OT@kYÌfÂwôß2bŒëû·ó™23m§g¯!{ …ý¾ö`MÝÚ Å cžÍ¹†$ëÄ)¢Ä:q‹G$Öõjz§Ùd”ŽF»K§“NOï¶ £ÝiÐ?yu{¿ýRV‘*žœtžÜ%6On¾Ë|Gï5L½·ÙJä.ë VÝ^ïêvL*©náå6ç *Ð,8”ºS«•(£î@_ êöUL¨Û%Ôq qêc u|€@B”ŸPê`È|„:&?¡ "ÔÁÑŸPêøÜ÷½ù1·ìÓCË®±‡ðüÀùO)XÆŽ/цÌÚ&Ýëä¡m¶þÄ„ç<]‚¡îÙDF³±@l„€%¨v‡à-€Ûž™ ¾ËhT_ÑáÔ¥i,qì¼O«è¶®/ð[‡Wü¼@ëDþ-ñrçûð<…=X.±Aâû~DBS "..ª‘­n›¬ª̇;²lj%¬øÙ p44œr-ÐrY£ÌϨ`s3ê˜PF=ʨ‡@ó1êˆBŒzA|NF=“QA„0ê¸pF=Ĩàâ £ËϨ õT(£‚-¡€Œz4œQ_í°T;,ÕKµÃRí°T;,ÕKµÃRí°T;,'¶Ã""©ç’ÔB$u¸I*@R€ Ô@å–Ô`óJê!A’:Pl.I((¯¤n± T0„À$u8ð¸$u8/•º”–´„Æ_:D]€ Ü„‰èZ8º­Ï‡.É´,«ÚÊt«‹Q¢0Ë¥erå`µ] 4õ¤æmN¡™ÆïX$DhÖIÈÛBGÙE%Ü [9H¸Lö¹ ÷2~ˆ0Œ¸N²}A±òÌÞÐwLóÏSÀâEÀG•ÐV´²íWäf@.¬¸t€€-¨t€€ Q}à6= Ò5î$@:@àjØÜi`á§ÄfKïµÍVK׺#ƒh-‹´´žÙiVGïŒ ÃÖM£Y¼ÿò± îIž¤ÍVÙÇ'ט§^ÒvU%“`V^ή)únéIíFžÏ”wÌq&PîqF5Eba”bå·/ðMاa"w¼‘ãMöh]@kYTä„6PÊßõ•F³Ùh?yñ¸>þËRe(6‘d( X<ª E·×£Î¤5Ìý½mš ½Ój÷ŒfSç¡Øóí—²Šô#JÙíOF†‚ßo‹ÈPŒu£w;lü!$,±Ç@Ââ#´€°Ä¾ÂËm ù KXâ´j%.,±·¯E…%öT.,± ,,q_Xâ TX‚ *,K@€a dNa ~L€°&,Fw€° .,ÁÏO{àÆD=°…YÝrg#]óaZá=L¡®Z!:n6ðk¹ä–¶½íOg¾ÇîÛÁ7°ì œ«`™ž1‹þxë%mЂÝûÁ^RxQ¸5 Ê€l We;!Ÿ’ZpÛ ÛÞd] ~3ìY÷mºi|p7Üb À[m‚–J‹mâÆ D04´k¤ZR$ ø±ù ø1ÁŠüÐ`E4§"?¢˜">¯"?:¯"¤HÀ+ HÀS$àÇÅ E,@‘€¢H@+°%4T‘€Z@‘ ZšWKóji^-Í«¥yµ4¯–æÕÒü-Í…¨ìx>*;DeçÇ…PÙùQ!Tv~T•ŽÊOeçÇæ¦²Ã aTv 6•ÊMeçÂ-” † •ý0•ïŠC©k0¹k/ü5„ÑKˆÊc´#– ‘Ñ^úôÍÉhoþ®bî(†Ø&GYžaY~äå8–×d @ „¿F.+NXÖ@ÔÊaY±õ¿¬>\Ö@Ø ¯¬A1…e ¤šç“5.‚¤šÑZIÜ.Ó7öÏr¥O§ºÒí®æ;YŽˆŽ*¡‰˜¾ÆjcXŽÜî$rScoç‹kxÀm5<à&vkx4'r—[MíBhÒŸwÚ›…=4*¶ ¬øÁ #²E̾]ñ£ÛÆÂO?z¤ÑZ­žÖ4Ú=­¥{Z·ÕkÍ¡m[F»iêÝŽZØâ¢x¡y ø[áè‰ßøÈ©¨žDŸñ-qW§ë…’`dƇ¢GG܆ЇÜëBâö¾ ûôVYw$ÚÐx_VúÅš)o¼;'ð½)M–·© @kYPV ÛlÚÖô-c)»¾;Ÿ)ó˜¾|Ðvz2E<Ñ=YKîpM­YíÁšºµ?ŠÆôgá߀üÁ¡†_>¾`#åE_y±Óø‹Å“}UÍhÙc+ÍXBh®Ë[_ËòãM‡Ä»CÛ²muhs †Í³BLÚ:ñŽkÝØ1â#Ú˜Fÿ“ô•+5¡9;C³•ùñ•zå¹þä=¹#.û„ã}ö³Ë¹ä #ã…}åK0'WÞ:çí›;:üÿ>§?|˯¼( 
n3óƒ(ì_yŠ¢)ÌYûJH?K"öEIº›Ö÷éÌ‘cîqÞ7^,’&vƒ‹ýYâNÇŽëDìh"pìðÊc‡å©žØºõY೓127KÀÆ+ÑèÔtúÿŒÊ V|úîÞ5è¿îè†íåf{ù/’ÚÓâ©h½µ¸ÀÏ–aÓOy·ᾚ%'O¬—““'íI¬C¬XiéÞª¯8J1„‚>»œ[è´;ÜYñ>›Ý ö*ˆå-.ÊÄ·ÌžwMÙêw•WþDf®ÿ°žZ:b^Ùk?«¼² } ½æ%ÉbñÛÄ’Å·xüÐ`4§€?¢˜€>¯€øJ&"HÀ‡W@À‡&à¾V…ŒÈ-à€øð£B|¨`¶„F€ øÀïwª$¾Jâ«$¾Jâ«$¾-MêÏ'õI}ðãB¤>¬€Ô?*DêŽÊ/õÁÍ-õƒ„I}±ù¤>€ ÜRgpƒê|î&¬(LÓä0Ÿ¦ _ÓIHK…ÓÑËíû§¡»Z§¥¦Ñy¸;ñ¤u{©A<&.h’ÊÊ›œr¡tTŒÆïŇKhpÃ$z¢_D(CÔÊa¡ 0rʵFõFuqðq[ØÞϲ&¡‰’" äß{$øDÆ$ ^ò0´SÒ;; ÉýÇ+Fb¾RûWjKoéºiŒµ–a´–n7µ^Ãhi-sÜVcØ2èr<â`b+æg¨ˆÈ=Q@ën ¬u7±Së€Í•h¦pºôiîFßÁ‚™Þ³s…dåi¼;“ôd—h_ý®íøK|N4ÛÆAGÃ6˜J[ðL ÏupoUiöZXc+mb»;ì™vÓІ[k5¬¦f5»]­Ùöz®ÛÖM„\!ño‹~³i½˜F„À´·8[È޼­\ŽôpvE6gxÁcëg1ˉL:Ùk^ éIÜâIOFÍìé½^×Loì7:FÛ0ÌV³ešfë íiÿ×_J+Ô(rd/<ê·ë¡>…·½oiPŸö;ˆü$>J…ÉOJ/¹1ä‡/ÔÉUK”u¨»EIP{3ÔÓtƒ‡ýt0ëd?Æ…°Ÿ£ñ³Ÿ8÷ôùÙOüÉ6€ý½“°%f?9ÙO€#~öÆ~£;€ý…³Ÿøñù/N‚ò›1ÔéÂ:êÑrÓå§eèÅD—v”À³c˜[ÒÊØF¼îÛÈAì‹pFlü~˜þ„.¸ÃŸˆ5r|f—öãóWSב[= ña?7Сï6Êþá„‘<¼w¦ñ+» ï@Ωé”í̾çw?¼£A($*nkÝÈÞ}ÛïìÐ+Œijò Ç‘ºv§+ÿô€Kš+M­oŸçÁ$Ž¢mý/²¼ˆšùÕ³î,ǵè* 3v§ñÙCJŸÖ:ä¤=3wXFˆ“~9Á•?ËŽ¡«k„ls|õv‰À‘«6±oÇ{Ü3#©…=bЗtBBd’έ­—,šÎ¸GÕr®Ÿ³dëã«T+±%í&Šf¡fF,~ÙïêݦÄ1²²¹¡ÆÖö/ë$²ëô_ì•¢;:vØßkv•VŠ[ò°½l‡ ŒBؾëì€D;­HÊ'ëÉCúß%úƒäߥ̟шuãŒ#-[*Z‰Àÿ˲&€UYæ3š9kú’9l¿^_nÈ2—5ê%·+`µ×æ¾|¼RëW,¦]­ØìßWO—ØWêýq6™¥Ê-øÃä1‘þéoóY’Þ‘`˜|nB¢+u±P¥¶ÃµÜfV©•dÕÎYmýú²Éë~àLOóÙ#Z<4úñY¿ì¡°Ü±ÉY–m2¦0”2-_úñv.›f.Ê1u_<Ñ©Òm.ÎzüÝùî|J~a1þYùß”ÕèÒŠØiŠº9Ù«%{âî•K\XRR‚Ÿ^œ^ß^B-ìï°—¸¶¼}¶ÍÖÒ¬¾°·ˆ/¤Y]šü©Ò/¤Z¸–†¾×/R›„o-‡Ñµª¦YÓ?gš’–àÅ*³Eóð,˜9 `tš÷ˆÕž¤½µðήÙɳmµ8uï·ÍN³žãFÖÓpÈ{…©Éb"i9¦˜ŠÁ`FÇ•aÐÔË©V0´Ë0d´u½¤q`Ý–(Z¦Eµ)øti:‹~rB¼X ¹Ìùöˆ½TÕ-%.ˆßÉ¥åndY剼W›žÑ¤¸>A­ePZ™ºç–Ù¢£â–  )@ßlÞläÇæ³‘üf#?4øÍF4ç›üˆbo6Šáó¾ÙÈÎûf#ôf#?®À›üà°7ùqñÇèÍF,àÍF~TÈ›Tð›l }³‘ZàÍFÐÔ+$=2–¡s @³\é!!ë¢ÒC Ô ±/¸¸êà–( =¶Å/=$-$=¶Ã-=$²€I ï Èfˆoó#©PœÀr Û‘x¥‡„ &=$ܽ0é¡"f ÒCB.þŽà.¨v»Õ5§Œ@¡¢íPÂ~ †ã*#ƒò!ôÕª(”†/q‹Ù+Y¨¨`aq¦áB,ÐQø­Å•ü‰~¡¢B}-oˆq“ [IÈ`¡"$»…ŠK!.T„T|¡"¤‚¡!•¥Pz{œ©PQáv¸–ÛÌ„ŠŠ— ,TTÜ$×mu§~”nAì¶:ži~¡"›‹³ãç䄊ð=±èÕ;?½x¾} Âï]ÀM•s8ÒÐ%:àóYqJáX;²ôV“qHa™â–Â1xHz«Z¥‡p qHaÕˆCzÇTIõá’Â1”ÂóÁ°”!uPzèüÖÌÂÒCxkeé¡jµ‚Äo•ÃòqBªüµÊ_mm&Ø"MSâÒC'á9¤‡Î)¼¥‡ðLsK¡™L.ÝsJ¡ZÝrkóÐ…M|û‚ÒC¨Y”biQm-IS¹¥‡ð’T.é¡ê0çsx¥‡ð,C¤‡ªÑ^-ÍžÅÒìŸñIIè?€ôžu€ôÐ÷½¨—‚,)Ø—7Í)=tÊÛ hÒCŋ§²€³Q“±‰+=T¸L‹jSðé·ô⬄\æ|{„[zgŒbK¥ånðHm3šcNíúšÙ0{æ¶wÓDk¿Ão !ï»u!×*ßZØÎ–éLX¨¨paÒÐå _££â–  )Äë BgDÖlær©Zà0Jh˜ê, áA~Üñ´L‰‡WÜ…õÎrzY3Ô£^…¸Ø÷V,TpXX É´]F™ž rq¡K˯ژ ‘3%ô#L´¡¡Æ-J †Õ¡ceRÍå—™¼K|2_û£¡µ†4·½ñcá™yBR/ˆGCMŸqÚL@s‡ªµÃ‡ÊËqçO–çùt2HeððR»¼2•²À£+œJfò+¬u õ¤¦qšPÆMñ…Æ:ENc*þj·o4~Çš/S]¿¤5 $ЩåY2z›)ú}EëTÜÌ_Úà{z?"ü’ªe$ò›$+WÆòH÷Ñþþ=uÆOdLâÙ$”ºÒSk‡·ÁЏí?^©sgt¥ö¯Ô–ÞÒuÓk-Ãi-Ýnj½†ÑÒZæ¸94¬Æ°eÐ.â2I)>ª„¶£>À£QÄÇvKˆÉrj4§%&ÕÊVY1‰óc­pªO®´ý :TÅ·³d ,p¯d¯Ê±¶®~õ(qëB¦7¤òV²Â±´Ñ«qU´}¤'9[|ºW8;¿e Ë´Z‰ÔVIÆvël•U€ í,‰‘AòιԸ#5ÊÙY“Ñ*a\Åé0ó¸8NØciÆ+Ýx©;Ü §D‹Ùõë²M^Î]÷Ò§ îC¹ÆK _Ë ;‰œ%wlD‚©ãÅ»4¿Ð¥ kòäžãqËp„.߸s\Nä(=z¬âåòRk4Ùeä2CæÑ*Ÿ¥˜¹+½G0¿Œ/¥.Ç’|+e$”ëùDN¹­J*ª¤âœ’ŠŒ<öì'³5Rµ1©ìjI»ŠÇñ—×—ßÓ ·É³;æ,Wºåßîùͱ՚¤Z“|k’'jŒÕ å9¯PNÀÛrº®•¯U«áj5üd5œò]ª•0¦ÅD¢éÙOïK'Êi7}Oaöxë!nåªmÆ/‹ÏÚ{—ÁŽn6›í'ì¶m´ý[E‹9î祶¥ Ê¡q "o#¢Ç‡Ò[DlQFJvx‘mÄ*@ãl#bšÝF”\†#ÌmÄçÆ·lìd‚]ßSèìê<¯°Ó++3lÙÍùnðTÞ]âžå9¦ҭȵ Ùhϼ°´I<Éž"+ˆÊ4Ú7d4wIð¡œÓ½ìyàD¯érƒ|‹Ê2šW@<†Íò8—‰¾ ,›\ÆïNqS1аR}”?C•6+mìls¨–9I—:9eoŠO¹ëÌW5tÒYÓÝ,ÙøšbÙã³ÉCJëênU¨â@¾Ë8°¦úœã· m50‹/w3âÓÈs Kùêçø…ÁÏÔoänfIC—ƒŒŠ‹ˆì©snõãŒx¯.ÖÞ%ªw~ "Ûòg+5=õW±I¸ &=ºSÍb'àÆLÍ>¸Í>Q+‡5û„%@¢G8”°{¸üò…b¡ô91N+†>žMP¾áRL·{Ju¨L<ì·h^sKééswß8qe¢eqâú*kÅ2òÖRòÕX,Œ¾–:™Fnyv“ɼ<›Óä&RYÇ—V˜F7ùgkËRÂy†_±½´}™ —:Þ¤ÓÕ˜®Æô¹-»¥¥0þ0~ýeô.¯å.1eb7÷ÊÉ΂R¬$¯BŒÖªµµƒµ¢Ók·;»ß Ûoë—Å·T pù-¶Pbg$SO&–ý Ù"|cl"œ³Kðñ›_ILC6²s÷¦Ý:°{1…ã=×8uçyvIÃr=g ³ά´3“ ]ZD\BMhVž>Yð­q]o}ã,ßä5ÇVíÞŠì›ú]Ûç³—h>3“ïP{™ßǰ θN<ï:<×qEËœWµÚݦ5¶Ò&6hƒ¶ ½§éÖ¨«µšd¤ CC#QÓ2zºm5ìâMŒ0Eª<ÒÍüÕßýR…©ëh]¸Ly 4È­¯PXEο96°hFCcß{.™y‘È;¾Ú.½l¨«ï‰„'ŽXŽÁùLÌ1@‚ˆdÒË×9Ô¶þ=QßxŽ#6‚çxÅUÓu½úi­ÁOÊÃxŸÖ†ÞS:pïo{Mîàóšp3®œ°YVèL‚ÇÎ×1 ¯¥kûv<±Ç=3’RÈ#ÄxÜÍÌ€šm­ÆLþYŒ¯è}ˆ?D’a’¨a}•‚[Ðn¢hjÖhÄrÒ—±ê†_Ù‹ÜPcO¿ÜÔüc¯ÙA$Ýú-yØnœþBªqÛ÷o’2´±ã’—Oe6ë)Qa^ª—Y 
././@LongLink0000644000000000000000000000024200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-0000755000175000017500000000000015115611514032765 5ustar zuulzuul././@LongLink0000644000000000000000000000024700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/copy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-0000755000175000017500000000000015115611521032763 5ustar zuulzuul././@LongLink0000644000000000000000000000025400000000000011604 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/copy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-0000644000175000017500000000000015115611514032755 0ustar zuulzuul././@LongLink0000644000000000000000000000025100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/gather/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-0000755000175000017500000000000015115611521032763 5ustar zuulzuul././@LongLink0000644000000000000000000000025600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/gather/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-0000644000175000017500000010127415115611514032774 0ustar zuulzuul2025-12-08T18:02:46.536028269+00:00 stdout F [disk usage checker] Started 2025-12-08T18:02:46.540774785+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:02:46.706935110+00:00 stdout F Error from server (NotFound): pods "openstackclient" not found 2025-12-08T18:02:46.855412873+00:00 stdout F Will retrieve SOS reports from nodes 2025-12-08T18:02:46.855624518+00:00 stdout F Journal size limit not set or invalid: ignoring 2025-12-08T18:02:46.995508253+00:00 stdout F error: the server doesn't have a resource type "openstackdataplanenodesets" 2025-12-08T18:02:47.511119242+00:00 stdout F Gathering CRDs 2025-12-08T18:02:47.716389257+00:00 stdout F Gathering CRs 2025-12-08T18:02:50.570640844+00:00 stdout F Writing validating webhooks 2025-12-08T18:02:51.424611902+00:00 stdout F Writing mutating webhooks 2025-12-08T18:02:51.552826016+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:02:56.561221204+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:02:56.782941078+00:00 stdout F Gathering secrets in namespace openstack 2025-12-08T18:03:00.929352054+00:00 stdout F Gathering csv 2025-12-08T18:03:01.095143128+00:00 stdout F Gathering subscription 2025-12-08T18:03:01.228543823+00:00 stdout F Gathering installplan 2025-12-08T18:03:01.579780509+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:01.850244588+00:00 stdout F No resources found in openstack namespace. 2025-12-08T18:03:01.857269247+00:00 stdout F No resources found in openstack namespace. 2025-12-08T18:03:02.046738696+00:00 stdout F Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+ 2025-12-08T18:03:02.060765122+00:00 stdout F No resources found in openstack namespace. 2025-12-08T18:03:02.073916724+00:00 stdout F No resources found in openstack namespace. 
2025-12-08T18:03:06.587553493+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:08.153903360+00:00 stdout F Gathering secrets in namespace openstack-operators 2025-12-08T18:03:11.595143033+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:12.015456040+00:00 stdout F Gathering csv 2025-12-08T18:03:12.202027901+00:00 stdout F Gathering subscription 2025-12-08T18:03:12.374744760+00:00 stdout F Gathering installplan 2025-12-08T18:03:12.851381366+00:00 stdout F No resources found in openstack-operators namespace. 2025-12-08T18:03:12.885611515+00:00 stdout F No resources found in openstack-operators namespace. 2025-12-08T18:03:13.036577352+00:00 stdout F Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+ 2025-12-08T18:03:13.069802591+00:00 stdout F No resources found in openstack-operators namespace. 2025-12-08T18:03:13.076119551+00:00 stdout F No resources found in openstack-operators namespace. 2025-12-08T18:03:15.221397465+00:00 stdout F Gathering secrets in namespace baremetal-operator-system 2025-12-08T18:03:16.603851063+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:19.845817805+00:00 stdout F Gathering secrets in namespace openshift-machine-api 2025-12-08T18:03:21.612070710+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:24.239573981+00:00 stdout F Gathering csv 2025-12-08T18:03:24.417367256+00:00 stdout F Gathering subscription 2025-12-08T18:03:24.574480088+00:00 stdout F Gathering installplan 2025-12-08T18:03:25.174772259+00:00 stdout F No resources found in openshift-machine-api namespace. 
2025-12-08T18:03:25.182366112+00:00 stdout F Dump logs for control-plane-machine-set-operator from control-plane-machine-set-operator-75ffdb6fcd-dhfht pod 2025-12-08T18:03:25.211724970+00:00 stdout F Dump logs for kube-rbac-proxy from machine-api-operator-755bb95488-5httz pod 2025-12-08T18:03:25.334161121+00:00 stdout F Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+ 2025-12-08T18:03:25.357592430+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-dhfht/control-plane-machine-set-operator": remote error: tls: internal error 2025-12-08T18:03:25.367480934+00:00 stdout F Dump logs for machine-api-operator from machine-api-operator-755bb95488-5httz pod 2025-12-08T18:03:25.519307814+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-machine-api/machine-api-operator-755bb95488-5httz/machine-api-operator": remote error: tls: internal error 2025-12-08T18:03:25.540775330+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-machine-api/machine-api-operator-755bb95488-5httz/kube-rbac-proxy": remote error: tls: internal error 2025-12-08T18:03:25.549441492+00:00 stdout F Dump services: cluster-autoscaler-operator 2025-12-08T18:03:25.553345627+00:00 stdout F Dump services: control-plane-machine-set-operator 2025-12-08T18:03:25.557151169+00:00 stdout F Dump services: machine-api-controllers 2025-12-08T18:03:25.561267509+00:00 stdout F Dump services: machine-api-operator 2025-12-08T18:03:25.567981689+00:00 stdout F Dump services: machine-api-operator-machine-webhook 2025-12-08T18:03:25.716839079+00:00 stdout F Dump services: machine-api-operator-webhook 2025-12-08T18:03:26.179513291+00:00 stdout F Dump replicaset: control-plane-machine-set-operator-75ffdb6fcd 2025-12-08T18:03:26.183036625+00:00 stdout F Dump replicaset: machine-api-operator-755bb95488 2025-12-08T18:03:26.355009656+00:00 stdout F Dump deployments: control-plane-machine-set-operator 2025-12-08T18:03:26.358445408+00:00 stdout F Dump deployments: machine-api-operator 2025-12-08T18:03:26.624508219+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:31.640167225+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:31.947286908+00:00 stdout F Gathering secrets in namespace cert-manager 2025-12-08T18:03:36.437964122+00:00 stdout F Gathering csv 2025-12-08T18:03:36.639740680+00:00 stdout F Gathering subscription 2025-12-08T18:03:36.651318560+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:36.820334382+00:00 stdout F Gathering installplan 2025-12-08T18:03:37.281030160+00:00 stdout F No resources found in cert-manager namespace. 
2025-12-08T18:03:37.297943904+00:00 stdout F Dump logs for cert-manager-controller from cert-manager-858d87f86b-7q2ss pod 2025-12-08T18:03:37.311185769+00:00 stdout F Dump logs for cert-manager-cainjector from cert-manager-cainjector-7dbf76d5c8-fdk5q pod 2025-12-08T18:03:37.470267253+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/cert-manager/cert-manager-858d87f86b-7q2ss/cert-manager-controller": remote error: tls: internal error 2025-12-08T18:03:37.484082263+00:00 stdout F Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+ 2025-12-08T18:03:37.514023396+00:00 stdout F Dump logs for cert-manager-webhook from cert-manager-webhook-7894b5b9b4-wdn4b pod 2025-12-08T18:03:37.641710549+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/cert-manager/cert-manager-cainjector-7dbf76d5c8-fdk5q/cert-manager-cainjector": remote error: tls: internal error 2025-12-08T18:03:37.667653834+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/cert-manager/cert-manager-webhook-7894b5b9b4-wdn4b/cert-manager-webhook": remote error: tls: internal error 2025-12-08T18:03:37.692406908+00:00 stdout F Dump services: cert-manager 2025-12-08T18:03:37.695993733+00:00 stdout F Dump services: cert-manager-cainjector 2025-12-08T18:03:37.699995431+00:00 stdout F Dump services: cert-manager-webhook 2025-12-08T18:03:38.116542836+00:00 stdout F Dump replicaset: cert-manager-858d87f86b 2025-12-08T18:03:38.119975178+00:00 stdout F Dump replicaset: cert-manager-cainjector-7dbf76d5c8 2025-12-08T18:03:38.125032014+00:00 stdout F Dump replicaset: cert-manager-webhook-7894b5b9b4 2025-12-08T18:03:38.320905244+00:00 stdout F Dump deployments: cert-manager 2025-12-08T18:03:38.324703137+00:00 stdout F Dump deployments: cert-manager-cainjector 2025-12-08T18:03:38.328579250+00:00 stdout F Dump deployments: cert-manager-webhook 2025-12-08T18:03:39.690402584+00:00 stdout F Gathering secrets in namespace openshift-nmstate 2025-12-08T18:03:40.274807060+00:00 stdout F Gathering secrets in namespace openshift-operators-redhat 2025-12-08T18:03:40.875868661+00:00 stdout F Gathering secrets in namespace openshift-logging 2025-12-08T18:03:41.605695054+00:00 stdout F Gathering secrets in namespace metallb-system 2025-12-08T18:03:41.661425418+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:46.675525093+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:46.760328726+00:00 stdout F Gathering secrets in namespace openshift-marketplace 2025-12-08T18:03:50.968001794+00:00 stdout F Gathering csv 2025-12-08T18:03:51.151814300+00:00 stdout F Gathering subscription 2025-12-08T18:03:51.290606951+00:00 stdout F Gathering installplan 2025-12-08T18:03:51.688984139+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:03:51.759451268+00:00 stdout F No resources found in openshift-marketplace namespace. 
2025-12-08T18:03:51.846028249+00:00 stdout F Dump logs for util from 1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj pod 2025-12-08T18:03:51.852120492+00:00 stdout F Dump init container logs from 1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj pod 2025-12-08T18:03:51.959336067+00:00 stdout F Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+ 2025-12-08T18:03:52.057439076+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/util": remote error: tls: internal error 2025-12-08T18:03:52.069620253+00:00 stdout F Dump logs for pull from 1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj pod 2025-12-08T18:03:52.079343444+00:00 stdout F Dump init container logs from 1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj pod 2025-12-08T18:03:52.199611897+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/util": remote error: tls: internal error 2025-12-08T18:03:52.222147591+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/pull": remote error: tls: internal error 2025-12-08T18:03:52.236828625+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/pull": remote error: tls: internal error 2025-12-08T18:03:52.254812197+00:00 stdout F Dump logs for extract from 1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj pod 2025-12-08T18:03:52.272400648+00:00 stdout F Dump logs for util from 6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5 pod 2025-12-08T18:03:52.383660990+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/util": remote error: tls: internal error 2025-12-08T18:03:52.396921806+00:00 stdout F Dump init container logs from 6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5 pod 2025-12-08T18:03:52.403846301+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/pull": remote error: tls: internal error 2025-12-08T18:03:52.419268405+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/extract": remote error: tls: internal error 2025-12-08T18:03:52.551237993+00:00 stdout F Dump logs for pull from 6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5 pod 2025-12-08T18:03:52.556356110+00:00 stdout F Dump init container logs from 6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5 pod 2025-12-08T18:03:52.559029432+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/util": remote error: tls: internal error 2025-12-08T18:03:52.670870660+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/util": remote error: tls: internal 
error 2025-12-08T18:03:52.716578734+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/pull": remote error: tls: internal error 2025-12-08T18:03:52.730333173+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/pull": remote error: tls: internal error 2025-12-08T18:03:52.734013972+00:00 stdout F Dump logs for extract from 6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5 pod 2025-12-08T18:03:52.744031231+00:00 stdout F Dump logs for util from 6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj pod 2025-12-08T18:03:52.883256572+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/pull": remote error: tls: internal error 2025-12-08T18:03:52.898140882+00:00 stdout F Dump init container logs from 6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj pod 2025-12-08T18:03:52.916233516+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/util": remote error: tls: internal error 2025-12-08T18:03:52.938287687+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/extract": remote error: tls: internal error 2025-12-08T18:03:53.055672824+00:00 stdout F Dump logs for pull from 6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj pod 2025-12-08T18:03:53.060665208+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/util": remote error: tls: internal error 2025-12-08T18:03:53.061480070+00:00 stdout F Dump init container logs from 6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj pod 2025-12-08T18:03:53.192205134+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/util": remote error: tls: internal error 2025-12-08T18:03:53.195908773+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/pull": remote error: tls: internal error 2025-12-08T18:03:53.256366564+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/pull": remote error: tls: internal error 2025-12-08T18:03:53.277459219+00:00 stdout F Dump logs for extract from 6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj pod 2025-12-08T18:03:53.298187255+00:00 stdout F Dump logs for util from 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f pod 2025-12-08T18:03:53.413525947+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/util": remote error: tls: internal error 2025-12-08T18:03:53.414759209+00:00 stdout F Error from server: Get 
"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/pull": remote error: tls: internal error 2025-12-08T18:03:53.429412642+00:00 stdout F Dump init container logs from 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f pod 2025-12-08T18:03:53.456946630+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/extract": remote error: tls: internal error 2025-12-08T18:03:53.593469230+00:00 stdout F Dump logs for pull from 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f pod 2025-12-08T18:03:53.594986531+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/util": remote error: tls: internal error 2025-12-08T18:03:53.597476027+00:00 stdout F Dump init container logs from 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f pod 2025-12-08T18:03:53.733522034+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/util": remote error: tls: internal error 2025-12-08T18:03:53.759438888+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/pull": remote error: tls: internal error 2025-12-08T18:03:53.761658498+00:00 stdout F Dump logs for extract from 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f pod 2025-12-08T18:03:53.774822691+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/pull": remote error: tls: internal error 2025-12-08T18:03:53.793584603+00:00 stdout F Dump logs for extract-utilities from certified-operators-58d6l pod 2025-12-08T18:03:53.898293081+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/pull": remote error: tls: internal error 2025-12-08T18:03:53.898293081+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/util": remote error: tls: internal error 2025-12-08T18:03:53.912255285+00:00 stdout F Dump init container logs from certified-operators-58d6l pod 2025-12-08T18:03:53.932385504+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/extract": remote error: tls: internal error 2025-12-08T18:03:54.073520277+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-utilities": remote error: tls: internal error 2025-12-08T18:03:54.100784288+00:00 stdout F Dump logs for extract-content from certified-operators-58d6l pod 2025-12-08T18:03:54.105225498+00:00 stdout F Dump init container logs from certified-operators-58d6l pod 2025-12-08T18:03:54.255447964+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-content": remote error: tls: internal error 2025-12-08T18:03:54.256283037+00:00 stdout F 
Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-content": remote error: tls: internal error 2025-12-08T18:03:54.291922392+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-utilities": remote error: tls: internal error 2025-12-08T18:03:54.326905540+00:00 stdout F Dump logs for registry-server from certified-operators-58d6l pod 2025-12-08T18:03:54.344684416+00:00 stdout F Dump logs for extract-utilities from community-operators-zdvxg pod 2025-12-08T18:03:54.462315369+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-utilities": remote error: tls: internal error 2025-12-08T18:03:54.463655376+00:00 stdout F Dump init container logs from community-operators-zdvxg pod 2025-12-08T18:03:54.482606433+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-content": remote error: tls: internal error 2025-12-08T18:03:54.504092539+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/registry-server": remote error: tls: internal error 2025-12-08T18:03:54.613089171+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-utilities": remote error: tls: internal error 2025-12-08T18:03:54.622336209+00:00 stdout F Dump logs for extract-content from community-operators-zdvxg pod 2025-12-08T18:03:54.626432079+00:00 stdout F Dump init container logs from community-operators-zdvxg pod 2025-12-08T18:03:54.765344242+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-content": remote error: tls: internal error 2025-12-08T18:03:54.766234216+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-utilities": remote error: tls: internal error 2025-12-08T18:03:54.779858791+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-content": remote error: tls: internal error 2025-12-08T18:03:54.799741505+00:00 stdout F Dump logs for registry-server from community-operators-zdvxg pod 2025-12-08T18:03:54.811151540+00:00 stdout F Dump logs for marketplace-operator from marketplace-operator-547dbd544d-6bbtn pod 2025-12-08T18:03:54.936823039+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-utilities": remote error: tls: internal error 2025-12-08T18:03:54.953991239+00:00 stdout F Dump logs for extract-utilities from redhat-operators-xpnf9 pod 2025-12-08T18:03:54.961868620+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/marketplace-operator-547dbd544d-6bbtn/marketplace-operator": remote error: tls: internal error 2025-12-08T18:03:54.964175512+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-content": remote error: tls: internal error 2025-12-08T18:03:54.964488510+00:00 stdout F Error from server: Get 
"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/registry-server": remote error: tls: internal error 2025-12-08T18:03:54.971347764+00:00 stdout F Dump init container logs from redhat-operators-xpnf9 pod 2025-12-08T18:03:55.122379543+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-utilities": remote error: tls: internal error 2025-12-08T18:03:55.139763069+00:00 stdout F Dump logs for extract-content from redhat-operators-xpnf9 pod 2025-12-08T18:03:55.144526997+00:00 stdout F Dump init container logs from redhat-operators-xpnf9 pod 2025-12-08T18:03:55.278656792+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-content": remote error: tls: internal error 2025-12-08T18:03:55.283302756+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-content": remote error: tls: internal error 2025-12-08T18:03:55.286017699+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-utilities": remote error: tls: internal error 2025-12-08T18:03:55.294789204+00:00 stdout F Dump logs for registry-server from redhat-operators-xpnf9 pod 2025-12-08T18:03:55.420339909+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-utilities": remote error: tls: internal error 2025-12-08T18:03:55.443062078+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-content": remote error: tls: internal error 2025-12-08T18:03:55.457849926+00:00 stdout F Dump services: certified-operators 2025-12-08T18:03:55.461717119+00:00 stdout F Dump services: community-operators 2025-12-08T18:03:55.468259954+00:00 stdout F Dump services: marketplace-operator-metrics 2025-12-08T18:03:55.470680939+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/registry-server": remote error: tls: internal error 2025-12-08T18:03:55.474866532+00:00 stdout F Dump services: redhat-operators 2025-12-08T18:03:55.898474616+00:00 stdout F Dump replicaset: marketplace-operator-547dbd544d 2025-12-08T18:03:56.039998829+00:00 stdout F Dump deployments: marketplace-operator 2025-12-08T18:03:56.513323757+00:00 stdout F Dump jobs: 1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a36aa3 2025-12-08T18:03:56.520411908+00:00 stdout F Dump jobs: 6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92100b6b5 2025-12-08T18:03:56.525114193+00:00 stdout F Dump jobs: 6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f8c0cf 2025-12-08T18:03:56.529977104+00:00 stdout F Dump jobs: 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5e559b8 2025-12-08T18:03:56.708104809+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:04:01.532229873+00:00 stdout F Gathering secrets in namespace openshift-operators 2025-12-08T18:04:01.717565358+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:04:05.775381394+00:00 stdout F Gathering csv 2025-12-08T18:04:05.969788999+00:00 stdout F Gathering subscription 2025-12-08T18:04:06.151767855+00:00 stdout F Gathering installplan 
2025-12-08T18:04:06.730523383+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:04:06.904424056+00:00 stdout F No resources found in openshift-operators namespace. 2025-12-08T18:04:06.935827009+00:00 stdout F Dump logs for prometheus-operator from obo-prometheus-operator-86648f486b-4j9kn pod 2025-12-08T18:04:06.949220374+00:00 stdout F Dump logs for prometheus-operator-admission-webhook from obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm pod 2025-12-08T18:04:07.025157728+00:00 stdout F Warning: apps.openshift.io/v1 DeploymentConfig is deprecated in v4.14+, unavailable in v4.10000+ 2025-12-08T18:04:07.110983503+00:00 stdout F Dump logs for prometheus-operator-admission-webhook from obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t pod 2025-12-08T18:04:07.134045556+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-operators/obo-prometheus-operator-86648f486b-4j9kn/prometheus-operator": remote error: tls: internal error 2025-12-08T18:04:07.161232406+00:00 stdout F Dump logs for operator from observability-operator-78c97476f4-mg4b2 pod 2025-12-08T18:04:07.272432656+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm/prometheus-operator-admission-webhook": remote error: tls: internal error 2025-12-08T18:04:07.301944428+00:00 stdout F Dump logs for perses-operator from perses-operator-68bdb49cbf-m2cdr pod 2025-12-08T18:04:07.308164073+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t/prometheus-operator-admission-webhook": remote error: tls: internal error 2025-12-08T18:04:07.443289077+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-operators/observability-operator-78c97476f4-mg4b2/operator": remote error: tls: internal error 2025-12-08T18:04:07.478625114+00:00 stdout F Error from server: Get "https://192.168.126.11:10250/containerLogs/openshift-operators/perses-operator-68bdb49cbf-m2cdr/perses-operator": remote error: tls: internal error 2025-12-08T18:04:07.482596289+00:00 stdout F Dump services: obo-prometheus-operator 2025-12-08T18:04:07.486775119+00:00 stdout F Dump services: obo-prometheus-operator-admission-webhook 2025-12-08T18:04:07.490897189+00:00 stdout F Dump services: obo-prometheus-operator-admission-webhook-service 2025-12-08T18:04:07.495328886+00:00 stdout F Dump services: observability-operator 2025-12-08T18:04:08.018398038+00:00 stdout F Dump replicaset: obo-prometheus-operator-86648f486b 2025-12-08T18:04:08.023695679+00:00 stdout F Dump replicaset: obo-prometheus-operator-admission-webhook-5b9dc645c4 2025-12-08T18:04:08.029231186+00:00 stdout F Dump replicaset: observability-operator-78c97476f4 2025-12-08T18:04:08.039096968+00:00 stdout F Dump replicaset: perses-operator-68bdb49cbf 2025-12-08T18:04:08.248729406+00:00 stdout F Dump deployments: obo-prometheus-operator 2025-12-08T18:04:08.253346989+00:00 stdout F Dump deployments: obo-prometheus-operator-admission-webhook 2025-12-08T18:04:08.258404544+00:00 stdout F Dump deployments: observability-operator 2025-12-08T18:04:08.276120793+00:00 stdout F Dump deployments: perses-operator 2025-12-08T18:04:09.613147251+00:00 stdout F Dump poddisruptionbudgets: obo-prometheus-operator-admission-webhook 2025-12-08T18:04:09.807238419+00:00 stdout F error: the server 
doesn't have a resource type "nncp" 2025-12-08T18:04:09.969739779+00:00 stdout F error: the server doesn't have a resource type "nnce" 2025-12-08T18:04:10.143585869+00:00 stdout F error: the server doesn't have a resource type "nns" 2025-12-08T18:04:10.299047671+00:00 stdout F error: the server doesn't have a resource type "ipaddresspools" 2025-12-08T18:04:10.320890891+00:00 stdout F Gather ctlplane service info: openstack 2025-12-08T18:04:10.326998043+00:00 stdout F Gather ctlplane service info: ovn 2025-12-08T18:04:10.336000962+00:00 stdout F Gather ctlplane service info: rabbitmq 2025-12-08T18:04:10.475365907+00:00 stdout F Error from server (NotFound): pods "openstackclient" not found 2025-12-08T18:04:10.488863346+00:00 stdout F Pod ovsdbserver-nb-0 not found in namespace openstack, skipping OVN NB database collection 2025-12-08T18:04:10.497760991+00:00 stdout F Error from server (NotFound): pods "openstackclient" not found 2025-12-08T18:04:10.507868530+00:00 stdout F error: the server doesn't have a resource type "l2advertisement" 2025-12-08T18:04:10.553272274+00:00 stdout F Error from server (NotFound): pods "openstackclient" not found 2025-12-08T18:04:10.635530635+00:00 stdout F Pod ovsdbserver-sb-0 not found in namespace openstack, skipping OVN SB database collection 2025-12-08T18:04:10.686532978+00:00 stdout F error: the server doesn't have a resource type "openstackcontrolplane" 2025-12-08T18:04:10.697988382+00:00 stdout P tar: Removing leading `/' from member names 2025-12-08T18:04:10.698019923+00:00 stdout F 2025-12-08T18:04:11.740757186+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:04:16.753419243+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:04:21.763855841+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:04:26.776382144+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:04:31.787214873+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:04:36.795666568+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:04:41.803108476+00:00 stdout F [disk usage checker] Volume usage percentage: current = 40 ; allowed = 70 2025-12-08T18:04:43.659426077+00:00 stdout F The /must-gather/must-gather.tar.xz now can be attached to the support case. 
2025-12-08T18:04:43.697964079+00:00 stdout F Caches written to disk ././@LongLink0000644000000000000000000000023600000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000755000175000017500000000000015115611513033044 5ustar zuulzuul././@LongLink0000644000000000000000000000025000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c/installer/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000755000175000017500000000000015115611521033043 5ustar zuulzuul././@LongLink0000644000000000000000000000025500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c/installer/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000644000175000017500000017270515115611513033062 0ustar zuulzuul2025-12-08T17:45:13.010062303+00:00 stderr F I1208 17:45:13.009765 1 cmd.go:95] &{ true {false} installer true map[cert-configmaps:0xc0006fc5a0 cert-dir:0xc0006fc780 cert-secrets:0xc0006fc500 configmaps:0xc0006fc0a0 namespace:0xc00061ed20 optional-cert-configmaps:0xc0006fc6e0 optional-cert-secrets:0xc0006fc640 optional-configmaps:0xc0006fc1e0 optional-secrets:0xc0006fc140 pod:0xc00061ee60 pod-manifest-dir:0xc0006fc320 resource-dir:0xc0006fc280 revision:0xc00061e6e0 secrets:0xc0006fc000 v:0xc00088a320] [0xc00088a320 0xc00061e6e0 0xc00061ed20 0xc00061ee60 0xc0006fc280 0xc0006fc320 0xc0006fc0a0 0xc0006fc1e0 0xc0006fc000 0xc0006fc140 0xc0006fc780 0xc0006fc5a0 0xc0006fc6e0 0xc0006fc500 0xc0006fc640] [] map[cert-configmaps:0xc0006fc5a0 cert-dir:0xc0006fc780 cert-secrets:0xc0006fc500 configmaps:0xc0006fc0a0 help:0xc00088a6e0 kubeconfig:0xc00061e640 log-flush-frequency:0xc00088a280 namespace:0xc00061ed20 optional-cert-configmaps:0xc0006fc6e0 optional-cert-secrets:0xc0006fc640 optional-configmaps:0xc0006fc1e0 optional-secrets:0xc0006fc140 pod:0xc00061ee60 pod-manifest-dir:0xc0006fc320 pod-manifests-lock-file:0xc0006fc460 resource-dir:0xc0006fc280 revision:0xc00061e6e0 secrets:0xc0006fc000 timeout-duration:0xc0006fc3c0 v:0xc00088a320 vmodule:0xc00088a3c0] [0xc00061e640 0xc00061e6e0 0xc00061ed20 0xc00061ee60 0xc0006fc000 0xc0006fc0a0 0xc0006fc140 0xc0006fc1e0 0xc0006fc280 0xc0006fc320 0xc0006fc3c0 0xc0006fc460 0xc0006fc500 0xc0006fc5a0 0xc0006fc640 0xc0006fc6e0 0xc0006fc780 0xc00088a280 0xc00088a320 0xc00088a3c0 0xc00088a6e0] [0xc0006fc5a0 0xc0006fc780 0xc0006fc500 0xc0006fc0a0 0xc00088a6e0 0xc00061e640 0xc00088a280 0xc00061ed20 0xc0006fc6e0 0xc0006fc640 0xc0006fc1e0 0xc0006fc140 0xc00061ee60 0xc0006fc320 0xc0006fc460 0xc0006fc280 0xc00061e6e0 0xc0006fc000 0xc0006fc3c0 0xc00088a320 0xc00088a3c0] map[104:0xc00088a6e0 118:0xc00088a320] [] -1 0 0xc000808660 true 0xae3c00 []} 2025-12-08T17:45:13.010213317+00:00 stderr F I1208 17:45:13.010066 1 cmd.go:96] (*installerpod.InstallOptions)(0xc000387380)({ 2025-12-08T17:45:13.010213317+00:00 stderr F KubeConfig: (string) "", 2025-12-08T17:45:13.010213317+00:00 stderr F KubeClient: (kubernetes.Interface) , 2025-12-08T17:45:13.010213317+00:00 stderr F Revision: (string) (len=2) "12", 
2025-12-08T17:45:13.010213317+00:00 stderr F NodeName: (string) "", 2025-12-08T17:45:13.010213317+00:00 stderr F Namespace: (string) (len=24) "openshift-kube-apiserver", 2025-12-08T17:45:13.010213317+00:00 stderr F Clock: (clock.RealClock) { 2025-12-08T17:45:13.010213317+00:00 stderr F }, 2025-12-08T17:45:13.010213317+00:00 stderr F PodConfigMapNamePrefix: (string) (len=18) "kube-apiserver-pod", 2025-12-08T17:45:13.010213317+00:00 stderr F SecretNamePrefixes: ([]string) (len=3 cap=4) { 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=11) "etcd-client", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=34) "localhost-recovery-serving-certkey", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=31) "localhost-recovery-client-token" 2025-12-08T17:45:13.010213317+00:00 stderr F }, 2025-12-08T17:45:13.010213317+00:00 stderr F OptionalSecretNamePrefixes: ([]string) (len=2 cap=2) { 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=17) "encryption-config", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "webhook-authenticator" 2025-12-08T17:45:13.010213317+00:00 stderr F }, 2025-12-08T17:45:13.010213317+00:00 stderr F ConfigMapNamePrefixes: ([]string) (len=8 cap=8) { 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=18) "kube-apiserver-pod", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=6) "config", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=37) "kube-apiserver-cert-syncer-kubeconfig", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=28) "bound-sa-token-signing-certs", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=15) "etcd-serving-ca", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=18) "kubelet-serving-ca", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=22) "sa-token-signing-certs", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=29) "kube-apiserver-audit-policies" 2025-12-08T17:45:13.010213317+00:00 stderr F }, 2025-12-08T17:45:13.010213317+00:00 stderr F OptionalConfigMapNamePrefixes: ([]string) (len=4 cap=4) { 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=14) "oauth-metadata", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=12) "cloud-config", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=24) "kube-apiserver-server-ca", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=11) "auth-config" 2025-12-08T17:45:13.010213317+00:00 stderr F }, 2025-12-08T17:45:13.010213317+00:00 stderr F CertSecretNames: ([]string) (len=10 cap=16) { 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=17) "aggregator-client", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=30) "localhost-serving-cert-certkey", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=31) "service-network-serving-certkey", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=37) "external-loadbalancer-serving-certkey", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=37) "internal-loadbalancer-serving-certkey", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=33) "bound-service-account-signing-key", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=40) "control-plane-node-admin-client-cert-key", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=31) "check-endpoints-client-cert-key", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=14) "kubelet-client", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=16) "node-kubeconfigs" 2025-12-08T17:45:13.010213317+00:00 stderr F 
}, 2025-12-08T17:45:13.010213317+00:00 stderr F OptionalCertSecretNamePrefixes: ([]string) (len=11 cap=16) { 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=17) "user-serving-cert", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "user-serving-cert-000", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "user-serving-cert-001", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "user-serving-cert-002", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "user-serving-cert-003", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "user-serving-cert-004", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "user-serving-cert-005", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "user-serving-cert-006", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "user-serving-cert-007", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "user-serving-cert-008", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=21) "user-serving-cert-009" 2025-12-08T17:45:13.010213317+00:00 stderr F }, 2025-12-08T17:45:13.010213317+00:00 stderr F CertConfigMapNamePrefixes: ([]string) (len=4 cap=4) { 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=20) "aggregator-client-ca", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=9) "client-ca", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=29) "control-plane-node-kubeconfig", 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=26) "check-endpoints-kubeconfig" 2025-12-08T17:45:13.010213317+00:00 stderr F }, 2025-12-08T17:45:13.010213317+00:00 stderr F OptionalCertConfigMapNamePrefixes: ([]string) (len=1 cap=1) { 2025-12-08T17:45:13.010213317+00:00 stderr F (string) (len=17) "trusted-ca-bundle" 2025-12-08T17:45:13.010213317+00:00 stderr F }, 2025-12-08T17:45:13.010213317+00:00 stderr F CertDir: (string) (len=57) "/etc/kubernetes/static-pod-resources/kube-apiserver-certs", 2025-12-08T17:45:13.010213317+00:00 stderr F ResourceDir: (string) (len=36) "/etc/kubernetes/static-pod-resources", 2025-12-08T17:45:13.010213317+00:00 stderr F PodManifestDir: (string) (len=25) "/etc/kubernetes/manifests", 2025-12-08T17:45:13.010213317+00:00 stderr F Timeout: (time.Duration) 2m0s, 2025-12-08T17:45:13.010213317+00:00 stderr F StaticPodManifestsLockFile: (string) "", 2025-12-08T17:45:13.010213317+00:00 stderr F PodMutationFns: ([]installerpod.PodMutationFunc) , 2025-12-08T17:45:13.010213317+00:00 stderr F KubeletVersion: (string) "" 2025-12-08T17:45:13.010213317+00:00 stderr F }) 2025-12-08T17:45:13.010490925+00:00 stderr F I1208 17:45:13.010405 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:45:13.010490925+00:00 stderr F I1208 17:45:13.010426 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:45:13.010490925+00:00 stderr F I1208 17:45:13.010431 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:45:13.010490925+00:00 stderr F I1208 17:45:13.010435 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:45:13.010490925+00:00 stderr F I1208 17:45:13.010438 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:45:13.010930637+00:00 stderr F I1208 17:45:13.010889 1 cmd.go:413] Getting controller reference for node crc 2025-12-08T17:45:13.019565687+00:00 stderr F I1208 
17:45:13.019517 1 cmd.go:426] Waiting for installer revisions to settle for node crc 2025-12-08T17:45:13.021434190+00:00 stderr F I1208 17:45:13.021405 1 cmd.go:518] Waiting additional period after revisions have settled for node crc 2025-12-08T17:45:43.022258958+00:00 stderr F I1208 17:45:43.022116 1 cmd.go:524] Getting installer pods for node crc 2025-12-08T17:45:43.027588818+00:00 stderr F I1208 17:45:43.027517 1 cmd.go:542] Latest installer revision for node crc is: 12 2025-12-08T17:45:43.027588818+00:00 stderr F I1208 17:45:43.027563 1 cmd.go:431] Querying kubelet version for node crc 2025-12-08T17:45:43.108962751+00:00 stderr F I1208 17:45:43.030178 1 cmd.go:444] Got kubelet version 1.33.5 on target node crc 2025-12-08T17:45:43.108962751+00:00 stderr F I1208 17:45:43.030217 1 cmd.go:293] Creating target resource directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12" ... 2025-12-08T17:45:43.108962751+00:00 stderr F I1208 17:45:43.030710 1 cmd.go:221] Creating target resource directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12" ... 2025-12-08T17:45:43.108962751+00:00 stderr F I1208 17:45:43.030728 1 cmd.go:229] Getting secrets ... 2025-12-08T17:45:43.108962751+00:00 stderr F I1208 17:45:43.033110 1 copy.go:32] Got secret openshift-kube-apiserver/etcd-client-12 2025-12-08T17:45:43.108962751+00:00 stderr F I1208 17:45:43.035589 1 copy.go:32] Got secret openshift-kube-apiserver/localhost-recovery-client-token-12 2025-12-08T17:45:43.108962751+00:00 stderr F I1208 17:45:43.037653 1 copy.go:32] Got secret openshift-kube-apiserver/localhost-recovery-serving-certkey-12 2025-12-08T17:45:43.108962751+00:00 stderr F I1208 17:45:43.039661 1 copy.go:24] Failed to get secret openshift-kube-apiserver/encryption-config-12: secrets "encryption-config-12" not found 2025-12-08T17:45:43.110200918+00:00 stderr F I1208 17:45:43.110139 1 copy.go:32] Got secret openshift-kube-apiserver/webhook-authenticator-12 2025-12-08T17:45:43.110258599+00:00 stderr F I1208 17:45:43.110225 1 cmd.go:242] Getting config maps ... 
2025-12-08T17:45:43.113237699+00:00 stderr F I1208 17:45:43.113199 1 copy.go:60] Got configMap openshift-kube-apiserver/bound-sa-token-signing-certs-12 2025-12-08T17:45:43.116414355+00:00 stderr F I1208 17:45:43.116311 1 copy.go:60] Got configMap openshift-kube-apiserver/config-12 2025-12-08T17:45:43.118712413+00:00 stderr F I1208 17:45:43.118675 1 copy.go:60] Got configMap openshift-kube-apiserver/etcd-serving-ca-12 2025-12-08T17:45:43.225214920+00:00 stderr F I1208 17:45:43.225130 1 copy.go:60] Got configMap openshift-kube-apiserver/kube-apiserver-audit-policies-12 2025-12-08T17:45:43.425744179+00:00 stderr F I1208 17:45:43.425653 1 copy.go:60] Got configMap openshift-kube-apiserver/kube-apiserver-cert-syncer-kubeconfig-12 2025-12-08T17:45:43.625827744+00:00 stderr F I1208 17:45:43.625749 1 copy.go:60] Got configMap openshift-kube-apiserver/kube-apiserver-pod-12 2025-12-08T17:45:43.826282592+00:00 stderr F I1208 17:45:43.826221 1 copy.go:60] Got configMap openshift-kube-apiserver/kubelet-serving-ca-12 2025-12-08T17:45:44.024817011+00:00 stderr F I1208 17:45:44.024751 1 copy.go:60] Got configMap openshift-kube-apiserver/sa-token-signing-certs-12 2025-12-08T17:45:44.225870856+00:00 stderr F I1208 17:45:44.225747 1 copy.go:52] Failed to get config map openshift-kube-apiserver/auth-config-12: configmaps "auth-config-12" not found 2025-12-08T17:45:44.426120986+00:00 stderr F I1208 17:45:44.426041 1 copy.go:52] Failed to get config map openshift-kube-apiserver/cloud-config-12: configmaps "cloud-config-12" not found 2025-12-08T17:45:44.625697486+00:00 stderr F I1208 17:45:44.625584 1 copy.go:60] Got configMap openshift-kube-apiserver/kube-apiserver-server-ca-12 2025-12-08T17:45:44.826252976+00:00 stderr F I1208 17:45:44.826179 1 copy.go:60] Got configMap openshift-kube-apiserver/oauth-metadata-12 2025-12-08T17:45:44.826252976+00:00 stderr F I1208 17:45:44.826218 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/etcd-client" ... 2025-12-08T17:45:44.826498603+00:00 stderr F I1208 17:45:44.826447 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/etcd-client/tls.key" ... 2025-12-08T17:45:44.826729890+00:00 stderr F I1208 17:45:44.826695 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/etcd-client/tls.crt" ... 2025-12-08T17:45:44.826912225+00:00 stderr F I1208 17:45:44.826861 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/localhost-recovery-client-token" ... 2025-12-08T17:45:44.827023749+00:00 stderr F I1208 17:45:44.826991 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/localhost-recovery-client-token/namespace" ... 2025-12-08T17:45:44.827219005+00:00 stderr F I1208 17:45:44.827185 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/localhost-recovery-client-token/service-ca.crt" ... 2025-12-08T17:45:44.827405210+00:00 stderr F I1208 17:45:44.827380 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/localhost-recovery-client-token/token" ... 2025-12-08T17:45:44.827570225+00:00 stderr F I1208 17:45:44.827548 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/localhost-recovery-client-token/ca.crt" ... 
2025-12-08T17:45:44.827752210+00:00 stderr F I1208 17:45:44.827729 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/localhost-recovery-serving-certkey" ... 2025-12-08T17:45:44.827842993+00:00 stderr F I1208 17:45:44.827821 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/localhost-recovery-serving-certkey/tls.crt" ... 2025-12-08T17:45:44.828078411+00:00 stderr F I1208 17:45:44.828042 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/localhost-recovery-serving-certkey/tls.key" ... 2025-12-08T17:45:44.828233776+00:00 stderr F I1208 17:45:44.828209 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/webhook-authenticator" ... 2025-12-08T17:45:44.828515234+00:00 stderr F I1208 17:45:44.828478 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/secrets/webhook-authenticator/kubeConfig" ... 2025-12-08T17:45:44.828716660+00:00 stderr F I1208 17:45:44.828681 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/bound-sa-token-signing-certs" ... 2025-12-08T17:45:44.828888705+00:00 stderr F I1208 17:45:44.828841 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/bound-sa-token-signing-certs/service-account-001.pub" ... 2025-12-08T17:45:44.829105451+00:00 stderr F I1208 17:45:44.829069 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/config" ... 2025-12-08T17:45:44.829210075+00:00 stderr F I1208 17:45:44.829174 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/config/config.yaml" ... 2025-12-08T17:45:44.829485723+00:00 stderr F I1208 17:45:44.829441 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/etcd-serving-ca" ... 2025-12-08T17:45:44.829617077+00:00 stderr F I1208 17:45:44.829594 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/etcd-serving-ca/ca-bundle.crt" ... 2025-12-08T17:45:44.829834663+00:00 stderr F I1208 17:45:44.829793 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-audit-policies" ... 2025-12-08T17:45:44.829961557+00:00 stderr F I1208 17:45:44.829937 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-audit-policies/policy.yaml" ... 2025-12-08T17:45:44.830128872+00:00 stderr F I1208 17:45:44.830106 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-cert-syncer-kubeconfig" ... 2025-12-08T17:45:44.830222115+00:00 stderr F I1208 17:45:44.830200 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig" ... 2025-12-08T17:45:44.830377669+00:00 stderr F I1208 17:45:44.830356 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-pod" ... 2025-12-08T17:45:44.830483252+00:00 stderr F I1208 17:45:44.830461 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-pod/version" ... 
2025-12-08T17:45:44.830730380+00:00 stderr F I1208 17:45:44.830694 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-pod/forceRedeploymentReason" ... 2025-12-08T17:45:44.830888555+00:00 stderr F I1208 17:45:44.830840 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-pod/kube-apiserver-startup-monitor-pod.yaml" ... 2025-12-08T17:45:44.831091021+00:00 stderr F I1208 17:45:44.831055 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-pod/pod.yaml" ... 2025-12-08T17:45:44.831293257+00:00 stderr F I1208 17:45:44.831260 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kubelet-serving-ca" ... 2025-12-08T17:45:44.831467763+00:00 stderr F I1208 17:45:44.831432 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kubelet-serving-ca/ca-bundle.crt" ... 2025-12-08T17:45:44.831674669+00:00 stderr F I1208 17:45:44.831623 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/sa-token-signing-certs" ... 2025-12-08T17:45:44.831966047+00:00 stderr F I1208 17:45:44.831934 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/sa-token-signing-certs/service-account-002.pub" ... 2025-12-08T17:45:44.832161893+00:00 stderr F I1208 17:45:44.832132 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/sa-token-signing-certs/service-account-001.pub" ... 2025-12-08T17:45:44.832321288+00:00 stderr F I1208 17:45:44.832297 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-server-ca" ... 2025-12-08T17:45:44.832421621+00:00 stderr F I1208 17:45:44.832397 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/kube-apiserver-server-ca/ca-bundle.crt" ... 2025-12-08T17:45:44.832661318+00:00 stderr F I1208 17:45:44.832628 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/oauth-metadata" ... 2025-12-08T17:45:44.832780252+00:00 stderr F I1208 17:45:44.832756 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/configmaps/oauth-metadata/oauthMetadata" ... 2025-12-08T17:45:44.832991048+00:00 stderr F I1208 17:45:44.832953 1 cmd.go:221] Creating target resource directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs" ... 2025-12-08T17:45:44.832991048+00:00 stderr F I1208 17:45:44.832979 1 cmd.go:229] Getting secrets ... 
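The "Failed to get config map ... not found" entries above (auth-config-12, cloud-config-12) are logged at info level and the installer keeps going, so they behave as optional inputs for revision 12 rather than errors. A minimal sketch, assuming cluster-admin access and the oc CLI, to see which of the revision-12 configmaps the installer asked for actually exist:

# Check a few of the revision-12 inputs copied above; "absent" entries are tolerated by the installer
for cm in config-12 etcd-serving-ca-12 kube-apiserver-audit-policies-12 auth-config-12 cloud-config-12; do
  if oc get configmap "$cm" -n openshift-kube-apiserver >/dev/null 2>&1; then
    echo "present: $cm"
  else
    echo "absent:  $cm"
  fi
done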
2025-12-08T17:45:45.025767374+00:00 stderr F I1208 17:45:45.025690 1 copy.go:32] Got secret openshift-kube-apiserver/aggregator-client 2025-12-08T17:45:45.226831380+00:00 stderr F I1208 17:45:45.226703 1 copy.go:32] Got secret openshift-kube-apiserver/bound-service-account-signing-key 2025-12-08T17:45:45.426116321+00:00 stderr F I1208 17:45:45.426054 1 copy.go:32] Got secret openshift-kube-apiserver/check-endpoints-client-cert-key 2025-12-08T17:45:45.626024151+00:00 stderr F I1208 17:45:45.625945 1 copy.go:32] Got secret openshift-kube-apiserver/control-plane-node-admin-client-cert-key 2025-12-08T17:45:45.825558251+00:00 stderr F I1208 17:45:45.825472 1 copy.go:32] Got secret openshift-kube-apiserver/external-loadbalancer-serving-certkey 2025-12-08T17:45:46.025964226+00:00 stderr F I1208 17:45:46.025759 1 copy.go:32] Got secret openshift-kube-apiserver/internal-loadbalancer-serving-certkey 2025-12-08T17:45:46.226539867+00:00 stderr F I1208 17:45:46.226417 1 copy.go:32] Got secret openshift-kube-apiserver/kubelet-client 2025-12-08T17:45:46.426005023+00:00 stderr F I1208 17:45:46.425853 1 copy.go:32] Got secret openshift-kube-apiserver/localhost-serving-cert-certkey 2025-12-08T17:45:46.626059388+00:00 stderr F I1208 17:45:46.625976 1 copy.go:32] Got secret openshift-kube-apiserver/node-kubeconfigs 2025-12-08T17:45:46.825684720+00:00 stderr F I1208 17:45:46.825570 1 copy.go:32] Got secret openshift-kube-apiserver/service-network-serving-certkey 2025-12-08T17:45:47.026374854+00:00 stderr F I1208 17:45:47.026275 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert: secrets "user-serving-cert" not found 2025-12-08T17:45:47.225918443+00:00 stderr F I1208 17:45:47.225828 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert-000: secrets "user-serving-cert-000" not found 2025-12-08T17:45:47.424784812+00:00 stderr F I1208 17:45:47.424684 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert-001: secrets "user-serving-cert-001" not found 2025-12-08T17:45:47.625417745+00:00 stderr F I1208 17:45:47.625344 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert-002: secrets "user-serving-cert-002" not found 2025-12-08T17:45:47.825531492+00:00 stderr F I1208 17:45:47.825422 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert-003: secrets "user-serving-cert-003" not found 2025-12-08T17:45:48.025724000+00:00 stderr F I1208 17:45:48.025648 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert-004: secrets "user-serving-cert-004" not found 2025-12-08T17:45:48.225007102+00:00 stderr F I1208 17:45:48.224937 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert-005: secrets "user-serving-cert-005" not found 2025-12-08T17:45:48.426526370+00:00 stderr F I1208 17:45:48.426460 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert-006: secrets "user-serving-cert-006" not found 2025-12-08T17:45:48.627086411+00:00 stderr F I1208 17:45:48.625234 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert-007: secrets "user-serving-cert-007" not found 2025-12-08T17:45:48.825315970+00:00 stderr F I1208 17:45:48.825192 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert-008: secrets "user-serving-cert-008" not found 2025-12-08T17:45:49.025556481+00:00 stderr F I1208 17:45:49.025476 1 copy.go:24] Failed to get secret openshift-kube-apiserver/user-serving-cert-009: secrets 
"user-serving-cert-009" not found 2025-12-08T17:45:49.025556481+00:00 stderr F I1208 17:45:49.025499 1 cmd.go:242] Getting config maps ... 2025-12-08T17:45:49.225606865+00:00 stderr F I1208 17:45:49.225505 1 copy.go:60] Got configMap openshift-kube-apiserver/aggregator-client-ca 2025-12-08T17:45:49.447930729+00:00 stderr F I1208 17:45:49.443738 1 copy.go:60] Got configMap openshift-kube-apiserver/check-endpoints-kubeconfig 2025-12-08T17:45:49.632933752+00:00 stderr F I1208 17:45:49.632840 1 copy.go:60] Got configMap openshift-kube-apiserver/client-ca 2025-12-08T17:45:49.826197143+00:00 stderr F I1208 17:45:49.826112 1 copy.go:60] Got configMap openshift-kube-apiserver/control-plane-node-kubeconfig 2025-12-08T17:45:50.039641369+00:00 stderr F I1208 17:45:50.039530 1 copy.go:60] Got configMap openshift-kube-apiserver/trusted-ca-bundle 2025-12-08T17:45:50.039902887+00:00 stderr F I1208 17:45:50.039850 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/aggregator-client" ... 2025-12-08T17:45:50.109182357+00:00 stderr F I1208 17:45:50.109076 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/aggregator-client/tls.crt" ... 2025-12-08T17:45:50.114844477+00:00 stderr F I1208 17:45:50.114786 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/aggregator-client/tls.key" ... 2025-12-08T17:45:50.115118635+00:00 stderr F I1208 17:45:50.115072 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/bound-service-account-signing-key" ... 2025-12-08T17:45:50.115118635+00:00 stderr F I1208 17:45:50.115100 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/bound-service-account-signing-key/service-account.pub" ... 2025-12-08T17:45:50.115541807+00:00 stderr F I1208 17:45:50.115289 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/bound-service-account-signing-key/service-account.key" ... 2025-12-08T17:45:50.115541807+00:00 stderr F I1208 17:45:50.115464 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/check-endpoints-client-cert-key" ... 2025-12-08T17:45:50.115541807+00:00 stderr F I1208 17:45:50.115483 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/check-endpoints-client-cert-key/tls.crt" ... 2025-12-08T17:45:50.115684762+00:00 stderr F I1208 17:45:50.115642 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/check-endpoints-client-cert-key/tls.key" ... 2025-12-08T17:45:50.115838376+00:00 stderr F I1208 17:45:50.115801 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/control-plane-node-admin-client-cert-key" ... 2025-12-08T17:45:50.115838376+00:00 stderr F I1208 17:45:50.115827 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/control-plane-node-admin-client-cert-key/tls.key" ... 2025-12-08T17:45:50.116050253+00:00 stderr F I1208 17:45:50.116011 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/control-plane-node-admin-client-cert-key/tls.crt" ... 
2025-12-08T17:45:50.116208387+00:00 stderr F I1208 17:45:50.116170 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/external-loadbalancer-serving-certkey" ... 2025-12-08T17:45:50.116208387+00:00 stderr F I1208 17:45:50.116197 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/external-loadbalancer-serving-certkey/tls.key" ... 2025-12-08T17:45:50.116479575+00:00 stderr F I1208 17:45:50.116437 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/external-loadbalancer-serving-certkey/tls.crt" ... 2025-12-08T17:45:50.117280960+00:00 stderr F I1208 17:45:50.116654 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/internal-loadbalancer-serving-certkey" ... 2025-12-08T17:45:50.117280960+00:00 stderr F I1208 17:45:50.116685 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/internal-loadbalancer-serving-certkey/tls.crt" ... 2025-12-08T17:45:50.117280960+00:00 stderr F I1208 17:45:50.116945 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/internal-loadbalancer-serving-certkey/tls.key" ... 2025-12-08T17:45:50.117280960+00:00 stderr F I1208 17:45:50.117161 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/kubelet-client" ... 2025-12-08T17:45:50.117280960+00:00 stderr F I1208 17:45:50.117182 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/kubelet-client/tls.key" ... 2025-12-08T17:45:50.117418574+00:00 stderr F I1208 17:45:50.117378 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/kubelet-client/tls.crt" ... 2025-12-08T17:45:50.117574199+00:00 stderr F I1208 17:45:50.117535 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/localhost-serving-cert-certkey" ... 2025-12-08T17:45:50.117574199+00:00 stderr F I1208 17:45:50.117560 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/localhost-serving-cert-certkey/tls.crt" ... 2025-12-08T17:45:50.117791445+00:00 stderr F I1208 17:45:50.117740 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/localhost-serving-cert-certkey/tls.key" ... 2025-12-08T17:45:50.117993181+00:00 stderr F I1208 17:45:50.117965 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/node-kubeconfigs" ... 2025-12-08T17:45:50.118018302+00:00 stderr F I1208 17:45:50.117988 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/node-kubeconfigs/localhost.kubeconfig" ... 2025-12-08T17:45:50.118207167+00:00 stderr F I1208 17:45:50.118168 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/node-kubeconfigs/lb-ext.kubeconfig" ... 2025-12-08T17:45:50.118375622+00:00 stderr F I1208 17:45:50.118347 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/node-kubeconfigs/lb-int.kubeconfig" ... 
2025-12-08T17:45:50.118541067+00:00 stderr F I1208 17:45:50.118513 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/node-kubeconfigs/localhost-recovery.kubeconfig" ... 2025-12-08T17:45:50.118708912+00:00 stderr F I1208 17:45:50.118681 1 cmd.go:261] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/service-network-serving-certkey" ... 2025-12-08T17:45:50.118735903+00:00 stderr F I1208 17:45:50.118706 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/service-network-serving-certkey/tls.crt" ... 2025-12-08T17:45:50.118977140+00:00 stderr F I1208 17:45:50.118948 1 cmd.go:639] Writing secret manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/service-network-serving-certkey/tls.key" ... 2025-12-08T17:45:50.119750323+00:00 stderr F I1208 17:45:50.119168 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/configmaps/aggregator-client-ca" ... 2025-12-08T17:45:50.119750323+00:00 stderr F I1208 17:45:50.119221 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/configmaps/aggregator-client-ca/ca-bundle.crt" ... 2025-12-08T17:45:50.119750323+00:00 stderr F I1208 17:45:50.119434 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/configmaps/check-endpoints-kubeconfig" ... 2025-12-08T17:45:50.119750323+00:00 stderr F I1208 17:45:50.119455 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/configmaps/check-endpoints-kubeconfig/kubeconfig" ... 2025-12-08T17:45:50.119750323+00:00 stderr F I1208 17:45:50.119631 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/configmaps/client-ca" ... 2025-12-08T17:45:50.119750323+00:00 stderr F I1208 17:45:50.119652 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/configmaps/client-ca/ca-bundle.crt" ... 2025-12-08T17:45:50.120292450+00:00 stderr F I1208 17:45:50.120237 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/configmaps/control-plane-node-kubeconfig" ... 2025-12-08T17:45:50.120292450+00:00 stderr F I1208 17:45:50.120267 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/configmaps/control-plane-node-kubeconfig/kubeconfig" ... 2025-12-08T17:45:50.120467196+00:00 stderr F I1208 17:45:50.120427 1 cmd.go:277] Creating directory "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/configmaps/trusted-ca-bundle" ... 2025-12-08T17:45:50.120559228+00:00 stderr F I1208 17:45:50.120526 1 cmd.go:629] Writing config file "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/configmaps/trusted-ca-bundle/ca-bundle.crt" ... 2025-12-08T17:45:50.209210019+00:00 stderr F I1208 17:45:50.209063 1 cmd.go:335] Getting pod configmaps/kube-apiserver-pod-12 -n openshift-kube-apiserver 2025-12-08T17:45:50.226156667+00:00 stderr F I1208 17:45:50.226089 1 cmd.go:351] Creating directory for static pod manifest "/etc/kubernetes/manifests" ... 
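The layout written above keeps one file per key, under secrets/<name>/ and configmaps/<name>/ inside /etc/kubernetes/static-pod-resources/kube-apiserver-certs. The installer performs this copy itself in Go, but a rough manual equivalent for a single object, assuming oc access and a writable destination, is oc extract, which likewise writes each key of a secret or configmap to its own file:

# Hypothetical manual re-creation of one secret and one configmap from the copy step above
DEST=/etc/kubernetes/static-pod-resources/kube-apiserver-certs
mkdir -p "$DEST/secrets/aggregator-client" "$DEST/configmaps/client-ca"
oc extract secret/aggregator-client -n openshift-kube-apiserver \
  --to="$DEST/secrets/aggregator-client" --confirm
oc extract configmap/client-ca -n openshift-kube-apiserver \
  --to="$DEST/configmaps/client-ca" --confirm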
2025-12-08T17:45:50.226156667+00:00 stderr F I1208 17:45:50.226144 1 cmd.go:379] Writing a pod under "kube-apiserver-startup-monitor-pod.yaml" key 2025-12-08T17:45:50.226156667+00:00 stderr F {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-startup-monitor","namespace":"openshift-kube-apiserver","creationTimestamp":null,"labels":{"revision":"12"}},"spec":{"volumes":[{"name":"resource-dir","hostPath":{"path":"/etc/kubernetes/static-pod-resources"}},{"name":"manifests","hostPath":{"path":"/etc/kubernetes/manifests"}},{"name":"pod-resource-dir","hostPath":{"path":"/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12"}},{"name":"var-lock","hostPath":{"path":"/var/lock"}},{"name":"var-log","hostPath":{"path":"/var/log/kube-apiserver"}}],"containers":[{"name":"startup-monitor","image":"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04","command":["cluster-kube-apiserver-operator","startup-monitor"],"args":["-v=2","--fallback-timeout-duration=300s","--target-name=kube-apiserver","--manifests-dir=/etc/kubernetes/manifests","--resource-dir=/etc/kubernetes/static-pod-resources","--installer-lock-file=/var/lock/kube-apiserver-installer.lock","--revision=12","--node-name=crc","--kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig","--log-file-path=/var/log/kube-apiserver/startup.log"],"resources":{"requests":{"cpu":"5m","memory":"50Mi"}},"volumeMounts":[{"name":"manifests","mountPath":"/etc/kubernetes/manifests"},{"name":"resource-dir","mountPath":"/etc/kubernetes/static-pod-resources"},{"name":"pod-resource-dir","readOnly":true,"mountPath":"/etc/kubernetes/static-pod-resources/secrets","subPath":"secrets"},{"name":"pod-resource-dir","readOnly":true,"mountPath":"/etc/kubernetes/static-pod-resources/configmaps","subPath":"configmaps"},{"name":"var-lock","mountPath":"/var/lock"},{"name":"var-log","mountPath":"/var/log/kube-apiserver"}],"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"privileged":true}}],"terminationGracePeriodSeconds":5,"hostNetwork":true,"tolerations":[{"operator":"Exists"}],"priorityClassName":"system-node-critical"},"status":{}} 2025-12-08T17:45:50.229674573+00:00 stderr F I1208 17:45:50.229613 1 cmd.go:610] Writing pod manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/kube-apiserver-startup-monitor-pod.yaml" ... 2025-12-08T17:45:50.229854289+00:00 stderr F I1208 17:45:50.229828 1 cmd.go:621] Writing static pod manifest "/etc/kubernetes/manifests/kube-apiserver-startup-monitor-pod.yaml" ... 
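The startup-monitor manifest written above runs alongside the new revision: per its arguments it watches kube-apiserver, logs to /var/log/kube-apiserver/startup.log, and falls back to the previous revision if the target does not become healthy within --fallback-timeout-duration=300s. A short sketch, assuming the single CRC node is named crc as in those arguments, for inspecting it from outside the node:

# Hypothetical inspection of the startup monitor on the CRC node via a debug pod
oc debug node/crc -- chroot /host \
  cat /etc/kubernetes/manifests/kube-apiserver-startup-monitor-pod.yaml
oc debug node/crc -- chroot /host \
  tail -n 20 /var/log/kube-apiserver/startup.log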
2025-12-08T17:45:50.229854289+00:00 stderr F {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver-startup-monitor","namespace":"openshift-kube-apiserver","creationTimestamp":null,"labels":{"revision":"12"}},"spec":{"volumes":[{"name":"resource-dir","hostPath":{"path":"/etc/kubernetes/static-pod-resources"}},{"name":"manifests","hostPath":{"path":"/etc/kubernetes/manifests"}},{"name":"pod-resource-dir","hostPath":{"path":"/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12"}},{"name":"var-lock","hostPath":{"path":"/var/lock"}},{"name":"var-log","hostPath":{"path":"/var/log/kube-apiserver"}}],"containers":[{"name":"startup-monitor","image":"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04","command":["cluster-kube-apiserver-operator","startup-monitor"],"args":["-v=2","--fallback-timeout-duration=300s","--target-name=kube-apiserver","--manifests-dir=/etc/kubernetes/manifests","--resource-dir=/etc/kubernetes/static-pod-resources","--installer-lock-file=/var/lock/kube-apiserver-installer.lock","--revision=12","--node-name=crc","--kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig","--log-file-path=/var/log/kube-apiserver/startup.log"],"resources":{"requests":{"cpu":"5m","memory":"50Mi"}},"volumeMounts":[{"name":"manifests","mountPath":"/etc/kubernetes/manifests"},{"name":"resource-dir","mountPath":"/etc/kubernetes/static-pod-resources"},{"name":"pod-resource-dir","readOnly":true,"mountPath":"/etc/kubernetes/static-pod-resources/secrets","subPath":"secrets"},{"name":"pod-resource-dir","readOnly":true,"mountPath":"/etc/kubernetes/static-pod-resources/configmaps","subPath":"configmaps"},{"name":"var-lock","mountPath":"/var/lock"},{"name":"var-log","mountPath":"/var/log/kube-apiserver"}],"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"privileged":true}}],"terminationGracePeriodSeconds":5,"hostNetwork":true,"tolerations":[{"operator":"Exists"}],"priorityClassName":"system-node-critical"},"status":{}} 2025-12-08T17:45:50.230202049+00:00 stderr F I1208 17:45:50.230109 1 cmd.go:379] Writing a pod under "kube-apiserver-pod.yaml" key 2025-12-08T17:45:50.230202049+00:00 stderr P {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver","namespace":"openshift-kube-apiserver","creationTimestamp":null,"labels":{"apiserver":"true","app":"openshift-kube-apiserver","revision":"12"},"annotations":{"kubectl.kubernetes.io/default-container":"kube-apiserver","target.workload.openshift.io/management":"{\"effect\": \"PreferredDuringScheduling\"}"}},"spec":{"volumes":[{"name":"resource-dir","hostPath":{"path":"/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12"}},{"name":"cert-dir","hostPath":{"path":"/etc/kubernetes/static-pod-resources/kube-apiserver-certs"}},{"name":"audit-dir","hostPath":{"path":"/var/log/kube-apiserver"}},{"name":"tmp-dir","emptyDir":{}},{"name":"ca-bundle-dir","emptyDir":{}}],"initContainers":[{"name":"setup","image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825","command":["/usr/bin/timeout","100","/bin/bash","-ec"],"args":["echo \"Fixing audit permissions ...\"\nchmod 0700 /var/log/kube-apiserver \u0026\u0026 touch /var/log/kube-apiserver/audit.log \u0026\u0026 chmod 0600 /var/log/kube-apiserver/*\n\nLOCK=/var/log/kube-apiserver/.lock\necho \"Acquiring exclusive lock ${LOCK} ...\"\n\n# 
Waiting for 15s max for old kube-apiserver's watch-termination process to exit and remove the lock.\n# Two cases:\n# 1. if kubelet does not start the old and new in parallel (i.e. works as expected), the flock will always succeed without any time.\n# 2. if kubelet does overlap old and new pods for up to 130s, the flock will wait and immediate return when the old finishes.\n#\n# NOTE: We can increase 15s for a bigger expected overlap. But a higher value means less noise about the broken kubelet behaviour, i.e. we hide a bug.\n# NOTE: Do not tweak these timings without considering the livenessProbe initialDelaySeconds\nexec {LOCK_FD}\u003e${LOCK} \u0026\u0026 flock --verbose -w 15 \"${LOCK_FD}\" || {\n echo \"$(date -Iseconds -u) kubelet did not terminate old kube-apiserver before new one\" \u003e\u003e /var/log/kube-apiserver/lock.log\n echo -n \": WARNING: kubelet did not terminate old kube-apiserver before new one.\"\n\n # We failed to acquire exclusive lock, which means there is old kube-apiserver running in system.\n # Since we utilize SO_REUSEPORT, we need to make sure the old kube-apiserver stopped listening.\n #\n # NOTE: This is a fallback for broken kubelet, if you observe this please report a bug.\n echo -n \"Waiting for port 6443 to be released due to likely bug in kubelet or CRI-O \"\n while [ -n \"$(ss -Htan state listening '( sport = 6443 or sport = 6080 )')\" ]; do\n echo -n \".\"\n sleep 1\n (( tries += 1 ))\n if [[ \"${tries}\" -gt 10 ]]; then\n echo \"Timed out waiting for port :6443 and :6080 to be released, this is likely a bug in kubelet or CRI-O\"\n exit 1\n fi\n done\n # This is to make sure the server has terminated independently from the lock.\n # After the port has been freed (requests can be pending and need 60s max).\n sleep 65\n}\n# We cannot hold the lock from the init container to the main container. We release it here. There is no risk, at this point we know we are safe.\nflock -u \"${LOCK_FD}\"\n"],"resources":{"requests":{"cpu":"5m","memory":"50Mi"}},"volumeMounts":[{"name":"audit-dir","mountPath":"/var/log/kube-apiserver"},{"name":"tmp-dir","mountPath":"/tmp"}],"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"privileged":true,"readOnlyRootFilesystem":true}}],"containers":[{"name":"kube-apiserver","image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825","command":["/bin/bash","-ec"],"args":["LOCK=/var/log/kube-apiserver/.lock\n# We should be able to acquire the lock immediatelly. If not, it means the init container has not released it yet and kubelet or CRI-O started container prematurely.\nexec {LOCK_FD}\u003e${LOCK} \u0026\u0026 flock --verbose -w 30 \"${LOCK_FD}\" || {\n echo \"Failed to acquire lock for kube-apiserver. Please check setup container for details. 
This is likely kubelet or CRI-O bug.\"\n exit 1\n}\nif [ -f /etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt ]; then\n echo \"Copying system trust bundle ...\"\n cp -f /etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem\nfi\n\nexec watch-termination --termination-touch-file=/var/log/kube-apiserver/.terminating --termination-log-file=/var/log/kube-apiserver/termination.log --graceful-termination-duration=15s --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig -- hyperkube kube-apiserver --openshift-config=/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml --advertise-address=${HOST_IP} -v=2 --permit-address-sharing\n"],"ports":[{"containerPort":6443}],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}},{"name":"STATIC_POD_VERSION","value":"12"},{"name":"HOST_IP","valueFrom":{"fieldRef":{"fieldPath":"status.hostIP"}}},{"name":"GOGC","value":"100"}],"resources":{"requests":{"cpu":"265m","memory":"1Gi"}},"volumeMounts":[{"name":"resource-dir","mountPath":"/etc/kubernetes/static-pod-resources"},{"name":"cert-dir","mountPath":"/etc/kubernetes/static-pod-certs"},{"name":"audit-dir","mountPath":"/var/log/kube-apiserver"},{"name":"tmp-dir","mountPath":"/tmp"},{"name":"ca-bundle-dir","mountPath":"/etc/pki/ca-trust/extracted/pem"}],"livenessProbe":{"httpGet":{"path":"livez?exclude=etcd","port":6443,"scheme":"HTTPS"},"timeoutSeconds":10,"periodSeconds":10,"successThreshold":1,"failureThreshold":3},"readinessProbe":{"httpGet":{"path":"readyz","port":6443,"scheme":"HTTPS"},"timeoutSeconds":10,"periodSeconds":5,"successThreshold":1,"failureThreshold":3},"startupProbe":{"httpGet":{"path":"livez","port":6443,"scheme":"HTTPS"},"timeoutSeconds":10,"periodSeconds":5,"successThreshold":1,"failureThreshold":30},"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"privileged":true,"readOnlyRootFilesystem":true}},{"name":"kube-apiserver-cert-syncer","image":"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04","command":["cluster-kube-apiserver-operator","cert-syncer"],"args":["--kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig","--namespace=$(POD_NAMESPACE)","--destination-dir=/etc/kubernetes/static-pod-certs"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}],"resources":{"requests":{"cpu":"5m","memory":"50Mi"}},"volumeMounts":[{"name":"resource-dir","mountPath":"/etc/kubernetes/static-pod-resources"},{"name":"cert-dir","mountPath":"/etc/kubernetes/static-pod-certs"},{"name":"tmp-dir","mountPath":"/tmp"}],"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"readOnlyRootFilesystem":true}},{"name":"kube-apiserver-cert-regeneration-controller","image":"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04","command":["cluster-kube-apiserver-operator","cert-regeneration-controller"],"args":["--kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig",
"--namespace=$(POD_NAMESPACE)","-v=2"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}},{"name":"OPERATOR_IMAGE_VERSION","value":"4.20.1"}],"resources":{"requests":{"cpu":"5m","memory":"50Mi"}},"volumeMounts":[{"name":"resource-dir","mountPath":"/etc/kubernetes/static-pod-resources"},{"name":"tmp-dir","mountPath":"/tmp"}],"terminationMessagePolicy": 2025-12-08T17:45:50.230245380+00:00 stderr F "FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"readOnlyRootFilesystem":true}},{"name":"kube-apiserver-insecure-readyz","image":"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04","command":["cluster-kube-apiserver-operator","insecure-readyz"],"args":["--insecure-port=6080","--delegate-url=https://localhost:6443/readyz"],"ports":[{"containerPort":6080}],"resources":{"requests":{"cpu":"5m","memory":"50Mi"}},"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"readOnlyRootFilesystem":true}},{"name":"kube-apiserver-check-endpoints","image":"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04","command":["cluster-kube-apiserver-operator","check-endpoints"],"args":["--kubeconfig","/etc/kubernetes/static-pod-certs/configmaps/check-endpoints-kubeconfig/kubeconfig","--listen","0.0.0.0:17697","--namespace","$(POD_NAMESPACE)","--v","2"],"ports":[{"name":"check-endpoints","hostPort":17697,"containerPort":17697,"protocol":"TCP"}],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}],"resources":{"requests":{"cpu":"10m","memory":"50Mi"}},"volumeMounts":[{"name":"resource-dir","mountPath":"/etc/kubernetes/static-pod-resources"},{"name":"cert-dir","mountPath":"/etc/kubernetes/static-pod-certs"},{"name":"tmp-dir","mountPath":"/tmp"}],"livenessProbe":{"httpGet":{"path":"healthz","port":17697,"scheme":"HTTPS"},"initialDelaySeconds":10,"timeoutSeconds":10},"readinessProbe":{"httpGet":{"path":"healthz","port":17697,"scheme":"HTTPS"},"initialDelaySeconds":10,"timeoutSeconds":10},"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"readOnlyRootFilesystem":true}}],"terminationGracePeriodSeconds":15,"hostNetwork":true,"tolerations":[{"operator":"Exists"}],"priorityClassName":"system-node-critical"},"status":{}} 2025-12-08T17:45:50.231094956+00:00 stderr F I1208 17:45:50.231022 1 cmd.go:610] Writing pod manifest "/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12/kube-apiserver-pod.yaml" ... 2025-12-08T17:45:50.231473387+00:00 stderr F I1208 17:45:50.231418 1 cmd.go:617] Removed existing static pod manifest "/etc/kubernetes/manifests/kube-apiserver-pod.yaml" ... 2025-12-08T17:45:50.231473387+00:00 stderr F I1208 17:45:50.231438 1 cmd.go:621] Writing static pod manifest "/etc/kubernetes/manifests/kube-apiserver-pod.yaml" ... 
2025-12-08T17:45:50.231473387+00:00 stderr P {"kind":"Pod","apiVersion":"v1","metadata":{"name":"kube-apiserver","namespace":"openshift-kube-apiserver","creationTimestamp":null,"labels":{"apiserver":"true","app":"openshift-kube-apiserver","revision":"12"},"annotations":{"kubectl.kubernetes.io/default-container":"kube-apiserver","target.workload.openshift.io/management":"{\"effect\": \"PreferredDuringScheduling\"}"}},"spec":{"volumes":[{"name":"resource-dir","hostPath":{"path":"/etc/kubernetes/static-pod-resources/kube-apiserver-pod-12"}},{"name":"cert-dir","hostPath":{"path":"/etc/kubernetes/static-pod-resources/kube-apiserver-certs"}},{"name":"audit-dir","hostPath":{"path":"/var/log/kube-apiserver"}},{"name":"tmp-dir","emptyDir":{}},{"name":"ca-bundle-dir","emptyDir":{}}],"initContainers":[{"name":"setup","image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825","command":["/usr/bin/timeout","100","/bin/bash","-ec"],"args":["echo \"Fixing audit permissions ...\"\nchmod 0700 /var/log/kube-apiserver \u0026\u0026 touch /var/log/kube-apiserver/audit.log \u0026\u0026 chmod 0600 /var/log/kube-apiserver/*\n\nLOCK=/var/log/kube-apiserver/.lock\necho \"Acquiring exclusive lock ${LOCK} ...\"\n\n# Waiting for 15s max for old kube-apiserver's watch-termination process to exit and remove the lock.\n# Two cases:\n# 1. if kubelet does not start the old and new in parallel (i.e. works as expected), the flock will always succeed without any time.\n# 2. if kubelet does overlap old and new pods for up to 130s, the flock will wait and immediate return when the old finishes.\n#\n# NOTE: We can increase 15s for a bigger expected overlap. But a higher value means less noise about the broken kubelet behaviour, i.e. we hide a bug.\n# NOTE: Do not tweak these timings without considering the livenessProbe initialDelaySeconds\nexec {LOCK_FD}\u003e${LOCK} \u0026\u0026 flock --verbose -w 15 \"${LOCK_FD}\" || {\n echo \"$(date -Iseconds -u) kubelet did not terminate old kube-apiserver before new one\" \u003e\u003e /var/log/kube-apiserver/lock.log\n echo -n \": WARNING: kubelet did not terminate old kube-apiserver before new one.\"\n\n # We failed to acquire exclusive lock, which means there is old kube-apiserver running in system.\n # Since we utilize SO_REUSEPORT, we need to make sure the old kube-apiserver stopped listening.\n #\n # NOTE: This is a fallback for broken kubelet, if you observe this please report a bug.\n echo -n \"Waiting for port 6443 to be released due to likely bug in kubelet or CRI-O \"\n while [ -n \"$(ss -Htan state listening '( sport = 6443 or sport = 6080 )')\" ]; do\n echo -n \".\"\n sleep 1\n (( tries += 1 ))\n if [[ \"${tries}\" -gt 10 ]]; then\n echo \"Timed out waiting for port :6443 and :6080 to be released, this is likely a bug in kubelet or CRI-O\"\n exit 1\n fi\n done\n # This is to make sure the server has terminated independently from the lock.\n # After the port has been freed (requests can be pending and need 60s max).\n sleep 65\n}\n# We cannot hold the lock from the init container to the main container. We release it here. 
There is no risk, at this point we know we are safe.\nflock -u \"${LOCK_FD}\"\n"],"resources":{"requests":{"cpu":"5m","memory":"50Mi"}},"volumeMounts":[{"name":"audit-dir","mountPath":"/var/log/kube-apiserver"},{"name":"tmp-dir","mountPath":"/tmp"}],"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"privileged":true,"readOnlyRootFilesystem":true}}],"containers":[{"name":"kube-apiserver","image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825","command":["/bin/bash","-ec"],"args":["LOCK=/var/log/kube-apiserver/.lock\n# We should be able to acquire the lock immediatelly. If not, it means the init container has not released it yet and kubelet or CRI-O started container prematurely.\nexec {LOCK_FD}\u003e${LOCK} \u0026\u0026 flock --verbose -w 30 \"${LOCK_FD}\" || {\n echo \"Failed to acquire lock for kube-apiserver. Please check setup container for details. This is likely kubelet or CRI-O bug.\"\n exit 1\n}\nif [ -f /etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt ]; then\n echo \"Copying system trust bundle ...\"\n cp -f /etc/kubernetes/static-pod-certs/configmaps/trusted-ca-bundle/ca-bundle.crt /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem\nfi\n\nexec watch-termination --termination-touch-file=/var/log/kube-apiserver/.terminating --termination-log-file=/var/log/kube-apiserver/termination.log --graceful-termination-duration=15s --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig -- hyperkube kube-apiserver --openshift-config=/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml --advertise-address=${HOST_IP} -v=2 --permit-address-sharing\n"],"ports":[{"containerPort":6443}],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}},{"name":"STATIC_POD_VERSION","value":"12"},{"name":"HOST_IP","valueFrom":{"fieldRef":{"fieldPath":"status.hostIP"}}},{"name":"GOGC","value":"100"}],"resources":{"requests":{"cpu":"265m","memory":"1Gi"}},"volumeMounts":[{"name":"resource-dir","mountPath":"/etc/kubernetes/static-pod-resources"},{"name":"cert-dir","mountPath":"/etc/kubernetes/static-pod-certs"},{"name":"audit-dir","mountPath":"/var/log/kube-apiserver"},{"name":"tmp-dir","mountPath":"/tmp"},{"name":"ca-bundle-dir","mountPath":"/etc/pki/ca-trust/extracted/pem"}],"livenessProbe":{"httpGet":{"path":"livez?exclude=etcd","port":6443,"scheme":"HTTPS"},"timeoutSeconds":10,"periodSeconds":10,"successThreshold":1,"failureThreshold":3},"readinessProbe":{"httpGet":{"path":"readyz","port":6443,"scheme":"HTTPS"},"timeoutSeconds":10,"periodSeconds":5,"successThreshold":1,"failureThreshold":3},"startupProbe":{"httpGet":{"path":"livez","port":6443,"scheme":"HTTPS"},"timeoutSeconds":10,"periodSeconds":5,"successThreshold":1,"failureThreshold":30},"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"privileged":true,"readOnlyRootFilesystem":true}},{"name":"kube-apiserver-cert-syncer","image":"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04","command":["cluster-kube-apiserver-operator","cert-syncer"],"args":["--kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig","--namespace=$(POD_NAMESPACE)","--destinat
ion-dir=/etc/kubernetes/static-pod-certs"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}],"resources":{"requests":{"cpu":"5m","memory":"50Mi"}},"volumeMounts":[{"name":"resource-dir","mountPath":"/etc/kubernetes/static-pod-resources"},{"name":"cert-dir","mountPath":"/etc/kubernetes/static-pod-certs"},{"name":"tmp-dir","mountPath":"/tmp"}],"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"readOnlyRootFilesystem":true}},{"name":"kube-apiserver-cert-regeneration-controller","image":"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04","command":["cluster-kube-apiserver-operator","cert-regeneration-controller"],"args":["--kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig","--namespace=$(POD_NAMESPACE)","-v=2"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}},{"name":"OPERATOR_IMAGE_VERSION","value":"4.20.1"}],"resources":{"requests":{"cpu":"5m","memory 2025-12-08T17:45:50.231530569+00:00 stderr F ":"50Mi"}},"volumeMounts":[{"name":"resource-dir","mountPath":"/etc/kubernetes/static-pod-resources"},{"name":"tmp-dir","mountPath":"/tmp"}],"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"readOnlyRootFilesystem":true}},{"name":"kube-apiserver-insecure-readyz","image":"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04","command":["cluster-kube-apiserver-operator","insecure-readyz"],"args":["--insecure-port=6080","--delegate-url=https://localhost:6443/readyz"],"ports":[{"containerPort":6080}],"resources":{"requests":{"cpu":"5m","memory":"50Mi"}},"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"readOnlyRootFilesystem":true}},{"name":"kube-apiserver-check-endpoints","image":"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04","command":["cluster-kube-apiserver-operator","check-endpoints"],"args":["--kubeconfig","/etc/kubernetes/static-pod-certs/configmaps/check-endpoints-kubeconfig/kubeconfig","--listen","0.0.0.0:17697","--namespace","$(POD_NAMESPACE)","--v","2"],"ports":[{"name":"check-endpoints","hostPort":17697,"containerPort":17697,"protocol":"TCP"}],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}],"resources":{"requests":{"cpu":"10m","memory":"50Mi"}},"volumeMounts":[{"name":"resource-dir","mountPath":"/etc/kubernetes/static-pod-resources"},{"name":"cert-dir","mountPath":"/etc/kubernetes/static-pod-certs"},{"name":"tmp-dir","mountPath":"/tmp"}],"livenessProbe":{"httpGet":{"path":"healthz","port":17697,"scheme":"HTTPS"},"initialDelaySeconds":10,"timeoutSeconds":10},"readinessProbe":{"httpGet":{"path":"healthz","port":17697,"scheme":"HTTPS"},"initialDelaySeconds":10,"timeoutSeconds":10},"terminationMessagePolicy":"FallbackToLogsOnError","imagePullPolicy":"IfNotPresent","securityContext":{"readOnlyRootFilesystem":true}}],"terminationGracePeriodSeconds":15,"hostNetwork":true,"tolerations":[{"operator":"Exists"}],"priorityClassName":"system-node-critical"},"status":{}
} 2025-12-08T17:45:50.426745988+00:00 stderr F W1208 17:45:50.426578 1 recorder.go:207] Error creating event &Event{ObjectMeta:{installer-12-crc.187f4e8de169efd1.d66109ff openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:installer-12-crc,UID:158725bd-7556-4281-a3cb-acaa6baf5d8c,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:StaticPodInstallerCompleted,Message:Successfully installed revision 12,Source:EventSource{Component:static-pod-installer,Host:,},FirstTimestamp:2025-12-08 17:45:50.231695313 +0000 UTC m=+37.796675172,LastTimestamp:2025-12-08 17:45:50.231695313 +0000 UTC m=+37.796675172,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:,ReportingInstance:,}: Post "https://10.217.4.1:443/api/v1/namespaces/openshift-kube-apiserver/events?timeout=14s": dial tcp 10.217.4.1:443: connect: connection refused ././@LongLink0000644000000000000000000000021300000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_curl0000755000175000017500000000000015115611513033147 5ustar zuulzuul././@LongLink0000644000000000000000000000022000000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0/curl/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_curl0000755000175000017500000000000015115611521033146 5ustar zuulzuul././@LongLink0000644000000000000000000000022500000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0/curl/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_curl0000644000175000017500000001160415115611513033153 0ustar zuulzuul2025-12-08T17:59:16.925826037+00:00 stderr F % Total % Received % Xferd Average Speed Time Time Time Current 2025-12-08T17:59:16.925826037+00:00 stderr F Dload Upload Total Spent Left Speed 2025-12-08T17:59:16.925826037+00:00 stderr P 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 2025-12-08T17:59:16.925983101+00:00 stderr F * SSLv3, TLS handshake, Client hello (1): 2025-12-08T17:59:16.925983101+00:00 stderr P } [data 2025-12-08T17:59:16.925993532+00:00 stderr P not shown 2025-12-08T17:59:16.926002092+00:00 stderr F ] 2025-12-08T17:59:16.936533730+00:00 stderr F * SSLv3, TLS handshake, Server hello (2): 2025-12-08T17:59:16.936533730+00:00 stderr F { [data not shown] 2025-12-08T17:59:16.936533730+00:00 stderr F * SSLv3, TLS handshake, CERT (11): 2025-12-08T17:59:16.936533730+00:00 stderr F { [data not shown] 2025-12-08T17:59:16.936753515+00:00 stderr F * SSLv3, TLS handshake, Server key exchange (12): 2025-12-08T17:59:16.936753515+00:00 stderr F { [data not shown] 2025-12-08T17:59:16.936916160+00:00 stderr P * 2025-12-08T17:59:16.936958681+00:00 stderr F SSLv3, TLS handshake, Server finished (14): 2025-12-08T17:59:16.936958681+00:00 stderr P { [data no 2025-12-08T17:59:16.936966631+00:00 stderr F t shown] 2025-12-08T17:59:16.939930349+00:00 stderr F * SSLv3, TLS handshake, Client key exchange (16): 2025-12-08T17:59:16.939930349+00:00 stderr F } [data not shown] 
2025-12-08T17:59:16.939930349+00:00 stderr F * SSLv3, TLS change cipher, Client hello (1): 2025-12-08T17:59:16.939930349+00:00 stderr F } [data not shown] 2025-12-08T17:59:16.939985601+00:00 stderr F * SSLv3, TLS handshake, Finished (20): 2025-12-08T17:59:16.939985601+00:00 stderr P } [ 2025-12-08T17:59:16.939996861+00:00 stderr P data not shown] 2025-12-08T17:59:16.940006701+00:00 stderr F 2025-12-08T17:59:16.943000960+00:00 stderr F * SSLv3, TLS change cipher, Client hello (1): 2025-12-08T17:59:16.943000960+00:00 stderr F { [data not shown] 2025-12-08T17:59:16.943000960+00:00 stderr F * SSLv3, TLS handshake, Finished (20): 2025-12-08T17:59:16.943000960+00:00 stderr F { [data not shown] 2025-12-08T17:59:16.943000960+00:00 stderr F > POST /api/v2/alerts HTTP/1.1 2025-12-08T17:59:16.943000960+00:00 stderr F > User-Agent: curl/7.35.0 2025-12-08T17:59:16.943000960+00:00 stderr F > Host: default-alertmanager-proxy:9095 2025-12-08T17:59:16.943000960+00:00 stderr F > Accept: */* 2025-12-08T17:59:16.943000960+00:00 stderr F > Content-Type: application/json 2025-12-08T17:59:16.943000960+00:00 stderr F > Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6ImFWMzc3cFlVaXZjX05walVUUlY4bWtJNUZSTTlyVFplaEIwRnBldjhZamsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTc2NTIyMDM1NCwiaWF0IjoxNzY1MjE2NzU0LCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMiLCJqdGkiOiJmMTVjZjE3NC1hMDQxLTRlNzMtODUzNS0yZDU4NDRiYjU4YWUiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6InNlcnZpY2UtdGVsZW1ldHJ5Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6InByb21ldGhldXMtc3RmIiwidWlkIjoiNzc0ZGE4MzQtODY3YS00N2UzLWE1MmMtYmZmMzRhMzlmM2Q4In19LCJuYmYiOjE3NjUyMTY3NTQsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpzZXJ2aWNlLXRlbGVtZXRyeTpwcm9tZXRoZXVzLXN0ZiJ9.QPxiNrDZ6fnSoQLCE8bQx8WL4s7PmGgB7XIKsKPknC05ykNETWp6bPJsBPq61zCjGdy6eJ46AJc6HCGNnk-gPMjPUgIyWR9icaKf9L_cs_4KKs85QH7dsHDN9BaEjXbvXvEAXqGY0KKreFa6Bk3zR1j4BwU62kNQycIvo2VxaMwp8JM2mmFzbeSZc_mjJ9o19jVi5kKf9JvXgNtcU0bxldSStVu3CfBbjMZZRWFiwYt91-gUV4cK2E8IgKa77YFywucGhWMYXq2aDeasrjBF9yMoDizucMWlJZamREkzvVfVmccTozpAYDpct-z153dobmP3fnZyqC_pWhmpS6QkggnRqvmo5MgxeFOi0Cm6q06ZmEERA6PvMW9QrSYasxXXvkDyznNGrGP7DhtVCgB8q3veU9K2zQheEAdS9SeyqnECbofewDowxMkjEqhA4L2BenBuQPg_lnPHGcVCcHQxoe7znTWw8SzXp1fBnG3Fmt0et8PRS4IbL8ds5GiSdZf4o4dQA2OXzkCgIvLTyNfxNCUlt-4Kgf6Sb7k7FmeT4U76TVwneAiAIFhlxbYtfLpHwwqcd36w4kFSOXc6haPWzhEryjX3jRIPk5ID6TbdV0n6Kp23woEQzh2B0Dn9MXC9wZC49U_RfOxL6uQLRnzfKQRkafI6tzgrBb1A3X_H8_A 2025-12-08T17:59:16.943000960+00:00 stderr F > Content-Length: 116 2025-12-08T17:59:16.943000960+00:00 stderr F > 2025-12-08T17:59:16.943000960+00:00 stderr F } [data not shown] 2025-12-08T17:59:16.951511564+00:00 stderr F < HTTP/1.1 200 OK 2025-12-08T17:59:16.951511564+00:00 stderr F < Cache-Control: no-store 2025-12-08T17:59:16.951511564+00:00 stderr F < Content-Length: 0 2025-12-08T17:59:16.951511564+00:00 stderr F < Date: Mon, 08 Dec 2025 17:59:16 GMT 2025-12-08T17:59:16.951511564+00:00 stderr F < Gap-Auth: system:serviceaccount:service-telemetry:prometheus-stf@cluster.local 2025-12-08T17:59:16.951594516+00:00 stderr F < Gap-Upstream-Address: localhost:9093 2025-12-08T17:59:16.951594516+00:00 stderr F < Vary: Origin 2025-12-08T17:59:16.951594516+00:00 stderr F < 2025-12-08T17:59:16.951594516+00:00 stderr P 100 116 0 0 100 116 0 2908 - 2025-12-08T17:59:16.951607477+00:00 stderr P -:--:-- -- 2025-12-08T17:59:16.951617747+00:00 stderr P :--:-- - 2025-12-08T17:59:16.951627917+00:00 stderr P -:--:-- 2025-12-08T17:59:16.951685629+00:00 stderr F 4296 ././@LongLink0000644000000000000000000000026700000000000011610 Lustar 
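The POST above goes to the STF Alertmanager through its OAuth proxy (default-alertmanager-proxy:9095) with a prometheus-stf service-account bearer token; the 200 response and the Gap-Upstream-Address: localhost:9093 header show the proxy accepted the token and forwarded the request to Alertmanager itself. A rough re-creation, assuming it runs from a pod in the service-telemetry namespace and using an illustrative payload rather than the smoke test's actual 116-byte body:

# Hypothetical re-send of an alert to the Alertmanager v2 API via the OAuth proxy
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)   # this pod's own SA token
curl -k -X POST "https://default-alertmanager-proxy:9095/api/v2/alerts" \
  -H "Authorization: Bearer ${TOKEN}" \
  -H "Content-Type: application/json" \
  -d '[{"labels":{"alertname":"smoketest","severity":"warning"}}]'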
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611514033053 5ustar zuulzuul././@LongLink0000644000000000000000000000030400000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462/olm-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000755000175000017500000000000015115611521033051 5ustar zuulzuul././@LongLink0000644000000000000000000000031100000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462/olm-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lif0000644000175000017500000303145315115611514033066 0ustar zuulzuul2025-12-08T17:44:21.842647077+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="log level info" 2025-12-08T17:44:21.842647077+00:00 stderr F time="2025-12-08T17:44:21Z" level=info msg="TLS keys set, using https for metrics" 2025-12-08T17:44:22.087074414+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="skipping irrelevant gvr" gvr="/v1, Resource=services" 2025-12-08T17:44:22.087074414+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="skipping irrelevant gvr" gvr="/v1, Resource=pods" 2025-12-08T17:44:22.087074414+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="skipping irrelevant gvr" gvr="rbac.authorization.k8s.io/v1, Resource=roles" 2025-12-08T17:44:22.087074414+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="skipping irrelevant gvr" gvr="/v1, Resource=serviceaccounts" 2025-12-08T17:44:22.087074414+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="skipping irrelevant gvr" gvr="/v1, Resource=configmaps" 2025-12-08T17:44:22.087074414+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="skipping irrelevant gvr" gvr="batch/v1, Resource=jobs" 2025-12-08T17:44:22.087119765+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="skipping irrelevant gvr" gvr="rbac.authorization.k8s.io/v1, Resource=rolebindings" 2025-12-08T17:44:22.122159790+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="detected ability to filter informers" canFilter=true 2025-12-08T17:44:22.175944408+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="OpenShift Proxy API available - setting up watch for Proxy type" 2025-12-08T17:44:22.175944408+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="OpenShift Proxy query will be used to fetch cluster proxy configuration" 2025-12-08T17:44:22.175944408+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="[CSV NS Plug-in] setting up csv namespace plug-in for namespaces: []" 2025-12-08T17:44:22.175944408+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="[CSV NS Plug-in] registering namespace informer" 2025-12-08T17:44:22.175944408+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="[CSV NS Plug-in] setting up namespace: " 2025-12-08T17:44:22.175944408+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="[CSV NS Plug-in] registered csv queue informer for: " 
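The paths in the archive headers above show where the per-container logs land once this output is unpacked (crc-logs-artifacts/pods/<namespace>_<pod>_<uid>/<container>/0.log). A small sketch, assuming the archive has been extracted locally, for pulling the same olm-operator startup lines back out of it:

# Hypothetical search of the unpacked CRC pod logs for the olm-operator startup messages above
grep -r "skipping irrelevant gvr" \
  home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/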
2025-12-08T17:44:22.175944408+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="[CSV NS Plug-in] finished setting up csv namespace labeler plugin" 2025-12-08T17:44:22.179976148+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="connection established. cluster-version: v1.33.5" 2025-12-08T17:44:22.179976148+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="operator ready" 2025-12-08T17:44:22.179976148+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="starting informers..." 2025-12-08T17:44:22.179976148+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="informers started" 2025-12-08T17:44:22.179976148+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="waiting for caches to sync..." 2025-12-08T17:44:22.385216776+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="starting workers..." 2025-12-08T17:44:22.385216776+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="Initializing cluster operator monitor for package server" 2025-12-08T17:44:22.385216776+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="monitoring the following components [operator-lifecycle-manager-packageserver]" monitor=clusteroperator 2025-12-08T17:44:22.387025936+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="starting clusteroperator monitor loop" monitor=clusteroperator 2025-12-08T17:44:22.387318523+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","source":"kind source: *v1.Deployment"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","source":"kind source: *v2.OperatorCondition"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","source":"kind source: *v1.Role"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","source":"kind source: *v1.RoleBinding"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator-condition-generator","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v2.OperatorCondition"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.Operator"} 2025-12-08T17:44:22.391140357+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.Deployment"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.Namespace"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.CustomResourceDefinition"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.APIService"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1alpha1.Subscription"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1alpha1.InstallPlan"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1alpha1.ClusterServiceVersion"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v2.OperatorCondition"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting 
EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1alpha1.ClusterServiceVersion"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.Deployment"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.Namespace"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.Service"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator-condition-generator","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1alpha1.ClusterServiceVersion"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"subscription","controllerGroup":"operators.coreos.com","controllerKind":"Subscription","source":"kind source: *v1alpha1.InstallPlan"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","source":"channel source: 0xc000581500"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"subscription","controllerGroup":"operators.coreos.com","controllerKind":"Subscription","source":"kind source: *v1alpha1.Subscription"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"subscription","controllerGroup":"operators.coreos.com","controllerKind":"Subscription","source":"kind source: *v1alpha1.ClusterServiceVersion"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","source":"kind source: *v1.ClusterOperator"} 2025-12-08T17:44:22.391140357+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","source":"kind source: *v1alpha1.ClusterServiceVersion"} 2025-12-08T17:44:22.391599610+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.443725902+00:00 stderr F 
{"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.443725902+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.443725902+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.443725902+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1alpha1.Subscription"} 2025-12-08T17:44:22.443725902+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.CustomResourceDefinition"} 2025-12-08T17:44:22.443725902+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.APIService"} 2025-12-08T17:44:22.443725902+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.443725902+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","source":"kind source: *v2.OperatorCondition"} 2025-12-08T17:44:22.443725902+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting EventSource","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","source":"kind source: *v1.PartialObjectMetadata"} 2025-12-08T17:44:22.471492759+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="ClusterOperator api is present" monitor=clusteroperator 2025-12-08T17:44:22.471492759+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="initializing clusteroperator resource(s) for [operator-lifecycle-manager-packageserver]" monitor=clusteroperator 2025-12-08T17:44:22.471492759+00:00 stderr F time="2025-12-08T17:44:22Z" level=warning msg="install timed out" csv=packageserver id=yg6vY namespace=openshift-operator-lifecycle-manager phase=Installing 2025-12-08T17:44:22.471492759+00:00 stderr F I1208 17:44:22.464021 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operator-lifecycle-manager", Name:"packageserver", UID:"09b3d4b2-fc47-4ee0-a331-67a39502cf21", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"34842", FieldPath:""}): type: 'Warning' reason: 'InstallCheckFailed' install timeout 2025-12-08T17:44:22.471492759+00:00 stderr F time="2025-12-08T17:44:22Z" level=info 
msg="initialized cluster resource - operator-lifecycle-manager-packageserver" monitor=clusteroperator 2025-12-08T17:44:22.488566515+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting Controller","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition"} 2025-12-08T17:44:22.488638147+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting workers","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","worker count":1} 2025-12-08T17:44:22.490054266+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting Controller","controller":"operator-condition-generator","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion"} 2025-12-08T17:44:22.490085217+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting workers","controller":"operator-condition-generator","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","worker count":1} 2025-12-08T17:44:22.492697478+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting Controller","controller":"subscription","controllerGroup":"operators.coreos.com","controllerKind":"Subscription"} 2025-12-08T17:44:22.492729289+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting workers","controller":"subscription","controllerGroup":"operators.coreos.com","controllerKind":"Subscription","worker count":1} 2025-12-08T17:44:22.492757610+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting Controller","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator"} 2025-12-08T17:44:22.492777300+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting workers","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","worker count":1} 2025-12-08T17:44:22.514275166+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting Controller","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion"} 2025-12-08T17:44:22.514275166+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting workers","controller":"clusterserviceversion","controllerGroup":"operators.coreos.com","controllerKind":"ClusterServiceVersion","worker count":1} 2025-12-08T17:44:22.514275166+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting Controller","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator"} 2025-12-08T17:44:22.514275166+00:00 stderr F {"level":"info","ts":"2025-12-08T17:44:22Z","msg":"Starting workers","controller":"operator","controllerGroup":"operators.coreos.com","controllerKind":"Operator","worker count":1} 2025-12-08T17:44:22.653851053+00:00 stderr F time="2025-12-08T17:44:22Z" level=warning msg="needs reinstall: apiServices not installed" csv=packageserver id=6/x+C namespace=openshift-operator-lifecycle-manager phase=Failed strategy=deployment 2025-12-08T17:44:22.653851053+00:00 stderr F I1208 17:44:22.651271 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operator-lifecycle-manager", Name:"packageserver", UID:"09b3d4b2-fc47-4ee0-a331-67a39502cf21", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"37259", FieldPath:""}): type: 'Normal' reason: 'NeedsReinstall' apiServices not 
installed 2025-12-08T17:44:22.675962837+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="scheduling ClusterServiceVersion for install" csv=packageserver id=8QkEc namespace=openshift-operator-lifecycle-manager phase=Pending 2025-12-08T17:44:22.676145732+00:00 stderr F I1208 17:44:22.676100 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operator-lifecycle-manager", Name:"packageserver", UID:"09b3d4b2-fc47-4ee0-a331-67a39502cf21", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"37272", FieldPath:""}): type: 'Normal' reason: 'AllRequirementsMet' all requirements found, attempting install 2025-12-08T17:44:22.714926530+00:00 stderr F time="2025-12-08T17:44:22Z" level=warning msg="reusing existing cert packageserver-service-cert" 2025-12-08T17:44:22.817218059+00:00 stderr F I1208 17:44:22.809658 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:44:22.885566304+00:00 stderr F I1208 17:44:22.885487 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operator-lifecycle-manager", Name:"packageserver", UID:"09b3d4b2-fc47-4ee0-a331-67a39502cf21", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"37276", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' waiting for install components to report healthy 2025-12-08T17:44:22.960246931+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="install strategy successful" csv=packageserver id=MoMg5 namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:22.960246931+00:00 stderr F I1208 17:44:22.959351 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operator-lifecycle-manager", Name:"packageserver", UID:"09b3d4b2-fc47-4ee0-a331-67a39502cf21", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"37299", FieldPath:""}): type: 'Normal' reason: 'InstallWaiting' apiServices not installed 2025-12-08T17:44:23.053363701+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="install strategy successful" csv=packageserver id=kvS3X namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:23.053841284+00:00 stderr F I1208 17:44:23.053794 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operator-lifecycle-manager", Name:"packageserver", UID:"09b3d4b2-fc47-4ee0-a331-67a39502cf21", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"37299", FieldPath:""}): type: 'Normal' reason: 'InstallWaiting' apiServices not installed 2025-12-08T17:44:23.097017021+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"packageserver\": the object has been modified; please apply your changes to the latest version and try again" csv=packageserver id=L5Rd3 namespace=openshift-operator-lifecycle-manager phase=Installing 2025-12-08T17:44:23.097017021+00:00 stderr F E1208 17:44:23.068792 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operator-lifecycle-manager/packageserver\" failed: error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"packageserver\": the object has been modified; please apply your changes to the latest version and try again" 
logger="UnhandledError" 2025-12-08T17:44:23.158816217+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="install strategy successful" csv=packageserver id=wRzcF namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:23.262591668+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="install strategy successful" csv=packageserver id=nQzUi namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:23.368166447+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="install strategy successful" csv=packageserver id=RGqYK namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:23.472917044+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="install strategy successful" csv=packageserver id=TATZa namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:23.574160137+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="install strategy successful" csv=packageserver id=5ol6n namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:23.685197845+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="install strategy successful" csv=packageserver id=M3zEh namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:23.784832153+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="install strategy successful" csv=packageserver id=+pITd namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:23.875312191+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="install strategy successful" csv=packageserver id=4eeEa namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:23.990650687+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="install strategy successful" csv=packageserver id=6T0mm namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:24.086320216+00:00 stderr F time="2025-12-08T17:44:24Z" level=info msg="install strategy successful" csv=packageserver id=q4B3d namespace=openshift-operator-lifecycle-manager phase=Installing strategy=deployment 2025-12-08T17:44:24.086320216+00:00 stderr F I1208 17:44:24.076292 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operator-lifecycle-manager", Name:"packageserver", UID:"09b3d4b2-fc47-4ee0-a331-67a39502cf21", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"37316", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' install strategy completed with no errors 2025-12-08T17:54:51.060930687+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="creating cluster role: olm.og.service-telemetry-operator-group.admin-6XinD3qDPRoufFZY9j0Gesfq6cfP7qeV80bzMU owned by operator group: service-telemetry/service-telemetry-operator-group" 2025-12-08T17:54:51.077576195+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="creating cluster role: olm.og.service-telemetry-operator-group.edit-5FCh2FwFebR0ucQCOrpRprVt227zr6pDCh4P9I owned by operator group: service-telemetry/service-telemetry-operator-group" 2025-12-08T17:54:51.087016609+00:00 stderr F time="2025-12-08T17:54:51Z" level=info msg="creating cluster role: olm.og.service-telemetry-operator-group.view-19H0oeQZasak9kBFyGYeMcbw9IgDiyNKbAOJvC owned by operator group: 
service-telemetry/service-telemetry-operator-group" 2025-12-08T17:54:54.032938807+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="creating cluster role: olm.og.cert-manager-operator.admin-cNnRMe4AZ6jFumaJsnwdwxTA3LASLEavu7WVnK owned by operator group: cert-manager-operator/cert-manager-operator" 2025-12-08T17:54:54.040009698+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="creating cluster role: olm.og.cert-manager-operator.edit-kLXRGib6xzHLCUDYHM29CXWY892FmGafLchZd owned by operator group: cert-manager-operator/cert-manager-operator" 2025-12-08T17:54:54.045509775+00:00 stderr F time="2025-12-08T17:54:54Z" level=info msg="creating cluster role: olm.og.cert-manager-operator.view-7O1zTAUZYYK1ZGD76vxclCLn1JYZ9KPomiDmSV owned by operator group: cert-manager-operator/cert-manager-operator" 2025-12-08T17:55:07.813018256+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="[CSV NS Plug-in] applied security.openshift.io/scc.podSecurityLabelSync=true label to namespace " 2025-12-08T17:55:07.816302564+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:07Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"2c8130b3-c5f9-4100-8cdb-e9f0e2fb6b2d","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:07.823808406+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:07Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"b37d3e2e-928a-45bd-a4b3-1b22a4033f55","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:07.823910009+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:07Z","msg":"Reconciler 
error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"e6d473a0-ddf3-4b48-b14a-ac7a265263b3","error":"Deployment.apps \"obo-prometheus-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:07.835130811+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:07Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"6eff93a2-a0ff-4292-a70e-68d573a4fc30","error":"Deployment.apps \"obo-prometheus-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:07.875378644+00:00 stderr F time="2025-12-08T17:55:07Z" level=warning msg="error adding operatorgroup annotations" csv=cluster-observability-operator.v1.3.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cluster-observability-operator.v1.3.0\": the object has been modified; please apply your changes to the latest version and try again" id=URB75 namespace=openshift-operators opgroup=global-operators phase= 2025-12-08T17:55:07.875378644+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="operatorgroup incorrect" csv=cluster-observability-operator.v1.3.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cluster-observability-operator.v1.3.0\": the object has been modified; please apply your changes to the latest version and try again" id=URB75 namespace=openshift-operators phase= 2025-12-08T17:55:07.875378644+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="not in operatorgroup namespace" csv=cluster-observability-operator.v1.3.0 id=w1IRx namespace=openshift-operators phase= 2025-12-08T17:55:07.875458546+00:00 stderr F E1208 17:55:07.875395 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cluster-observability-operator.v1.3.0\": the object has been modified; please apply your changes to the latest 
version and try again" logger="UnhandledError" 2025-12-08T17:55:07.876696330+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:07Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"90d5920c-f192-470e-985d-36a6b51c419e","error":"Deployment.apps \"obo-prometheus-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:07.908277549+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="operatorgroup incorrect" csv=cluster-observability-operator.v1.3.0 error="" id=KnhIR namespace=openshift-operators phase= 2025-12-08T17:55:07.908277549+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="not in operatorgroup namespace" csv=cluster-observability-operator.v1.3.0 id=9SMZF namespace=openshift-operators phase= 2025-12-08T17:55:07.924861136+00:00 stderr F time="2025-12-08T17:55:07Z" level=warning msg="error adding operatorgroup annotations" csv=cluster-observability-operator.v1.3.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cluster-observability-operator.v1.3.0\": the object has been modified; please apply your changes to the latest version and try again" id=nXAuU namespace=openshift-operators opgroup=global-operators phase= 2025-12-08T17:55:07.924861136+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="operatorgroup incorrect" csv=cluster-observability-operator.v1.3.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cluster-observability-operator.v1.3.0\": the object has been modified; please apply your changes to the latest version and try again" id=nXAuU namespace=openshift-operators phase= 2025-12-08T17:55:07.924861136+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="not in operatorgroup namespace" csv=cluster-observability-operator.v1.3.0 id=PBbm/ namespace=openshift-operators phase= 2025-12-08T17:55:07.924861136+00:00 stderr F E1208 17:55:07.922894 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cluster-observability-operator.v1.3.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:07.928851873+00:00 stderr F time="2025-12-08T17:55:07Z" level=warning msg="error adding operatorgroup annotations" csv=cluster-observability-operator.v1.3.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cluster-observability-operator.v1.3.0\": the object has been modified; please apply your changes to the latest version and try again" namespace=openshift-operators operatorGroup=global-operators 
2025-12-08T17:55:07.928851873+00:00 stderr F time="2025-12-08T17:55:07Z" level=warning msg="failed to annotate CSVs in operatorgroup after group change" error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cluster-observability-operator.v1.3.0\": the object has been modified; please apply your changes to the latest version and try again" namespace=openshift-operators operatorGroup=global-operators 2025-12-08T17:55:07.928909854+00:00 stderr F E1208 17:55:07.928862 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/global-operators\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cluster-observability-operator.v1.3.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:07.948207134+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=M8TXG namespace=openshift-operators phase= 2025-12-08T17:55:07.948241025+00:00 stderr F E1208 17:55:07.948216 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:07.952226323+00:00 stderr F time="2025-12-08T17:55:07Z" level=info msg="scheduling ClusterServiceVersion for requirement verification" csv=cluster-observability-operator.v1.3.0 id=zYQ3j namespace=openshift-operators phase= 2025-12-08T17:55:07.953271830+00:00 stderr F I1208 17:55:07.953238 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operators", Name:"cluster-observability-operator.v1.3.0", UID:"63202f10-d8fe-4d4a-a00f-7a183b8af0c6", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"40962", FieldPath:""}): type: 'Normal' reason: 'RequirementsUnknown' requirements not yet checked 2025-12-08T17:55:07.956281671+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:07Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"cdc9bb3c-97d9-4903-ba2a-6b1b00d34c2c","error":"Deployment.apps \"obo-prometheus-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:07.982915988+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:07Z","msg":"Reconciler 
error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"83fb45ed-aad9-4f76-857b-f903e34014fe","error":"Deployment.apps \"obo-prometheus-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:08.045647356+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=3Ljh9 namespace=openshift-operators phase= 2025-12-08T17:55:08.045647356+00:00 stderr F E1208 17:55:08.043058 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:08.129425081+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:08Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"22be2bdb-3a33-4467-8dd8-aba4235e5b80","error":"Deployment.apps \"obo-prometheus-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:08.360011116+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=7ij/z namespace=openshift-operators phase=Pending 2025-12-08T17:55:08.360011116+00:00 stderr F I1208 17:55:08.358008 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operators", Name:"cluster-observability-operator.v1.3.0", UID:"63202f10-d8fe-4d4a-a00f-7a183b8af0c6", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"40971", FieldPath:""}): type: 'Normal' reason: 'RequirementsNotMet' one or more requirements couldn't be found 2025-12-08T17:55:08.450018368+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=b6Dpw namespace=openshift-operators phase=Pending 2025-12-08T17:55:08.450157962+00:00 stderr F E1208 17:55:08.450135 1 
queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:08.770014660+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:08Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"b2c38c16-8b75-411b-92b6-47bdf146dc0c","error":"Deployment.apps \"obo-prometheus-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:08.871419539+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=bN+UJ namespace=openshift-operators phase=Pending 2025-12-08T17:55:08.898964600+00:00 stderr F time="2025-12-08T17:55:08Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=Vp24p namespace=openshift-operators phase=Pending 2025-12-08T17:55:08.898964600+00:00 stderr F E1208 17:55:08.898552 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:09.390432675+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=GZRXH namespace=openshift-operators phase=Pending 2025-12-08T17:55:09.412937131+00:00 stderr F time="2025-12-08T17:55:09Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=Y2GZo namespace=openshift-operators phase=Pending 2025-12-08T17:55:09.412937131+00:00 stderr F E1208 17:55:09.410972 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:10.050496498+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:10Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"9b968426-e4f0-4947-85c2-59a7935a90b8","error":"Deployment.apps \"obo-prometheus-operator\" not 
found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:10.220904144+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=WDULB namespace=openshift-operators phase=Pending 2025-12-08T17:55:10.241042075+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=HNiFG namespace=openshift-operators phase=Pending 2025-12-08T17:55:10.241042075+00:00 stderr F E1208 17:55:10.241033 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:10.838716559+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=K4xcE namespace=openshift-operators phase=Pending 2025-12-08T17:55:10.849836308+00:00 stderr F time="2025-12-08T17:55:10Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=zv2Q3 namespace=openshift-operators phase=Pending 2025-12-08T17:55:10.849909710+00:00 stderr F E1208 17:55:10.849824 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:11.104282146+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=DdW3O namespace=openshift-operators phase=Pending 2025-12-08T17:55:11.113445952+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=ACu9G namespace=openshift-operators phase=Pending 2025-12-08T17:55:11.497315052+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=Ps78B namespace=openshift-operators phase=Pending 2025-12-08T17:55:11.505588885+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=BuPOB namespace=openshift-operators phase=Pending 2025-12-08T17:55:11.505619856+00:00 stderr F E1208 17:55:11.505603 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:11.814587380+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=AzqyL namespace=openshift-operators phase=Pending 2025-12-08T17:55:11.822008690+00:00 stderr F time="2025-12-08T17:55:11Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 
error="no owned roles found" id=2vSkb namespace=openshift-operators phase=Pending 2025-12-08T17:55:11.822008690+00:00 stderr F E1208 17:55:11.820486 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:12.190921378+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=V8bDQ namespace=openshift-operators phase=Pending 2025-12-08T17:55:12.205983373+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=elpf3 namespace=openshift-operators phase=Pending 2025-12-08T17:55:12.205983373+00:00 stderr F E1208 17:55:12.202417 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:12.605557237+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=TYtQB namespace=openshift-operators phase=Pending 2025-12-08T17:55:12.610957972+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:12Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"204c0468-c96b-4598-8efb-279267001714","error":"Deployment.apps \"obo-prometheus-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:12.614750634+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=WP65X namespace=openshift-operators phase=Pending 2025-12-08T17:55:12.614795865+00:00 stderr F E1208 17:55:12.614776 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:12.835458464+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:12Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try 
again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:12.931764195+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=amUeh namespace=openshift-operators phase=Pending 2025-12-08T17:55:12.947989641+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=lLXvG namespace=openshift-operators phase=Pending 2025-12-08T17:55:12.947989641+00:00 stderr F E1208 17:55:12.947979 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:13.123032553+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:13Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:13.255126017+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=UjEzB namespace=openshift-operators phase=Pending 2025-12-08T17:55:13.268823776+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=JBYc3 
namespace=openshift-operators phase=Pending 2025-12-08T17:55:13.269022311+00:00 stderr F E1208 17:55:13.268991 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:13.570004660+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:13Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"ad924d11-4b1a-451b-81e1-149889530fb1","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:13.593936315+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:13Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"19b1ff5e-d657-4d3b-ba75-62b8b9147232","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:13.614948090+00:00 stderr F time="2025-12-08T17:55:13Z" level=warning msg="error adding operatorgroup annotations" csv=elasticsearch-eck-operator-certified.v3.2.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" id=Pvesx namespace=service-telemetry opgroup=service-telemetry-operator-group phase= 2025-12-08T17:55:13.614948090+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="operatorgroup incorrect" csv=elasticsearch-eck-operator-certified.v3.2.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" id=Pvesx namespace=service-telemetry phase= 2025-12-08T17:55:13.614948090+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="not in operatorgroup namespace" csv=elasticsearch-eck-operator-certified.v3.2.0 id=AEVZO 
namespace=service-telemetry phase= 2025-12-08T17:55:13.615002381+00:00 stderr F E1208 17:55:13.614961 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:13.648926654+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="operatorgroup incorrect" csv=elasticsearch-eck-operator-certified.v3.2.0 error="" id=aklkc namespace=service-telemetry phase= 2025-12-08T17:55:13.648926654+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="not in operatorgroup namespace" csv=elasticsearch-eck-operator-certified.v3.2.0 id=R1VpA namespace=service-telemetry phase= 2025-12-08T17:55:13.665498640+00:00 stderr F time="2025-12-08T17:55:13Z" level=warning msg="error adding operatorgroup annotations" csv=elasticsearch-eck-operator-certified.v3.2.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" id=mbKjg namespace=service-telemetry opgroup=service-telemetry-operator-group phase= 2025-12-08T17:55:13.665498640+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="operatorgroup incorrect" csv=elasticsearch-eck-operator-certified.v3.2.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" id=mbKjg namespace=service-telemetry phase= 2025-12-08T17:55:13.665498640+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="not in operatorgroup namespace" csv=elasticsearch-eck-operator-certified.v3.2.0 id=V9XjO namespace=service-telemetry phase= 2025-12-08T17:55:13.665498640+00:00 stderr F E1208 17:55:13.665138 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:13.669645252+00:00 stderr F time="2025-12-08T17:55:13Z" level=warning msg="error adding operatorgroup annotations" csv=elasticsearch-eck-operator-certified.v3.2.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" namespace=service-telemetry operatorGroup=service-telemetry-operator-group 2025-12-08T17:55:13.669645252+00:00 stderr F time="2025-12-08T17:55:13Z" level=warning msg="failed to annotate CSVs in operatorgroup after group change" error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" namespace=service-telemetry operatorGroup=service-telemetry-operator-group 2025-12-08T17:55:13.669645252+00:00 stderr F E1208 17:55:13.666088 1 queueinformer_operator.go:312] "Unhandled Error" err="sync 
\"service-telemetry/service-telemetry-operator-group\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:13.682096877+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="scheduling ClusterServiceVersion for requirement verification" csv=elasticsearch-eck-operator-certified.v3.2.0 id=7bThG namespace=service-telemetry phase= 2025-12-08T17:55:13.685059376+00:00 stderr F I1208 17:55:13.682652 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41355", FieldPath:""}): type: 'Normal' reason: 'RequirementsUnknown' requirements not yet checked 2025-12-08T17:55:13.717427258+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:13Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"5dd3a58f-60a0-4544-b618-712335a007a9","error":"Deployment.apps \"elastic-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:13.717427258+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:13Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"db5eef0d-d7ad-4956-aebc-291ff73a39ee","error":"Deployment.apps \"elastic-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:13.746609433+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:13Z","msg":"Reconciler 
error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"1d5129b4-ad68-48ee-95b1-3a0df65023f0","error":"Deployment.apps \"elastic-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:13.774964156+00:00 stderr F time="2025-12-08T17:55:13Z" level=error msg="update existing cluster role failed: nil" error="Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io \"olm.og.service-telemetry-operator-group.view-19H0oeQZasak9kBFyGYeMcbw9IgDiyNKbAOJvC\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:55:13.820606025+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:13Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"5232b912-013e-4942-9f8c-75fbba51ecf2","error":"Deployment.apps \"elastic-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:13.871807062+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=8t5t5 namespace=openshift-operators phase=Pending 2025-12-08T17:55:13.898699185+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=nod0t namespace=openshift-operators phase=Pending 2025-12-08T17:55:13.898790858+00:00 stderr F E1208 17:55:13.898776 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:13.905198101+00:00 stderr F time="2025-12-08T17:55:13Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=lzvIQ namespace=service-telemetry phase=Pending 2025-12-08T17:55:13.905198101+00:00 stderr F I1208 17:55:13.904335 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", 
Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41378", FieldPath:""}): type: 'Normal' reason: 'RequirementsNotMet' one or more requirements couldn't be found 2025-12-08T17:55:13.934176871+00:00 stderr F E1208 17:55:13.933106 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:13.983172650+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:13Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"8d6952ee-120f-4454-9337-22f9aa197c0c","error":"Deployment.apps \"elastic-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:14.048322132+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=3070P namespace=service-telemetry phase=Pending 2025-12-08T17:55:14.048687172+00:00 stderr F I1208 17:55:14.048662 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41378", FieldPath:""}): type: 'Normal' reason: 'RequirementsNotMet' one or more requirements couldn't be found 2025-12-08T17:55:14.063069809+00:00 stderr F E1208 17:55:14.063013 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: error transitioning ClusterServiceVersion: requirements were not met and error updating CSV status: error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:14.127680368+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=44c2S namespace=service-telemetry phase=Pending 2025-12-08T17:55:14.127680368+00:00 stderr F E1208 17:55:14.127648 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:14.198814613+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=OMzGC namespace=service-telemetry 
phase=Pending 2025-12-08T17:55:14.198855564+00:00 stderr F E1208 17:55:14.198809 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:14.276458801+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=JCNIf namespace=service-telemetry phase=Pending 2025-12-08T17:55:14.276495702+00:00 stderr F E1208 17:55:14.276459 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:14.290163360+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=Iz9R9 namespace=openshift-operators phase=Pending 2025-12-08T17:55:14.303851219+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:14Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"15d9ad89-0efc-4588-aabf-b5d5b5d38ac2","error":"Deployment.apps \"elastic-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:14.322676486+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=p0Aq3 namespace=openshift-operators phase=Pending 2025-12-08T17:55:14.322712766+00:00 stderr F E1208 17:55:14.322672 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"openshift-operators/cluster-observability-operator.v1.3.0\" failed: no owned roles found" logger="UnhandledError" 2025-12-08T17:55:14.352801175+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=3St5f namespace=service-telemetry phase=Pending 2025-12-08T17:55:14.352834896+00:00 stderr F E1208 17:55:14.352820 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:14.447048752+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=0phK9 namespace=service-telemetry phase=Pending 2025-12-08T17:55:14.447048752+00:00 stderr F E1208 17:55:14.446595 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:14.505844624+00:00 stderr F 
{"level":"error","ts":"2025-12-08T17:55:14Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:14.528510744+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=1+2U1 namespace=service-telemetry phase=Pending 2025-12-08T17:55:14.528510744+00:00 stderr F E1208 17:55:14.528423 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:14.650417225+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="requirements were not met" csv=cluster-observability-operator.v1.3.0 id=RuIPT namespace=openshift-operators phase=Pending 2025-12-08T17:55:14.671153783+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="couldn't ensure RBAC in target namespaces" csv=cluster-observability-operator.v1.3.0 error="no owned roles found" id=H9Llk namespace=openshift-operators phase=Pending 2025-12-08T17:55:14.818758796+00:00 stderr F time="2025-12-08T17:55:14Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=7iFit namespace=service-telemetry phase=Pending 2025-12-08T17:55:14.945248639+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:14Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"25128cd7-27fe-4991-b7b1-1d766a81b16a","error":"Deployment.apps \"elastic-operator\" not 
found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.280586114+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=yO9ex namespace=service-telemetry phase=Pending 2025-12-08T17:55:15.280628205+00:00 stderr F E1208 17:55:15.280617 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:15.342431318+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="scheduling ClusterServiceVersion for install" csv=cluster-observability-operator.v1.3.0 id=vSaTM namespace=openshift-operators phase=Pending 2025-12-08T17:55:15.342663954+00:00 stderr F I1208 17:55:15.342618 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operators", Name:"cluster-observability-operator.v1.3.0", UID:"63202f10-d8fe-4d4a-a00f-7a183b8af0c6", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"40995", FieldPath:""}): type: 'Normal' reason: 'AllRequirementsMet' all requirements found, attempting install 2025-12-08T17:55:15.350666720+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=Jz1Ui namespace=service-telemetry phase=Pending 2025-12-08T17:55:15.350702501+00:00 stderr F E1208 17:55:15.350691 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:15.382570968+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try 
again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.404790356+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="No api or webhook descs to add CA to" 2025-12-08T17:55:15.477347049+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=OQsyK namespace=service-telemetry phase=Pending 2025-12-08T17:55:15.477419541+00:00 stderr F E1208 17:55:15.477358 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:15.562470379+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=wc0ZP namespace=service-telemetry phase=Pending 2025-12-08T17:55:15.562513920+00:00 stderr F E1208 17:55:15.562482 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:15.566034396+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="No api or webhook descs to add CA to" 2025-12-08T17:55:15.566034396+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="No api or webhook descs to add CA to" 2025-12-08T17:55:15.582260062+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"a7540684-26aa-450e-8729-a46b399ed3f5","error":"Deployment.apps \"obo-prometheus-operator-admission-webhook\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.600785121+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler 
error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"3447785e-854c-486e-8e14-ad7cde54bd54","error":"Deployment.apps \"obo-prometheus-operator-admission-webhook\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.604071979+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"8bd5b1b5-94c9-4842-a885-3bc2da4e2e34","error":"Deployment.apps \"obo-prometheus-operator-admission-webhook\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.616324909+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"a033879b-aa61-4e98-83bd-cd6a6a855a92","error":"Deployment.apps \"obo-prometheus-operator-admission-webhook\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.640356975+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler 
error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"b59ea131-1620-4bc3-bb83-8e048d97a8f4","error":"Deployment.apps \"obo-prometheus-operator-admission-webhook\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.704975024+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"2edf0a3f-3c8a-466a-a28b-f4fe64115ed8","error":"Deployment.apps \"observability-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.709276720+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.713076862+00:00 
stderr F time="2025-12-08T17:55:15Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=2Vefo namespace=service-telemetry phase=Pending 2025-12-08T17:55:15.713148984+00:00 stderr F E1208 17:55:15.713103 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:15.728978751+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"17473093-9e8e-4f8b-a0cb-10e2bf02b762","error":"Deployment.apps \"observability-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.749409490+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"518430e4-b6be-4ccb-8ace-28211fb51793","error":"Deployment.apps \"observability-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.749409490+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"faf6404e-7622-43fc-a9cc-c0c0ead05183","error":"Deployment.apps \"observability-operator\" not 
found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.754129458+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"03898854-0271-4f7d-95c3-c7f6284f6d7f","error":"Deployment.apps \"observability-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.858508586+00:00 stderr F time="2025-12-08T17:55:15Z" level=info msg="requirements were not met" csv=elasticsearch-eck-operator-certified.v3.2.0 id=9JbGx namespace=service-telemetry phase=Pending 2025-12-08T17:55:15.858544997+00:00 stderr F E1208 17:55:15.858521 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:15.905906172+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"fc2be270-cf53-41b9-94c2-b901fd28cff1","error":"Deployment.apps \"perses-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.915524000+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler 
error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"00be32c4-758f-4039-8d69-85eb93224cf4","error":"Deployment.apps \"perses-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:15.988741811+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"6dd2162f-1c3f-41fc-b08d-9022fb37e126","error":"Deployment.apps \"perses-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:16.007240219+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:15Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:16.024653827+00:00 stderr F 
{"level":"error","ts":"2025-12-08T17:55:16Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cluster-observability-operator.v1.3.0","namespace":"openshift-operators"},"namespace":"openshift-operators","name":"cluster-observability-operator.v1.3.0","reconcileID":"43b3c1a9-87e3-4770-a3b4-0a668f8353fb","error":"Deployment.apps \"perses-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:16.212429050+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="scheduling ClusterServiceVersion for install" csv=elasticsearch-eck-operator-certified.v3.2.0 id=Kn8OG namespace=service-telemetry phase=Pending 2025-12-08T17:55:16.212429050+00:00 stderr F I1208 17:55:16.212299 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41407", FieldPath:""}): type: 'Normal' reason: 'AllRequirementsMet' all requirements found, attempting install 2025-12-08T17:55:16.226794957+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:16Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"b5afbcac-7569-4d74-bae3-a2f4c021a246","error":"Deployment.apps \"elastic-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:16.297377747+00:00 stderr F I1208 17:55:16.297079 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operators", Name:"cluster-observability-operator.v1.3.0", UID:"63202f10-d8fe-4d4a-a00f-7a183b8af0c6", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41477", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' waiting for install components to report healthy 2025-12-08T17:55:16.413023029+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="scheduling ClusterServiceVersion for install" csv=elasticsearch-eck-operator-certified.v3.2.0 id=Ez0TI namespace=service-telemetry phase=Pending 2025-12-08T17:55:16.417215631+00:00 stderr F I1208 17:55:16.413773 1 event.go:377] 
Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41407", FieldPath:""}): type: 'Normal' reason: 'AllRequirementsMet' all requirements found, attempting install 2025-12-08T17:55:16.427016106+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" csv=elasticsearch-eck-operator-certified.v3.2.0 id=saLSg namespace=service-telemetry phase=Pending 2025-12-08T17:55:16.427058177+00:00 stderr F E1208 17:55:16.427044 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:16.433561971+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:16Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:16.439815219+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=8KcV6 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:16.441078594+00:00 stderr F I1208 17:55:16.440709 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operators", Name:"cluster-observability-operator.v1.3.0", UID:"63202f10-d8fe-4d4a-a00f-7a183b8af0c6", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41604", FieldPath:""}): type: 'Normal' reason: 'InstallWaiting' installing: waiting for deployment obo-prometheus-operator to become ready: deployment "obo-prometheus-operator" not available: Deployment does not have minimum availability. 
2025-12-08T17:55:16.716870785+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:16Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"elasticsearch-eck-operator-certified.v3.2.0","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"elasticsearch-eck-operator-certified.v3.2.0","reconcileID":"5afa8744-ea9e-42df-95a5-ae039e20b390","error":"Deployment.apps \"elastic-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:16.769609615+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:16Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:16.846227437+00:00 stderr F time="2025-12-08T17:55:16Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=OhTAN namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:17.056612259+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=/3fHD namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:17.153968918+00:00 stderr F I1208 17:55:17.153840 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41596", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' waiting for install components to report healthy 2025-12-08T17:55:17.169499757+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="error updating 
ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" csv=elasticsearch-eck-operator-certified.v3.2.0 id=hzr0S namespace=service-telemetry phase=InstallReady 2025-12-08T17:55:17.169539318+00:00 stderr F E1208 17:55:17.169513 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:17.209514983+00:00 stderr F time="2025-12-08T17:55:17Z" level=warning msg="reusing existing cert elastic-operator-service-cert" 2025-12-08T17:55:17.247349812+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=fXPsn namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:17.452764580+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=i6I5f namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:17.649468703+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=/6f/X namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:17.779038461+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=ZzcRj namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:17.858592581+00:00 stderr F I1208 17:55:17.858136 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41634", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' waiting for install components to report healthy 2025-12-08T17:55:17.952135538+00:00 stderr F time="2025-12-08T17:55:17Z" level=warning msg="reusing existing cert elastic-operator-service-cert" 2025-12-08T17:55:17.960982086+00:00 stderr F time="2025-12-08T17:55:17Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=/mYMP namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:18.082815795+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:18Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"elasticsearch-eck-operator-certified.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"elasticsearch-eck-operator-certified.service-telemetry\": the object has been modified; please apply your changes to the latest version and try 
again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:18.370780795+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=AXSdE namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:18.580597471+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=3ohbC namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:18.755467717+00:00 stderr F I1208 17:55:18.755400 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41634", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' waiting for install components to report healthy 2025-12-08T17:55:18.766143764+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" csv=elasticsearch-eck-operator-certified.v3.2.0 id=yYn7n namespace=service-telemetry phase=InstallReady 2025-12-08T17:55:18.766143764+00:00 stderr F E1208 17:55:18.765596 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:18.855711355+00:00 stderr F time="2025-12-08T17:55:18Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=qP+xk namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:19.375134462+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=80QQE namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:19.668293192+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=aopPL namespace=openshift-operators phase=Installing strategy=deployment 
2025-12-08T17:55:19.877362809+00:00 stderr F time="2025-12-08T17:55:19Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=BlqmN namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:20.282020468+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=D6AzX namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:20.282020468+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=FLi6S namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:20.282432219+00:00 stderr F I1208 17:55:20.282392 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41757", FieldPath:""}): type: 'Normal' reason: 'InstallWaiting' installing: waiting for deployment elastic-operator to become ready: deployment "elastic-operator" not available: Deployment does not have minimum availability. 2025-12-08T17:55:20.508059381+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=X982G namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:20.696500222+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=lzIvT namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:20.734997819+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=soWw3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:20.734997819+00:00 stderr F I1208 17:55:20.734543 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41757", FieldPath:""}): type: 'Normal' reason: 'InstallWaiting' installing: waiting for deployment elastic-operator to become ready: deployment "elastic-operator" not available: Deployment does not have minimum availability. 
2025-12-08T17:55:20.742892991+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" csv=elasticsearch-eck-operator-certified.v3.2.0 id=G567m namespace=service-telemetry phase=Installing 2025-12-08T17:55:20.742962223+00:00 stderr F E1208 17:55:20.742918 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/elasticsearch-eck-operator-certified.v3.2.0\" failed: error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"elasticsearch-eck-operator-certified.v3.2.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:20.809250416+00:00 stderr F time="2025-12-08T17:55:20Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=nrHC5 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:21.086491427+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=hxx9j namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:21.196167949+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=fRMx/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:21.206122777+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=2dCDI namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:21.259805791+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=MQRtx namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:21.310546137+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=cAsTu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:21.332968790+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=uZbSN namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:21.502380640+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=O+H2w namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:21.517907017+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=LEppz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:21.567468901+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=r1IlR namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:21.743196130+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=xBbBg namespace=service-telemetry phase=Installing 
strategy=deployment 2025-12-08T17:55:21.806925325+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=lIQFf namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:21.930657685+00:00 stderr F time="2025-12-08T17:55:21Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=fvZej namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:22.012521648+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=fgq5X namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:22.018493109+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=5Fxst namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:22.069842330+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=g6J/c namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:22.086738095+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=bWAVF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:22.333159036+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=YvPFS namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:22.376453412+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=K2L0w namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:22.546349744+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=hNqu3 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:22.594149640+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=yOrLU namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:22.605793044+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=ZRPR+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:22.632050890+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=JkPMN namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:22.663325292+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=YtKby namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:22.667001020+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=b1c5N namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:22.720748188+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=U5Qwe namespace=openshift-operators 
phase=Installing strategy=deployment 2025-12-08T17:55:22.740324164+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=rljyo namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:22.755973395+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=EmHw3 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:22.796746503+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=t6tP4 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:22.812583488+00:00 stderr F time="2025-12-08T17:55:22Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=B5+9Y namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:23.041997992+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=oBxzk namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:23.088551445+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=FYAGO namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:23.089340966+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=IcDTd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:23.188941857+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=nvvax namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:23.209932611+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=k9c2X namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:23.254269885+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=qrm30 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:23.300934601+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=thYqM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:23.317783194+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=EJLmO namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:23.467584425+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=R3wGp namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:23.470664428+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=446cu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:23.740370666+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=utIjr 
namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:23.867947286+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=2oKqj namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:23.892404347+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=ZuVKn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:23.930963356+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=z70Cz namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:23.971147088+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=8Lbif namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:23.973147743+00:00 stderr F time="2025-12-08T17:55:23Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=hciRR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.010377424+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=Thlws namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.048093879+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=T1mw2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.060085759+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=TZBw/ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.100940999+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=jHjCj namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.110819791+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=ybe8e namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.138145291+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=LTC8w namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.183860425+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=/feqV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.186256401+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=6T2E8 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.229595889+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=GchGu namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.246790291+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" 
csv=elasticsearch-eck-operator-certified.v3.2.0 id=HIoQ8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.265944047+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=qa5Vg namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.300045162+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=1c2Hw namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.311013493+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=v1NXN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.337274545+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=X3LbV namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.376665595+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=wJZGz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.382046143+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=fP7gq namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.514265861+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:24Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"elasticsearch-eck-operator-certified.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"elasticsearch-eck-operator-certified.service-telemetry\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:24.557426445+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=b3Jme namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.591437108+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=jONQM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.657592434+00:00 stderr F time="2025-12-08T17:55:24Z" level=info 
msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=o0kCo namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.714331640+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=U4/QV namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.721802606+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=OinTb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.757450224+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=Q7OfK namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.787500388+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=tZGYS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.811572209+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=YS0eh namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.868918132+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=nnIi2 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:24.875250267+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=w38MM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:24.953970196+00:00 stderr F time="2025-12-08T17:55:24Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=9TZFF namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.056583672+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=x5O+j namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.064348305+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=7KNrZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.114268155+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=liJa9 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.158611902+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=1ITbz namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.171079953+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=WdWHk namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.191560486+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=pqsMh namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.225041545+00:00 stderr F time="2025-12-08T17:55:25Z" 
level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=pUjgK namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.231550563+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=fP1yU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.269988228+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=CY+/Q namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.301665717+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=/BfXB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.308897165+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=eAcvD namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.350512287+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=JCjt0 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.367127543+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=5CC/K namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.390284408+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=MLJOj namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.429938257+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=lrtgB namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.445437852+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=/pk1g namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.531679338+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=cIqS2 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.578536554+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=WrNlJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.584062066+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=ZubMc namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.621140233+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=kFCf4 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.642475919+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=Dt7ta namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.656814942+00:00 stderr F 
time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=InKes namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.694631470+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=HHKuJ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.701577531+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=yzAAG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.739393738+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=lwqlH namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.763583092+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=DRx+h namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.778835900+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=cGqfG namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.822225711+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=AiK44 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:25.845349395+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=+M30o namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:25.954189522+00:00 stderr F time="2025-12-08T17:55:25Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=pzWR2 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.054791663+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=szmEx namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.059349137+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=4bGTC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:26.099527120+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=fUVVJ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.123619201+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=bqj70 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:26.137636135+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=sFr9L namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.170449716+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=PHISi namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.181334715+00:00 
stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=uhqq5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:26.215005499+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=FxZ6d namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.253120684+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=fCKLs namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.259417668+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=kUMxu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:26.298993463+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=8jltZ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.337150170+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=dVx5e namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.338905728+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=ryCk+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:26.372854120+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:26Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cert-manager-operator.v1.18.0","namespace":"cert-manager-operator"},"namespace":"cert-manager-operator","name":"cert-manager-operator.v1.18.0","reconcileID":"41313343-1af5-4e80-937b-c2ff98e807fa","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:26.380000466+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:26Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cert-manager-operator.v1.18.0","namespace":"cert-manager-operator"},"namespace":"cert-manager-operator","name":"cert-manager-operator.v1.18.0","reconcileID":"a55c0b13-743b-4ee1-9c16-20231ccbd96b","error":"resource name may not be 
empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:26.380145970+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:26Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cert-manager-operator.v1.18.0","namespace":"cert-manager-operator"},"namespace":"cert-manager-operator","name":"cert-manager-operator.v1.18.0","reconcileID":"d25c902f-7599-429f-b6c6-6b1030ea23bf","error":"Deployment.apps \"cert-manager-operator-controller-manager\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:26.380245263+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:26Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cert-manager-operator.v1.18.0","namespace":"cert-manager-operator"},"namespace":"cert-manager-operator","name":"cert-manager-operator.v1.18.0","reconcileID":"67d4d266-3b28-43bd-87a9-ad0a18a5044f","error":"Deployment.apps \"cert-manager-operator-controller-manager\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:26.390487454+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:26Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cert-manager-operator.v1.18.0","namespace":"cert-manager-operator"},"namespace":"cert-manager-operator","name":"cert-manager-operator.v1.18.0","reconcileID":"e406e557-b38d-48a6-9e84-0842cdc5c918","error":"Deployment.apps \"cert-manager-operator-controller-manager\" not 
found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:26.392076137+00:00 stderr F time="2025-12-08T17:55:26Z" level=warning msg="error adding operatorgroup annotations" csv=cert-manager-operator.v1.18.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cert-manager-operator.v1.18.0\": the object has been modified; please apply your changes to the latest version and try again" id=eH1z/ namespace=cert-manager-operator opgroup=cert-manager-operator phase= 2025-12-08T17:55:26.392076137+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="operatorgroup incorrect" csv=cert-manager-operator.v1.18.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cert-manager-operator.v1.18.0\": the object has been modified; please apply your changes to the latest version and try again" id=eH1z/ namespace=cert-manager-operator phase= 2025-12-08T17:55:26.392092278+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="not in operatorgroup namespace" csv=cert-manager-operator.v1.18.0 id=siOcu namespace=cert-manager-operator phase= 2025-12-08T17:55:26.392133369+00:00 stderr F E1208 17:55:26.392113 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cert-manager-operator.v1.18.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:26.471098016+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:26Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cert-manager-operator.v1.18.0","namespace":"cert-manager-operator"},"namespace":"cert-manager-operator","name":"cert-manager-operator.v1.18.0","reconcileID":"872074f1-b488-47f9-819a-8760d25a8a86","error":"Deployment.apps \"cert-manager-operator-controller-manager\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:26.564852479+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=+d+MJ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.620442814+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="operatorgroup incorrect" csv=cert-manager-operator.v1.18.0 error="" id=NL+7+ 
namespace=cert-manager-operator phase= 2025-12-08T17:55:26.620442814+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="not in operatorgroup namespace" csv=cert-manager-operator.v1.18.0 id=sDO/d namespace=cert-manager-operator phase= 2025-12-08T17:55:26.631704463+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:26Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cert-manager-operator.v1.18.0","namespace":"cert-manager-operator"},"namespace":"cert-manager-operator","name":"cert-manager-operator.v1.18.0","reconcileID":"deb3bd5e-59b7-4b77-a5f7-cee6e0c84501","error":"Deployment.apps \"cert-manager-operator-controller-manager\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:26.639098725+00:00 stderr F time="2025-12-08T17:55:26Z" level=warning msg="error adding operatorgroup annotations" csv=cert-manager-operator.v1.18.0 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cert-manager-operator.v1.18.0\": the object has been modified; please apply your changes to the latest version and try again" namespace=cert-manager-operator operatorGroup=cert-manager-operator 2025-12-08T17:55:26.639098725+00:00 stderr F time="2025-12-08T17:55:26Z" level=warning msg="failed to annotate CSVs in operatorgroup after group change" error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cert-manager-operator.v1.18.0\": the object has been modified; please apply your changes to the latest version and try again" namespace=cert-manager-operator operatorGroup=cert-manager-operator 2025-12-08T17:55:26.639098725+00:00 stderr F E1208 17:55:26.639075 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"cert-manager-operator.v1.18.0\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:55:26.663624059+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=zcAVX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:26.767626872+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=R1RGl namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.796028271+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="scheduling ClusterServiceVersion for requirement verification" csv=cert-manager-operator.v1.18.0 id=N9Vna namespace=cert-manager-operator phase= 2025-12-08T17:55:26.796607058+00:00 stderr F I1208 17:55:26.796571 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"cert-manager-operator", Name:"cert-manager-operator.v1.18.0", 
UID:"e7c40b97-cb26-4848-968d-4a92c8291cb2", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"42020", FieldPath:""}): type: 'Normal' reason: 'RequirementsUnknown' requirements not yet checked 2025-12-08T17:55:26.873279322+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=C0XfI namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:26.901456795+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=Iou9Z namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:26.937791812+00:00 stderr F time="2025-12-08T17:55:26Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=Prpfe namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:26.938258334+00:00 stderr F I1208 17:55:26.938180 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"cert-manager-operator", Name:"cert-manager-operator.v1.18.0", UID:"e7c40b97-cb26-4848-968d-4a92c8291cb2", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"42028", FieldPath:""}): type: 'Normal' reason: 'RequirementsNotMet' one or more requirements couldn't be found 2025-12-08T17:55:26.954655865+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:26Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cert-manager-operator.v1.18.0","namespace":"cert-manager-operator"},"namespace":"cert-manager-operator","name":"cert-manager-operator.v1.18.0","reconcileID":"3d259d08-0e39-4af7-be18-4f69e6189288","error":"Deployment.apps \"cert-manager-operator-controller-manager\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:26.958287054+00:00 stderr F E1208 17:55:26.958212 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:27.067069529+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:27Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cert-manager-operator.v1.18.0","namespace":"cert-manager-operator"},"namespace":"cert-manager-operator","name":"cert-manager-operator.v1.18.0","reconcileID":"6bb15473-5bc5-4cdb-8a60-d42b9360cd29","error":"Deployment.apps \"cert-manager-operator-controller-manager\" not 
found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:27.165783958+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=v1yQ/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:27.204711506+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=Q14YB namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:27.204795458+00:00 stderr F E1208 17:55:27.204718 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:27.264557919+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=UGpkC namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:27.511841374+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=nxcnk namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:27.511841374+00:00 stderr F E1208 17:55:27.511552 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:27.596407104+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:27Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"cert-manager-operator.v1.18.0","namespace":"cert-manager-operator"},"namespace":"cert-manager-operator","name":"cert-manager-operator.v1.18.0","reconcileID":"fd4320c6-24aa-425c-adb5-5066c3ec8cc5","error":"Deployment.apps \"cert-manager-operator-controller-manager\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:27.769745500+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=625CZ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:27.869186849+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=YdLuO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:27.893965159+00:00 stderr F 
time="2025-12-08T17:55:27Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=vzDdG namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:27.894016850+00:00 stderr F E1208 17:55:27.893980 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:27.975823655+00:00 stderr F time="2025-12-08T17:55:27Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=BRC8B namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:28.079743467+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=LUNK1 namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:28.079782678+00:00 stderr F E1208 17:55:28.079748 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:28.097636808+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=faDuN namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:28.105177665+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=c3YSn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:28.197431666+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=XDbbr namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:28.283041405+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=FxfYN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:28.321299095+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=0SXRM namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:28.376783358+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=PM3CN namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:28.376783358+00:00 stderr F E1208 17:55:28.376773 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:28.427984502+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=0dQNX namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:28.660648737+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=btJVy namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:28.690796514+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=Eo1dp namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:28.690839315+00:00 stderr F E1208 17:55:28.690817 1 queueinformer_operator.go:312] "Unhandled Error" err="sync 
\"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:28.727125971+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=CVauH namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:28.824388769+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=BnxOZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:28.841973352+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=qBbRm namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:28.841973352+00:00 stderr F E1208 17:55:28.840118 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:28.917325560+00:00 stderr F time="2025-12-08T17:55:28Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=dP0Hu namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:29.031953615+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=oktSC namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:29.047919913+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=jRXDl namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:29.051909433+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=YQqmi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:29.132474723+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=REiCf namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:29.132542625+00:00 stderr F E1208 17:55:29.132517 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:29.146317443+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=FE8AM namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:29.149537051+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=noLtE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:29.208131369+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=gS/Ep namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:29.208131369+00:00 stderr F E1208 17:55:29.208026 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:29.221496626+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=nx0Ta namespace=service-telemetry phase=Installing 
strategy=deployment 2025-12-08T17:55:29.223533422+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=iqQcl namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:29.295543388+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=YMpKc namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:29.295706003+00:00 stderr F E1208 17:55:29.295688 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:29.308787061+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=n5mqx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:29.313076989+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=rvjN1 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:29.373638860+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=w9t0G namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:29.389291110+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=x5yYI namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:29.395697936+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="requirements were not met" csv=cert-manager-operator.v1.18.0 id=W47CZ namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:29.395748927+00:00 stderr F E1208 17:55:29.395710 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"cert-manager-operator/cert-manager-operator.v1.18.0\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:55:29.526117085+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=4rNjB namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:29.530494444+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:29Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"openshift-cert-manager-operator.cert-manager-operator"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"openshift-cert-manager-operator.cert-manager-operator\": the object has been modified; please apply your changes to the latest version and try 
again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:29.572852218+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=LTb0f namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:29.590426860+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=c+jst namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:29.644914114+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="scheduling ClusterServiceVersion for install" csv=cert-manager-operator.v1.18.0 id=gaP7g namespace=cert-manager-operator phase=Pending 2025-12-08T17:55:29.645285124+00:00 stderr F I1208 17:55:29.645234 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"cert-manager-operator", Name:"cert-manager-operator.v1.18.0", UID:"e7c40b97-cb26-4848-968d-4a92c8291cb2", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"42036", FieldPath:""}): type: 'Normal' reason: 'AllRequirementsMet' all requirements found, attempting install 2025-12-08T17:55:29.700622553+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=H10mW namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:29.742508612+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="No api or webhook descs to add CA to" 2025-12-08T17:55:29.758231784+00:00 stderr F I1208 17:55:29.758013 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"cert-manager-operator", Name:"cert-manager-operator.v1.18.0", UID:"e7c40b97-cb26-4848-968d-4a92c8291cb2", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"42137", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' waiting for install components to report healthy 2025-12-08T17:55:29.792488144+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=tt2ob namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:29.895502981+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=0hxRS namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:29.895636894+00:00 stderr F I1208 17:55:29.895603 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"cert-manager-operator", 
Name:"cert-manager-operator.v1.18.0", UID:"e7c40b97-cb26-4848-968d-4a92c8291cb2", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"42145", FieldPath:""}): type: 'Normal' reason: 'InstallWaiting' installing: waiting for deployment cert-manager-operator-controller-manager to become ready: deployment "cert-manager-operator-controller-manager" not available: Deployment does not have minimum availability. 2025-12-08T17:55:29.993834359+00:00 stderr F time="2025-12-08T17:55:29Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=7m483 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.033656481+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=UM1ok namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.044979842+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=OLlpJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:30.053148976+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=krbXI namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.053148976+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=/aN29 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.127815436+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=BIRKs namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.228717294+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=trix4 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.253453062+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=V+Zbk namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:30.267148768+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=SK1bj namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.288470464+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=yFCa8 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.316244256+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=pap62 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.326543889+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=fA9Sp namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:30.329842769+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=B01pp namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.335132664+00:00 stderr F time="2025-12-08T17:55:30Z" level=info 
msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=zgMAQ namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.357806236+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=9npAc namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.370203036+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=lDMR3 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.395887341+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=IBu1b namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:30.405295710+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=+ozqh namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.413985828+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=VAwtp namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.440993108+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=ruFES namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.454860699+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=+eKar namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.463526897+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=So0gl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:30.480032280+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=5RkI1 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.492634796+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=j2sRe namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.505834558+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=CbgD2 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.536409116+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=5FC2E namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.539478792+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=ebe/U namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:30.545487476+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=W4jPQ namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.551027588+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" 
csv=cluster-observability-operator.v1.3.0 id=qBjWV namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.577975877+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=Rif4P namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.591478028+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=ZIfJ2 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.607333643+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=+16j7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:30.617786040+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=cAkyF namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.629153992+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=N4QZV namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.724388555+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=N9rmw namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.758873081+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=qGXG1 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.766304306+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=Lx3Ba namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:30.773643946+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=r81LE namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:30.773904023+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=7+i+R namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.921686719+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=O9b6i namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:30.939166418+00:00 stderr F time="2025-12-08T17:55:30Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=9w+FE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:31.021763595+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=A7uoY namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.026686150+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=TYSrC namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.128918795+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=qaMdz 
namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.144000249+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=fPMDs namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.170278740+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=mrK4g namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.176981344+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=BcTco namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:31.184290475+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=St60D namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.223939303+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=t8rqA namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.291706922+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=ZbSor namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:31.294614062+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=+G2Fh namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.305356517+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=nzE2S namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.427621852+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=z0oAo namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.466828678+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=f5Aut namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.483007771+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=Kq+MM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:31.489816098+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=tQNMW namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.505222701+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=M6Y4P namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.534155965+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=HLcS9 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.548402046+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=mYNd1 namespace=openshift-operators 
phase=Installing strategy=deployment 2025-12-08T17:55:31.557392312+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=evVCF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:31.565540396+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=FI+5/ namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.583327994+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=oou+f namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.596839935+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=0g2Mr namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.633887351+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=Dag6W namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:31.638017315+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=VOsBq namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.646426256+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=lRHhm namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.673092267+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=KfNGP namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.689396245+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=A93pg namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.696508700+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=BGP66 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:31.702655809+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=CBDZG namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.715358077+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=/c8jv namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.728111517+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=8fkYK namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.761545694+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=WZImS namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.764912707+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=0HO// namespace=service-telemetry phase=Installing strategy=deployment 
2025-12-08T17:55:31.773151393+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=1dfTJ namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.775267581+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=DGHd/ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.811813194+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=Cl4de namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.833224861+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=a8+UE namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:31.935576580+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=td+3d namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:31.942023706+00:00 stderr F time="2025-12-08T17:55:31Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=BQFw2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:32.035063719+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=jck/m namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.038581346+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=iapPG namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.081827053+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=Lz1iC namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.097996577+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=uvUvn namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.133357607+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=960Kv namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.142767645+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=hNOtr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:32.147654739+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=FDmQ+ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.151068423+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=1hYdV namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.185484247+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=qCmCk namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.200936401+00:00 stderr F 
time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=VnwfN namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.229203597+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=lPskn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:32.238828651+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=DoPKA namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.250405808+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=LJTMO namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.352767918+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=D9voN namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.367745048+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=eswS8 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.376482288+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=efUxF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:32.382120702+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=S2HXL namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.396550308+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=k72aE namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.411082678+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=X6Z7Z namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.448675039+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=9+IS9 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.533139386+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=Y29xp namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.630779476+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=Q+Wqn namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.640635217+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=tguB3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:32.647700680+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=qTyPr namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.649161101+00:00 stderr F time="2025-12-08T17:55:32Z" level=info 
msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=B/jrt namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.696695155+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=pBldV namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.713591268+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=0wrP9 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.723979683+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=qiflL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:32.730631206+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=pumK8 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.742716497+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=VIg5I namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.763105196+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=0xQiP namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.794287322+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=Hh3Vg namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.795018403+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=y/Qdx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:32.802862218+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=40Ex9 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.807622388+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=giITl namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.836930193+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=LOjFT namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.852221002+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=VZAZK namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.859906503+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=W/6op namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:32.866639737+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=NItgG namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.885640539+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" 
csv=cert-manager-operator.v1.18.0 id=5CN9W namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.899493309+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=In5Ea namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.928238768+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=GzaTM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:32.931672363+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=fUd+I namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.947399644+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=U8rRd namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.976951935+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=mMAxB namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:32.991988987+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=as5/Q namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:32.997410476+00:00 stderr F time="2025-12-08T17:55:32Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=1f2Bb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:33.005530749+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=tV21h namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.033623730+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=0dNh8 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.049978449+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=UN5Ye namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.074168752+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=sa6sF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:33.081655578+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=dNg5a namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.088512216+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=zdy8F namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.117526912+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=hYyRf namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.129957663+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 
id=jjCFD namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.142797026+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=F4zbh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:33.150359913+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=OAroJ namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.169574110+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=eTXb4 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.182451634+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=N3UTC namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.207322206+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=/Iirw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:33.208793876+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=eNGha namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.222515913+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=m3L96 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.343365559+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=ryPkZ namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.392753534+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=9qiXj namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.440251527+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=X21ew namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.446750105+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=gluAY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:33.542696629+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=v2gMW namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.548257362+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=MLwPP namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.577167075+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=hpVW3 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.636116392+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=Jr954 namespace=openshift-operators 
phase=Installing strategy=deployment 2025-12-08T17:55:33.662887467+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=OARS/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:33.671854843+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=cleqD namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.689175898+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=KKPNw namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.769972875+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=6yCMh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:33.772413832+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=AeKyQ namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.784801532+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=fKD++ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.823064922+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=2FgTL namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.835987136+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=Pn9YI namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.847564244+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=abk/r namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:33.854999898+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=U26Us namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.864701665+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=G/UZp namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.877862415+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=tuoVd namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:33.905994177+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=M3ouG namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.915031116+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=3zpNS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:33.923143917+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=B3WNx namespace=openshift-operators phase=Installing 
strategy=deployment 2025-12-08T17:55:33.925594425+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=YuQf6 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.965429829+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=gC3j3 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:33.978942279+00:00 stderr F time="2025-12-08T17:55:33Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=ZHa9f namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:34.000566452+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=0tgpB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:34.014717621+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=lH4/c namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:34.037403943+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=vTWT/ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:34.075902470+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=k7d3P namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:34.090005467+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=jsJQC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:34.095665572+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=QyNXr namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:34.117707077+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=jENTQ namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:34.127055773+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=h1wuB namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:34.254717907+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=LaSN3 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:34.415727364+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=o/4bl namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:34.615040753+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=TIGpT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:34.618491078+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=ycNWB namespace=openshift-operators phase=Installing strategy=deployment 
2025-12-08T17:55:34.633216192+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=NsEkc namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:34.673163458+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=oCJ47 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:34.699004007+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=gGF4d namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:34.816344827+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=U+BHs namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:34.923756595+00:00 stderr F time="2025-12-08T17:55:34Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=TYVQN namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:34.927914018+00:00 stderr F I1208 17:55:34.924511 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operators", Name:"cluster-observability-operator.v1.3.0", UID:"63202f10-d8fe-4d4a-a00f-7a183b8af0c6", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41614", FieldPath:""}): type: 'Normal' reason: 'InstallWaiting' installing: waiting for deployment perses-operator to become ready: deployment "perses-operator" not available: Deployment does not have minimum availability. 2025-12-08T17:55:35.020020156+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=Dxumu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:35.024407267+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=CIjF7 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:35.222498362+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=JhgmK namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:35.320819009+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=U6gWW namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:35.431611380+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=9oqUX namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:35.515369438+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=nPWeE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:35.526941016+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=FwZta namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:35.721593347+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=siiYC namespace=openshift-operators 
phase=Installing strategy=deployment 2025-12-08T17:55:35.841817606+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=g3NJ5 namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:35.942862678+00:00 stderr F time="2025-12-08T17:55:35Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=LGDCH namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:36.023953623+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=ZlP97 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:36.039020777+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=Fr8Et namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:36.426379996+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=fnHZ7 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:36.529331851+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=4/AFR namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:36.683812900+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=Gp3id namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:36.757250635+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=yfLYu namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:36.837058766+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=X94Gg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:36.931382333+00:00 stderr F time="2025-12-08T17:55:36Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=xfSAG namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:37.153338574+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=1f4Nh namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:37.260003601+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=JkgRh namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:37.352964412+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=M+MWN namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:37.354962216+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=fbpR4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:37.450557860+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=quJWc namespace=cert-manager-operator phase=Installing strategy=deployment 
2025-12-08T17:55:37.561732310+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=6J+x3 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:37.664090559+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=qZHmC namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:37.854414771+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=ByP43 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:37.942615041+00:00 stderr F time="2025-12-08T17:55:37Z" level=info msg="install strategy successful" csv=cert-manager-operator.v1.18.0 id=UBcZu namespace=cert-manager-operator phase=Installing strategy=deployment 2025-12-08T17:55:37.943128615+00:00 stderr F I1208 17:55:37.943092 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"cert-manager-operator", Name:"cert-manager-operator.v1.18.0", UID:"e7c40b97-cb26-4848-968d-4a92c8291cb2", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"42164", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' install strategy completed with no errors 2025-12-08T17:55:38.150830205+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=pjQAd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:38.151652527+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=L6q5N namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:38.750198842+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=2a30j namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:38.754200871+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=IgHQJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:38.861083014+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:38Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try 
again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:38.874052840+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=5wSRY namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:38.927600959+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=8OhEN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:38.949229943+00:00 stderr F time="2025-12-08T17:55:38Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=GPvDN namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:39.145847528+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=uu3gs namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:39.158920376+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:39Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:39.478009252+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=13c3k 
namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:39.481757465+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=hP+oE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:39.680851028+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=u8tuu namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:39.980701946+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=80dRL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:39.993010124+00:00 stderr F time="2025-12-08T17:55:39Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=PZ5zr namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:40.043660303+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=qP7xa namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:40.073437841+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=dAQnG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:40.097386457+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:40Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:40.110998141+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=/Z14i namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:40.156789478+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=W4qli namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:40.157119277+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" 
csv=elasticsearch-eck-operator-certified.v3.2.0 id=mj41g namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:40.206308596+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=JW2uS namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:40.228952897+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=CI5vG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:40.328758777+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=lzSJT namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:40.375238182+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=PiZuR namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:40.381470073+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:40Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:40.382134691+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=oVbjG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:40.724460854+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=prkkD namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:40.827190903+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=xOvEU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:40.830785232+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=Ho6rK namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:40.959447022+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install 
strategy successful" csv=cluster-observability-operator.v1.3.0 id=csKUS namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:40.965957350+00:00 stderr F time="2025-12-08T17:55:40Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=VsbYU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:41.143604016+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=NlOqV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:41.153682202+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=NZ/so namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:41.333695092+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=FffnN namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:41.360355793+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=fqpXb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:41.439251368+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=Ur//F namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:41.482695520+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=buoFq namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:41.572271818+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=4XqWt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:41.579763324+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=e8+dT namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:41.747920708+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=xKcRP namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:41.762303423+00:00 stderr F time="2025-12-08T17:55:41Z" level=info msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=lQnkt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:42.041289358+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=ECnJK namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:42.149479867+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=LdePA namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:42.254529489+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=qqTP3 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:42.263240259+00:00 stderr F time="2025-12-08T17:55:42Z" level=info 
msg="install strategy successful" csv=elasticsearch-eck-operator-certified.v3.2.0 id=hHGvM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:55:42.263576998+00:00 stderr F I1208 17:55:42.263527 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"elasticsearch-eck-operator-certified.v3.2.0", UID:"2d68c5b9-c8e1-4b99-aac1-5ea5bf6beee7", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"41876", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' install strategy completed with no errors 2025-12-08T17:55:42.351437788+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=jPbt3 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:42.407793694+00:00 stderr F time="2025-12-08T17:55:42Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=8AcE8 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:43.145690993+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=9dBbo namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:43.261804289+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=8znzT namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:43.300018637+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=c4zMk namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:43.340979971+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=2MP+J namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:43.502911434+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=4OoBg namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:43.541179725+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=YadxH namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:43.599119224+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=FU6my namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:43.631779040+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=CM6CX namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:43.666900185+00:00 stderr F time="2025-12-08T17:55:43Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=PMzJB namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.010389899+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=0kLrc namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.120754128+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy successful" 
csv=cluster-observability-operator.v1.3.0 id=Deg3A namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.178001189+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=XwPz5 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.323834590+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=DHJHo namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.370265075+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=dhoxr namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.481620900+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=rfKfh namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.584075371+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=EtMMF namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.685008271+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=V+cqg namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.873123812+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=3W+BJ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.904303868+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:44Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:55:44.924402330+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=aibgT namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:44.979564003+00:00 stderr F time="2025-12-08T17:55:44Z" level=info msg="install strategy 
successful" csv=cluster-observability-operator.v1.3.0 id=PIQCp namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.119958575+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=cQmCA namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.156069226+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=iSWVZ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.196043143+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=zV8b0 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.250643152+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=eonV4 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.292423568+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=BN2fG namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.332326163+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=gfG+E namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.370561972+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=4r10/ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.426054585+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=Yp0FR namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.533805132+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=7BAhF namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.570258782+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=AWUKK namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.635747458+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=63Avc namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.681485254+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=KcmfJ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.726610202+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=sfZtn namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.779839783+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=gxPxQ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.833036143+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" 
csv=cluster-observability-operator.v1.3.0 id=uBppO namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.926729743+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=LE8bq namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:45.962748482+00:00 stderr F time="2025-12-08T17:55:45Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=ve6qt namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.028737243+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=N0MFt namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.079030253+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=DDQCH namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.129048895+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=7yZBm namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.180488536+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=ZkTHu namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.218187301+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=3BIme namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.257479288+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=W/kRO namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.300144270+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=GtPqQ namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.336658362+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=dI31y namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.372489564+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=rMAM1 namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.404417511+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=URsUm namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.545117992+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=+Pc8F namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.598117346+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=tgXbc namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.698751928+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" 
csv=cluster-observability-operator.v1.3.0 id=KSM4R namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.799928344+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=+ZYey namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.992921620+00:00 stderr F time="2025-12-08T17:55:46Z" level=info msg="install strategy successful" csv=cluster-observability-operator.v1.3.0 id=6DEfU namespace=openshift-operators phase=Installing strategy=deployment 2025-12-08T17:55:46.994037440+00:00 stderr F I1208 17:55:46.993974 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"openshift-operators", Name:"cluster-observability-operator.v1.3.0", UID:"63202f10-d8fe-4d4a-a00f-7a183b8af0c6", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"42268", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' install strategy completed with no errors 2025-12-08T17:55:47.366956013+00:00 stderr F {"level":"error","ts":"2025-12-08T17:55:47Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"cluster-observability-operator.openshift-operators"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"cluster-observability-operator.openshift-operators\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:38.521932001+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:38Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"amq7-interconnect-operator.v1.10.20","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"amq7-interconnect-operator.v1.10.20","reconcileID":"e2058a6b-f0f5-4ba8-bf9f-e1a87d39ec86","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:38.531472369+00:00 stderr 
F {"level":"error","ts":"2025-12-08T17:56:38Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"amq7-interconnect-operator.v1.10.20","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"amq7-interconnect-operator.v1.10.20","reconcileID":"b3283e8a-7a29-4cd9-9c5e-b578b88a6d1a","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:38.531557431+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:38Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"amq7-interconnect-operator.v1.10.20","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"amq7-interconnect-operator.v1.10.20","reconcileID":"4751ebd7-aadd-4fb4-a80b-622e261686f7","error":"Deployment.apps \"interconnect-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:38.538244046+00:00 stderr F time="2025-12-08T17:56:38Z" level=warning msg="error adding operatorgroup annotations" csv=amq7-interconnect-operator.v1.10.20 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" id=LoCQd namespace=service-telemetry opgroup=service-telemetry-operator-group phase= 2025-12-08T17:56:38.538244046+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="operatorgroup incorrect" csv=amq7-interconnect-operator.v1.10.20 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" id=LoCQd namespace=service-telemetry phase= 2025-12-08T17:56:38.538244046+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="not in operatorgroup namespace" csv=amq7-interconnect-operator.v1.10.20 id=PvQZu namespace=service-telemetry phase= 2025-12-08T17:56:38.538244046+00:00 stderr F E1208 17:56:38.538027 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your 
changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:38.541934052+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:38Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"amq7-interconnect-operator.v1.10.20","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"amq7-interconnect-operator.v1.10.20","reconcileID":"692dd685-ab2a-45c3-a4c0-33a664881738","error":"Deployment.apps \"interconnect-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:38.553612637+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="operatorgroup incorrect" csv=amq7-interconnect-operator.v1.10.20 error="" id=5LxVA namespace=service-telemetry phase= 2025-12-08T17:56:38.553675028+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="not in operatorgroup namespace" csv=amq7-interconnect-operator.v1.10.20 id=ERsOo namespace=service-telemetry phase= 2025-12-08T17:56:38.563164346+00:00 stderr F time="2025-12-08T17:56:38Z" level=warning msg="error adding operatorgroup annotations" csv=amq7-interconnect-operator.v1.10.20 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" id=vE1u+ namespace=service-telemetry opgroup=service-telemetry-operator-group phase= 2025-12-08T17:56:38.563233918+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="operatorgroup incorrect" csv=amq7-interconnect-operator.v1.10.20 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" id=vE1u+ namespace=service-telemetry phase= 2025-12-08T17:56:38.563268649+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="not in operatorgroup namespace" csv=amq7-interconnect-operator.v1.10.20 id=fTMBU namespace=service-telemetry phase= 2025-12-08T17:56:38.563344331+00:00 stderr F E1208 17:56:38.563327 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:38.563388812+00:00 stderr F time="2025-12-08T17:56:38Z" level=warning msg="error adding operatorgroup annotations" csv=amq7-interconnect-operator.v1.10.20 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" namespace=service-telemetry operatorGroup=service-telemetry-operator-group 
2025-12-08T17:56:38.563427663+00:00 stderr F time="2025-12-08T17:56:38Z" level=warning msg="failed to annotate CSVs in operatorgroup after group change" error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" namespace=service-telemetry operatorGroup=service-telemetry-operator-group 2025-12-08T17:56:38.563485995+00:00 stderr F E1208 17:56:38.563474 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator-group\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:38.575959740+00:00 stderr F E1208 17:56:38.575863 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: could not update operatorgroups olm.providedAPIs annotation: Operation cannot be fulfilled on operatorgroups.operators.coreos.com \"service-telemetry-operator-group\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:38.578590808+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="scheduling ClusterServiceVersion for requirement verification" csv=amq7-interconnect-operator.v1.10.20 id=w6n7g namespace=service-telemetry phase= 2025-12-08T17:56:38.578590808+00:00 stderr F I1208 17:56:38.578270 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"amq7-interconnect-operator.v1.10.20", UID:"e07dae70-f82c-4987-ac58-3cbd781597ee", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43752", FieldPath:""}): type: 'Normal' reason: 'RequirementsUnknown' requirements not yet checked 2025-12-08T17:56:38.584771709+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:38Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"amq7-interconnect-operator.v1.10.20","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"amq7-interconnect-operator.v1.10.20","reconcileID":"96d40891-64ae-4c34-bf40-46fe254c49c1","error":"Deployment.apps \"interconnect-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:38.592632955+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="scheduling ClusterServiceVersion for requirement verification" csv=amq7-interconnect-operator.v1.10.20 id=7cy8s namespace=service-telemetry phase= 2025-12-08T17:56:38.592632955+00:00 stderr F I1208 17:56:38.590357 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"amq7-interconnect-operator.v1.10.20", 
UID:"e07dae70-f82c-4987-ac58-3cbd781597ee", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43752", FieldPath:""}): type: 'Normal' reason: 'RequirementsUnknown' requirements not yet checked 2025-12-08T17:56:38.602179604+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" csv=amq7-interconnect-operator.v1.10.20 id=zmjVX namespace=service-telemetry phase= 2025-12-08T17:56:38.602179604+00:00 stderr F E1208 17:56:38.602148 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:38.630950344+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="requirements were not met" csv=amq7-interconnect-operator.v1.10.20 id=1s1qI namespace=service-telemetry phase=Pending 2025-12-08T17:56:38.630950344+00:00 stderr F I1208 17:56:38.630360 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"amq7-interconnect-operator.v1.10.20", UID:"e07dae70-f82c-4987-ac58-3cbd781597ee", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43757", FieldPath:""}): type: 'Normal' reason: 'RequirementsNotMet' one or more requirements couldn't be found 2025-12-08T17:56:38.645891954+00:00 stderr F E1208 17:56:38.645808 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:38.659719224+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="requirements were not met" csv=amq7-interconnect-operator.v1.10.20 id=W71C8 namespace=service-telemetry phase=Pending 2025-12-08T17:56:38.659719224+00:00 stderr F E1208 17:56:38.659662 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:38.665809723+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:38Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"amq7-interconnect-operator.v1.10.20","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"amq7-interconnect-operator.v1.10.20","reconcileID":"71b72985-4b7c-4eaf-9905-2af49fb0edf1","error":"Deployment.apps \"interconnect-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:38.671755909+00:00 
stderr F time="2025-12-08T17:56:38Z" level=info msg="requirements were not met" csv=amq7-interconnect-operator.v1.10.20 id=KLyfQ namespace=service-telemetry phase=Pending 2025-12-08T17:56:38.671810500+00:00 stderr F E1208 17:56:38.671771 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:38.692405647+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="requirements were not met" csv=amq7-interconnect-operator.v1.10.20 id=oaD+x namespace=service-telemetry phase=Pending 2025-12-08T17:56:38.692405647+00:00 stderr F E1208 17:56:38.692391 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:38.709810971+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="requirements were not met" csv=amq7-interconnect-operator.v1.10.20 id=1Uvrb namespace=service-telemetry phase=Pending 2025-12-08T17:56:38.709810971+00:00 stderr F E1208 17:56:38.709432 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:38.785181877+00:00 stderr F time="2025-12-08T17:56:38Z" level=info msg="requirements were not met" csv=amq7-interconnect-operator.v1.10.20 id=7nVUB namespace=service-telemetry phase=Pending 2025-12-08T17:56:38.785231258+00:00 stderr F E1208 17:56:38.785192 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:38.827154892+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:38Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"amq7-interconnect-operator.v1.10.20","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"amq7-interconnect-operator.v1.10.20","reconcileID":"e0fdb089-1f5d-415e-920b-b62078a8c7e6","error":"Deployment.apps \"interconnect-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:39.099262518+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="requirements were not met" csv=amq7-interconnect-operator.v1.10.20 id=9GEQw namespace=service-telemetry phase=Pending 2025-12-08T17:56:39.099262518+00:00 stderr F E1208 17:56:39.099063 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:39.124767573+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="requirements were not met" csv=amq7-interconnect-operator.v1.10.20 id=D6eNb namespace=service-telemetry phase=Pending 2025-12-08T17:56:39.148549374+00:00 stderr F 
{"level":"error","ts":"2025-12-08T17:56:39Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"amq7-interconnect-operator.v1.10.20","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"amq7-interconnect-operator.v1.10.20","reconcileID":"d881e716-b766-4e40-ac01-61c1f0d436e5","error":"Deployment.apps \"interconnect-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:39.511001157+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="scheduling ClusterServiceVersion for install" csv=amq7-interconnect-operator.v1.10.20 id=Aum4/ namespace=service-telemetry phase=Pending 2025-12-08T17:56:39.511001157+00:00 stderr F I1208 17:56:39.509771 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"amq7-interconnect-operator.v1.10.20", UID:"e07dae70-f82c-4987-ac58-3cbd781597ee", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43762", FieldPath:""}): type: 'Normal' reason: 'AllRequirementsMet' all requirements found, attempting install 2025-12-08T17:56:39.542478448+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:39Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"service-telemetry-operator.v1.5.1765147436","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"service-telemetry-operator.v1.5.1765147436","reconcileID":"249d249f-6c8f-46aa-8915-e4ebd62da39d","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:39.543995528+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="No api or webhook descs to add CA to" 2025-12-08T17:56:39.543995528+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:39Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"46df0fbb-23e2-4c72-988b-dcdf9908b37b"} 2025-12-08T17:56:39.543995528+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:39Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"46df0fbb-23e2-4c72-988b-dcdf9908b37b","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:39.565619032+00:00 stderr F I1208 17:56:39.565556 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"amq7-interconnect-operator.v1.10.20", UID:"e07dae70-f82c-4987-ac58-3cbd781597ee", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43775", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' waiting for install components to report healthy 2025-12-08T17:56:39.567140322+00:00 stderr F time="2025-12-08T17:56:39Z" level=warning msg="error adding operatorgroup annotations" csv=service-telemetry-operator.v1.5.1765147436 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" id=6y0DU namespace=service-telemetry opgroup=service-telemetry-operator-group phase= 2025-12-08T17:56:39.567191463+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="operatorgroup incorrect" csv=service-telemetry-operator.v1.5.1765147436 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" id=6y0DU namespace=service-telemetry phase= 2025-12-08T17:56:39.567220404+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="not in operatorgroup namespace" csv=service-telemetry-operator.v1.5.1765147436 id=tpKJh namespace=service-telemetry phase= 2025-12-08T17:56:39.567279105+00:00 stderr F E1208 17:56:39.567266 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator.v1.5.1765147436\" failed: Operation cannot be fulfilled on 
clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:39.580888580+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="operatorgroup incorrect" csv=service-telemetry-operator.v1.5.1765147436 error="" id=4jwLx namespace=service-telemetry phase= 2025-12-08T17:56:39.580888580+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="not in operatorgroup namespace" csv=service-telemetry-operator.v1.5.1765147436 id=Q3zVJ namespace=service-telemetry phase= 2025-12-08T17:56:39.589300580+00:00 stderr F time="2025-12-08T17:56:39Z" level=warning msg="error adding operatorgroup annotations" csv=service-telemetry-operator.v1.5.1765147436 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" namespace=service-telemetry operatorGroup=service-telemetry-operator-group 2025-12-08T17:56:39.589300580+00:00 stderr F time="2025-12-08T17:56:39Z" level=warning msg="failed to annotate CSVs in operatorgroup after group change" error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" namespace=service-telemetry operatorGroup=service-telemetry-operator-group 2025-12-08T17:56:39.589343641+00:00 stderr F E1208 17:56:39.589321 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator-group\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:39.589402562+00:00 stderr F time="2025-12-08T17:56:39Z" level=warning msg="error adding operatorgroup annotations" csv=service-telemetry-operator.v1.5.1765147436 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" id=hLDPc namespace=service-telemetry opgroup=service-telemetry-operator-group phase= 2025-12-08T17:56:39.591948129+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="operatorgroup incorrect" csv=service-telemetry-operator.v1.5.1765147436 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" id=hLDPc namespace=service-telemetry phase= 2025-12-08T17:56:39.591948129+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="not in operatorgroup namespace" csv=service-telemetry-operator.v1.5.1765147436 id=V4T0c namespace=service-telemetry phase= 2025-12-08T17:56:39.591948129+00:00 stderr F E1208 17:56:39.589843 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator.v1.5.1765147436\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" 
logger="UnhandledError" 2025-12-08T17:56:39.591948129+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:39Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"service-telemetry-operator.v1.5.1765147436","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"service-telemetry-operator.v1.5.1765147436","reconcileID":"dbe7e342-805c-42e5-9eb0-1fd1c7afd55d","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:39.603764577+00:00 stderr F time="2025-12-08T17:56:39Z" level=warning msg="error adding operatorgroup annotations" csv=service-telemetry-operator.v1.5.1765147436 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" namespace=service-telemetry operatorGroup=service-telemetry-operator-group 2025-12-08T17:56:39.603799318+00:00 stderr F time="2025-12-08T17:56:39Z" level=warning msg="failed to annotate CSVs in operatorgroup after group change" error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" namespace=service-telemetry operatorGroup=service-telemetry-operator-group 2025-12-08T17:56:39.603828928+00:00 stderr F E1208 17:56:39.603811 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator-group\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"service-telemetry-operator.v1.5.1765147436\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:39.648862963+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="No api or webhook descs to add CA to" 2025-12-08T17:56:39.666580836+00:00 stderr F I1208 17:56:39.666536 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"amq7-interconnect-operator.v1.10.20", UID:"e07dae70-f82c-4987-ac58-3cbd781597ee", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43775", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' waiting for install components to report healthy 2025-12-08T17:56:39.683134407+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" csv=amq7-interconnect-operator.v1.10.20 id=v4q0w namespace=service-telemetry phase=InstallReady 2025-12-08T17:56:39.683134407+00:00 stderr F E1208 17:56:39.681761 1 
queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/amq7-interconnect-operator.v1.10.20\" failed: error updating ClusterServiceVersion status: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"amq7-interconnect-operator.v1.10.20\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:39.692179533+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:39Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"service-telemetry-operator.v1.5.1765147436","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"service-telemetry-operator.v1.5.1765147436","reconcileID":"bc943a0f-641c-4e20-8fb7-2bca1166bb05","error":"Deployment.apps \"service-telemetry-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:39.692338677+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:39Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"service-telemetry-operator.v1.5.1765147436","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"service-telemetry-operator.v1.5.1765147436","reconcileID":"88ffdeba-55d1-4aa3-b6e0-6b6aa98e4dac","error":"Deployment.apps \"service-telemetry-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:39.718483339+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:39Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"service-telemetry-operator.v1.5.1765147436","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"service-telemetry-operator.v1.5.1765147436","reconcileID":"c38835d4-b8b4-4d92-83c5-b55c2a55c415","error":"Deployment.apps \"service-telemetry-operator\" not 
found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:39.720892112+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Y6oL9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:39.721162119+00:00 stderr F I1208 17:56:39.721094 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"amq7-interconnect-operator.v1.10.20", UID:"e07dae70-f82c-4987-ac58-3cbd781597ee", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43787", FieldPath:""}): type: 'Normal' reason: 'InstallWaiting' installing: waiting for deployment interconnect-operator to become ready: deployment "interconnect-operator" not available: Deployment does not have minimum availability. 2025-12-08T17:56:39.733945443+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="scheduling ClusterServiceVersion for requirement verification" csv=service-telemetry-operator.v1.5.1765147436 id=cGSvp namespace=service-telemetry phase= 2025-12-08T17:56:39.734209879+00:00 stderr F I1208 17:56:39.734174 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"service-telemetry-operator.v1.5.1765147436", UID:"768e4f34-61bc-41d7-a3c5-f46daeb2b4b8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43789", FieldPath:""}): type: 'Normal' reason: 'RequirementsUnknown' requirements not yet checked 2025-12-08T17:56:39.799623795+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:39Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"service-telemetry-operator.v1.5.1765147436","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"service-telemetry-operator.v1.5.1765147436","reconcileID":"7c642e84-12cc-47a0-9ff4-d49f2115d1b8","error":"Deployment.apps \"service-telemetry-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:39.820647594+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=KsNeg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:39.827794450+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="requirements were not met" csv=service-telemetry-operator.v1.5.1765147436 id=nMnB1 namespace=service-telemetry 
phase=Pending 2025-12-08T17:56:39.827989305+00:00 stderr F I1208 17:56:39.827945 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"service-telemetry-operator.v1.5.1765147436", UID:"768e4f34-61bc-41d7-a3c5-f46daeb2b4b8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43814", FieldPath:""}): type: 'Normal' reason: 'RequirementsNotMet' one or more requirements couldn't be found 2025-12-08T17:56:39.837743040+00:00 stderr F E1208 17:56:39.837696 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator.v1.5.1765147436\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:39.921480324+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=tPRJh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:39.929519943+00:00 stderr F time="2025-12-08T17:56:39Z" level=info msg="requirements were not met" csv=service-telemetry-operator.v1.5.1765147436 id=mvKok namespace=service-telemetry phase=Pending 2025-12-08T17:56:39.929562625+00:00 stderr F E1208 17:56:39.929541 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator.v1.5.1765147436\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:39.960392949+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:39Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"service-telemetry-operator.v1.5.1765147436","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"service-telemetry-operator.v1.5.1765147436","reconcileID":"9bcac3c8-00eb-4cbc-8b12-fb404c6ae2ad","error":"Deployment.apps \"service-telemetry-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:40.024452679+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=wlHCV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.059888993+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="requirements were not met" csv=service-telemetry-operator.v1.5.1765147436 id=x3ZJ3 namespace=service-telemetry phase=Pending 2025-12-08T17:56:40.059970105+00:00 stderr F E1208 17:56:40.059919 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator.v1.5.1765147436\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:40.068437936+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fY6lh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.075955183+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" 
csv=amq7-interconnect-operator.v1.10.20 id=niRFv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.082555315+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=G6c/Z namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.090685177+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="requirements were not met" csv=service-telemetry-operator.v1.5.1765147436 id=sJlNb namespace=service-telemetry phase=Pending 2025-12-08T17:56:40.090731498+00:00 stderr F E1208 17:56:40.090705 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator.v1.5.1765147436\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:40.093749847+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=x0T1v namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.116568292+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=5xMqY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.126929762+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ZHodj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.136271195+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=HddGl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.150510337+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="requirements were not met" csv=service-telemetry-operator.v1.5.1765147436 id=GetYg namespace=service-telemetry phase=Pending 2025-12-08T17:56:40.150553538+00:00 stderr F E1208 17:56:40.150521 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator.v1.5.1765147436\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:40.216464887+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=dpyiZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.224075936+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=tP+On namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.231287994+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=zhoL5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.239649572+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=X/3nC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.246680036+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="requirements were not met" csv=service-telemetry-operator.v1.5.1765147436 id=Zehv6 namespace=service-telemetry phase=Pending 2025-12-08T17:56:40.246779088+00:00 stderr F E1208 17:56:40.246765 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator.v1.5.1765147436\" failed: 
requirements were not met" logger="UnhandledError" 2025-12-08T17:56:40.254706195+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=SuKZt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.261922973+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=lDjr1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.267288973+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=M9Vf3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.273370421+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=sAupE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.280825276+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:40Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"service-telemetry-operator.v1.5.1765147436","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"service-telemetry-operator.v1.5.1765147436","reconcileID":"72220898-e183-493d-a9c5-beb13a9bcca0","error":"Deployment.apps \"service-telemetry-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:40.316858586+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Meb/S namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.324901036+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=PnwMn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.416368301+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="requirements were not met" csv=service-telemetry-operator.v1.5.1765147436 id=94b2+ namespace=service-telemetry phase=Pending 2025-12-08T17:56:40.416368301+00:00 stderr F E1208 17:56:40.416359 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator.v1.5.1765147436\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:40.425183451+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=oN7Tn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.431611639+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=0yaAt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.438288493+00:00 stderr F time="2025-12-08T17:56:40Z" level=info 
msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=J3hdB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.444271229+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Ii1pC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.449894355+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=VUxfk namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.455668866+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=3+JUi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.461822506+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=v/qT7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.467645289+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=IwOTj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.473391339+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=cUM4Q namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.479519059+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=gyt7p namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.490498765+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="requirements were not met" csv=service-telemetry-operator.v1.5.1765147436 id=dxiSa namespace=service-telemetry phase=Pending 2025-12-08T17:56:40.490526296+00:00 stderr F E1208 17:56:40.490503 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator.v1.5.1765147436\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:40.526026841+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=yrzFk namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.533192608+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=eln/g namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.539669167+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=zLGxn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.546519016+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=lX/Ld namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.560453400+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=VXo17 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.567724129+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" 
csv=amq7-interconnect-operator.v1.10.20 id=fN9TG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.574605139+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Rgkm/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.582194236+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=OsbVm namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.594118198+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=UI8zO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.602220539+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=hhxcY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.612183848+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=bC6cu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.620446854+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=4DjvG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.627446707+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=urBqy namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.634368077+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=7SGl3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.642451378+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=BmYUu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.647267584+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=MJtoi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.655136099+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=/2xKN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.661781082+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=UzPfz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.668007734+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=KxWm+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.673967550+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=E4Byr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.680003337+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Cc41f 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.687029761+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=hPLLc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.697602997+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=gwbIe namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.706501599+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=q2vip namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.713560402+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=UCvKS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.720058202+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=vn9lu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.726501990+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=DWfWb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.734626162+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=b6bWY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.743282858+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=wFjaI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.745443274+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="requirements were not met" csv=service-telemetry-operator.v1.5.1765147436 id=1EV5h namespace=service-telemetry phase=Pending 2025-12-08T17:56:40.750411164+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=DQAYu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.757347955+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=y2J9u namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.764317796+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=eL4AU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.771958676+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=9znXS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.779775000+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Tz/77 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.787741808+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ULMC8 namespace=service-telemetry phase=Installing strategy=deployment 
2025-12-08T17:56:40.795780647+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=rNHim namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.804960807+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=L1Kol namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.816091397+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=v5IpU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.824297461+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=L9liT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.831896310+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=cq12Y namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.838274896+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=GoxFf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.844630061+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=OgasC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.851791128+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=7T09t namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.857960219+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=xfvrl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.863566105+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=sibw4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.869375767+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=UwBxC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.875760593+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=aiJ3t namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.884045389+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=K3et4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.905919440+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="scheduling ClusterServiceVersion for install" csv=service-telemetry-operator.v1.5.1765147436 id=1e6fd namespace=service-telemetry phase=Pending 2025-12-08T17:56:40.914060942+00:00 stderr F I1208 17:56:40.911045 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"service-telemetry-operator.v1.5.1765147436", UID:"768e4f34-61bc-41d7-a3c5-f46daeb2b4b8", 
APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43816", FieldPath:""}): type: 'Normal' reason: 'AllRequirementsMet' all requirements found, attempting install 2025-12-08T17:56:40.922498042+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:40Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"service-telemetry-operator.v1.5.1765147436","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"service-telemetry-operator.v1.5.1765147436","reconcileID":"8a9c0ff7-0a02-4763-bd96-e2265c99f75f","error":"Deployment.apps \"service-telemetry-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:40.945970124+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=JofKD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:40.951837837+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:40Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"smart-gateway-operator.v5.0.1765147433","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"smart-gateway-operator.v5.0.1765147433","reconcileID":"6253b350-dc1c-4a08-b0b1-1c11c0aebd78","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:40.964155909+00:00 stderr F time="2025-12-08T17:56:40Z" level=warning msg="error adding operatorgroup annotations" csv=smart-gateway-operator.v5.0.1765147433 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"smart-gateway-operator.v5.0.1765147433\": the object has been modified; please apply your changes to the latest version and try again" id=y7INf namespace=service-telemetry opgroup=service-telemetry-operator-group phase= 2025-12-08T17:56:40.964155909+00:00 stderr F time="2025-12-08T17:56:40Z" level=info msg="operatorgroup incorrect" csv=smart-gateway-operator.v5.0.1765147433 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"smart-gateway-operator.v5.0.1765147433\": the object has been modified; please apply your changes to the latest version and try again" id=y7INf namespace=service-telemetry phase= 2025-12-08T17:56:40.964155909+00:00 stderr F time="2025-12-08T17:56:40Z" level=info 
msg="not in operatorgroup namespace" csv=smart-gateway-operator.v5.0.1765147433 id=eR6vz namespace=service-telemetry phase= 2025-12-08T17:56:40.964196750+00:00 stderr F E1208 17:56:40.964184 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/smart-gateway-operator.v5.0.1765147433\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"smart-gateway-operator.v5.0.1765147433\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:40.986910082+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:40Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"smart-gateway-operator.v5.0.1765147433","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"smart-gateway-operator.v5.0.1765147433","reconcileID":"46663c40-d46d-4c35-9c3b-d14ed88af249","error":"resource name may not be empty","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:41.011971526+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="No api or webhook descs to add CA to" 2025-12-08T17:56:41.019411540+00:00 stderr F I1208 17:56:41.019337 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"service-telemetry-operator.v1.5.1765147436", UID:"768e4f34-61bc-41d7-a3c5-f46daeb2b4b8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43849", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' waiting for install components to report healthy 2025-12-08T17:56:41.053169931+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=NilOO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.054795443+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="operatorgroup incorrect" csv=smart-gateway-operator.v5.0.1765147433 error="" id=QW1Fi namespace=service-telemetry phase= 2025-12-08T17:56:41.054795443+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="not in operatorgroup namespace" csv=smart-gateway-operator.v5.0.1765147433 id=h/0m/ namespace=service-telemetry phase= 2025-12-08T17:56:41.063694764+00:00 stderr F time="2025-12-08T17:56:41Z" level=warning msg="error adding operatorgroup annotations" csv=smart-gateway-operator.v5.0.1765147433 error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"smart-gateway-operator.v5.0.1765147433\": the object has been modified; please apply your changes to the latest version and try again" namespace=service-telemetry operatorGroup=service-telemetry-operator-group 2025-12-08T17:56:41.063726245+00:00 stderr F time="2025-12-08T17:56:41Z" level=warning msg="failed to annotate CSVs in operatorgroup after group change" error="Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com 
\"smart-gateway-operator.v5.0.1765147433\": the object has been modified; please apply your changes to the latest version and try again" namespace=service-telemetry operatorGroup=service-telemetry-operator-group 2025-12-08T17:56:41.065219925+00:00 stderr F E1208 17:56:41.063767 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/service-telemetry-operator-group\" failed: Operation cannot be fulfilled on clusterserviceversions.operators.coreos.com \"smart-gateway-operator.v5.0.1765147433\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError" 2025-12-08T17:56:41.095486954+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:41Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"smart-gateway-operator.v5.0.1765147433","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"smart-gateway-operator.v5.0.1765147433","reconcileID":"1e69bc75-f41b-45ac-8db0-202ed391f4a4","error":"Deployment.apps \"smart-gateway-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:41.095697500+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:41Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"smart-gateway-operator.v5.0.1765147433","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"smart-gateway-operator.v5.0.1765147433","reconcileID":"fc461d56-6886-49bf-9dd9-fbf154fb134a","error":"Deployment.apps \"smart-gateway-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:41.115421574+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:41Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"smart-gateway-operator.v5.0.1765147433","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"smart-gateway-operator.v5.0.1765147433","reconcileID":"45868ac6-572e-46cc-ad0c-cfedc6e37576","error":"Deployment.apps \"smart-gateway-operator\" not 
found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:41.147101820+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=DcLld namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.147405028+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Kj4wy namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.148441526+00:00 stderr F I1208 17:56:41.148392 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"service-telemetry-operator.v1.5.1765147436", UID:"768e4f34-61bc-41d7-a3c5-f46daeb2b4b8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43866", FieldPath:""}): type: 'Normal' reason: 'InstallWaiting' installing: waiting for deployment service-telemetry-operator to become ready: deployment "service-telemetry-operator" not available: Deployment does not have minimum availability. 2025-12-08T17:56:41.195596365+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:41Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"smart-gateway-operator.v5.0.1765147433","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"smart-gateway-operator.v5.0.1765147433","reconcileID":"288c5a14-0991-4f33-99e4-96814fbdb2a0","error":"Deployment.apps \"smart-gateway-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:41.247620222+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=LT/r4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.247742195+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="scheduling ClusterServiceVersion for requirement verification" csv=smart-gateway-operator.v5.0.1765147433 id=Dfj7g namespace=service-telemetry phase= 2025-12-08T17:56:41.248369861+00:00 stderr F I1208 17:56:41.248316 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"smart-gateway-operator.v5.0.1765147433", UID:"07dc000f-105f-4a6b-a43d-3e7203e60ae8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43872", FieldPath:""}): type: 'Normal' reason: 'RequirementsUnknown' 
requirements not yet checked 2025-12-08T17:56:41.348366270+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=3jMy7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.355342391+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=XoKly namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.355757023+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:41Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"smart-gateway-operator.v5.0.1765147433","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"smart-gateway-operator.v5.0.1765147433","reconcileID":"bebb3510-03bd-4fcd-a903-48ebc1417ebf","error":"Deployment.apps \"smart-gateway-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:41.374068830+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="requirements were not met" csv=smart-gateway-operator.v5.0.1765147433 id=eXrJw namespace=service-telemetry phase=Pending 2025-12-08T17:56:41.374268645+00:00 stderr F I1208 17:56:41.374219 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"smart-gateway-operator.v5.0.1765147433", UID:"07dc000f-105f-4a6b-a43d-3e7203e60ae8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43887", FieldPath:""}): type: 'Normal' reason: 'RequirementsNotMet' one or more requirements couldn't be found 2025-12-08T17:56:41.386596347+00:00 stderr F E1208 17:56:41.386522 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/smart-gateway-operator.v5.0.1765147433\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:41.447620038+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=PvEo1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.457813454+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ptXuz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.467970509+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="requirements were not met" csv=smart-gateway-operator.v5.0.1765147433 id=mxGpX namespace=service-telemetry phase=Pending 2025-12-08T17:56:41.468021191+00:00 stderr F E1208 17:56:41.467971 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/smart-gateway-operator.v5.0.1765147433\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:41.474832628+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" 
csv=service-telemetry-operator.v1.5.1765147436 id=WRgC9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.476210894+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=sQlj/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.555802910+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=XikIa namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.590604007+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:41Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"service-telemetry-operator.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"service-telemetry-operator.service-telemetry\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:41.607602342+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:41Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"service-telemetry-operator.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"service-telemetry-operator.service-telemetry\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:41.648231641+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" 
csv=amq7-interconnect-operator.v1.10.20 id=2IUod namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.656420614+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="requirements were not met" csv=smart-gateway-operator.v5.0.1765147433 id=9J+AA namespace=service-telemetry phase=Pending 2025-12-08T17:56:41.656463535+00:00 stderr F E1208 17:56:41.656425 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/smart-gateway-operator.v5.0.1765147433\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:41.679421424+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:41Z","msg":"Reconciler error","controller":"operatorcondition","controllerGroup":"operators.coreos.com","controllerKind":"OperatorCondition","OperatorCondition":{"name":"smart-gateway-operator.v5.0.1765147433","namespace":"service-telemetry"},"namespace":"service-telemetry","name":"smart-gateway-operator.v5.0.1765147433","reconcileID":"9d6199f8-7996-45d4-8406-5e121be84c9c","error":"Deployment.apps \"smart-gateway-operator\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:41.683864920+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=RC1PY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.779132915+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="requirements were not met" csv=smart-gateway-operator.v5.0.1765147433 id=6uUNk namespace=service-telemetry phase=Pending 2025-12-08T17:56:41.779168456+00:00 stderr F E1208 17:56:41.779153 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/smart-gateway-operator.v5.0.1765147433\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:41.843117594+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=nAI/8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.850587308+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="requirements were not met" csv=smart-gateway-operator.v5.0.1765147433 id=EDBak namespace=service-telemetry phase=Pending 2025-12-08T17:56:41.850623979+00:00 stderr F E1208 17:56:41.850606 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/smart-gateway-operator.v5.0.1765147433\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:41.880260543+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=mviJ1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.880260543+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=q55Df namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.890219842+00:00 
stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=BIxbL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.890512859+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=x5bDm namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.896309592+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=xXu4M namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.899449253+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=tb40B namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.945948135+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=RpWNS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.954480988+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="requirements were not met" csv=smart-gateway-operator.v5.0.1765147433 id=tq7F3 namespace=service-telemetry phase=Pending 2025-12-08T17:56:41.954480988+00:00 stderr F E1208 17:56:41.953826 1 queueinformer_operator.go:312] "Unhandled Error" err="sync \"service-telemetry/smart-gateway-operator.v5.0.1765147433\" failed: requirements were not met" logger="UnhandledError" 2025-12-08T17:56:41.981294948+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=rsWuq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.981499293+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=GdvNr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.986450432+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=snSQI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.986662008+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=sphpL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.991304888+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=9a+hz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.992803388+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=DZVCA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:41.996426932+00:00 stderr F time="2025-12-08T17:56:41Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=hJrUU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.002192192+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=964F1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.046586700+00:00 stderr F 
time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=CJ4uV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.082261271+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=hERL/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.082786225+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=AxBPt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.147299227+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=jHUwf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.147462981+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=iikxg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.154585008+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="scheduling ClusterServiceVersion for install" csv=smart-gateway-operator.v5.0.1765147433 id=qdR0g namespace=service-telemetry phase=Pending 2025-12-08T17:56:42.154721201+00:00 stderr F I1208 17:56:42.154685 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"smart-gateway-operator.v5.0.1765147433", UID:"07dc000f-105f-4a6b-a43d-3e7203e60ae8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43896", FieldPath:""}): type: 'Normal' reason: 'AllRequirementsMet' all requirements found, attempting install 2025-12-08T17:56:42.247318516+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=WZLfS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.247318516+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=T+Iib namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.279183867+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="No api or webhook descs to add CA to" 2025-12-08T17:56:42.282137204+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=JBY44 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.290920253+00:00 stderr F I1208 17:56:42.287647 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"smart-gateway-operator.v5.0.1765147433", UID:"07dc000f-105f-4a6b-a43d-3e7203e60ae8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43924", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' waiting for install components to report healthy 2025-12-08T17:56:42.377233564+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=bq58M namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.475550678+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=gbFIk namespace=service-telemetry phase=Installing strategy=deployment 
2025-12-08T17:56:42.475678382+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=afGrz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.476256178+00:00 stderr F I1208 17:56:42.476221 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"smart-gateway-operator.v5.0.1765147433", UID:"07dc000f-105f-4a6b-a43d-3e7203e60ae8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43929", FieldPath:""}): type: 'Normal' reason: 'InstallWaiting' installing: waiting for deployment smart-gateway-operator to become ready: deployment "smart-gateway-operator" not available: Deployment does not have minimum availability. 2025-12-08T17:56:42.576762838+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=MsPu6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.587095868+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=9Jpca namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.587312684+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=eJox7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.593258308+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=LygXP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.595625600+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=2SkEA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.598447874+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=EU0rG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.601509054+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=XnWjF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.610993902+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=00WFd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.611797632+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=edbF4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.616236589+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=SsNL8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.620833928+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=gTh43 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.623409515+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 
id=L4ews namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.627977465+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=xHljQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.633052707+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=zeDk0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.634809172+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=khHp6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.639236548+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=WRNXJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.642685948+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=uxhTF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.644987238+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=rFLxZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.648411287+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vdX+C namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.652466593+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=kcp6s namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.658921462+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=k93I2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.661951271+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=dAIB2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.679923949+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=D5G9Z namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.679923949+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=yPwZE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.708053663+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:42Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"0c00f746-a1ae-4f74-aaea-2162840da236"} 2025-12-08T17:56:42.708053663+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:42Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"0c00f746-a1ae-4f74-aaea-2162840da236","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:42.795506264+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=WLmcN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.896338304+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=gjJ3Z namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.897350570+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=hf7VB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.910614937+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=WivGq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.927922638+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Mrqu9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.927922638+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=hoDLL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.931995604+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=KU/hB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.937604710+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=sKOSS 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.963245398+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=d3b8q namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.963529187+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=eXyPT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.970155269+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=jl1XT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.970658792+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=47Eka namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.980996122+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=2DlNY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.982730037+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=/SKmW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.994513815+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=qf930 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:42.994513815+00:00 stderr F time="2025-12-08T17:56:42Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=F4M54 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.011905178+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=gjrWi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.109602537+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=lOG8j namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.208443244+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=qFfxG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.208666470+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Zsbsu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.215527728+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=BMmcN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.216224187+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=7U0AZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.224277688+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=AY5h1 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.224354620+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=22DgD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.231764872+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=SutyG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.231797443+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=1GTYN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.237424430+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Swd8A namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.314963552+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Q069y namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.318749561+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=S5MV6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.410712960+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ZUIU3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.410906315+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=YwnHS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.510938364+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=8z+1V namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.511091588+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Bnutx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.611380684+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=UKaDR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.611564088+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=VZYqV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.619928037+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=3riys namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.712581503+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=2qT2c namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.719147884+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ILB0S 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.719243657+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=nn9Eg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.724222357+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=+U+mt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.726637370+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=OQ0sP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.730051339+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=RRHrS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.731218430+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=j+6gW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.736580189+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=SgAPR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.736661792+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=+N6X1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.820410495+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ThwEN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.821328170+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=LB8vK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.826674909+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=/eS78 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.828310391+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=q6CVC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.921521802+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=f0Jul namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:43.921638206+00:00 stderr F time="2025-12-08T17:56:43Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Ct3CN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.021587532+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=u5tT+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.029355135+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=kqkr4 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.029479938+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=oH2xZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.125179524+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=CcmDQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.125339738+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=uFkfd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.225664325+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=2gOu7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.225841600+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Aunsz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.231341523+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Vo/VU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.234484085+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ZgIlY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.334124344+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=krEFM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.334266818+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vUGV+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.349501105+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:44Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"amq7-interconnect-operator.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"amq7-interconnect-operator.service-telemetry\": the object has been modified; please apply your changes to the latest version and try 
again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:44.358101349+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=vEost namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.358101349+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=KNu4h namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.457511852+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=hSSBV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.457717158+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=IY77l namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.467660146+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Ja5WA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.469633898+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=xohYI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.495036221+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:44Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"23e40f07-fbac-4d4d-8ff2-cbdac2daa262"} 2025-12-08T17:56:44.495036221+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:44Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"23e40f07-fbac-4d4d-8ff2-cbdac2daa262","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:44.558339542+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ji3Bb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.559000099+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=4Eumv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.566341991+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=9wCN6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.567327346+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=DEwzB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.573900588+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=DQGGD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.573992500+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=y5DqB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.580353277+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=RwINq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.583360425+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 
id=YYihD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.589852304+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=3JEp7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.665750733+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=QQXhn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.667575101+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=OVwUN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.672257183+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=7oBrj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.673297121+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=khMLl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.767838267+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Ds6Fj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.767999071+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=SkMu3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.776583665+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=yKwtc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.776867892+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=tQuKA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.784737557+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=0NYc3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.784967103+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=COa8F namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.880910465+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=oAw/h namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.881129011+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=/bE/9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.887089196+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=V2aWv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.888776051+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=RKN7l 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.892014525+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=cqmXy namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.897698753+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=w/+ux namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.901390429+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=P+U9d namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.902041307+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=WAnuG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.907747846+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=a9SEQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.909012078+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=2vVXq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.912829397+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=XUGeR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.913624049+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=EOnJ2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.917921780+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=NvDH3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.919638146+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=6j8FA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.929581254+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=0WA+D namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.929821901+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ms796 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.939398281+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=cSkr8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.942118282+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=/uNwG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.981223212+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=DL5sf 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.981474918+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=PdVxo namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.990461873+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=0D0/3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:44.992197468+00:00 stderr F time="2025-12-08T17:56:44Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=xtUjt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.006406879+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=V5J8Y namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.009685444+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=V2s7u namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.082672788+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=3kyuB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.090225625+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=KR2/q namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.182617365+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ncKo2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.191446365+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=AGgjH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.191768113+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=0ZRk9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.198748185+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=a73+Q namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.201144707+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=bpGTt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.205727527+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Dzf7k namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.206930558+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=5hTZs namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.215244805+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Fxyrv 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.215492271+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=0vnoM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.222114265+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=bMMVu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.223697156+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=7mfBg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.230409411+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=hmN0s namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.233053520+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=JpCxd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.236645474+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Ns6lS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.239436106+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=+ZfUZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.290573120+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=8cQad namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.291484514+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=dzYIZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.296352451+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ig38u namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.297645115+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Y0HbS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.304559225+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=RbU/1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.306075585+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=PCoJe namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.393116715+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Jix/X namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.393207037+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=MrNsJ 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.398717861+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=0X4Ao namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.399839010+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=3epqF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.404448380+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=+ZRN3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.406166835+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=q8SWa namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.411692089+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=qzqWU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.412232154+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Q+qn4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.418314462+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=rL+53 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.418747983+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=3UwV/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.494174111+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=AQSjs namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.594135018+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Ji8cH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.594207100+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=tBlSi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.600774921+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=XTEuP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.601187491+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=1zgDs namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.605701610+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=zzNzi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.608732559+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=RQn9O 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.697991457+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=8I+P5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.698173031+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=HttZ1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.795131200+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=aSV1a namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.795211872+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fw5HG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.805378517+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=+/PDo namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.806401784+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ut3Ao namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.812207215+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=CsspV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.813119929+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=8Pyqi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.817821941+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=e8F4O namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.819181117+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=MYdSx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.824353562+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=9oMeI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.825629445+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=UjbDW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.830469872+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=kLTcH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.831180300+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=dGsKJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.842767902+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:45Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. 
The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"ae5378bc-8c29-4b43-9d6b-61855c7f4357"} 2025-12-08T17:56:45.842767902+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:45Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"ae5378bc-8c29-4b43-9d6b-61855c7f4357","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:45.895502288+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=2Nucg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.895556119+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=WJo6B namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.904473782+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=xj6/y namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.905306764+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=GZ4Sz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.910330974+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=PKxdT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.911188087+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=vbnoQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.915832108+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=h0rEH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.915832108+00:00 stderr F 
time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Co/n2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.920738476+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=dm3nk namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.921730872+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Q4t8f namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.925959392+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=kCwYz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.926454765+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=aQF+J namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.931796534+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=AgC9M namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.932594275+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=F0cCR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.936097367+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=hOcr9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.937124724+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=HfKFL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.941867738+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=7CezR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.942178015+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=1afSo namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.947269148+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=gGKOL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.947304349+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=2K1p2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.952311619+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=sEeMa namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.958158643+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=6pgDY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.963039099+00:00 stderr F 
time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=uJ5i3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.967722302+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Du07a namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.972677501+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=3avqX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.973508732+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=5pLNd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.981694156+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=eEYss namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.983147953+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=SHyZL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.987177399+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=+k8oK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.989503260+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=LMo7A namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.993395931+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=97Bb1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.994778018+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Ahahv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:45.997987721+00:00 stderr F time="2025-12-08T17:56:45Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=8E+Yi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.001865662+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=r2jPv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.003749521+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=0H3ZC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.007169790+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=20I7Q namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.010091777+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Qh1my namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.011711309+00:00 stderr F 
time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=GHL/6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.015027905+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=iukII namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.018698251+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=IgB+C namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.020488538+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=v3KtW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.024696817+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=hqF0d namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.032055810+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=reRpF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.033625450+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=2Iulu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.141789761+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=uDXLW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.146046573+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=D6FxU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.151111584+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=KaeeL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.151489015+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=JtVCh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.158602910+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=VxB5w namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.164459103+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=o0g3W namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.172035371+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=GW5mt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.177806041+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=5uyAH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.179902836+00:00 stderr F time="2025-12-08T17:56:46Z" 
level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=9/a7O namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.185318807+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=+OL2M namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.186239881+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=yviEY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.191992911+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Lg0cx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.194142267+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=DDdJ7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.200535944+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=UBDgF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.206933800+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=UDjn+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.208736488+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=s1AEn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.212088035+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=gWA++ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.212513736+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=d/C/j namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.218854762+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=bj/8n namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.225324550+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=8DCwf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.242344344+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=WRI90 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.246271556+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=oDvuP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.250338073+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=afKEx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.251680447+00:00 stderr F time="2025-12-08T17:56:46Z" level=info 
msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=3BLGQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.271826813+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=XCx1J namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.278828546+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=8qD0d namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.378247428+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=bwCHl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.481011189+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=wUTuW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.481096311+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=bvOnx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.523942558+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:46Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"77c8cb0f-fede-4d06-a23e-f57131516158"} 2025-12-08T17:56:46.523942558+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:46Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"77c8cb0f-fede-4d06-a23e-f57131516158","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:46.580966126+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=f/1Td 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.581122020+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=uJymH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.594311825+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=qhIlH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.595310280+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=GT5ce namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.604713735+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=zLPfc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.605443895+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=HhCDU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.608460703+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=txBYL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.610754583+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=BGSXB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.702411453+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=I/vbn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.702729182+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:46Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"service-telemetry-operator.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"service-telemetry-operator.service-telemetry\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:46.794814693+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=QXKmO 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.795895012+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=tPw0i namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.897460131+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=BwEbx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.897923303+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Eo4zS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.910952603+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=4GiKZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.912334419+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=c1WzB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:46.918486799+00:00 stderr F time="2025-12-08T17:56:46Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=R5rVl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.006594007+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=7cNa2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.014401451+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=gQRuu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.014820262+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=3Et/n namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.021231149+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Lolh7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.023018946+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=uWeGH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.035165032+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=W5+Iq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.035194683+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=hL7f2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.042449623+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Mq6Y/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.042713040+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=GjKD+ 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.048209283+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=dj44D namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.048325176+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=jtWMe namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.055070651+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=P2CGD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.055172234+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=igojJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.060203476+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Y7y0e namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.062032624+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=5+kW0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.066836258+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ygCha namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.067768013+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Z6Yvv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.072861505+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=r2aTy namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.073092442+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=z2Vl8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.079527250+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=wz/pv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.081825170+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=PZW2l namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.086823309+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=908BS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.100095956+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=mAipU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.127548512+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=2bw9j 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.221166093+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=0liqx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.222959810+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=u48cf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.227862049+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ytY0G namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.228170706+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=irLDS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.234568363+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=/YYPI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.235430495+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=M1jve namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.239033770+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=pQ/OE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.246274988+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:47Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"9709455e-6050-4c9b-ba91-f9fd00941519"} 2025-12-08T17:56:47.246305609+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:47Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"9709455e-6050-4c9b-ba91-f9fd00941519","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:47.320679999+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=wqgxQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.323617726+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=yTEgA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.328445872+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=I/0A5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.329221162+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=g9zCY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.335124046+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=uk8Mx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.337022236+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Hm1Rt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.342350395+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=nijLs namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.343643868+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=zm4vx 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.350298061+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=3R49i namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.350385063+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=cqWoc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.355654271+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=SMqdA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.357340115+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=F+Ev7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.425252106+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=MgyCE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.425295758+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Cxq3K namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.431990963+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=rJ0to namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.433773609+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=JyJBF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.524987608+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=H8lVT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.525242765+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=5fe6x namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.531008495+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=xPNp0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.531681532+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=MsAwO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.536388915+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=DSDEs namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.537067053+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=G/ON0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.546334284+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=nzPLh 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.553705487+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:47Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"27ea9e88-3fac-4ef3-981f-4a7040cc87d2"} 2025-12-08T17:56:47.553705487+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:47Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"27ea9e88-3fac-4ef3-981f-4a7040cc87d2","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:47.626224268+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=YRB5l namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.631774473+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Xua2g namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.632869241+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Hk6VC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.637856192+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=u9ATY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.639117655+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=B8bOJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.644811644+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=8zXG0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.645131292+00:00 stderr F 
time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=TamZZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.654740252+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=qRiAY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.654931707+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=OoSNC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.660038490+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=nXYvq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.663179812+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Dl1Jg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.680418151+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=hZ1M2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.687893307+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=W+iDK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.727523770+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=oFV1/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.732472560+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=SQf+b namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.828126894+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=2PHoS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.928117553+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=NCfRg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:47.928554404+00:00 stderr F time="2025-12-08T17:56:47Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=yQIly namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.029199849+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=hdgF0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.029531448+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Ic+4P namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.129631659+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=h20Pi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.145546073+00:00 stderr F 
time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=nPs7N namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.146430937+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ZdADz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.230094489+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=dch71 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.254687890+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=r/D5C namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.254687890+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=6RgTk namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.261568580+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=CS+Te namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.263622413+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=dG9w0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.331270268+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=NlI6o namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.353366104+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=jVBsr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.353600680+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ax1EI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.358241801+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=8SL6G namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.454397889+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=/G8VO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.454397889+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=3rrTJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.460535039+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Yf4Ir namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.462192292+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=b0AbI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.467012818+00:00 stderr F time="2025-12-08T17:56:48Z" 
level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=MnHzZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.467870720+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=bRsOK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.476902886+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=YWl5I namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.479329729+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ROMg3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.518853750+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:48Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"72475fd2-806b-47a3-84f2-f5b5465249e2"} 2025-12-08T17:56:48.518926302+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:48Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"72475fd2-806b-47a3-84f2-f5b5465249e2","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:48.554615562+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=g8SO7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.554763456+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Z4ddh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.655081673+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=h5b6S 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.655416862+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Muxi1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.661248554+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=C86pj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.661268104+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=9TZSl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.666280415+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=F310G namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.668302888+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=6lW+x namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.672986170+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=K8F8L namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.676100862+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=21iOf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.756420856+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=DgMOq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.756765186+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=6qXW3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.764037455+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=eC5zr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.765021230+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=obK2Q namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.769581969+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=uajNJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.770372570+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=6z5ht namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.775015501+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=5n7rK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.776084279+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=GlPkr namespace=service-telemetry 
phase=Installing strategy=deployment 2025-12-08T17:56:48.781416068+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=+siUG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.782262131+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=mihPb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.787831946+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=xGvsd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.789007266+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fiKAB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.794543091+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Zw/VD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.796331738+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=hF5S9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.802644372+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=4Nfl7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.804571933+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=TH1JP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.820299642+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=AOhpP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.821747970+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=tBAIx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.828265260+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=PR6QM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.829284976+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=spPlN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.864749362+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Pnl9W namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.864940737+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=BZyBb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.871425065+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=w8ql/ namespace=service-telemetry 
phase=Installing strategy=deployment 2025-12-08T17:56:48.872346730+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=2uJdI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.965973052+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ZcPaS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.966333341+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=98lRv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.974444693+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Wobm2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.975853310+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=MwqSA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.981690002+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=xKo5S namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:48.981690002+00:00 stderr F time="2025-12-08T17:56:48Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Iel08 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.073346383+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=SZ4su namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.073383434+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=37BHp namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.173768611+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=4kbUH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.174249735+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=MVv6/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.185624141+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:49Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"a5bde49e-c045-4da2-ab01-aff5c64b8072"} 2025-12-08T17:56:49.185624141+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:49Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"a5bde49e-c045-4da2-ab01-aff5c64b8072","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:49.274809726+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=+JLVn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.274982352+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Bsder namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.402087387+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Ms8qV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.408839772+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:49Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"4b94cba6-fb34-4a45-b456-d84e996ca244"} 2025-12-08T17:56:49.408839772+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:49Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"4b94cba6-fb34-4a45-b456-d84e996ca244","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:49.501399107+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=FQgzB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.501561061+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=9UvUd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.575249143+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=UP1jM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.575564291+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=opkJP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.581645420+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=IblvN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.582577334+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Pp1xS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.677160961+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=rzVKX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.677321405+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Vmdhk 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.709317970+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=J22GE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.709364911+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Ob1qK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.777794156+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=U6zKQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.777841607+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=lfXgP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.878251876+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=5C7IW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.878302607+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=77B0o namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.884543710+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=oqqY0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.885200137+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=2EMaA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.894625973+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=PBPHM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.895535607+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=pQ2uK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.905147347+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=DH6az namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.905147347+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=MMrtX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.912256473+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=DHpji namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.917205432+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=3FJF9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.919696167+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=AsVZW 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.924050270+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=aLAR3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.978215434+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=giusW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:49.978286155+00:00 stderr F time="2025-12-08T17:56:49Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=6Izo/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.078347654+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=WtSF3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.078439777+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=95zH3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.178599149+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=JPecN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.178790944+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=flRRi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.280051335+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Xvz1+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.280160218+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=tTSIF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.379840428+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=LNMvq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.379932351+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=/KaTO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.386207234+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=DU+fD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.388681269+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Wqynw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.391396930+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=xWnHP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.395066485+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=shqYj 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.399027589+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=gKea8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.401087342+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=rmsZi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.406494473+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=6dnF/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.412671375+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=KzvJh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.414015960+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=FUEP+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.420342244+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=GA3z+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.423184308+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Rnf+F namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.426032823+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=DZp8K namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.479833077+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=MSD9b namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.500033613+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=1YHHv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.500950687+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=0uPf8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.508517255+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=tITVw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.508547105+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=AnjY9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.514324586+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=gRugl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.518786732+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=N/6y9 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.523508075+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=dDDV9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.525021695+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=boS1l namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.530913419+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=7ii7v namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.533113236+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=0hRQI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.540939260+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=2yQcM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.540939260+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=+iNvs namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.544786481+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=YjXhB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.545649323+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=cAidK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.550575152+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=vFRxZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.552008058+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=rthXc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.556681551+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=L7mVh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.561661031+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ulSh8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.563947391+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=6BlMU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.596814947+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=7Xt2G namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.597170526+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=DNVDl 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.602354952+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=QEzvt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.603037440+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=cpmOy namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.606526721+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=sD3Yd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.607928977+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Jb1jK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.612595739+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=MXRxE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.612813825+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=+NTr+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.706525989+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=9xHVE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.706905799+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=MqIe4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.796124716+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=mpjf5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.796213118+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=IDdjp namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.802162243+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=bfdEZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.802270396+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=YgO+R namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.807710738+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=iB/FT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.808915139+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=MQ+LS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.813340575+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Eh3kX 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.814130125+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=xDd4y namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.819365312+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=VIzA3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.819410593+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=UuaXm namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.824324562+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ZKj53 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.824575898+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=fPpfR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.828811088+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=lqMLD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.829700791+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=UOOTl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.834671971+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=X+cxx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.835716249+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=0MOfi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.845614216+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:50Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"f7cd5324-f784-42b1-bfcf-00f4a79059fd"} 2025-12-08T17:56:50.845614216+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:50Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"f7cd5324-f784-42b1-bfcf-00f4a79059fd","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:50.938995242+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ioM4t namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:50.938995242+00:00 stderr F time="2025-12-08T17:56:50Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=jqbBH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.035343505+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=plmHb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.035511779+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=F45Ky namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.041353172+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vIH8m namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.044171755+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=HCoi0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.054858074+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=xvLxe namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.058886989+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=I3Dko 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.062561775+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=MW18T namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.064284620+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=jAk2C namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.067926235+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=x7gzy namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.136261717+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=C7Xn8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.203215474+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=NuZHx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.203215474+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=CKpU2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.209179479+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=NhQ+E namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.211416538+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fskoI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.213866242+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=bPdBO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.216368807+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=nTUDf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.223070621+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=j+8r4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.223171264+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Mo275 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.229205041+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=NTzqI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.230075694+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=rBvMD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.235380942+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=RDqiC 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.236852001+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=tFHCt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.245568318+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=GrL7k namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.255229920+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:51Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"ac26f850-da69-47d2-9a95-68d084d24c7d"} 2025-12-08T17:56:51.255273141+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:51Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"ac26f850-da69-47d2-9a95-68d084d24c7d","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:51.347469715+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=E065o namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.347556238+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=gdVl9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.406805403+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ClQCQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.407019619+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=YrF8y namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.412763959+00:00 stderr F 
time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=xJ+oI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.414973526+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=NROqN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.418825327+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=HBW93 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.420052809+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=UtVCH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.424801673+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Ig0A5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.425015348+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=W5COP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.447030553+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=CwYxB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.447030553+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=YIJIn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.448035678+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:51Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"bb4a4389-e782-4d77-b5e4-cf958851d409"} 2025-12-08T17:56:51.448493750+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:51Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"bb4a4389-e782-4d77-b5e4-cf958851d409","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:51.507438748+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Jn5ej namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.507543530+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=0OfYw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.547100353+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=8xaEw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.547279017+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fK3Bj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.648446046+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ARBup namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.648553519+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=GoBTD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.654614477+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=/YgLU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.655663564+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=erxPw 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.659754501+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=pWRW4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.661380493+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=mBUl5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.668276403+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=9vihD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.668538340+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ZuJIn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.674228668+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=VoY96 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.675601284+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=PKfTu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.680849891+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=IpusZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.683185442+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ekPyJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.685936464+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=w0sMR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.692106324+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=22Rmb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.697267349+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=M2+Ec namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.697267349+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=zr6cI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.733607127+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:51Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"amq7-interconnect-operator.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"amq7-interconnect-operator.service-telemetry\": the object has been modified; please apply your changes to the latest version and try 
again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:51.748606229+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=FyXGl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.748739962+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=b9kF2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.780499060+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:51Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"amq7-interconnect-operator.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"amq7-interconnect-operator.service-telemetry\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:51.856916663+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=4lFgM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.858508725+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=dFTMx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.916191669+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=olY/7 namespace=service-telemetry 
phase=Installing strategy=deployment 2025-12-08T17:56:51.916623840+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=oMy0w namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.933682406+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=FWLfr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.939717513+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=SgWsd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.947112315+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=l1VSS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:51.952095755+00:00 stderr F time="2025-12-08T17:56:51Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=tEP8m namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.053735066+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=GdFbO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.053833169+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Hji1B namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.069239771+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=aT1bF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.069324793+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=/Wm5C namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.075673609+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=gpbOF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.079694504+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ZNKVl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.083461102+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=yTjpu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.086030569+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=6H6iI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.153804796+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=p0Wid namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.153855098+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=7OjKw namespace=service-telemetry phase=Installing 
strategy=deployment 2025-12-08T17:56:52.253923848+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=e7kj6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.253983520+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=qQ886 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.268748984+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=PzP3k namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.269943396+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=2I4ui namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.274666359+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=t0xEQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.275552582+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=4Ukpy namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.353784863+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=5IVWQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.353958817+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vQ4+c namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.368984129+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Uxjpm namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.370034656+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=zo7c0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.380063838+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:52Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"24b94708-efb5-4086-8e41-b74e07a750a8"} 2025-12-08T17:56:52.380063838+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:52Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"24b94708-efb5-4086-8e41-b74e07a750a8","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:52.454065508+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=3CNHW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.454146880+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=pt5+J namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.472090048+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=5TO+6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.472271523+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ick7M namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.477944651+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=LpXmg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.478316891+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=l6tgo namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.554667432+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=VjjNZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.571197413+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 
id=jW7za namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.571442779+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=E40mU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.576908312+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=3+D5g namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.584561392+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:52Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"917c5fc6-a9ae-430b-8fe3-087efd775b8a"} 2025-12-08T17:56:52.584585472+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:52Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"917c5fc6-a9ae-430b-8fe3-087efd775b8a","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:52.655256305+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=zCObJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.673063830+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=dhPPT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.673158973+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=t5SzU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.679598651+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=OSpXG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.679915059+00:00 stderr F 
time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=YOoUj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.757060420+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=IVqUw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.856515265+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=McAWX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.856515265+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=IHluY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.874385990+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=iMLO4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.875487060+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=x6PhN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.882984555+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=pAXwz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.883897229+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=mUeeD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.888549270+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=1CPrG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.892629947+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ZoOxh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.895336127+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=81kHB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.956954844+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=4KWRC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.974481911+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=x257N namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.975151669+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Wc73A namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.984134054+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=LKNQC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.984134054+00:00 stderr F 
time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=qUTFt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.990314535+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ML4Io namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.991676280+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=PaKNS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.996697491+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=1UOJh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:52.998032846+00:00 stderr F time="2025-12-08T17:56:52Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=8hJH4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.003560559+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=C3KoM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.004712030+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=mgmnL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.009891325+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=jE5z9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.057311702+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=BN9YH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.074571282+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=zzpRI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.074664355+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=/JGyu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.079593483+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=mzRiZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.081990025+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=pUSWY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.084784368+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=evWOi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.087368895+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=l3/Ap namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.091647987+00:00 stderr F 
time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=9qy90 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.092206742+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=oVGwJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.096254358+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=S7rHQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.158130522+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=6wzjM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.175560736+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=5hI/a namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.175812193+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=qa/mz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.181857150+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=L/aO1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.183759780+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=17mRl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.258802647+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=VjKWu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.277745961+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Pbjrh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.278201324+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Gr9r3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.286296174+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=0+gO7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.286527880+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=yiKJ3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.359801912+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=/xRVi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.380624394+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=lmMWe namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.381380744+00:00 stderr F time="2025-12-08T17:56:53Z" 
level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=RhIim namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.385467451+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=K72Sr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.388838699+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=o4bqv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.391736674+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=1VSjV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.393971213+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=jSwYS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.397829533+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=DMPBP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.399084906+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=O3Ppp namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.404439166+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=UPNmR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.418100491+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:53Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"6781cb0f-423d-47b4-8b9b-55856e002b04"} 2025-12-08T17:56:53.418149143+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:53Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"6781cb0f-423d-47b4-8b9b-55856e002b04","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:53.460311763+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=tuRfm namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.560863765+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=4Gpuo namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.560863765+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=CE9d1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.580959060+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=JvTTx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.580959060+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=7bYR3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.586818653+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=LZcv0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.587827909+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=4BR8y namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:53.592432319+00:00 stderr F time="2025-12-08T17:56:53Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=524p7 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:54.505731190+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=VEs/k
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:54.599412013+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=6t03d namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:54.605895152+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=KSnEx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:54.606430636+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=pnpP0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:54.706331181+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=hL654 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:54.706501516+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=8hHKJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:54.781540333+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:54Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"service-telemetry-operator.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"service-telemetry-operator.service-telemetry\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:54.811723920+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=4h0vZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:54.811723920+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vOErp namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:54.902971361+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=lQm4M namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:54.903241778+00:00 stderr F time="2025-12-08T17:56:54Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=0mOOW 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:56.876628677+00:00 stderr F time="2025-12-08T17:56:56Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=7CfLF
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:56.877173711+00:00 stderr F time="2025-12-08T17:56:56Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=YAemR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:56.895057738+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:56Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"smart-gateway-operator.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"smart-gateway-operator.service-telemetry\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:56.914176396+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:56Z","logger":"controllers.operator","msg":"Could not update Operator status","request":{"name":"smart-gateway-operator.service-telemetry"},"error":"Operation cannot be fulfilled on operators.operators.coreos.com \"smart-gateway-operator.service-telemetry\": the object has been modified; please apply your changes to the latest version and try again","stacktrace":"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators.(*OperatorReconciler).Reconcile\n\t/build/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/operator_controller.go:157\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:119\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:340\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:56.977584400+00:00 stderr F time="2025-12-08T17:56:56Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=pE+0W namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:56.977695543+00:00 stderr F time="2025-12-08T17:56:56Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ew+NM 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.079027766+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fYn5r namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.079115668+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Nzt4W namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.178779918+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=rrIlx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.179034195+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=6/jjQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.282520094+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=cpNwi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.282747600+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=wKYUN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.380165601+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=0mWi4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.380208702+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=NVLH+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.390916291+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=9aXAx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.390991173+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=JLRWu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.396300802+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=2oBXS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.396813005+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=0vTeW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.504975835+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=P6GH1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.504975835+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=l9X28 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.593743081+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fXglu namespace=service-telemetry 
phase=Installing strategy=deployment 2025-12-08T17:56:57.594149031+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=2vllj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.600762835+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=dPEsD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.602892079+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=trgiK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.607979372+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=zNiJw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.610308743+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=bCvjX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.615668863+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=yFFi/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.616491474+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=A3f+1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.621739951+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=bNx4u namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.627549203+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=da3gY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.628771725+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=RvYb3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.635461579+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=q7VpL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.635531031+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=m87wd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.642429041+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=OYUAc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.644117005+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=zP/Sa namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.648597772+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=JJv9i namespace=service-telemetry phase=Installing 
strategy=deployment 2025-12-08T17:56:57.650597644+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=91Gb0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.655053510+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=1XNIa namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.656166379+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=bmySR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.660547814+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=er9E0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.662748591+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=r0uM6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.665843712+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=IFWIK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.667637498+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=t/pzP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.672052574+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=xXZbi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.673651336+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vDLl+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.677592008+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=XPwcW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.681716615+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ZbMHX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.687365782+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ROOWK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.692332592+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=c4OBt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.793622604+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=VnpYc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.793622604+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=IB3cj namespace=service-telemetry phase=Installing strategy=deployment 
2025-12-08T17:56:57.800057993+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=HAKmk namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.803122032+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=IEnRS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.894185687+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=fHW3a namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.894587917+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=lEONE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.903453119+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=4mkNZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.905283446+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=0Bz3g namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.919659671+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=PbzfX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.920284848+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=UKhZM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.994571825+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=nPWYW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:57.995199021+00:00 stderr F time="2025-12-08T17:56:57Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=/OPuc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.002681347+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Mn2hU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.003026096+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vtrwO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.094943363+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Dzkoz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.096659409+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=VYSFB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.101656518+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ZPNah namespace=service-telemetry phase=Installing strategy=deployment 
2025-12-08T17:56:58.104291987+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=W128o namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.197302913+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Cogw9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.200917058+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=oFWjK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.202455607+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ki/yp namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.297546187+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=OjPl6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.302403014+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=1aaUK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.304303244+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=NyHUs namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.398311556+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vtx1M namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.402511006+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fXI6m namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.405471103+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=yrBAe namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.409292942+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=8UMeE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.412628879+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=a7kKF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.413967614+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=m+xDb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.418011389+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=LZooG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.420722130+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=5i2ZI namespace=service-telemetry phase=Installing strategy=deployment 
2025-12-08T17:56:58.422670391+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=D0c7Q namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.426234644+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=zIyNQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.429777926+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=1gFn6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.431747438+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=wcjRY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.435277350+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=J8aGq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.503797207+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=twSkF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.503836448+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=CGnjX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.510043420+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=kwSqs namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.511530339+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=/tsLY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.515898893+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=xGIbV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.518121501+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=8MJod namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.522586058+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=nAxF/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.523031959+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=1ZGKV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.527702410+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=LlJR0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.529578909+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=BfNsl namespace=service-telemetry phase=Installing strategy=deployment 
2025-12-08T17:56:58.533019689+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=IKH8w namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.535173435+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=EiI0B namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.540233548+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=tzlAQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.540259148+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Fxexd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.545443224+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=eOcTL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.546770878+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=an0Vo namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.551906232+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Np9fd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.552008984+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Nl/Fq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.560016563+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=av9eE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.563580736+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=s3Vh6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.566023490+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=l/OAZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.570659151+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=CmsmN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.572770017+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=VtOvL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.577175041+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vgOeZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.579448070+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=NhzT3 namespace=service-telemetry phase=Installing strategy=deployment 
2025-12-08T17:56:58.582165291+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=l4WB/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.584497652+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=toA/L namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.588956008+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=g/rDa namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.589539833+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=xUWcW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.594983775+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=tmJyz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.596403353+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=EzHkF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.601407673+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=lnvYT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.604400211+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=3UMHM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.609711410+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=zbV9J namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.610035198+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=MfKaS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.615748267+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=q1ODN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.622110673+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=mUY7P namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.626221631+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=vQmoH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.627815102+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=cRx8G namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.643335997+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:58Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. 
The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"c9e1e72e-78dc-4a1e-b779-d893d8ce584f"} 2025-12-08T17:56:58.643335997+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:58Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"c9e1e72e-78dc-4a1e-b779-d893d8ce584f","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:58.719196275+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=BoofK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.726100715+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=XJc/b namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.727138883+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=paNLP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.733107658+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=3NHYO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.737290117+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=XBKgc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.819758838+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=h0hPT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.826066763+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=5ILeo namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.826370960+00:00 stderr F 
time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=4/u4h namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.830433986+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=4Xs9D namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.920505026+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=41jcw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.927186670+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=qnJL1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.927493398+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=IGakj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.932425867+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=9Zu/J namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.934262525+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=SxSqJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.939846271+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=pPzzM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.941383680+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=t+A4E namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.947563071+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=SH5GJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.948027343+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=IG+q1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.959854672+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=otE+8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.961605307+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=eKGoN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.967334057+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=AiPiW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.968472787+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=d5wuY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.975225373+00:00 stderr F 
time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=5MTbX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.975248503+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fYnsz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.982081342+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=e27pq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.983544710+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Im4OL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.989238718+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=rdxHC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.990728258+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=u20Tm namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.994121856+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=kh+AZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:58.996670502+00:00 stderr F time="2025-12-08T17:56:58Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=N4Vy9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.001630432+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=tUc5g namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.001752645+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=3/Hrs namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.006639472+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ay4kB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.006770835+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Vak8P namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.020968045+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=IzsYh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.029030606+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:59Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"c2384bff-0e52-437e-96b9-ee2cab75f1da"} 2025-12-08T17:56:59.029030606+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:59Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"c2384bff-0e52-437e-96b9-ee2cab75f1da","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:59.039458838+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:59Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"983ceca3-8350-4978-9be8-9cf4515ea989"} 2025-12-08T17:56:59.039458838+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:59Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"983ceca3-8350-4978-9be8-9cf4515ea989","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:59.052253952+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:59Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"6d440135-a93f-4c64-8985-a0bb61205a01"} 2025-12-08T17:56:59.052253952+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:59Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"6d440135-a93f-4c64-8985-a0bb61205a01","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:56:59.121774515+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=SmT2N namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.121865318+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=HVACJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.129084716+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=PH86+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.130070221+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=s5/oI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.134632850+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=1bZcA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.136421867+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=W2rKr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.139850177+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=/4IGS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.142137997+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=QHCVN 
namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.147327212+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=P6vmz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.148165744+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=UNVuZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.231007824+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=bBW1+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.231305242+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=QeBfc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.237729000+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=wZpjt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.237976756+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=oZy1A namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.243167031+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=0DBuk namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.244023534+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=F5HTY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.249404224+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ryCf7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.250923344+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=zYbO3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.256689404+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=EkFIf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.257482515+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=DC1kt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.262898216+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=56pAM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.262898216+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=iToj/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.267215838+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=dgRkL 
namespace=service-telemetry phase=Installing strategy=deployment
2025-12-08T17:56:59.271252914+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=3aa0b namespace=service-telemetry phase=Installing strategy=deployment
2025-12-08T17:56:59.285156097+00:00 stderr F {"level":"info","ts":"2025-12-08T17:56:59Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"010b1959-d847-482e-9cf1-59bc875256b8"}
2025-12-08T17:56:59.285156097+00:00 stderr F {"level":"error","ts":"2025-12-08T17:56:59Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"010b1959-d847-482e-9cf1-59bc875256b8","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"}
2025-12-08T17:56:59.331804093+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ZSeRK namespace=service-telemetry phase=Installing strategy=deployment
2025-12-08T17:56:59.331912896+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=QIXe2 namespace=service-telemetry phase=Installing strategy=deployment
2025-12-08T17:56:59.339176995+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=EXBV6 namespace=service-telemetry phase=Installing strategy=deployment
2025-12-08T17:56:59.339586896+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=45pVW namespace=service-telemetry phase=Installing strategy=deployment
2025-12-08T17:56:59.344144935+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Hg2sj namespace=service-telemetry phase=Installing strategy=deployment
2025-12-08T17:56:59.344444033+00:00 stderr F
time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Ww1YX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.352913624+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=flcT7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.352913624+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=3NKa2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.364317631+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=sD3SP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.365787590+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=xlTJ7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.371139489+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=I0C1v namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.372408102+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=IuqH8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.378235454+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=9IEvA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.378312666+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=BZby1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.383040670+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=NLNsz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.383543073+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=yc0ks namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.431408961+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=CZagi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.431498813+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=NGKiY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.442012328+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=fI/XT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.443852125+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=DUqU/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.532006465+00:00 stderr F 
time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=BDdrl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.532128378+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Es8tU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.538646418+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ddyNE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.538715540+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=qgxWz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.544099010+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=iTSgu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.545299112+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=XsSrf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.549780598+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=l0xte namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.550002494+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=VFjc4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.555715223+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=GyzCc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.558196328+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Rcbqn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.561632757+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=pCNJG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.564125832+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=so2zF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.567718747+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=zQAne namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.570207862+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=tsiSb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.572724486+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=q0r9/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.576981928+00:00 stderr F 
time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=y18pq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.577526302+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=k37ey namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.582678906+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=rYFkC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.584969866+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=nhG1B namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.633046550+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=yvJ/g namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.633046550+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=8dpa1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.639506318+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=rH/iF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.641626724+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=iqlcC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.646036279+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=pkhBe namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.648599746+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=9WJ8P namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.653076122+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=85PRa namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.654818778+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=3TccC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.660686011+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=tnN/h namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.735603185+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fUm9w namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.741630873+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=/d0iZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.742254969+00:00 stderr F time="2025-12-08T17:56:59Z" 
level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=03q95 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.835576462+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=t/OPg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.835620213+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=2mAeX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.843354375+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ZDgtn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.843439918+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=4LnjX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.849272440+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=nm06G namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.850767539+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=were9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.854299571+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=xw+sX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.856230722+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=1/fUZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.861412717+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=h+OTJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.861412717+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=FJzSA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.869304752+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=W0cTr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.869589879+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=IHFoF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.877922477+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=nKxVb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.879243232+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=z7sr3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.936455463+00:00 stderr F time="2025-12-08T17:56:59Z" level=info 
msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=sqmk7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.945250753+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=wUunr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.947136582+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=YlTLl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:56:59.956111846+00:00 stderr F time="2025-12-08T17:56:59Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Olel0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.218932641+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=+wnFY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.228153571+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=UvD1H namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.237378603+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=zR1ZP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.243251566+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=euc9r namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.258524804+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=coRVk namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.266485211+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Jr3Zo namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.274843549+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=NpbeF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.283545446+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=nLkPP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.290260552+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=5sipW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.296171266+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=giV/L namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.304921574+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=szSdb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.309545994+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy 
successful" csv=service-telemetry-operator.v1.5.1765147436 id=4XxZy namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.312020369+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Dl9Rh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.317305937+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=IAFyw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.324761832+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Bmy2D namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.421569406+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=IxqsR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.427800119+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=EIlyS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.428819915+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=oZgjO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.433494568+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=QAM62 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.435326945+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=pYgKV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.439678569+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vugej namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.529582184+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=3Tapc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.529617514+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=z71DV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.630475925+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ouHFb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.630815914+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=NuU9Q namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.637621222+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=mBIMP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.637645433+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" 
csv=smart-gateway-operator.v5.0.1765147433 id=Zs9FB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.644549042+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=H6dBq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.644688326+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=uUATL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.732647930+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=/rLAH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.733168494+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=BTErz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.739276463+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Xm3us namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.741190893+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=qb9jQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.746322496+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=bMV11 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.748394601+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=YETJN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.759921681+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=vo0+F namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.833923542+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=TlH8L namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.934803843+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=7WECR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.934869525+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=HnC1e namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.943705175+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=z1xgP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.946380235+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=oYevH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.950531532+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" 
csv=service-telemetry-operator.v1.5.1765147436 id=lt14K namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.953566562+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=PHU37 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.960298908+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=S2W33 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.960675737+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=VNqhD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.966524220+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=1viL+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.966524220+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=7Ggwk namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.972555387+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=+VN2R namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.974419226+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=8dHE/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.978980104+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=llbhS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.980321230+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=iqul8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.990473455+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=+8G// namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.990952847+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=/tPlt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:00.999694605+00:00 stderr F time="2025-12-08T17:57:00Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=hOd/U namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.000152197+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=D7wZE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.007475728+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ll6aB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.008668039+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" 
2025-12-08T17:57:01.052072082+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:01Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"d64429a0-2f25-4bf2-a35e-5cff13ff7ab1"}
2025-12-08T17:57:01.052072082+00:00 stderr F {"level":"error","ts":"2025-12-08T17:57:01Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"d64429a0-2f25-4bf2-a35e-5cff13ff7ab1","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"}
2025-12-08T17:57:01.171510156+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:01Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"3187033c-3805-43f0-a83e-dfd3566ce089"}
2025-12-08T17:57:01.171537787+00:00 stderr F {"level":"error","ts":"2025-12-08T17:57:01Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"3187033c-3805-43f0-a83e-dfd3566ce089","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"}
2025-12-08T17:57:01.500859917+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:01Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"eca2dfe5-f52a-451b-bbc1-66a53ec65bc1"}
2025-12-08T17:57:01.500859917+00:00 stderr F {"level":"error","ts":"2025-12-08T17:57:01Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"eca2dfe5-f52a-451b-bbc1-66a53ec65bc1","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"}
time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=KKN45 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.576304985+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=N5tgn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.581396367+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Tlc/7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.583021999+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=XtC1J namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.587440315+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=CBIYV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.648744304+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=VCSpf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.748360232+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=xZOmL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.749233714+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=aeShH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.849790057+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=2m2XE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.849934510+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=JFVB0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:01.860982849+00:00 stderr F time="2025-12-08T17:57:01Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=vV8LT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.243531567+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=k8mDm namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.244534973+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Z+tXK namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.249087392+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=GKwP0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.249709988+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=FMBT/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.255309044+00:00 stderr F 
time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Q5asq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.255887069+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=tAstq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.260694955+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=eSgbu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.261717741+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=bR/Gj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.333268148+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=5sLjX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.333314189+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=mLGOQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.343673650+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=pft8z namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.343786862+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=8X2ja namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.349235395+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=8T3cZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.349627605+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=yhZ3t namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.433740168+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=y39gQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.433829601+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=YPxBU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.444247352+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=I+/MC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.444432977+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=7M+23 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.449945711+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vOn0V namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.452898068+00:00 stderr F 
time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=nq14u namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.454869069+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=MD7UZ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.458333250+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=2rO/3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.462188220+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=yjfal namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.466356668+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=bkdER namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.467579371+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=COjAE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.473105075+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=dvEJj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.473217648+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=aXobX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.479051710+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=h+mwv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.479289276+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=CuZbM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.547485015+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=I5GyE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.548242015+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=VHjpP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.648529520+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=isIk0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.650437570+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=yBwNI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.657577497+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=7zrI8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.659729172+00:00 stderr F 
time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=CSopc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.665595806+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=MdWDt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.666184861+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=uTzL2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.673844640+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=vqyM6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.673924862+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Ksf13 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.680199096+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=hEVaW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.681441508+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=tlMJp namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.748540319+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=aywRR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.750117290+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=9jYTa namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.755524981+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=rIl2B namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.756703932+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=RUcPN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.763020096+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=/Ts9g namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.763526699+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=lrCgn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.768981971+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=sGe9J namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.770658726+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=CVHy2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.776201270+00:00 stderr F 
time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=f2wdt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.777967957+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Glyxd namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.783171431+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ZHNlj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.851490584+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ADE6E namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.856142976+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=XJkZl namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.857545922+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=UdzuO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.951912043+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=eXbBT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.952344255+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=a+CBR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.961535784+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=51lIG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.963461705+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=1JHeX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.969428180+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=buqvh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.971220537+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=iu/tc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.977181553+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=+PwcH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.977469750+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=yllIF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.983256791+00:00 stderr F time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=x90gJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:02.983682112+00:00 stderr F 
time="2025-12-08T17:57:02Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=90+3Y namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.051604013+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=T0Z4u namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.051654264+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=afKwF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.062290962+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=fkmuP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.063357129+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=zkIsY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.069049159+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=bod2O namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.069426278+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Zj3se namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.075261080+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=HSZYS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.076775099+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=3Ktvu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.152947416+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=YD1NB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.152947416+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Hs/fj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.161098089+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=LhaBv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.161770657+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=kcKE9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.167506306+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=KBzSO namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.167506306+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=B2Ipz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.252629966+00:00 stderr F 
time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=w4DGc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.355239283+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=xWpyV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.355902740+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=l5HuG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.367916883+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=XBwFR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.368340314+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=rvM2H namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.374621289+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=lAQ1b namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.375898541+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=MZZGH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.379906695+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=n9nfe namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.381028505+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=1FOLF namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.387319029+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Nb/HY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.387894644+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=mUhmB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.456131634+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=oIEb7 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.460116618+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=VZnzn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.466168646+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=moHJv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.466370621+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=OR0e6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.473334733+00:00 stderr F 
time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=c+d6d namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.476833854+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=s1Dcu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.556239485+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=HG2TI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.556239485+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vpuTn namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.567001506+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=n3lO8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.567001506+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=6WRz9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.573172866+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Xsx3C namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.575344273+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=qr7Jj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.581556405+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=GyPkN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.581587486+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=ssLcb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.588758963+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=evAY5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.589472782+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=9LG/5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.594507153+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=UGdlG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.596774313+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=LF0S+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.600698325+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=iOrsf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.603288772+00:00 stderr F 
time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=qqlo+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.607529163+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=zgFl6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.608310173+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=UIAk5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.612865762+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=uTZfy namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.620304656+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:03Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"4b77d515-aad9-40c6-87f9-83ab846f4498"} 2025-12-08T17:57:03.620336017+00:00 stderr F {"level":"error","ts":"2025-12-08T17:57:03Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"4b77d515-aad9-40c6-87f9-83ab846f4498","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"} 2025-12-08T17:57:03.656319745+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=qon0z namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.667546048+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=RavWq namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:03.667837726+00:00 stderr F time="2025-12-08T17:57:03Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 
2025-12-08T17:57:05.002360773+00:00 stderr F {"level":"info","ts":"2025-12-08T17:57:05Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"be0456c0-2617-4320-a668-714008efd67a"}
2025-12-08T17:57:05.002509917+00:00 stderr F {"level":"error","ts":"2025-12-08T17:57:05Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"be0456c0-2617-4320-a668-714008efd67a","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"}
time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=884bv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.108448280+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=dCRyS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.114030505+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=lC4WN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.118903152+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=MdaGW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.120277918+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=vkoOi namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.125150224+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=StaOj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.128154874+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Hw+yX namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.136435220+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=NjKIh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.140764122+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=oNABS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.148778671+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Qw97U namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.150565548+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=G/t/k namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.185530170+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=Az7he namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.193305863+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=NZcK3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.196592948+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=84hve namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.201300671+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=BIPzH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.214682930+00:00 stderr F 
time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=mULE1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.215267726+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=AuQW/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.228486150+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=+XLXb namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.231578020+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=J0FQA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.240946185+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=AWAQM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.241577092+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=7Rw/4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.248993785+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=gLhOT namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.248993785+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=sYVT+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.255503135+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=9Jaq9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.258933904+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=bx6Hu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.261236614+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=4K8iV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.266382329+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=eTwPx namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.270150557+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ADVbG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.286424371+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=m8d1z namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.295896158+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=D5j7k namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.299454201+00:00 stderr F 
time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=LYDIH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.302483260+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Yo140 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.304096522+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=3F0eV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.309370949+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=fe1Rt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.309420591+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=4Mbmw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.314449912+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=ULvbh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.316175947+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=9Ze8r namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.319274408+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=uEXDR namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.320840848+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=80Whv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.326473055+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=6Z3/B namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.326473055+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=x782A namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.332020920+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=BW2up namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.332300367+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=WmcZf namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.401224735+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=lJlpC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.402397296+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=5n1Lv namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.407159950+00:00 stderr F time="2025-12-08T17:57:05Z" 
level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=WkNI0 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.409805749+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=ASZdE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.501526612+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=iJuC2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.501526612+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=uEkj+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.506714677+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=iqpV2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.509164941+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=24n41 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.512864097+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=VjJDL namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.513461643+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=do4J9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.602597598+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=yZ0KG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.602699820+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=wa7/t namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.608675656+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=OR+hm namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.611258343+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=sTPP8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.613946694+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=p9lso namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.616176962+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=nq9WG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.711571740+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=rpHvN namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.711618561+00:00 stderr F time="2025-12-08T17:57:05Z" level=info 
msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=lj+kj namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.812187784+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=51RR8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.812230556+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=pWf2L namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.818540650+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=qSgxM namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.819017582+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=yrtNI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.825186903+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=z0Bre namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.826466027+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=KL86F namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.835126663+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=7FYx4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.843434989+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=dV1m3 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.845818202+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Pruqp namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.846908120+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=xzdXY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.912498461+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=clXZg namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.912585753+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=DjUrw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.920697504+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Jc6cE namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.920784616+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=s2pHQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.925730785+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy 
successful" csv=service-telemetry-operator.v1.5.1765147436 id=tZo1/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.927083791+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=sjkQ+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.932297837+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=fZtFu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:05.935526041+00:00 stderr F time="2025-12-08T17:57:05Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=uf4j5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.013423733+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=SLTf1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.013423733+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=wXA+D namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.020114367+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=eN66a namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.020493377+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Xc1Ni namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.025718113+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=35Z3t namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.026692559+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=4ICIc namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.030679143+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=atbUJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.031354340+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=uc6Oz namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.035500798+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=XS8nU namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.036072814+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=7S7Ad namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.042451630+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Z6DKB namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.043208019+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" 
csv=service-telemetry-operator.v1.5.1765147436 id=2DOzG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.047789869+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=gOBFr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.049005551+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=6zwXC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.113535834+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=t/PF1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.114345295+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=cYclG namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.120701311+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=a/HDh namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.121576363+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=WTyN1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.214024875+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=Kz+0K namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.214143588+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=nit2H namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.314043394+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=KnayA namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.315212814+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=jf3Ri namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.323451859+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=kqSzp namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.323969692+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=RiFUV namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.327499445+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=7uO5/ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.424414982+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=PiNTW namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.525087378+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" 
csv=amq7-interconnect-operator.v1.10.20 id=77DWw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.526416253+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=6J4ve namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.532427690+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=lQ9R6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.534802712+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=IJ+Kr namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.540550492+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=EUnP+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.541488826+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=vVK9r namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.547992815+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Pcf0S namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.550697576+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=CMLVD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.557415981+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=+WhO1 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.561023515+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Uj1eY namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.568119121+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=FAc2e namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.572917035+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=H8got namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.575182794+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=+ENt4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.628580117+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=MS1DD namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.628653559+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=TmP7G namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.727078466+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" 
csv=smart-gateway-operator.v5.0.1765147433 id=cu6p2 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.727157298+00:00 stderr F I1208 17:57:06.727101 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"service-telemetry-operator.v1.5.1765147436", UID:"768e4f34-61bc-41d7-a3c5-f46daeb2b4b8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43899", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' install strategy completed with no errors 2025-12-08T17:57:06.727192289+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=service-telemetry-operator.v1.5.1765147436 id=nEeVC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.827461934+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=amq7-interconnect-operator.v1.10.20 id=grDDQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:06.827580987+00:00 stderr F I1208 17:57:06.827540 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"amq7-interconnect-operator.v1.10.20", UID:"e07dae70-f82c-4987-ac58-3cbd781597ee", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43827", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' install strategy completed with no errors 2025-12-08T17:57:06.928572252+00:00 stderr F time="2025-12-08T17:57:06Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=1fyGu namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.044267250+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=1l7RP namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.143686322+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=XF36o namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.148859837+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=OaDgI namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.154069284+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=1BDD6 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.244969774+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=FMdJC namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.251888535+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=cMSMo namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.257451420+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=fyLEp namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.262817190+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=eiWQM namespace=service-telemetry phase=Installing 
strategy=deployment 2025-12-08T17:57:07.269953966+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=1pBWH namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.345202488+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=hzWq9 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.350580559+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=qKjTw namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.355163259+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=T8963 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.360235410+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Ds7+Y namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.364930613+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=FTnBQ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.369719808+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=Lv7+5 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.375052987+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=V1QDS namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.379367319+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=cAVi+ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.453021030+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=JHD7g namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.459521570+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=alTJ8 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.556080638+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=2rZTJ namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.569405156+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=pmO+a namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.582766175+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=X8Ozt namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.654460004+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=U71BE namespace=service-telemetry phase=Installing strategy=deployment 
2025-12-08T17:57:07.664677841+00:00 stderr F time="2025-12-08T17:57:07Z" level=info msg="install strategy successful" csv=smart-gateway-operator.v5.0.1765147433 id=b71R4 namespace=service-telemetry phase=Installing strategy=deployment 2025-12-08T17:57:07.664837435+00:00 stderr F I1208 17:57:07.664803 1 event.go:377] Event(v1.ObjectReference{Kind:"ClusterServiceVersion", Namespace:"service-telemetry", Name:"smart-gateway-operator.v5.0.1765147433", UID:"07dc000f-105f-4a6b-a43d-3e7203e60ae8", APIVersion:"operators.coreos.com/v1alpha1", ResourceVersion:"43953", FieldPath:""}): type: 'Normal' reason: 'InstallSucceeded' install strategy completed with no errors 2025-12-08T18:02:04.151596953+00:00 stderr F {"level":"info","ts":"2025-12-08T18:02:04Z","msg":"Warning: Reconciler returned both a non-zero result and a non-nil error. The result will always be ignored if the error is non-nil and the non-nil error causes requeuing with exponential backoff. For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"9ca9ca41-6ab7-41ef-802c-ad27a76306b0"} 2025-12-08T18:02:04.151651214+00:00 stderr F {"level":"error","ts":"2025-12-08T18:02:04Z","msg":"Reconciler error","controller":"clusteroperator","controllerGroup":"config.openshift.io","controllerKind":"ClusterOperator","ClusterOperator":{"name":"operator-lifecycle-manager"},"namespace":"","name":"operator-lifecycle-manager","reconcileID":"9ca9ca41-6ab7-41ef-802c-ad27a76306b0","error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again","errorCauses":[{"error":"Operation cannot be fulfilled on clusteroperators.config.openshift.io \"operator-lifecycle-manager\": the object has been modified; please apply your changes to the latest version and try again"}],"stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:353\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:300\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func2.1\n\t/build/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:202"}
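Editor's note: the warning and error entries above describe generic controller-runtime and Kubernetes API behaviour rather than anything specific to this job. A Reconcile call that returns a non-nil error has its Result ignored and is requeued with exponential backoff, and the "the object has been modified; please apply your changes to the latest version and try again" message is the apiserver's optimistic-concurrency check rejecting an update made against a stale resourceVersion. The Go sketch below is illustrative only: it is not taken from the operator-lifecycle-manager sources, and the reconciler type and the ClusterOperator status update are assumptions chosen to match the log fields. It shows the usual pattern for handling both situations: retry the write on conflict, and let the returned error drive requeuing.

// Minimal sketch, assuming controller-runtime and the OpenShift config API
// (github.com/openshift/api/config/v1) are on the module path. This is NOT the
// OLM operator's actual reconciler; it only illustrates the pattern the log
// messages refer to.
package sketch

import (
	"context"

	configv1 "github.com/openshift/api/config/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/util/retry"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type clusterOperatorReconciler struct {
	client.Client
}

func (r *clusterOperatorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// RetryOnConflict re-fetches the object and retries the write whenever the
	// apiserver answers with a conflict ("the object has been modified ...").
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		var co configv1.ClusterOperator
		if err := r.Get(ctx, req.NamespacedName, &co); err != nil {
			return err
		}
		// ... mutate co.Status conditions here ...
		return r.Status().Update(ctx, &co)
	})
	if err != nil && !apierrors.IsConflict(err) {
		// Non-conflict failure: return only the error. Returning a non-zero
		// Result together with a non-nil error is what triggers the warning
		// seen in the log, because controller-runtime ignores the Result then.
		return ctrl.Result{}, err
	}
	// A remaining (conflict) error here is requeued with exponential backoff.
	return ctrl.Result{}, err
}

Retrying with client-go's retry.RetryOnConflict is usually preferable to returning the conflict error directly, because it re-reads the latest resourceVersion immediately instead of waiting for the backoff requeue; an occasional conflict like the one in this log is benign either way.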
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver/0.log
2025-12-08T17:46:03.784155220+00:00 stdout F flock: getting lock took 0.000009 seconds 2025-12-08T17:46:03.784552532+00:00 stdout F Copying system trust bundle ... 2025-12-08T17:46:03.801868263+00:00 stderr F I1208 17:46:03.801751 1 loader.go:402] Config loaded from file: /etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-cert-syncer-kubeconfig/kubeconfig 2025-12-08T17:46:03.802209383+00:00 stderr F I1208 17:46:03.802154 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:46:03.802209383+00:00 stderr F I1208 17:46:03.802181 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:46:03.802209383+00:00 stderr F I1208 17:46:03.802189 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:46:03.802209383+00:00 stderr F I1208 17:46:03.802195 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:46:03.802209383+00:00 stderr F I1208 17:46:03.802201 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:46:03.802607354+00:00 stderr F Copying termination logs to "/var/log/kube-apiserver/termination.log" 2025-12-08T17:46:03.802719478+00:00 stderr F I1208 17:46:03.802670 1 main.go:161] Touching termination lock file "/var/log/kube-apiserver/.terminating" 2025-12-08T17:46:03.803432069+00:00 stderr F I1208 17:46:03.803290 1 main.go:219] Launching sub-process "/usr/bin/hyperkube kube-apiserver --openshift-config=/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml --advertise-address=192.168.126.11 -v=2 --permit-address-sharing" 2025-12-08T17:46:03.922459502+00:00 stderr F Flag --openshift-config has been deprecated, to be removed 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922393 12 flags.go:64] FLAG: --admission-control="[]" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922468 12 flags.go:64] FLAG: --admission-control-config-file="" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922474 12 flags.go:64] FLAG: --advertise-address="192.168.126.11" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922480 12 flags.go:64] FLAG: --aggregator-reject-forwarding-redirect="true" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922486 12 flags.go:64] FLAG: --allow-metric-labels="[]" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922492 12 flags.go:64] FLAG: --allow-metric-labels-manifest="" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922498 12 flags.go:64] FLAG: --allow-privileged="false" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922501 12 flags.go:64] FLAG: --anonymous-auth="true" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922513 12 flags.go:64] FLAG: --api-audiences="[]" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922518 12 flags.go:64] FLAG: --apiserver-count="1" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922523 12 flags.go:64] FLAG: --audit-log-batch-buffer-size="10000"
2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922527 12 flags.go:64] FLAG: --audit-log-batch-max-size="1" 2025-12-08T17:46:03.922551555+00:00 stderr F I1208 17:46:03.922531 12 flags.go:64] FLAG: --audit-log-batch-max-wait="0s" 2025-12-08T17:46:03.922580046+00:00 stderr F I1208 17:46:03.922537 12 flags.go:64] FLAG: --audit-log-batch-throttle-burst="0" 2025-12-08T17:46:03.922580046+00:00 stderr F I1208 17:46:03.922540 12 flags.go:64] FLAG: --audit-log-batch-throttle-enable="false" 2025-12-08T17:46:03.922580046+00:00 stderr F I1208 17:46:03.922544 12 flags.go:64] FLAG: --audit-log-batch-throttle-qps="0" 2025-12-08T17:46:03.922580046+00:00 stderr F I1208 17:46:03.922549 12 flags.go:64] FLAG: --audit-log-compress="false" 2025-12-08T17:46:03.922580046+00:00 stderr F I1208 17:46:03.922553 12 flags.go:64] FLAG: --audit-log-format="json" 2025-12-08T17:46:03.922580046+00:00 stderr F I1208 17:46:03.922557 12 flags.go:64] FLAG: --audit-log-maxage="0" 2025-12-08T17:46:03.922580046+00:00 stderr F I1208 17:46:03.922560 12 flags.go:64] FLAG: --audit-log-maxbackup="0" 2025-12-08T17:46:03.922580046+00:00 stderr F I1208 17:46:03.922564 12 flags.go:64] FLAG: --audit-log-maxsize="0" 2025-12-08T17:46:03.922580046+00:00 stderr F I1208 17:46:03.922568 12 flags.go:64] FLAG: --audit-log-mode="blocking" 2025-12-08T17:46:03.922580046+00:00 stderr F I1208 17:46:03.922572 12 flags.go:64] FLAG: --audit-log-path="" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922575 12 flags.go:64] FLAG: --audit-log-truncate-enabled="false" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922580 12 flags.go:64] FLAG: --audit-log-truncate-max-batch-size="10485760" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922586 12 flags.go:64] FLAG: --audit-log-truncate-max-event-size="102400" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922590 12 flags.go:64] FLAG: --audit-log-version="audit.k8s.io/v1" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922594 12 flags.go:64] FLAG: --audit-policy-file="" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922599 12 flags.go:64] FLAG: --audit-webhook-batch-buffer-size="10000" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922603 12 flags.go:64] FLAG: --audit-webhook-batch-initial-backoff="10s" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922607 12 flags.go:64] FLAG: --audit-webhook-batch-max-size="400" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922610 12 flags.go:64] FLAG: --audit-webhook-batch-max-wait="30s" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922614 12 flags.go:64] FLAG: --audit-webhook-batch-throttle-burst="15" 2025-12-08T17:46:03.922627457+00:00 stderr F I1208 17:46:03.922618 12 flags.go:64] FLAG: --audit-webhook-batch-throttle-enable="true" 2025-12-08T17:46:03.922655738+00:00 stderr F I1208 17:46:03.922622 12 flags.go:64] FLAG: --audit-webhook-batch-throttle-qps="10" 2025-12-08T17:46:03.922655738+00:00 stderr F I1208 17:46:03.922627 12 flags.go:64] FLAG: --audit-webhook-config-file="" 2025-12-08T17:46:03.922655738+00:00 stderr F I1208 17:46:03.922631 12 flags.go:64] FLAG: --audit-webhook-initial-backoff="10s" 2025-12-08T17:46:03.922655738+00:00 stderr F I1208 17:46:03.922634 12 flags.go:64] FLAG: --audit-webhook-mode="batch" 2025-12-08T17:46:03.922655738+00:00 stderr F I1208 17:46:03.922638 12 flags.go:64] FLAG: --audit-webhook-truncate-enabled="false" 2025-12-08T17:46:03.922655738+00:00 stderr F I1208 17:46:03.922643 12 flags.go:64] FLAG: 
--audit-webhook-truncate-max-batch-size="10485760" 2025-12-08T17:46:03.922679979+00:00 stderr F I1208 17:46:03.922647 12 flags.go:64] FLAG: --audit-webhook-truncate-max-event-size="102400" 2025-12-08T17:46:03.922679979+00:00 stderr F I1208 17:46:03.922651 12 flags.go:64] FLAG: --audit-webhook-version="audit.k8s.io/v1" 2025-12-08T17:46:03.922679979+00:00 stderr F I1208 17:46:03.922655 12 flags.go:64] FLAG: --authentication-config="" 2025-12-08T17:46:03.922679979+00:00 stderr F I1208 17:46:03.922658 12 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" 2025-12-08T17:46:03.922679979+00:00 stderr F I1208 17:46:03.922662 12 flags.go:64] FLAG: --authentication-token-webhook-config-file="" 2025-12-08T17:46:03.922679979+00:00 stderr F I1208 17:46:03.922666 12 flags.go:64] FLAG: --authentication-token-webhook-version="v1beta1" 2025-12-08T17:46:03.922704219+00:00 stderr F I1208 17:46:03.922669 12 flags.go:64] FLAG: --authorization-config="" 2025-12-08T17:46:03.922704219+00:00 stderr F I1208 17:46:03.922673 12 flags.go:64] FLAG: --authorization-mode="[]" 2025-12-08T17:46:03.922704219+00:00 stderr F I1208 17:46:03.922677 12 flags.go:64] FLAG: --authorization-policy-file="" 2025-12-08T17:46:03.922704219+00:00 stderr F I1208 17:46:03.922681 12 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" 2025-12-08T17:46:03.922704219+00:00 stderr F I1208 17:46:03.922686 12 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" 2025-12-08T17:46:03.922704219+00:00 stderr F I1208 17:46:03.922689 12 flags.go:64] FLAG: --authorization-webhook-config-file="" 2025-12-08T17:46:03.922704219+00:00 stderr F I1208 17:46:03.922694 12 flags.go:64] FLAG: --authorization-webhook-version="v1beta1" 2025-12-08T17:46:03.922740250+00:00 stderr F I1208 17:46:03.922698 12 flags.go:64] FLAG: --bind-address="0.0.0.0" 2025-12-08T17:46:03.922740250+00:00 stderr F I1208 17:46:03.922702 12 flags.go:64] FLAG: --cert-dir="/var/run/kubernetes" 2025-12-08T17:46:03.922740250+00:00 stderr F I1208 17:46:03.922706 12 flags.go:64] FLAG: --client-ca-file="" 2025-12-08T17:46:03.922740250+00:00 stderr F I1208 17:46:03.922710 12 flags.go:64] FLAG: --contention-profiling="false" 2025-12-08T17:46:03.922740250+00:00 stderr F I1208 17:46:03.922714 12 flags.go:64] FLAG: --cors-allowed-origins="[]" 2025-12-08T17:46:03.922740250+00:00 stderr F I1208 17:46:03.922726 12 flags.go:64] FLAG: --debug-socket-path="" 2025-12-08T17:46:03.922764521+00:00 stderr F I1208 17:46:03.922730 12 flags.go:64] FLAG: --default-not-ready-toleration-seconds="300" 2025-12-08T17:46:03.922764521+00:00 stderr F I1208 17:46:03.922741 12 flags.go:64] FLAG: --default-unreachable-toleration-seconds="300" 2025-12-08T17:46:03.922764521+00:00 stderr F I1208 17:46:03.922746 12 flags.go:64] FLAG: --default-watch-cache-size="100" 2025-12-08T17:46:03.922764521+00:00 stderr F I1208 17:46:03.922753 12 flags.go:64] FLAG: --delete-collection-workers="1" 2025-12-08T17:46:03.922787232+00:00 stderr F I1208 17:46:03.922757 12 flags.go:64] FLAG: --disable-admission-plugins="[]" 2025-12-08T17:46:03.922787232+00:00 stderr F I1208 17:46:03.922763 12 flags.go:64] FLAG: --disable-http2-serving="false" 2025-12-08T17:46:03.922787232+00:00 stderr F I1208 17:46:03.922768 12 flags.go:64] FLAG: --disabled-metrics="[]" 2025-12-08T17:46:03.922787232+00:00 stderr F I1208 17:46:03.922775 12 flags.go:64] FLAG: --egress-selector-config-file="" 2025-12-08T17:46:03.922809512+00:00 stderr F I1208 17:46:03.922780 12 flags.go:64] FLAG: --emulated-version="[]" 
2025-12-08T17:46:03.922809512+00:00 stderr F I1208 17:46:03.922785 12 flags.go:64] FLAG: --emulation-forward-compatible="false" 2025-12-08T17:46:03.922809512+00:00 stderr F I1208 17:46:03.922791 12 flags.go:64] FLAG: --enable-admission-plugins="[]" 2025-12-08T17:46:03.922809512+00:00 stderr F I1208 17:46:03.922797 12 flags.go:64] FLAG: --enable-aggregator-routing="false" 2025-12-08T17:46:03.922831863+00:00 stderr F I1208 17:46:03.922801 12 flags.go:64] FLAG: --enable-bootstrap-token-auth="false" 2025-12-08T17:46:03.922831863+00:00 stderr F I1208 17:46:03.922806 12 flags.go:64] FLAG: --enable-garbage-collector="true" 2025-12-08T17:46:03.922831863+00:00 stderr F I1208 17:46:03.922811 12 flags.go:64] FLAG: --enable-logs-handler="false" 2025-12-08T17:46:03.922831863+00:00 stderr F I1208 17:46:03.922817 12 flags.go:64] FLAG: --enable-priority-and-fairness="true" 2025-12-08T17:46:03.922831863+00:00 stderr F I1208 17:46:03.922821 12 flags.go:64] FLAG: --encryption-provider-config="" 2025-12-08T17:46:03.922855294+00:00 stderr F I1208 17:46:03.922826 12 flags.go:64] FLAG: --encryption-provider-config-automatic-reload="false" 2025-12-08T17:46:03.922855294+00:00 stderr F I1208 17:46:03.922833 12 flags.go:64] FLAG: --endpoint-reconciler-type="lease" 2025-12-08T17:46:03.922855294+00:00 stderr F I1208 17:46:03.922837 12 flags.go:64] FLAG: --etcd-cafile="" 2025-12-08T17:46:03.922855294+00:00 stderr F I1208 17:46:03.922841 12 flags.go:64] FLAG: --etcd-certfile="" 2025-12-08T17:46:03.922915146+00:00 stderr F I1208 17:46:03.922845 12 flags.go:64] FLAG: --etcd-compaction-interval="5m0s" 2025-12-08T17:46:03.922915146+00:00 stderr F I1208 17:46:03.922849 12 flags.go:64] FLAG: --etcd-count-metric-poll-period="1m0s" 2025-12-08T17:46:03.922915146+00:00 stderr F I1208 17:46:03.922855 12 flags.go:64] FLAG: --etcd-db-metric-poll-interval="30s" 2025-12-08T17:46:03.922915146+00:00 stderr F I1208 17:46:03.922859 12 flags.go:64] FLAG: --etcd-healthcheck-timeout="2s" 2025-12-08T17:46:03.922915146+00:00 stderr F I1208 17:46:03.922863 12 flags.go:64] FLAG: --etcd-keyfile="" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922866 12 flags.go:64] FLAG: --etcd-prefix="/registry" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922870 12 flags.go:64] FLAG: --etcd-readycheck-timeout="2s" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922897 12 flags.go:64] FLAG: --etcd-servers="[]" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922902 12 flags.go:64] FLAG: --etcd-servers-overrides="[]" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922907 12 flags.go:64] FLAG: --event-ttl="1h0m0s" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922911 12 flags.go:64] FLAG: --external-hostname="" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922914 12 flags.go:64] FLAG: --feature-gates="" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922922 12 flags.go:64] FLAG: --goaway-chance="0" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922927 12 flags.go:64] FLAG: --help="false" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922931 12 flags.go:64] FLAG: --http2-max-streams-per-connection="0" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922935 12 flags.go:64] FLAG: --kubelet-certificate-authority="" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922939 12 flags.go:64] FLAG: --kubelet-client-certificate="" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922943 12 flags.go:64] FLAG: 
--kubelet-client-key="" 2025-12-08T17:46:03.922961197+00:00 stderr F I1208 17:46:03.922946 12 flags.go:64] FLAG: --kubelet-port="10250" 2025-12-08T17:46:03.922993148+00:00 stderr F I1208 17:46:03.922952 12 flags.go:64] FLAG: --kubelet-preferred-address-types="[Hostname,InternalDNS,InternalIP,ExternalDNS,ExternalIP]" 2025-12-08T17:46:03.922993148+00:00 stderr F I1208 17:46:03.922959 12 flags.go:64] FLAG: --kubelet-read-only-port="10255" 2025-12-08T17:46:03.922993148+00:00 stderr F I1208 17:46:03.922962 12 flags.go:64] FLAG: --kubelet-timeout="5s" 2025-12-08T17:46:03.922993148+00:00 stderr F I1208 17:46:03.922966 12 flags.go:64] FLAG: --kubernetes-service-node-port="0" 2025-12-08T17:46:03.922993148+00:00 stderr F I1208 17:46:03.922970 12 flags.go:64] FLAG: --lease-reuse-duration-seconds="60" 2025-12-08T17:46:03.922993148+00:00 stderr F I1208 17:46:03.922974 12 flags.go:64] FLAG: --livez-grace-period="0s" 2025-12-08T17:46:03.922993148+00:00 stderr F I1208 17:46:03.922978 12 flags.go:64] FLAG: --log-flush-frequency="5s" 2025-12-08T17:46:03.923013819+00:00 stderr F I1208 17:46:03.922982 12 flags.go:64] FLAG: --log-json-info-buffer-size="0" 2025-12-08T17:46:03.923013819+00:00 stderr F I1208 17:46:03.922989 12 flags.go:64] FLAG: --log-json-split-stream="false" 2025-12-08T17:46:03.923013819+00:00 stderr F I1208 17:46:03.922993 12 flags.go:64] FLAG: --log-text-info-buffer-size="0" 2025-12-08T17:46:03.923013819+00:00 stderr F I1208 17:46:03.922996 12 flags.go:64] FLAG: --log-text-split-stream="false" 2025-12-08T17:46:03.923013819+00:00 stderr F I1208 17:46:03.923000 12 flags.go:64] FLAG: --logging-format="text" 2025-12-08T17:46:03.923013819+00:00 stderr F I1208 17:46:03.923005 12 flags.go:64] FLAG: --max-connection-bytes-per-sec="0" 2025-12-08T17:46:03.923033129+00:00 stderr F I1208 17:46:03.923008 12 flags.go:64] FLAG: --max-mutating-requests-inflight="200" 2025-12-08T17:46:03.923033129+00:00 stderr F I1208 17:46:03.923012 12 flags.go:64] FLAG: --max-requests-inflight="400" 2025-12-08T17:46:03.923033129+00:00 stderr F I1208 17:46:03.923016 12 flags.go:64] FLAG: --min-request-timeout="1800" 2025-12-08T17:46:03.923033129+00:00 stderr F I1208 17:46:03.923020 12 flags.go:64] FLAG: --oidc-ca-file="" 2025-12-08T17:46:03.923033129+00:00 stderr F I1208 17:46:03.923025 12 flags.go:64] FLAG: --oidc-client-id="" 2025-12-08T17:46:03.923060360+00:00 stderr F I1208 17:46:03.923028 12 flags.go:64] FLAG: --oidc-groups-claim="" 2025-12-08T17:46:03.923060360+00:00 stderr F I1208 17:46:03.923032 12 flags.go:64] FLAG: --oidc-groups-prefix="" 2025-12-08T17:46:03.923060360+00:00 stderr F I1208 17:46:03.923035 12 flags.go:64] FLAG: --oidc-issuer-url="" 2025-12-08T17:46:03.923060360+00:00 stderr F I1208 17:46:03.923039 12 flags.go:64] FLAG: --oidc-required-claim="" 2025-12-08T17:46:03.923060360+00:00 stderr F I1208 17:46:03.923044 12 flags.go:64] FLAG: --oidc-signing-algs="[RS256]" 2025-12-08T17:46:03.923060360+00:00 stderr F I1208 17:46:03.923050 12 flags.go:64] FLAG: --oidc-username-claim="sub" 2025-12-08T17:46:03.923079661+00:00 stderr F I1208 17:46:03.923054 12 flags.go:64] FLAG: --oidc-username-prefix="" 2025-12-08T17:46:03.923079661+00:00 stderr F I1208 17:46:03.923057 12 flags.go:64] FLAG: --openshift-config="/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml" 2025-12-08T17:46:03.923079661+00:00 stderr F I1208 17:46:03.923063 12 flags.go:64] FLAG: --peer-advertise-ip="" 2025-12-08T17:46:03.923079661+00:00 stderr F I1208 17:46:03.923066 12 flags.go:64] FLAG: --peer-advertise-port="" 
2025-12-08T17:46:03.923079661+00:00 stderr F I1208 17:46:03.923070 12 flags.go:64] FLAG: --peer-ca-file="" 2025-12-08T17:46:03.923098451+00:00 stderr F I1208 17:46:03.923073 12 flags.go:64] FLAG: --permit-address-sharing="true" 2025-12-08T17:46:03.923098451+00:00 stderr F I1208 17:46:03.923077 12 flags.go:64] FLAG: --permit-port-sharing="false" 2025-12-08T17:46:03.923098451+00:00 stderr F I1208 17:46:03.923081 12 flags.go:64] FLAG: --profiling="true" 2025-12-08T17:46:03.923098451+00:00 stderr F I1208 17:46:03.923085 12 flags.go:64] FLAG: --proxy-client-cert-file="" 2025-12-08T17:46:03.923098451+00:00 stderr F I1208 17:46:03.923089 12 flags.go:64] FLAG: --proxy-client-key-file="" 2025-12-08T17:46:03.923117142+00:00 stderr F I1208 17:46:03.923092 12 flags.go:64] FLAG: --request-timeout="1m0s" 2025-12-08T17:46:03.923117142+00:00 stderr F I1208 17:46:03.923096 12 flags.go:64] FLAG: --requestheader-allowed-names="[]" 2025-12-08T17:46:03.923117142+00:00 stderr F I1208 17:46:03.923102 12 flags.go:64] FLAG: --requestheader-client-ca-file="" 2025-12-08T17:46:03.923117142+00:00 stderr F I1208 17:46:03.923105 12 flags.go:64] FLAG: --requestheader-extra-headers-prefix="[]" 2025-12-08T17:46:03.923135262+00:00 stderr F I1208 17:46:03.923110 12 flags.go:64] FLAG: --requestheader-group-headers="[]" 2025-12-08T17:46:03.923135262+00:00 stderr F I1208 17:46:03.923115 12 flags.go:64] FLAG: --requestheader-uid-headers="[]" 2025-12-08T17:46:03.923135262+00:00 stderr F I1208 17:46:03.923119 12 flags.go:64] FLAG: --requestheader-username-headers="[]" 2025-12-08T17:46:03.923152743+00:00 stderr F I1208 17:46:03.923124 12 flags.go:64] FLAG: --runtime-config="" 2025-12-08T17:46:03.923152743+00:00 stderr F I1208 17:46:03.923129 12 flags.go:64] FLAG: --runtime-config-emulation-forward-compatible="false" 2025-12-08T17:46:03.923152743+00:00 stderr F I1208 17:46:03.923133 12 flags.go:64] FLAG: --secure-port="6443" 2025-12-08T17:46:03.923152743+00:00 stderr F I1208 17:46:03.923137 12 flags.go:64] FLAG: --send-retry-after-while-not-ready-once="false" 2025-12-08T17:46:03.923152743+00:00 stderr F I1208 17:46:03.923141 12 flags.go:64] FLAG: --service-account-extend-token-expiration="true" 2025-12-08T17:46:03.923171333+00:00 stderr F I1208 17:46:03.923144 12 flags.go:64] FLAG: --service-account-issuer="[]" 2025-12-08T17:46:03.923171333+00:00 stderr F I1208 17:46:03.923150 12 flags.go:64] FLAG: --service-account-jwks-uri="" 2025-12-08T17:46:03.923171333+00:00 stderr F I1208 17:46:03.923154 12 flags.go:64] FLAG: --service-account-key-file="[]" 2025-12-08T17:46:03.923171333+00:00 stderr F I1208 17:46:03.923158 12 flags.go:64] FLAG: --service-account-lookup="true" 2025-12-08T17:46:03.923197094+00:00 stderr F I1208 17:46:03.923162 12 flags.go:64] FLAG: --service-account-max-token-expiration="0s" 2025-12-08T17:46:03.923197094+00:00 stderr F I1208 17:46:03.923166 12 flags.go:64] FLAG: --service-account-signing-endpoint="" 2025-12-08T17:46:03.923197094+00:00 stderr F I1208 17:46:03.923170 12 flags.go:64] FLAG: --service-account-signing-key-file="" 2025-12-08T17:46:03.923197094+00:00 stderr F I1208 17:46:03.923174 12 flags.go:64] FLAG: --service-cluster-ip-range="" 2025-12-08T17:46:03.923197094+00:00 stderr F I1208 17:46:03.923177 12 flags.go:64] FLAG: --service-node-port-range="30000-32767" 2025-12-08T17:46:03.923197094+00:00 stderr F I1208 17:46:03.923185 12 flags.go:64] FLAG: --show-hidden-metrics-for-version="" 2025-12-08T17:46:03.923216445+00:00 stderr F I1208 17:46:03.923190 12 flags.go:64] FLAG: --shutdown-delay-duration="0s" 
2025-12-08T17:46:03.923216445+00:00 stderr F I1208 17:46:03.923194 12 flags.go:64] FLAG: --shutdown-send-retry-after="false" 2025-12-08T17:46:03.923216445+00:00 stderr F I1208 17:46:03.923198 12 flags.go:64] FLAG: --shutdown-watch-termination-grace-period="0s" 2025-12-08T17:46:03.923216445+00:00 stderr F I1208 17:46:03.923202 12 flags.go:64] FLAG: --storage-backend="" 2025-12-08T17:46:03.923216445+00:00 stderr F I1208 17:46:03.923206 12 flags.go:64] FLAG: --storage-initialization-timeout="1m0s" 2025-12-08T17:46:03.923234945+00:00 stderr F I1208 17:46:03.923210 12 flags.go:64] FLAG: --storage-media-type="application/vnd.kubernetes.protobuf" 2025-12-08T17:46:03.923234945+00:00 stderr F I1208 17:46:03.923214 12 flags.go:64] FLAG: --strict-transport-security-directives="[]" 2025-12-08T17:46:03.923234945+00:00 stderr F I1208 17:46:03.923219 12 flags.go:64] FLAG: --tls-cert-file="" 2025-12-08T17:46:03.923234945+00:00 stderr F I1208 17:46:03.923223 12 flags.go:64] FLAG: --tls-cipher-suites="[]" 2025-12-08T17:46:03.923253996+00:00 stderr F I1208 17:46:03.923227 12 flags.go:64] FLAG: --tls-min-version="" 2025-12-08T17:46:03.923253996+00:00 stderr F I1208 17:46:03.923231 12 flags.go:64] FLAG: --tls-private-key-file="" 2025-12-08T17:46:03.923253996+00:00 stderr F I1208 17:46:03.923235 12 flags.go:64] FLAG: --tls-sni-cert-key="[]" 2025-12-08T17:46:03.923253996+00:00 stderr F I1208 17:46:03.923240 12 flags.go:64] FLAG: --token-auth-file="" 2025-12-08T17:46:03.923253996+00:00 stderr F I1208 17:46:03.923244 12 flags.go:64] FLAG: --tracing-config-file="" 2025-12-08T17:46:03.923272766+00:00 stderr F I1208 17:46:03.923247 12 flags.go:64] FLAG: --v="2" 2025-12-08T17:46:03.923272766+00:00 stderr F I1208 17:46:03.923252 12 flags.go:64] FLAG: --version="false" 2025-12-08T17:46:03.923272766+00:00 stderr F I1208 17:46:03.923259 12 flags.go:64] FLAG: --vmodule="" 2025-12-08T17:46:03.923272766+00:00 stderr F I1208 17:46:03.923264 12 flags.go:64] FLAG: --watch-cache="true" 2025-12-08T17:46:03.923290697+00:00 stderr F I1208 17:46:03.923268 12 flags.go:64] FLAG: --watch-cache-sizes="[]" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923325 12 plugins.go:83] "Registered admission plugin" plugin="authorization.openshift.io/RestrictSubjectBindings" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923340 12 plugins.go:83] "Registered admission plugin" plugin="route.openshift.io/RouteHostAssignment" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923346 12 plugins.go:83] "Registered admission plugin" plugin="image.openshift.io/ImagePolicy" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923352 12 plugins.go:83] "Registered admission plugin" plugin="route.openshift.io/IngressAdmission" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923357 12 plugins.go:83] "Registered admission plugin" plugin="autoscaling.openshift.io/ManagementCPUsOverride" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923361 12 plugins.go:83] "Registered admission plugin" plugin="autoscaling.openshift.io/ManagedNode" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923367 12 plugins.go:83] "Registered admission plugin" plugin="autoscaling.openshift.io/MixedCPUs" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923372 12 plugins.go:83] "Registered admission plugin" plugin="scheduling.openshift.io/OriginPodNodeEnvironment" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923377 12 plugins.go:83] "Registered admission plugin" 
plugin="autoscaling.openshift.io/ClusterResourceOverride" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923382 12 plugins.go:83] "Registered admission plugin" plugin="quota.openshift.io/ClusterResourceQuota" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923387 12 plugins.go:83] "Registered admission plugin" plugin="autoscaling.openshift.io/RunOnceDuration" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923392 12 plugins.go:83] "Registered admission plugin" plugin="scheduling.openshift.io/PodNodeConstraints" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923397 12 plugins.go:83] "Registered admission plugin" plugin="security.openshift.io/SecurityContextConstraint" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923402 12 plugins.go:83] "Registered admission plugin" plugin="security.openshift.io/SCCExecRestrictions" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923407 12 plugins.go:83] "Registered admission plugin" plugin="network.openshift.io/ExternalIPRanger" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923412 12 plugins.go:83] "Registered admission plugin" plugin="network.openshift.io/RestrictedEndpointsAdmission" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923417 12 plugins.go:83] "Registered admission plugin" plugin="storage.openshift.io/CSIInlineVolumeSecurity" 2025-12-08T17:46:03.923439171+00:00 stderr F I1208 17:46:03.923422 12 plugins.go:83] "Registered admission plugin" plugin="storage.openshift.io/PerformantSecurityPolicy" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923439 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateAPIServer" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923452 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateAuthentication" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923458 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateFeatureGate" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923468 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateConsole" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923473 12 plugins.go:83] "Registered admission plugin" plugin="operator.openshift.io/ValidateDNS" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923479 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateImage" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923484 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateOAuth" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923489 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateProject" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923495 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/DenyDeleteClusterConfiguration" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923501 12 plugins.go:83] "Registered admission plugin" plugin="operator.openshift.io/DenyDeleteClusterOperators" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923506 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateScheduler" 2025-12-08T17:46:03.923525824+00:00 stderr F I1208 17:46:03.923512 12 plugins.go:83] "Registered admission plugin" plugin="operator.openshift.io/ValidateKubeControllerManager" 
2025-12-08T17:46:03.923586796+00:00 stderr F I1208 17:46:03.923521 12 plugins.go:83] "Registered admission plugin" plugin="quota.openshift.io/ValidateClusterResourceQuota" 2025-12-08T17:46:03.923586796+00:00 stderr F I1208 17:46:03.923527 12 plugins.go:83] "Registered admission plugin" plugin="security.openshift.io/ValidateSecurityContextConstraints" 2025-12-08T17:46:03.923586796+00:00 stderr F I1208 17:46:03.923533 12 plugins.go:83] "Registered admission plugin" plugin="authorization.openshift.io/ValidateRoleBindingRestriction" 2025-12-08T17:46:03.923586796+00:00 stderr F I1208 17:46:03.923538 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateNetwork" 2025-12-08T17:46:03.923586796+00:00 stderr F I1208 17:46:03.923543 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateAPIRequestCount" 2025-12-08T17:46:03.923586796+00:00 stderr F I1208 17:46:03.923548 12 plugins.go:83] "Registered admission plugin" plugin="config.openshift.io/ValidateConfigNodeV1" 2025-12-08T17:46:03.923586796+00:00 stderr F I1208 17:46:03.923553 12 plugins.go:83] "Registered admission plugin" plugin="security.openshift.io/DefaultSecurityContextConstraints" 2025-12-08T17:46:03.923586796+00:00 stderr F I1208 17:46:03.923558 12 plugins.go:83] "Registered admission plugin" plugin="route.openshift.io/ValidateRoute" 2025-12-08T17:46:03.923586796+00:00 stderr F I1208 17:46:03.923562 12 plugins.go:83] "Registered admission plugin" plugin="route.openshift.io/DefaultRoute" 2025-12-08T17:46:03.926158833+00:00 stderr F Flag --openshift-config has been deprecated, to be removed 2025-12-08T17:46:03.926158833+00:00 stderr F Flag --enable-logs-handler has been deprecated, Log handler functionality is deprecated 2025-12-08T17:46:03.926158833+00:00 stderr F Flag --kubelet-read-only-port has been deprecated, kubelet-read-only-port is deprecated and will be removed. 
2025-12-08T17:46:03.926158833+00:00 stderr F I1208 17:46:03.926036 12 registry.go:355] setting kube:feature gate emulation version to 1.33 2025-12-08T17:46:03.926158833+00:00 stderr F I1208 17:46:03.926047 12 registry.go:375] setting kube:feature-gates=AdditionalRoutingCapabilities=true,AdminNetworkPolicy=true,AlibabaPlatform=true,AzureWorkloadIdentity=true,BuildCSIVolumes=true,CPMSMachineNamePrefix=true,ConsolePluginContentSecurityPolicy=true,GatewayAPI=true,GatewayAPIController=true,HighlyAvailableArbiter=true,ImageVolume=true,IngressControllerLBSubnetsAWS=true,KMSv1=true,MachineConfigNodes=true,ManagedBootImages=true,ManagedBootImagesAWS=true,MetricsCollectionProfiles=true,NetworkDiagnosticsConfig=true,NetworkLiveMigration=true,NetworkSegmentation=true,NewOLM=true,PinnedImages=true,ProcMountType=true,RouteAdvertisements=true,RouteExternalCertificate=true,ServiceAccountTokenNodeBinding=true,SetEIPForNLBIngressController=true,SigstoreImageVerification=true,StoragePerformantSecurityPolicy=true,UpgradeStatus=true,UserNamespacesPodSecurityStandards=true,UserNamespacesSupport=true,VSphereMultiDisk=true,VSphereMultiNetworks=true,AWSClusterHostedDNS=false,AWSClusterHostedDNSInstall=false,AWSDedicatedHosts=false,AWSServiceLBNetworkSecurityGroup=false,AutomatedEtcdBackup=false,AzureClusterHostedDNSInstall=false,AzureDedicatedHosts=false,AzureMultiDisk=false,BootImageSkewEnforcement=false,BootcNodeManagement=false,ClusterAPIInstall=false,ClusterAPIInstallIBMCloud=false,ClusterMonitoringConfig=false,ClusterVersionOperatorConfiguration=false,DNSNameResolver=false,DualReplica=false,DyanmicServiceEndpointIBMCloud=false,DynamicResourceAllocation=false,EtcdBackendQuota=false,EventedPLEG=false,Example=false,Example2=false,ExternalOIDC=false,ExternalOIDCWithUIDAndExtraClaimMappings=false,ExternalSnapshotMetadata=false,GCPClusterHostedDNS=false,GCPClusterHostedDNSInstall=false,GCPCustomAPIEndpoints=false,GCPCustomAPIEndpointsInstall=false,ImageModeStatusReporting=false,ImageStreamImportMode=false,IngressControllerDynamicConfigurationManager=false,InsightsConfig=false,InsightsConfigAPI=false,InsightsOnDemandDataGather=false,IrreconcilableMachineConfig=false,KMSEncryptionProvider=false,MachineAPIMigration=false,MachineAPIOperatorDisableMachineHealthCheckController=false,ManagedBootImagesAzure=false,ManagedBootImagesvSphere=false,MaxUnavailableStatefulSet=false,MinimumKubeletVersion=false,MixedCPUsAllocation=false,MultiArchInstallAzure=false,MultiDiskSetup=false,MutatingAdmissionPolicy=false,NewOLMCatalogdAPIV1Metas=false,NewOLMOwnSingleNamespace=false,NewOLMPreflightPermissionChecks=false,NewOLMWebhookProviderOpenshiftServiceCA=false,NoRegistryClusterOperations=false,NodeSwap=false,NutanixMultiSubnets=false,OVNObservability=false,OpenShiftPodSecurityAdmission=false,PreconfiguredUDNAddresses=false,SELinuxMount=false,ShortCertRotation=false,SignatureStores=false,SigstoreImageVerificationPKI=false,TranslateStreamCloseWebsocketRequests=false,VSphereConfigurableMaxAllowedBlockVolumesPerNode=false,VSphereHostVMGroupZonal=false,VSphereMixedNodeEnv=false,VolumeAttributesClass=false,VolumeGroupSnapshot=false 2025-12-08T17:46:03.928248446+00:00 stderr F W1208 17:46:03.928075 12 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets 2025-12-08T17:46:03.928248446+00:00 stderr F W1208 17:46:03.928148 12 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses 2025-12-08T17:46:03.928248446+00:00 stderr F W1208 17:46:03.928156 12 feature_gate.go:328] unrecognized feature gate: 
IrreconcilableMachineConfig 2025-12-08T17:46:03.928248446+00:00 stderr F W1208 17:46:03.928163 12 feature_gate.go:328] unrecognized feature gate: NewOLM 2025-12-08T17:46:03.928248446+00:00 stderr F W1208 17:46:03.928169 12 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI 2025-12-08T17:46:03.928248446+00:00 stderr F W1208 17:46:03.928175 12 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles 2025-12-08T17:46:03.928248446+00:00 stderr F W1208 17:46:03.928181 12 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement 2025-12-08T17:46:03.928248446+00:00 stderr F W1208 17:46:03.928202 12 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS 2025-12-08T17:46:03.928248446+00:00 stderr F W1208 17:46:03.928218 12 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings 2025-12-08T17:46:03.928248446+00:00 stderr F W1208 17:46:03.928229 12 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. 2025-12-08T17:46:03.928314498+00:00 stderr F W1208 17:46:03.928239 12 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup 2025-12-08T17:46:03.928314498+00:00 stderr F W1208 17:46:03.928246 12 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes 2025-12-08T17:46:03.928314498+00:00 stderr F W1208 17:46:03.928253 12 feature_gate.go:328] unrecognized feature gate: GatewayAPIController 2025-12-08T17:46:03.928314498+00:00 stderr F W1208 17:46:03.928260 12 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk 2025-12-08T17:46:03.928314498+00:00 stderr F W1208 17:46:03.928267 12 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks 2025-12-08T17:46:03.928314498+00:00 stderr F W1208 17:46:03.928273 12 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS 2025-12-08T17:46:03.928314498+00:00 stderr F W1208 17:46:03.928279 12 feature_gate.go:328] unrecognized feature gate: DNSNameResolver 2025-12-08T17:46:03.928314498+00:00 stderr F W1208 17:46:03.928285 12 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements 2025-12-08T17:46:03.928314498+00:00 stderr F W1208 17:46:03.928292 12 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall 2025-12-08T17:46:03.928314498+00:00 stderr F W1208 17:46:03.928298 12 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere 2025-12-08T17:46:03.928358389+00:00 stderr F W1208 17:46:03.928305 12 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal 2025-12-08T17:46:03.928358389+00:00 stderr F W1208 17:46:03.928319 12 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
2025-12-08T17:46:03.928358389+00:00 stderr F W1208 17:46:03.928328 12 feature_gate.go:328] unrecognized feature gate: SignatureStores 2025-12-08T17:46:03.928358389+00:00 stderr F W1208 17:46:03.928336 12 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController 2025-12-08T17:46:03.928358389+00:00 stderr F W1208 17:46:03.928344 12 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities 2025-12-08T17:46:03.928426571+00:00 stderr F W1208 17:46:03.928349 12 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration 2025-12-08T17:46:03.928426571+00:00 stderr F W1208 17:46:03.928400 12 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS 2025-12-08T17:46:03.928426571+00:00 stderr F W1208 17:46:03.928407 12 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas 2025-12-08T17:46:03.928448732+00:00 stderr F W1208 17:46:03.928413 12 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk 2025-12-08T17:46:03.928448732+00:00 stderr F W1208 17:46:03.928420 12 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup 2025-12-08T17:46:03.928448732+00:00 stderr F W1208 17:46:03.928426 12 feature_gate.go:328] unrecognized feature gate: PinnedImages 2025-12-08T17:46:03.928448732+00:00 stderr F W1208 17:46:03.928432 12 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota 2025-12-08T17:46:03.928471252+00:00 stderr F W1208 17:46:03.928442 12 feature_gate.go:328] unrecognized feature gate: OVNObservability 2025-12-08T17:46:03.928471252+00:00 stderr F W1208 17:46:03.928451 12 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI 2025-12-08T17:46:03.928471252+00:00 stderr F W1208 17:46:03.928457 12 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv 2025-12-08T17:46:03.928492783+00:00 stderr F W1208 17:46:03.928463 12 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy 2025-12-08T17:46:03.928492783+00:00 stderr F W1208 17:46:03.928470 12 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace 2025-12-08T17:46:03.928492783+00:00 stderr F W1208 17:46:03.928475 12 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud 2025-12-08T17:46:03.928492783+00:00 stderr F W1208 17:46:03.928481 12 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall 2025-12-08T17:46:03.928514424+00:00 stderr F W1208 17:46:03.928486 12 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA 2025-12-08T17:46:03.928514424+00:00 stderr F W1208 17:46:03.928492 12 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata 2025-12-08T17:46:03.928514424+00:00 stderr F W1208 17:46:03.928498 12 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager 2025-12-08T17:46:03.928536034+00:00 stderr F W1208 17:46:03.928504 12 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes 2025-12-08T17:46:03.928536034+00:00 stderr F W1208 17:46:03.928510 12 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform 2025-12-08T17:46:03.928536034+00:00 stderr F W1208 17:46:03.928516 12 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement 2025-12-08T17:46:03.928557095+00:00 stderr F W1208 17:46:03.928527 12 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation 2025-12-08T17:46:03.928557095+00:00 stderr F W1208 17:46:03.928532 12 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification 
2025-12-08T17:46:03.928557095+00:00 stderr F W1208 17:46:03.928538 12 feature_gate.go:328] unrecognized feature gate: ExternalOIDC 2025-12-08T17:46:03.928557095+00:00 stderr F W1208 17:46:03.928544 12 feature_gate.go:328] unrecognized feature gate: DualReplica 2025-12-08T17:46:03.928590856+00:00 stderr F W1208 17:46:03.928549 12 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting 2025-12-08T17:46:03.928610607+00:00 stderr F W1208 17:46:03.928555 12 feature_gate.go:328] unrecognized feature gate: GatewayAPI 2025-12-08T17:46:03.928629857+00:00 stderr F W1208 17:46:03.928560 12 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts 2025-12-08T17:46:03.928629857+00:00 stderr F W1208 17:46:03.928566 12 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController 2025-12-08T17:46:03.928629857+00:00 stderr F W1208 17:46:03.928573 12 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig 2025-12-08T17:46:03.928629857+00:00 stderr F W1208 17:46:03.928579 12 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration 2025-12-08T17:46:03.928629857+00:00 stderr F W1208 17:46:03.928584 12 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter 2025-12-08T17:46:03.928629857+00:00 stderr F W1208 17:46:03.928589 12 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig 2025-12-08T17:46:03.928629857+00:00 stderr F W1208 17:46:03.928600 12 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode 2025-12-08T17:46:03.928629857+00:00 stderr F W1208 17:46:03.928605 12 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks 2025-12-08T17:46:03.928629857+00:00 stderr F W1208 17:46:03.928610 12 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints 2025-12-08T17:46:03.928629857+00:00 stderr F W1208 17:46:03.928616 12 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission 2025-12-08T17:46:03.928656898+00:00 stderr F W1208 17:46:03.928621 12 feature_gate.go:328] unrecognized feature gate: UpgradeStatus 2025-12-08T17:46:03.928656898+00:00 stderr F W1208 17:46:03.928627 12 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall 2025-12-08T17:46:03.928656898+00:00 stderr F W1208 17:46:03.928632 12 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix 2025-12-08T17:46:03.928656898+00:00 stderr F W1208 17:46:03.928639 12 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation 2025-12-08T17:46:03.928656898+00:00 stderr F W1208 17:46:03.928645 12 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure 2025-12-08T17:46:03.928679619+00:00 stderr F W1208 17:46:03.928652 12 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode 2025-12-08T17:46:03.928679619+00:00 stderr F W1208 17:46:03.928658 12 feature_gate.go:328] unrecognized feature gate: ShortCertRotation 2025-12-08T17:46:03.928679619+00:00 stderr F W1208 17:46:03.928664 12 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts 2025-12-08T17:46:03.928700859+00:00 stderr F W1208 17:46:03.928670 12 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud 2025-12-08T17:46:03.928700859+00:00 stderr F W1208 17:46:03.928680 12 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup 2025-12-08T17:46:03.928700859+00:00 stderr F W1208 17:46:03.928685 12 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall 
2025-12-08T17:46:03.928722020+00:00 stderr F W1208 17:46:03.928690 12 feature_gate.go:328] unrecognized feature gate: Example 2025-12-08T17:46:03.928722020+00:00 stderr F W1208 17:46:03.928696 12 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS 2025-12-08T17:46:03.928722020+00:00 stderr F W1208 17:46:03.928701 12 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy 2025-12-08T17:46:03.928722020+00:00 stderr F W1208 17:46:03.928707 12 feature_gate.go:328] unrecognized feature gate: ManagedBootImages 2025-12-08T17:46:03.928754121+00:00 stderr F W1208 17:46:03.928717 12 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall 2025-12-08T17:46:03.928754121+00:00 stderr F W1208 17:46:03.928724 12 feature_gate.go:328] unrecognized feature gate: InsightsConfig 2025-12-08T17:46:03.928754121+00:00 stderr F W1208 17:46:03.928729 12 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather 2025-12-08T17:46:03.928754121+00:00 stderr F W1208 17:46:03.928735 12 feature_gate.go:328] unrecognized feature gate: Example2 2025-12-08T17:46:03.928754121+00:00 stderr F W1208 17:46:03.928741 12 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider 2025-12-08T17:46:03.928777292+00:00 stderr F W1208 17:46:03.928747 12 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration 2025-12-08T17:46:03.928777292+00:00 stderr F W1208 17:46:03.928757 12 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity 2025-12-08T17:46:03.928777292+00:00 stderr F W1208 17:46:03.928763 12 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations 2025-12-08T17:46:03.928798422+00:00 stderr F W1208 17:46:03.928768 12 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot 2025-12-08T17:46:03.928798422+00:00 stderr F W1208 17:46:03.928774 12 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure 2025-12-08T17:46:03.929005948+00:00 stderr F I1208 17:46:03.928795 12 feature_gate.go:384] feature gates: {map[DynamicResourceAllocation:false EventedPLEG:false ImageVolume:true KMSv1:true MaxUnavailableStatefulSet:false MinimumKubeletVersion:false MutatingAdmissionPolicy:false NodeSwap:false ProcMountType:true RouteExternalCertificate:true SELinuxMount:false ServiceAccountTokenNodeBinding:true StoragePerformantSecurityPolicy:true TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:true UserNamespacesSupport:true VolumeAttributesClass:false]} 2025-12-08T17:46:03.929110031+00:00 stderr F I1208 17:46:03.928911 12 flags.go:64] FLAG: --admission-control="[]" 2025-12-08T17:46:03.929110031+00:00 stderr F I1208 17:46:03.929020 12 flags.go:64] FLAG: --admission-control-config-file="/tmp/kubeapiserver-admission-config.yaml3036224830" 2025-12-08T17:46:03.929110031+00:00 stderr F I1208 17:46:03.929041 12 flags.go:64] FLAG: --advertise-address="192.168.126.11" 2025-12-08T17:46:03.929110031+00:00 stderr F I1208 17:46:03.929052 12 flags.go:64] FLAG: --aggregator-reject-forwarding-redirect="true" 2025-12-08T17:46:03.929110031+00:00 stderr F I1208 17:46:03.929061 12 flags.go:64] FLAG: --allow-metric-labels="[]" 2025-12-08T17:46:03.929110031+00:00 stderr F I1208 17:46:03.929071 12 flags.go:64] FLAG: --allow-metric-labels-manifest="" 2025-12-08T17:46:03.929110031+00:00 stderr F I1208 17:46:03.929079 12 flags.go:64] FLAG: --allow-privileged="true" 2025-12-08T17:46:03.929110031+00:00 stderr F I1208 17:46:03.929086 12 flags.go:64] FLAG: --anonymous-auth="true" 
2025-12-08T17:46:03.929165463+00:00 stderr F I1208 17:46:03.929094 12 flags.go:64] FLAG: --api-audiences="[https://kubernetes.default.svc]" 2025-12-08T17:46:03.929165463+00:00 stderr F I1208 17:46:03.929108 12 flags.go:64] FLAG: --apiserver-count="1" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929117 12 flags.go:64] FLAG: --audit-log-batch-buffer-size="10000" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929147 12 flags.go:64] FLAG: --audit-log-batch-max-size="1" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929155 12 flags.go:64] FLAG: --audit-log-batch-max-wait="0s" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929164 12 flags.go:64] FLAG: --audit-log-batch-throttle-burst="0" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929171 12 flags.go:64] FLAG: --audit-log-batch-throttle-enable="false" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929178 12 flags.go:64] FLAG: --audit-log-batch-throttle-qps="0" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929188 12 flags.go:64] FLAG: --audit-log-compress="false" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929195 12 flags.go:64] FLAG: --audit-log-format="json" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929202 12 flags.go:64] FLAG: --audit-log-maxage="0" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929209 12 flags.go:64] FLAG: --audit-log-maxbackup="10" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929221 12 flags.go:64] FLAG: --audit-log-maxsize="200" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929229 12 flags.go:64] FLAG: --audit-log-mode="blocking" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929236 12 flags.go:64] FLAG: --audit-log-path="/var/log/kube-apiserver/audit.log" 2025-12-08T17:46:03.929259436+00:00 stderr F I1208 17:46:03.929244 12 flags.go:64] FLAG: --audit-log-truncate-enabled="false" 2025-12-08T17:46:03.929306957+00:00 stderr F I1208 17:46:03.929252 12 flags.go:64] FLAG: --audit-log-truncate-max-batch-size="10485760" 2025-12-08T17:46:03.929306957+00:00 stderr F I1208 17:46:03.929260 12 flags.go:64] FLAG: --audit-log-truncate-max-event-size="102400" 2025-12-08T17:46:03.929306957+00:00 stderr F I1208 17:46:03.929267 12 flags.go:64] FLAG: --audit-log-version="audit.k8s.io/v1" 2025-12-08T17:46:03.929306957+00:00 stderr F I1208 17:46:03.929275 12 flags.go:64] FLAG: --audit-policy-file="/etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-audit-policies/policy.yaml" 2025-12-08T17:46:03.929306957+00:00 stderr F I1208 17:46:03.929291 12 flags.go:64] FLAG: --audit-webhook-batch-buffer-size="10000" 2025-12-08T17:46:03.929330368+00:00 stderr F I1208 17:46:03.929298 12 flags.go:64] FLAG: --audit-webhook-batch-initial-backoff="10s" 2025-12-08T17:46:03.929330368+00:00 stderr F I1208 17:46:03.929306 12 flags.go:64] FLAG: --audit-webhook-batch-max-size="400" 2025-12-08T17:46:03.929330368+00:00 stderr F I1208 17:46:03.929314 12 flags.go:64] FLAG: --audit-webhook-batch-max-wait="30s" 2025-12-08T17:46:03.929330368+00:00 stderr F I1208 17:46:03.929321 12 flags.go:64] FLAG: --audit-webhook-batch-throttle-burst="15" 2025-12-08T17:46:03.929352189+00:00 stderr F I1208 17:46:03.929328 12 flags.go:64] FLAG: --audit-webhook-batch-throttle-enable="true" 2025-12-08T17:46:03.929352189+00:00 stderr F I1208 17:46:03.929335 12 flags.go:64] FLAG: --audit-webhook-batch-throttle-qps="10" 2025-12-08T17:46:03.929372349+00:00 stderr F I1208 17:46:03.929343 12 flags.go:64] 
FLAG: --audit-webhook-config-file="" 2025-12-08T17:46:03.929372349+00:00 stderr F I1208 17:46:03.929350 12 flags.go:64] FLAG: --audit-webhook-initial-backoff="10s" 2025-12-08T17:46:03.929392470+00:00 stderr F I1208 17:46:03.929363 12 flags.go:64] FLAG: --audit-webhook-mode="batch" 2025-12-08T17:46:03.929392470+00:00 stderr F I1208 17:46:03.929370 12 flags.go:64] FLAG: --audit-webhook-truncate-enabled="false" 2025-12-08T17:46:03.929392470+00:00 stderr F I1208 17:46:03.929377 12 flags.go:64] FLAG: --audit-webhook-truncate-max-batch-size="10485760" 2025-12-08T17:46:03.929413720+00:00 stderr F I1208 17:46:03.929385 12 flags.go:64] FLAG: --audit-webhook-truncate-max-event-size="102400" 2025-12-08T17:46:03.929413720+00:00 stderr F I1208 17:46:03.929392 12 flags.go:64] FLAG: --audit-webhook-version="audit.k8s.io/v1" 2025-12-08T17:46:03.929413720+00:00 stderr F I1208 17:46:03.929399 12 flags.go:64] FLAG: --authentication-config="" 2025-12-08T17:46:03.929434751+00:00 stderr F I1208 17:46:03.929406 12 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" 2025-12-08T17:46:03.929434751+00:00 stderr F I1208 17:46:03.929414 12 flags.go:64] FLAG: --authentication-token-webhook-config-file="/etc/kubernetes/static-pod-resources/secrets/webhook-authenticator/kubeConfig" 2025-12-08T17:46:03.929528104+00:00 stderr F I1208 17:46:03.929430 12 flags.go:64] FLAG: --authentication-token-webhook-version="v1" 2025-12-08T17:46:03.929528104+00:00 stderr F I1208 17:46:03.929454 12 flags.go:64] FLAG: --authorization-config="" 2025-12-08T17:46:03.929528104+00:00 stderr F I1208 17:46:03.929461 12 flags.go:64] FLAG: --authorization-mode="[Scope,SystemMasters,RBAC,Node]" 2025-12-08T17:46:03.929528104+00:00 stderr F I1208 17:46:03.929471 12 flags.go:64] FLAG: --authorization-policy-file="" 2025-12-08T17:46:03.929528104+00:00 stderr F I1208 17:46:03.929478 12 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" 2025-12-08T17:46:03.929528104+00:00 stderr F I1208 17:46:03.929486 12 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" 2025-12-08T17:46:03.929528104+00:00 stderr F I1208 17:46:03.929493 12 flags.go:64] FLAG: --authorization-webhook-config-file="" 2025-12-08T17:46:03.929528104+00:00 stderr F I1208 17:46:03.929500 12 flags.go:64] FLAG: --authorization-webhook-version="v1beta1" 2025-12-08T17:46:03.929528104+00:00 stderr F I1208 17:46:03.929507 12 flags.go:64] FLAG: --bind-address="0.0.0.0" 2025-12-08T17:46:03.929556945+00:00 stderr F I1208 17:46:03.929520 12 flags.go:64] FLAG: --cert-dir="/var/run/kubernetes" 2025-12-08T17:46:03.929556945+00:00 stderr F I1208 17:46:03.929527 12 flags.go:64] FLAG: --client-ca-file="/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 2025-12-08T17:46:03.929556945+00:00 stderr F I1208 17:46:03.929537 12 flags.go:64] FLAG: --contention-profiling="false" 2025-12-08T17:46:03.929579055+00:00 stderr F I1208 17:46:03.929544 12 flags.go:64] FLAG: --cors-allowed-origins="[//127\\.0\\.0\\.1(:|$),//localhost(:|$)]" 2025-12-08T17:46:03.929579055+00:00 stderr F I1208 17:46:03.929557 12 flags.go:64] FLAG: --debug-socket-path="" 2025-12-08T17:46:03.929579055+00:00 stderr F I1208 17:46:03.929564 12 flags.go:64] FLAG: --default-not-ready-toleration-seconds="300" 2025-12-08T17:46:03.929599916+00:00 stderr F I1208 17:46:03.929574 12 flags.go:64] FLAG: --default-unreachable-toleration-seconds="300" 2025-12-08T17:46:03.929599916+00:00 stderr F I1208 17:46:03.929581 12 flags.go:64] FLAG: --default-watch-cache-size="100" 
2025-12-08T17:46:03.929620076+00:00 stderr F I1208 17:46:03.929589 12 flags.go:64] FLAG: --delete-collection-workers="1" 2025-12-08T17:46:03.929620076+00:00 stderr F I1208 17:46:03.929601 12 flags.go:64] FLAG: --disable-admission-plugins="[]" 2025-12-08T17:46:03.929620076+00:00 stderr F I1208 17:46:03.929609 12 flags.go:64] FLAG: --disable-http2-serving="false" 2025-12-08T17:46:03.929641267+00:00 stderr F I1208 17:46:03.929616 12 flags.go:64] FLAG: --disabled-metrics="[]" 2025-12-08T17:46:03.929641267+00:00 stderr F I1208 17:46:03.929625 12 flags.go:64] FLAG: --egress-selector-config-file="" 2025-12-08T17:46:03.929661348+00:00 stderr F I1208 17:46:03.929632 12 flags.go:64] FLAG: --emulated-version="[]" 2025-12-08T17:46:03.929661348+00:00 stderr F I1208 17:46:03.929640 12 flags.go:64] FLAG: --emulation-forward-compatible="false" 2025-12-08T17:46:03.929775931+00:00 stderr F I1208 17:46:03.929647 12 flags.go:64] FLAG: --enable-admission-plugins="[CertificateApproval,CertificateSigning,CertificateSubjectRestriction,DefaultIngressClass,DefaultStorageClass,DefaultTolerationSeconds,LimitRanger,MutatingAdmissionWebhook,NamespaceLifecycle,NodeRestriction,OwnerReferencesPermissionEnforcement,PersistentVolumeClaimResize,PodNodeSelector,PodTolerationRestriction,Priority,ResourceQuota,RuntimeClass,ServiceAccount,StorageObjectInUseProtection,TaintNodesByCondition,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,authorization.openshift.io/RestrictSubjectBindings,authorization.openshift.io/ValidateRoleBindingRestriction,config.openshift.io/DenyDeleteClusterConfiguration,config.openshift.io/ValidateAPIServer,config.openshift.io/ValidateAuthentication,config.openshift.io/ValidateConsole,config.openshift.io/ValidateFeatureGate,config.openshift.io/ValidateImage,config.openshift.io/ValidateOAuth,config.openshift.io/ValidateProject,config.openshift.io/ValidateScheduler,image.openshift.io/ImagePolicy,network.openshift.io/ExternalIPRanger,network.openshift.io/RestrictedEndpointsAdmission,quota.openshift.io/ClusterResourceQuota,quota.openshift.io/ValidateClusterResourceQuota,route.openshift.io/IngressAdmission,scheduling.openshift.io/OriginPodNodeEnvironment,security.openshift.io/DefaultSecurityContextConstraints,security.openshift.io/SCCExecRestrictions,security.openshift.io/SecurityContextConstraint,security.openshift.io/ValidateSecurityContextConstraints,storage.openshift.io/CSIInlineVolumeSecurity]" 2025-12-08T17:46:03.929775931+00:00 stderr F I1208 17:46:03.929696 12 flags.go:64] FLAG: --enable-aggregator-routing="true" 2025-12-08T17:46:03.929775931+00:00 stderr F I1208 17:46:03.929710 12 flags.go:64] FLAG: --enable-bootstrap-token-auth="false" 2025-12-08T17:46:03.929775931+00:00 stderr F I1208 17:46:03.929718 12 flags.go:64] FLAG: --enable-garbage-collector="true" 2025-12-08T17:46:03.929775931+00:00 stderr F I1208 17:46:03.929725 12 flags.go:64] FLAG: --enable-logs-handler="false" 2025-12-08T17:46:03.929775931+00:00 stderr F I1208 17:46:03.929732 12 flags.go:64] FLAG: --enable-priority-and-fairness="true" 2025-12-08T17:46:03.929775931+00:00 stderr F I1208 17:46:03.929739 12 flags.go:64] FLAG: --encryption-provider-config="" 2025-12-08T17:46:03.929775931+00:00 stderr F I1208 17:46:03.929745 12 flags.go:64] FLAG: --encryption-provider-config-automatic-reload="false" 2025-12-08T17:46:03.929775931+00:00 stderr F I1208 17:46:03.929752 12 flags.go:64] FLAG: --endpoint-reconciler-type="lease" 2025-12-08T17:46:03.929832123+00:00 stderr F I1208 17:46:03.929760 12 flags.go:64] FLAG: 
--etcd-cafile="/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt" 2025-12-08T17:46:03.929909535+00:00 stderr F I1208 17:46:03.929795 12 flags.go:64] FLAG: --etcd-certfile="/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt" 2025-12-08T17:46:03.929909535+00:00 stderr F I1208 17:46:03.929818 12 flags.go:64] FLAG: --etcd-compaction-interval="5m0s" 2025-12-08T17:46:03.929909535+00:00 stderr F I1208 17:46:03.929828 12 flags.go:64] FLAG: --etcd-count-metric-poll-period="1m0s" 2025-12-08T17:46:03.929909535+00:00 stderr F I1208 17:46:03.929835 12 flags.go:64] FLAG: --etcd-db-metric-poll-interval="30s" 2025-12-08T17:46:03.929909535+00:00 stderr F I1208 17:46:03.929843 12 flags.go:64] FLAG: --etcd-healthcheck-timeout="9s" 2025-12-08T17:46:03.929909535+00:00 stderr F I1208 17:46:03.929851 12 flags.go:64] FLAG: --etcd-keyfile="/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key" 2025-12-08T17:46:03.929909535+00:00 stderr F I1208 17:46:03.929859 12 flags.go:64] FLAG: --etcd-prefix="kubernetes.io" 2025-12-08T17:46:03.929947706+00:00 stderr F I1208 17:46:03.929866 12 flags.go:64] FLAG: --etcd-readycheck-timeout="9s" 2025-12-08T17:46:03.929947706+00:00 stderr F I1208 17:46:03.929873 12 flags.go:64] FLAG: --etcd-servers="[https://192.168.126.11:2379,https://localhost:2379]" 2025-12-08T17:46:03.929947706+00:00 stderr F I1208 17:46:03.929921 12 flags.go:64] FLAG: --etcd-servers-overrides="[]" 2025-12-08T17:46:03.929947706+00:00 stderr F I1208 17:46:03.929930 12 flags.go:64] FLAG: --event-ttl="3h0m0s" 2025-12-08T17:46:03.929970387+00:00 stderr F I1208 17:46:03.929938 12 flags.go:64] FLAG: --external-hostname="" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.929946 12 flags.go:64] FLAG: --feature-gates=":AdditionalRoutingCapabilities=true,:AdminNetworkPolicy=true,:AlibabaPlatform=true,:AzureWorkloadIdentity=true,:BuildCSIVolumes=true,:CPMSMachineNamePrefix=true,:ConsolePluginContentSecurityPolicy=true,:GatewayAPI=true,:GatewayAPIController=true,:HighlyAvailableArbiter=true,:ImageVolume=true,:IngressControllerLBSubnetsAWS=true,:KMSv1=true,:MachineConfigNodes=true,:ManagedBootImages=true,:ManagedBootImagesAWS=true,:MetricsCollectionProfiles=true,:NetworkDiagnosticsConfig=true,:NetworkLiveMigration=true,:NetworkSegmentation=true,:NewOLM=true,:PinnedImages=true,:ProcMountType=true,:RouteAdvertisements=true,:RouteExternalCertificate=true,:ServiceAccountTokenNodeBinding=true,:SetEIPForNLBIngressController=true,:SigstoreImageVerification=true,:StoragePerformantSecurityPolicy=true,:UpgradeStatus=true,:UserNamespacesPodSecurityStandards=true,:UserNamespacesSupport=true,:VSphereMultiDisk=true,:VSphereMultiNetworks=true,:AWSClusterHostedDNS=false,:AWSClusterHostedDNSInstall=false,:AWSDedicatedHosts=false,:AWSServiceLBNetworkSecurityGroup=false,:AutomatedEtcdBackup=false,:AzureClusterHostedDNSInstall=false,:AzureDedicatedHosts=false,:AzureMultiDisk=false,:BootImageSkewEnforcement=false,:BootcNodeManagement=false,:ClusterAPIInstall=false,:ClusterAPIInstallIBMCloud=false,:ClusterMonitoringConfig=false,:ClusterVersionOperatorConfiguration=false,:DNSNameResolver=false,:DualReplica=false,:DyanmicServiceEndpointIBMCloud=false,:DynamicResourceAllocation=false,:EtcdBackendQuota=false,:EventedPLEG=false,:Example=false,:Example2=false,:ExternalOIDC=false,:ExternalOIDCWithUIDAndExtraClaimMappings=false,:ExternalSnapshotMetadata=false,:GCPClusterHostedDNS=false,:GCPClusterHostedDNSInstall=false,:GCPCustomAPIEndpoints=false,:GCPCustomAPIEndpointsInstall=false,:ImageMod
eStatusReporting=false,:ImageStreamImportMode=false,:IngressControllerDynamicConfigurationManager=false,:InsightsConfig=false,:InsightsConfigAPI=false,:InsightsOnDemandDataGather=false,:IrreconcilableMachineConfig=false,:KMSEncryptionProvider=false,:MachineAPIMigration=false,:MachineAPIOperatorDisableMachineHealthCheckController=false,:ManagedBootImagesAzure=false,:ManagedBootImagesvSphere=false,:MaxUnavailableStatefulSet=false,:MinimumKubeletVersion=false,:MixedCPUsAllocation=false,:MultiArchInstallAzure=false,:MultiDiskSetup=false,:MutatingAdmissionPolicy=false,:NewOLMCatalogdAPIV1Metas=false,:NewOLMOwnSingleNamespace=false,:NewOLMPreflightPermissionChecks=false,:NewOLMWebhookProviderOpenshiftServiceCA=false,:NoRegistryClusterOperations=false,:NodeSwap=false,:NutanixMultiSubnets=false,:OVNObservability=false,:OpenShiftPodSecurityAdmission=false,:PreconfiguredUDNAddresses=false,:SELinuxMount=false,:ShortCertRotation=false,:SignatureStores=false,:SigstoreImageVerificationPKI=false,:TranslateStreamCloseWebsocketRequests=false,:VSphereConfigurableMaxAllowedBlockVolumesPerNode=false,:VSphereHostVMGroupZonal=false,:VSphereMixedNodeEnv=false,:VolumeAttributesClass=false,:VolumeGroupSnapshot=false" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930198 12 flags.go:64] FLAG: --goaway-chance="0" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930208 12 flags.go:64] FLAG: --help="false" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930213 12 flags.go:64] FLAG: --http2-max-streams-per-connection="2000" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930222 12 flags.go:64] FLAG: --kubelet-certificate-authority="/etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930241 12 flags.go:64] FLAG: --kubelet-client-certificate="/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930246 12 flags.go:64] FLAG: --kubelet-client-key="/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930251 12 flags.go:64] FLAG: --kubelet-port="10250" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930256 12 flags.go:64] FLAG: --kubelet-preferred-address-types="[InternalIP]" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930262 12 flags.go:64] FLAG: --kubelet-read-only-port="0" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930266 12 flags.go:64] FLAG: --kubelet-timeout="5s" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930271 12 flags.go:64] FLAG: --kubernetes-service-node-port="0" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930276 12 flags.go:64] FLAG: --lease-reuse-duration-seconds="60" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930283 12 flags.go:64] FLAG: --livez-grace-period="0s" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930291 12 flags.go:64] FLAG: --log-flush-frequency="5s" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930295 12 flags.go:64] FLAG: --log-json-info-buffer-size="0" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930300 12 flags.go:64] FLAG: --log-json-split-stream="false" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930303 12 flags.go:64] FLAG: --log-text-info-buffer-size="0" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930307 12 flags.go:64] FLAG: 
--log-text-split-stream="false" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930312 12 flags.go:64] FLAG: --logging-format="text" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930316 12 flags.go:64] FLAG: --max-connection-bytes-per-sec="0" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930323 12 flags.go:64] FLAG: --max-mutating-requests-inflight="1000" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930327 12 flags.go:64] FLAG: --max-requests-inflight="3000" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930332 12 flags.go:64] FLAG: --min-request-timeout="3600" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930335 12 flags.go:64] FLAG: --oidc-ca-file="" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930340 12 flags.go:64] FLAG: --oidc-client-id="" 2025-12-08T17:46:03.930370849+00:00 stderr F I1208 17:46:03.930344 12 flags.go:64] FLAG: --oidc-groups-claim="" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930347 12 flags.go:64] FLAG: --oidc-groups-prefix="" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930352 12 flags.go:64] FLAG: --oidc-issuer-url="" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930356 12 flags.go:64] FLAG: --oidc-required-claim="" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930363 12 flags.go:64] FLAG: --oidc-signing-algs="[RS256]" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930369 12 flags.go:64] FLAG: --oidc-username-claim="sub" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930373 12 flags.go:64] FLAG: --oidc-username-prefix="" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930376 12 flags.go:64] FLAG: --openshift-config="/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930381 12 flags.go:64] FLAG: --peer-advertise-ip="" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930385 12 flags.go:64] FLAG: --peer-advertise-port="" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930389 12 flags.go:64] FLAG: --peer-ca-file="" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930393 12 flags.go:64] FLAG: --permit-address-sharing="true" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930398 12 flags.go:64] FLAG: --permit-port-sharing="false" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930405 12 flags.go:64] FLAG: --profiling="true" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930409 12 flags.go:64] FLAG: --proxy-client-cert-file="/etc/kubernetes/static-pod-certs/secrets/aggregator-client/tls.crt" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930414 12 flags.go:64] FLAG: --proxy-client-key-file="/etc/kubernetes/static-pod-certs/secrets/aggregator-client/tls.key" 2025-12-08T17:46:03.930442281+00:00 stderr F I1208 17:46:03.930418 12 flags.go:64] FLAG: --request-timeout="1m0s" 2025-12-08T17:46:03.930488352+00:00 stderr F I1208 17:46:03.930423 12 flags.go:64] FLAG: --requestheader-allowed-names="[kube-apiserver-proxy,system:kube-apiserver-proxy,system:openshift-aggregator]" 2025-12-08T17:46:03.930488352+00:00 stderr F I1208 17:46:03.930430 12 flags.go:64] FLAG: --requestheader-client-ca-file="/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:46:03.930488352+00:00 stderr F I1208 17:46:03.930435 12 flags.go:64] FLAG: --requestheader-extra-headers-prefix="[X-Remote-Extra-]" 
2025-12-08T17:46:03.930488352+00:00 stderr F I1208 17:46:03.930441 12 flags.go:64] FLAG: --requestheader-group-headers="[X-Remote-Group]" 2025-12-08T17:46:03.930488352+00:00 stderr F I1208 17:46:03.930449 12 flags.go:64] FLAG: --requestheader-uid-headers="[]" 2025-12-08T17:46:03.930488352+00:00 stderr F I1208 17:46:03.930454 12 flags.go:64] FLAG: --requestheader-username-headers="[X-Remote-User]" 2025-12-08T17:46:03.930488352+00:00 stderr F I1208 17:46:03.930459 12 flags.go:64] FLAG: --runtime-config="" 2025-12-08T17:46:03.930488352+00:00 stderr F I1208 17:46:03.930464 12 flags.go:64] FLAG: --runtime-config-emulation-forward-compatible="false" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930468 12 flags.go:64] FLAG: --secure-port="6443" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930474 12 flags.go:64] FLAG: --send-retry-after-while-not-ready-once="true" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930477 12 flags.go:64] FLAG: --service-account-extend-token-expiration="true" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930482 12 flags.go:64] FLAG: --service-account-issuer="[https://kubernetes.default.svc]" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930488 12 flags.go:64] FLAG: --service-account-jwks-uri="https://api.crc.testing:6443/openid/v1/jwks" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930496 12 flags.go:64] FLAG: --service-account-key-file="[/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs/service-account-001.pub,/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs/service-account-002.pub,/etc/kubernetes/static-pod-resources/configmaps/bound-sa-token-signing-certs/service-account-001.pub]" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930506 12 flags.go:64] FLAG: --service-account-lookup="true" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930512 12 flags.go:64] FLAG: --service-account-max-token-expiration="0s" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930516 12 flags.go:64] FLAG: --service-account-signing-endpoint="" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930520 12 flags.go:64] FLAG: --service-account-signing-key-file="/etc/kubernetes/static-pod-certs/secrets/bound-service-account-signing-key/service-account.key" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930525 12 flags.go:64] FLAG: --service-cluster-ip-range="10.217.4.0/23" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930530 12 flags.go:64] FLAG: --service-node-port-range="30000-32767" 2025-12-08T17:46:03.930553594+00:00 stderr F I1208 17:46:03.930536 12 flags.go:64] FLAG: --show-hidden-metrics-for-version="" 2025-12-08T17:46:03.930585035+00:00 stderr F I1208 17:46:03.930544 12 flags.go:64] FLAG: --shutdown-delay-duration="0s" 2025-12-08T17:46:03.930585035+00:00 stderr F I1208 17:46:03.930548 12 flags.go:64] FLAG: --shutdown-send-retry-after="true" 2025-12-08T17:46:03.930585035+00:00 stderr F I1208 17:46:03.930552 12 flags.go:64] FLAG: --shutdown-watch-termination-grace-period="0s" 2025-12-08T17:46:03.930585035+00:00 stderr F I1208 17:46:03.930556 12 flags.go:64] FLAG: --storage-backend="etcd3" 2025-12-08T17:46:03.930585035+00:00 stderr F I1208 17:46:03.930561 12 flags.go:64] FLAG: --storage-initialization-timeout="1m0s" 2025-12-08T17:46:03.930585035+00:00 stderr F I1208 17:46:03.930565 12 flags.go:64] FLAG: --storage-media-type="application/vnd.kubernetes.protobuf" 2025-12-08T17:46:03.930585035+00:00 
stderr F I1208 17:46:03.930569 12 flags.go:64] FLAG: --strict-transport-security-directives="[max-age=31536000,includeSubDomains,preload]" 2025-12-08T17:46:03.930617516+00:00 stderr F I1208 17:46:03.930576 12 flags.go:64] FLAG: --tls-cert-file="/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt" 2025-12-08T17:46:03.930617516+00:00 stderr F I1208 17:46:03.930584 12 flags.go:64] FLAG: --tls-cipher-suites="[TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256]" 2025-12-08T17:46:03.930617516+00:00 stderr F I1208 17:46:03.930594 12 flags.go:64] FLAG: --tls-min-version="VersionTLS12" 2025-12-08T17:46:03.930617516+00:00 stderr F I1208 17:46:03.930599 12 flags.go:64] FLAG: --tls-private-key-file="/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key" 2025-12-08T17:46:03.931257456+00:00 stderr F I1208 17:46:03.930604 12 flags.go:64] FLAG: --tls-sni-cert-key="[/etc/kubernetes/static-pod-certs/secrets/localhost-serving-cert-certkey/tls.crt,/etc/kubernetes/static-pod-certs/secrets/localhost-serving-cert-certkey/tls.key;/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt,/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key;/etc/kubernetes/static-pod-certs/secrets/external-loadbalancer-serving-certkey/tls.crt,/etc/kubernetes/static-pod-certs/secrets/external-loadbalancer-serving-certkey/tls.key;/etc/kubernetes/static-pod-certs/secrets/internal-loadbalancer-serving-certkey/tls.crt,/etc/kubernetes/static-pod-certs/secrets/internal-loadbalancer-serving-certkey/tls.key;/etc/kubernetes/static-pod-resources/secrets/localhost-recovery-serving-certkey/tls.crt,/etc/kubernetes/static-pod-resources/secrets/localhost-recovery-serving-certkey/tls.key]" 2025-12-08T17:46:03.931257456+00:00 stderr F I1208 17:46:03.930627 12 flags.go:64] FLAG: --token-auth-file="" 2025-12-08T17:46:03.931257456+00:00 stderr F I1208 17:46:03.930631 12 flags.go:64] FLAG: --tracing-config-file="" 2025-12-08T17:46:03.931257456+00:00 stderr F I1208 17:46:03.930635 12 flags.go:64] FLAG: --v="2" 2025-12-08T17:46:03.931257456+00:00 stderr F I1208 17:46:03.930639 12 flags.go:64] FLAG: --version="false" 2025-12-08T17:46:03.931257456+00:00 stderr F I1208 17:46:03.930643 12 flags.go:64] FLAG: --vmodule="" 2025-12-08T17:46:03.931257456+00:00 stderr F I1208 17:46:03.930651 12 flags.go:64] FLAG: --watch-cache="true" 2025-12-08T17:46:03.931257456+00:00 stderr F I1208 17:46:03.930655 12 flags.go:64] FLAG: --watch-cache-sizes="[]" 2025-12-08T17:46:03.931257456+00:00 stderr F I1208 17:46:03.930697 12 options.go:249] external host was not specified, using 192.168.126.11 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931833 12 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931854 12 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931861 12 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931866 12 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 
17:46:03.931871 12 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931900 12 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931904 12 feature_gate.go:328] unrecognized feature gate: PinnedImages 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931907 12 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931911 12 feature_gate.go:328] unrecognized feature gate: OVNObservability 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931915 12 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931919 12 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931924 12 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931928 12 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931932 12 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931936 12 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931940 12 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931944 12 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931949 12 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931953 12 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931959 12 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931963 12 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931967 12 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation 2025-12-08T17:46:03.931981517+00:00 stderr F W1208 17:46:03.931970 12 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.931974 12 feature_gate.go:328] unrecognized feature gate: ExternalOIDC 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.931978 12 feature_gate.go:328] unrecognized feature gate: DualReplica 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.931981 12 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.931984 12 feature_gate.go:328] unrecognized feature gate: GatewayAPI 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.931987 12 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.931991 12 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.931996 12 
feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932001 12 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932005 12 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932009 12 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932013 12 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932017 12 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932020 12 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932023 12 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932026 12 feature_gate.go:328] unrecognized feature gate: UpgradeStatus 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932030 12 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932033 12 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix 2025-12-08T17:46:03.932045869+00:00 stderr F W1208 17:46:03.932036 12 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932039 12 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932043 12 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932046 12 feature_gate.go:328] unrecognized feature gate: ShortCertRotation 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932049 12 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932052 12 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932056 12 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932059 12 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932062 12 feature_gate.go:328] unrecognized feature gate: Example 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932065 12 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932069 12 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy 2025-12-08T17:46:03.932086731+00:00 stderr F W1208 17:46:03.932073 12 feature_gate.go:328] unrecognized feature gate: ManagedBootImages 2025-12-08T17:46:03.932111931+00:00 stderr F W1208 17:46:03.932078 12 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall 2025-12-08T17:46:03.932111931+00:00 stderr F W1208 17:46:03.932082 12 feature_gate.go:328] unrecognized feature gate: InsightsConfig 2025-12-08T17:46:03.932111931+00:00 stderr F W1208 17:46:03.932086 12 feature_gate.go:328] 
unrecognized feature gate: InsightsOnDemandDataGather 2025-12-08T17:46:03.932111931+00:00 stderr F W1208 17:46:03.932090 12 feature_gate.go:328] unrecognized feature gate: Example2 2025-12-08T17:46:03.932111931+00:00 stderr F W1208 17:46:03.932095 12 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider 2025-12-08T17:46:03.932111931+00:00 stderr F W1208 17:46:03.932098 12 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration 2025-12-08T17:46:03.932111931+00:00 stderr F W1208 17:46:03.932101 12 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity 2025-12-08T17:46:03.932131892+00:00 stderr F W1208 17:46:03.932107 12 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations 2025-12-08T17:46:03.932131892+00:00 stderr F W1208 17:46:03.932110 12 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot 2025-12-08T17:46:03.932131892+00:00 stderr F W1208 17:46:03.932113 12 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure 2025-12-08T17:46:03.932131892+00:00 stderr F W1208 17:46:03.932116 12 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets 2025-12-08T17:46:03.932131892+00:00 stderr F W1208 17:46:03.932120 12 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses 2025-12-08T17:46:03.932155983+00:00 stderr F W1208 17:46:03.932123 12 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig 2025-12-08T17:46:03.932155983+00:00 stderr F W1208 17:46:03.932126 12 feature_gate.go:328] unrecognized feature gate: NewOLM 2025-12-08T17:46:03.932172643+00:00 stderr F W1208 17:46:03.932129 12 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI 2025-12-08T17:46:03.932172643+00:00 stderr F W1208 17:46:03.932132 12 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles 2025-12-08T17:46:03.932172643+00:00 stderr F W1208 17:46:03.932136 12 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement 2025-12-08T17:46:03.932172643+00:00 stderr F W1208 17:46:03.932140 12 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS 2025-12-08T17:46:03.932172643+00:00 stderr F W1208 17:46:03.932143 12 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings 2025-12-08T17:46:03.932172643+00:00 stderr F W1208 17:46:03.932148 12 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. 
2025-12-08T17:46:03.932172643+00:00 stderr F W1208 17:46:03.932152 12 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup 2025-12-08T17:46:03.932172643+00:00 stderr F W1208 17:46:03.932156 12 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes 2025-12-08T17:46:03.932172643+00:00 stderr F W1208 17:46:03.932159 12 feature_gate.go:328] unrecognized feature gate: GatewayAPIController 2025-12-08T17:46:03.932172643+00:00 stderr F W1208 17:46:03.932163 12 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk 2025-12-08T17:46:03.932194664+00:00 stderr F W1208 17:46:03.932166 12 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks 2025-12-08T17:46:03.932194664+00:00 stderr F W1208 17:46:03.932170 12 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS 2025-12-08T17:46:03.932194664+00:00 stderr F W1208 17:46:03.932173 12 feature_gate.go:328] unrecognized feature gate: DNSNameResolver 2025-12-08T17:46:03.932194664+00:00 stderr F W1208 17:46:03.932176 12 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements 2025-12-08T17:46:03.932194664+00:00 stderr F W1208 17:46:03.932179 12 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall 2025-12-08T17:46:03.932194664+00:00 stderr F W1208 17:46:03.932182 12 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere 2025-12-08T17:46:03.932213974+00:00 stderr F W1208 17:46:03.932186 12 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal 2025-12-08T17:46:03.932213974+00:00 stderr F W1208 17:46:03.932192 12 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 2025-12-08T17:46:03.932213974+00:00 stderr F W1208 17:46:03.932197 12 feature_gate.go:328] unrecognized feature gate: SignatureStores 2025-12-08T17:46:03.932213974+00:00 stderr F W1208 17:46:03.932204 12 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController 2025-12-08T17:46:03.934315248+00:00 stderr F I1208 17:46:03.934187 12 server.go:184] Version: v1.33.5 2025-12-08T17:46:03.934315248+00:00 stderr F I1208 17:46:03.934222 12 server.go:186] "Golang settings" GOGC="100" GOMAXPROCS="" GOTRACEBACK="" 2025-12-08T17:46:03.935305407+00:00 stderr F I1208 17:46:03.935179 12 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key" 2025-12-08T17:46:03.935596566+00:00 stderr F I1208 17:46:03.935498 12 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/localhost-serving-cert-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/localhost-serving-cert-certkey/tls.key" 2025-12-08T17:46:03.936097591+00:00 stderr F I1208 17:46:03.935954 12 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key" 2025-12-08T17:46:03.936607137+00:00 stderr F I1208 17:46:03.936495 12 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/external-loadbalancer-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/external-loadbalancer-serving-certkey/tls.key" 
2025-12-08T17:46:03.937067250+00:00 stderr F I1208 17:46:03.936955 12 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/internal-loadbalancer-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/internal-loadbalancer-serving-certkey/tls.key" 2025-12-08T17:46:03.937500803+00:00 stderr F I1208 17:46:03.937407 12 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="sni-serving-cert::/etc/kubernetes/static-pod-resources/secrets/localhost-recovery-serving-certkey/tls.crt::/etc/kubernetes/static-pod-resources/secrets/localhost-recovery-serving-certkey/tls.key" 2025-12-08T17:46:04.244860729+00:00 stderr F I1208 17:46:04.244711 12 apf_controller.go:292] NewTestableController "Controller" with serverConcurrencyLimit=4000, name=Controller, asFieldManager="api-priority-and-fairness-config-consumer-v1" 2025-12-08T17:46:04.245011774+00:00 stderr F I1208 17:46:04.244838 12 apf_controller.go:898] Introducing queues for priority level "exempt": config={"type":"Exempt","exempt":{"nominalConcurrencyShares":0,"lendablePercent":0}}, nominalCL=0, lendableCL=0, borrowingCL=4000, currentCL=0, quiescing=false (shares=0xc00034dd90, shareSum=5) 2025-12-08T17:46:04.245011774+00:00 stderr F I1208 17:46:04.244951 12 apf_controller.go:898] Introducing queues for priority level "catch-all": config={"type":"Limited","limited":{"nominalConcurrencyShares":5,"limitResponse":{"type":"Reject"},"lendablePercent":0}}, nominalCL=4000, lendableCL=0, borrowingCL=4000, currentCL=4000, quiescing=false (shares=0xc000402554, shareSum=5) 2025-12-08T17:46:04.261071975+00:00 stderr F I1208 17:46:04.260948 12 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 2025-12-08T17:46:04.263373004+00:00 stderr F I1208 17:46:04.263281 12 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:46:04.272596922+00:00 stderr F I1208 17:46:04.272497 12 shared_informer.go:350] "Waiting for caches to sync" controller="node_authorizer" 2025-12-08T17:46:04.273176499+00:00 stderr F I1208 17:46:04.273094 12 audit.go:340] Using audit backend: ignoreErrors 2025-12-08T17:46:04.290209390+00:00 stderr F I1208 17:46:04.290125 12 store.go:1663] "Monitoring resource count at path" resource="events" path="//events" 2025-12-08T17:46:04.292274282+00:00 stderr F I1208 17:46:04.292163 12 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 2025-12-08T17:46:04.292436867+00:00 stderr F I1208 17:46:04.292356 12 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:46:04.298723446+00:00 stderr F I1208 17:46:04.298626 12 admission.go:47] Admission plugin "autoscaling.openshift.io/ClusterResourceOverride" is not configured so it will be disabled. 2025-12-08T17:46:04.298800908+00:00 stderr F I1208 17:46:04.298753 12 admission.go:33] Admission plugin "autoscaling.openshift.io/RunOnceDuration" is not configured so it will be disabled. 
2025-12-08T17:46:04.298800908+00:00 stderr F I1208 17:46:04.298768 12 admission.go:32] Admission plugin "scheduling.openshift.io/PodNodeConstraints" is not configured so it will be disabled. 2025-12-08T17:46:04.309169699+00:00 stderr F I1208 17:46:04.309081 12 shared_informer.go:350] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" 2025-12-08T17:46:04.314011654+00:00 stderr F I1208 17:46:04.313917 12 plugins.go:157] Loaded 25 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,PodNodeSelector,Priority,DefaultTolerationSeconds,PodTolerationRestriction,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,autoscaling.openshift.io/ManagementCPUsOverride,scheduling.openshift.io/OriginPodNodeEnvironment,image.openshift.io/ImagePolicy,security.openshift.io/SecurityContextConstraint,route.openshift.io/RouteHostAssignment,autoscaling.openshift.io/MixedCPUs,storage.openshift.io/PerformantSecurityPolicy,route.openshift.io/DefaultRoute,security.openshift.io/DefaultSecurityContextConstraints,MutatingAdmissionWebhook. 2025-12-08T17:46:04.314011654+00:00 stderr F I1208 17:46:04.313942 12 plugins.go:160] Loaded 47 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,PodNodeSelector,Priority,PodTolerationRestriction,OwnerReferencesPermissionEnforcement,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,autoscaling.openshift.io/ManagementCPUsOverride,authorization.openshift.io/RestrictSubjectBindings,scheduling.openshift.io/OriginPodNodeEnvironment,network.openshift.io/ExternalIPRanger,network.openshift.io/RestrictedEndpointsAdmission,image.openshift.io/ImagePolicy,security.openshift.io/SecurityContextConstraint,security.openshift.io/SCCExecRestrictions,route.openshift.io/IngressAdmission,storage.openshift.io/CSIInlineVolumeSecurity,autoscaling.openshift.io/ManagedNode,config.openshift.io/ValidateAPIServer,config.openshift.io/ValidateAuthentication,config.openshift.io/ValidateFeatureGate,config.openshift.io/ValidateConsole,operator.openshift.io/ValidateDNS,config.openshift.io/ValidateImage,config.openshift.io/ValidateOAuth,config.openshift.io/ValidateProject,config.openshift.io/DenyDeleteClusterConfiguration,operator.openshift.io/DenyDeleteClusterOperators,config.openshift.io/ValidateScheduler,quota.openshift.io/ValidateClusterResourceQuota,security.openshift.io/ValidateSecurityContextConstraints,authorization.openshift.io/ValidateRoleBindingRestriction,config.openshift.io/ValidateNetwork,config.openshift.io/ValidateAPIRequestCount,config.openshift.io/ValidateConfigNodeV1,route.openshift.io/ValidateRoute,operator.openshift.io/ValidateKubeControllerManager,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota,quota.openshift.io/ClusterResourceQuota. 
2025-12-08T17:46:04.314277392+00:00 stderr F I1208 17:46:04.314203 12 instance.go:233] Using reconciler: lease 2025-12-08T17:46:04.329605833+00:00 stderr F I1208 17:46:04.328870 12 store.go:1663] "Monitoring resource count at path" resource="customresourcedefinitions.apiextensions.k8s.io" path="//apiextensions.k8s.io/customresourcedefinitions" 2025-12-08T17:46:04.330369205+00:00 stderr F I1208 17:46:04.330261 12 handler.go:288] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.330369205+00:00 stderr F W1208 17:46:04.330287 12 genericapiserver.go:810] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.340795208+00:00 stderr F I1208 17:46:04.340667 12 cidrallocator.go:197] starting ServiceCIDR Allocator Controller 2025-12-08T17:46:04.346811999+00:00 stderr F I1208 17:46:04.346684 12 deleted_kinds.go:96] NewResourceExpirationEvaluator with currentVersion: 1.33. 2025-12-08T17:46:04.354089188+00:00 stderr F I1208 17:46:04.353963 12 store.go:1663] "Monitoring resource count at path" resource="events" path="//events" 2025-12-08T17:46:04.363034386+00:00 stderr F I1208 17:46:04.362145 12 store.go:1663] "Monitoring resource count at path" resource="resourcequotas" path="//resourcequotas" 2025-12-08T17:46:04.363420547+00:00 stderr F I1208 17:46:04.363322 12 cacher.go:469] cacher (resourcequotas): initialized 2025-12-08T17:46:04.363420547+00:00 stderr F I1208 17:46:04.363352 12 reflector.go:430] "Caches populated" type="*core.ResourceQuota" reflector="storage/cacher.go:/resourcequotas" 2025-12-08T17:46:04.369262523+00:00 stderr F I1208 17:46:04.369109 12 store.go:1663] "Monitoring resource count at path" resource="secrets" path="//secrets" 2025-12-08T17:46:04.375826539+00:00 stderr F I1208 17:46:04.375726 12 store.go:1663] "Monitoring resource count at path" resource="configmaps" path="//configmaps" 2025-12-08T17:46:04.391962864+00:00 stderr F I1208 17:46:04.390200 12 store.go:1663] "Monitoring resource count at path" resource="namespaces" path="//namespaces" 2025-12-08T17:46:04.394026636+00:00 stderr F I1208 17:46:04.393916 12 cacher.go:469] cacher (namespaces): initialized 2025-12-08T17:46:04.394026636+00:00 stderr F I1208 17:46:04.393957 12 reflector.go:430] "Caches populated" type="*core.Namespace" reflector="storage/cacher.go:/namespaces" 2025-12-08T17:46:04.399392157+00:00 stderr F I1208 17:46:04.398971 12 store.go:1663] "Monitoring resource count at path" resource="serviceaccounts" path="//serviceaccounts" 2025-12-08T17:46:04.406464819+00:00 stderr F I1208 17:46:04.405313 12 store.go:1663] "Monitoring resource count at path" resource="podtemplates" path="//podtemplates" 2025-12-08T17:46:04.407972345+00:00 stderr F I1208 17:46:04.407655 12 cacher.go:469] cacher (podtemplates): initialized 2025-12-08T17:46:04.407972345+00:00 stderr F I1208 17:46:04.407681 12 reflector.go:430] "Caches populated" type="*core.PodTemplate" reflector="storage/cacher.go:/podtemplates" 2025-12-08T17:46:04.412665886+00:00 stderr F I1208 17:46:04.412609 12 store.go:1663] "Monitoring resource count at path" resource="limitranges" path="//limitranges" 2025-12-08T17:46:04.413856381+00:00 stderr F I1208 17:46:04.413802 12 cacher.go:469] cacher (serviceaccounts): initialized 2025-12-08T17:46:04.413957244+00:00 stderr F I1208 17:46:04.413914 12 reflector.go:430] "Caches populated" type="*core.ServiceAccount" reflector="storage/cacher.go:/serviceaccounts" 2025-12-08T17:46:04.418772139+00:00 stderr F I1208 17:46:04.418704 12 cacher.go:469] cacher 
(limitranges): initialized 2025-12-08T17:46:04.418772139+00:00 stderr F I1208 17:46:04.418750 12 reflector.go:430] "Caches populated" type="*core.LimitRange" reflector="storage/cacher.go:/limitranges" 2025-12-08T17:46:04.422625034+00:00 stderr F I1208 17:46:04.420569 12 store.go:1663] "Monitoring resource count at path" resource="persistentvolumes" path="//persistentvolumes" 2025-12-08T17:46:04.424616044+00:00 stderr F I1208 17:46:04.424555 12 cacher.go:469] cacher (persistentvolumes): initialized 2025-12-08T17:46:04.424616044+00:00 stderr F I1208 17:46:04.424591 12 reflector.go:430] "Caches populated" type="*core.PersistentVolume" reflector="storage/cacher.go:/persistentvolumes" 2025-12-08T17:46:04.426391917+00:00 stderr F I1208 17:46:04.426320 12 cacher.go:469] cacher (secrets): initialized 2025-12-08T17:46:04.426391917+00:00 stderr F I1208 17:46:04.426370 12 reflector.go:430] "Caches populated" type="*core.Secret" reflector="storage/cacher.go:/secrets" 2025-12-08T17:46:04.431384187+00:00 stderr F I1208 17:46:04.431325 12 store.go:1663] "Monitoring resource count at path" resource="persistentvolumeclaims" path="//persistentvolumeclaims" 2025-12-08T17:46:04.434088368+00:00 stderr F I1208 17:46:04.434035 12 cacher.go:469] cacher (persistentvolumeclaims): initialized 2025-12-08T17:46:04.434138490+00:00 stderr F I1208 17:46:04.434079 12 reflector.go:430] "Caches populated" type="*core.PersistentVolumeClaim" reflector="storage/cacher.go:/persistentvolumeclaims" 2025-12-08T17:46:04.439530392+00:00 stderr F I1208 17:46:04.439457 12 store.go:1663] "Monitoring resource count at path" resource="endpoints" path="//services/endpoints" 2025-12-08T17:46:04.441446880+00:00 stderr F I1208 17:46:04.441356 12 cacher.go:469] cacher (configmaps): initialized 2025-12-08T17:46:04.441478691+00:00 stderr F I1208 17:46:04.441426 12 reflector.go:430] "Caches populated" type="*core.ConfigMap" reflector="storage/cacher.go:/configmaps" 2025-12-08T17:46:04.445024227+00:00 stderr F I1208 17:46:04.444962 12 cacher.go:469] cacher (endpoints): initialized 2025-12-08T17:46:04.445069258+00:00 stderr F I1208 17:46:04.445003 12 reflector.go:430] "Caches populated" type="*core.Endpoints" reflector="storage/cacher.go:/services/endpoints" 2025-12-08T17:46:04.448832071+00:00 stderr F I1208 17:46:04.448785 12 store.go:1663] "Monitoring resource count at path" resource="nodes" path="//minions" 2025-12-08T17:46:04.450819401+00:00 stderr F I1208 17:46:04.450770 12 cacher.go:469] cacher (nodes): initialized 2025-12-08T17:46:04.450856432+00:00 stderr F I1208 17:46:04.450826 12 reflector.go:430] "Caches populated" type="*core.Node" reflector="storage/cacher.go:/minions" 2025-12-08T17:46:04.464197432+00:00 stderr F I1208 17:46:04.464147 12 store.go:1663] "Monitoring resource count at path" resource="pods" path="//pods" 2025-12-08T17:46:04.475800350+00:00 stderr F I1208 17:46:04.475714 12 store.go:1663] "Monitoring resource count at path" resource="services" path="//services/specs" 2025-12-08T17:46:04.481024347+00:00 stderr F I1208 17:46:04.480930 12 cacher.go:469] cacher (pods): initialized 2025-12-08T17:46:04.481024347+00:00 stderr F I1208 17:46:04.480977 12 reflector.go:430] "Caches populated" type="*core.Pod" reflector="storage/cacher.go:/pods" 2025-12-08T17:46:04.482517652+00:00 stderr F I1208 17:46:04.482457 12 cacher.go:469] cacher (services): initialized 2025-12-08T17:46:04.482517652+00:00 stderr F I1208 17:46:04.482490 12 reflector.go:430] "Caches populated" type="*core.Service" reflector="storage/cacher.go:/services/specs" 
2025-12-08T17:46:04.484329106+00:00 stderr F I1208 17:46:04.484261 12 store.go:1663] "Monitoring resource count at path" resource="serviceaccounts" path="//serviceaccounts" 2025-12-08T17:46:04.489053339+00:00 stderr F I1208 17:46:04.488987 12 cacher.go:469] cacher (serviceaccounts): initialized 2025-12-08T17:46:04.489053339+00:00 stderr F I1208 17:46:04.489014 12 reflector.go:430] "Caches populated" type="*core.ServiceAccount" reflector="storage/cacher.go:/serviceaccounts" 2025-12-08T17:46:04.491907884+00:00 stderr F I1208 17:46:04.491841 12 store.go:1663] "Monitoring resource count at path" resource="replicationcontrollers" path="//controllers" 2025-12-08T17:46:04.492813811+00:00 stderr F I1208 17:46:04.492769 12 cacher.go:469] cacher (replicationcontrollers): initialized 2025-12-08T17:46:04.492857172+00:00 stderr F I1208 17:46:04.492807 12 reflector.go:430] "Caches populated" type="*core.ReplicationController" reflector="storage/cacher.go:/controllers" 2025-12-08T17:46:04.493029368+00:00 stderr F I1208 17:46:04.492979 12 apis.go:128] Enabling API group "". 2025-12-08T17:46:04.507260465+00:00 stderr F I1208 17:46:04.507151 12 handler.go:288] Adding GroupVersion v1 to ResourceManager 2025-12-08T17:46:04.507571794+00:00 stderr F I1208 17:46:04.507452 12 apis.go:112] API group "internal.apiserver.k8s.io" is not enabled, skipping. 2025-12-08T17:46:04.507636886+00:00 stderr F I1208 17:46:04.507554 12 apis.go:128] Enabling API group "authentication.k8s.io". 2025-12-08T17:46:04.507708378+00:00 stderr F I1208 17:46:04.507670 12 apis.go:128] Enabling API group "authorization.k8s.io". 2025-12-08T17:46:04.514217934+00:00 stderr F I1208 17:46:04.514169 12 store.go:1663] "Monitoring resource count at path" resource="horizontalpodautoscalers.autoscaling" path="//horizontalpodautoscalers" 2025-12-08T17:46:04.515212303+00:00 stderr F I1208 17:46:04.515127 12 cacher.go:469] cacher (horizontalpodautoscalers.autoscaling): initialized 2025-12-08T17:46:04.515212303+00:00 stderr F I1208 17:46:04.515168 12 reflector.go:430] "Caches populated" type="*autoscaling.HorizontalPodAutoscaler" reflector="storage/cacher.go:/horizontalpodautoscalers" 2025-12-08T17:46:04.519360178+00:00 stderr F I1208 17:46:04.519306 12 store.go:1663] "Monitoring resource count at path" resource="horizontalpodautoscalers.autoscaling" path="//horizontalpodautoscalers" 2025-12-08T17:46:04.519421670+00:00 stderr F I1208 17:46:04.519398 12 apis.go:128] Enabling API group "autoscaling". 2025-12-08T17:46:04.520076430+00:00 stderr F I1208 17:46:04.520028 12 cacher.go:469] cacher (horizontalpodautoscalers.autoscaling): initialized 2025-12-08T17:46:04.520111211+00:00 stderr F I1208 17:46:04.520055 12 reflector.go:430] "Caches populated" type="*autoscaling.HorizontalPodAutoscaler" reflector="storage/cacher.go:/horizontalpodautoscalers" 2025-12-08T17:46:04.524347647+00:00 stderr F I1208 17:46:04.524310 12 store.go:1663] "Monitoring resource count at path" resource="jobs.batch" path="//jobs" 2025-12-08T17:46:04.525383389+00:00 stderr F I1208 17:46:04.525293 12 cacher.go:469] cacher (jobs.batch): initialized 2025-12-08T17:46:04.525383389+00:00 stderr F I1208 17:46:04.525325 12 reflector.go:430] "Caches populated" type="*batch.Job" reflector="storage/cacher.go:/jobs" 2025-12-08T17:46:04.529727349+00:00 stderr F I1208 17:46:04.529689 12 store.go:1663] "Monitoring resource count at path" resource="cronjobs.batch" path="//cronjobs" 2025-12-08T17:46:04.529859233+00:00 stderr F I1208 17:46:04.529826 12 apis.go:128] Enabling API group "batch". 
2025-12-08T17:46:04.531442901+00:00 stderr F I1208 17:46:04.531392 12 cacher.go:469] cacher (cronjobs.batch): initialized 2025-12-08T17:46:04.531442901+00:00 stderr F I1208 17:46:04.531413 12 reflector.go:430] "Caches populated" type="*batch.CronJob" reflector="storage/cacher.go:/cronjobs" 2025-12-08T17:46:04.537639247+00:00 stderr F I1208 17:46:04.537536 12 store.go:1663] "Monitoring resource count at path" resource="certificatesigningrequests.certificates.k8s.io" path="//certificatesigningrequests" 2025-12-08T17:46:04.537711289+00:00 stderr F I1208 17:46:04.537630 12 apis.go:128] Enabling API group "certificates.k8s.io". 2025-12-08T17:46:04.546102641+00:00 stderr F I1208 17:46:04.545103 12 cacher.go:469] cacher (certificatesigningrequests.certificates.k8s.io): initialized 2025-12-08T17:46:04.546102641+00:00 stderr F I1208 17:46:04.545148 12 reflector.go:430] "Caches populated" type="*certificates.CertificateSigningRequest" reflector="storage/cacher.go:/certificatesigningrequests" 2025-12-08T17:46:04.551420450+00:00 stderr F I1208 17:46:04.551338 12 store.go:1663] "Monitoring resource count at path" resource="leases.coordination.k8s.io" path="//leases" 2025-12-08T17:46:04.551420450+00:00 stderr F I1208 17:46:04.551391 12 apis.go:128] Enabling API group "coordination.k8s.io". 2025-12-08T17:46:04.552852074+00:00 stderr F I1208 17:46:04.552774 12 cacher.go:469] cacher (leases.coordination.k8s.io): initialized 2025-12-08T17:46:04.552852074+00:00 stderr F I1208 17:46:04.552812 12 reflector.go:430] "Caches populated" type="*coordination.Lease" reflector="storage/cacher.go:/leases" 2025-12-08T17:46:04.557139452+00:00 stderr F I1208 17:46:04.557065 12 store.go:1663] "Monitoring resource count at path" resource="endpointslices.discovery.k8s.io" path="//endpointslices" 2025-12-08T17:46:04.557139452+00:00 stderr F I1208 17:46:04.557098 12 apis.go:128] Enabling API group "discovery.k8s.io". 
2025-12-08T17:46:04.560299267+00:00 stderr F I1208 17:46:04.560182 12 cacher.go:469] cacher (endpointslices.discovery.k8s.io): initialized 2025-12-08T17:46:04.560299267+00:00 stderr F I1208 17:46:04.560212 12 reflector.go:430] "Caches populated" type="*discovery.EndpointSlice" reflector="storage/cacher.go:/endpointslices" 2025-12-08T17:46:04.566399660+00:00 stderr F I1208 17:46:04.563447 12 store.go:1663] "Monitoring resource count at path" resource="networkpolicies.networking.k8s.io" path="//networkpolicies" 2025-12-08T17:46:04.566399660+00:00 stderr F I1208 17:46:04.565798 12 cacher.go:469] cacher (networkpolicies.networking.k8s.io): initialized 2025-12-08T17:46:04.566399660+00:00 stderr F I1208 17:46:04.565815 12 reflector.go:430] "Caches populated" type="*networking.NetworkPolicy" reflector="storage/cacher.go:/networkpolicies" 2025-12-08T17:46:04.569965267+00:00 stderr F I1208 17:46:04.569914 12 store.go:1663] "Monitoring resource count at path" resource="ingresses.networking.k8s.io" path="//ingress" 2025-12-08T17:46:04.572908406+00:00 stderr F I1208 17:46:04.572809 12 cacher.go:469] cacher (ingresses.networking.k8s.io): initialized 2025-12-08T17:46:04.572908406+00:00 stderr F I1208 17:46:04.572833 12 reflector.go:430] "Caches populated" type="*networking.Ingress" reflector="storage/cacher.go:/ingress" 2025-12-08T17:46:04.576753511+00:00 stderr F I1208 17:46:04.576617 12 store.go:1663] "Monitoring resource count at path" resource="ingressclasses.networking.k8s.io" path="//ingressclasses" 2025-12-08T17:46:04.577717829+00:00 stderr F I1208 17:46:04.577643 12 cacher.go:469] cacher (ingressclasses.networking.k8s.io): initialized 2025-12-08T17:46:04.577717829+00:00 stderr F I1208 17:46:04.577679 12 reflector.go:430] "Caches populated" type="*networking.IngressClass" reflector="storage/cacher.go:/ingressclasses" 2025-12-08T17:46:04.582025319+00:00 stderr F I1208 17:46:04.581633 12 store.go:1663] "Monitoring resource count at path" resource="ipaddresses.networking.k8s.io" path="//ipaddresses" 2025-12-08T17:46:04.584164403+00:00 stderr F I1208 17:46:04.584092 12 cacher.go:469] cacher (ipaddresses.networking.k8s.io): initialized 2025-12-08T17:46:04.584164403+00:00 stderr F I1208 17:46:04.584123 12 reflector.go:430] "Caches populated" type="*networking.IPAddress" reflector="storage/cacher.go:/ipaddresses" 2025-12-08T17:46:04.587517434+00:00 stderr F I1208 17:46:04.587473 12 store.go:1663] "Monitoring resource count at path" resource="servicecidrs.networking.k8s.io" path="//servicecidrs" 2025-12-08T17:46:04.587690779+00:00 stderr F I1208 17:46:04.587662 12 apis.go:128] Enabling API group "networking.k8s.io". 2025-12-08T17:46:04.589720690+00:00 stderr F I1208 17:46:04.589667 12 cacher.go:469] cacher (servicecidrs.networking.k8s.io): initialized 2025-12-08T17:46:04.589720690+00:00 stderr F I1208 17:46:04.589695 12 reflector.go:430] "Caches populated" type="*networking.ServiceCIDR" reflector="storage/cacher.go:/servicecidrs" 2025-12-08T17:46:04.594121142+00:00 stderr F I1208 17:46:04.594054 12 store.go:1663] "Monitoring resource count at path" resource="runtimeclasses.node.k8s.io" path="//runtimeclasses" 2025-12-08T17:46:04.594121142+00:00 stderr F I1208 17:46:04.594095 12 apis.go:128] Enabling API group "node.k8s.io". 
2025-12-08T17:46:04.594929917+00:00 stderr F I1208 17:46:04.594858 12 cacher.go:469] cacher (runtimeclasses.node.k8s.io): initialized 2025-12-08T17:46:04.594929917+00:00 stderr F I1208 17:46:04.594894 12 reflector.go:430] "Caches populated" type="*node.RuntimeClass" reflector="storage/cacher.go:/runtimeclasses" 2025-12-08T17:46:04.600325519+00:00 stderr F I1208 17:46:04.600249 12 store.go:1663] "Monitoring resource count at path" resource="poddisruptionbudgets.policy" path="//poddisruptionbudgets" 2025-12-08T17:46:04.600364120+00:00 stderr F I1208 17:46:04.600311 12 apis.go:128] Enabling API group "policy". 2025-12-08T17:46:04.602702939+00:00 stderr F I1208 17:46:04.602635 12 cacher.go:469] cacher (poddisruptionbudgets.policy): initialized 2025-12-08T17:46:04.602702939+00:00 stderr F I1208 17:46:04.602671 12 reflector.go:430] "Caches populated" type="*policy.PodDisruptionBudget" reflector="storage/cacher.go:/poddisruptionbudgets" 2025-12-08T17:46:04.603401341+00:00 stderr F I1208 17:46:04.603344 12 cacher.go:469] cacher (customresourcedefinitions.apiextensions.k8s.io): initialized 2025-12-08T17:46:04.603401341+00:00 stderr F I1208 17:46:04.603362 12 reflector.go:430] "Caches populated" type="*apiextensions.CustomResourceDefinition" reflector="storage/cacher.go:/apiextensions.k8s.io/customresourcedefinitions" 2025-12-08T17:46:04.606251056+00:00 stderr F I1208 17:46:04.606187 12 store.go:1663] "Monitoring resource count at path" resource="roles.rbac.authorization.k8s.io" path="//roles" 2025-12-08T17:46:04.609981468+00:00 stderr F I1208 17:46:04.609773 12 cacher.go:469] cacher (roles.rbac.authorization.k8s.io): initialized 2025-12-08T17:46:04.610090531+00:00 stderr F I1208 17:46:04.610065 12 reflector.go:430] "Caches populated" type="*rbac.Role" reflector="storage/cacher.go:/roles" 2025-12-08T17:46:04.613230215+00:00 stderr F I1208 17:46:04.613167 12 store.go:1663] "Monitoring resource count at path" resource="rolebindings.rbac.authorization.k8s.io" path="//rolebindings" 2025-12-08T17:46:04.619047850+00:00 stderr F I1208 17:46:04.618983 12 store.go:1663] "Monitoring resource count at path" resource="clusterroles.rbac.authorization.k8s.io" path="//clusterroles" 2025-12-08T17:46:04.620300248+00:00 stderr F I1208 17:46:04.620228 12 cacher.go:469] cacher (rolebindings.rbac.authorization.k8s.io): initialized 2025-12-08T17:46:04.620338009+00:00 stderr F I1208 17:46:04.620308 12 reflector.go:430] "Caches populated" type="*rbac.RoleBinding" reflector="storage/cacher.go:/rolebindings" 2025-12-08T17:46:04.624644258+00:00 stderr F I1208 17:46:04.624582 12 store.go:1663] "Monitoring resource count at path" resource="clusterrolebindings.rbac.authorization.k8s.io" path="//clusterrolebindings" 2025-12-08T17:46:04.624710420+00:00 stderr F I1208 17:46:04.624682 12 apis.go:128] Enabling API group "rbac.authorization.k8s.io". 
2025-12-08T17:46:04.627971968+00:00 stderr F I1208 17:46:04.626996 12 cacher.go:469] cacher (clusterroles.rbac.authorization.k8s.io): initialized 2025-12-08T17:46:04.627971968+00:00 stderr F I1208 17:46:04.627030 12 reflector.go:430] "Caches populated" type="*rbac.ClusterRole" reflector="storage/cacher.go:/clusterroles" 2025-12-08T17:46:04.629066711+00:00 stderr F I1208 17:46:04.628502 12 cacher.go:469] cacher (clusterrolebindings.rbac.authorization.k8s.io): initialized 2025-12-08T17:46:04.629066711+00:00 stderr F I1208 17:46:04.628523 12 reflector.go:430] "Caches populated" type="*rbac.ClusterRoleBinding" reflector="storage/cacher.go:/clusterrolebindings" 2025-12-08T17:46:04.632479943+00:00 stderr F I1208 17:46:04.632413 12 store.go:1663] "Monitoring resource count at path" resource="priorityclasses.scheduling.k8s.io" path="//priorityclasses" 2025-12-08T17:46:04.632500424+00:00 stderr F I1208 17:46:04.632465 12 apis.go:128] Enabling API group "scheduling.k8s.io". 2025-12-08T17:46:04.633438532+00:00 stderr F I1208 17:46:04.633319 12 cacher.go:469] cacher (priorityclasses.scheduling.k8s.io): initialized 2025-12-08T17:46:04.633438532+00:00 stderr F I1208 17:46:04.633352 12 reflector.go:430] "Caches populated" type="*scheduling.PriorityClass" reflector="storage/cacher.go:/priorityclasses" 2025-12-08T17:46:04.639920117+00:00 stderr F I1208 17:46:04.637963 12 store.go:1663] "Monitoring resource count at path" resource="storageclasses.storage.k8s.io" path="//storageclasses" 2025-12-08T17:46:04.639920117+00:00 stderr F I1208 17:46:04.639281 12 cacher.go:469] cacher (storageclasses.storage.k8s.io): initialized 2025-12-08T17:46:04.639920117+00:00 stderr F I1208 17:46:04.639311 12 reflector.go:430] "Caches populated" type="*storage.StorageClass" reflector="storage/cacher.go:/storageclasses" 2025-12-08T17:46:04.643837164+00:00 stderr F I1208 17:46:04.643763 12 store.go:1663] "Monitoring resource count at path" resource="volumeattachments.storage.k8s.io" path="//volumeattachments" 2025-12-08T17:46:04.644670929+00:00 stderr F I1208 17:46:04.644614 12 cacher.go:469] cacher (volumeattachments.storage.k8s.io): initialized 2025-12-08T17:46:04.644690340+00:00 stderr F I1208 17:46:04.644665 12 reflector.go:430] "Caches populated" type="*storage.VolumeAttachment" reflector="storage/cacher.go:/volumeattachments" 2025-12-08T17:46:04.650264787+00:00 stderr F I1208 17:46:04.650174 12 store.go:1663] "Monitoring resource count at path" resource="csinodes.storage.k8s.io" path="//csinodes" 2025-12-08T17:46:04.651209156+00:00 stderr F I1208 17:46:04.651132 12 cacher.go:469] cacher (csinodes.storage.k8s.io): initialized 2025-12-08T17:46:04.651209156+00:00 stderr F I1208 17:46:04.651178 12 reflector.go:430] "Caches populated" type="*storage.CSINode" reflector="storage/cacher.go:/csinodes" 2025-12-08T17:46:04.656219946+00:00 stderr F I1208 17:46:04.655531 12 store.go:1663] "Monitoring resource count at path" resource="csidrivers.storage.k8s.io" path="//csidrivers" 2025-12-08T17:46:04.656496245+00:00 stderr F I1208 17:46:04.656426 12 cacher.go:469] cacher (csidrivers.storage.k8s.io): initialized 2025-12-08T17:46:04.656496245+00:00 stderr F I1208 17:46:04.656460 12 reflector.go:430] "Caches populated" type="*storage.CSIDriver" reflector="storage/cacher.go:/csidrivers" 2025-12-08T17:46:04.662132124+00:00 stderr F I1208 17:46:04.662061 12 store.go:1663] "Monitoring resource count at path" resource="csistoragecapacities.storage.k8s.io" path="//csistoragecapacities" 2025-12-08T17:46:04.662254857+00:00 stderr F I1208 
17:46:04.662206 12 apis.go:128] Enabling API group "storage.k8s.io". 2025-12-08T17:46:04.662297889+00:00 stderr F I1208 17:46:04.662265 12 apis.go:112] API group "storagemigration.k8s.io" is not enabled, skipping. 2025-12-08T17:46:04.663139164+00:00 stderr F I1208 17:46:04.662937 12 cacher.go:469] cacher (csistoragecapacities.storage.k8s.io): initialized 2025-12-08T17:46:04.663139164+00:00 stderr F I1208 17:46:04.662968 12 reflector.go:430] "Caches populated" type="*storage.CSIStorageCapacity" reflector="storage/cacher.go:/csistoragecapacities" 2025-12-08T17:46:04.669078652+00:00 stderr F I1208 17:46:04.668998 12 store.go:1663] "Monitoring resource count at path" resource="flowschemas.flowcontrol.apiserver.k8s.io" path="//flowschemas" 2025-12-08T17:46:04.671716381+00:00 stderr F I1208 17:46:04.670604 12 cacher.go:469] cacher (flowschemas.flowcontrol.apiserver.k8s.io): initialized 2025-12-08T17:46:04.671716381+00:00 stderr F I1208 17:46:04.670697 12 reflector.go:430] "Caches populated" type="*flowcontrol.FlowSchema" reflector="storage/cacher.go:/flowschemas" 2025-12-08T17:46:04.677390901+00:00 stderr F I1208 17:46:04.674341 12 store.go:1663] "Monitoring resource count at path" resource="prioritylevelconfigurations.flowcontrol.apiserver.k8s.io" path="//prioritylevelconfigurations" 2025-12-08T17:46:04.677390901+00:00 stderr F I1208 17:46:04.674414 12 apis.go:128] Enabling API group "flowcontrol.apiserver.k8s.io". 2025-12-08T17:46:04.677390901+00:00 stderr F I1208 17:46:04.676079 12 cacher.go:469] cacher (prioritylevelconfigurations.flowcontrol.apiserver.k8s.io): initialized 2025-12-08T17:46:04.677390901+00:00 stderr F I1208 17:46:04.676113 12 reflector.go:430] "Caches populated" type="*flowcontrol.PriorityLevelConfiguration" reflector="storage/cacher.go:/prioritylevelconfigurations" 2025-12-08T17:46:04.682969639+00:00 stderr F I1208 17:46:04.682552 12 store.go:1663] "Monitoring resource count at path" resource="deployments.apps" path="//deployments" 2025-12-08T17:46:04.690717942+00:00 stderr F I1208 17:46:04.690611 12 store.go:1663] "Monitoring resource count at path" resource="statefulsets.apps" path="//statefulsets" 2025-12-08T17:46:04.694127734+00:00 stderr F I1208 17:46:04.693245 12 cacher.go:469] cacher (statefulsets.apps): initialized 2025-12-08T17:46:04.694127734+00:00 stderr F I1208 17:46:04.693291 12 reflector.go:430] "Caches populated" type="*apps.StatefulSet" reflector="storage/cacher.go:/statefulsets" 2025-12-08T17:46:04.696841305+00:00 stderr F I1208 17:46:04.696757 12 cacher.go:469] cacher (deployments.apps): initialized 2025-12-08T17:46:04.696859266+00:00 stderr F I1208 17:46:04.696824 12 reflector.go:430] "Caches populated" type="*apps.Deployment" reflector="storage/cacher.go:/deployments" 2025-12-08T17:46:04.700122634+00:00 stderr F I1208 17:46:04.700062 12 store.go:1663] "Monitoring resource count at path" resource="daemonsets.apps" path="//daemonsets" 2025-12-08T17:46:04.708480454+00:00 stderr F I1208 17:46:04.708307 12 cacher.go:469] cacher (daemonsets.apps): initialized 2025-12-08T17:46:04.708480454+00:00 stderr F I1208 17:46:04.708349 12 reflector.go:430] "Caches populated" type="*apps.DaemonSet" reflector="storage/cacher.go:/daemonsets" 2025-12-08T17:46:04.710041802+00:00 stderr F I1208 17:46:04.709922 12 store.go:1663] "Monitoring resource count at path" resource="replicasets.apps" path="//replicasets" 2025-12-08T17:46:04.716863996+00:00 stderr F I1208 17:46:04.716777 12 store.go:1663] "Monitoring resource count at path" resource="controllerrevisions.apps" 
path="//controllerrevisions" 2025-12-08T17:46:04.717013301+00:00 stderr F I1208 17:46:04.716954 12 apis.go:128] Enabling API group "apps". 2025-12-08T17:46:04.718794934+00:00 stderr F I1208 17:46:04.718726 12 cacher.go:469] cacher (controllerrevisions.apps): initialized 2025-12-08T17:46:04.718794934+00:00 stderr F I1208 17:46:04.718751 12 reflector.go:430] "Caches populated" type="*apps.ControllerRevision" reflector="storage/cacher.go:/controllerrevisions" 2025-12-08T17:46:04.720998111+00:00 stderr F I1208 17:46:04.720920 12 cacher.go:469] cacher (replicasets.apps): initialized 2025-12-08T17:46:04.720998111+00:00 stderr F I1208 17:46:04.720972 12 reflector.go:430] "Caches populated" type="*apps.ReplicaSet" reflector="storage/cacher.go:/replicasets" 2025-12-08T17:46:04.722413193+00:00 stderr F I1208 17:46:04.722176 12 store.go:1663] "Monitoring resource count at path" resource="validatingwebhookconfigurations.admissionregistration.k8s.io" path="//validatingwebhookconfigurations" 2025-12-08T17:46:04.737166755+00:00 stderr F I1208 17:46:04.723429 12 cacher.go:469] cacher (validatingwebhookconfigurations.admissionregistration.k8s.io): initialized 2025-12-08T17:46:04.737166755+00:00 stderr F I1208 17:46:04.723455 12 reflector.go:430] "Caches populated" type="*admissionregistration.ValidatingWebhookConfiguration" reflector="storage/cacher.go:/validatingwebhookconfigurations" 2025-12-08T17:46:04.737166755+00:00 stderr F I1208 17:46:04.727963 12 store.go:1663] "Monitoring resource count at path" resource="mutatingwebhookconfigurations.admissionregistration.k8s.io" path="//mutatingwebhookconfigurations" 2025-12-08T17:46:04.737166755+00:00 stderr F I1208 17:46:04.729647 12 cacher.go:469] cacher (mutatingwebhookconfigurations.admissionregistration.k8s.io): initialized 2025-12-08T17:46:04.737166755+00:00 stderr F I1208 17:46:04.729667 12 reflector.go:430] "Caches populated" type="*admissionregistration.MutatingWebhookConfiguration" reflector="storage/cacher.go:/mutatingwebhookconfigurations" 2025-12-08T17:46:04.737166755+00:00 stderr F I1208 17:46:04.736213 12 store.go:1663] "Monitoring resource count at path" resource="validatingadmissionpolicies.admissionregistration.k8s.io" path="//validatingadmissionpolicies" 2025-12-08T17:46:04.742378642+00:00 stderr F I1208 17:46:04.741536 12 cacher.go:469] cacher (validatingadmissionpolicies.admissionregistration.k8s.io): initialized 2025-12-08T17:46:04.742378642+00:00 stderr F I1208 17:46:04.741570 12 reflector.go:430] "Caches populated" type="*admissionregistration.ValidatingAdmissionPolicy" reflector="storage/cacher.go:/validatingadmissionpolicies" 2025-12-08T17:46:04.744239388+00:00 stderr F I1208 17:46:04.744178 12 store.go:1663] "Monitoring resource count at path" resource="validatingadmissionpolicybindings.admissionregistration.k8s.io" path="//validatingadmissionpolicybindings" 2025-12-08T17:46:04.744569878+00:00 stderr F I1208 17:46:04.744509 12 apis.go:128] Enabling API group "admissionregistration.k8s.io". 
2025-12-08T17:46:04.745217427+00:00 stderr F I1208 17:46:04.745171 12 cacher.go:469] cacher (validatingadmissionpolicybindings.admissionregistration.k8s.io): initialized 2025-12-08T17:46:04.745217427+00:00 stderr F I1208 17:46:04.745192 12 reflector.go:430] "Caches populated" type="*admissionregistration.ValidatingAdmissionPolicyBinding" reflector="storage/cacher.go:/validatingadmissionpolicybindings" 2025-12-08T17:46:04.752606789+00:00 stderr F I1208 17:46:04.752479 12 store.go:1663] "Monitoring resource count at path" resource="events" path="//events" 2025-12-08T17:46:04.752658421+00:00 stderr F I1208 17:46:04.752588 12 apis.go:128] Enabling API group "events.k8s.io". 2025-12-08T17:46:04.752719343+00:00 stderr F I1208 17:46:04.752654 12 apis.go:112] API group "resource.k8s.io" is not enabled, skipping. 2025-12-08T17:46:04.765263199+00:00 stderr F I1208 17:46:04.765153 12 handler.go:288] Adding GroupVersion authentication.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.765263199+00:00 stderr F W1208 17:46:04.765191 12 genericapiserver.go:810] Skipping API authentication.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.765263199+00:00 stderr F W1208 17:46:04.765198 12 genericapiserver.go:810] Skipping API authentication.k8s.io/v1alpha1 because it has no resources. 2025-12-08T17:46:04.765756044+00:00 stderr F I1208 17:46:04.765656 12 handler.go:288] Adding GroupVersion authorization.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.765756044+00:00 stderr F W1208 17:46:04.765675 12 genericapiserver.go:810] Skipping API authorization.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.766656651+00:00 stderr F I1208 17:46:04.766509 12 handler.go:288] Adding GroupVersion autoscaling v2 to ResourceManager 2025-12-08T17:46:04.767428794+00:00 stderr F I1208 17:46:04.767309 12 handler.go:288] Adding GroupVersion autoscaling v1 to ResourceManager 2025-12-08T17:46:04.767428794+00:00 stderr F W1208 17:46:04.767330 12 genericapiserver.go:810] Skipping API autoscaling/v2beta1 because it has no resources. 2025-12-08T17:46:04.767428794+00:00 stderr F W1208 17:46:04.767336 12 genericapiserver.go:810] Skipping API autoscaling/v2beta2 because it has no resources. 2025-12-08T17:46:04.768639591+00:00 stderr F I1208 17:46:04.768564 12 handler.go:288] Adding GroupVersion batch v1 to ResourceManager 2025-12-08T17:46:04.768639591+00:00 stderr F W1208 17:46:04.768586 12 genericapiserver.go:810] Skipping API batch/v1beta1 because it has no resources. 2025-12-08T17:46:04.769480106+00:00 stderr F I1208 17:46:04.769296 12 handler.go:288] Adding GroupVersion certificates.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.769480106+00:00 stderr F W1208 17:46:04.769315 12 genericapiserver.go:810] Skipping API certificates.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.769480106+00:00 stderr F W1208 17:46:04.769321 12 genericapiserver.go:810] Skipping API certificates.k8s.io/v1alpha1 because it has no resources. 2025-12-08T17:46:04.769925299+00:00 stderr F I1208 17:46:04.769780 12 handler.go:288] Adding GroupVersion coordination.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.769925299+00:00 stderr F W1208 17:46:04.769796 12 genericapiserver.go:810] Skipping API coordination.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.769925299+00:00 stderr F W1208 17:46:04.769802 12 genericapiserver.go:810] Skipping API coordination.k8s.io/v1alpha2 because it has no resources. 
2025-12-08T17:46:04.770370562+00:00 stderr F I1208 17:46:04.770307 12 handler.go:288] Adding GroupVersion discovery.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.770370562+00:00 stderr F W1208 17:46:04.770329 12 genericapiserver.go:810] Skipping API discovery.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.772286650+00:00 stderr F I1208 17:46:04.772214 12 handler.go:288] Adding GroupVersion networking.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.772286650+00:00 stderr F W1208 17:46:04.772231 12 genericapiserver.go:810] Skipping API networking.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.772286650+00:00 stderr F W1208 17:46:04.772236 12 genericapiserver.go:810] Skipping API networking.k8s.io/v1alpha1 because it has no resources. 2025-12-08T17:46:04.772673981+00:00 stderr F I1208 17:46:04.772613 12 handler.go:288] Adding GroupVersion node.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.772673981+00:00 stderr F W1208 17:46:04.772628 12 genericapiserver.go:810] Skipping API node.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.772673981+00:00 stderr F W1208 17:46:04.772632 12 genericapiserver.go:810] Skipping API node.k8s.io/v1alpha1 because it has no resources. 2025-12-08T17:46:04.773335461+00:00 stderr F I1208 17:46:04.773249 12 handler.go:288] Adding GroupVersion policy v1 to ResourceManager 2025-12-08T17:46:04.773335461+00:00 stderr F W1208 17:46:04.773264 12 genericapiserver.go:810] Skipping API policy/v1beta1 because it has no resources. 2025-12-08T17:46:04.774763184+00:00 stderr F I1208 17:46:04.774679 12 handler.go:288] Adding GroupVersion rbac.authorization.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.774763184+00:00 stderr F W1208 17:46:04.774695 12 genericapiserver.go:810] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.774763184+00:00 stderr F W1208 17:46:04.774700 12 genericapiserver.go:810] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources. 2025-12-08T17:46:04.775182467+00:00 stderr F I1208 17:46:04.775105 12 handler.go:288] Adding GroupVersion scheduling.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.775182467+00:00 stderr F W1208 17:46:04.775121 12 genericapiserver.go:810] Skipping API scheduling.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.775182467+00:00 stderr F W1208 17:46:04.775125 12 genericapiserver.go:810] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources. 2025-12-08T17:46:04.777009542+00:00 stderr F I1208 17:46:04.776925 12 handler.go:288] Adding GroupVersion storage.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.777009542+00:00 stderr F W1208 17:46:04.776944 12 genericapiserver.go:810] Skipping API storage.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.777009542+00:00 stderr F W1208 17:46:04.776949 12 genericapiserver.go:810] Skipping API storage.k8s.io/v1alpha1 because it has no resources. 2025-12-08T17:46:04.777960060+00:00 stderr F I1208 17:46:04.777866 12 handler.go:288] Adding GroupVersion flowcontrol.apiserver.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.777960060+00:00 stderr F W1208 17:46:04.777901 12 genericapiserver.go:810] Skipping API flowcontrol.apiserver.k8s.io/v1beta3 because it has no resources. 2025-12-08T17:46:04.777960060+00:00 stderr F W1208 17:46:04.777906 12 genericapiserver.go:810] Skipping API flowcontrol.apiserver.k8s.io/v1beta2 because it has no resources. 
2025-12-08T17:46:04.777960060+00:00 stderr F W1208 17:46:04.777909 12 genericapiserver.go:810] Skipping API flowcontrol.apiserver.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.780431394+00:00 stderr F I1208 17:46:04.780306 12 handler.go:288] Adding GroupVersion apps v1 to ResourceManager 2025-12-08T17:46:04.780431394+00:00 stderr F W1208 17:46:04.780324 12 genericapiserver.go:810] Skipping API apps/v1beta2 because it has no resources. 2025-12-08T17:46:04.780431394+00:00 stderr F W1208 17:46:04.780328 12 genericapiserver.go:810] Skipping API apps/v1beta1 because it has no resources. 2025-12-08T17:46:04.782323451+00:00 stderr F I1208 17:46:04.782239 12 handler.go:288] Adding GroupVersion admissionregistration.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.782323451+00:00 stderr F W1208 17:46:04.782253 12 genericapiserver.go:810] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.782323451+00:00 stderr F W1208 17:46:04.782257 12 genericapiserver.go:810] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources. 2025-12-08T17:46:04.782930060+00:00 stderr F I1208 17:46:04.782853 12 handler.go:288] Adding GroupVersion events.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.782930060+00:00 stderr F W1208 17:46:04.782868 12 genericapiserver.go:810] Skipping API events.k8s.io/v1beta1 because it has no resources. 2025-12-08T17:46:04.790911619+00:00 stderr F I1208 17:46:04.790785 12 store.go:1663] "Monitoring resource count at path" resource="apiservices.apiregistration.k8s.io" path="//apiregistration.k8s.io/apiservices" 2025-12-08T17:46:04.792752354+00:00 stderr F I1208 17:46:04.792681 12 handler.go:288] Adding GroupVersion apiregistration.k8s.io v1 to ResourceManager 2025-12-08T17:46:04.792752354+00:00 stderr F W1208 17:46:04.792700 12 genericapiserver.go:810] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources. 
2025-12-08T17:46:04.793355622+00:00 stderr F I1208 17:46:04.793294 12 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="aggregator-proxy-cert::/etc/kubernetes/static-pod-certs/secrets/aggregator-client/tls.crt::/etc/kubernetes/static-pod-certs/secrets/aggregator-client/tls.key" 2025-12-08T17:46:04.807898249+00:00 stderr F I1208 17:46:04.807789 12 cacher.go:469] cacher (apiservices.apiregistration.k8s.io): initialized 2025-12-08T17:46:04.807898249+00:00 stderr F I1208 17:46:04.807833 12 reflector.go:430] "Caches populated" type="*apiregistration.APIService" reflector="storage/cacher.go:/apiregistration.k8s.io/apiservices" 2025-12-08T17:46:05.163701039+00:00 stderr F I1208 17:46:05.163518 12 genericapiserver.go:599] "[graceful-termination] using HTTP Server shutdown timeout" shutdownTimeout="2s" 2025-12-08T17:46:05.163966397+00:00 stderr F I1208 17:46:05.163848 12 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:46:05.164040609+00:00 stderr F I1208 17:46:05.163892 12 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 2025-12-08T17:46:05.164258875+00:00 stderr F I1208 17:46:05.164174 12 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key" 2025-12-08T17:46:05.164377719+00:00 stderr F I1208 17:46:05.164278 12 dynamic_serving_content.go:135] "Starting controller" name="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/localhost-serving-cert-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/localhost-serving-cert-certkey/tls.key" 2025-12-08T17:46:05.164555124+00:00 stderr F I1208 17:46:05.164409 12 dynamic_serving_content.go:135] "Starting controller" name="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key" 2025-12-08T17:46:05.164664597+00:00 stderr F I1208 17:46:05.164589 12 dynamic_serving_content.go:135] "Starting controller" name="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/external-loadbalancer-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/external-loadbalancer-serving-certkey/tls.key" 2025-12-08T17:46:05.164913345+00:00 stderr F I1208 17:46:05.164829 12 dynamic_serving_content.go:135] "Starting controller" name="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/internal-loadbalancer-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/internal-loadbalancer-serving-certkey/tls.key" 2025-12-08T17:46:05.165466401+00:00 stderr F I1208 17:46:05.165364 12 dynamic_serving_content.go:135] "Starting controller" name="sni-serving-cert::/etc/kubernetes/static-pod-resources/secrets/localhost-recovery-serving-certkey/tls.crt::/etc/kubernetes/static-pod-resources/secrets/localhost-recovery-serving-certkey/tls.key" 2025-12-08T17:46:05.165604515+00:00 stderr F I1208 17:46:05.165507 12 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 
17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:46:05.165479381 +0000 UTC))" 2025-12-08T17:46:05.165604515+00:00 stderr F I1208 17:46:05.165564 12 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:46:05.165553074 +0000 UTC))" 2025-12-08T17:46:05.165660497+00:00 stderr F I1208 17:46:05.165598 12 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:46:05.165587895 +0000 UTC))" 2025-12-08T17:46:05.165660497+00:00 stderr F I1208 17:46:05.165626 12 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:46:05.165620196 +0000 UTC))" 2025-12-08T17:46:05.165682328+00:00 stderr F I1208 17:46:05.165644 12 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:46:05.165637316 +0000 UTC))" 2025-12-08T17:46:05.165682328+00:00 stderr F I1208 17:46:05.165667 12 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:46:05.165661797 +0000 UTC))" 2025-12-08T17:46:05.165744349+00:00 stderr F I1208 17:46:05.165683 12 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:46:05.165678287 +0000 UTC))" 2025-12-08T17:46:05.165744349+00:00 stderr F I1208 17:46:05.165704 12 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" 
(2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:46:05.165698748 +0000 UTC))" 2025-12-08T17:46:05.165762030+00:00 stderr F I1208 17:46:05.165721 12 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:46:05.165716049 +0000 UTC))" 2025-12-08T17:46:05.165812851+00:00 stderr F I1208 17:46:05.165747 12 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt,request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:46:05.165741809 +0000 UTC))" 2025-12-08T17:46:05.166102510+00:00 stderr F I1208 17:46:05.165991 12 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key" certDetail="\"10.217.4.1\" [serving] validServingFor=[10.217.4.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster.local,openshift,openshift.default,openshift.default.svc,openshift.default.svc.cluster.local,10.217.4.1] issuer=\"kube-apiserver-service-network-signer\" (2025-11-02 07:51:36 +0000 UTC to 2026-11-02 07:51:37 +0000 UTC (now=2025-12-08 17:46:05.165978786 +0000 UTC))" 2025-12-08T17:46:05.166310087+00:00 stderr F I1208 17:46:05.166220 12 named_certificates.go:53] "Loaded SNI cert" index=5 certName="sni-serving-cert::/etc/kubernetes/static-pod-resources/secrets/localhost-recovery-serving-certkey/tls.crt::/etc/kubernetes/static-pod-resources/secrets/localhost-recovery-serving-certkey/tls.key" certDetail="\"localhost-recovery\" [serving] validServingFor=[localhost-recovery] issuer=\"openshift-kube-apiserver-operator_localhost-recovery-serving-signer@1762069890\" (2025-11-02 07:51:36 +0000 UTC to 2035-10-31 07:51:30 +0000 UTC (now=2025-12-08 17:46:05.166209814 +0000 UTC))" 2025-12-08T17:46:05.166498593+00:00 stderr F I1208 17:46:05.166419 12 named_certificates.go:53] "Loaded SNI cert" index=4 certName="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/internal-loadbalancer-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/internal-loadbalancer-serving-certkey/tls.key" certDetail="\"api-int.crc.testing\" [serving] validServingFor=[api-int.crc.testing] issuer=\"kube-apiserver-lb-signer\" (2025-11-02 07:51:35 +0000 UTC to 2026-11-02 07:51:36 +0000 UTC (now=2025-12-08 17:46:05.16641194 +0000 UTC))" 2025-12-08T17:46:05.166675718+00:00 stderr F I1208 17:46:05.166601 12 named_certificates.go:53] "Loaded SNI cert" index=3 certName="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/external-loadbalancer-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/external-loadbalancer-serving-certkey/tls.key" certDetail="\"api.crc.testing\" [serving] validServingFor=[api.crc.testing] issuer=\"kube-apiserver-lb-signer\" (2025-11-02 07:51:35 +0000 UTC to 2026-11-02 07:51:36 +0000 UTC (now=2025-12-08 
17:46:05.166595226 +0000 UTC))" 2025-12-08T17:46:05.166858333+00:00 stderr F I1208 17:46:05.166784 12 named_certificates.go:53] "Loaded SNI cert" index=2 certName="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key" certDetail="\"10.217.4.1\" [serving] validServingFor=[10.217.4.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster.local,openshift,openshift.default,openshift.default.svc,openshift.default.svc.cluster.local,10.217.4.1] issuer=\"kube-apiserver-service-network-signer\" (2025-11-02 07:51:36 +0000 UTC to 2026-11-02 07:51:37 +0000 UTC (now=2025-12-08 17:46:05.166777551 +0000 UTC))" 2025-12-08T17:46:05.167059129+00:00 stderr F I1208 17:46:05.167004 12 named_certificates.go:53] "Loaded SNI cert" index=1 certName="sni-serving-cert::/etc/kubernetes/static-pod-certs/secrets/localhost-serving-cert-certkey/tls.crt::/etc/kubernetes/static-pod-certs/secrets/localhost-serving-cert-certkey/tls.key" certDetail="\"127.0.0.1\" [serving] validServingFor=[127.0.0.1,localhost,127.0.0.1] issuer=\"kube-apiserver-localhost-signer\" (2025-11-02 07:51:35 +0000 UTC to 2026-11-02 07:51:36 +0000 UTC (now=2025-12-08 17:46:05.166995567 +0000 UTC))" 2025-12-08T17:46:05.167249205+00:00 stderr F I1208 17:46:05.167189 12 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215964\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215964\" (2025-12-08 16:46:03 +0000 UTC to 2028-12-08 16:46:03 +0000 UTC (now=2025-12-08 17:46:05.167183423 +0000 UTC))" 2025-12-08T17:46:05.167318447+00:00 stderr F I1208 17:46:05.167248 12 secure_serving.go:211] Serving securely on [::]:6443 2025-12-08T17:46:05.167357978+00:00 stderr F I1208 17:46:05.167290 12 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:46:05.167491862+00:00 stderr F I1208 17:46:05.167305 12 genericapiserver.go:725] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:46:05.167491862+00:00 stderr F I1208 17:46:05.167347 12 apf_controller.go:377] Starting API Priority and Fairness config controller 2025-12-08T17:46:05.167526823+00:00 stderr F I1208 17:46:05.167490 12 gc_controller.go:78] Starting apiserver lease garbage collector 2025-12-08T17:46:05.167734139+00:00 stderr F I1208 17:46:05.167654 12 cluster_authentication_trust_controller.go:459] Starting cluster_authentication_trust_controller controller 2025-12-08T17:46:05.167734139+00:00 stderr F I1208 17:46:05.167678 12 shared_informer.go:350] "Waiting for caches to sync" controller="cluster_authentication_trust_controller" 2025-12-08T17:46:05.167992507+00:00 stderr F I1208 17:46:05.167910 12 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" 2025-12-08T17:46:05.167992507+00:00 stderr F I1208 17:46:05.167935 12 clusterquotamapping.go:127] Starting ClusterQuotaMappingController controller 2025-12-08T17:46:05.168127191+00:00 stderr F I1208 17:46:05.168064 12 apiservice_controller.go:100] Starting APIServiceRegistrationController 2025-12-08T17:46:05.168127191+00:00 stderr F I1208 17:46:05.168083 12 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller 2025-12-08T17:46:05.168158282+00:00 stderr F I1208 17:46:05.168128 12 dynamic_serving_content.go:135] 
"Starting controller" name="aggregator-proxy-cert::/etc/kubernetes/static-pod-certs/secrets/aggregator-client/tls.crt::/etc/kubernetes/static-pod-certs/secrets/aggregator-client/tls.key" 2025-12-08T17:46:05.168591845+00:00 stderr F I1208 17:46:05.168514 12 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt" 2025-12-08T17:46:05.168624946+00:00 stderr F I1208 17:46:05.168595 12 remote_available_controller.go:433] Starting RemoteAvailability controller 2025-12-08T17:46:05.168689328+00:00 stderr F I1208 17:46:05.168628 12 cache.go:32] Waiting for caches to sync for RemoteAvailability controller 2025-12-08T17:46:05.169286006+00:00 stderr F I1208 17:46:05.169197 12 apiaccess_count_controller.go:89] Starting APIRequestCount controller. 2025-12-08T17:46:05.169590505+00:00 stderr F I1208 17:46:05.169513 12 controller.go:119] Starting legacy_token_tracking_controller 2025-12-08T17:46:05.169590505+00:00 stderr F I1208 17:46:05.169540 12 shared_informer.go:350] "Waiting for caches to sync" controller="configmaps" 2025-12-08T17:46:05.169653477+00:00 stderr F I1208 17:46:05.169609 12 local_available_controller.go:156] Starting LocalAvailability controller 2025-12-08T17:46:05.169708039+00:00 stderr F I1208 17:46:05.169651 12 aggregator.go:169] waiting for initial CRD sync... 2025-12-08T17:46:05.169708039+00:00 stderr F I1208 17:46:05.169673 12 controller.go:80] Starting OpenAPI V3 AggregationController 2025-12-08T17:46:05.169730260+00:00 stderr F I1208 17:46:05.169708 12 cache.go:32] Waiting for caches to sync for LocalAvailability controller 2025-12-08T17:46:05.169966717+00:00 stderr F I1208 17:46:05.169868 12 customresource_discovery_controller.go:294] Starting DiscoveryController 2025-12-08T17:46:05.170384839+00:00 stderr F I1208 17:46:05.170259 12 system_namespaces_controller.go:66] Starting system namespaces controller 2025-12-08T17:46:05.170384839+00:00 stderr F I1208 17:46:05.170297 12 controller.go:78] Starting OpenAPI AggregationController 2025-12-08T17:46:05.170583225+00:00 stderr F I1208 17:46:05.170506 12 crdregistration_controller.go:115] Starting crd-autoregister controller 2025-12-08T17:46:05.170583225+00:00 stderr F I1208 17:46:05.170526 12 shared_informer.go:350] "Waiting for caches to sync" controller="crd-autoregister" 2025-12-08T17:46:05.170612116+00:00 stderr F I1208 17:46:05.170573 12 naming_controller.go:299] Starting NamingConditionController 2025-12-08T17:46:05.170843693+00:00 stderr F I1208 17:46:05.170784 12 establishing_controller.go:81] Starting EstablishingController 2025-12-08T17:46:05.170872553+00:00 stderr F I1208 17:46:05.170849 12 nonstructuralschema_controller.go:195] Starting NonStructuralSchemaConditionController 2025-12-08T17:46:05.170959426+00:00 stderr F I1208 17:46:05.170927 12 apiapproval_controller.go:189] Starting KubernetesAPIApprovalPolicyConformantConditionController 2025-12-08T17:46:05.171605975+00:00 stderr F I1208 17:46:05.171187 12 crd_finalizer.go:269] Starting CRDFinalizer 2025-12-08T17:46:05.171605975+00:00 stderr F I1208 17:46:05.171365 12 repairip.go:200] Starting ipallocator-repair-controller 2025-12-08T17:46:05.171605975+00:00 stderr F I1208 17:46:05.171409 12 shared_informer.go:350] "Waiting for caches to sync" controller="ipallocator-repair-controller" 2025-12-08T17:46:05.174646447+00:00 stderr F I1208 17:46:05.173442 12 controller.go:142] Starting OpenAPI controller 2025-12-08T17:46:05.174646447+00:00 stderr F I1208 17:46:05.173477 12 
controller.go:90] Starting OpenAPI V3 controller 2025-12-08T17:46:05.175961856+00:00 stderr F I1208 17:46:05.174895 12 default_servicecidr_controller.go:110] Starting kubernetes-service-cidr-controller 2025-12-08T17:46:05.175961856+00:00 stderr F I1208 17:46:05.174950 12 shared_informer.go:350] "Waiting for caches to sync" controller="kubernetes-service-cidr-controller" 2025-12-08T17:46:05.178263876+00:00 stderr F I1208 17:46:05.178163 12 patch_genericapiserver.go:241] Loopback request to "/api/v1/namespaces/openshift-kube-apiserver/configmaps" (user agent "cluster-kube-apiserver-operator/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready. This client probably does not watch /readyz and might get inconsistent answers. 2025-12-08T17:46:05.179589215+00:00 stderr F I1208 17:46:05.179452 12 patch_genericapiserver.go:241] Loopback request to "/api/v1/namespaces/openshift-kube-apiserver/pods" (user agent "cluster-kube-apiserver-operator/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready. This client probably does not watch /readyz and might get inconsistent answers. 2025-12-08T17:46:05.180175713+00:00 stderr F W1208 17:46:05.180078 12 patch_genericapiserver.go:245] Request to "/apis/operator.openshift.io/v1/networks/cluster" (source IP 38.102.83.243:52956, user agent "network-operator/4.20.0-202510211040.p2.gb0393aa.assembly.stream.el9-b0393aa") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:05.180175713+00:00 stderr F I1208 17:46:05.180105 12 patch_genericapiserver.go:241] Loopback request to "/api/v1/namespaces/openshift-kube-apiserver/secrets" (user agent "cluster-kube-apiserver-operator/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready. This client probably does not watch /readyz and might get inconsistent answers. 2025-12-08T17:46:05.189859084+00:00 stderr F W1208 17:46:05.189752 12 patch_genericapiserver.go:245] Request to "/apis/config.openshift.io/v1/clusterversions/version/status" (source IP 38.102.83.243:52960, user agent "cluster-version-operator/v0.0.0 (linux/amd64) kubernetes/$Format/openshift-cluster-version") before server is ready, possibly a sign for a broken load balancer setup. 
2025-12-08T17:46:05.196133292+00:00 stderr F I1208 17:46:05.196036 12 reflector.go:430] "Caches populated" type="*v1.LimitRange" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.196769701+00:00 stderr F I1208 17:46:05.196668 12 reflector.go:430] "Caches populated" type="*v1.PersistentVolumeClaim" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.197143213+00:00 stderr F I1208 17:46:05.197077 12 reflector.go:430] "Caches populated" type="*v1.IngressClass" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.197220795+00:00 stderr F I1208 17:46:05.197177 12 reflector.go:430] "Caches populated" type="*v1.ServiceCIDR" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.197477813+00:00 stderr F E1208 17:46:05.197423 12 sdn_readyz_wait.go:100] "Unhandled Error" err="api-openshift-oauth-apiserver-available did not find any IPs for kubernetes.default.svc endpoint" logger="UnhandledError" 2025-12-08T17:46:05.197759871+00:00 stderr F I1208 17:46:05.197673 12 reflector.go:430] "Caches populated" type="*v1.PersistentVolume" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.198414750+00:00 stderr F I1208 17:46:05.198345 12 reflector.go:430] "Caches populated" type="*v1.PriorityClass" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.198721579+00:00 stderr F E1208 17:46:05.198633 12 sdn_readyz_wait.go:100] "Unhandled Error" err="api-openshift-apiserver-available did not find any IPs for kubernetes.default.svc endpoint" logger="UnhandledError" 2025-12-08T17:46:05.199192823+00:00 stderr F I1208 17:46:05.199111 12 reflector.go:430] "Caches populated" type="*v1.StorageClass" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.200068050+00:00 stderr F I1208 17:46:05.199304 12 reflector.go:430] "Caches populated" type="*v1.Lease" reflector="runtime/asm_amd64.s:1700" 2025-12-08T17:46:05.200243906+00:00 stderr F I1208 17:46:05.199427 12 reflector.go:430] "Caches populated" type="*v1.MutatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.200319088+00:00 stderr F I1208 17:46:05.199503 12 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="runtime/asm_amd64.s:1700" 2025-12-08T17:46:05.200319088+00:00 stderr F I1208 17:46:05.199667 12 reflector.go:430] "Caches populated" type="*v1.PriorityLevelConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.200342748+00:00 stderr F I1208 17:46:05.199706 12 reflector.go:430] "Caches populated" type="*v1.VolumeAttachment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.200660308+00:00 stderr F I1208 17:46:05.200585 12 reflector.go:430] "Caches populated" type="*v1.ValidatingAdmissionPolicyBinding" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.200684539+00:00 stderr F I1208 17:46:05.200617 12 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.201149042+00:00 stderr F I1208 17:46:05.201041 12 reflector.go:430] "Caches populated" type="*v1.CSIDriver" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.201550224+00:00 stderr F I1208 17:46:05.201468 12 reflector.go:430] "Caches populated" type="*v1.ServiceCIDR" reflector="runtime/asm_amd64.s:1700" 2025-12-08T17:46:05.201667518+00:00 stderr F I1208 17:46:05.201590 12 reflector.go:430] "Caches populated" type="*v1.RuntimeClass" 
reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.201803262+00:00 stderr F I1208 17:46:05.201738 12 reflector.go:430] "Caches populated" type="*v1.ResourceQuota" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.202398569+00:00 stderr F I1208 17:46:05.202314 12 reflector.go:430] "Caches populated" type="*v1.ValidatingWebhookConfiguration" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.203573635+00:00 stderr F I1208 17:46:05.203489 12 reflector.go:430] "Caches populated" type="*v1.ValidatingAdmissionPolicy" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.205550145+00:00 stderr F I1208 17:46:05.205485 12 reflector.go:430] "Caches populated" type="*v1.IPAddress" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.206340288+00:00 stderr F I1208 17:46:05.206234 12 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="runtime/asm_amd64.s:1700" 2025-12-08T17:46:05.209302197+00:00 stderr F I1208 17:46:05.209248 12 shared_informer.go:357] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" 2025-12-08T17:46:05.209302197+00:00 stderr F I1208 17:46:05.209279 12 policy_source.go:240] refreshing policies 2025-12-08T17:46:05.209479293+00:00 stderr F I1208 17:46:05.209437 12 reflector.go:430] "Caches populated" type="*v1.FlowSchema" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.211400850+00:00 stderr F I1208 17:46:05.211312 12 reflector.go:430] "Caches populated" type="*v1.Endpoints" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.213993708+00:00 stderr F I1208 17:46:05.213927 12 reflector.go:430] "Caches populated" type="*v1.APIService" reflector="pkg/client/informers/externalversions/factory.go:141" 2025-12-08T17:46:05.218793432+00:00 stderr F I1208 17:46:05.218720 12 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.224706119+00:00 stderr F I1208 17:46:05.224477 12 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.228380350+00:00 stderr F I1208 17:46:05.228262 12 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.249909336+00:00 stderr F I1208 17:46:05.249768 12 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.253696390+00:00 stderr F I1208 17:46:05.253618 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:46:05.253926527+00:00 stderr F I1208 17:46:05.253831 12 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:46:05.253926527+00:00 stderr F I1208 17:46:05.253840 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 227.907µs 2025-12-08T17:46:05.254333849+00:00 stderr F I1208 17:46:05.254263 12 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.261366169+00:00 stderr F I1208 17:46:05.261277 12 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go/informers/factory.go:160" 
2025-12-08T17:46:05.263978548+00:00 stderr F I1208 17:46:05.263894 12 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.267561866+00:00 stderr F I1208 17:46:05.267485 12 apf_controller.go:382] Running API Priority and Fairness config worker 2025-12-08T17:46:05.267561866+00:00 stderr F I1208 17:46:05.267507 12 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process 2025-12-08T17:46:05.267678720+00:00 stderr F I1208 17:46:05.267552 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=51 seatDemandAvg=2.356932437727728 seatDemandStdev=8.091537823064513 seatDemandSmoothed=10.448470260792242 fairFrac=0 currentCL=51 concurrencyDenominator=51 backstop=false 2025-12-08T17:46:05.267914357+00:00 stderr F I1208 17:46:05.267797 12 apf_controller.go:906] Retaining queues for priority level "catch-all": config={"type":"Limited","limited":{"nominalConcurrencyShares":5,"limitResponse":{"type":"Reject"},"lendablePercent":0}}, nominalCL=79, lendableCL=0, borrowingCL=4000, currentCL=4000, quiescing=false, numPending=0 (shares=0xc0099b5570, shareSum=255) 2025-12-08T17:46:05.267914357+00:00 stderr F I1208 17:46:05.267828 12 apf_controller.go:898] Introducing queues for priority level "system": config={"type":"Limited","limited":{"nominalConcurrencyShares":30,"limitResponse":{"type":"Queue","queuing":{"queues":64,"handSize":6,"queueLengthLimit":50}},"lendablePercent":33}}, nominalCL=471, lendableCL=155, borrowingCL=4000, currentCL=394, quiescing=false (shares=0xc0099b5788, shareSum=255) 2025-12-08T17:46:05.267914357+00:00 stderr F I1208 17:46:05.267841 12 apf_controller.go:898] Introducing queues for priority level "workload-low": config={"type":"Limited","limited":{"nominalConcurrencyShares":100,"limitResponse":{"type":"Queue","queuing":{"queues":128,"handSize":6,"queueLengthLimit":50}},"lendablePercent":90}}, nominalCL=1569, lendableCL=1412, borrowingCL=4000, currentCL=863, quiescing=false (shares=0xc0099b5830, shareSum=255) 2025-12-08T17:46:05.267914357+00:00 stderr F I1208 17:46:05.267853 12 apf_controller.go:898] Introducing queues for priority level "node-high": config={"type":"Limited","limited":{"nominalConcurrencyShares":40,"limitResponse":{"type":"Queue","queuing":{"queues":64,"handSize":6,"queueLengthLimit":50}},"lendablePercent":25}}, nominalCL=628, lendableCL=157, borrowingCL=4000, currentCL=550, quiescing=false (shares=0xc0099b5698, shareSum=255) 2025-12-08T17:46:05.267939027+00:00 stderr F I1208 17:46:05.267866 12 apf_controller.go:898] Introducing queues for priority level "global-default": config={"type":"Limited","limited":{"nominalConcurrencyShares":20,"limitResponse":{"type":"Queue","queuing":{"queues":128,"handSize":6,"queueLengthLimit":50}},"lendablePercent":50}}, nominalCL=314, lendableCL=157, borrowingCL=4000, currentCL=236, quiescing=false (shares=0xc0099b55f0, shareSum=255) 2025-12-08T17:46:05.267981499+00:00 stderr F I1208 17:46:05.267925 12 apf_controller.go:898] Introducing queues for priority level "workload-high": config={"type":"Limited","limited":{"nominalConcurrencyShares":40,"limitResponse":{"type":"Queue","queuing":{"queues":128,"handSize":6,"queueLengthLimit":50}},"lendablePercent":50}}, nominalCL=628, lendableCL=314, borrowingCL=4000, currentCL=471, quiescing=false (shares=0xc0099b57e0, shareSum=255) 2025-12-08T17:46:05.267981499+00:00 stderr F I1208 17:46:05.267927 12 shared_informer.go:357] "Caches are synced" 
controller="cluster_authentication_trust_controller" 2025-12-08T17:46:05.267981499+00:00 stderr F I1208 17:46:05.267941 12 apf_controller.go:898] Introducing queues for priority level "leader-election": config={"type":"Limited","limited":{"nominalConcurrencyShares":10,"limitResponse":{"type":"Queue","queuing":{"queues":16,"handSize":4,"queueLengthLimit":50}},"lendablePercent":0}}, nominalCL=157, lendableCL=0, borrowingCL=4000, currentCL=157, quiescing=false (shares=0xc0099b5640, shareSum=255) 2025-12-08T17:46:05.267994229+00:00 stderr F I1208 17:46:05.267960 12 apf_controller.go:898] Introducing queues for priority level "openshift-control-plane-operators": config={"type":"Limited","limited":{"nominalConcurrencyShares":10,"limitResponse":{"type":"Queue","queuing":{"queues":128,"handSize":6,"queueLengthLimit":50}},"lendablePercent":33}}, nominalCL=157, lendableCL=52, borrowingCL=4000, currentCL=131, quiescing=false (shares=0xc0099b5718, shareSum=255) 2025-12-08T17:46:05.268040440+00:00 stderr F I1208 17:46:05.267991 12 apf_controller.go:493] "Update CurrentCL" plName="catch-all" seatDemandHighWatermark=0 seatDemandAvg=0 seatDemandStdev=0 seatDemandSmoothed=0 fairFrac=2.2796127562642368 currentCL=180 concurrencyDenominator=180 backstop=false 2025-12-08T17:46:05.268054191+00:00 stderr F I1208 17:46:05.268021 12 apf_controller.go:493] "Update CurrentCL" plName="system" seatDemandHighWatermark=0 seatDemandAvg=0 seatDemandStdev=0 seatDemandSmoothed=0 fairFrac=2.2796127562642368 currentCL=720 concurrencyDenominator=720 backstop=false 2025-12-08T17:46:05.268100482+00:00 stderr F I1208 17:46:05.268057 12 apf_controller.go:493] "Update CurrentCL" plName="workload-low" seatDemandHighWatermark=0 seatDemandAvg=0 seatDemandStdev=0 seatDemandSmoothed=0 fairFrac=2.2796127562642368 currentCL=358 concurrencyDenominator=358 backstop=false 2025-12-08T17:46:05.268194555+00:00 stderr F I1208 17:46:05.268134 12 apf_controller.go:493] "Update CurrentCL" plName="node-high" seatDemandHighWatermark=0 seatDemandAvg=0 seatDemandStdev=0 seatDemandSmoothed=0 fairFrac=2.2796127562642368 currentCL=1074 concurrencyDenominator=1074 backstop=false 2025-12-08T17:46:05.268194555+00:00 stderr F I1208 17:46:05.268162 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=15 seatDemandAvg=15 seatDemandStdev=0 seatDemandSmoothed=15 fairFrac=2.2796127562642368 currentCL=15 concurrencyDenominator=15 backstop=false 2025-12-08T17:46:05.268208235+00:00 stderr F I1208 17:46:05.268174 12 apf_controller.go:493] "Update CurrentCL" plName="global-default" seatDemandHighWatermark=0 seatDemandAvg=0 seatDemandStdev=0 seatDemandSmoothed=0 fairFrac=2.2796127562642368 currentCL=358 concurrencyDenominator=358 backstop=false 2025-12-08T17:46:05.268267307+00:00 stderr F I1208 17:46:05.268216 12 apf_controller.go:493] "Update CurrentCL" plName="workload-high" seatDemandHighWatermark=0 seatDemandAvg=0 seatDemandStdev=0 seatDemandSmoothed=0 fairFrac=2.2796127562642368 currentCL=716 concurrencyDenominator=716 backstop=false 2025-12-08T17:46:05.268267307+00:00 stderr F I1208 17:46:05.268137 12 cache.go:39] Caches are synced for APIServiceRegistrationController controller 2025-12-08T17:46:05.268323849+00:00 stderr F I1208 17:46:05.268260 12 apf_controller.go:493] "Update CurrentCL" plName="leader-election" seatDemandHighWatermark=0 seatDemandAvg=0 seatDemandStdev=0 seatDemandSmoothed=0 fairFrac=2.2796127562642368 currentCL=358 concurrencyDenominator=358 backstop=false 2025-12-08T17:46:05.268323849+00:00 stderr F I1208 
17:46:05.268282 12 apf_controller.go:493] "Update CurrentCL" plName="openshift-control-plane-operators" seatDemandHighWatermark=0 seatDemandAvg=0 seatDemandStdev=0 seatDemandSmoothed=0 fairFrac=2.2796127562642368 currentCL=239 concurrencyDenominator=239 backstop=false 2025-12-08T17:46:05.268757442+00:00 stderr F I1208 17:46:05.268708 12 cache.go:39] Caches are synced for RemoteAvailability controller 2025-12-08T17:46:05.270348440+00:00 stderr F I1208 17:46:05.270292 12 cache.go:39] Caches are synced for LocalAvailability controller 2025-12-08T17:46:05.277025340+00:00 stderr F I1208 17:46:05.272212 12 shared_informer.go:357] "Caches are synced" controller="ipallocator-repair-controller" 2025-12-08T17:46:05.277025340+00:00 stderr F I1208 17:46:05.273334 12 healthz.go:280] informer-sync,poststarthook/start-apiextensions-controllers,poststarthook/crd-informer-synced,poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes,poststarthook/start-kubernetes-service-cidr-controller,poststarthook/apiservice-registration-controller,poststarthook/apiservice-discovery-controller check failed: readyz 2025-12-08T17:46:05.277025340+00:00 stderr F [-]informer-sync failed: 3 informers not started yet: [*v1.Pod *v1.Secret *v1.ConfigMap] 2025-12-08T17:46:05.277025340+00:00 stderr F [-]poststarthook/start-apiextensions-controllers failed: not finished 2025-12-08T17:46:05.277025340+00:00 stderr F [-]poststarthook/crd-informer-synced failed: not finished 2025-12-08T17:46:05.277025340+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:05.277025340+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:05.277025340+00:00 stderr F [-]poststarthook/start-kubernetes-service-cidr-controller failed: not finished 2025-12-08T17:46:05.277025340+00:00 stderr F [-]poststarthook/apiservice-registration-controller failed: not finished 2025-12-08T17:46:05.277025340+00:00 stderr F [-]poststarthook/apiservice-discovery-controller failed: not finished 2025-12-08T17:46:05.277025340+00:00 stderr F I1208 17:46:05.275088 12 shared_informer.go:357] "Caches are synced" controller="kubernetes-service-cidr-controller" 2025-12-08T17:46:05.277025340+00:00 stderr F I1208 17:46:05.275151 12 default_servicecidr_controller.go:136] Shutting down kubernetes-service-cidr-controller 2025-12-08T17:46:05.289834815+00:00 stderr F I1208 17:46:05.288153 12 shared_informer.go:357] "Caches are synced" controller="configmaps" 2025-12-08T17:46:05.290020030+00:00 stderr F I1208 17:46:05.289966 12 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.304979079+00:00 stderr F I1208 17:46:05.304841 12 handler_discovery.go:451] Starting ResourceDiscoveryManager 2025-12-08T17:46:05.311518985+00:00 stderr F I1208 17:46:05.310898 12 handler.go:288] Adding GroupVersion packages.operators.coreos.com v1 to ResourceManager 2025-12-08T17:46:05.311866456+00:00 stderr F W1208 17:46:05.311789 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces/openshift-ingress-operator/secrets" (source IP 38.102.83.243:52972, user agent "kubelet/v1.33.5 (linux/amd64) kubernetes/27f72e0") before server is ready, possibly a sign for a broken load balancer setup. 
2025-12-08T17:46:05.317453174+00:00 stderr F I1208 17:46:05.317326 12 handler.go:288] Adding GroupVersion user.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.317606248+00:00 stderr F I1208 17:46:05.317549 12 handler.go:288] Adding GroupVersion oauth.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.322003350+00:00 stderr F I1208 17:46:05.320672 12 policy_source.go:435] informer started for config.openshift.io/v1, Kind=Infrastructure 2025-12-08T17:46:05.325479024+00:00 stderr F I1208 17:46:05.323639 12 handler.go:288] Adding GroupVersion route.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.330962659+00:00 stderr F I1208 17:46:05.328492 12 handler.go:288] Adding GroupVersion image.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.330962659+00:00 stderr F I1208 17:46:05.328513 12 handler.go:288] Adding GroupVersion build.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.330962659+00:00 stderr F I1208 17:46:05.328962 12 handler.go:288] Adding GroupVersion authorization.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.330962659+00:00 stderr F E1208 17:46:05.328927 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.330962659+00:00 stderr F Error updating APIService "v1.apps.openshift.io" with err: failed to download v1.apps.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.330962659+00:00 stderr F , Header: map[Audit-Id:[fd666ec4-a25b-4b86-91a9-ec70a7380251] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.330962659+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.330962659+00:00 stderr F I1208 17:46:05.329071 12 handler.go:288] Adding GroupVersion template.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.330962659+00:00 stderr F I1208 17:46:05.329363 12 handler.go:288] Adding GroupVersion apps.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.330962659+00:00 stderr F I1208 17:46:05.329535 12 handler.go:288] Adding GroupVersion project.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.330962659+00:00 stderr F I1208 17:46:05.329647 12 handler.go:288] Adding GroupVersion security.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.334940899+00:00 stderr F I1208 17:46:05.333808 12 handler.go:288] Adding GroupVersion quota.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.335180966+00:00 stderr F E1208 17:46:05.335129 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.335180966+00:00 stderr F Error updating APIService "v1.authorization.openshift.io" with err: failed to download v1.authorization.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.335180966+00:00 stderr F , Header: map[Audit-Id:[9984bdf3-cf11-4396-8abc-71a52ef188c8] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.335180966+00:00 stderr F > logger="UnhandledError" 
2025-12-08T17:46:05.335835495+00:00 stderr F I1208 17:46:05.335783 12 policy_source.go:240] refreshing policies 2025-12-08T17:46:05.342981900+00:00 stderr F E1208 17:46:05.342843 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.342981900+00:00 stderr F Error updating APIService "v1.build.openshift.io" with err: failed to download v1.build.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.342981900+00:00 stderr F , Header: map[Audit-Id:[c9b965a9-039d-42ea-afcc-061dd1acec77] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.342981900+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.354375572+00:00 stderr F E1208 17:46:05.353380 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.354375572+00:00 stderr F Error updating APIService "v1.image.openshift.io" with err: failed to download v1.image.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.354375572+00:00 stderr F , Header: map[Audit-Id:[a1d9ec78-dd6e-4ded-9726-4b5be87633b5] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.354375572+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.364976239+00:00 stderr F E1208 17:46:05.363508 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.364976239+00:00 stderr F Error updating APIService "v1.oauth.openshift.io" with err: failed to download v1.oauth.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.364976239+00:00 stderr F , Header: map[Audit-Id:[4078819d-648e-426c-9a26-ac56a14aabdb] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.364976239+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.376977150+00:00 stderr F I1208 17:46:05.375020 12 shared_informer.go:357] "Caches are synced" controller="node_authorizer" 2025-12-08T17:46:05.376977150+00:00 stderr F I1208 17:46:05.375383 12 healthz.go:280] informer-sync,poststarthook/start-apiextensions-controllers,poststarthook/crd-informer-synced,poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes check failed: readyz 2025-12-08T17:46:05.376977150+00:00 stderr F [-]informer-sync failed: 2 informers not started yet: [*v1.Secret *v1.ConfigMap] 2025-12-08T17:46:05.376977150+00:00 stderr F [-]poststarthook/start-apiextensions-controllers failed: not finished 2025-12-08T17:46:05.376977150+00:00 stderr F [-]poststarthook/crd-informer-synced failed: not finished 2025-12-08T17:46:05.376977150+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles 
failed: not finished 2025-12-08T17:46:05.376977150+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:05.376977150+00:00 stderr F I1208 17:46:05.376314 12 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.379813105+00:00 stderr F E1208 17:46:05.377855 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.379813105+00:00 stderr F Error updating APIService "v1.packages.operators.coreos.com" with err: failed to download v1.packages.operators.coreos.com: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.379813105+00:00 stderr F , Header: map[Audit-Id:[874c3999-7b40-4003-af85-e484af5b6ad5] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.379813105+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.383047262+00:00 stderr F E1208 17:46:05.382894 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.383047262+00:00 stderr F Error updating APIService "v1.project.openshift.io" with err: failed to download v1.project.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.383047262+00:00 stderr F , Header: map[Audit-Id:[891c416e-84be-4017-88b8-aec6cd231dcf] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.383047262+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.384805135+00:00 stderr F I1208 17:46:05.383336 12 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:05.386961109+00:00 stderr F E1208 17:46:05.386514 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.386961109+00:00 stderr F Error updating APIService "v1.quota.openshift.io" with err: failed to download v1.quota.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.386961109+00:00 stderr F , Header: map[Audit-Id:[f301729a-1916-438d-aaf7-e1d0a799e7fa] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.386961109+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.390821666+00:00 stderr F I1208 17:46:05.388821 12 reflector.go:430] "Caches populated" type="*v1.CustomResourceDefinition" reflector="pkg/client/informers/externalversions/factory.go:141" 2025-12-08T17:46:05.393206928+00:00 stderr F I1208 17:46:05.391595 12 patch_genericapiserver.go:241] Loopback request to "/api/v1/namespaces/openshift-kube-apiserver/pods" (user agent "cluster-kube-apiserver-operator/v0.0.0 
(linux/amd64) kubernetes/$Format") before server is ready. This client probably does not watch /readyz and might get inconsistent answers. 2025-12-08T17:46:05.393206928+00:00 stderr F E1208 17:46:05.392827 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.393206928+00:00 stderr F Error updating APIService "v1.route.openshift.io" with err: failed to download v1.route.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.393206928+00:00 stderr F , Header: map[Audit-Id:[2d8d1a6d-1521-4857-9105-eaadec389d84] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.393206928+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.397058073+00:00 stderr F E1208 17:46:05.396996 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.397058073+00:00 stderr F Error updating APIService "v1.security.openshift.io" with err: failed to download v1.security.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.397058073+00:00 stderr F , Header: map[Audit-Id:[d069d70f-576d-4c58-94a5-af0bf940e995] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.397058073+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.399195367+00:00 stderr F I1208 17:46:05.399123 12 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io 2025-12-08T17:46:05.400666091+00:00 stderr F E1208 17:46:05.400588 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.400666091+00:00 stderr F Error updating APIService "v1.template.openshift.io" with err: failed to download v1.template.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.400666091+00:00 stderr F , Header: map[Audit-Id:[b0c10933-62ba-4315-8c24-ddf3aa8026ce] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.400666091+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.404582949+00:00 stderr F E1208 17:46:05.404512 12 controller.go:146] "Unhandled Error" err=< 2025-12-08T17:46:05.404582949+00:00 stderr F Error updating APIService "v1.user.openshift.io" with err: failed to download v1.user.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:05.404582949+00:00 stderr F , Header: map[Audit-Id:[3e11e396-fe16-4457-8005-a3c289fa43d4] Cache-Control:[no-cache, private] Content-Length:[184] 
Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:05 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:05.404582949+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:05.468412394+00:00 stderr F I1208 17:46:05.468294 12 genericapiserver.go:550] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:46:05.470643021+00:00 stderr F I1208 17:46:05.470560 12 handler.go:288] Adding GroupVersion monitoring.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.470643021+00:00 stderr F I1208 17:46:05.470585 12 handler.go:288] Adding GroupVersion quota.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.470643021+00:00 stderr F I1208 17:46:05.470625 12 handler.go:288] Adding GroupVersion console.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.470714793+00:00 stderr F I1208 17:46:05.470658 12 handler.go:288] Adding GroupVersion operators.coreos.com v1 to ResourceManager 2025-12-08T17:46:05.470714793+00:00 stderr F I1208 17:46:05.470693 12 handler.go:288] Adding GroupVersion operators.coreos.com v2 to ResourceManager 2025-12-08T17:46:05.470823637+00:00 stderr F I1208 17:46:05.470769 12 handler.go:288] Adding GroupVersion config.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.470823637+00:00 stderr F I1208 17:46:05.470806 12 handler.go:288] Adding GroupVersion monitoring.coreos.com v1 to ResourceManager 2025-12-08T17:46:05.471032483+00:00 stderr F I1208 17:46:05.470945 12 handler.go:288] Adding GroupVersion gateway.networking.k8s.io v1 to ResourceManager 2025-12-08T17:46:05.471032483+00:00 stderr F I1208 17:46:05.470975 12 handler.go:288] Adding GroupVersion gateway.networking.k8s.io v1beta1 to ResourceManager 2025-12-08T17:46:05.471032483+00:00 stderr F I1208 17:46:05.470989 12 handler.go:288] Adding GroupVersion k8s.cni.cncf.io v1alpha1 to ResourceManager 2025-12-08T17:46:05.471032483+00:00 stderr F I1208 17:46:05.471008 12 handler.go:288] Adding GroupVersion machine.openshift.io v1beta1 to ResourceManager 2025-12-08T17:46:05.471032483+00:00 stderr F I1208 17:46:05.471025 12 handler.go:288] Adding GroupVersion infrastructure.cluster.x-k8s.io v1alpha5 to ResourceManager 2025-12-08T17:46:05.471078504+00:00 stderr F I1208 17:46:05.471044 12 handler.go:288] Adding GroupVersion infrastructure.cluster.x-k8s.io v1beta1 to ResourceManager 2025-12-08T17:46:05.471134966+00:00 stderr F I1208 17:46:05.471097 12 handler.go:288] Adding GroupVersion security.internal.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.471155386+00:00 stderr F I1208 17:46:05.471131 12 handler.go:288] Adding GroupVersion machineconfiguration.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.471373423+00:00 stderr F I1208 17:46:05.471323 12 handler.go:288] Adding GroupVersion k8s.cni.cncf.io v1 to ResourceManager 2025-12-08T17:46:05.471547299+00:00 stderr F I1208 17:46:05.471501 12 handler.go:288] Adding GroupVersion operator.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.471692653+00:00 stderr F I1208 17:46:05.471648 12 shared_informer.go:357] "Caches are synced" controller="crd-autoregister" 2025-12-08T17:46:05.471733815+00:00 stderr F I1208 17:46:05.471702 12 handler.go:288] Adding GroupVersion operators.coreos.com v1alpha1 to ResourceManager 2025-12-08T17:46:05.471768366+00:00 stderr F I1208 17:46:05.471733 12 handler.go:288] Adding GroupVersion machine.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.472223699+00:00 stderr F I1208 17:46:05.472163 12 handler.go:288] Adding GroupVersion k8s.ovn.org v1 to 
ResourceManager 2025-12-08T17:46:05.472806857+00:00 stderr F I1208 17:46:05.472731 12 healthz.go:280] poststarthook/start-apiextensions-controllers,poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes check failed: readyz 2025-12-08T17:46:05.472806857+00:00 stderr F [-]poststarthook/start-apiextensions-controllers failed: not finished 2025-12-08T17:46:05.472806857+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:05.472806857+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:05.472806857+00:00 stderr F I1208 17:46:05.472746 12 handler.go:288] Adding GroupVersion apiserver.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.473069914+00:00 stderr F I1208 17:46:05.473000 12 aggregator.go:171] initial CRD sync complete... 2025-12-08T17:46:05.473069914+00:00 stderr F I1208 17:46:05.473015 12 autoregister_controller.go:144] Starting autoregister controller 2025-12-08T17:46:05.473069914+00:00 stderr F I1208 17:46:05.473022 12 cache.go:32] Waiting for caches to sync for autoregister controller 2025-12-08T17:46:05.473069914+00:00 stderr F I1208 17:46:05.473030 12 cache.go:39] Caches are synced for autoregister controller 2025-12-08T17:46:05.473215909+00:00 stderr F I1208 17:46:05.473146 12 handler.go:288] Adding GroupVersion network.operator.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.473686633+00:00 stderr F I1208 17:46:05.473626 12 handler.go:288] Adding GroupVersion controlplane.operator.openshift.io v1alpha1 to ResourceManager 2025-12-08T17:46:05.473821517+00:00 stderr F I1208 17:46:05.473778 12 controller.go:231] Updating CRD OpenAPI spec because adminnetworkpolicies.policy.networking.k8s.io changed 2025-12-08T17:46:05.473821517+00:00 stderr F I1208 17:46:05.473799 12 controller.go:231] Updating CRD OpenAPI spec because adminpolicybasedexternalroutes.k8s.ovn.org changed 2025-12-08T17:46:05.473821517+00:00 stderr F I1208 17:46:05.473805 12 controller.go:231] Updating CRD OpenAPI spec because alertingrules.monitoring.openshift.io changed 2025-12-08T17:46:05.473890879+00:00 stderr F I1208 17:46:05.473841 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.coreos.com changed 2025-12-08T17:46:05.473890879+00:00 stderr F I1208 17:46:05.473852 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.coreos.com changed 2025-12-08T17:46:05.473890879+00:00 stderr F I1208 17:46:05.473860 12 controller.go:231] Updating CRD OpenAPI spec because alertrelabelconfigs.monitoring.openshift.io changed 2025-12-08T17:46:05.473968971+00:00 stderr F I1208 17:46:05.473920 12 controller.go:231] Updating CRD OpenAPI spec because apirequestcounts.apiserver.openshift.io changed 2025-12-08T17:46:05.473968971+00:00 stderr F I1208 17:46:05.473934 12 controller.go:231] Updating CRD OpenAPI spec because apiservers.config.openshift.io changed 2025-12-08T17:46:05.473968971+00:00 stderr F I1208 17:46:05.473940 12 controller.go:231] Updating CRD OpenAPI spec because authentications.config.openshift.io changed 2025-12-08T17:46:05.474044173+00:00 stderr F I1208 17:46:05.474007 12 controller.go:231] Updating CRD OpenAPI spec because authentications.operator.openshift.io changed 2025-12-08T17:46:05.474044173+00:00 stderr F I1208 17:46:05.474021 12 controller.go:231] Updating CRD OpenAPI spec because baselineadminnetworkpolicies.policy.networking.k8s.io changed 2025-12-08T17:46:05.474098675+00:00 stderr F I1208 
17:46:05.474068 12 controller.go:231] Updating CRD OpenAPI spec because builds.config.openshift.io changed 2025-12-08T17:46:05.474098675+00:00 stderr F I1208 17:46:05.474079 12 controller.go:231] Updating CRD OpenAPI spec because catalogsources.operators.coreos.com changed 2025-12-08T17:46:05.474098675+00:00 stderr F I1208 17:46:05.474085 12 controller.go:231] Updating CRD OpenAPI spec because clusterautoscalers.autoscaling.openshift.io changed 2025-12-08T17:46:05.474108035+00:00 stderr F I1208 17:46:05.474090 12 handler.go:288] Adding GroupVersion migration.k8s.io v1alpha1 to ResourceManager 2025-12-08T17:46:05.474167237+00:00 stderr F I1208 17:46:05.474123 12 controller.go:231] Updating CRD OpenAPI spec because clustercsidrivers.operator.openshift.io changed 2025-12-08T17:46:05.474167237+00:00 stderr F I1208 17:46:05.474144 12 controller.go:231] Updating CRD OpenAPI spec because clusterimagepolicies.config.openshift.io changed 2025-12-08T17:46:05.474167237+00:00 stderr F I1208 17:46:05.474150 12 controller.go:231] Updating CRD OpenAPI spec because clusteroperators.config.openshift.io changed 2025-12-08T17:46:05.474167237+00:00 stderr F I1208 17:46:05.474155 12 controller.go:231] Updating CRD OpenAPI spec because clusterresourcequotas.quota.openshift.io changed 2025-12-08T17:46:05.474230979+00:00 stderr F I1208 17:46:05.474192 12 controller.go:231] Updating CRD OpenAPI spec because clusterserviceversions.operators.coreos.com changed 2025-12-08T17:46:05.474230979+00:00 stderr F I1208 17:46:05.474202 12 controller.go:231] Updating CRD OpenAPI spec because clusteruserdefinednetworks.k8s.ovn.org changed 2025-12-08T17:46:05.474230979+00:00 stderr F I1208 17:46:05.474207 12 controller.go:231] Updating CRD OpenAPI spec because clusterversions.config.openshift.io changed 2025-12-08T17:46:05.474230979+00:00 stderr F I1208 17:46:05.474218 12 controller.go:231] Updating CRD OpenAPI spec because configs.imageregistry.operator.openshift.io changed 2025-12-08T17:46:05.474293301+00:00 stderr F I1208 17:46:05.474259 12 controller.go:231] Updating CRD OpenAPI spec because configs.operator.openshift.io changed 2025-12-08T17:46:05.474293301+00:00 stderr F I1208 17:46:05.474270 12 controller.go:231] Updating CRD OpenAPI spec because configs.samples.operator.openshift.io changed 2025-12-08T17:46:05.474333023+00:00 stderr F I1208 17:46:05.474278 12 controller.go:231] Updating CRD OpenAPI spec because consoleclidownloads.console.openshift.io changed 2025-12-08T17:46:05.474333023+00:00 stderr F I1208 17:46:05.474277 12 handler.go:288] Adding GroupVersion security.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.474333023+00:00 stderr F I1208 17:46:05.474301 12 handler.go:288] Adding GroupVersion ingress.operator.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.474406085+00:00 stderr F I1208 17:46:05.474361 12 controller.go:231] Updating CRD OpenAPI spec because consoleexternalloglinks.console.openshift.io changed 2025-12-08T17:46:05.474406085+00:00 stderr F I1208 17:46:05.474372 12 controller.go:231] Updating CRD OpenAPI spec because consolelinks.console.openshift.io changed 2025-12-08T17:46:05.474406085+00:00 stderr F I1208 17:46:05.474379 12 controller.go:231] Updating CRD OpenAPI spec because consolenotifications.console.openshift.io changed 2025-12-08T17:46:05.474406085+00:00 stderr F I1208 17:46:05.474389 12 controller.go:231] Updating CRD OpenAPI spec because consoleplugins.console.openshift.io changed 2025-12-08T17:46:05.474472917+00:00 stderr F I1208 17:46:05.474436 12 controller.go:231] 
Updating CRD OpenAPI spec because consolequickstarts.console.openshift.io changed 2025-12-08T17:46:05.474472917+00:00 stderr F I1208 17:46:05.474448 12 controller.go:231] Updating CRD OpenAPI spec because consoles.config.openshift.io changed 2025-12-08T17:46:05.474472917+00:00 stderr F I1208 17:46:05.474453 12 controller.go:231] Updating CRD OpenAPI spec because consoles.operator.openshift.io changed 2025-12-08T17:46:05.474518998+00:00 stderr F I1208 17:46:05.474459 12 controller.go:231] Updating CRD OpenAPI spec because consolesamples.console.openshift.io changed 2025-12-08T17:46:05.474585700+00:00 stderr F I1208 17:46:05.474546 12 controller.go:231] Updating CRD OpenAPI spec because consoleyamlsamples.console.openshift.io changed 2025-12-08T17:46:05.474585700+00:00 stderr F I1208 17:46:05.474559 12 controller.go:231] Updating CRD OpenAPI spec because containerruntimeconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:46:05.474585700+00:00 stderr F I1208 17:46:05.474566 12 controller.go:231] Updating CRD OpenAPI spec because controllerconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:46:05.474636322+00:00 stderr F I1208 17:46:05.474600 12 handler.go:288] Adding GroupVersion operators.coreos.com v1alpha2 to ResourceManager 2025-12-08T17:46:05.474636322+00:00 stderr F I1208 17:46:05.474609 12 controller.go:231] Updating CRD OpenAPI spec because controlplanemachinesets.machine.openshift.io changed 2025-12-08T17:46:05.474636322+00:00 stderr F I1208 17:46:05.474617 12 controller.go:231] Updating CRD OpenAPI spec because csisnapshotcontrollers.operator.openshift.io changed 2025-12-08T17:46:05.474636322+00:00 stderr F I1208 17:46:05.474623 12 controller.go:231] Updating CRD OpenAPI spec because dnses.config.openshift.io changed 2025-12-08T17:46:05.474685693+00:00 stderr F I1208 17:46:05.474653 12 controller.go:231] Updating CRD OpenAPI spec because dnses.operator.openshift.io changed 2025-12-08T17:46:05.474685693+00:00 stderr F I1208 17:46:05.474664 12 controller.go:231] Updating CRD OpenAPI spec because dnsrecords.ingress.operator.openshift.io changed 2025-12-08T17:46:05.474747335+00:00 stderr F I1208 17:46:05.474708 12 controller.go:231] Updating CRD OpenAPI spec because egressfirewalls.k8s.ovn.org changed 2025-12-08T17:46:05.474747335+00:00 stderr F I1208 17:46:05.474719 12 controller.go:231] Updating CRD OpenAPI spec because egressips.k8s.ovn.org changed 2025-12-08T17:46:05.474747335+00:00 stderr F I1208 17:46:05.474727 12 controller.go:231] Updating CRD OpenAPI spec because egressqoses.k8s.ovn.org changed 2025-12-08T17:46:05.474797036+00:00 stderr F I1208 17:46:05.474736 12 controller.go:231] Updating CRD OpenAPI spec because egressrouters.network.operator.openshift.io changed 2025-12-08T17:46:05.474797036+00:00 stderr F I1208 17:46:05.474779 12 controller.go:231] Updating CRD OpenAPI spec because egressservices.k8s.ovn.org changed 2025-12-08T17:46:05.474797036+00:00 stderr F I1208 17:46:05.474785 12 controller.go:231] Updating CRD OpenAPI spec because etcds.operator.openshift.io changed 2025-12-08T17:46:05.474849778+00:00 stderr F I1208 17:46:05.474818 12 controller.go:231] Updating CRD OpenAPI spec because featuregates.config.openshift.io changed 2025-12-08T17:46:05.474849778+00:00 stderr F I1208 17:46:05.474837 12 controller.go:231] Updating CRD OpenAPI spec because gatewayclasses.gateway.networking.k8s.io changed 2025-12-08T17:46:05.474929470+00:00 stderr F I1208 17:46:05.474889 12 controller.go:231] Updating CRD OpenAPI spec because 
gateways.gateway.networking.k8s.io changed 2025-12-08T17:46:05.474929470+00:00 stderr F I1208 17:46:05.474899 12 controller.go:231] Updating CRD OpenAPI spec because grpcroutes.gateway.networking.k8s.io changed 2025-12-08T17:46:05.474929470+00:00 stderr F I1208 17:46:05.474906 12 controller.go:231] Updating CRD OpenAPI spec because helmchartrepositories.helm.openshift.io changed 2025-12-08T17:46:05.474929470+00:00 stderr F I1208 17:46:05.474916 12 controller.go:231] Updating CRD OpenAPI spec because httproutes.gateway.networking.k8s.io changed 2025-12-08T17:46:05.475000843+00:00 stderr F I1208 17:46:05.474967 12 controller.go:231] Updating CRD OpenAPI spec because imagecontentpolicies.config.openshift.io changed 2025-12-08T17:46:05.475000843+00:00 stderr F I1208 17:46:05.474978 12 controller.go:231] Updating CRD OpenAPI spec because imagecontentsourcepolicies.operator.openshift.io changed 2025-12-08T17:46:05.475000843+00:00 stderr F I1208 17:46:05.474985 12 controller.go:231] Updating CRD OpenAPI spec because imagedigestmirrorsets.config.openshift.io changed 2025-12-08T17:46:05.475059284+00:00 stderr F I1208 17:46:05.475018 12 controller.go:231] Updating CRD OpenAPI spec because imagepolicies.config.openshift.io changed 2025-12-08T17:46:05.475059284+00:00 stderr F I1208 17:46:05.475041 12 controller.go:231] Updating CRD OpenAPI spec because imagepruners.imageregistry.operator.openshift.io changed 2025-12-08T17:46:05.475059284+00:00 stderr F I1208 17:46:05.475047 12 controller.go:231] Updating CRD OpenAPI spec because images.config.openshift.io changed 2025-12-08T17:46:05.475094725+00:00 stderr F I1208 17:46:05.475058 12 handler.go:288] Adding GroupVersion helm.openshift.io v1beta1 to ResourceManager 2025-12-08T17:46:05.475143587+00:00 stderr F I1208 17:46:05.475113 12 controller.go:231] Updating CRD OpenAPI spec because imagetagmirrorsets.config.openshift.io changed 2025-12-08T17:46:05.475143587+00:00 stderr F I1208 17:46:05.475126 12 controller.go:231] Updating CRD OpenAPI spec because infrastructures.config.openshift.io changed 2025-12-08T17:46:05.475143587+00:00 stderr F I1208 17:46:05.475134 12 handler.go:288] Adding GroupVersion whereabouts.cni.cncf.io v1alpha1 to ResourceManager 2025-12-08T17:46:05.475194608+00:00 stderr F I1208 17:46:05.475158 12 controller.go:231] Updating CRD OpenAPI spec because ingresscontrollers.operator.openshift.io changed 2025-12-08T17:46:05.475194608+00:00 stderr F I1208 17:46:05.475169 12 controller.go:231] Updating CRD OpenAPI spec because ingresses.config.openshift.io changed 2025-12-08T17:46:05.475194608+00:00 stderr F I1208 17:46:05.475179 12 handler.go:288] Adding GroupVersion imageregistry.operator.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.475263440+00:00 stderr F I1208 17:46:05.475216 12 handler.go:288] Adding GroupVersion operator.openshift.io v1alpha1 to ResourceManager 2025-12-08T17:46:05.475263440+00:00 stderr F I1208 17:46:05.475218 12 controller.go:231] Updating CRD OpenAPI spec because installplans.operators.coreos.com changed 2025-12-08T17:46:05.475263440+00:00 stderr F I1208 17:46:05.475229 12 controller.go:231] Updating CRD OpenAPI spec because ipaddressclaims.ipam.cluster.x-k8s.io changed 2025-12-08T17:46:05.475263440+00:00 stderr F I1208 17:46:05.475241 12 controller.go:231] Updating CRD OpenAPI spec because ipaddresses.ipam.cluster.x-k8s.io changed 2025-12-08T17:46:05.475263440+00:00 stderr F I1208 17:46:05.475247 12 controller.go:231] Updating CRD OpenAPI spec because ipamclaims.k8s.cni.cncf.io changed 
2025-12-08T17:46:05.475310542+00:00 stderr F I1208 17:46:05.475252 12 controller.go:231] Updating CRD OpenAPI spec because ippools.whereabouts.cni.cncf.io changed 2025-12-08T17:46:05.475310542+00:00 stderr F I1208 17:46:05.475295 12 controller.go:231] Updating CRD OpenAPI spec because kubeapiservers.operator.openshift.io changed 2025-12-08T17:46:05.475310542+00:00 stderr F I1208 17:46:05.475299 12 handler.go:288] Adding GroupVersion ipam.cluster.x-k8s.io v1alpha1 to ResourceManager 2025-12-08T17:46:05.475361703+00:00 stderr F I1208 17:46:05.475324 12 handler.go:288] Adding GroupVersion ipam.cluster.x-k8s.io v1beta1 to ResourceManager 2025-12-08T17:46:05.475415025+00:00 stderr F I1208 17:46:05.475325 12 controller.go:231] Updating CRD OpenAPI spec because kubecontrollermanagers.operator.openshift.io changed 2025-12-08T17:46:05.475415025+00:00 stderr F I1208 17:46:05.475395 12 controller.go:231] Updating CRD OpenAPI spec because kubeletconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:46:05.475465056+00:00 stderr F I1208 17:46:05.475435 12 controller.go:231] Updating CRD OpenAPI spec because kubeschedulers.operator.openshift.io changed 2025-12-08T17:46:05.475465056+00:00 stderr F I1208 17:46:05.475449 12 controller.go:231] Updating CRD OpenAPI spec because kubestorageversionmigrators.operator.openshift.io changed 2025-12-08T17:46:05.475512768+00:00 stderr F I1208 17:46:05.475454 12 controller.go:231] Updating CRD OpenAPI spec because machineautoscalers.autoscaling.openshift.io changed 2025-12-08T17:46:05.475512768+00:00 stderr F I1208 17:46:05.475490 12 controller.go:231] Updating CRD OpenAPI spec because machineconfignodes.machineconfiguration.openshift.io changed 2025-12-08T17:46:05.475569039+00:00 stderr F I1208 17:46:05.475536 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigpools.machineconfiguration.openshift.io changed 2025-12-08T17:46:05.475569039+00:00 stderr F I1208 17:46:05.475547 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:46:05.475618351+00:00 stderr F I1208 17:46:05.475589 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigurations.operator.openshift.io changed 2025-12-08T17:46:05.475618351+00:00 stderr F I1208 17:46:05.475592 12 handler.go:288] Adding GroupVersion monitoring.coreos.com v1alpha1 to ResourceManager 2025-12-08T17:46:05.475618351+00:00 stderr F I1208 17:46:05.475601 12 controller.go:231] Updating CRD OpenAPI spec because machinehealthchecks.machine.openshift.io changed 2025-12-08T17:46:05.475618351+00:00 stderr F I1208 17:46:05.475607 12 controller.go:231] Updating CRD OpenAPI spec because machineosbuilds.machineconfiguration.openshift.io changed 2025-12-08T17:46:05.475669212+00:00 stderr F I1208 17:46:05.475635 12 handler.go:288] Adding GroupVersion monitoring.coreos.com v1beta1 to ResourceManager 2025-12-08T17:46:05.475669212+00:00 stderr F I1208 17:46:05.475639 12 controller.go:231] Updating CRD OpenAPI spec because machineosconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:46:05.475728634+00:00 stderr F I1208 17:46:05.475688 12 controller.go:231] Updating CRD OpenAPI spec because machines.machine.openshift.io changed 2025-12-08T17:46:05.475728634+00:00 stderr F I1208 17:46:05.475700 12 controller.go:231] Updating CRD OpenAPI spec because machinesets.machine.openshift.io changed 2025-12-08T17:46:05.475728634+00:00 stderr F I1208 17:46:05.475706 12 controller.go:231] Updating CRD OpenAPI spec because 
metal3remediations.infrastructure.cluster.x-k8s.io changed 2025-12-08T17:46:05.475728634+00:00 stderr F I1208 17:46:05.475716 12 controller.go:231] Updating CRD OpenAPI spec because metal3remediationtemplates.infrastructure.cluster.x-k8s.io changed 2025-12-08T17:46:05.475787596+00:00 stderr F I1208 17:46:05.475756 12 controller.go:231] Updating CRD OpenAPI spec because network-attachment-definitions.k8s.cni.cncf.io changed 2025-12-08T17:46:05.475787596+00:00 stderr F I1208 17:46:05.475765 12 controller.go:231] Updating CRD OpenAPI spec because networks.config.openshift.io changed 2025-12-08T17:46:05.475787596+00:00 stderr F I1208 17:46:05.475772 12 controller.go:231] Updating CRD OpenAPI spec because networks.operator.openshift.io changed 2025-12-08T17:46:05.475831177+00:00 stderr F I1208 17:46:05.475802 12 controller.go:231] Updating CRD OpenAPI spec because nodes.config.openshift.io changed 2025-12-08T17:46:05.475908329+00:00 stderr F I1208 17:46:05.475852 12 controller.go:231] Updating CRD OpenAPI spec because nodeslicepools.whereabouts.cni.cncf.io changed 2025-12-08T17:46:05.475908329+00:00 stderr F I1208 17:46:05.475863 12 controller.go:231] Updating CRD OpenAPI spec because oauths.config.openshift.io changed 2025-12-08T17:46:05.475908329+00:00 stderr F I1208 17:46:05.475885 12 controller.go:231] Updating CRD OpenAPI spec because olmconfigs.operators.coreos.com changed 2025-12-08T17:46:05.475908329+00:00 stderr F I1208 17:46:05.475892 12 controller.go:231] Updating CRD OpenAPI spec because openshiftapiservers.operator.openshift.io changed 2025-12-08T17:46:05.475968761+00:00 stderr F I1208 17:46:05.475939 12 controller.go:231] Updating CRD OpenAPI spec because openshiftcontrollermanagers.operator.openshift.io changed 2025-12-08T17:46:05.475968761+00:00 stderr F I1208 17:46:05.475952 12 controller.go:231] Updating CRD OpenAPI spec because operatorconditions.operators.coreos.com changed 2025-12-08T17:46:05.476015023+00:00 stderr F I1208 17:46:05.475958 12 controller.go:231] Updating CRD OpenAPI spec because operatorgroups.operators.coreos.com changed 2025-12-08T17:46:05.476015023+00:00 stderr F I1208 17:46:05.475992 12 controller.go:231] Updating CRD OpenAPI spec because operatorhubs.config.openshift.io changed 2025-12-08T17:46:05.476068934+00:00 stderr F I1208 17:46:05.476036 12 controller.go:231] Updating CRD OpenAPI spec because operatorpkis.network.operator.openshift.io changed 2025-12-08T17:46:05.476068934+00:00 stderr F I1208 17:46:05.476046 12 controller.go:231] Updating CRD OpenAPI spec because operators.operators.coreos.com changed 2025-12-08T17:46:05.476068934+00:00 stderr F I1208 17:46:05.476051 12 controller.go:231] Updating CRD OpenAPI spec because overlappingrangeipreservations.whereabouts.cni.cncf.io changed 2025-12-08T17:46:05.476130156+00:00 stderr F I1208 17:46:05.476093 12 controller.go:231] Updating CRD OpenAPI spec because pinnedimagesets.machineconfiguration.openshift.io changed 2025-12-08T17:46:05.476130156+00:00 stderr F I1208 17:46:05.476106 12 controller.go:231] Updating CRD OpenAPI spec because podmonitors.monitoring.coreos.com changed 2025-12-08T17:46:05.476130156+00:00 stderr F I1208 17:46:05.476112 12 controller.go:231] Updating CRD OpenAPI spec because podnetworkconnectivitychecks.controlplane.operator.openshift.io changed 2025-12-08T17:46:05.476173727+00:00 stderr F I1208 17:46:05.476144 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.coreos.com changed 2025-12-08T17:46:05.476231279+00:00 stderr F I1208 17:46:05.476198 12 
controller.go:231] Updating CRD OpenAPI spec because projecthelmchartrepositories.helm.openshift.io changed 2025-12-08T17:46:05.476231279+00:00 stderr F I1208 17:46:05.476210 12 controller.go:231] Updating CRD OpenAPI spec because projects.config.openshift.io changed 2025-12-08T17:46:05.476278170+00:00 stderr F I1208 17:46:05.476221 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.coreos.com changed 2025-12-08T17:46:05.476278170+00:00 stderr F I1208 17:46:05.476144 12 handler.go:288] Adding GroupVersion autoscaling.openshift.io v1beta1 to ResourceManager 2025-12-08T17:46:05.476278170+00:00 stderr F I1208 17:46:05.476262 12 controller.go:231] Updating CRD OpenAPI spec because prometheusrules.monitoring.coreos.com changed 2025-12-08T17:46:05.476329862+00:00 stderr F I1208 17:46:05.476297 12 controller.go:231] Updating CRD OpenAPI spec because proxies.config.openshift.io changed 2025-12-08T17:46:05.476329862+00:00 stderr F I1208 17:46:05.476308 12 controller.go:231] Updating CRD OpenAPI spec because rangeallocations.security.internal.openshift.io changed 2025-12-08T17:46:05.476386804+00:00 stderr F I1208 17:46:05.476354 12 controller.go:231] Updating CRD OpenAPI spec because referencegrants.gateway.networking.k8s.io changed 2025-12-08T17:46:05.476386804+00:00 stderr F I1208 17:46:05.476365 12 controller.go:231] Updating CRD OpenAPI spec because rolebindingrestrictions.authorization.openshift.io changed 2025-12-08T17:46:05.476386804+00:00 stderr F I1208 17:46:05.476374 12 controller.go:231] Updating CRD OpenAPI spec because schedulers.config.openshift.io changed 2025-12-08T17:46:05.476487477+00:00 stderr F I1208 17:46:05.476443 12 controller.go:231] Updating CRD OpenAPI spec because securitycontextconstraints.security.openshift.io changed 2025-12-08T17:46:05.476487477+00:00 stderr F I1208 17:46:05.476455 12 controller.go:231] Updating CRD OpenAPI spec because servicecas.operator.openshift.io changed 2025-12-08T17:46:05.476487477+00:00 stderr F I1208 17:46:05.476462 12 controller.go:231] Updating CRD OpenAPI spec because servicemonitors.monitoring.coreos.com changed 2025-12-08T17:46:05.476545208+00:00 stderr F I1208 17:46:05.476510 12 controller.go:231] Updating CRD OpenAPI spec because storages.operator.openshift.io changed 2025-12-08T17:46:05.476545208+00:00 stderr F I1208 17:46:05.476517 12 handler.go:288] Adding GroupVersion samples.operator.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.476545208+00:00 stderr F I1208 17:46:05.476521 12 controller.go:231] Updating CRD OpenAPI spec because storagestates.migration.k8s.io changed 2025-12-08T17:46:05.476545208+00:00 stderr F I1208 17:46:05.476532 12 controller.go:231] Updating CRD OpenAPI spec because storageversionmigrations.migration.k8s.io changed 2025-12-08T17:46:05.476599260+00:00 stderr F I1208 17:46:05.476569 12 handler.go:288] Adding GroupVersion policy.networking.k8s.io v1alpha1 to ResourceManager 2025-12-08T17:46:05.476686283+00:00 stderr F I1208 17:46:05.476571 12 controller.go:231] Updating CRD OpenAPI spec because subscriptions.operators.coreos.com changed 2025-12-08T17:46:05.476686283+00:00 stderr F I1208 17:46:05.476667 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.coreos.com changed 2025-12-08T17:46:05.476745484+00:00 stderr F I1208 17:46:05.476713 12 controller.go:231] Updating CRD OpenAPI spec because userdefinednetworks.k8s.ovn.org changed 2025-12-08T17:46:05.476984251+00:00 stderr F I1208 17:46:05.476928 12 handler.go:288] Adding GroupVersion 
autoscaling.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.478143707+00:00 stderr F I1208 17:46:05.478083 12 handler.go:288] Adding GroupVersion authorization.openshift.io v1 to ResourceManager 2025-12-08T17:46:05.515284001+00:00 stderr F W1208 17:46:05.515153 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" (source IP 38.102.83.243:52972, user agent "kubelet/v1.33.5 (linux/amd64) kubernetes/27f72e0") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:05.515373404+00:00 stderr F W1208 17:46:05.515290 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" (source IP 38.102.83.243:52972, user agent "kubelet/v1.33.5 (linux/amd64) kubernetes/27f72e0") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:05.575024274+00:00 stderr F I1208 17:46:05.574327 12 healthz.go:280] poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes check failed: readyz 2025-12-08T17:46:05.575024274+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:05.575024274+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:05.611013015+00:00 stderr F W1208 17:46:05.608572 12 patch_genericapiserver.go:245] Request to "/apis/autoscaling.openshift.io/v1beta1/machineautoscalers" (source IP 38.102.83.243:45832, user agent "cluster-policy-controller/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:05.674089118+00:00 stderr F I1208 17:46:05.672316 12 patch_genericapiserver.go:241] Loopback request to "/api/v1/namespaces/openshift-config-managed/configmaps" (user agent "cluster-kube-controller-manager-operator/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready. This client probably does not watch /readyz and might get inconsistent answers. 2025-12-08T17:46:05.674089118+00:00 stderr F I1208 17:46:05.673389 12 healthz.go:280] poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes check failed: readyz 2025-12-08T17:46:05.674089118+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:05.674089118+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:05.766905554+00:00 stderr F I1208 17:46:05.766753 12 patch_genericapiserver.go:241] Loopback request to "/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc" (user agent "cluster-kube-apiserver-operator/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready. This client probably does not watch /readyz and might get inconsistent answers. 
2025-12-08T17:46:05.773389358+00:00 stderr F I1208 17:46:05.773281 12 healthz.go:280] poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes check failed: readyz 2025-12-08T17:46:05.773389358+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:05.773389358+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:05.874320218+00:00 stderr F I1208 17:46:05.873641 12 healthz.go:280] poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes check failed: readyz 2025-12-08T17:46:05.874320218+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:05.874320218+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:05.936063271+00:00 stderr F W1208 17:46:05.935731 12 patch_genericapiserver.go:245] Request to "/apis/monitoring.coreos.com/v1/prometheuses" (source IP 38.102.83.243:45832, user agent "cluster-policy-controller/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:05.974108033+00:00 stderr F I1208 17:46:05.973916 12 healthz.go:280] poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes check failed: readyz 2025-12-08T17:46:05.974108033+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:05.974108033+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:06.001012451+00:00 stderr F W1208 17:46:06.000658 12 patch_genericapiserver.go:245] Request to "/apis/monitoring.coreos.com/v1beta1/alertmanagerconfigs" (source IP 38.102.83.243:45832, user agent "cluster-policy-controller/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.025840966+00:00 stderr F W1208 17:46:06.025715 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces/openshift-console-operator/configmaps" (source IP 38.102.83.243:52972, user agent "kubelet/v1.33.5 (linux/amd64) kubernetes/27f72e0") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.073644211+00:00 stderr F W1208 17:46:06.073447 12 patch_genericapiserver.go:245] Request to "/apis/whereabouts.cni.cncf.io/v1alpha1/nodeslicepools" (source IP 38.102.83.243:45832, user agent "cluster-policy-controller/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.074607180+00:00 stderr F I1208 17:46:06.074496 12 healthz.go:280] poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes check failed: readyz 2025-12-08T17:46:06.074607180+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:06.074607180+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:06.104643972+00:00 stderr F W1208 17:46:06.104431 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces" (source IP 38.102.83.243:45834, user agent "crc/ovnkube@23bb8b679668 (linux/amd64) kubernetes/v0.33.3") before server is ready, possibly a sign for a broken load balancer setup. 
2025-12-08T17:46:06.131230259+00:00 stderr F W1208 17:46:06.131047 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces/openshift-console/secrets" (source IP 38.102.83.243:52972, user agent "kubelet/v1.33.5 (linux/amd64) kubernetes/27f72e0") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.156932361+00:00 stderr F W1208 17:46:06.156700 12 patch_genericapiserver.go:245] Request to "/api/v1/podtemplates" (source IP 38.102.83.243:45832, user agent "cluster-policy-controller/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.184619272+00:00 stderr F I1208 17:46:06.184075 12 healthz.go:280] poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes check failed: readyz 2025-12-08T17:46:06.184619272+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:06.184619272+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:06.205122747+00:00 stderr F W1208 17:46:06.204945 12 patch_genericapiserver.go:245] Request to "/api/v1/pods" (source IP 38.102.83.243:45832, user agent "cluster-policy-controller/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.287135219+00:00 stderr F W1208 17:46:06.286609 12 patch_genericapiserver.go:245] Request to "/api/v1/persistentvolumeclaims" (source IP 38.102.83.243:45832, user agent "cluster-policy-controller/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.295951274+00:00 stderr F E1208 17:46:06.295828 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.295951274+00:00 stderr F loading OpenAPI spec for "v1.packages.operators.coreos.com" failed with: failed to download v1.packages.operators.coreos.com: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.295951274+00:00 stderr F , Header: map[Audit-Id:[b1b36dbe-e7c9-450a-81c8-1b0d68327154] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.295951274+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.297001585+00:00 stderr F I1208 17:46:06.296938 12 controller.go:109] OpenAPI AggregationController: action for item v1.packages.operators.coreos.com: Rate Limited Requeue. 2025-12-08T17:46:06.304370857+00:00 stderr F I1208 17:46:06.304256 12 storage_scheduling.go:111] all system priority classes are created successfully or already exist. 
2025-12-08T17:46:06.305961344+00:00 stderr F I1208 17:46:06.297156 12 healthz.go:280] poststarthook/rbac/bootstrap-roles,poststarthook/scheduling/bootstrap-system-priority-classes check failed: readyz 2025-12-08T17:46:06.305961344+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:06.305961344+00:00 stderr F [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: not finished 2025-12-08T17:46:06.315544822+00:00 stderr F E1208 17:46:06.315427 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.315544822+00:00 stderr F loading OpenAPI spec for "v1.project.openshift.io" failed with: failed to download v1.project.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.315544822+00:00 stderr F , Header: map[Audit-Id:[53d174bf-2488-458c-90ef-683b793abf0c] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.315544822+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.316571272+00:00 stderr F I1208 17:46:06.316483 12 controller.go:109] OpenAPI AggregationController: action for item v1.project.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.320886162+00:00 stderr F E1208 17:46:06.320808 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.320886162+00:00 stderr F loading OpenAPI spec for "v1.quota.openshift.io" failed with: failed to download v1.quota.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.320886162+00:00 stderr F , Header: map[Audit-Id:[c1d667a0-0905-4de6-875e-9a6d402d7710] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.320886162+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.321983975+00:00 stderr F I1208 17:46:06.321931 12 controller.go:109] OpenAPI AggregationController: action for item v1.quota.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.322278034+00:00 stderr F W1208 17:46:06.322195 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces/openshift-machine-config-operator/secrets" (source IP 38.102.83.243:52972, user agent "kubelet/v1.33.5 (linux/amd64) kubernetes/27f72e0") before server is ready, possibly a sign for a broken load balancer setup. 
2025-12-08T17:46:06.326177451+00:00 stderr F E1208 17:46:06.326087 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.326177451+00:00 stderr F loading OpenAPI spec for "v1.route.openshift.io" failed with: failed to download v1.route.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.326177451+00:00 stderr F , Header: map[Audit-Id:[dcbab289-bdc4-48ba-b056-514b46e8c181] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.326177451+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.327236153+00:00 stderr F I1208 17:46:06.327165 12 controller.go:109] OpenAPI AggregationController: action for item v1.route.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.333437449+00:00 stderr F E1208 17:46:06.333304 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.333437449+00:00 stderr F loading OpenAPI spec for "v1.build.openshift.io" failed with: failed to download v1.build.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.333437449+00:00 stderr F , Header: map[Audit-Id:[258f0ccb-720c-4bd3-b8d5-fc7ffb102ba9] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.333437449+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.334852982+00:00 stderr F I1208 17:46:06.334790 12 controller.go:109] OpenAPI AggregationController: action for item v1.build.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.335935764+00:00 stderr F I1208 17:46:06.335855 12 policy_source.go:240] refreshing policies 2025-12-08T17:46:06.337495321+00:00 stderr F E1208 17:46:06.337444 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.337495321+00:00 stderr F loading OpenAPI spec for "v1.security.openshift.io" failed with: failed to download v1.security.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.337495321+00:00 stderr F , Header: map[Audit-Id:[f6a128e0-d466-48c1-8338-56e6599733b3] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.337495321+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.338750948+00:00 stderr F I1208 17:46:06.338662 12 controller.go:109] OpenAPI AggregationController: action for item v1.security.openshift.io: Rate Limited Requeue. 
2025-12-08T17:46:06.341062318+00:00 stderr F E1208 17:46:06.340987 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.341062318+00:00 stderr F loading OpenAPI spec for "v1.template.openshift.io" failed with: failed to download v1.template.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.341062318+00:00 stderr F , Header: map[Audit-Id:[ad04c13f-8f84-43fd-81c2-ba5775deddd4] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.341062318+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.342096319+00:00 stderr F I1208 17:46:06.342026 12 controller.go:109] OpenAPI AggregationController: action for item v1.template.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.346864152+00:00 stderr F E1208 17:46:06.346748 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.346864152+00:00 stderr F loading OpenAPI spec for "v1.oauth.openshift.io" failed with: failed to download v1.oauth.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.346864152+00:00 stderr F , Header: map[Audit-Id:[dee5f72a-ac84-4523-97e9-a47841d30772] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.346864152+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.348036537+00:00 stderr F I1208 17:46:06.347848 12 controller.go:109] OpenAPI AggregationController: action for item v1.oauth.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.351206673+00:00 stderr F E1208 17:46:06.350853 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.351206673+00:00 stderr F loading OpenAPI spec for "v1.image.openshift.io" failed with: failed to download v1.image.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.351206673+00:00 stderr F , Header: map[Audit-Id:[3dc4eb7d-3d0d-4279-89bf-1806f641badc] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.351206673+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.352052038+00:00 stderr F I1208 17:46:06.351935 12 controller.go:109] OpenAPI AggregationController: action for item v1.image.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.355531922+00:00 stderr F W1208 17:46:06.355456 12 patch_genericapiserver.go:245] Request to "/api/v1/nodes" (source IP 38.102.83.243:45856, user agent "crc/ovnkube@23bb8b679668 (linux/amd64) kubernetes/v0.33.3") before server is ready, possibly a sign for a broken load balancer setup. 
2025-12-08T17:46:06.356103749+00:00 stderr F E1208 17:46:06.355638 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.356103749+00:00 stderr F loading OpenAPI spec for "v1.apps.openshift.io" failed with: failed to download v1.apps.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.356103749+00:00 stderr F , Header: map[Audit-Id:[d5bfed0e-2c08-4daf-961b-5c720c40df1b] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.356103749+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.356766210+00:00 stderr F I1208 17:46:06.356685 12 controller.go:109] OpenAPI AggregationController: action for item v1.apps.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.359862912+00:00 stderr F E1208 17:46:06.359741 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.359862912+00:00 stderr F loading OpenAPI spec for "v1.authorization.openshift.io" failed with: failed to download v1.authorization.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.359862912+00:00 stderr F , Header: map[Audit-Id:[6adf8a1e-3638-4ef1-a560-05688499cb9b] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.359862912+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.361267574+00:00 stderr F I1208 17:46:06.361195 12 controller.go:109] OpenAPI AggregationController: action for item v1.authorization.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.362953985+00:00 stderr F E1208 17:46:06.362837 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.packages.operators.coreos.com\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.364133520+00:00 stderr F I1208 17:46:06.364079 12 controller.go:126] OpenAPI AggregationController: action for item v1.packages.operators.coreos.com: Rate Limited Requeue. 
2025-12-08T17:46:06.365167102+00:00 stderr F E1208 17:46:06.365079 12 controller.go:102] "Unhandled Error" err=< 2025-12-08T17:46:06.365167102+00:00 stderr F loading OpenAPI spec for "v1.user.openshift.io" failed with: failed to download v1.user.openshift.io: failed to retrieve openAPI spec, http error: ResponseCode: 500, Body: Internal Server Error: "/openapi/v2": Post "https://10.217.4.1:443/apis/authorization.k8s.io/v1/subjectaccessreviews?timeout=10s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:06.365167102+00:00 stderr F , Header: map[Audit-Id:[74d579aa-3fb1-4a6d-b34b-d2021902d0f2] Cache-Control:[no-cache, private] Content-Length:[184] Content-Type:[text/plain; charset=utf-8] Date:[Mon, 08 Dec 2025 17:46:06 GMT] X-Content-Type-Options:[nosniff]] 2025-12-08T17:46:06.365167102+00:00 stderr F > logger="UnhandledError" 2025-12-08T17:46:06.366217223+00:00 stderr F I1208 17:46:06.366130 12 controller.go:109] OpenAPI AggregationController: action for item v1.user.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.366811171+00:00 stderr F E1208 17:46:06.366736 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.project.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.368024518+00:00 stderr F I1208 17:46:06.367947 12 controller.go:126] OpenAPI AggregationController: action for item v1.project.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.370349556+00:00 stderr F E1208 17:46:06.370267 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.quota.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.371368288+00:00 stderr F I1208 17:46:06.371308 12 controller.go:126] OpenAPI AggregationController: action for item v1.quota.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.372704637+00:00 stderr F I1208 17:46:06.372621 12 healthz.go:280] poststarthook/rbac/bootstrap-roles check failed: readyz 2025-12-08T17:46:06.372704637+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:06.373834792+00:00 stderr F E1208 17:46:06.373766 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.route.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.375101309+00:00 stderr F I1208 17:46:06.375019 12 controller.go:126] OpenAPI AggregationController: action for item v1.route.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.378869172+00:00 stderr F E1208 17:46:06.378742 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.build.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.379909154+00:00 stderr F I1208 17:46:06.379798 12 controller.go:126] OpenAPI AggregationController: action for item v1.build.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.389397098+00:00 stderr F E1208 17:46:06.389291 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.security.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.390594275+00:00 stderr F I1208 17:46:06.390529 12 controller.go:126] OpenAPI AggregationController: action for item v1.security.openshift.io: Rate Limited Requeue. 
2025-12-08T17:46:06.402987336+00:00 stderr F E1208 17:46:06.402803 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.template.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.403992596+00:00 stderr F I1208 17:46:06.403931 12 controller.go:126] OpenAPI AggregationController: action for item v1.template.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.408363658+00:00 stderr F E1208 17:46:06.408266 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.oauth.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.409414739+00:00 stderr F I1208 17:46:06.409358 12 controller.go:126] OpenAPI AggregationController: action for item v1.oauth.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.412272915+00:00 stderr F E1208 17:46:06.412065 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.image.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.413209474+00:00 stderr F I1208 17:46:06.413123 12 controller.go:126] OpenAPI AggregationController: action for item v1.image.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.415887184+00:00 stderr F E1208 17:46:06.415813 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.apps.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.417200913+00:00 stderr F I1208 17:46:06.417146 12 controller.go:126] OpenAPI AggregationController: action for item v1.apps.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.419387979+00:00 stderr F W1208 17:46:06.419317 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces/openshift-console/configmaps" (source IP 38.102.83.243:52972, user agent "kubelet/v1.33.5 (linux/amd64) kubernetes/27f72e0") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.419599635+00:00 stderr F E1208 17:46:06.419546 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.authorization.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.420923244+00:00 stderr F I1208 17:46:06.420853 12 controller.go:126] OpenAPI AggregationController: action for item v1.authorization.openshift.io: Rate Limited Requeue. 2025-12-08T17:46:06.423903644+00:00 stderr F E1208 17:46:06.423823 12 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1.user.openshift.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError" 2025-12-08T17:46:06.424941066+00:00 stderr F I1208 17:46:06.424889 12 controller.go:126] OpenAPI AggregationController: action for item v1.user.openshift.io: Rate Limited Requeue. 
2025-12-08T17:46:06.472608996+00:00 stderr F I1208 17:46:06.472483 12 healthz.go:280] poststarthook/rbac/bootstrap-roles check failed: readyz 2025-12-08T17:46:06.472608996+00:00 stderr F [-]poststarthook/rbac/bootstrap-roles failed: not finished 2025-12-08T17:46:06.485320997+00:00 stderr F W1208 17:46:06.485147 12 patch_genericapiserver.go:245] Request to "/apis/apps/v1/replicasets" (source IP 38.102.83.243:45832, user agent "cluster-policy-controller/v0.0.0 (linux/amd64) kubernetes/$Format") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.497869945+00:00 stderr F W1208 17:46:06.497703 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces/openshift-marketplace/configmaps" (source IP 38.102.83.243:52972, user agent "kubelet/v1.33.5 (linux/amd64) kubernetes/27f72e0") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.539016580+00:00 stderr F W1208 17:46:06.538849 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces/openshift-config-managed/configmaps" (source IP 38.102.83.243:52956, user agent "network-operator/4.20.0-202510211040.p2.gb0393aa.assembly.stream.el9-b0393aa") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.547496114+00:00 stderr F W1208 17:46:06.547384 12 patch_genericapiserver.go:245] Request to "/api/v1/namespaces/openshift-authentication/secrets" (source IP 38.102.83.243:52972, user agent "kubelet/v1.33.5 (linux/amd64) kubernetes/27f72e0") before server is ready, possibly a sign for a broken load balancer setup. 2025-12-08T17:46:06.558973039+00:00 stderr F W1208 17:46:06.558815 12 patch_genericapiserver.go:245] Request to "/apis/config.openshift.io/v1/clusterversions" (source IP 38.102.83.243:45862, user agent "machine-config-daemon/v0.0.0 (linux/amd64) kubernetes/$Format/config-shared-informer") before server is ready, possibly a sign for a broken load balancer setup. 
2025-12-08T17:46:06.574161524+00:00 stderr F I1208 17:46:06.574020 12 patch_genericapiserver.go:97] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-apiserver", Name:"kube-apiserver-crc", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'KubeAPIReadyz' readyz=true 2025-12-08T17:46:06.581080033+00:00 stderr F W1208 17:46:06.580997 12 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.126.11] 2025-12-08T17:46:06.582826304+00:00 stderr F I1208 17:46:06.582752 12 controller.go:667] quota admission added evaluator for: endpoints 2025-12-08T17:46:06.587492085+00:00 stderr F I1208 17:46:06.587413 12 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io 2025-12-08T17:46:06.618600478+00:00 stderr F I1208 17:46:06.618455 12 store.go:1663] "Monitoring resource count at path" resource="clusteruserdefinednetworks.k8s.ovn.org" path="//k8s.ovn.org/clusteruserdefinednetworks" 2025-12-08T17:46:06.619906047+00:00 stderr F I1208 17:46:06.619813 12 cacher.go:469] cacher (clusteruserdefinednetworks.k8s.ovn.org): initialized 2025-12-08T17:46:06.619929898+00:00 stderr F I1208 17:46:06.619890 12 reflector.go:430] "Caches populated" type="k8s.ovn.org/v1, Kind=ClusterUserDefinedNetwork" reflector="storage/cacher.go:/k8s.ovn.org/clusteruserdefinednetworks" 2025-12-08T17:46:07.229788983+00:00 stderr F I1208 17:46:07.229632 12 store.go:1663] "Monitoring resource count at path" resource="egressfirewalls.k8s.ovn.org" path="//k8s.ovn.org/egressfirewalls" 2025-12-08T17:46:07.230506115+00:00 stderr F I1208 17:46:07.230413 12 cacher.go:469] cacher (egressfirewalls.k8s.ovn.org): initialized 2025-12-08T17:46:07.230506115+00:00 stderr F I1208 17:46:07.230448 12 reflector.go:430] "Caches populated" type="k8s.ovn.org/v1, Kind=EgressFirewall" reflector="storage/cacher.go:/k8s.ovn.org/egressfirewalls" 2025-12-08T17:46:07.393771046+00:00 stderr F I1208 17:46:07.393551 12 store.go:1663] "Monitoring resource count at path" resource="egressservices.k8s.ovn.org" path="//k8s.ovn.org/egressservices" 2025-12-08T17:46:07.395504277+00:00 stderr F I1208 17:46:07.395385 12 cacher.go:469] cacher (egressservices.k8s.ovn.org): initialized 2025-12-08T17:46:07.395504277+00:00 stderr F I1208 17:46:07.395435 12 reflector.go:430] "Caches populated" type="k8s.ovn.org/v1, Kind=EgressService" reflector="storage/cacher.go:/k8s.ovn.org/egressservices" 2025-12-08T17:46:07.540349415+00:00 stderr F I1208 17:46:07.539384 12 store.go:1663] "Monitoring resource count at path" resource="ipamclaims.k8s.cni.cncf.io" path="//k8s.cni.cncf.io/ipamclaims" 2025-12-08T17:46:07.541354435+00:00 stderr F I1208 17:46:07.541226 12 cacher.go:469] cacher (ipamclaims.k8s.cni.cncf.io): initialized 2025-12-08T17:46:07.541354435+00:00 stderr F I1208 17:46:07.541268 12 reflector.go:430] "Caches populated" type="k8s.cni.cncf.io/v1alpha1, Kind=IPAMClaim" reflector="storage/cacher.go:/k8s.cni.cncf.io/ipamclaims" 2025-12-08T17:46:07.753472202+00:00 stderr F I1208 17:46:07.753072 12 store.go:1663] "Monitoring resource count at path" resource="network-attachment-definitions.k8s.cni.cncf.io" path="//k8s.cni.cncf.io/network-attachment-definitions" 2025-12-08T17:46:07.754949667+00:00 stderr F I1208 17:46:07.754712 12 cacher.go:469] cacher (network-attachment-definitions.k8s.cni.cncf.io): initialized 2025-12-08T17:46:07.754949667+00:00 stderr F I1208 17:46:07.754747 12 reflector.go:430] "Caches populated" type="k8s.cni.cncf.io/v1, Kind=NetworkAttachmentDefinition" 
reflector="storage/cacher.go:/k8s.cni.cncf.io/network-attachment-definitions" 2025-12-08T17:46:07.841606518+00:00 stderr F I1208 17:46:07.841428 12 store.go:1663] "Monitoring resource count at path" resource="prometheusrules.monitoring.coreos.com" path="//monitoring.coreos.com/prometheusrules" 2025-12-08T17:46:07.866475394+00:00 stderr F I1208 17:46:07.866329 12 cacher.go:469] cacher (prometheusrules.monitoring.coreos.com): initialized 2025-12-08T17:46:07.866535335+00:00 stderr F I1208 17:46:07.866465 12 reflector.go:430] "Caches populated" type="monitoring.coreos.com/v1, Kind=PrometheusRule" reflector="storage/cacher.go:/monitoring.coreos.com/prometheusrules" 2025-12-08T17:46:08.020125686+00:00 stderr F I1208 17:46:08.019954 12 store.go:1663] "Monitoring resource count at path" resource="userdefinednetworks.k8s.ovn.org" path="//k8s.ovn.org/userdefinednetworks" 2025-12-08T17:46:08.022343362+00:00 stderr F I1208 17:46:08.022257 12 cacher.go:469] cacher (userdefinednetworks.k8s.ovn.org): initialized 2025-12-08T17:46:08.022432435+00:00 stderr F I1208 17:46:08.022333 12 reflector.go:430] "Caches populated" type="k8s.ovn.org/v1, Kind=UserDefinedNetwork" reflector="storage/cacher.go:/k8s.ovn.org/userdefinednetworks" 2025-12-08T17:46:08.117461978+00:00 stderr F I1208 17:46:08.117330 12 store.go:1663] "Monitoring resource count at path" resource="ipaddressclaims.ipam.cluster.x-k8s.io" path="//ipam.cluster.x-k8s.io/ipaddressclaims" 2025-12-08T17:46:08.121039025+00:00 stderr F I1208 17:46:08.119910 12 cacher.go:469] cacher (ipaddressclaims.ipam.cluster.x-k8s.io): initialized 2025-12-08T17:46:08.121039025+00:00 stderr F I1208 17:46:08.119941 12 reflector.go:430] "Caches populated" type="ipam.cluster.x-k8s.io/v1alpha1, Kind=IPAddressClaim" reflector="storage/cacher.go:/ipam.cluster.x-k8s.io/ipaddressclaims" 2025-12-08T17:46:08.125156409+00:00 stderr F I1208 17:46:08.124338 12 store.go:1663] "Monitoring resource count at path" resource="ipaddressclaims.ipam.cluster.x-k8s.io" path="//ipam.cluster.x-k8s.io/ipaddressclaims" 2025-12-08T17:46:08.125458298+00:00 stderr F I1208 17:46:08.125360 12 cacher.go:469] cacher (ipaddressclaims.ipam.cluster.x-k8s.io): initialized 2025-12-08T17:46:08.125458298+00:00 stderr F I1208 17:46:08.125399 12 reflector.go:430] "Caches populated" type="ipam.cluster.x-k8s.io/v1beta1, Kind=IPAddressClaim" reflector="storage/cacher.go:/ipam.cluster.x-k8s.io/ipaddressclaims" 2025-12-08T17:46:08.147447838+00:00 stderr F I1208 17:46:08.147285 12 store.go:1663] "Monitoring resource count at path" resource="clusterversions.config.openshift.io" path="//config.openshift.io/clusterversions" 2025-12-08T17:46:08.149657654+00:00 stderr F I1208 17:46:08.149538 12 cacher.go:469] cacher (clusterversions.config.openshift.io): initialized 2025-12-08T17:46:08.149657654+00:00 stderr F I1208 17:46:08.149572 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=ClusterVersion" reflector="storage/cacher.go:/config.openshift.io/clusterversions" 2025-12-08T17:46:08.219980715+00:00 stderr F I1208 17:46:08.219809 12 store.go:1663] "Monitoring resource count at path" resource="machines.machine.openshift.io" path="//machine.openshift.io/machines" 2025-12-08T17:46:08.222737188+00:00 stderr F I1208 17:46:08.222595 12 cacher.go:469] cacher (machines.machine.openshift.io): initialized 2025-12-08T17:46:08.222737188+00:00 stderr F I1208 17:46:08.222656 12 reflector.go:430] "Caches populated" type="machine.openshift.io/v1beta1, Kind=Machine" 
reflector="storage/cacher.go:/machine.openshift.io/machines" 2025-12-08T17:46:08.290939535+00:00 stderr F I1208 17:46:08.290753 12 store.go:1663] "Monitoring resource count at path" resource="networks.operator.openshift.io" path="//operator.openshift.io/networks" 2025-12-08T17:46:08.293274315+00:00 stderr F I1208 17:46:08.293163 12 cacher.go:469] cacher (networks.operator.openshift.io): initialized 2025-12-08T17:46:08.293274315+00:00 stderr F I1208 17:46:08.293237 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=Network" reflector="storage/cacher.go:/operator.openshift.io/networks" 2025-12-08T17:46:08.421784322+00:00 stderr F I1208 17:46:08.421609 12 store.go:1663] "Monitoring resource count at path" resource="machinesets.machine.openshift.io" path="//machine.openshift.io/machinesets" 2025-12-08T17:46:08.423485623+00:00 stderr F I1208 17:46:08.423305 12 cacher.go:469] cacher (machinesets.machine.openshift.io): initialized 2025-12-08T17:46:08.423485623+00:00 stderr F I1208 17:46:08.423364 12 reflector.go:430] "Caches populated" type="machine.openshift.io/v1beta1, Kind=MachineSet" reflector="storage/cacher.go:/machine.openshift.io/machinesets" 2025-12-08T17:46:08.532532986+00:00 stderr F I1208 17:46:08.531509 12 store.go:1663] "Monitoring resource count at path" resource="metal3remediations.infrastructure.cluster.x-k8s.io" path="//infrastructure.cluster.x-k8s.io/metal3remediations" 2025-12-08T17:46:08.533513106+00:00 stderr F I1208 17:46:08.533306 12 cacher.go:469] cacher (metal3remediations.infrastructure.cluster.x-k8s.io): initialized 2025-12-08T17:46:08.533513106+00:00 stderr F I1208 17:46:08.533346 12 reflector.go:430] "Caches populated" type="infrastructure.cluster.x-k8s.io/v1alpha5, Kind=Metal3Remediation" reflector="storage/cacher.go:/infrastructure.cluster.x-k8s.io/metal3remediations" 2025-12-08T17:46:08.542504376+00:00 stderr F I1208 17:46:08.541813 12 store.go:1663] "Monitoring resource count at path" resource="metal3remediations.infrastructure.cluster.x-k8s.io" path="//infrastructure.cluster.x-k8s.io/metal3remediations" 2025-12-08T17:46:08.544111383+00:00 stderr F I1208 17:46:08.543919 12 cacher.go:469] cacher (metal3remediations.infrastructure.cluster.x-k8s.io): initialized 2025-12-08T17:46:08.544111383+00:00 stderr F I1208 17:46:08.543959 12 reflector.go:430] "Caches populated" type="infrastructure.cluster.x-k8s.io/v1beta1, Kind=Metal3Remediation" reflector="storage/cacher.go:/infrastructure.cluster.x-k8s.io/metal3remediations" 2025-12-08T17:46:08.559758784+00:00 stderr F I1208 17:46:08.559556 12 store.go:1663] "Monitoring resource count at path" resource="machinehealthchecks.machine.openshift.io" path="//machine.openshift.io/machinehealthchecks" 2025-12-08T17:46:08.562000281+00:00 stderr F I1208 17:46:08.561855 12 cacher.go:469] cacher (machinehealthchecks.machine.openshift.io): initialized 2025-12-08T17:46:08.562065443+00:00 stderr F I1208 17:46:08.562004 12 reflector.go:430] "Caches populated" type="machine.openshift.io/v1beta1, Kind=MachineHealthCheck" reflector="storage/cacher.go:/machine.openshift.io/machinehealthchecks" 2025-12-08T17:46:08.584623100+00:00 stderr F I1208 17:46:08.584427 12 store.go:1663] "Monitoring resource count at path" resource="metal3remediationtemplates.infrastructure.cluster.x-k8s.io" path="//infrastructure.cluster.x-k8s.io/metal3remediationtemplates" 2025-12-08T17:46:08.586989351+00:00 stderr F I1208 17:46:08.586834 12 cacher.go:469] cacher (metal3remediationtemplates.infrastructure.cluster.x-k8s.io): initialized 
2025-12-08T17:46:08.587073224+00:00 stderr F I1208 17:46:08.586996 12 reflector.go:430] "Caches populated" type="infrastructure.cluster.x-k8s.io/v1alpha5, Kind=Metal3RemediationTemplate" reflector="storage/cacher.go:/infrastructure.cluster.x-k8s.io/metal3remediationtemplates" 2025-12-08T17:46:08.591603379+00:00 stderr F I1208 17:46:08.591468 12 store.go:1663] "Monitoring resource count at path" resource="metal3remediationtemplates.infrastructure.cluster.x-k8s.io" path="//infrastructure.cluster.x-k8s.io/metal3remediationtemplates" 2025-12-08T17:46:08.593811036+00:00 stderr F I1208 17:46:08.593671 12 cacher.go:469] cacher (metal3remediationtemplates.infrastructure.cluster.x-k8s.io): initialized 2025-12-08T17:46:08.593811036+00:00 stderr F I1208 17:46:08.593688 12 reflector.go:430] "Caches populated" type="infrastructure.cluster.x-k8s.io/v1beta1, Kind=Metal3RemediationTemplate" reflector="storage/cacher.go:/infrastructure.cluster.x-k8s.io/metal3remediationtemplates" 2025-12-08T17:46:08.654816467+00:00 stderr F I1208 17:46:08.654629 12 store.go:1663] "Monitoring resource count at path" resource="proxies.config.openshift.io" path="//config.openshift.io/proxies" 2025-12-08T17:46:08.656801617+00:00 stderr F I1208 17:46:08.656684 12 cacher.go:469] cacher (proxies.config.openshift.io): initialized 2025-12-08T17:46:08.656801617+00:00 stderr F I1208 17:46:08.656712 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=Proxy" reflector="storage/cacher.go:/config.openshift.io/proxies" 2025-12-08T17:46:08.698390584+00:00 stderr F I1208 17:46:08.698184 12 store.go:1663] "Monitoring resource count at path" resource="alertrelabelconfigs.monitoring.openshift.io" path="//monitoring.openshift.io/alertrelabelconfigs" 2025-12-08T17:46:08.699241980+00:00 stderr F I1208 17:46:08.699145 12 cacher.go:469] cacher (alertrelabelconfigs.monitoring.openshift.io): initialized 2025-12-08T17:46:08.699313633+00:00 stderr F I1208 17:46:08.699241 12 reflector.go:430] "Caches populated" type="monitoring.openshift.io/v1, Kind=AlertRelabelConfig" reflector="storage/cacher.go:/monitoring.openshift.io/alertrelabelconfigs" 2025-12-08T17:46:08.709720954+00:00 stderr F I1208 17:46:08.709559 12 store.go:1663] "Monitoring resource count at path" resource="clusterresourcequotas.quota.openshift.io" path="//quota.openshift.io/clusterresourcequotas" 2025-12-08T17:46:08.710750476+00:00 stderr F I1208 17:46:08.710647 12 cacher.go:469] cacher (clusterresourcequotas.quota.openshift.io): initialized 2025-12-08T17:46:08.710750476+00:00 stderr F I1208 17:46:08.710676 12 reflector.go:430] "Caches populated" type="quota.openshift.io/v1, Kind=ClusterResourceQuota" reflector="storage/cacher.go:/quota.openshift.io/clusterresourcequotas" 2025-12-08T17:46:08.996936526+00:00 stderr F I1208 17:46:08.996745 12 store.go:1663] "Monitoring resource count at path" resource="referencegrants.gateway.networking.k8s.io" path="//gateway.networking.k8s.io/referencegrants" 2025-12-08T17:46:08.998180822+00:00 stderr F I1208 17:46:08.998073 12 cacher.go:469] cacher (referencegrants.gateway.networking.k8s.io): initialized 2025-12-08T17:46:08.998180822+00:00 stderr F I1208 17:46:08.998103 12 reflector.go:430] "Caches populated" type="gateway.networking.k8s.io/v1beta1, Kind=ReferenceGrant" reflector="storage/cacher.go:/gateway.networking.k8s.io/referencegrants" 2025-12-08T17:46:09.176610538+00:00 stderr F I1208 17:46:09.176411 12 store.go:1663] "Monitoring resource count at path" resource="infrastructures.config.openshift.io" 
path="//config.openshift.io/infrastructures" 2025-12-08T17:46:09.181974330+00:00 stderr F I1208 17:46:09.181848 12 cacher.go:469] cacher (infrastructures.config.openshift.io): initialized 2025-12-08T17:46:09.181974330+00:00 stderr F I1208 17:46:09.181919 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=Infrastructure" reflector="storage/cacher.go:/config.openshift.io/infrastructures" 2025-12-08T17:46:09.198456774+00:00 stderr F I1208 17:46:09.198331 12 store.go:1663] "Monitoring resource count at path" resource="podmonitors.monitoring.coreos.com" path="//monitoring.coreos.com/podmonitors" 2025-12-08T17:46:09.199957319+00:00 stderr F I1208 17:46:09.199763 12 cacher.go:469] cacher (podmonitors.monitoring.coreos.com): initialized 2025-12-08T17:46:09.199957319+00:00 stderr F I1208 17:46:09.199805 12 reflector.go:430] "Caches populated" type="monitoring.coreos.com/v1, Kind=PodMonitor" reflector="storage/cacher.go:/monitoring.coreos.com/podmonitors" 2025-12-08T17:46:09.244704752+00:00 stderr F I1208 17:46:09.244564 12 store.go:1663] "Monitoring resource count at path" resource="imagepolicies.config.openshift.io" path="//config.openshift.io/imagepolicies" 2025-12-08T17:46:09.246340682+00:00 stderr F I1208 17:46:09.246167 12 cacher.go:469] cacher (imagepolicies.config.openshift.io): initialized 2025-12-08T17:46:09.246340682+00:00 stderr F I1208 17:46:09.246192 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=ImagePolicy" reflector="storage/cacher.go:/config.openshift.io/imagepolicies" 2025-12-08T17:46:09.260292340+00:00 stderr F I1208 17:46:09.260179 12 store.go:1663] "Monitoring resource count at path" resource="egressrouters.network.operator.openshift.io" path="//network.operator.openshift.io/egressrouters" 2025-12-08T17:46:09.262632581+00:00 stderr F I1208 17:46:09.262515 12 cacher.go:469] cacher (egressrouters.network.operator.openshift.io): initialized 2025-12-08T17:46:09.262632581+00:00 stderr F I1208 17:46:09.262549 12 reflector.go:430] "Caches populated" type="network.operator.openshift.io/v1, Kind=EgressRouter" reflector="storage/cacher.go:/network.operator.openshift.io/egressrouters" 2025-12-08T17:46:09.275528017+00:00 stderr F I1208 17:46:09.274993 12 store.go:1663] "Monitoring resource count at path" resource="egressips.k8s.ovn.org" path="//k8s.ovn.org/egressips" 2025-12-08T17:46:09.276102865+00:00 stderr F I1208 17:46:09.276043 12 cacher.go:469] cacher (egressips.k8s.ovn.org): initialized 2025-12-08T17:46:09.276102865+00:00 stderr F I1208 17:46:09.276076 12 reflector.go:430] "Caches populated" type="k8s.ovn.org/v1, Kind=EgressIP" reflector="storage/cacher.go:/k8s.ovn.org/egressips" 2025-12-08T17:46:09.358609111+00:00 stderr F I1208 17:46:09.358423 12 store.go:1663] "Monitoring resource count at path" resource="clusterserviceversions.operators.coreos.com" path="//operators.coreos.com/clusterserviceversions" 2025-12-08T17:46:09.361332993+00:00 stderr F I1208 17:46:09.361219 12 cacher.go:469] cacher (clusterserviceversions.operators.coreos.com): initialized 2025-12-08T17:46:09.361332993+00:00 stderr F I1208 17:46:09.361253 12 reflector.go:430] "Caches populated" type="operators.coreos.com/v1alpha1, Kind=ClusterServiceVersion" reflector="storage/cacher.go:/operators.coreos.com/clusterserviceversions" 2025-12-08T17:46:09.481215022+00:00 stderr F I1208 17:46:09.481043 12 store.go:1663] "Monitoring resource count at path" resource="machineconfigpools.machineconfiguration.openshift.io" 
path="//machineconfiguration.openshift.io/machineconfigpools" 2025-12-08T17:46:09.485633844+00:00 stderr F I1208 17:46:09.485496 12 cacher.go:469] cacher (machineconfigpools.machineconfiguration.openshift.io): initialized 2025-12-08T17:46:09.485633844+00:00 stderr F I1208 17:46:09.485544 12 reflector.go:430] "Caches populated" type="machineconfiguration.openshift.io/v1, Kind=MachineConfigPool" reflector="storage/cacher.go:/machineconfiguration.openshift.io/machineconfigpools" 2025-12-08T17:46:09.587513192+00:00 stderr F I1208 17:46:09.587217 12 store.go:1663] "Monitoring resource count at path" resource="operatorconditions.operators.coreos.com" path="//operators.coreos.com/operatorconditions" 2025-12-08T17:46:09.590007457+00:00 stderr F I1208 17:46:09.589806 12 cacher.go:469] cacher (operatorconditions.operators.coreos.com): initialized 2025-12-08T17:46:09.590007457+00:00 stderr F I1208 17:46:09.589856 12 reflector.go:430] "Caches populated" type="operators.coreos.com/v1, Kind=OperatorCondition" reflector="storage/cacher.go:/operators.coreos.com/operatorconditions" 2025-12-08T17:46:09.600706818+00:00 stderr F I1208 17:46:09.600534 12 store.go:1663] "Monitoring resource count at path" resource="operatorconditions.operators.coreos.com" path="//operators.coreos.com/operatorconditions" 2025-12-08T17:46:09.602595845+00:00 stderr F I1208 17:46:09.602477 12 cacher.go:469] cacher (operatorconditions.operators.coreos.com): initialized 2025-12-08T17:46:09.602595845+00:00 stderr F I1208 17:46:09.602518 12 reflector.go:430] "Caches populated" type="operators.coreos.com/v2, Kind=OperatorCondition" reflector="storage/cacher.go:/operators.coreos.com/operatorconditions" 2025-12-08T17:46:09.645135061+00:00 stderr F I1208 17:46:09.644999 12 store.go:1663] "Monitoring resource count at path" resource="egressqoses.k8s.ovn.org" path="//k8s.ovn.org/egressqoses" 2025-12-08T17:46:09.647411580+00:00 stderr F I1208 17:46:09.647288 12 cacher.go:469] cacher (egressqoses.k8s.ovn.org): initialized 2025-12-08T17:46:09.647465981+00:00 stderr F I1208 17:46:09.647418 12 reflector.go:430] "Caches populated" type="k8s.ovn.org/v1, Kind=EgressQoS" reflector="storage/cacher.go:/k8s.ovn.org/egressqoses" 2025-12-08T17:46:09.675046629+00:00 stderr F I1208 17:46:09.674826 12 store.go:1663] "Monitoring resource count at path" resource="featuregates.config.openshift.io" path="//config.openshift.io/featuregates" 2025-12-08T17:46:09.678463182+00:00 stderr F I1208 17:46:09.678321 12 cacher.go:469] cacher (featuregates.config.openshift.io): initialized 2025-12-08T17:46:09.678463182+00:00 stderr F I1208 17:46:09.678370 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=FeatureGate" reflector="storage/cacher.go:/config.openshift.io/featuregates" 2025-12-08T17:46:09.730086582+00:00 stderr F I1208 17:46:09.729943 12 store.go:1663] "Monitoring resource count at path" resource="subscriptions.operators.coreos.com" path="//operators.coreos.com/subscriptions" 2025-12-08T17:46:09.731505964+00:00 stderr F I1208 17:46:09.731393 12 cacher.go:469] cacher (subscriptions.operators.coreos.com): initialized 2025-12-08T17:46:09.731557596+00:00 stderr F I1208 17:46:09.731503 12 reflector.go:430] "Caches populated" type="operators.coreos.com/v1alpha1, Kind=Subscription" reflector="storage/cacher.go:/operators.coreos.com/subscriptions" 2025-12-08T17:46:09.799837376+00:00 stderr F I1208 17:46:09.799348 12 store.go:1663] "Monitoring resource count at path" resource="openshiftapiservers.operator.openshift.io" 
path="//operator.openshift.io/openshiftapiservers" 2025-12-08T17:46:09.803337370+00:00 stderr F I1208 17:46:09.803185 12 cacher.go:469] cacher (openshiftapiservers.operator.openshift.io): initialized 2025-12-08T17:46:09.803337370+00:00 stderr F I1208 17:46:09.803221 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=OpenShiftAPIServer" reflector="storage/cacher.go:/operator.openshift.io/openshiftapiservers" 2025-12-08T17:46:09.969017803+00:00 stderr F I1208 17:46:09.968754 12 store.go:1663] "Monitoring resource count at path" resource="kubeschedulers.operator.openshift.io" path="//operator.openshift.io/kubeschedulers" 2025-12-08T17:46:09.971935901+00:00 stderr F I1208 17:46:09.971823 12 cacher.go:469] cacher (kubeschedulers.operator.openshift.io): initialized 2025-12-08T17:46:09.971935901+00:00 stderr F I1208 17:46:09.971860 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=KubeScheduler" reflector="storage/cacher.go:/operator.openshift.io/kubeschedulers" 2025-12-08T17:46:09.997632452+00:00 stderr F I1208 17:46:09.997503 12 store.go:1663] "Monitoring resource count at path" resource="alertmanagers.monitoring.coreos.com" path="//monitoring.coreos.com/alertmanagers" 2025-12-08T17:46:09.998585081+00:00 stderr F I1208 17:46:09.998494 12 cacher.go:469] cacher (alertmanagers.monitoring.coreos.com): initialized 2025-12-08T17:46:09.998585081+00:00 stderr F I1208 17:46:09.998538 12 reflector.go:430] "Caches populated" type="monitoring.coreos.com/v1, Kind=Alertmanager" reflector="storage/cacher.go:/monitoring.coreos.com/alertmanagers" 2025-12-08T17:46:10.183083478+00:00 stderr F I1208 17:46:10.182908 12 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/config/informers/externalversions/factory.go:125" 2025-12-08T17:46:10.186056127+00:00 stderr F I1208 17:46:10.185648 12 store.go:1663] "Monitoring resource count at path" resource="ingresscontrollers.operator.openshift.io" path="//operator.openshift.io/ingresscontrollers" 2025-12-08T17:46:10.188560453+00:00 stderr F I1208 17:46:10.188362 12 cacher.go:469] cacher (ingresscontrollers.operator.openshift.io): initialized 2025-12-08T17:46:10.188560453+00:00 stderr F I1208 17:46:10.188402 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=IngressController" reflector="storage/cacher.go:/operator.openshift.io/ingresscontrollers" 2025-12-08T17:46:10.196704427+00:00 stderr F I1208 17:46:10.196530 12 store.go:1663] "Monitoring resource count at path" resource="podnetworkconnectivitychecks.controlplane.operator.openshift.io" path="//controlplane.operator.openshift.io/podnetworkconnectivitychecks" 2025-12-08T17:46:10.201452860+00:00 stderr F I1208 17:46:10.201165 12 reflector.go:430] "Caches populated" type="*v1.Group" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/user/informers/externalversions/factory.go:125" 2025-12-08T17:46:10.201754849+00:00 stderr F I1208 17:46:10.201622 12 cacher.go:469] cacher (podnetworkconnectivitychecks.controlplane.operator.openshift.io): initialized 2025-12-08T17:46:10.201754849+00:00 stderr F I1208 17:46:10.201663 12 reflector.go:430] "Caches populated" type="controlplane.operator.openshift.io/v1alpha1, Kind=PodNetworkConnectivityCheck" reflector="storage/cacher.go:/controlplane.operator.openshift.io/podnetworkconnectivitychecks" 2025-12-08T17:46:10.202529652+00:00 stderr F I1208 17:46:10.202320 12 reflector.go:430] "Caches populated" 
type="*v1.ClusterResourceQuota" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/quota/informers/externalversions/factory.go:125" 2025-12-08T17:46:10.210527542+00:00 stderr F I1208 17:46:10.210394 12 store.go:1663] "Monitoring resource count at path" resource="projecthelmchartrepositories.helm.openshift.io" path="//helm.openshift.io/projecthelmchartrepositories" 2025-12-08T17:46:10.212400059+00:00 stderr F I1208 17:46:10.212163 12 cacher.go:469] cacher (projecthelmchartrepositories.helm.openshift.io): initialized 2025-12-08T17:46:10.212400059+00:00 stderr F I1208 17:46:10.212188 12 reflector.go:430] "Caches populated" type="helm.openshift.io/v1beta1, Kind=ProjectHelmChartRepository" reflector="storage/cacher.go:/helm.openshift.io/projecthelmchartrepositories" 2025-12-08T17:46:10.242691758+00:00 stderr F I1208 17:46:10.242543 12 store.go:1663] "Monitoring resource count at path" resource="securitycontextconstraints.security.openshift.io" path="//security.openshift.io/securitycontextconstraints" 2025-12-08T17:46:10.261838282+00:00 stderr F I1208 17:46:10.261717 12 cacher.go:469] cacher (securitycontextconstraints.security.openshift.io): initialized 2025-12-08T17:46:10.261838282+00:00 stderr F I1208 17:46:10.261760 12 reflector.go:430] "Caches populated" type="security.openshift.io/v1, Kind=SecurityContextConstraints" reflector="storage/cacher.go:/security.openshift.io/securitycontextconstraints" 2025-12-08T17:46:10.262509673+00:00 stderr F I1208 17:46:10.262434 12 store.go:1663] "Monitoring resource count at path" resource="apirequestcounts.apiserver.openshift.io" path="//apiserver.openshift.io/apirequestcounts" 2025-12-08T17:46:10.265159532+00:00 stderr F I1208 17:46:10.265061 12 cacher.go:469] cacher (apirequestcounts.apiserver.openshift.io): initialized 2025-12-08T17:46:10.265159532+00:00 stderr F I1208 17:46:10.265095 12 reflector.go:430] "Caches populated" type="apiserver.openshift.io/v1, Kind=APIRequestCount" reflector="storage/cacher.go:/apiserver.openshift.io/apirequestcounts" 2025-12-08T17:46:10.280312207+00:00 stderr F I1208 17:46:10.280203 12 store.go:1663] "Monitoring resource count at path" resource="networks.config.openshift.io" path="//config.openshift.io/networks" 2025-12-08T17:46:10.280762510+00:00 stderr F I1208 17:46:10.280675 12 reflector.go:430] "Caches populated" type="*v1.SecurityContextConstraints" reflector="github.com/openshift/client-go@v0.0.0-20250710075018-396b36f983ee/security/informers/externalversions/factory.go:125" 2025-12-08T17:46:10.281634577+00:00 stderr F I1208 17:46:10.281567 12 cacher.go:469] cacher (networks.config.openshift.io): initialized 2025-12-08T17:46:10.281634577+00:00 stderr F I1208 17:46:10.281591 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=Network" reflector="storage/cacher.go:/config.openshift.io/networks" 2025-12-08T17:46:10.295243355+00:00 stderr F I1208 17:46:10.295122 12 store.go:1663] "Monitoring resource count at path" resource="adminnetworkpolicies.policy.networking.k8s.io" path="//policy.networking.k8s.io/adminnetworkpolicies" 2025-12-08T17:46:10.297323417+00:00 stderr F I1208 17:46:10.297221 12 cacher.go:469] cacher (adminnetworkpolicies.policy.networking.k8s.io): initialized 2025-12-08T17:46:10.297323417+00:00 stderr F I1208 17:46:10.297256 12 reflector.go:430] "Caches populated" type="policy.networking.k8s.io/v1alpha1, Kind=AdminNetworkPolicy" reflector="storage/cacher.go:/policy.networking.k8s.io/adminnetworkpolicies" 2025-12-08T17:46:10.330644218+00:00 stderr F I1208 
17:46:10.330532 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Resource=infrastructures" reflector="runtime/asm_amd64.s:1700" 2025-12-08T17:46:10.445187076+00:00 stderr F I1208 17:46:10.444821 12 store.go:1663] "Monitoring resource count at path" resource="probes.monitoring.coreos.com" path="//monitoring.coreos.com/probes" 2025-12-08T17:46:10.446291049+00:00 stderr F I1208 17:46:10.446168 12 cacher.go:469] cacher (probes.monitoring.coreos.com): initialized 2025-12-08T17:46:10.446291049+00:00 stderr F I1208 17:46:10.446202 12 reflector.go:430] "Caches populated" type="monitoring.coreos.com/v1, Kind=Probe" reflector="storage/cacher.go:/monitoring.coreos.com/probes" 2025-12-08T17:46:10.459073973+00:00 stderr F I1208 17:46:10.458757 12 store.go:1663] "Monitoring resource count at path" resource="kubeapiservers.operator.openshift.io" path="//operator.openshift.io/kubeapiservers" 2025-12-08T17:46:10.463940889+00:00 stderr F I1208 17:46:10.463744 12 cacher.go:469] cacher (kubeapiservers.operator.openshift.io): initialized 2025-12-08T17:46:10.463940889+00:00 stderr F I1208 17:46:10.463775 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=KubeAPIServer" reflector="storage/cacher.go:/operator.openshift.io/kubeapiservers" 2025-12-08T17:46:10.610715654+00:00 stderr F I1208 17:46:10.610536 12 store.go:1663] "Monitoring resource count at path" resource="ipaddresses.ipam.cluster.x-k8s.io" path="//ipam.cluster.x-k8s.io/ipaddresses" 2025-12-08T17:46:10.613185638+00:00 stderr F I1208 17:46:10.613031 12 cacher.go:469] cacher (ipaddresses.ipam.cluster.x-k8s.io): initialized 2025-12-08T17:46:10.613185638+00:00 stderr F I1208 17:46:10.613084 12 reflector.go:430] "Caches populated" type="ipam.cluster.x-k8s.io/v1alpha1, Kind=IPAddress" reflector="storage/cacher.go:/ipam.cluster.x-k8s.io/ipaddresses" 2025-12-08T17:46:10.620485068+00:00 stderr F I1208 17:46:10.620314 12 store.go:1663] "Monitoring resource count at path" resource="ipaddresses.ipam.cluster.x-k8s.io" path="//ipam.cluster.x-k8s.io/ipaddresses" 2025-12-08T17:46:10.622502828+00:00 stderr F I1208 17:46:10.622388 12 cacher.go:469] cacher (ipaddresses.ipam.cluster.x-k8s.io): initialized 2025-12-08T17:46:10.622502828+00:00 stderr F I1208 17:46:10.622440 12 reflector.go:430] "Caches populated" type="ipam.cluster.x-k8s.io/v1beta1, Kind=IPAddress" reflector="storage/cacher.go:/ipam.cluster.x-k8s.io/ipaddresses" 2025-12-08T17:46:10.637073755+00:00 stderr F I1208 17:46:10.636865 12 store.go:1663] "Monitoring resource count at path" resource="machineautoscalers.autoscaling.openshift.io" path="//autoscaling.openshift.io/machineautoscalers" 2025-12-08T17:46:10.638255011+00:00 stderr F I1208 17:46:10.638134 12 cacher.go:469] cacher (machineautoscalers.autoscaling.openshift.io): initialized 2025-12-08T17:46:10.638255011+00:00 stderr F I1208 17:46:10.638195 12 reflector.go:430] "Caches populated" type="autoscaling.openshift.io/v1beta1, Kind=MachineAutoscaler" reflector="storage/cacher.go:/autoscaling.openshift.io/machineautoscalers" 2025-12-08T17:46:10.688947802+00:00 stderr F I1208 17:46:10.688728 12 store.go:1663] "Monitoring resource count at path" resource="kubecontrollermanagers.operator.openshift.io" path="//operator.openshift.io/kubecontrollermanagers" 2025-12-08T17:46:10.692916141+00:00 stderr F I1208 17:46:10.692746 12 cacher.go:469] cacher (kubecontrollermanagers.operator.openshift.io): initialized 2025-12-08T17:46:10.692916141+00:00 stderr F I1208 17:46:10.692773 12 reflector.go:430] "Caches populated" 
type="operator.openshift.io/v1, Kind=KubeControllerManager" reflector="storage/cacher.go:/operator.openshift.io/kubecontrollermanagers" 2025-12-08T17:46:10.701834520+00:00 stderr F I1208 17:46:10.701678 12 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io 2025-12-08T17:46:10.976653099+00:00 stderr F I1208 17:46:10.976180 12 store.go:1663] "Monitoring resource count at path" resource="prometheuses.monitoring.coreos.com" path="//monitoring.coreos.com/prometheuses" 2025-12-08T17:46:10.979796103+00:00 stderr F I1208 17:46:10.979726 12 cacher.go:469] cacher (prometheuses.monitoring.coreos.com): initialized 2025-12-08T17:46:10.979796103+00:00 stderr F I1208 17:46:10.979782 12 reflector.go:430] "Caches populated" type="monitoring.coreos.com/v1, Kind=Prometheus" reflector="storage/cacher.go:/monitoring.coreos.com/prometheuses" 2025-12-08T17:46:11.049404912+00:00 stderr F I1208 17:46:11.049288 12 store.go:1663] "Monitoring resource count at path" resource="alertmanagerconfigs.monitoring.coreos.com" path="//monitoring.coreos.com/alertmanagerconfigs" 2025-12-08T17:46:11.050456453+00:00 stderr F I1208 17:46:11.050377 12 cacher.go:469] cacher (alertmanagerconfigs.monitoring.coreos.com): initialized 2025-12-08T17:46:11.050491314+00:00 stderr F I1208 17:46:11.050444 12 reflector.go:430] "Caches populated" type="monitoring.coreos.com/v1alpha1, Kind=AlertmanagerConfig" reflector="storage/cacher.go:/monitoring.coreos.com/alertmanagerconfigs" 2025-12-08T17:46:11.062658949+00:00 stderr F I1208 17:46:11.062562 12 store.go:1663] "Monitoring resource count at path" resource="alertmanagerconfigs.monitoring.coreos.com" path="//monitoring.coreos.com/alertmanagerconfigs" 2025-12-08T17:46:11.063351971+00:00 stderr F I1208 17:46:11.063287 12 cacher.go:469] cacher (alertmanagerconfigs.monitoring.coreos.com): initialized 2025-12-08T17:46:11.063351971+00:00 stderr F I1208 17:46:11.063332 12 reflector.go:430] "Caches populated" type="monitoring.coreos.com/v1beta1, Kind=AlertmanagerConfig" reflector="storage/cacher.go:/monitoring.coreos.com/alertmanagerconfigs" 2025-12-08T17:46:11.076917667+00:00 stderr F I1208 17:46:11.076814 12 store.go:1663] "Monitoring resource count at path" resource="clusteroperators.config.openshift.io" path="//config.openshift.io/clusteroperators" 2025-12-08T17:46:11.084782463+00:00 stderr F I1208 17:46:11.084701 12 store.go:1663] "Monitoring resource count at path" resource="nodeslicepools.whereabouts.cni.cncf.io" path="//whereabouts.cni.cncf.io/nodeslicepools" 2025-12-08T17:46:11.086190746+00:00 stderr F I1208 17:46:11.085711 12 cacher.go:469] cacher (nodeslicepools.whereabouts.cni.cncf.io): initialized 2025-12-08T17:46:11.086190746+00:00 stderr F I1208 17:46:11.085732 12 reflector.go:430] "Caches populated" type="whereabouts.cni.cncf.io/v1alpha1, Kind=NodeSlicePool" reflector="storage/cacher.go:/whereabouts.cni.cncf.io/nodeslicepools" 2025-12-08T17:46:11.098985540+00:00 stderr F I1208 17:46:11.098863 12 cacher.go:469] cacher (clusteroperators.config.openshift.io): initialized 2025-12-08T17:46:11.098985540+00:00 stderr F I1208 17:46:11.098937 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=ClusterOperator" reflector="storage/cacher.go:/config.openshift.io/clusteroperators" 2025-12-08T17:46:11.164098815+00:00 stderr F I1208 17:46:11.163975 12 store.go:1663] "Monitoring resource count at path" resource="rolebindingrestrictions.authorization.openshift.io" path="//authorization.openshift.io/rolebindingrestrictions" 
2025-12-08T17:46:11.166234208+00:00 stderr F I1208 17:46:11.166153 12 cacher.go:469] cacher (rolebindingrestrictions.authorization.openshift.io): initialized 2025-12-08T17:46:11.166234208+00:00 stderr F I1208 17:46:11.166203 12 reflector.go:430] "Caches populated" type="authorization.openshift.io/v1, Kind=RoleBindingRestriction" reflector="storage/cacher.go:/authorization.openshift.io/rolebindingrestrictions" 2025-12-08T17:46:11.386961394+00:00 stderr F I1208 17:46:11.386738 12 store.go:1663] "Monitoring resource count at path" resource="adminpolicybasedexternalroutes.k8s.ovn.org" path="//k8s.ovn.org/adminpolicybasedexternalroutes" 2025-12-08T17:46:11.389012925+00:00 stderr F I1208 17:46:11.388826 12 cacher.go:469] cacher (adminpolicybasedexternalroutes.k8s.ovn.org): initialized 2025-12-08T17:46:11.389012925+00:00 stderr F I1208 17:46:11.388867 12 reflector.go:430] "Caches populated" type="k8s.ovn.org/v1, Kind=AdminPolicyBasedExternalRoute" reflector="storage/cacher.go:/k8s.ovn.org/adminpolicybasedexternalroutes" 2025-12-08T17:46:11.442787109+00:00 stderr F I1208 17:46:11.442263 12 store.go:1663] "Monitoring resource count at path" resource="grpcroutes.gateway.networking.k8s.io" path="//gateway.networking.k8s.io/grpcroutes" 2025-12-08T17:46:11.443492370+00:00 stderr F I1208 17:46:11.443377 12 cacher.go:469] cacher (grpcroutes.gateway.networking.k8s.io): initialized 2025-12-08T17:46:11.443492370+00:00 stderr F I1208 17:46:11.443408 12 reflector.go:430] "Caches populated" type="gateway.networking.k8s.io/v1, Kind=GRPCRoute" reflector="storage/cacher.go:/gateway.networking.k8s.io/grpcroutes" 2025-12-08T17:46:11.551985067+00:00 stderr F I1208 17:46:11.551787 12 store.go:1663] "Monitoring resource count at path" resource="pinnedimagesets.machineconfiguration.openshift.io" path="//machineconfiguration.openshift.io/pinnedimagesets" 2025-12-08T17:46:11.554030149+00:00 stderr F I1208 17:46:11.553818 12 cacher.go:469] cacher (pinnedimagesets.machineconfiguration.openshift.io): initialized 2025-12-08T17:46:11.554030149+00:00 stderr F I1208 17:46:11.553866 12 reflector.go:430] "Caches populated" type="machineconfiguration.openshift.io/v1, Kind=PinnedImageSet" reflector="storage/cacher.go:/machineconfiguration.openshift.io/pinnedimagesets" 2025-12-08T17:46:11.697644689+00:00 stderr F I1208 17:46:11.697510 12 store.go:1663] "Monitoring resource count at path" resource="controllerconfigs.machineconfiguration.openshift.io" path="//machineconfiguration.openshift.io/controllerconfigs" 2025-12-08T17:46:11.701819685+00:00 stderr F I1208 17:46:11.701720 12 cacher.go:469] cacher (controllerconfigs.machineconfiguration.openshift.io): initialized 2025-12-08T17:46:11.701819685+00:00 stderr F I1208 17:46:11.701767 12 reflector.go:430] "Caches populated" type="machineconfiguration.openshift.io/v1, Kind=ControllerConfig" reflector="storage/cacher.go:/machineconfiguration.openshift.io/controllerconfigs" 2025-12-08T17:46:11.971922492+00:00 stderr F I1208 17:46:11.971738 12 store.go:1663] "Monitoring resource count at path" resource="httproutes.gateway.networking.k8s.io" path="//gateway.networking.k8s.io/httproutes" 2025-12-08T17:46:11.973122828+00:00 stderr F I1208 17:46:11.972994 12 cacher.go:469] cacher (httproutes.gateway.networking.k8s.io): initialized 2025-12-08T17:46:11.973122828+00:00 stderr F I1208 17:46:11.973051 12 reflector.go:430] "Caches populated" type="gateway.networking.k8s.io/v1, Kind=HTTPRoute" reflector="storage/cacher.go:/gateway.networking.k8s.io/httproutes" 2025-12-08T17:46:12.019112619+00:00 
stderr F I1208 17:46:12.018976 12 store.go:1663] "Monitoring resource count at path" resource="httproutes.gateway.networking.k8s.io" path="//gateway.networking.k8s.io/httproutes" 2025-12-08T17:46:12.020348475+00:00 stderr F I1208 17:46:12.020268 12 cacher.go:469] cacher (httproutes.gateway.networking.k8s.io): initialized 2025-12-08T17:46:12.020403667+00:00 stderr F I1208 17:46:12.020324 12 reflector.go:430] "Caches populated" type="gateway.networking.k8s.io/v1beta1, Kind=HTTPRoute" reflector="storage/cacher.go:/gateway.networking.k8s.io/httproutes" 2025-12-08T17:46:12.047186711+00:00 stderr F I1208 17:46:12.047062 12 store.go:1663] "Monitoring resource count at path" resource="overlappingrangeipreservations.whereabouts.cni.cncf.io" path="//whereabouts.cni.cncf.io/overlappingrangeipreservations" 2025-12-08T17:46:12.048734897+00:00 stderr F I1208 17:46:12.048646 12 cacher.go:469] cacher (overlappingrangeipreservations.whereabouts.cni.cncf.io): initialized 2025-12-08T17:46:12.048734897+00:00 stderr F I1208 17:46:12.048698 12 reflector.go:430] "Caches populated" type="whereabouts.cni.cncf.io/v1alpha1, Kind=OverlappingRangeIPReservation" reflector="storage/cacher.go:/whereabouts.cni.cncf.io/overlappingrangeipreservations" 2025-12-08T17:46:12.138747889+00:00 stderr F I1208 17:46:12.138657 12 store.go:1663] "Monitoring resource count at path" resource="installplans.operators.coreos.com" path="//operators.coreos.com/installplans" 2025-12-08T17:46:12.140733799+00:00 stderr F I1208 17:46:12.140656 12 cacher.go:469] cacher (installplans.operators.coreos.com): initialized 2025-12-08T17:46:12.140758970+00:00 stderr F I1208 17:46:12.140709 12 reflector.go:430] "Caches populated" type="operators.coreos.com/v1alpha1, Kind=InstallPlan" reflector="storage/cacher.go:/operators.coreos.com/installplans" 2025-12-08T17:46:12.229066191+00:00 stderr F I1208 17:46:12.228812 12 store.go:1663] "Monitoring resource count at path" resource="dnsrecords.ingress.operator.openshift.io" path="//ingress.operator.openshift.io/dnsrecords" 2025-12-08T17:46:12.231637178+00:00 stderr F I1208 17:46:12.231491 12 cacher.go:469] cacher (dnsrecords.ingress.operator.openshift.io): initialized 2025-12-08T17:46:12.231637178+00:00 stderr F I1208 17:46:12.231550 12 reflector.go:430] "Caches populated" type="ingress.operator.openshift.io/v1, Kind=DNSRecord" reflector="storage/cacher.go:/ingress.operator.openshift.io/dnsrecords" 2025-12-08T17:46:12.244513614+00:00 stderr F I1208 17:46:12.244393 12 store.go:1663] "Monitoring resource count at path" resource="machineconfigs.machineconfiguration.openshift.io" path="//machineconfiguration.openshift.io/machineconfigs" 2025-12-08T17:46:12.267260677+00:00 stderr F I1208 17:46:12.267137 12 store.go:1663] "Monitoring resource count at path" resource="alertingrules.monitoring.openshift.io" path="//monitoring.openshift.io/alertingrules" 2025-12-08T17:46:12.268795623+00:00 stderr F I1208 17:46:12.268684 12 cacher.go:469] cacher (alertingrules.monitoring.openshift.io): initialized 2025-12-08T17:46:12.268795623+00:00 stderr F I1208 17:46:12.268712 12 reflector.go:430] "Caches populated" type="monitoring.openshift.io/v1, Kind=AlertingRule" reflector="storage/cacher.go:/monitoring.openshift.io/alertingrules" 2025-12-08T17:46:12.269365250+00:00 stderr F I1208 17:46:12.268977 12 cacher.go:469] cacher (machineconfigs.machineconfiguration.openshift.io): initialized 2025-12-08T17:46:12.269365250+00:00 stderr F I1208 17:46:12.269012 12 reflector.go:430] "Caches populated" 
type="machineconfiguration.openshift.io/v1, Kind=MachineConfig" reflector="storage/cacher.go:/machineconfiguration.openshift.io/machineconfigs" 2025-12-08T17:46:12.315050741+00:00 stderr F I1208 17:46:12.314867 12 store.go:1663] "Monitoring resource count at path" resource="gateways.gateway.networking.k8s.io" path="//gateway.networking.k8s.io/gateways" 2025-12-08T17:46:12.317647139+00:00 stderr F I1208 17:46:12.317319 12 cacher.go:469] cacher (gateways.gateway.networking.k8s.io): initialized 2025-12-08T17:46:12.317647139+00:00 stderr F I1208 17:46:12.317372 12 reflector.go:430] "Caches populated" type="gateway.networking.k8s.io/v1, Kind=Gateway" reflector="storage/cacher.go:/gateway.networking.k8s.io/gateways" 2025-12-08T17:46:12.340104783+00:00 stderr F I1208 17:46:12.339484 12 store.go:1663] "Monitoring resource count at path" resource="gateways.gateway.networking.k8s.io" path="//gateway.networking.k8s.io/gateways" 2025-12-08T17:46:12.340933639+00:00 stderr F I1208 17:46:12.340830 12 cacher.go:469] cacher (gateways.gateway.networking.k8s.io): initialized 2025-12-08T17:46:12.340933639+00:00 stderr F I1208 17:46:12.340896 12 reflector.go:430] "Caches populated" type="gateway.networking.k8s.io/v1beta1, Kind=Gateway" reflector="storage/cacher.go:/gateway.networking.k8s.io/gateways" 2025-12-08T17:46:12.387490045+00:00 stderr F I1208 17:46:12.387383 12 store.go:1663] "Monitoring resource count at path" resource="operatorgroups.operators.coreos.com" path="//operators.coreos.com/operatorgroups" 2025-12-08T17:46:12.393853477+00:00 stderr F I1208 17:46:12.391945 12 cacher.go:469] cacher (operatorgroups.operators.coreos.com): initialized 2025-12-08T17:46:12.393853477+00:00 stderr F I1208 17:46:12.391991 12 reflector.go:430] "Caches populated" type="operators.coreos.com/v1, Kind=OperatorGroup" reflector="storage/cacher.go:/operators.coreos.com/operatorgroups" 2025-12-08T17:46:12.397369552+00:00 stderr F I1208 17:46:12.397283 12 store.go:1663] "Monitoring resource count at path" resource="operatorgroups.operators.coreos.com" path="//operators.coreos.com/operatorgroups" 2025-12-08T17:46:12.403321871+00:00 stderr F I1208 17:46:12.403235 12 cacher.go:469] cacher (operatorgroups.operators.coreos.com): initialized 2025-12-08T17:46:12.403338811+00:00 stderr F I1208 17:46:12.403293 12 reflector.go:430] "Caches populated" type="operators.coreos.com/v1alpha2, Kind=OperatorGroup" reflector="storage/cacher.go:/operators.coreos.com/operatorgroups" 2025-12-08T17:46:12.751281525+00:00 stderr F I1208 17:46:12.751089 12 store.go:1663] "Monitoring resource count at path" resource="servicemonitors.monitoring.coreos.com" path="//monitoring.coreos.com/servicemonitors" 2025-12-08T17:46:12.767746639+00:00 stderr F I1208 17:46:12.767571 12 cacher.go:469] cacher (servicemonitors.monitoring.coreos.com): initialized 2025-12-08T17:46:12.767746639+00:00 stderr F I1208 17:46:12.767623 12 reflector.go:430] "Caches populated" type="monitoring.coreos.com/v1, Kind=ServiceMonitor" reflector="storage/cacher.go:/monitoring.coreos.com/servicemonitors" 2025-12-08T17:46:12.909444683+00:00 stderr F I1208 17:46:12.909280 12 store.go:1663] "Monitoring resource count at path" resource="machineconfigurations.operator.openshift.io" path="//operator.openshift.io/machineconfigurations" 2025-12-08T17:46:12.911048820+00:00 stderr F I1208 17:46:12.910912 12 cacher.go:469] cacher (machineconfigurations.operator.openshift.io): initialized 2025-12-08T17:46:12.911048820+00:00 stderr F I1208 17:46:12.910947 12 reflector.go:430] "Caches populated" 
type="operator.openshift.io/v1, Kind=MachineConfiguration" reflector="storage/cacher.go:/operator.openshift.io/machineconfigurations" 2025-12-08T17:46:13.069080844+00:00 stderr F I1208 17:46:13.068923 12 store.go:1663] "Monitoring resource count at path" resource="catalogsources.operators.coreos.com" path="//operators.coreos.com/catalogsources" 2025-12-08T17:46:13.072401864+00:00 stderr F I1208 17:46:13.072245 12 cacher.go:469] cacher (catalogsources.operators.coreos.com): initialized 2025-12-08T17:46:13.072401864+00:00 stderr F I1208 17:46:13.072275 12 reflector.go:430] "Caches populated" type="operators.coreos.com/v1alpha1, Kind=CatalogSource" reflector="storage/cacher.go:/operators.coreos.com/catalogsources" 2025-12-08T17:46:13.235634513+00:00 stderr F I1208 17:46:13.235505 12 store.go:1663] "Monitoring resource count at path" resource="thanosrulers.monitoring.coreos.com" path="//monitoring.coreos.com/thanosrulers" 2025-12-08T17:46:13.237164140+00:00 stderr F I1208 17:46:13.237075 12 cacher.go:469] cacher (thanosrulers.monitoring.coreos.com): initialized 2025-12-08T17:46:13.237164140+00:00 stderr F I1208 17:46:13.237134 12 reflector.go:430] "Caches populated" type="monitoring.coreos.com/v1, Kind=ThanosRuler" reflector="storage/cacher.go:/monitoring.coreos.com/thanosrulers" 2025-12-08T17:46:13.573044810+00:00 stderr F I1208 17:46:13.572943 12 store.go:1663] "Monitoring resource count at path" resource="operatorpkis.network.operator.openshift.io" path="//network.operator.openshift.io/operatorpkis" 2025-12-08T17:46:13.575937948+00:00 stderr F I1208 17:46:13.575813 12 cacher.go:469] cacher (operatorpkis.network.operator.openshift.io): initialized 2025-12-08T17:46:13.575937948+00:00 stderr F I1208 17:46:13.575843 12 reflector.go:430] "Caches populated" type="network.operator.openshift.io/v1, Kind=OperatorPKI" reflector="storage/cacher.go:/network.operator.openshift.io/operatorpkis" 2025-12-08T17:46:13.843400646+00:00 stderr F I1208 17:46:13.843257 12 store.go:1663] "Monitoring resource count at path" resource="baselineadminnetworkpolicies.policy.networking.k8s.io" path="//policy.networking.k8s.io/baselineadminnetworkpolicies" 2025-12-08T17:46:13.844744876+00:00 stderr F I1208 17:46:13.844669 12 cacher.go:469] cacher (baselineadminnetworkpolicies.policy.networking.k8s.io): initialized 2025-12-08T17:46:13.844780117+00:00 stderr F I1208 17:46:13.844741 12 reflector.go:430] "Caches populated" type="policy.networking.k8s.io/v1alpha1, Kind=BaselineAdminNetworkPolicy" reflector="storage/cacher.go:/policy.networking.k8s.io/baselineadminnetworkpolicies" 2025-12-08T17:46:14.711723789+00:00 stderr F I1208 17:46:14.711572 12 store.go:1663] "Monitoring resource count at path" resource="controlplanemachinesets.machine.openshift.io" path="//machine.openshift.io/controlplanemachinesets" 2025-12-08T17:46:14.713361178+00:00 stderr F I1208 17:46:14.713254 12 cacher.go:469] cacher (controlplanemachinesets.machine.openshift.io): initialized 2025-12-08T17:46:14.713361178+00:00 stderr F I1208 17:46:14.713285 12 reflector.go:430] "Caches populated" type="machine.openshift.io/v1, Kind=ControlPlaneMachineSet" reflector="storage/cacher.go:/machine.openshift.io/controlplanemachinesets" 2025-12-08T17:46:15.268840021+00:00 stderr F I1208 17:46:15.268589 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=16 seatDemandAvg=0.1932605333420576 seatDemandStdev=1.3937589162088144 seatDemandSmoothed=14.69150144733967 fairFrac=2.2796127562642368 currentCL=16 concurrencyDenominator=16 
backstop=false 2025-12-08T17:46:15.909227722+00:00 stderr F I1208 17:46:15.908498 12 store.go:1663] "Monitoring resource count at path" resource="ippools.whereabouts.cni.cncf.io" path="//whereabouts.cni.cncf.io/ippools" 2025-12-08T17:46:15.909961785+00:00 stderr F I1208 17:46:15.909804 12 cacher.go:469] cacher (ippools.whereabouts.cni.cncf.io): initialized 2025-12-08T17:46:15.909961785+00:00 stderr F I1208 17:46:15.909851 12 reflector.go:430] "Caches populated" type="whereabouts.cni.cncf.io/v1alpha1, Kind=IPPool" reflector="storage/cacher.go:/whereabouts.cni.cncf.io/ippools" 2025-12-08T17:46:20.934781636+00:00 stderr F I1208 17:46:20.934570 12 controller.go:667] quota admission added evaluator for: podnetworkconnectivitychecks.controlplane.operator.openshift.io 2025-12-08T17:46:20.934781636+00:00 stderr F I1208 17:46:20.934620 12 controller.go:667] quota admission added evaluator for: podnetworkconnectivitychecks.controlplane.operator.openshift.io 2025-12-08T17:46:25.269261629+00:00 stderr F I1208 17:46:25.269009 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.000978114221705008 seatDemandStdev=0.03125951877867454 seatDemandSmoothed=14.354338379609866 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:46:26.636699864+00:00 stderr F I1208 17:46:26.636498 12 controller.go:667] quota admission added evaluator for: namespaces 2025-12-08T17:46:29.729932119+00:00 stderr F I1208 17:46:29.729790 12 store.go:1663] "Monitoring resource count at path" resource="consoles.operator.openshift.io" path="//operator.openshift.io/consoles" 2025-12-08T17:46:29.733338451+00:00 stderr F I1208 17:46:29.733265 12 cacher.go:469] cacher (consoles.operator.openshift.io): initialized 2025-12-08T17:46:29.733424234+00:00 stderr F I1208 17:46:29.733306 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=Console" reflector="storage/cacher.go:/operator.openshift.io/consoles" 2025-12-08T17:46:29.738411724+00:00 stderr F I1208 17:46:29.738325 12 store.go:1663] "Monitoring resource count at path" resource="consoleclidownloads.console.openshift.io" path="//console.openshift.io/consoleclidownloads" 2025-12-08T17:46:29.740314931+00:00 stderr F I1208 17:46:29.740240 12 cacher.go:469] cacher (consoleclidownloads.console.openshift.io): initialized 2025-12-08T17:46:29.740314931+00:00 stderr F I1208 17:46:29.740264 12 reflector.go:430] "Caches populated" type="console.openshift.io/v1, Kind=ConsoleCLIDownload" reflector="storage/cacher.go:/console.openshift.io/consoleclidownloads" 2025-12-08T17:46:29.769304121+00:00 stderr F I1208 17:46:29.769183 12 store.go:1663] "Monitoring resource count at path" resource="etcds.operator.openshift.io" path="//operator.openshift.io/etcds" 2025-12-08T17:46:29.772540708+00:00 stderr F I1208 17:46:29.772438 12 cacher.go:469] cacher (etcds.operator.openshift.io): initialized 2025-12-08T17:46:29.772540708+00:00 stderr F I1208 17:46:29.772468 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=Etcd" reflector="storage/cacher.go:/operator.openshift.io/etcds" 2025-12-08T17:46:29.892355894+00:00 stderr F I1208 17:46:29.892224 12 store.go:1663] "Monitoring resource count at path" resource="ingresses.config.openshift.io" path="//config.openshift.io/ingresses" 2025-12-08T17:46:29.896031284+00:00 stderr F I1208 17:46:29.895853 12 cacher.go:469] cacher (ingresses.config.openshift.io): initialized 2025-12-08T17:46:29.896031284+00:00 stderr F I1208 17:46:29.895926 12 
reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=Ingress" reflector="storage/cacher.go:/config.openshift.io/ingresses" 2025-12-08T17:46:30.163802812+00:00 stderr F I1208 17:46:30.163556 12 store.go:1663] "Monitoring resource count at path" resource="consolenotifications.console.openshift.io" path="//console.openshift.io/consolenotifications" 2025-12-08T17:46:30.166866274+00:00 stderr F I1208 17:46:30.166569 12 cacher.go:469] cacher (consolenotifications.console.openshift.io): initialized 2025-12-08T17:46:30.166866274+00:00 stderr F I1208 17:46:30.166634 12 reflector.go:430] "Caches populated" type="console.openshift.io/v1, Kind=ConsoleNotification" reflector="storage/cacher.go:/console.openshift.io/consolenotifications" 2025-12-08T17:46:30.292695421+00:00 stderr F I1208 17:46:30.292552 12 store.go:1663] "Monitoring resource count at path" resource="apiservers.config.openshift.io" path="//config.openshift.io/apiservers" 2025-12-08T17:46:30.293760102+00:00 stderr F I1208 17:46:30.293665 12 cacher.go:469] cacher (apiservers.config.openshift.io): initialized 2025-12-08T17:46:30.293760102+00:00 stderr F I1208 17:46:30.293704 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=APIServer" reflector="storage/cacher.go:/config.openshift.io/apiservers" 2025-12-08T17:46:30.381873688+00:00 stderr F I1208 17:46:30.381245 12 store.go:1663] "Monitoring resource count at path" resource="oauths.config.openshift.io" path="//config.openshift.io/oauths" 2025-12-08T17:46:30.385437214+00:00 stderr F I1208 17:46:30.385296 12 cacher.go:469] cacher (oauths.config.openshift.io): initialized 2025-12-08T17:46:30.385437214+00:00 stderr F I1208 17:46:30.385324 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=OAuth" reflector="storage/cacher.go:/config.openshift.io/oauths" 2025-12-08T17:46:30.619778609+00:00 stderr F I1208 17:46:30.618992 12 store.go:1663] "Monitoring resource count at path" resource="storageversionmigrations.migration.k8s.io" path="//migration.k8s.io/storageversionmigrations" 2025-12-08T17:46:30.623605573+00:00 stderr F I1208 17:46:30.623553 12 cacher.go:469] cacher (storageversionmigrations.migration.k8s.io): initialized 2025-12-08T17:46:30.623641834+00:00 stderr F I1208 17:46:30.623615 12 reflector.go:430] "Caches populated" type="migration.k8s.io/v1alpha1, Kind=StorageVersionMigration" reflector="storage/cacher.go:/migration.k8s.io/storageversionmigrations" 2025-12-08T17:46:30.676759049+00:00 stderr F I1208 17:46:30.676650 12 store.go:1663] "Monitoring resource count at path" resource="openshiftcontrollermanagers.operator.openshift.io" path="//operator.openshift.io/openshiftcontrollermanagers" 2025-12-08T17:46:30.680928873+00:00 stderr F I1208 17:46:30.680800 12 cacher.go:469] cacher (openshiftcontrollermanagers.operator.openshift.io): initialized 2025-12-08T17:46:30.681038527+00:00 stderr F I1208 17:46:30.680862 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=OpenShiftControllerManager" reflector="storage/cacher.go:/operator.openshift.io/openshiftcontrollermanagers" 2025-12-08T17:46:30.717806421+00:00 stderr F I1208 17:46:30.717684 12 store.go:1663] "Monitoring resource count at path" resource="imagedigestmirrorsets.config.openshift.io" path="//config.openshift.io/imagedigestmirrorsets" 2025-12-08T17:46:30.718916634+00:00 stderr F I1208 17:46:30.718835 12 cacher.go:469] cacher (imagedigestmirrorsets.config.openshift.io): initialized 2025-12-08T17:46:30.718982156+00:00 stderr F I1208 17:46:30.718931 
12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=ImageDigestMirrorSet" reflector="storage/cacher.go:/config.openshift.io/imagedigestmirrorsets" 2025-12-08T17:46:31.090901540+00:00 stderr F I1208 17:46:31.090743 12 store.go:1663] "Monitoring resource count at path" resource="authentications.config.openshift.io" path="//config.openshift.io/authentications" 2025-12-08T17:46:31.092591090+00:00 stderr F I1208 17:46:31.092516 12 cacher.go:469] cacher (authentications.config.openshift.io): initialized 2025-12-08T17:46:31.092591090+00:00 stderr F I1208 17:46:31.092540 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=Authentication" reflector="storage/cacher.go:/config.openshift.io/authentications" 2025-12-08T17:46:31.123453586+00:00 stderr F I1208 17:46:31.123329 12 store.go:1663] "Monitoring resource count at path" resource="gatewayclasses.gateway.networking.k8s.io" path="//gateway.networking.k8s.io/gatewayclasses" 2025-12-08T17:46:31.124170888+00:00 stderr F I1208 17:46:31.124107 12 cacher.go:469] cacher (gatewayclasses.gateway.networking.k8s.io): initialized 2025-12-08T17:46:31.124189989+00:00 stderr F I1208 17:46:31.124154 12 reflector.go:430] "Caches populated" type="gateway.networking.k8s.io/v1, Kind=GatewayClass" reflector="storage/cacher.go:/gateway.networking.k8s.io/gatewayclasses" 2025-12-08T17:46:31.131322182+00:00 stderr F I1208 17:46:31.131174 12 store.go:1663] "Monitoring resource count at path" resource="gatewayclasses.gateway.networking.k8s.io" path="//gateway.networking.k8s.io/gatewayclasses" 2025-12-08T17:46:31.133711654+00:00 stderr F I1208 17:46:31.133578 12 cacher.go:469] cacher (gatewayclasses.gateway.networking.k8s.io): initialized 2025-12-08T17:46:31.133711654+00:00 stderr F I1208 17:46:31.133607 12 reflector.go:430] "Caches populated" type="gateway.networking.k8s.io/v1beta1, Kind=GatewayClass" reflector="storage/cacher.go:/gateway.networking.k8s.io/gatewayclasses" 2025-12-08T17:46:31.285167790+00:00 stderr F I1208 17:46:31.285024 12 store.go:1663] "Monitoring resource count at path" resource="imagetagmirrorsets.config.openshift.io" path="//config.openshift.io/imagetagmirrorsets" 2025-12-08T17:46:31.287159090+00:00 stderr F I1208 17:46:31.287020 12 cacher.go:469] cacher (imagetagmirrorsets.config.openshift.io): initialized 2025-12-08T17:46:31.287159090+00:00 stderr F I1208 17:46:31.287072 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=ImageTagMirrorSet" reflector="storage/cacher.go:/config.openshift.io/imagetagmirrorsets" 2025-12-08T17:46:31.346990026+00:00 stderr F I1208 17:46:31.346783 12 store.go:1663] "Monitoring resource count at path" resource="operatorhubs.config.openshift.io" path="//config.openshift.io/operatorhubs" 2025-12-08T17:46:31.348577094+00:00 stderr F I1208 17:46:31.348513 12 cacher.go:469] cacher (operatorhubs.config.openshift.io): initialized 2025-12-08T17:46:31.348577094+00:00 stderr F I1208 17:46:31.348544 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=OperatorHub" reflector="storage/cacher.go:/config.openshift.io/operatorhubs" 2025-12-08T17:46:31.630153015+00:00 stderr F I1208 17:46:31.628898 12 store.go:1663] "Monitoring resource count at path" resource="builds.config.openshift.io" path="//config.openshift.io/builds" 2025-12-08T17:46:31.631497996+00:00 stderr F I1208 17:46:31.631425 12 cacher.go:469] cacher (builds.config.openshift.io): initialized 2025-12-08T17:46:31.631566078+00:00 stderr F I1208 17:46:31.631507 12 reflector.go:430] "Caches 
populated" type="config.openshift.io/v1, Kind=Build" reflector="storage/cacher.go:/config.openshift.io/builds" 2025-12-08T17:46:31.659979501+00:00 stderr F I1208 17:46:31.659846 12 store.go:1663] "Monitoring resource count at path" resource="olmconfigs.operators.coreos.com" path="//operators.coreos.com/olmconfigs" 2025-12-08T17:46:31.661816826+00:00 stderr F I1208 17:46:31.661738 12 cacher.go:469] cacher (olmconfigs.operators.coreos.com): initialized 2025-12-08T17:46:31.661816826+00:00 stderr F I1208 17:46:31.661761 12 reflector.go:430] "Caches populated" type="operators.coreos.com/v1, Kind=OLMConfig" reflector="storage/cacher.go:/operators.coreos.com/olmconfigs" 2025-12-08T17:46:31.988409668+00:00 stderr F I1208 17:46:31.988306 12 store.go:1663] "Monitoring resource count at path" resource="authentications.operator.openshift.io" path="//operator.openshift.io/authentications" 2025-12-08T17:46:31.993354457+00:00 stderr F I1208 17:46:31.993266 12 cacher.go:469] cacher (authentications.operator.openshift.io): initialized 2025-12-08T17:46:31.993354457+00:00 stderr F I1208 17:46:31.993305 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=Authentication" reflector="storage/cacher.go:/operator.openshift.io/authentications" 2025-12-08T17:46:32.510104257+00:00 stderr F I1208 17:46:32.509942 12 store.go:1663] "Monitoring resource count at path" resource="dnses.config.openshift.io" path="//config.openshift.io/dnses" 2025-12-08T17:46:32.512204010+00:00 stderr F I1208 17:46:32.512109 12 cacher.go:469] cacher (dnses.config.openshift.io): initialized 2025-12-08T17:46:32.512204010+00:00 stderr F I1208 17:46:32.512165 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=DNS" reflector="storage/cacher.go:/config.openshift.io/dnses" 2025-12-08T17:46:32.556582923+00:00 stderr F I1208 17:46:32.556439 12 store.go:1663] "Monitoring resource count at path" resource="servicecas.operator.openshift.io" path="//operator.openshift.io/servicecas" 2025-12-08T17:46:32.559189531+00:00 stderr F I1208 17:46:32.559088 12 cacher.go:469] cacher (servicecas.operator.openshift.io): initialized 2025-12-08T17:46:32.559189531+00:00 stderr F I1208 17:46:32.559151 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=ServiceCA" reflector="storage/cacher.go:/operator.openshift.io/servicecas" 2025-12-08T17:46:32.619144920+00:00 stderr F I1208 17:46:32.618967 12 store.go:1663] "Monitoring resource count at path" resource="nodes.config.openshift.io" path="//config.openshift.io/nodes" 2025-12-08T17:46:32.620977085+00:00 stderr F I1208 17:46:32.620807 12 cacher.go:469] cacher (nodes.config.openshift.io): initialized 2025-12-08T17:46:32.620977085+00:00 stderr F I1208 17:46:32.620835 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=Node" reflector="storage/cacher.go:/config.openshift.io/nodes" 2025-12-08T17:46:33.169113839+00:00 stderr F I1208 17:46:33.168946 12 store.go:1663] "Monitoring resource count at path" resource="operators.operators.coreos.com" path="//operators.coreos.com/operators" 2025-12-08T17:46:33.170293824+00:00 stderr F I1208 17:46:33.170023 12 cacher.go:469] cacher (operators.operators.coreos.com): initialized 2025-12-08T17:46:33.170293824+00:00 stderr F I1208 17:46:33.170045 12 reflector.go:430] "Caches populated" type="operators.coreos.com/v1, Kind=Operator" reflector="storage/cacher.go:/operators.coreos.com/operators" 2025-12-08T17:46:33.885119919+00:00 stderr F I1208 17:46:33.884956 12 store.go:1663] "Monitoring resource 
count at path" resource="images.config.openshift.io" path="//config.openshift.io/images" 2025-12-08T17:46:33.887323436+00:00 stderr F I1208 17:46:33.887218 12 cacher.go:469] cacher (images.config.openshift.io): initialized 2025-12-08T17:46:33.887323436+00:00 stderr F I1208 17:46:33.887243 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=Image" reflector="storage/cacher.go:/config.openshift.io/images" 2025-12-08T17:46:33.994621016+00:00 stderr F I1208 17:46:33.994434 12 store.go:1663] "Monitoring resource count at path" resource="imagepruners.imageregistry.operator.openshift.io" path="//imageregistry.operator.openshift.io/imagepruners" 2025-12-08T17:46:33.996712349+00:00 stderr F I1208 17:46:33.996573 12 cacher.go:469] cacher (imagepruners.imageregistry.operator.openshift.io): initialized 2025-12-08T17:46:33.996712349+00:00 stderr F I1208 17:46:33.996596 12 reflector.go:430] "Caches populated" type="imageregistry.operator.openshift.io/v1, Kind=ImagePruner" reflector="storage/cacher.go:/imageregistry.operator.openshift.io/imagepruners" 2025-12-08T17:46:34.184609179+00:00 stderr F I1208 17:46:34.184471 12 store.go:1663] "Monitoring resource count at path" resource="configs.samples.operator.openshift.io" path="//samples.operator.openshift.io/configs" 2025-12-08T17:46:34.186616949+00:00 stderr F I1208 17:46:34.186523 12 cacher.go:469] cacher (configs.samples.operator.openshift.io): initialized 2025-12-08T17:46:34.186616949+00:00 stderr F I1208 17:46:34.186554 12 reflector.go:430] "Caches populated" type="samples.operator.openshift.io/v1, Kind=Config" reflector="storage/cacher.go:/samples.operator.openshift.io/configs" 2025-12-08T17:46:35.270587866+00:00 stderr F I1208 17:46:35.270354 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.009640285189036797 seatDemandStdev=0.1529491104057685 seatDemandSmoothed=14.02792815297752 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:46:36.293251762+00:00 stderr F I1208 17:46:36.293011 12 store.go:1663] "Monitoring resource count at path" resource="consoles.config.openshift.io" path="//config.openshift.io/consoles" 2025-12-08T17:46:36.294450607+00:00 stderr F I1208 17:46:36.294330 12 cacher.go:469] cacher (consoles.config.openshift.io): initialized 2025-12-08T17:46:36.294450607+00:00 stderr F I1208 17:46:36.294363 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=Console" reflector="storage/cacher.go:/config.openshift.io/consoles" 2025-12-08T17:46:37.096473061+00:00 stderr F I1208 17:46:37.096260 12 store.go:1663] "Monitoring resource count at path" resource="imagecontentsourcepolicies.operator.openshift.io" path="//operator.openshift.io/imagecontentsourcepolicies" 2025-12-08T17:46:37.099009706+00:00 stderr F I1208 17:46:37.098922 12 cacher.go:469] cacher (imagecontentsourcepolicies.operator.openshift.io): initialized 2025-12-08T17:46:37.099009706+00:00 stderr F I1208 17:46:37.098968 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1alpha1, Kind=ImageContentSourcePolicy" reflector="storage/cacher.go:/operator.openshift.io/imagecontentsourcepolicies" 2025-12-08T17:46:37.286414082+00:00 stderr F I1208 17:46:37.286278 12 store.go:1663] "Monitoring resource count at path" resource="containerruntimeconfigs.machineconfiguration.openshift.io" path="//machineconfiguration.openshift.io/containerruntimeconfigs" 2025-12-08T17:46:37.288050801+00:00 stderr F I1208 17:46:37.287966 12 cacher.go:469] cacher 
(containerruntimeconfigs.machineconfiguration.openshift.io): initialized 2025-12-08T17:46:37.288050801+00:00 stderr F I1208 17:46:37.288016 12 reflector.go:430] "Caches populated" type="machineconfiguration.openshift.io/v1, Kind=ContainerRuntimeConfig" reflector="storage/cacher.go:/machineconfiguration.openshift.io/containerruntimeconfigs" 2025-12-08T17:46:37.370317590+00:00 stderr F I1208 17:46:37.368626 12 store.go:1663] "Monitoring resource count at path" resource="schedulers.config.openshift.io" path="//config.openshift.io/schedulers" 2025-12-08T17:46:37.372047052+00:00 stderr F I1208 17:46:37.371966 12 cacher.go:469] cacher (schedulers.config.openshift.io): initialized 2025-12-08T17:46:37.372294380+00:00 stderr F I1208 17:46:37.372078 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=Scheduler" reflector="storage/cacher.go:/config.openshift.io/schedulers" 2025-12-08T17:46:41.346143408+00:00 stderr F I1208 17:46:41.345970 12 store.go:1663] "Monitoring resource count at path" resource="kubestorageversionmigrators.operator.openshift.io" path="//operator.openshift.io/kubestorageversionmigrators" 2025-12-08T17:46:41.348911940+00:00 stderr F I1208 17:46:41.348689 12 cacher.go:469] cacher (kubestorageversionmigrators.operator.openshift.io): initialized 2025-12-08T17:46:41.348911940+00:00 stderr F I1208 17:46:41.348713 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=KubeStorageVersionMigrator" reflector="storage/cacher.go:/operator.openshift.io/kubestorageversionmigrators" 2025-12-08T17:46:43.730784834+00:00 stderr F I1208 17:46:43.730625 12 controller.go:667] quota admission added evaluator for: csistoragecapacities.storage.k8s.io 2025-12-08T17:46:43.730784834+00:00 stderr F I1208 17:46:43.730679 12 controller.go:667] quota admission added evaluator for: csistoragecapacities.storage.k8s.io 2025-12-08T17:46:45.271236362+00:00 stderr F I1208 17:46:45.271072 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.003394005411585735 seatDemandStdev=0.0581591449288232 seatDemandSmoothed=13.706701527916866 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:46:47.290048078+00:00 stderr F I1208 17:46:47.289912 12 store.go:1663] "Monitoring resource count at path" resource="configs.operator.openshift.io" path="//operator.openshift.io/configs" 2025-12-08T17:46:47.292270328+00:00 stderr F I1208 17:46:47.292175 12 cacher.go:469] cacher (configs.operator.openshift.io): initialized 2025-12-08T17:46:47.292270328+00:00 stderr F I1208 17:46:47.292206 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=Config" reflector="storage/cacher.go:/operator.openshift.io/configs" 2025-12-08T17:46:49.060786279+00:00 stderr F I1208 17:46:49.060660 12 store.go:1663] "Monitoring resource count at path" resource="kubeletconfigs.machineconfiguration.openshift.io" path="//machineconfiguration.openshift.io/kubeletconfigs" 2025-12-08T17:46:49.061594915+00:00 stderr F I1208 17:46:49.061529 12 cacher.go:469] cacher (kubeletconfigs.machineconfiguration.openshift.io): initialized 2025-12-08T17:46:49.061612996+00:00 stderr F I1208 17:46:49.061581 12 reflector.go:430] "Caches populated" type="machineconfiguration.openshift.io/v1, Kind=KubeletConfig" reflector="storage/cacher.go:/machineconfiguration.openshift.io/kubeletconfigs" 2025-12-08T17:46:53.381734309+00:00 stderr F I1208 17:46:53.381602 12 store.go:1663] "Monitoring resource count at path" 
resource="consoleplugins.console.openshift.io" path="//console.openshift.io/consoleplugins" 2025-12-08T17:46:53.382705399+00:00 stderr F I1208 17:46:53.382662 12 cacher.go:469] cacher (consoleplugins.console.openshift.io): initialized 2025-12-08T17:46:53.382753420+00:00 stderr F I1208 17:46:53.382728 12 reflector.go:430] "Caches populated" type="console.openshift.io/v1, Kind=ConsolePlugin" reflector="storage/cacher.go:/console.openshift.io/consoleplugins" 2025-12-08T17:46:53.388174801+00:00 stderr F I1208 17:46:53.388103 12 store.go:1663] "Monitoring resource count at path" resource="consoleplugins.console.openshift.io" path="//console.openshift.io/consoleplugins" 2025-12-08T17:46:53.389665619+00:00 stderr F I1208 17:46:53.389514 12 cacher.go:469] cacher (consoleplugins.console.openshift.io): initialized 2025-12-08T17:46:53.389665619+00:00 stderr F I1208 17:46:53.389570 12 reflector.go:430] "Caches populated" type="console.openshift.io/v1alpha1, Kind=ConsolePlugin" reflector="storage/cacher.go:/console.openshift.io/consoleplugins" 2025-12-08T17:46:56.135073771+00:00 stderr F I1208 17:46:56.134901 12 store.go:1663] "Monitoring resource count at path" resource="dnses.operator.openshift.io" path="//operator.openshift.io/dnses" 2025-12-08T17:46:56.137048323+00:00 stderr F I1208 17:46:56.136946 12 cacher.go:469] cacher (dnses.operator.openshift.io): initialized 2025-12-08T17:46:56.137048323+00:00 stderr F I1208 17:46:56.136979 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=DNS" reflector="storage/cacher.go:/operator.openshift.io/dnses" 2025-12-08T17:46:57.768948224+00:00 stderr F I1208 17:46:57.768672 12 store.go:1663] "Monitoring resource count at path" resource="configs.imageregistry.operator.openshift.io" path="//imageregistry.operator.openshift.io/configs" 2025-12-08T17:46:57.770902586+00:00 stderr F I1208 17:46:57.770773 12 cacher.go:469] cacher (configs.imageregistry.operator.openshift.io): initialized 2025-12-08T17:46:57.770902586+00:00 stderr F I1208 17:46:57.770809 12 reflector.go:430] "Caches populated" type="imageregistry.operator.openshift.io/v1, Kind=Config" reflector="storage/cacher.go:/imageregistry.operator.openshift.io/configs" 2025-12-08T17:46:59.402195157+00:00 stderr F I1208 17:46:59.402005 12 store.go:1663] "Monitoring resource count at path" resource="machineosconfigs.machineconfiguration.openshift.io" path="//machineconfiguration.openshift.io/machineosconfigs" 2025-12-08T17:46:59.404261803+00:00 stderr F I1208 17:46:59.404142 12 cacher.go:469] cacher (machineosconfigs.machineconfiguration.openshift.io): initialized 2025-12-08T17:46:59.404261803+00:00 stderr F I1208 17:46:59.404205 12 reflector.go:430] "Caches populated" type="machineconfiguration.openshift.io/v1, Kind=MachineOSConfig" reflector="storage/cacher.go:/machineconfiguration.openshift.io/machineosconfigs" 2025-12-08T17:47:02.364453096+00:00 stderr F I1208 17:47:02.363647 12 store.go:1663] "Monitoring resource count at path" resource="clusterimagepolicies.config.openshift.io" path="//config.openshift.io/clusterimagepolicies" 2025-12-08T17:47:02.365366865+00:00 stderr F I1208 17:47:02.365176 12 cacher.go:469] cacher (clusterimagepolicies.config.openshift.io): initialized 2025-12-08T17:47:02.365366865+00:00 stderr F I1208 17:47:02.365284 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=ClusterImagePolicy" reflector="storage/cacher.go:/config.openshift.io/clusterimagepolicies" 2025-12-08T17:47:03.193204744+00:00 stderr F I1208 17:47:03.193066 12 store.go:1663] 
"Monitoring resource count at path" resource="projects.config.openshift.io" path="//config.openshift.io/projects" 2025-12-08T17:47:03.197080847+00:00 stderr F I1208 17:47:03.196997 12 cacher.go:469] cacher (projects.config.openshift.io): initialized 2025-12-08T17:47:03.197193831+00:00 stderr F I1208 17:47:03.197121 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=Project" reflector="storage/cacher.go:/config.openshift.io/projects" 2025-12-08T17:47:03.301003408+00:00 stderr F I1208 17:47:03.300715 12 controller.go:667] quota admission added evaluator for: deployments.apps 2025-12-08T17:47:03.301003408+00:00 stderr F I1208 17:47:03.300769 12 controller.go:667] quota admission added evaluator for: deployments.apps 2025-12-08T17:47:05.273036886+00:00 stderr F I1208 17:47:05.272835 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.004680105319153894 seatDemandStdev=0.09153402880676904 seatDemandSmoothed=13.086859301621244 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:47:07.191928001+00:00 stderr F I1208 17:47:07.191777 12 store.go:1663] "Monitoring resource count at path" resource="helmchartrepositories.helm.openshift.io" path="//helm.openshift.io/helmchartrepositories" 2025-12-08T17:47:07.195023538+00:00 stderr F I1208 17:47:07.194908 12 cacher.go:469] cacher (helmchartrepositories.helm.openshift.io): initialized 2025-12-08T17:47:07.195023538+00:00 stderr F I1208 17:47:07.194968 12 reflector.go:430] "Caches populated" type="helm.openshift.io/v1beta1, Kind=HelmChartRepository" reflector="storage/cacher.go:/helm.openshift.io/helmchartrepositories" 2025-12-08T17:47:11.691542186+00:00 stderr F I1208 17:47:11.691346 12 store.go:1663] "Monitoring resource count at path" resource="consolequickstarts.console.openshift.io" path="//console.openshift.io/consolequickstarts" 2025-12-08T17:47:11.707918941+00:00 stderr F I1208 17:47:11.707738 12 cacher.go:469] cacher (consolequickstarts.console.openshift.io): initialized 2025-12-08T17:47:11.707918941+00:00 stderr F I1208 17:47:11.707787 12 reflector.go:430] "Caches populated" type="console.openshift.io/v1, Kind=ConsoleQuickStart" reflector="storage/cacher.go:/console.openshift.io/consolequickstarts" 2025-12-08T17:47:13.300921487+00:00 stderr F I1208 17:47:13.300700 12 store.go:1663] "Monitoring resource count at path" resource="machineosbuilds.machineconfiguration.openshift.io" path="//machineconfiguration.openshift.io/machineosbuilds" 2025-12-08T17:47:13.302620261+00:00 stderr F I1208 17:47:13.302286 12 cacher.go:469] cacher (machineosbuilds.machineconfiguration.openshift.io): initialized 2025-12-08T17:47:13.302620261+00:00 stderr F I1208 17:47:13.302322 12 reflector.go:430] "Caches populated" type="machineconfiguration.openshift.io/v1, Kind=MachineOSBuild" reflector="storage/cacher.go:/machineconfiguration.openshift.io/machineosbuilds" 2025-12-08T17:47:14.674393133+00:00 stderr F I1208 17:47:14.673223 12 store.go:1663] "Monitoring resource count at path" resource="machineconfignodes.machineconfiguration.openshift.io" path="//machineconfiguration.openshift.io/machineconfignodes" 2025-12-08T17:47:14.676203180+00:00 stderr F I1208 17:47:14.675659 12 cacher.go:469] cacher (machineconfignodes.machineconfiguration.openshift.io): initialized 2025-12-08T17:47:14.676203180+00:00 stderr F I1208 17:47:14.675691 12 reflector.go:430] "Caches populated" type="machineconfiguration.openshift.io/v1, Kind=MachineConfigNode" 
reflector="storage/cacher.go:/machineconfiguration.openshift.io/machineconfignodes" 2025-12-08T17:47:21.669368218+00:00 stderr F I1208 17:47:21.669191 12 controller.go:667] quota admission added evaluator for: serviceaccounts 2025-12-08T17:47:21.932567564+00:00 stderr F I1208 17:47:21.932468 12 store.go:1663] "Monitoring resource count at path" resource="consoleexternalloglinks.console.openshift.io" path="//console.openshift.io/consoleexternalloglinks" 2025-12-08T17:47:21.933308098+00:00 stderr F I1208 17:47:21.933248 12 cacher.go:469] cacher (consoleexternalloglinks.console.openshift.io): initialized 2025-12-08T17:47:21.933308098+00:00 stderr F I1208 17:47:21.933280 12 reflector.go:430] "Caches populated" type="console.openshift.io/v1, Kind=ConsoleExternalLogLink" reflector="storage/cacher.go:/console.openshift.io/consoleexternalloglinks" 2025-12-08T17:47:21.942287890+00:00 stderr F I1208 17:47:21.942176 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:47:21.942287890+00:00 stderr F I1208 17:47:21.942215 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:47:21.942287890+00:00 stderr F I1208 17:47:21.942223 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 55.192µs 2025-12-08T17:47:21.946185502+00:00 stderr F I1208 17:47:21.946136 12 store.go:1663] "Monitoring resource count at path" resource="csisnapshotcontrollers.operator.openshift.io" path="//operator.openshift.io/csisnapshotcontrollers" 2025-12-08T17:47:21.948382342+00:00 stderr F I1208 17:47:21.948337 12 cacher.go:469] cacher (csisnapshotcontrollers.operator.openshift.io): initialized 2025-12-08T17:47:21.948411773+00:00 stderr F I1208 17:47:21.948388 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=CSISnapshotController" reflector="storage/cacher.go:/operator.openshift.io/csisnapshotcontrollers" 2025-12-08T17:47:21.955700263+00:00 stderr F I1208 17:47:21.955623 12 store.go:1663] "Monitoring resource count at path" resource="consolesamples.console.openshift.io" path="//console.openshift.io/consolesamples" 2025-12-08T17:47:21.957589701+00:00 stderr F I1208 17:47:21.957539 12 cacher.go:469] cacher (consolesamples.console.openshift.io): initialized 2025-12-08T17:47:21.957589701+00:00 stderr F I1208 17:47:21.957562 12 reflector.go:430] "Caches populated" type="console.openshift.io/v1, Kind=ConsoleSample" reflector="storage/cacher.go:/console.openshift.io/consolesamples" 2025-12-08T17:47:21.966797302+00:00 stderr F I1208 17:47:21.966682 12 store.go:1663] "Monitoring resource count at path" resource="storages.operator.openshift.io" path="//operator.openshift.io/storages" 2025-12-08T17:47:21.968839585+00:00 stderr F I1208 17:47:21.968768 12 cacher.go:469] cacher (storages.operator.openshift.io): initialized 2025-12-08T17:47:21.968839585+00:00 stderr F I1208 17:47:21.968805 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=Storage" reflector="storage/cacher.go:/operator.openshift.io/storages" 2025-12-08T17:47:21.976758405+00:00 stderr F I1208 17:47:21.976667 12 store.go:1663] "Monitoring resource count at path" resource="rangeallocations.security.internal.openshift.io" path="//security.internal.openshift.io/rangeallocations" 2025-12-08T17:47:21.979525472+00:00 stderr F I1208 17:47:21.979471 12 cacher.go:469] cacher (rangeallocations.security.internal.openshift.io): initialized 2025-12-08T17:47:21.979525472+00:00 stderr F I1208 17:47:21.979497 12 reflector.go:430] "Caches populated" 
type="security.internal.openshift.io/v1, Kind=RangeAllocation" reflector="storage/cacher.go:/security.internal.openshift.io/rangeallocations" 2025-12-08T17:47:21.985060536+00:00 stderr F I1208 17:47:21.984761 12 store.go:1663] "Monitoring resource count at path" resource="consoleyamlsamples.console.openshift.io" path="//console.openshift.io/consoleyamlsamples" 2025-12-08T17:47:21.985527451+00:00 stderr F I1208 17:47:21.985470 12 cacher.go:469] cacher (consoleyamlsamples.console.openshift.io): initialized 2025-12-08T17:47:21.985527451+00:00 stderr F I1208 17:47:21.985498 12 reflector.go:430] "Caches populated" type="console.openshift.io/v1, Kind=ConsoleYAMLSample" reflector="storage/cacher.go:/console.openshift.io/consoleyamlsamples" 2025-12-08T17:47:21.987552945+00:00 stderr F I1208 17:47:21.987487 12 cacher.go:847] cacher (endpointslices.discovery.k8s.io): 1 objects queued in incoming channel. 2025-12-08T17:47:21.987552945+00:00 stderr F I1208 17:47:21.987513 12 cacher.go:847] cacher (endpointslices.discovery.k8s.io): 2 objects queued in incoming channel. 2025-12-08T17:47:21.996849097+00:00 stderr F I1208 17:47:21.996012 12 store.go:1663] "Monitoring resource count at path" resource="clusterautoscalers.autoscaling.openshift.io" path="//autoscaling.openshift.io/clusterautoscalers" 2025-12-08T17:47:21.999161320+00:00 stderr F I1208 17:47:21.998088 12 cacher.go:469] cacher (clusterautoscalers.autoscaling.openshift.io): initialized 2025-12-08T17:47:21.999161320+00:00 stderr F I1208 17:47:21.998126 12 reflector.go:430] "Caches populated" type="autoscaling.openshift.io/v1, Kind=ClusterAutoscaler" reflector="storage/cacher.go:/autoscaling.openshift.io/clusterautoscalers" 2025-12-08T17:47:22.009039702+00:00 stderr F I1208 17:47:22.008969 12 store.go:1663] "Monitoring resource count at path" resource="consolelinks.console.openshift.io" path="//console.openshift.io/consolelinks" 2025-12-08T17:47:22.009814016+00:00 stderr F I1208 17:47:22.009774 12 cacher.go:469] cacher (consolelinks.console.openshift.io): initialized 2025-12-08T17:47:22.009856077+00:00 stderr F I1208 17:47:22.009823 12 reflector.go:430] "Caches populated" type="console.openshift.io/v1, Kind=ConsoleLink" reflector="storage/cacher.go:/console.openshift.io/consolelinks" 2025-12-08T17:47:22.016322480+00:00 stderr F I1208 17:47:22.016241 12 store.go:1663] "Monitoring resource count at path" resource="imagecontentpolicies.config.openshift.io" path="//config.openshift.io/imagecontentpolicies" 2025-12-08T17:47:22.018039155+00:00 stderr F I1208 17:47:22.017941 12 cacher.go:469] cacher (imagecontentpolicies.config.openshift.io): initialized 2025-12-08T17:47:22.018039155+00:00 stderr F I1208 17:47:22.017969 12 reflector.go:430] "Caches populated" type="config.openshift.io/v1, Kind=ImageContentPolicy" reflector="storage/cacher.go:/config.openshift.io/imagecontentpolicies" 2025-12-08T17:47:22.025279403+00:00 stderr F I1208 17:47:22.023663 12 store.go:1663] "Monitoring resource count at path" resource="storagestates.migration.k8s.io" path="//migration.k8s.io/storagestates" 2025-12-08T17:47:22.026650016+00:00 stderr F I1208 17:47:22.026575 12 cacher.go:469] cacher (storagestates.migration.k8s.io): initialized 2025-12-08T17:47:22.026728768+00:00 stderr F I1208 17:47:22.026684 12 reflector.go:430] "Caches populated" type="migration.k8s.io/v1alpha1, Kind=StorageState" reflector="storage/cacher.go:/migration.k8s.io/storagestates" 2025-12-08T17:47:22.035365170+00:00 stderr F I1208 17:47:22.035135 12 store.go:1663] "Monitoring resource count at path" 
resource="clustercsidrivers.operator.openshift.io" path="//operator.openshift.io/clustercsidrivers" 2025-12-08T17:47:22.037803777+00:00 stderr F I1208 17:47:22.037290 12 cacher.go:469] cacher (clustercsidrivers.operator.openshift.io): initialized 2025-12-08T17:47:22.037803777+00:00 stderr F I1208 17:47:22.037310 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Kind=ClusterCSIDriver" reflector="storage/cacher.go:/operator.openshift.io/clustercsidrivers" 2025-12-08T17:47:22.046320795+00:00 stderr F I1208 17:47:22.046183 12 controller.go:667] quota admission added evaluator for: replicasets.apps 2025-12-08T17:47:22.046320795+00:00 stderr F I1208 17:47:22.046236 12 controller.go:667] quota admission added evaluator for: replicasets.apps 2025-12-08T17:47:22.073730118+00:00 stderr F I1208 17:47:22.073068 12 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io 2025-12-08T17:47:22.077011901+00:00 stderr F I1208 17:47:22.074226 12 controller.go:667] quota admission added evaluator for: endpoints 2025-12-08T17:47:22.077011901+00:00 stderr F I1208 17:47:22.074604 12 cacher.go:847] cacher (replicasets.apps): 1 objects queued in incoming channel. 2025-12-08T17:47:22.077011901+00:00 stderr F I1208 17:47:22.074616 12 cacher.go:847] cacher (replicasets.apps): 2 objects queued in incoming channel. 2025-12-08T17:47:22.077011901+00:00 stderr F I1208 17:47:22.076249 12 cacher.go:847] cacher (endpoints): 1 objects queued in incoming channel. 2025-12-08T17:47:22.077011901+00:00 stderr F I1208 17:47:22.076269 12 cacher.go:847] cacher (endpoints): 2 objects queued in incoming channel. 2025-12-08T17:47:22.081278926+00:00 stderr F E1208 17:47:22.081066 12 wrap.go:53] "Timeout or abort while handling" logger="UnhandledError" method="GET" URI="/apis/route.openshift.io/v1/routes?allowWatchBookmarks=true&resourceVersion=38772&timeout=8m16s&timeoutSeconds=496&watch=true" auditID="339ba9cf-8a50-4466-9035-b67bd40c7cb1" 2025-12-08T17:47:22.085501968+00:00 stderr F E1208 17:47:22.085398 12 wrap.go:53] "Timeout or abort while handling" logger="UnhandledError" method="GET" URI="/apis/build.openshift.io/v1/builds?allowWatchBookmarks=true&resourceVersion=38776&timeout=8m2s&timeoutSeconds=482&watch=true" auditID="37cd7af9-ac02-4bcc-b1e0-8fb093ac670d" 2025-12-08T17:47:22.085639262+00:00 stderr F E1208 17:47:22.085576 12 wrap.go:53] "Timeout or abort while handling" logger="UnhandledError" method="GET" URI="/apis/template.openshift.io/v1/templateinstances?allowWatchBookmarks=true&resourceVersion=38781&timeout=7m46s&timeoutSeconds=466&watch=true" auditID="21daec78-e9f8-4e02-bc39-864c32c0e82e" 2025-12-08T17:47:22.086046275+00:00 stderr F E1208 17:47:22.085966 12 wrap.go:53] "Timeout or abort while handling" logger="UnhandledError" method="GET" URI="/apis/build.openshift.io/v1/buildconfigs?allowWatchBookmarks=true&resourceVersion=38775&timeout=8m27s&timeoutSeconds=507&watch=true" auditID="18f5d78b-bb1e-4774-8b2f-f5f51bb6565a" 2025-12-08T17:47:22.086363045+00:00 stderr F E1208 17:47:22.086301 12 wrap.go:53] "Timeout or abort while handling" logger="UnhandledError" method="GET" URI="/apis/apps.openshift.io/v1/deploymentconfigs?allowWatchBookmarks=true&resourceVersion=38783&timeout=9m17s&timeoutSeconds=557&watch=true" auditID="cbe74dd6-adf7-45db-a76e-76aa13b40ca2" 2025-12-08T17:47:22.087421559+00:00 stderr F E1208 17:47:22.086742 12 wrap.go:53] "Timeout or abort while handling" logger="UnhandledError" method="GET" 
URI="/apis/image.openshift.io/v1/images?allowWatchBookmarks=true&resourceVersion=38778&timeout=6m5s&timeoutSeconds=365&watch=true" auditID="09f0c240-5187-42e5-bb28-c61ee99cc9e9" 2025-12-08T17:47:22.087421559+00:00 stderr F E1208 17:47:22.086844 12 wrap.go:53] "Timeout or abort while handling" logger="UnhandledError" method="GET" URI="/apis/image.openshift.io/v1/imagestreams?allowWatchBookmarks=true&resourceVersion=38774&timeout=8m4s&timeoutSeconds=484&watch=true" auditID="eb362d90-957b-4c5e-9441-751e6ffdbf01" 2025-12-08T17:47:22.718408612+00:00 stderr F I1208 17:47:22.718268 12 controller.go:667] quota admission added evaluator for: serviceaccounts 2025-12-08T17:47:23.311390108+00:00 stderr F I1208 17:47:23.310788 12 controller.go:667] quota admission added evaluator for: catalogsources.operators.coreos.com 2025-12-08T17:47:23.438361015+00:00 stderr F I1208 17:47:23.438237 12 controller.go:667] quota admission added evaluator for: daemonsets.apps 2025-12-08T17:47:23.438361015+00:00 stderr F I1208 17:47:23.438278 12 controller.go:667] quota admission added evaluator for: daemonsets.apps 2025-12-08T17:47:24.063845084+00:00 stderr F I1208 17:47:24.063722 12 cacher.go:847] cacher (leases.coordination.k8s.io): 1 objects queued in incoming channel. 2025-12-08T17:47:24.063845084+00:00 stderr F I1208 17:47:24.063745 12 cacher.go:847] cacher (leases.coordination.k8s.io): 2 objects queued in incoming channel. 2025-12-08T17:47:25.275055473+00:00 stderr F I1208 17:47:25.274562 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=4 seatDemandAvg=0.024791908761415887 seatDemandStdev=0.2086546831495865 seatDemandSmoothed=12.500501879291031 fairFrac=2.2796127562642368 currentCL=4 concurrencyDenominator=4 backstop=false 2025-12-08T17:47:38.632522763+00:00 stderr F I1208 17:47:38.632249 12 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io 2025-12-08T17:47:39.032591587+00:00 stderr F I1208 17:47:39.032389 12 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io 2025-12-08T17:47:41.432969497+00:00 stderr F I1208 17:47:41.432739 12 controller.go:667] quota admission added evaluator for: servicemonitors.monitoring.coreos.com 2025-12-08T17:47:44.034610444+00:00 stderr F I1208 17:47:44.033937 12 controller.go:667] quota admission added evaluator for: prometheusrules.monitoring.coreos.com 2025-12-08T17:47:45.276595690+00:00 stderr F I1208 17:47:45.276419 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.006979461197065342 seatDemandStdev=0.14267234969857376 seatDemandSmoothed=11.937488824201946 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:47:49.032091660+00:00 stderr F I1208 17:47:49.031814 12 controller.go:667] quota admission added evaluator for: operatorpkis.network.operator.openshift.io 2025-12-08T17:47:53.833511989+00:00 stderr F I1208 17:47:53.833317 12 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io 2025-12-08T17:47:54.032811459+00:00 stderr F I1208 17:47:54.032155 12 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io 2025-12-08T17:47:55.233660351+00:00 stderr F I1208 17:47:55.233032 12 controller.go:667] quota admission added evaluator for: servicemonitors.monitoring.coreos.com 2025-12-08T17:47:55.278139847+00:00 stderr F I1208 17:47:55.277865 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" 
seatDemandHighWatermark=1 seatDemandAvg=0.0008572663871292836 seatDemandStdev=0.029266559098581814 seatDemandSmoothed=11.663619429231472 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:47:58.424150961+00:00 stderr F I1208 17:47:58.423166 12 controller.go:667] quota admission added evaluator for: operatorpkis.network.operator.openshift.io 2025-12-08T17:48:03.272545471+00:00 stderr F E1208 17:48:03.271442 12 wrap.go:53] "Timeout or abort while handling" logger="UnhandledError" method="GET" URI="/apis/route.openshift.io/v1/routes?allowWatchBookmarks=true&resourceVersion=39238&timeout=7m55s&timeoutSeconds=475&watch=true" auditID="7a4354fd-13d0-4a9a-a2f4-8341a8f92973" 2025-12-08T17:48:05.279946587+00:00 stderr F I1208 17:48:05.279035 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.007390322080540606 seatDemandStdev=0.14228744559170586 seatDemandSmoothed=11.39879877101561 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:48:15.279690894+00:00 stderr F I1208 17:48:15.279465 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=2 seatDemandAvg=0.003934366230919871 seatDemandStdev=0.06538168659193702 seatDemandSmoothed=11.138220668497176 fairFrac=2.2796127562642368 currentCL=2 concurrencyDenominator=2 backstop=false 2025-12-08T17:48:25.280965249+00:00 stderr F I1208 17:48:25.279972 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.005408169204985352 seatDemandStdev=0.10811358600396702 seatDemandSmoothed=10.884652593491547 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:48:35.280939194+00:00 stderr F I1208 17:48:35.280672 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=4 seatDemandAvg=0.0024794458055383607 seatDemandStdev=0.060859655214058166 seatDemandSmoothed=10.635762383164693 fairFrac=2.2796127562642368 currentCL=4 concurrencyDenominator=4 backstop=false 2025-12-08T17:48:45.281792115+00:00 stderr F I1208 17:48:45.281523 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0007381271052625024 seatDemandStdev=0.027158465966231952 seatDemandSmoothed=10.39178146999255 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:49:25.285627541+00:00 stderr F I1208 17:49:25.284950 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.005510700677900754 seatDemandStdev=0.11587001984010808 seatDemandSmoothed=9.473571345360403 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:49:45.286542314+00:00 stderr F I1208 17:49:45.286288 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0005935014333001721 seatDemandStdev=0.024354654367263 seatDemandSmoothed=9.045246584748936 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:50:25.289508029+00:00 stderr F I1208 17:50:25.288395 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.0057908736136492375 seatDemandStdev=0.1344275571106892 seatDemandSmoothed=8.24650784443685 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:50:45.290609347+00:00 stderr F I1208 17:50:45.290338 12 apf_controller.go:493] 
"Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0008412048854352757 seatDemandStdev=0.02899133076931784 seatDemandSmoothed=7.874145491454295 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:51:05.392081535+00:00 stderr F I1208 17:51:05.391960 12 controller.go:231] Updating CRD OpenAPI spec because alertingrules.monitoring.openshift.io changed 2025-12-08T17:51:05.392142107+00:00 stderr F I1208 17:51:05.392091 12 controller.go:231] Updating CRD OpenAPI spec because baselineadminnetworkpolicies.policy.networking.k8s.io changed 2025-12-08T17:51:05.392154047+00:00 stderr F I1208 17:51:05.392133 12 controller.go:231] Updating CRD OpenAPI spec because configs.imageregistry.operator.openshift.io changed 2025-12-08T17:51:05.392417265+00:00 stderr F I1208 17:51:05.392344 12 controller.go:231] Updating CRD OpenAPI spec because configs.operator.openshift.io changed 2025-12-08T17:51:05.392417265+00:00 stderr F I1208 17:51:05.392380 12 controller.go:231] Updating CRD OpenAPI spec because nodeslicepools.whereabouts.cni.cncf.io changed 2025-12-08T17:51:05.392582699+00:00 stderr F I1208 17:51:05.392527 12 controller.go:231] Updating CRD OpenAPI spec because overlappingrangeipreservations.whereabouts.cni.cncf.io changed 2025-12-08T17:51:05.392673052+00:00 stderr F I1208 17:51:05.392623 12 controller.go:231] Updating CRD OpenAPI spec because networks.config.openshift.io changed 2025-12-08T17:51:05.392833847+00:00 stderr F I1208 17:51:05.392757 12 controller.go:231] Updating CRD OpenAPI spec because rolebindingrestrictions.authorization.openshift.io changed 2025-12-08T17:51:05.392847377+00:00 stderr F I1208 17:51:05.392818 12 controller.go:231] Updating CRD OpenAPI spec because oauths.config.openshift.io changed 2025-12-08T17:51:05.392902698+00:00 stderr F I1208 17:51:05.392844 12 controller.go:231] Updating CRD OpenAPI spec because pinnedimagesets.machineconfiguration.openshift.io changed 2025-12-08T17:51:05.392940990+00:00 stderr F I1208 17:51:05.392893 12 controller.go:231] Updating CRD OpenAPI spec because referencegrants.gateway.networking.k8s.io changed 2025-12-08T17:51:05.393030932+00:00 stderr F I1208 17:51:05.392984 12 controller.go:231] Updating CRD OpenAPI spec because olmconfigs.operators.coreos.com changed 2025-12-08T17:51:05.393158466+00:00 stderr F I1208 17:51:05.393103 12 controller.go:231] Updating CRD OpenAPI spec because clusterimagepolicies.config.openshift.io changed 2025-12-08T17:51:05.393224198+00:00 stderr F I1208 17:51:05.393183 12 controller.go:231] Updating CRD OpenAPI spec because gateways.gateway.networking.k8s.io changed 2025-12-08T17:51:05.393311130+00:00 stderr F I1208 17:51:05.393255 12 controller.go:231] Updating CRD OpenAPI spec because podmonitors.monitoring.coreos.com changed 2025-12-08T17:51:05.393695211+00:00 stderr F I1208 17:51:05.393620 12 controller.go:231] Updating CRD OpenAPI spec because alertrelabelconfigs.monitoring.openshift.io changed 2025-12-08T17:51:05.393695211+00:00 stderr F I1208 17:51:05.393635 12 controller.go:231] Updating CRD OpenAPI spec because clusterresourcequotas.quota.openshift.io changed 2025-12-08T17:51:05.393695211+00:00 stderr F I1208 17:51:05.393642 12 controller.go:231] Updating CRD OpenAPI spec because consolesamples.console.openshift.io changed 2025-12-08T17:51:05.393728872+00:00 stderr F I1208 17:51:05.393691 12 controller.go:231] Updating CRD OpenAPI spec because operatorconditions.operators.coreos.com changed 2025-12-08T17:51:05.393822065+00:00 stderr F I1208 
17:51:05.393784 12 controller.go:231] Updating CRD OpenAPI spec because projects.config.openshift.io changed 2025-12-08T17:51:05.394087022+00:00 stderr F I1208 17:51:05.394021 12 controller.go:231] Updating CRD OpenAPI spec because servicemonitors.monitoring.coreos.com changed 2025-12-08T17:51:05.394256748+00:00 stderr F I1208 17:51:05.394199 12 controller.go:231] Updating CRD OpenAPI spec because apiservers.config.openshift.io changed 2025-12-08T17:51:05.394256748+00:00 stderr F I1208 17:51:05.394234 12 controller.go:231] Updating CRD OpenAPI spec because gatewayclasses.gateway.networking.k8s.io changed 2025-12-08T17:51:05.394391612+00:00 stderr F I1208 17:51:05.394337 12 controller.go:231] Updating CRD OpenAPI spec because ipamclaims.k8s.cni.cncf.io changed 2025-12-08T17:51:05.394470414+00:00 stderr F I1208 17:51:05.394420 12 controller.go:231] Updating CRD OpenAPI spec because machinehealthchecks.machine.openshift.io changed 2025-12-08T17:51:05.394470414+00:00 stderr F I1208 17:51:05.394446 12 controller.go:231] Updating CRD OpenAPI spec because metal3remediations.infrastructure.cluster.x-k8s.io changed 2025-12-08T17:51:05.394652930+00:00 stderr F I1208 17:51:05.394594 12 controller.go:231] Updating CRD OpenAPI spec because prometheusrules.monitoring.coreos.com changed 2025-12-08T17:51:05.394652930+00:00 stderr F I1208 17:51:05.394610 12 controller.go:231] Updating CRD OpenAPI spec because rangeallocations.security.internal.openshift.io changed 2025-12-08T17:51:05.394652930+00:00 stderr F I1208 17:51:05.394618 12 controller.go:231] Updating CRD OpenAPI spec because containerruntimeconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:51:05.394746412+00:00 stderr F I1208 17:51:05.394692 12 controller.go:231] Updating CRD OpenAPI spec because ingresses.config.openshift.io changed 2025-12-08T17:51:05.394990139+00:00 stderr F I1208 17:51:05.394872 12 controller.go:231] Updating CRD OpenAPI spec because machineconfignodes.machineconfiguration.openshift.io changed 2025-12-08T17:51:05.394990139+00:00 stderr F I1208 17:51:05.394953 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigpools.machineconfiguration.openshift.io changed 2025-12-08T17:51:05.395126673+00:00 stderr F I1208 17:51:05.395069 12 controller.go:231] Updating CRD OpenAPI spec because machineosconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:51:05.395296318+00:00 stderr F I1208 17:51:05.395237 12 controller.go:231] Updating CRD OpenAPI spec because network-attachment-definitions.k8s.cni.cncf.io changed 2025-12-08T17:51:05.395365550+00:00 stderr F I1208 17:51:05.395323 12 controller.go:231] Updating CRD OpenAPI spec because clusterversions.config.openshift.io changed 2025-12-08T17:51:05.395398311+00:00 stderr F I1208 17:51:05.395356 12 controller.go:231] Updating CRD OpenAPI spec because consoleplugins.console.openshift.io changed 2025-12-08T17:51:05.395508594+00:00 stderr F I1208 17:51:05.395455 12 controller.go:231] Updating CRD OpenAPI spec because ingresscontrollers.operator.openshift.io changed 2025-12-08T17:51:05.395557695+00:00 stderr F I1208 17:51:05.395520 12 controller.go:231] Updating CRD OpenAPI spec because kubecontrollermanagers.operator.openshift.io changed 2025-12-08T17:51:05.395557695+00:00 stderr F I1208 17:51:05.395538 12 controller.go:231] Updating CRD OpenAPI spec because openshiftapiservers.operator.openshift.io changed 2025-12-08T17:51:05.395703130+00:00 stderr F I1208 17:51:05.395646 12 controller.go:231] Updating CRD OpenAPI spec because 
catalogsources.operators.coreos.com changed 2025-12-08T17:51:05.395732610+00:00 stderr F I1208 17:51:05.395686 12 controller.go:231] Updating CRD OpenAPI spec because controlplanemachinesets.machine.openshift.io changed 2025-12-08T17:51:05.395818323+00:00 stderr F I1208 17:51:05.395767 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigurations.operator.openshift.io changed 2025-12-08T17:51:05.395818323+00:00 stderr F I1208 17:51:05.395788 12 controller.go:231] Updating CRD OpenAPI spec because machinesets.machine.openshift.io changed 2025-12-08T17:51:05.395818323+00:00 stderr F I1208 17:51:05.395797 12 controller.go:231] Updating CRD OpenAPI spec because metal3remediationtemplates.infrastructure.cluster.x-k8s.io changed 2025-12-08T17:51:05.395936706+00:00 stderr F I1208 17:51:05.395889 12 controller.go:231] Updating CRD OpenAPI spec because clustercsidrivers.operator.openshift.io changed 2025-12-08T17:51:05.395936706+00:00 stderr F I1208 17:51:05.395916 12 controller.go:231] Updating CRD OpenAPI spec because openshiftcontrollermanagers.operator.openshift.io changed 2025-12-08T17:51:05.395936706+00:00 stderr F I1208 17:51:05.395923 12 controller.go:231] Updating CRD OpenAPI spec because userdefinednetworks.k8s.ovn.org changed 2025-12-08T17:51:05.396147252+00:00 stderr F I1208 17:51:05.396095 12 controller.go:231] Updating CRD OpenAPI spec because consoleclidownloads.console.openshift.io changed 2025-12-08T17:51:05.396159013+00:00 stderr F I1208 17:51:05.396137 12 controller.go:231] Updating CRD OpenAPI spec because csisnapshotcontrollers.operator.openshift.io changed 2025-12-08T17:51:05.396159013+00:00 stderr F I1208 17:51:05.396153 12 controller.go:231] Updating CRD OpenAPI spec because imagecontentpolicies.config.openshift.io changed 2025-12-08T17:51:05.396203394+00:00 stderr F I1208 17:51:05.396161 12 controller.go:231] Updating CRD OpenAPI spec because imagetagmirrorsets.config.openshift.io changed 2025-12-08T17:51:05.396249915+00:00 stderr F I1208 17:51:05.396211 12 controller.go:231] Updating CRD OpenAPI spec because schedulers.config.openshift.io changed 2025-12-08T17:51:05.396259966+00:00 stderr F I1208 17:51:05.396239 12 controller.go:231] Updating CRD OpenAPI spec because apirequestcounts.apiserver.openshift.io changed 2025-12-08T17:51:05.396381699+00:00 stderr F I1208 17:51:05.396338 12 controller.go:231] Updating CRD OpenAPI spec because etcds.operator.openshift.io changed 2025-12-08T17:51:05.396413270+00:00 stderr F I1208 17:51:05.396380 12 controller.go:231] Updating CRD OpenAPI spec because imagedigestmirrorsets.config.openshift.io changed 2025-12-08T17:51:05.396465211+00:00 stderr F I1208 17:51:05.396428 12 controller.go:231] Updating CRD OpenAPI spec because operatorhubs.config.openshift.io changed 2025-12-08T17:51:05.396493342+00:00 stderr F I1208 17:51:05.396460 12 controller.go:231] Updating CRD OpenAPI spec because operatorpkis.network.operator.openshift.io changed 2025-12-08T17:51:05.396567204+00:00 stderr F I1208 17:51:05.396526 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.coreos.com changed 2025-12-08T17:51:05.396676877+00:00 stderr F I1208 17:51:05.396633 12 controller.go:231] Updating CRD OpenAPI spec because subscriptions.operators.coreos.com changed 2025-12-08T17:51:05.396676877+00:00 stderr F I1208 17:51:05.396645 12 controller.go:231] Updating CRD OpenAPI spec because authentications.operator.openshift.io changed 2025-12-08T17:51:05.396719939+00:00 stderr F I1208 17:51:05.396686 12 controller.go:231] 
Updating CRD OpenAPI spec because controllerconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:51:05.396839542+00:00 stderr F I1208 17:51:05.396793 12 controller.go:231] Updating CRD OpenAPI spec because imagepolicies.config.openshift.io changed 2025-12-08T17:51:05.397003347+00:00 stderr F I1208 17:51:05.396905 12 controller.go:231] Updating CRD OpenAPI spec because machines.machine.openshift.io changed 2025-12-08T17:51:05.397003347+00:00 stderr F I1208 17:51:05.396919 12 controller.go:231] Updating CRD OpenAPI spec because podnetworkconnectivitychecks.controlplane.operator.openshift.io changed 2025-12-08T17:51:05.397057879+00:00 stderr F I1208 17:51:05.396998 12 controller.go:231] Updating CRD OpenAPI spec because clusteruserdefinednetworks.k8s.ovn.org changed 2025-12-08T17:51:05.397057879+00:00 stderr F I1208 17:51:05.397032 12 controller.go:231] Updating CRD OpenAPI spec because consoleexternalloglinks.console.openshift.io changed 2025-12-08T17:51:05.397195903+00:00 stderr F I1208 17:51:05.397152 12 controller.go:231] Updating CRD OpenAPI spec because consolelinks.console.openshift.io changed 2025-12-08T17:51:05.397339827+00:00 stderr F I1208 17:51:05.397290 12 controller.go:231] Updating CRD OpenAPI spec because dnses.config.openshift.io changed 2025-12-08T17:51:05.397339827+00:00 stderr F I1208 17:51:05.397301 12 controller.go:231] Updating CRD OpenAPI spec because proxies.config.openshift.io changed 2025-12-08T17:51:05.397379139+00:00 stderr F I1208 17:51:05.397345 12 controller.go:231] Updating CRD OpenAPI spec because storagestates.migration.k8s.io changed 2025-12-08T17:51:05.397379139+00:00 stderr F I1208 17:51:05.397360 12 controller.go:231] Updating CRD OpenAPI spec because authentications.config.openshift.io changed 2025-12-08T17:51:05.397544863+00:00 stderr F I1208 17:51:05.397482 12 controller.go:231] Updating CRD OpenAPI spec because clusterserviceversions.operators.coreos.com changed 2025-12-08T17:51:05.397621185+00:00 stderr F I1208 17:51:05.397580 12 controller.go:231] Updating CRD OpenAPI spec because securitycontextconstraints.security.openshift.io changed 2025-12-08T17:51:05.397714698+00:00 stderr F I1208 17:51:05.397661 12 controller.go:231] Updating CRD OpenAPI spec because dnsrecords.ingress.operator.openshift.io changed 2025-12-08T17:51:05.397891433+00:00 stderr F I1208 17:51:05.397822 12 controller.go:231] Updating CRD OpenAPI spec because kubestorageversionmigrators.operator.openshift.io changed 2025-12-08T17:51:05.398079099+00:00 stderr F I1208 17:51:05.398026 12 controller.go:231] Updating CRD OpenAPI spec because machineosbuilds.machineconfiguration.openshift.io changed 2025-12-08T17:51:05.398221223+00:00 stderr F I1208 17:51:05.398177 12 controller.go:231] Updating CRD OpenAPI spec because consolequickstarts.console.openshift.io changed 2025-12-08T17:51:05.398354986+00:00 stderr F I1208 17:51:05.398313 12 controller.go:231] Updating CRD OpenAPI spec because consoleyamlsamples.console.openshift.io changed 2025-12-08T17:51:05.398354986+00:00 stderr F I1208 17:51:05.398328 12 controller.go:231] Updating CRD OpenAPI spec because operatorgroups.operators.coreos.com changed 2025-12-08T17:51:05.398399718+00:00 stderr F I1208 17:51:05.398367 12 controller.go:231] Updating CRD OpenAPI spec because storageversionmigrations.migration.k8s.io changed 2025-12-08T17:51:05.398504301+00:00 stderr F I1208 17:51:05.398463 12 controller.go:231] Updating CRD OpenAPI spec because consoles.operator.openshift.io changed 2025-12-08T17:51:05.398542832+00:00 stderr F I1208 
17:51:05.398510 12 controller.go:231] Updating CRD OpenAPI spec because images.config.openshift.io changed 2025-12-08T17:51:05.398636605+00:00 stderr F I1208 17:51:05.398596 12 controller.go:231] Updating CRD OpenAPI spec because kubeschedulers.operator.openshift.io changed 2025-12-08T17:51:05.398673266+00:00 stderr F I1208 17:51:05.398640 12 controller.go:231] Updating CRD OpenAPI spec because projecthelmchartrepositories.helm.openshift.io changed 2025-12-08T17:51:05.398741638+00:00 stderr F I1208 17:51:05.398702 12 controller.go:231] Updating CRD OpenAPI spec because egressqoses.k8s.ovn.org changed 2025-12-08T17:51:05.398798059+00:00 stderr F I1208 17:51:05.398764 12 controller.go:231] Updating CRD OpenAPI spec because ippools.whereabouts.cni.cncf.io changed 2025-12-08T17:51:05.398873451+00:00 stderr F I1208 17:51:05.398831 12 controller.go:231] Updating CRD OpenAPI spec because egressrouters.network.operator.openshift.io changed 2025-12-08T17:51:05.398950424+00:00 stderr F I1208 17:51:05.398891 12 controller.go:231] Updating CRD OpenAPI spec because imagepruners.imageregistry.operator.openshift.io changed 2025-12-08T17:51:05.399009545+00:00 stderr F I1208 17:51:05.398971 12 controller.go:231] Updating CRD OpenAPI spec because imagecontentsourcepolicies.operator.openshift.io changed 2025-12-08T17:51:05.399167710+00:00 stderr F I1208 17:51:05.399122 12 controller.go:231] Updating CRD OpenAPI spec because consolenotifications.console.openshift.io changed 2025-12-08T17:51:05.399167710+00:00 stderr F I1208 17:51:05.399147 12 controller.go:231] Updating CRD OpenAPI spec because ipaddressclaims.ipam.cluster.x-k8s.io changed 2025-12-08T17:51:05.399300364+00:00 stderr F I1208 17:51:05.399244 12 controller.go:231] Updating CRD OpenAPI spec because kubeletconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:51:05.399300364+00:00 stderr F I1208 17:51:05.399275 12 controller.go:231] Updating CRD OpenAPI spec because nodes.config.openshift.io changed 2025-12-08T17:51:05.399375166+00:00 stderr F I1208 17:51:05.399336 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.coreos.com changed 2025-12-08T17:51:05.399462578+00:00 stderr F I1208 17:51:05.399411 12 controller.go:231] Updating CRD OpenAPI spec because consoles.config.openshift.io changed 2025-12-08T17:51:05.399540460+00:00 stderr F I1208 17:51:05.399502 12 controller.go:231] Updating CRD OpenAPI spec because dnses.operator.openshift.io changed 2025-12-08T17:51:05.399540460+00:00 stderr F I1208 17:51:05.399521 12 controller.go:231] Updating CRD OpenAPI spec because egressfirewalls.k8s.ovn.org changed 2025-12-08T17:51:05.399540460+00:00 stderr F I1208 17:51:05.399529 12 controller.go:231] Updating CRD OpenAPI spec because httproutes.gateway.networking.k8s.io changed 2025-12-08T17:51:05.399594882+00:00 stderr F I1208 17:51:05.399544 12 controller.go:231] Updating CRD OpenAPI spec because storages.operator.openshift.io changed 2025-12-08T17:51:05.399682704+00:00 stderr F I1208 17:51:05.399622 12 controller.go:231] Updating CRD OpenAPI spec because machineautoscalers.autoscaling.openshift.io changed 2025-12-08T17:51:05.399682704+00:00 stderr F I1208 17:51:05.399659 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:51:05.399682704+00:00 stderr F I1208 17:51:05.399670 12 controller.go:231] Updating CRD OpenAPI spec because servicecas.operator.openshift.io changed 2025-12-08T17:51:05.399695005+00:00 stderr F I1208 17:51:05.399678 12 
controller.go:231] Updating CRD OpenAPI spec because adminpolicybasedexternalroutes.k8s.ovn.org changed 2025-12-08T17:51:05.399736856+00:00 stderr F I1208 17:51:05.399699 12 controller.go:231] Updating CRD OpenAPI spec because configs.samples.operator.openshift.io changed 2025-12-08T17:51:05.399736856+00:00 stderr F I1208 17:51:05.399709 12 controller.go:231] Updating CRD OpenAPI spec because helmchartrepositories.helm.openshift.io changed 2025-12-08T17:51:05.399921082+00:00 stderr F I1208 17:51:05.399855 12 controller.go:231] Updating CRD OpenAPI spec because adminnetworkpolicies.policy.networking.k8s.io changed 2025-12-08T17:51:05.400030015+00:00 stderr F I1208 17:51:05.399981 12 controller.go:231] Updating CRD OpenAPI spec because featuregates.config.openshift.io changed 2025-12-08T17:51:05.400030015+00:00 stderr F I1208 17:51:05.399996 12 controller.go:231] Updating CRD OpenAPI spec because grpcroutes.gateway.networking.k8s.io changed 2025-12-08T17:51:05.400030015+00:00 stderr F I1208 17:51:05.400009 12 controller.go:231] Updating CRD OpenAPI spec because installplans.operators.coreos.com changed 2025-12-08T17:51:05.400205270+00:00 stderr F I1208 17:51:05.400155 12 controller.go:231] Updating CRD OpenAPI spec because operators.operators.coreos.com changed 2025-12-08T17:51:05.400581321+00:00 stderr F I1208 17:51:05.400525 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.coreos.com changed 2025-12-08T17:51:05.400581321+00:00 stderr F I1208 17:51:05.400544 12 controller.go:231] Updating CRD OpenAPI spec because clusterautoscalers.autoscaling.openshift.io changed 2025-12-08T17:51:05.400581321+00:00 stderr F I1208 17:51:05.400551 12 controller.go:231] Updating CRD OpenAPI spec because kubeapiservers.operator.openshift.io changed 2025-12-08T17:51:05.400598422+00:00 stderr F I1208 17:51:05.400574 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.coreos.com changed 2025-12-08T17:51:05.400598422+00:00 stderr F I1208 17:51:05.400584 12 controller.go:231] Updating CRD OpenAPI spec because builds.config.openshift.io changed 2025-12-08T17:51:05.400704795+00:00 stderr F I1208 17:51:05.400656 12 controller.go:231] Updating CRD OpenAPI spec because infrastructures.config.openshift.io changed 2025-12-08T17:51:05.400704795+00:00 stderr F I1208 17:51:05.400678 12 controller.go:231] Updating CRD OpenAPI spec because networks.operator.openshift.io changed 2025-12-08T17:51:05.400704795+00:00 stderr F I1208 17:51:05.400687 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.coreos.com changed 2025-12-08T17:51:05.400973502+00:00 stderr F I1208 17:51:05.400930 12 controller.go:231] Updating CRD OpenAPI spec because egressips.k8s.ovn.org changed 2025-12-08T17:51:05.400973502+00:00 stderr F I1208 17:51:05.400948 12 controller.go:231] Updating CRD OpenAPI spec because ipaddresses.ipam.cluster.x-k8s.io changed 2025-12-08T17:51:05.401062595+00:00 stderr F I1208 17:51:05.401020 12 controller.go:231] Updating CRD OpenAPI spec because clusteroperators.config.openshift.io changed 2025-12-08T17:51:05.401062595+00:00 stderr F I1208 17:51:05.401036 12 controller.go:231] Updating CRD OpenAPI spec because egressservices.k8s.ovn.org changed 2025-12-08T17:51:25.292705165+00:00 stderr F I1208 17:51:25.292408 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.005728505463440753 seatDemandStdev=0.12220446514634696 seatDemandSmoothed=7.179236455130141 fairFrac=2.2796127562642368 currentCL=5 
concurrencyDenominator=5 backstop=false 2025-12-08T17:51:45.294726303+00:00 stderr F I1208 17:51:45.294467 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0012106381033334978 seatDemandStdev=0.034773157157730945 seatDemandSmoothed=6.855553238995646 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:51:45.882968431+00:00 stderr F I1208 17:51:45.880021 12 cacher.go:847] cacher (podnetworkconnectivitychecks.controlplane.operator.openshift.io): 1 objects queued in incoming channel. 2025-12-08T17:51:45.882968431+00:00 stderr F I1208 17:51:45.880061 12 cacher.go:847] cacher (podnetworkconnectivitychecks.controlplane.operator.openshift.io): 2 objects queued in incoming channel. 2025-12-08T17:52:25.296842552+00:00 stderr F I1208 17:52:25.296547 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.006335221849494224 seatDemandStdev=0.1300731388351337 seatDemandSmoothed=6.251961039678037 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:52:45.298630450+00:00 stderr F I1208 17:52:45.298332 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0026988074137941948 seatDemandStdev=0.05187989834548102 seatDemandSmoothed=5.970666558231628 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:53:18.736724635+00:00 stderr F I1208 17:53:18.736164 12 cacher.go:847] cacher (secrets): 1 objects queued in incoming channel. 2025-12-08T17:53:18.736724635+00:00 stderr F I1208 17:53:18.736197 12 cacher.go:847] cacher (secrets): 2 objects queued in incoming channel. 2025-12-08T17:53:19.494536190+00:00 stderr F I1208 17:53:19.494378 12 cacher.go:847] cacher (rolebindings.rbac.authorization.k8s.io): 1 objects queued in incoming channel. 2025-12-08T17:53:19.494536190+00:00 stderr F I1208 17:53:19.494400 12 cacher.go:847] cacher (rolebindings.rbac.authorization.k8s.io): 2 objects queued in incoming channel. 
2025-12-08T17:53:25.300718844+00:00 stderr F I1208 17:53:25.300465 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.029693271721730204 seatDemandStdev=0.24008272216613122 seatDemandSmoothed=5.44889112156075 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:53:35.301123086+00:00 stderr F I1208 17:53:35.300965 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=4 seatDemandAvg=0.013598834362118494 seatDemandStdev=0.12305390830201575 seatDemandSmoothed=5.326709638846127 fairFrac=2.2796127562642368 currentCL=4 concurrencyDenominator=4 backstop=false 2025-12-08T17:53:41.328484823+00:00 stderr F I1208 17:53:41.328352 12 controller.go:667] quota admission added evaluator for: controllerrevisions.apps 2025-12-08T17:53:45.302205368+00:00 stderr F I1208 17:53:45.302026 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0009663853754391975 seatDemandStdev=0.031071715027422203 seatDemandSmoothed=5.204932193461932 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:54:25.304593028+00:00 stderr F I1208 17:54:25.304347 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.006601447926350062 seatDemandStdev=0.14046955723719148 seatDemandSmoothed=4.748355884262704 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:54:28.308239710+00:00 stderr F I1208 17:54:28.308044 12 cacher.go:847] cacher (configmaps): 1 objects queued in incoming channel. 2025-12-08T17:54:28.308239710+00:00 stderr F I1208 17:54:28.308094 12 cacher.go:847] cacher (configmaps): 2 objects queued in incoming channel. 2025-12-08T17:54:28.308239710+00:00 stderr F I1208 17:54:28.308124 12 cacher.go:847] cacher (serviceaccounts): 1 objects queued in incoming channel. 2025-12-08T17:54:28.308239710+00:00 stderr F I1208 17:54:28.308145 12 cacher.go:847] cacher (serviceaccounts): 2 objects queued in incoming channel. 2025-12-08T17:54:28.308239710+00:00 stderr F I1208 17:54:28.308161 12 cacher.go:847] cacher (serviceaccounts): 3 objects queued in incoming channel. 2025-12-08T17:54:28.308239710+00:00 stderr F I1208 17:54:28.308168 12 cacher.go:847] cacher (serviceaccounts): 4 objects queued in incoming channel. 
2025-12-08T17:54:34.519625705+00:00 stderr F W1208 17:54:34.519351 12 watcher.go:338] watch chan error: etcdserver: mvcc: required revision has been compacted 2025-12-08T17:54:45.306612622+00:00 stderr F I1208 17:54:45.306428 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.00236248041380638 seatDemandStdev=0.0485479052081628 seatDemandSmoothed=4.536823470146762 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:54:50.059973071+00:00 stderr F I1208 17:54:50.058388 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:54:50.059973071+00:00 stderr F I1208 17:54:50.058420 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:54:50.059973071+00:00 stderr F I1208 17:54:50.058426 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 48.382µs 2025-12-08T17:54:51.042297606+00:00 stderr F I1208 17:54:51.041281 12 controller.go:667] quota admission added evaluator for: operatorgroups.operators.coreos.com 2025-12-08T17:54:51.042297606+00:00 stderr F I1208 17:54:51.041416 12 controller.go:667] quota admission added evaluator for: operatorgroups.operators.coreos.com 2025-12-08T17:54:51.204455430+00:00 stderr F I1208 17:54:51.204065 12 controller.go:667] quota admission added evaluator for: poddisruptionbudgets.policy 2025-12-08T17:54:52.082562751+00:00 stderr F I1208 17:54:52.082407 12 controller.go:667] quota admission added evaluator for: subscriptions.operators.coreos.com 2025-12-08T17:54:52.082562751+00:00 stderr F I1208 17:54:52.082470 12 controller.go:667] quota admission added evaluator for: subscriptions.operators.coreos.com 2025-12-08T17:54:55.307234941+00:00 stderr F I1208 17:54:55.307022 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=3 seatDemandAvg=0.003326503219263804 seatDemandStdev=0.06255684666972856 seatDemandSmoothed=4.433991847380833 fairFrac=2.2796127562642368 currentCL=3 concurrencyDenominator=3 backstop=false 2025-12-08T17:54:56.109124040+00:00 stderr F I1208 17:54:56.108950 12 controller.go:667] quota admission added evaluator for: jobs.batch 2025-12-08T17:54:56.109124040+00:00 stderr F I1208 17:54:56.109027 12 controller.go:667] quota admission added evaluator for: jobs.batch 2025-12-08T17:54:58.911861496+00:00 stderr F I1208 17:54:58.911569 12 trace.go:236] Trace[230713801]: "Update" accept:application/json, */*,audit-id:fb360934-014a-44ec-a600-81f766021b34,client:10.217.0.15,api-group:operators.coreos.com,api-version:v1alpha1,name:openshift-cert-manager-operator,subresource:status,namespace:cert-manager-operator,protocol:HTTP/2.0,resource:subscriptions,scope:resource,url:/apis/operators.coreos.com/v1alpha1/namespaces/cert-manager-operator/subscriptions/openshift-cert-manager-operator/status,user-agent:catalog/v0.0.0 (linux/amd64) kubernetes/$Format,verb:PUT (08-Dec-2025 17:54:58.271) (total time: 639ms): 2025-12-08T17:54:58.911861496+00:00 stderr F Trace[230713801]: ["GuaranteedUpdate etcd3" audit-id:fb360934-014a-44ec-a600-81f766021b34,key:/operators.coreos.com/subscriptions/cert-manager-operator/openshift-cert-manager-operator,type:*unstructured.Unstructured,resource:subscriptions.operators.coreos.com 639ms (17:54:58.272)] 2025-12-08T17:54:58.911861496+00:00 stderr F Trace[230713801]: [639.558381ms] [639.558381ms] END 2025-12-08T17:55:05.307602333+00:00 stderr F I1208 17:55:05.307396 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 
seatDemandAvg=0.006441958265934979 seatDemandStdev=0.0800028714461858 seatDemandSmoothed=4.3339982659744525 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:55:07.009870262+00:00 stderr F I1208 17:55:07.009701 12 controller.go:667] quota admission added evaluator for: installplans.operators.coreos.com 2025-12-08T17:55:07.009870262+00:00 stderr F I1208 17:55:07.009771 12 controller.go:667] quota admission added evaluator for: installplans.operators.coreos.com 2025-12-08T17:55:07.787370426+00:00 stderr F I1208 17:55:07.787200 12 controller.go:667] quota admission added evaluator for: clusterserviceversions.operators.coreos.com 2025-12-08T17:55:07.787418537+00:00 stderr F I1208 17:55:07.787370 12 controller.go:667] quota admission added evaluator for: clusterserviceversions.operators.coreos.com 2025-12-08T17:55:07.811077053+00:00 stderr F I1208 17:55:07.810975 12 controller.go:667] quota admission added evaluator for: operatorconditions.operators.coreos.com 2025-12-08T17:55:07.811077053+00:00 stderr F I1208 17:55:07.811033 12 controller.go:667] quota admission added evaluator for: operatorconditions.operators.coreos.com 2025-12-08T17:55:08.729600802+00:00 stderr F I1208 17:55:08.729331 12 controller.go:237] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.rhobs changed 2025-12-08T17:55:08.730562389+00:00 stderr F I1208 17:55:08.730486 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1alpha1 to ResourceManager 2025-12-08T17:55:08.940398255+00:00 stderr F I1208 17:55:08.940285 12 controller.go:237] Updating CRD OpenAPI spec because alertmanagers.monitoring.rhobs changed 2025-12-08T17:55:08.940452896+00:00 stderr F I1208 17:55:08.940410 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1 to ResourceManager 2025-12-08T17:55:08.964430592+00:00 stderr F I1208 17:55:08.964305 12 cacher.go:847] cacher (customresourcedefinitions.apiextensions.k8s.io): 1 objects queued in incoming channel. 2025-12-08T17:55:08.964430592+00:00 stderr F I1208 17:55:08.964339 12 cacher.go:847] cacher (customresourcedefinitions.apiextensions.k8s.io): 2 objects queued in incoming channel. 
2025-12-08T17:55:09.029152183+00:00 stderr F I1208 17:55:09.028420 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1alpha1 to ResourceManager 2025-12-08T17:55:09.030538601+00:00 stderr F I1208 17:55:09.030476 12 controller.go:237] Updating CRD OpenAPI spec because monitoringstacks.monitoring.rhobs changed 2025-12-08T17:55:09.099026514+00:00 stderr F I1208 17:55:09.093712 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1 to ResourceManager 2025-12-08T17:55:09.099026514+00:00 stderr F I1208 17:55:09.096518 12 controller.go:237] Updating CRD OpenAPI spec because podmonitors.monitoring.rhobs changed 2025-12-08T17:55:09.212141747+00:00 stderr F I1208 17:55:09.212012 12 controller.go:237] Updating CRD OpenAPI spec because probes.monitoring.rhobs changed 2025-12-08T17:55:09.229572197+00:00 stderr F I1208 17:55:09.229427 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1 to ResourceManager 2025-12-08T17:55:09.397360212+00:00 stderr F I1208 17:55:09.397231 12 controller.go:237] Updating CRD OpenAPI spec because prometheusagents.monitoring.rhobs changed 2025-12-08T17:55:09.398156943+00:00 stderr F I1208 17:55:09.398033 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1alpha1 to ResourceManager 2025-12-08T17:55:09.543074794+00:00 stderr F I1208 17:55:09.542896 12 trace.go:236] Trace[602825446]: "Patch" accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json,audit-id:184c4fe7-245a-4e56-a575-6236f9bb4fce,client:10.217.0.26,api-group:apiextensions.k8s.io,api-version:v1,name:alertmanagerconfigs.monitoring.rhobs,subresource:,namespace:,protocol:HTTP/2.0,resource:customresourcedefinitions,scope:resource,url:/apis/apiextensions.k8s.io/v1/customresourcedefinitions/alertmanagerconfigs.monitoring.rhobs,user-agent:olm/v0.0.0 (linux/amd64) kubernetes/$Format,verb:PATCH (08-Dec-2025 17:55:07.994) (total time: 1548ms): 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ["GuaranteedUpdate etcd3" audit-id:184c4fe7-245a-4e56-a575-6236f9bb4fce,key:/apiextensions.k8s.io/customresourcedefinitions/alertmanagerconfigs.monitoring.rhobs,type:*apiextensions.CustomResourceDefinition,resource:customresourcedefinitions.apiextensions.k8s.io 1488ms (17:55:08.054) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"initial value restored" 74ms (17:55:08.128) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"About to Encode" 322ms (17:55:08.451) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"About to Encode" 420ms (17:55:08.910) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"Retry value restored" 55ms (17:55:08.981) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"About to Encode" 457ms (17:55:09.439) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"Encode succeeded" len:355066 69ms (17:55:09.508) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"decode succeeded" len:355066 27ms (17:55:09.542)] 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"About to check admission control" 110ms (17:55:08.239) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"About to apply patch" 251ms (17:55:08.490) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"About to check admission control" 160ms (17:55:08.650) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"About to apply patch" 331ms (17:55:08.981) 
2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"About to check admission control" 281ms (17:55:09.263) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: ---"Object stored in database" 279ms (17:55:09.542) 2025-12-08T17:55:09.543074794+00:00 stderr F Trace[602825446]: [1.548669425s] [1.548669425s] END 2025-12-08T17:55:09.636609330+00:00 stderr F I1208 17:55:09.636512 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.rhobs changed 2025-12-08T17:55:09.711453465+00:00 stderr F I1208 17:55:09.710387 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.rhobs changed 2025-12-08T17:55:09.752542050+00:00 stderr F I1208 17:55:09.751976 12 controller.go:231] Updating CRD OpenAPI spec because podmonitors.monitoring.rhobs changed 2025-12-08T17:55:09.762482257+00:00 stderr F I1208 17:55:09.762336 12 controller.go:237] Updating CRD OpenAPI spec because prometheuses.monitoring.rhobs changed 2025-12-08T17:55:09.774585403+00:00 stderr F I1208 17:55:09.772697 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1 to ResourceManager 2025-12-08T17:55:09.779968008+00:00 stderr F I1208 17:55:09.779805 12 controller.go:231] Updating CRD OpenAPI spec because monitoringstacks.monitoring.rhobs changed 2025-12-08T17:55:09.786791202+00:00 stderr F I1208 17:55:09.786274 12 controller.go:237] Updating CRD OpenAPI spec because prometheusrules.monitoring.rhobs changed 2025-12-08T17:55:09.786791202+00:00 stderr F I1208 17:55:09.786710 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1 to ResourceManager 2025-12-08T17:55:10.021592500+00:00 stderr F I1208 17:55:10.021488 12 controller.go:667] quota admission added evaluator for: poddisruptionbudgets.policy 2025-12-08T17:55:10.064950197+00:00 stderr F I1208 17:55:10.062123 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.rhobs changed 2025-12-08T17:55:10.151484605+00:00 stderr F I1208 17:55:10.149466 12 controller.go:237] Updating CRD OpenAPI spec because scrapeconfigs.monitoring.rhobs changed 2025-12-08T17:55:10.151484605+00:00 stderr F I1208 17:55:10.149636 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1alpha1 to ResourceManager 2025-12-08T17:55:10.164078824+00:00 stderr F I1208 17:55:10.162468 12 controller.go:237] Updating CRD OpenAPI spec because servicemonitors.monitoring.rhobs changed 2025-12-08T17:55:10.164078824+00:00 stderr F I1208 17:55:10.162631 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1 to ResourceManager 2025-12-08T17:55:10.185505361+00:00 stderr F I1208 17:55:10.185349 12 trace.go:236] Trace[1995772544]: "Patch" accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json,audit-id:9a1390c0-1f6a-4225-9010-7b61383cbc42,client:10.217.0.26,api-group:apiextensions.k8s.io,api-version:v1,name:prometheusagents.monitoring.rhobs,subresource:,namespace:,protocol:HTTP/2.0,resource:customresourcedefinitions,scope:resource,url:/apis/apiextensions.k8s.io/v1/customresourcedefinitions/prometheusagents.monitoring.rhobs,user-agent:olm/v0.0.0 (linux/amd64) kubernetes/$Format,verb:PATCH (08-Dec-2025 17:55:09.652) (total time: 533ms): 2025-12-08T17:55:10.185505361+00:00 stderr F Trace[1995772544]: ["GuaranteedUpdate etcd3" 
audit-id:9a1390c0-1f6a-4225-9010-7b61383cbc42,key:/apiextensions.k8s.io/customresourcedefinitions/prometheusagents.monitoring.rhobs,type:*apiextensions.CustomResourceDefinition,resource:customresourcedefinitions.apiextensions.k8s.io 519ms (17:55:09.666) 2025-12-08T17:55:10.185505361+00:00 stderr F Trace[1995772544]: ---"About to Encode" 453ms (17:55:10.152)] 2025-12-08T17:55:10.185505361+00:00 stderr F Trace[1995772544]: ---"About to check admission control" 191ms (17:55:09.889) 2025-12-08T17:55:10.185505361+00:00 stderr F Trace[1995772544]: ---"Object stored in database" 295ms (17:55:10.185) 2025-12-08T17:55:10.185505361+00:00 stderr F Trace[1995772544]: [533.068004ms] [533.068004ms] END 2025-12-08T17:55:10.208058988+00:00 stderr F I1208 17:55:10.207919 12 controller.go:231] Updating CRD OpenAPI spec because prometheusagents.monitoring.rhobs changed 2025-12-08T17:55:10.208245913+00:00 stderr F I1208 17:55:10.208190 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1alpha1 to ResourceManager 2025-12-08T17:55:10.208291824+00:00 stderr F I1208 17:55:10.208251 12 controller.go:237] Updating CRD OpenAPI spec because thanosqueriers.monitoring.rhobs changed 2025-12-08T17:55:10.409980441+00:00 stderr F I1208 17:55:10.406897 12 controller.go:237] Updating CRD OpenAPI spec because thanosrulers.monitoring.rhobs changed 2025-12-08T17:55:10.409980441+00:00 stderr F I1208 17:55:10.407947 12 handler.go:288] Adding GroupVersion monitoring.rhobs v1 to ResourceManager 2025-12-08T17:55:10.591816175+00:00 stderr F I1208 17:55:10.591644 12 trace.go:236] Trace[952336273]: "Patch" accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json,audit-id:0a286791-a64f-49ff-802b-003c16e13c02,client:10.217.0.26,api-group:apiextensions.k8s.io,api-version:v1,name:prometheuses.monitoring.rhobs,subresource:,namespace:,protocol:HTTP/2.0,resource:customresourcedefinitions,scope:resource,url:/apis/apiextensions.k8s.io/v1/customresourcedefinitions/prometheuses.monitoring.rhobs,user-agent:olm/v0.0.0 (linux/amd64) kubernetes/$Format,verb:PATCH (08-Dec-2025 17:55:09.650) (total time: 940ms): 2025-12-08T17:55:10.591816175+00:00 stderr F Trace[952336273]: ["GuaranteedUpdate etcd3" audit-id:0a286791-a64f-49ff-802b-003c16e13c02,key:/apiextensions.k8s.io/customresourcedefinitions/prometheuses.monitoring.rhobs,type:*apiextensions.CustomResourceDefinition,resource:customresourcedefinitions.apiextensions.k8s.io 912ms (17:55:09.679) 2025-12-08T17:55:10.591816175+00:00 stderr F Trace[952336273]: ---"About to Encode" 457ms (17:55:10.155) 2025-12-08T17:55:10.591816175+00:00 stderr F Trace[952336273]: ---"About to Encode" 304ms (17:55:10.492) 2025-12-08T17:55:10.591816175+00:00 stderr F Trace[952336273]: ---"decode succeeded" len:475803 56ms (17:55:10.591)] 2025-12-08T17:55:10.591816175+00:00 stderr F Trace[952336273]: ---"About to check admission control" 227ms (17:55:09.925) 2025-12-08T17:55:10.591816175+00:00 stderr F Trace[952336273]: ---"About to apply patch" 262ms (17:55:10.187) 2025-12-08T17:55:10.591816175+00:00 stderr F Trace[952336273]: ---"About to check admission control" 111ms (17:55:10.298) 2025-12-08T17:55:10.591816175+00:00 stderr F Trace[952336273]: ---"Object stored in database" 292ms (17:55:10.591) 2025-12-08T17:55:10.591816175+00:00 stderr F Trace[952336273]: [940.855969ms] [940.855969ms] END 2025-12-08T17:55:10.610680353+00:00 stderr F I1208 17:55:10.608328 12 trace.go:236] Trace[27961284]: "Patch" 
accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json,audit-id:064ab58b-e5c5-48f0-8137-19364bf78430,client:10.217.0.26,api-group:apiextensions.k8s.io,api-version:v1,name:scrapeconfigs.monitoring.rhobs,subresource:,namespace:,protocol:HTTP/2.0,resource:customresourcedefinitions,scope:resource,url:/apis/apiextensions.k8s.io/v1/customresourcedefinitions/scrapeconfigs.monitoring.rhobs,user-agent:olm/v0.0.0 (linux/amd64) kubernetes/$Format,verb:PATCH (08-Dec-2025 17:55:09.650) (total time: 957ms): 2025-12-08T17:55:10.610680353+00:00 stderr F Trace[27961284]: ["GuaranteedUpdate etcd3" audit-id:064ab58b-e5c5-48f0-8137-19364bf78430,key:/apiextensions.k8s.io/customresourcedefinitions/scrapeconfigs.monitoring.rhobs,type:*apiextensions.CustomResourceDefinition,resource:customresourcedefinitions.apiextensions.k8s.io 938ms (17:55:09.670) 2025-12-08T17:55:10.610680353+00:00 stderr F Trace[27961284]: ---"About to Encode" 443ms (17:55:10.129) 2025-12-08T17:55:10.610680353+00:00 stderr F Trace[27961284]: ---"About to Encode" 379ms (17:55:10.549) 2025-12-08T17:55:10.610680353+00:00 stderr F Trace[27961284]: ---"Encode succeeded" len:379147 33ms (17:55:10.582)] 2025-12-08T17:55:10.610680353+00:00 stderr F Trace[27961284]: ---"About to check admission control" 151ms (17:55:09.836) 2025-12-08T17:55:10.610680353+00:00 stderr F Trace[27961284]: ---"About to apply patch" 333ms (17:55:10.169) 2025-12-08T17:55:10.610680353+00:00 stderr F Trace[27961284]: ---"About to check admission control" 140ms (17:55:10.310) 2025-12-08T17:55:10.610680353+00:00 stderr F Trace[27961284]: ---"Object stored in database" 298ms (17:55:10.608) 2025-12-08T17:55:10.610680353+00:00 stderr F Trace[27961284]: [957.899257ms] [957.899257ms] END 2025-12-08T17:55:10.625539482+00:00 stderr F I1208 17:55:10.625432 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.rhobs changed 2025-12-08T17:55:10.635499831+00:00 stderr F I1208 17:55:10.628809 12 alloc.go:328] "allocated clusterIPs" service="openshift-operators/obo-prometheus-operator-admission-webhook" clusterIPs={"IPv4":"10.217.4.184"} 2025-12-08T17:55:10.635499831+00:00 stderr F I1208 17:55:10.629280 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:10.635499831+00:00 stderr F I1208 17:55:10.629302 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:55:10.635499831+00:00 stderr F I1208 17:55:10.629306 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 32.471µs 2025-12-08T17:55:10.649019704+00:00 stderr F I1208 17:55:10.648926 12 controller.go:231] Updating CRD OpenAPI spec because scrapeconfigs.monitoring.rhobs changed 2025-12-08T17:55:10.733733744+00:00 stderr F I1208 17:55:10.733580 12 trace.go:236] Trace[421878106]: "Patch" accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json,audit-id:8c4abd67-eabc-4d71-94dc-9c277f3af29d,client:10.217.0.26,api-group:apiextensions.k8s.io,api-version:v1,name:thanosrulers.monitoring.rhobs,subresource:,namespace:,protocol:HTTP/2.0,resource:customresourcedefinitions,scope:resource,url:/apis/apiextensions.k8s.io/v1/customresourcedefinitions/thanosrulers.monitoring.rhobs,user-agent:olm/v0.0.0 (linux/amd64) kubernetes/$Format,verb:PATCH (08-Dec-2025 17:55:09.647) (total time: 1085ms): 2025-12-08T17:55:10.733733744+00:00 stderr F 
Trace[421878106]: ["GuaranteedUpdate etcd3" audit-id:8c4abd67-eabc-4d71-94dc-9c277f3af29d,key:/apiextensions.k8s.io/customresourcedefinitions/thanosrulers.monitoring.rhobs,type:*apiextensions.CustomResourceDefinition,resource:customresourcedefinitions.apiextensions.k8s.io 1073ms (17:55:09.660) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"About to Encode" 370ms (17:55:10.049) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"Encode succeeded" len:342854 41ms (17:55:10.090) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"About to Encode" 203ms (17:55:10.311) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"Encode succeeded" len:343814 53ms (17:55:10.365) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"About to Encode" 323ms (17:55:10.711)] 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"About to check admission control" 127ms (17:55:09.807) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"About to apply patch" 300ms (17:55:10.108) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"About to check admission control" 118ms (17:55:10.227) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"About to apply patch" 161ms (17:55:10.388) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"About to check admission control" 214ms (17:55:10.603) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: ---"Object stored in database" 129ms (17:55:10.733) 2025-12-08T17:55:10.733733744+00:00 stderr F Trace[421878106]: [1.085736218s] [1.085736218s] END 2025-12-08T17:55:10.747907946+00:00 stderr F I1208 17:55:10.746845 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.rhobs changed 2025-12-08T17:55:11.622662286+00:00 stderr F I1208 17:55:11.621258 12 controller.go:667] quota admission added evaluator for: prometheusrules.monitoring.coreos.com 2025-12-08T17:55:12.029098633+00:00 stderr F I1208 17:55:12.028965 12 alloc.go:328] "allocated clusterIPs" service="openshift-operators/observability-operator" clusterIPs={"IPv4":"10.217.5.112"} 2025-12-08T17:55:12.032795462+00:00 stderr F I1208 17:55:12.032203 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:12.032795462+00:00 stderr F I1208 17:55:12.032229 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:55:12.032795462+00:00 stderr F I1208 17:55:12.032234 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 37.421µs 2025-12-08T17:55:12.077679330+00:00 stderr F I1208 17:55:12.077571 12 controller.go:237] Updating CRD OpenAPI spec because observabilityinstallers.observability.openshift.io changed 2025-12-08T17:55:12.077821794+00:00 stderr F I1208 17:55:12.077774 12 handler.go:288] Adding GroupVersion observability.openshift.io v1alpha1 to ResourceManager 2025-12-08T17:55:12.099952720+00:00 stderr F I1208 17:55:12.099553 12 controller.go:231] Updating CRD OpenAPI spec because observabilityinstallers.observability.openshift.io changed 2025-12-08T17:55:12.106106215+00:00 stderr F I1208 17:55:12.106000 12 controller.go:237] Updating CRD OpenAPI spec because uiplugins.observability.openshift.io changed 2025-12-08T17:55:12.106298341+00:00 stderr F I1208 17:55:12.106225 12 handler.go:288] Adding GroupVersion observability.openshift.io v1alpha1 to ResourceManager 2025-12-08T17:55:12.152704079+00:00 stderr F I1208 17:55:12.151419 12 controller.go:231] Updating CRD OpenAPI spec because 
uiplugins.observability.openshift.io changed 2025-12-08T17:55:12.261837597+00:00 stderr F I1208 17:55:12.261700 12 handler.go:288] Adding GroupVersion perses.dev v1alpha1 to ResourceManager 2025-12-08T17:55:12.261901798+00:00 stderr F I1208 17:55:12.261814 12 controller.go:237] Updating CRD OpenAPI spec because perses.perses.dev changed 2025-12-08T17:55:12.270101999+00:00 stderr F I1208 17:55:12.269987 12 controller.go:237] Updating CRD OpenAPI spec because persesdashboards.perses.dev changed 2025-12-08T17:55:12.271170277+00:00 stderr F I1208 17:55:12.270546 12 handler.go:288] Adding GroupVersion perses.dev v1alpha1 to ResourceManager 2025-12-08T17:55:12.279769499+00:00 stderr F I1208 17:55:12.278830 12 controller.go:237] Updating CRD OpenAPI spec because persesdatasources.perses.dev changed 2025-12-08T17:55:12.279769499+00:00 stderr F I1208 17:55:12.278927 12 handler.go:288] Adding GroupVersion perses.dev v1alpha1 to ResourceManager 2025-12-08T17:55:12.378054155+00:00 stderr F I1208 17:55:12.377901 12 controller.go:231] Updating CRD OpenAPI spec because perses.perses.dev changed 2025-12-08T17:55:12.401331311+00:00 stderr F I1208 17:55:12.401208 12 controller.go:231] Updating CRD OpenAPI spec because persesdatasources.perses.dev changed 2025-12-08T17:55:12.404070635+00:00 stderr F I1208 17:55:12.402057 12 controller.go:231] Updating CRD OpenAPI spec because persesdashboards.perses.dev changed 2025-12-08T17:55:13.605545107+00:00 stderr F I1208 17:55:13.605369 12 cacher.go:847] cacher (apiservices.apiregistration.k8s.io): 1 objects queued in incoming channel. 2025-12-08T17:55:13.605545107+00:00 stderr F I1208 17:55:13.605397 12 cacher.go:847] cacher (apiservices.apiregistration.k8s.io): 2 objects queued in incoming channel. 2025-12-08T17:55:13.610932392+00:00 stderr F I1208 17:55:13.609111 12 controller.go:237] Updating CRD OpenAPI spec because agents.agent.k8s.elastic.co changed 2025-12-08T17:55:13.610932392+00:00 stderr F I1208 17:55:13.610315 12 handler.go:288] Adding GroupVersion agent.k8s.elastic.co v1alpha1 to ResourceManager 2025-12-08T17:55:13.662278543+00:00 stderr F I1208 17:55:13.661617 12 controller.go:237] Updating CRD OpenAPI spec because apmservers.apm.k8s.elastic.co changed 2025-12-08T17:55:13.662278543+00:00 stderr F I1208 17:55:13.662034 12 handler.go:288] Adding GroupVersion apm.k8s.elastic.co v1 to ResourceManager 2025-12-08T17:55:13.662278543+00:00 stderr F I1208 17:55:13.662099 12 handler.go:288] Adding GroupVersion apm.k8s.elastic.co v1beta1 to ResourceManager 2025-12-08T17:55:13.663065354+00:00 stderr F I1208 17:55:13.662999 12 handler.go:288] Adding GroupVersion beat.k8s.elastic.co v1beta1 to ResourceManager 2025-12-08T17:55:13.663085825+00:00 stderr F I1208 17:55:13.663061 12 controller.go:237] Updating CRD OpenAPI spec because beats.beat.k8s.elastic.co changed 2025-12-08T17:55:13.695509568+00:00 stderr F I1208 17:55:13.695397 12 controller.go:237] Updating CRD OpenAPI spec because elasticmapsservers.maps.k8s.elastic.co changed 2025-12-08T17:55:13.696203366+00:00 stderr F I1208 17:55:13.696145 12 handler.go:288] Adding GroupVersion maps.k8s.elastic.co v1alpha1 to ResourceManager 2025-12-08T17:55:13.712949367+00:00 stderr F I1208 17:55:13.712431 12 controller.go:237] Updating CRD OpenAPI spec because elasticsearchautoscalers.autoscaling.k8s.elastic.co changed 2025-12-08T17:55:13.712949367+00:00 stderr F I1208 17:55:13.712645 12 handler.go:288] Adding GroupVersion autoscaling.k8s.elastic.co v1alpha1 to ResourceManager 2025-12-08T17:55:13.873898589+00:00 stderr F I1208 
17:55:13.873780 12 handler.go:288] Adding GroupVersion elasticsearch.k8s.elastic.co v1 to ResourceManager 2025-12-08T17:55:13.873930890+00:00 stderr F I1208 17:55:13.873848 12 handler.go:288] Adding GroupVersion elasticsearch.k8s.elastic.co v1beta1 to ResourceManager 2025-12-08T17:55:13.876452737+00:00 stderr F I1208 17:55:13.876389 12 controller.go:237] Updating CRD OpenAPI spec because elasticsearches.elasticsearch.k8s.elastic.co changed 2025-12-08T17:55:13.930103801+00:00 stderr F I1208 17:55:13.926280 12 controller.go:237] Updating CRD OpenAPI spec because enterprisesearches.enterprisesearch.k8s.elastic.co changed 2025-12-08T17:55:13.930103801+00:00 stderr F I1208 17:55:13.926679 12 handler.go:288] Adding GroupVersion enterprisesearch.k8s.elastic.co v1 to ResourceManager 2025-12-08T17:55:13.930103801+00:00 stderr F I1208 17:55:13.926733 12 handler.go:288] Adding GroupVersion enterprisesearch.k8s.elastic.co v1beta1 to ResourceManager 2025-12-08T17:55:13.974011872+00:00 stderr F I1208 17:55:13.973898 12 handler.go:288] Adding GroupVersion kibana.k8s.elastic.co v1 to ResourceManager 2025-12-08T17:55:13.974060934+00:00 stderr F I1208 17:55:13.973997 12 handler.go:288] Adding GroupVersion kibana.k8s.elastic.co v1beta1 to ResourceManager 2025-12-08T17:55:13.974156106+00:00 stderr F I1208 17:55:13.974115 12 controller.go:237] Updating CRD OpenAPI spec because kibanas.kibana.k8s.elastic.co changed 2025-12-08T17:55:14.012730104+00:00 stderr F I1208 17:55:14.012522 12 controller.go:237] Updating CRD OpenAPI spec because logstashes.logstash.k8s.elastic.co changed 2025-12-08T17:55:14.012730104+00:00 stderr F I1208 17:55:14.012582 12 handler.go:288] Adding GroupVersion logstash.k8s.elastic.co v1alpha1 to ResourceManager 2025-12-08T17:55:14.024866231+00:00 stderr F I1208 17:55:14.024766 12 controller.go:237] Updating CRD OpenAPI spec because stackconfigpolicies.stackconfigpolicy.k8s.elastic.co changed 2025-12-08T17:55:14.025045225+00:00 stderr F I1208 17:55:14.025001 12 handler.go:288] Adding GroupVersion stackconfigpolicy.k8s.elastic.co v1alpha1 to ResourceManager 2025-12-08T17:55:15.308010851+00:00 stderr F I1208 17:55:15.307840 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=7 seatDemandAvg=0.9495363191094135 seatDemandStdev=1.581261557021009 seatDemandSmoothed=4.292524657008039 fairFrac=2.2796127562642368 currentCL=7 concurrencyDenominator=7 backstop=false 2025-12-08T17:55:15.418643929+00:00 stderr F I1208 17:55:15.418472 12 alloc.go:328] "allocated clusterIPs" service="openshift-operators/obo-prometheus-operator-admission-webhook-service" clusterIPs={"IPv4":"10.217.4.107"} 2025-12-08T17:55:15.423093369+00:00 stderr F I1208 17:55:15.422978 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:15.423093369+00:00 stderr F I1208 17:55:15.423005 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:55:15.423093369+00:00 stderr F I1208 17:55:15.423011 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 38.141µs 2025-12-08T17:55:16.535417393+00:00 stderr F I1208 17:55:16.532069 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/elastic-operator-service" clusterIPs={"IPv4":"10.217.5.72"} 2025-12-08T17:55:16.547422276+00:00 stderr F I1208 17:55:16.544138 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:16.547422276+00:00 stderr F I1208 17:55:16.544164 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 
2025-12-08T17:55:16.547422276+00:00 stderr F I1208 17:55:16.544168 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 37.411µs 2025-12-08T17:55:16.721963663+00:00 stderr F I1208 17:55:16.716245 12 controller.go:231] Updating CRD OpenAPI spec because elasticmapsservers.maps.k8s.elastic.co changed 2025-12-08T17:55:16.721963663+00:00 stderr F I1208 17:55:16.721407 12 controller.go:231] Updating CRD OpenAPI spec because elasticsearchautoscalers.autoscaling.k8s.elastic.co changed 2025-12-08T17:55:16.722002174+00:00 stderr F I1208 17:55:16.721931 12 controller.go:231] Updating CRD OpenAPI spec because beats.beat.k8s.elastic.co changed 2025-12-08T17:55:16.722002174+00:00 stderr F I1208 17:55:16.721958 12 controller.go:231] Updating CRD OpenAPI spec because stackconfigpolicies.stackconfigpolicy.k8s.elastic.co changed 2025-12-08T17:55:16.732812505+00:00 stderr F I1208 17:55:16.732672 12 controller.go:231] Updating CRD OpenAPI spec because apmservers.apm.k8s.elastic.co changed 2025-12-08T17:55:16.734464020+00:00 stderr F I1208 17:55:16.734385 12 controller.go:231] Updating CRD OpenAPI spec because agents.agent.k8s.elastic.co changed 2025-12-08T17:55:16.745932318+00:00 stderr F I1208 17:55:16.740769 12 controller.go:231] Updating CRD OpenAPI spec because kibanas.kibana.k8s.elastic.co changed 2025-12-08T17:55:16.746132053+00:00 stderr F I1208 17:55:16.746077 12 controller.go:231] Updating CRD OpenAPI spec because enterprisesearches.enterprisesearch.k8s.elastic.co changed 2025-12-08T17:55:16.757119459+00:00 stderr F I1208 17:55:16.754489 12 controller.go:231] Updating CRD OpenAPI spec because logstashes.logstash.k8s.elastic.co changed 2025-12-08T17:55:16.831180362+00:00 stderr F I1208 17:55:16.830391 12 controller.go:231] Updating CRD OpenAPI spec because elasticsearches.elasticsearch.k8s.elastic.co changed 2025-12-08T17:55:18.031540406+00:00 stderr F I1208 17:55:18.031438 12 store.go:1663] "Monitoring resource count at path" resource="alertmanagerconfigs.monitoring.rhobs" path="//monitoring.rhobs/alertmanagerconfigs" 2025-12-08T17:55:18.032727897+00:00 stderr F I1208 17:55:18.032639 12 cacher.go:469] cacher (alertmanagerconfigs.monitoring.rhobs): initialized 2025-12-08T17:55:18.032727897+00:00 stderr F I1208 17:55:18.032682 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1alpha1, Kind=AlertmanagerConfig" reflector="storage/cacher.go:/monitoring.rhobs/alertmanagerconfigs" 2025-12-08T17:55:18.210491891+00:00 stderr F I1208 17:55:18.210344 12 store.go:1663] "Monitoring resource count at path" resource="agents.agent.k8s.elastic.co" path="//agent.k8s.elastic.co/agents" 2025-12-08T17:55:18.211687263+00:00 stderr F I1208 17:55:18.211287 12 cacher.go:469] cacher (agents.agent.k8s.elastic.co): initialized 2025-12-08T17:55:18.211687263+00:00 stderr F I1208 17:55:18.211326 12 reflector.go:430] "Caches populated" type="agent.k8s.elastic.co/v1alpha1, Kind=Agent" reflector="storage/cacher.go:/agent.k8s.elastic.co/agents" 2025-12-08T17:55:18.240740135+00:00 stderr F I1208 17:55:18.240636 12 controller.go:231] Updating CRD OpenAPI spec because agents.agent.k8s.elastic.co changed 2025-12-08T17:55:18.288490760+00:00 stderr F I1208 17:55:18.287010 12 store.go:1663] "Monitoring resource count at path" resource="apmservers.apm.k8s.elastic.co" path="//apm.k8s.elastic.co/apmservers" 2025-12-08T17:55:18.292562870+00:00 stderr F I1208 17:55:18.289641 12 cacher.go:469] cacher (apmservers.apm.k8s.elastic.co): initialized 2025-12-08T17:55:18.292562870+00:00 stderr F I1208 17:55:18.289682 12 
reflector.go:430] "Caches populated" type="apm.k8s.elastic.co/v1, Kind=ApmServer" reflector="storage/cacher.go:/apm.k8s.elastic.co/apmservers" 2025-12-08T17:55:18.294525813+00:00 stderr F I1208 17:55:18.294431 12 store.go:1663] "Monitoring resource count at path" resource="apmservers.apm.k8s.elastic.co" path="//apm.k8s.elastic.co/apmservers" 2025-12-08T17:55:18.295442467+00:00 stderr F I1208 17:55:18.295368 12 cacher.go:469] cacher (apmservers.apm.k8s.elastic.co): initialized 2025-12-08T17:55:18.295442467+00:00 stderr F I1208 17:55:18.295401 12 reflector.go:430] "Caches populated" type="apm.k8s.elastic.co/v1beta1, Kind=ApmServer" reflector="storage/cacher.go:/apm.k8s.elastic.co/apmservers" 2025-12-08T17:55:18.305664183+00:00 stderr F I1208 17:55:18.305564 12 store.go:1663] "Monitoring resource count at path" resource="apmservers.apm.k8s.elastic.co" path="//apm.k8s.elastic.co/apmservers" 2025-12-08T17:55:18.307785419+00:00 stderr F I1208 17:55:18.307650 12 cacher.go:469] cacher (apmservers.apm.k8s.elastic.co): initialized 2025-12-08T17:55:18.307785419+00:00 stderr F I1208 17:55:18.307690 12 reflector.go:430] "Caches populated" type="apm.k8s.elastic.co/v1alpha1, Kind=ApmServer" reflector="storage/cacher.go:/apm.k8s.elastic.co/apmservers" 2025-12-08T17:55:18.329226486+00:00 stderr F I1208 17:55:18.329083 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.rhobs changed 2025-12-08T17:55:18.329226486+00:00 stderr F I1208 17:55:18.329121 12 controller.go:231] Updating CRD OpenAPI spec because agents.agent.k8s.elastic.co changed 2025-12-08T17:55:18.462742850+00:00 stderr F I1208 17:55:18.459920 12 controller.go:231] Updating CRD OpenAPI spec because apmservers.apm.k8s.elastic.co changed 2025-12-08T17:55:18.495685805+00:00 stderr F I1208 17:55:18.495566 12 store.go:1663] "Monitoring resource count at path" resource="beats.beat.k8s.elastic.co" path="//beat.k8s.elastic.co/beats" 2025-12-08T17:55:18.496847587+00:00 stderr F I1208 17:55:18.496771 12 cacher.go:469] cacher (beats.beat.k8s.elastic.co): initialized 2025-12-08T17:55:18.496847587+00:00 stderr F I1208 17:55:18.496821 12 reflector.go:430] "Caches populated" type="beat.k8s.elastic.co/v1beta1, Kind=Beat" reflector="storage/cacher.go:/beat.k8s.elastic.co/beats" 2025-12-08T17:55:18.540180573+00:00 stderr F I1208 17:55:18.540070 12 controller.go:231] Updating CRD OpenAPI spec because beats.beat.k8s.elastic.co changed 2025-12-08T17:55:18.607264338+00:00 stderr F I1208 17:55:18.607152 12 store.go:1663] "Monitoring resource count at path" resource="alertmanagers.monitoring.rhobs" path="//monitoring.rhobs/alertmanagers" 2025-12-08T17:55:18.608738459+00:00 stderr F I1208 17:55:18.608049 12 cacher.go:469] cacher (alertmanagers.monitoring.rhobs): initialized 2025-12-08T17:55:18.608738459+00:00 stderr F I1208 17:55:18.608075 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1, Kind=Alertmanager" reflector="storage/cacher.go:/monitoring.rhobs/alertmanagers" 2025-12-08T17:55:18.617986977+00:00 stderr F I1208 17:55:18.617845 12 store.go:1663] "Monitoring resource count at path" resource="elasticmapsservers.maps.k8s.elastic.co" path="//maps.k8s.elastic.co/elasticmapsservers" 2025-12-08T17:55:18.619576640+00:00 stderr F I1208 17:55:18.619019 12 cacher.go:469] cacher (elasticmapsservers.maps.k8s.elastic.co): initialized 2025-12-08T17:55:18.619576640+00:00 stderr F I1208 17:55:18.619051 12 reflector.go:430] "Caches populated" type="maps.k8s.elastic.co/v1alpha1, Kind=ElasticMapsServer" 
reflector="storage/cacher.go:/maps.k8s.elastic.co/elasticmapsservers" 2025-12-08T17:55:18.644242653+00:00 stderr F I1208 17:55:18.644127 12 controller.go:231] Updating CRD OpenAPI spec because elasticmapsservers.maps.k8s.elastic.co changed 2025-12-08T17:55:18.784598390+00:00 stderr F I1208 17:55:18.783142 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.rhobs changed 2025-12-08T17:55:18.823764594+00:00 stderr F I1208 17:55:18.823421 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.rhobs changed 2025-12-08T17:55:18.854396609+00:00 stderr F I1208 17:55:18.853921 12 controller.go:231] Updating CRD OpenAPI spec because elasticmapsservers.maps.k8s.elastic.co changed 2025-12-08T17:55:18.854396609+00:00 stderr F I1208 17:55:18.854294 12 controller.go:231] Updating CRD OpenAPI spec because beats.beat.k8s.elastic.co changed 2025-12-08T17:55:18.869389143+00:00 stderr F I1208 17:55:18.869272 12 controller.go:231] Updating CRD OpenAPI spec because apmservers.apm.k8s.elastic.co changed 2025-12-08T17:55:18.896557333+00:00 stderr F I1208 17:55:18.895668 12 store.go:1663] "Monitoring resource count at path" resource="monitoringstacks.monitoring.rhobs" path="//monitoring.rhobs/monitoringstacks" 2025-12-08T17:55:18.899023800+00:00 stderr F I1208 17:55:18.898846 12 cacher.go:469] cacher (monitoringstacks.monitoring.rhobs): initialized 2025-12-08T17:55:18.899023800+00:00 stderr F I1208 17:55:18.898910 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1alpha1, Kind=MonitoringStack" reflector="storage/cacher.go:/monitoring.rhobs/monitoringstacks" 2025-12-08T17:55:19.006119883+00:00 stderr F I1208 17:55:19.005822 12 store.go:1663] "Monitoring resource count at path" resource="elasticsearchautoscalers.autoscaling.k8s.elastic.co" path="//autoscaling.k8s.elastic.co/elasticsearchautoscalers" 2025-12-08T17:55:19.010717586+00:00 stderr F I1208 17:55:19.010189 12 cacher.go:469] cacher (elasticsearchautoscalers.autoscaling.k8s.elastic.co): initialized 2025-12-08T17:55:19.010717586+00:00 stderr F I1208 17:55:19.010224 12 reflector.go:430] "Caches populated" type="autoscaling.k8s.elastic.co/v1alpha1, Kind=ElasticsearchAutoscaler" reflector="storage/cacher.go:/autoscaling.k8s.elastic.co/elasticsearchautoscalers" 2025-12-08T17:55:19.011897178+00:00 stderr F I1208 17:55:19.011760 12 controller.go:231] Updating CRD OpenAPI spec because monitoringstacks.monitoring.rhobs changed 2025-12-08T17:55:19.068680876+00:00 stderr F I1208 17:55:19.068561 12 store.go:1663] "Monitoring resource count at path" resource="podmonitors.monitoring.rhobs" path="//monitoring.rhobs/podmonitors" 2025-12-08T17:55:19.070405403+00:00 stderr F I1208 17:55:19.070329 12 cacher.go:469] cacher (podmonitors.monitoring.rhobs): initialized 2025-12-08T17:55:19.070405403+00:00 stderr F I1208 17:55:19.070353 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1, Kind=PodMonitor" reflector="storage/cacher.go:/monitoring.rhobs/podmonitors" 2025-12-08T17:55:19.141351672+00:00 stderr F I1208 17:55:19.139469 12 controller.go:231] Updating CRD OpenAPI spec because podmonitors.monitoring.rhobs changed 2025-12-08T17:55:19.202414675+00:00 stderr F I1208 17:55:19.201028 12 controller.go:231] Updating CRD OpenAPI spec because elasticsearchautoscalers.autoscaling.k8s.elastic.co changed 2025-12-08T17:55:19.202414675+00:00 stderr F I1208 17:55:19.202110 12 store.go:1663] "Monitoring resource count at path" resource="probes.monitoring.rhobs" path="//monitoring.rhobs/probes" 
2025-12-08T17:55:19.205685833+00:00 stderr F I1208 17:55:19.205435 12 cacher.go:469] cacher (probes.monitoring.rhobs): initialized 2025-12-08T17:55:19.205685833+00:00 stderr F I1208 17:55:19.205460 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1, Kind=Probe" reflector="storage/cacher.go:/monitoring.rhobs/probes" 2025-12-08T17:55:19.234320653+00:00 stderr F I1208 17:55:19.234242 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.rhobs changed 2025-12-08T17:55:19.294306187+00:00 stderr F I1208 17:55:19.288207 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.rhobs changed 2025-12-08T17:55:19.338693622+00:00 stderr F I1208 17:55:19.337716 12 controller.go:231] Updating CRD OpenAPI spec because elasticsearchautoscalers.autoscaling.k8s.elastic.co changed 2025-12-08T17:55:19.385361288+00:00 stderr F I1208 17:55:19.385269 12 controller.go:231] Updating CRD OpenAPI spec because podmonitors.monitoring.rhobs changed 2025-12-08T17:55:19.385706777+00:00 stderr F I1208 17:55:19.385668 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.rhobs changed 2025-12-08T17:55:19.394261577+00:00 stderr F I1208 17:55:19.394175 12 controller.go:231] Updating CRD OpenAPI spec because monitoringstacks.monitoring.rhobs changed 2025-12-08T17:55:19.430596975+00:00 stderr F I1208 17:55:19.430497 12 store.go:1663] "Monitoring resource count at path" resource="prometheusagents.monitoring.rhobs" path="//monitoring.rhobs/prometheusagents" 2025-12-08T17:55:19.432523067+00:00 stderr F I1208 17:55:19.432474 12 cacher.go:469] cacher (prometheusagents.monitoring.rhobs): initialized 2025-12-08T17:55:19.432553718+00:00 stderr F I1208 17:55:19.432505 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1alpha1, Kind=PrometheusAgent" reflector="storage/cacher.go:/monitoring.rhobs/prometheusagents" 2025-12-08T17:55:19.656983178+00:00 stderr F I1208 17:55:19.656892 12 store.go:1663] "Monitoring resource count at path" resource="elasticsearches.elasticsearch.k8s.elastic.co" path="//elasticsearch.k8s.elastic.co/elasticsearches" 2025-12-08T17:55:19.657892742+00:00 stderr F I1208 17:55:19.657832 12 cacher.go:469] cacher (elasticsearches.elasticsearch.k8s.elastic.co): initialized 2025-12-08T17:55:19.657892742+00:00 stderr F I1208 17:55:19.657855 12 reflector.go:430] "Caches populated" type="elasticsearch.k8s.elastic.co/v1, Kind=Elasticsearch" reflector="storage/cacher.go:/elasticsearch.k8s.elastic.co/elasticsearches" 2025-12-08T17:55:19.667624224+00:00 stderr F I1208 17:55:19.667537 12 store.go:1663] "Monitoring resource count at path" resource="elasticsearches.elasticsearch.k8s.elastic.co" path="//elasticsearch.k8s.elastic.co/elasticsearches" 2025-12-08T17:55:19.668906708+00:00 stderr F I1208 17:55:19.668817 12 cacher.go:469] cacher (elasticsearches.elasticsearch.k8s.elastic.co): initialized 2025-12-08T17:55:19.668906708+00:00 stderr F I1208 17:55:19.668851 12 reflector.go:430] "Caches populated" type="elasticsearch.k8s.elastic.co/v1beta1, Kind=Elasticsearch" reflector="storage/cacher.go:/elasticsearch.k8s.elastic.co/elasticsearches" 2025-12-08T17:55:19.676249676+00:00 stderr F I1208 17:55:19.675406 12 store.go:1663] "Monitoring resource count at path" resource="elasticsearches.elasticsearch.k8s.elastic.co" path="//elasticsearch.k8s.elastic.co/elasticsearches" 2025-12-08T17:55:19.678951979+00:00 stderr F I1208 17:55:19.676706 12 cacher.go:469] cacher (elasticsearches.elasticsearch.k8s.elastic.co): initialized 
2025-12-08T17:55:19.678951979+00:00 stderr F I1208 17:55:19.676727 12 reflector.go:430] "Caches populated" type="elasticsearch.k8s.elastic.co/v1alpha1, Kind=Elasticsearch" reflector="storage/cacher.go:/elasticsearch.k8s.elastic.co/elasticsearches" 2025-12-08T17:55:19.792355480+00:00 stderr F I1208 17:55:19.789504 12 controller.go:231] Updating CRD OpenAPI spec because prometheusagents.monitoring.rhobs changed 2025-12-08T17:55:19.892280360+00:00 stderr F I1208 17:55:19.892156 12 controller.go:231] Updating CRD OpenAPI spec because elasticsearches.elasticsearch.k8s.elastic.co changed 2025-12-08T17:55:20.054964428+00:00 stderr F I1208 17:55:20.052808 12 store.go:1663] "Monitoring resource count at path" resource="prometheuses.monitoring.rhobs" path="//monitoring.rhobs/prometheuses" 2025-12-08T17:55:20.054964428+00:00 stderr F I1208 17:55:20.054649 12 cacher.go:469] cacher (prometheuses.monitoring.rhobs): initialized 2025-12-08T17:55:20.054964428+00:00 stderr F I1208 17:55:20.054686 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1, Kind=Prometheus" reflector="storage/cacher.go:/monitoring.rhobs/prometheuses" 2025-12-08T17:55:20.214658025+00:00 stderr F I1208 17:55:20.214214 12 store.go:1663] "Monitoring resource count at path" resource="enterprisesearches.enterprisesearch.k8s.elastic.co" path="//enterprisesearch.k8s.elastic.co/enterprisesearches" 2025-12-08T17:55:20.218740045+00:00 stderr F I1208 17:55:20.218653 12 cacher.go:469] cacher (enterprisesearches.enterprisesearch.k8s.elastic.co): initialized 2025-12-08T17:55:20.218740045+00:00 stderr F I1208 17:55:20.218692 12 reflector.go:430] "Caches populated" type="enterprisesearch.k8s.elastic.co/v1, Kind=EnterpriseSearch" reflector="storage/cacher.go:/enterprisesearch.k8s.elastic.co/enterprisesearches" 2025-12-08T17:55:20.226697719+00:00 stderr F I1208 17:55:20.226546 12 store.go:1663] "Monitoring resource count at path" resource="enterprisesearches.enterprisesearch.k8s.elastic.co" path="//enterprisesearch.k8s.elastic.co/enterprisesearches" 2025-12-08T17:55:20.231478058+00:00 stderr F I1208 17:55:20.231394 12 cacher.go:469] cacher (enterprisesearches.enterprisesearch.k8s.elastic.co): initialized 2025-12-08T17:55:20.231478058+00:00 stderr F I1208 17:55:20.231424 12 reflector.go:430] "Caches populated" type="enterprisesearch.k8s.elastic.co/v1beta1, Kind=EnterpriseSearch" reflector="storage/cacher.go:/enterprisesearch.k8s.elastic.co/enterprisesearches" 2025-12-08T17:55:20.266100879+00:00 stderr F I1208 17:55:20.266008 12 controller.go:231] Updating CRD OpenAPI spec because prometheusagents.monitoring.rhobs changed 2025-12-08T17:55:20.369317447+00:00 stderr F I1208 17:55:20.369214 12 controller.go:231] Updating CRD OpenAPI spec because elasticsearches.elasticsearch.k8s.elastic.co changed 2025-12-08T17:55:20.411524603+00:00 stderr F I1208 17:55:20.411420 12 controller.go:231] Updating CRD OpenAPI spec because enterprisesearches.enterprisesearch.k8s.elastic.co changed 2025-12-08T17:55:20.459916706+00:00 stderr F I1208 17:55:20.459786 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.rhobs changed 2025-12-08T17:55:20.472123694+00:00 stderr F I1208 17:55:20.471742 12 controller.go:231] Updating CRD OpenAPI spec because enterprisesearches.enterprisesearch.k8s.elastic.co changed 2025-12-08T17:55:20.493371285+00:00 stderr F I1208 17:55:20.493255 12 store.go:1663] "Monitoring resource count at path" resource="prometheusrules.monitoring.rhobs" path="//monitoring.rhobs/prometheusrules" 
2025-12-08T17:55:20.494628020+00:00 stderr F I1208 17:55:20.494584 12 cacher.go:469] cacher (prometheusrules.monitoring.rhobs): initialized 2025-12-08T17:55:20.494669271+00:00 stderr F I1208 17:55:20.494605 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1, Kind=PrometheusRule" reflector="storage/cacher.go:/monitoring.rhobs/prometheusrules" 2025-12-08T17:55:20.584135018+00:00 stderr F I1208 17:55:20.582040 12 controller.go:231] Updating CRD OpenAPI spec because prometheusrules.monitoring.rhobs changed 2025-12-08T17:55:20.799033432+00:00 stderr F I1208 17:55:20.798950 12 store.go:1663] "Monitoring resource count at path" resource="kibanas.kibana.k8s.elastic.co" path="//kibana.k8s.elastic.co/kibanas" 2025-12-08T17:55:20.800319636+00:00 stderr F I1208 17:55:20.800149 12 cacher.go:469] cacher (kibanas.kibana.k8s.elastic.co): initialized 2025-12-08T17:55:20.800319636+00:00 stderr F I1208 17:55:20.800180 12 reflector.go:430] "Caches populated" type="kibana.k8s.elastic.co/v1, Kind=Kibana" reflector="storage/cacher.go:/kibana.k8s.elastic.co/kibanas" 2025-12-08T17:55:20.812044271+00:00 stderr F I1208 17:55:20.809677 12 store.go:1663] "Monitoring resource count at path" resource="kibanas.kibana.k8s.elastic.co" path="//kibana.k8s.elastic.co/kibanas" 2025-12-08T17:55:20.812339719+00:00 stderr F I1208 17:55:20.812298 12 cacher.go:469] cacher (kibanas.kibana.k8s.elastic.co): initialized 2025-12-08T17:55:20.812370190+00:00 stderr F I1208 17:55:20.812339 12 reflector.go:430] "Caches populated" type="kibana.k8s.elastic.co/v1beta1, Kind=Kibana" reflector="storage/cacher.go:/kibana.k8s.elastic.co/kibanas" 2025-12-08T17:55:20.813466840+00:00 stderr F I1208 17:55:20.813409 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.rhobs changed 2025-12-08T17:55:20.829749789+00:00 stderr F I1208 17:55:20.829657 12 store.go:1663] "Monitoring resource count at path" resource="kibanas.kibana.k8s.elastic.co" path="//kibana.k8s.elastic.co/kibanas" 2025-12-08T17:55:20.832245045+00:00 stderr F I1208 17:55:20.832179 12 cacher.go:469] cacher (kibanas.kibana.k8s.elastic.co): initialized 2025-12-08T17:55:20.832245045+00:00 stderr F I1208 17:55:20.832215 12 reflector.go:430] "Caches populated" type="kibana.k8s.elastic.co/v1alpha1, Kind=Kibana" reflector="storage/cacher.go:/kibana.k8s.elastic.co/kibanas" 2025-12-08T17:55:20.891219592+00:00 stderr F I1208 17:55:20.889480 12 controller.go:231] Updating CRD OpenAPI spec because prometheusrules.monitoring.rhobs changed 2025-12-08T17:55:21.031985751+00:00 stderr F I1208 17:55:21.031101 12 controller.go:231] Updating CRD OpenAPI spec because kibanas.kibana.k8s.elastic.co changed 2025-12-08T17:55:21.042023840+00:00 stderr F I1208 17:55:21.041767 12 store.go:1663] "Monitoring resource count at path" resource="persesdashboards.perses.dev" path="//perses.dev/persesdashboards" 2025-12-08T17:55:21.044079166+00:00 stderr F I1208 17:55:21.042653 12 cacher.go:469] cacher (persesdashboards.perses.dev): initialized 2025-12-08T17:55:21.044079166+00:00 stderr F I1208 17:55:21.042681 12 reflector.go:430] "Caches populated" type="perses.dev/v1alpha1, Kind=PersesDashboard" reflector="storage/cacher.go:/perses.dev/persesdashboards" 2025-12-08T17:55:21.080966108+00:00 stderr F I1208 17:55:21.077674 12 store.go:1663] "Monitoring resource count at path" resource="thanosrulers.monitoring.rhobs" path="//monitoring.rhobs/thanosrulers" 2025-12-08T17:55:21.080966108+00:00 stderr F I1208 17:55:21.078674 12 cacher.go:469] cacher (thanosrulers.monitoring.rhobs): 
initialized 2025-12-08T17:55:21.080966108+00:00 stderr F I1208 17:55:21.078827 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1, Kind=ThanosRuler" reflector="storage/cacher.go:/monitoring.rhobs/thanosrulers" 2025-12-08T17:55:21.103200247+00:00 stderr F I1208 17:55:21.103094 12 controller.go:231] Updating CRD OpenAPI spec because kibanas.kibana.k8s.elastic.co changed 2025-12-08T17:55:21.126078732+00:00 stderr F I1208 17:55:21.125316 12 store.go:1663] "Monitoring resource count at path" resource="scrapeconfigs.monitoring.rhobs" path="//monitoring.rhobs/scrapeconfigs" 2025-12-08T17:55:21.127693296+00:00 stderr F I1208 17:55:21.127635 12 cacher.go:469] cacher (scrapeconfigs.monitoring.rhobs): initialized 2025-12-08T17:55:21.127854470+00:00 stderr F I1208 17:55:21.127796 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1alpha1, Kind=ScrapeConfig" reflector="storage/cacher.go:/monitoring.rhobs/scrapeconfigs" 2025-12-08T17:55:21.397935568+00:00 stderr F I1208 17:55:21.397774 12 store.go:1663] "Monitoring resource count at path" resource="logstashes.logstash.k8s.elastic.co" path="//logstash.k8s.elastic.co/logstashes" 2025-12-08T17:55:21.398918695+00:00 stderr F I1208 17:55:21.398658 12 cacher.go:469] cacher (logstashes.logstash.k8s.elastic.co): initialized 2025-12-08T17:55:21.398918695+00:00 stderr F I1208 17:55:21.398690 12 reflector.go:430] "Caches populated" type="logstash.k8s.elastic.co/v1alpha1, Kind=Logstash" reflector="storage/cacher.go:/logstash.k8s.elastic.co/logstashes" 2025-12-08T17:55:21.434119642+00:00 stderr F I1208 17:55:21.432949 12 controller.go:231] Updating CRD OpenAPI spec because scrapeconfigs.monitoring.rhobs changed 2025-12-08T17:55:21.438999824+00:00 stderr F I1208 17:55:21.437930 12 store.go:1663] "Monitoring resource count at path" resource="servicemonitors.monitoring.rhobs" path="//monitoring.rhobs/servicemonitors" 2025-12-08T17:55:21.445037146+00:00 stderr F I1208 17:55:21.439258 12 cacher.go:469] cacher (servicemonitors.monitoring.rhobs): initialized 2025-12-08T17:55:21.445037146+00:00 stderr F I1208 17:55:21.439280 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1, Kind=ServiceMonitor" reflector="storage/cacher.go:/monitoring.rhobs/servicemonitors" 2025-12-08T17:55:21.606848741+00:00 stderr F I1208 17:55:21.606692 12 controller.go:231] Updating CRD OpenAPI spec because logstashes.logstash.k8s.elastic.co changed 2025-12-08T17:55:21.640966229+00:00 stderr F I1208 17:55:21.639706 12 store.go:1663] "Monitoring resource count at path" resource="perses.perses.dev" path="//perses.dev/perses" 2025-12-08T17:55:21.641741289+00:00 stderr F I1208 17:55:21.641666 12 cacher.go:469] cacher (perses.perses.dev): initialized 2025-12-08T17:55:21.641741289+00:00 stderr F I1208 17:55:21.641700 12 reflector.go:430] "Caches populated" type="perses.dev/v1alpha1, Kind=Perses" reflector="storage/cacher.go:/perses.dev/perses" 2025-12-08T17:55:21.722993996+00:00 stderr F I1208 17:55:21.721846 12 controller.go:231] Updating CRD OpenAPI spec because scrapeconfigs.monitoring.rhobs changed 2025-12-08T17:55:21.796567657+00:00 stderr F I1208 17:55:21.796354 12 controller.go:231] Updating CRD OpenAPI spec because logstashes.logstash.k8s.elastic.co changed 2025-12-08T17:55:21.873340002+00:00 stderr F I1208 17:55:21.870738 12 controller.go:231] Updating CRD OpenAPI spec because servicemonitors.monitoring.rhobs changed 2025-12-08T17:55:21.943594213+00:00 stderr F I1208 17:55:21.943485 12 controller.go:231] Updating CRD OpenAPI spec because 
servicemonitors.monitoring.rhobs changed 2025-12-08T17:55:21.995319885+00:00 stderr F I1208 17:55:21.994128 12 store.go:1663] "Monitoring resource count at path" resource="stackconfigpolicies.stackconfigpolicy.k8s.elastic.co" path="//stackconfigpolicy.k8s.elastic.co/stackconfigpolicies" 2025-12-08T17:55:21.995319885+00:00 stderr F I1208 17:55:21.994983 12 cacher.go:469] cacher (stackconfigpolicies.stackconfigpolicy.k8s.elastic.co): initialized 2025-12-08T17:55:21.995319885+00:00 stderr F I1208 17:55:21.995011 12 reflector.go:430] "Caches populated" type="stackconfigpolicy.k8s.elastic.co/v1alpha1, Kind=StackConfigPolicy" reflector="storage/cacher.go:/stackconfigpolicy.k8s.elastic.co/stackconfigpolicies" 2025-12-08T17:55:22.033094121+00:00 stderr F I1208 17:55:22.032992 12 store.go:1663] "Monitoring resource count at path" resource="thanosqueriers.monitoring.rhobs" path="//monitoring.rhobs/thanosqueriers" 2025-12-08T17:55:22.033868332+00:00 stderr F I1208 17:55:22.033621 12 cacher.go:469] cacher (thanosqueriers.monitoring.rhobs): initialized 2025-12-08T17:55:22.034006316+00:00 stderr F I1208 17:55:22.033662 12 reflector.go:430] "Caches populated" type="monitoring.rhobs/v1alpha1, Kind=ThanosQuerier" reflector="storage/cacher.go:/monitoring.rhobs/thanosqueriers" 2025-12-08T17:55:22.194044943+00:00 stderr F I1208 17:55:22.192432 12 controller.go:231] Updating CRD OpenAPI spec because stackconfigpolicies.stackconfigpolicy.k8s.elastic.co changed 2025-12-08T17:55:22.235265812+00:00 stderr F I1208 17:55:22.234646 12 store.go:1663] "Monitoring resource count at path" resource="observabilityinstallers.observability.openshift.io" path="//observability.openshift.io/observabilityinstallers" 2025-12-08T17:55:22.237892233+00:00 stderr F I1208 17:55:22.236614 12 cacher.go:469] cacher (observabilityinstallers.observability.openshift.io): initialized 2025-12-08T17:55:22.237892233+00:00 stderr F I1208 17:55:22.236637 12 reflector.go:430] "Caches populated" type="observability.openshift.io/v1alpha1, Kind=ObservabilityInstaller" reflector="storage/cacher.go:/observability.openshift.io/observabilityinstallers" 2025-12-08T17:55:22.237892233+00:00 stderr F I1208 17:55:22.237381 12 controller.go:231] Updating CRD OpenAPI spec because stackconfigpolicies.stackconfigpolicy.k8s.elastic.co changed 2025-12-08T17:55:22.244977014+00:00 stderr F I1208 17:55:22.244227 12 store.go:1663] "Monitoring resource count at path" resource="persesdatasources.perses.dev" path="//perses.dev/persesdatasources" 2025-12-08T17:55:22.246985747+00:00 stderr F I1208 17:55:22.244982 12 cacher.go:469] cacher (persesdatasources.perses.dev): initialized 2025-12-08T17:55:22.246985747+00:00 stderr F I1208 17:55:22.245013 12 reflector.go:430] "Caches populated" type="perses.dev/v1alpha1, Kind=PersesDatasource" reflector="storage/cacher.go:/perses.dev/persesdatasources" 2025-12-08T17:55:22.354531852+00:00 stderr F I1208 17:55:22.354434 12 store.go:1663] "Monitoring resource count at path" resource="uiplugins.observability.openshift.io" path="//observability.openshift.io/uiplugins" 2025-12-08T17:55:22.356228167+00:00 stderr F I1208 17:55:22.356114 12 cacher.go:469] cacher (uiplugins.observability.openshift.io): initialized 2025-12-08T17:55:22.356228167+00:00 stderr F I1208 17:55:22.356164 12 reflector.go:430] "Caches populated" type="observability.openshift.io/v1alpha1, Kind=UIPlugin" reflector="storage/cacher.go:/observability.openshift.io/uiplugins" 2025-12-08T17:55:22.379424712+00:00 stderr F I1208 17:55:22.379213 12 controller.go:231] Updating CRD 
OpenAPI spec because thanosqueriers.monitoring.rhobs changed 2025-12-08T17:55:22.405431762+00:00 stderr F I1208 17:55:22.404561 12 controller.go:231] Updating CRD OpenAPI spec because thanosqueriers.monitoring.rhobs changed 2025-12-08T17:55:23.142781494+00:00 stderr F I1208 17:55:23.142668 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.rhobs changed 2025-12-08T17:55:23.403168942+00:00 stderr F I1208 17:55:23.401987 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.rhobs changed 2025-12-08T17:55:25.018106816+00:00 stderr F E1208 17:55:25.017963 12 repairip.go:523] "Unhandled Error" err="the IPAddress: 10.217.5.237 for Service obo-prometheus-operator-admission-webhook/openshift-operators has a wrong reference &v1.ParentReference{Group:\"\", Resource:\"services\", Namespace:\"openshift-operators\", Name:\"obo-prometheus-operator-admission-webhook\"}; cleaning up" logger="UnhandledError" 2025-12-08T17:55:25.022664191+00:00 stderr F I1208 17:55:25.022568 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:25.022664191+00:00 stderr F I1208 17:55:25.022594 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:55:25.022664191+00:00 stderr F I1208 17:55:25.022600 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 37.201µs 2025-12-08T17:55:25.025751626+00:00 stderr F I1208 17:55:25.025602 12 ipallocator.go:374] error releasing ip 10.217.5.237 : ipaddresses.networking.k8s.io "10.217.5.237" not found 2025-12-08T17:55:25.308547345+00:00 stderr F I1208 17:55:25.308408 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.03338325134960385 seatDemandStdev=0.21315773757284248 seatDemandSmoothed=4.19946703264207 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:55:26.480804572+00:00 stderr F I1208 17:55:26.480674 12 controller.go:237] Updating CRD OpenAPI spec because challenges.acme.cert-manager.io changed 2025-12-08T17:55:26.481117211+00:00 stderr F I1208 17:55:26.481050 12 handler.go:288] Adding GroupVersion acme.cert-manager.io v1 to ResourceManager 2025-12-08T17:55:26.487976859+00:00 stderr F I1208 17:55:26.487856 12 handler.go:288] Adding GroupVersion acme.cert-manager.io v1 to ResourceManager 2025-12-08T17:55:26.487976859+00:00 stderr F I1208 17:55:26.487924 12 controller.go:237] Updating CRD OpenAPI spec because orders.acme.cert-manager.io changed 2025-12-08T17:55:27.069843945+00:00 stderr F I1208 17:55:27.069702 12 controller.go:231] Updating CRD OpenAPI spec because orders.acme.cert-manager.io changed 2025-12-08T17:55:27.140665808+00:00 stderr F I1208 17:55:27.140550 12 controller.go:231] Updating CRD OpenAPI spec because challenges.acme.cert-manager.io changed 2025-12-08T17:55:27.214995028+00:00 stderr F I1208 17:55:27.214865 12 alloc.go:328] "allocated clusterIPs" service="cert-manager-operator/cert-manager-operator-controller-manager-metrics-service" clusterIPs={"IPv4":"10.217.5.9"} 2025-12-08T17:55:27.219516872+00:00 stderr F I1208 17:55:27.219347 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:27.219516872+00:00 stderr F I1208 17:55:27.219425 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:55:27.219516872+00:00 stderr F I1208 17:55:27.219447 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 106.803µs 2025-12-08T17:55:27.251091589+00:00 stderr F I1208 17:55:27.245270 12 
controller.go:237] Updating CRD OpenAPI spec because certificaterequests.cert-manager.io changed 2025-12-08T17:55:27.251091589+00:00 stderr F I1208 17:55:27.245592 12 handler.go:288] Adding GroupVersion cert-manager.io v1 to ResourceManager 2025-12-08T17:55:27.281340379+00:00 stderr F I1208 17:55:27.281226 12 controller.go:237] Updating CRD OpenAPI spec because certificates.cert-manager.io changed 2025-12-08T17:55:27.283220560+00:00 stderr F I1208 17:55:27.283156 12 handler.go:288] Adding GroupVersion cert-manager.io v1 to ResourceManager 2025-12-08T17:55:27.301732028+00:00 stderr F I1208 17:55:27.300690 12 controller.go:231] Updating CRD OpenAPI spec because certificaterequests.cert-manager.io changed 2025-12-08T17:55:27.326626132+00:00 stderr F I1208 17:55:27.325369 12 controller.go:231] Updating CRD OpenAPI spec because certificates.cert-manager.io changed 2025-12-08T17:55:27.441057732+00:00 stderr F E1208 17:55:27.437349 12 repairip.go:523] "Unhandled Error" err="the IPAddress: 10.217.5.185 for Service observability-operator/openshift-operators has a wrong reference &v1.ParentReference{Group:\"\", Resource:\"services\", Namespace:\"openshift-operators\", Name:\"observability-operator\"}; cleaning up" logger="UnhandledError" 2025-12-08T17:55:27.454952213+00:00 stderr F I1208 17:55:27.451403 12 ipallocator.go:374] error releasing ip 10.217.5.185 : ipaddresses.networking.k8s.io "10.217.5.185" not found 2025-12-08T17:55:27.522808395+00:00 stderr F I1208 17:55:27.521492 12 controller.go:237] Updating CRD OpenAPI spec because clusterissuers.cert-manager.io changed 2025-12-08T17:55:27.522808395+00:00 stderr F I1208 17:55:27.521573 12 controller.go:231] Updating CRD OpenAPI spec because observabilityinstallers.observability.openshift.io changed 2025-12-08T17:55:27.522808395+00:00 stderr F I1208 17:55:27.521783 12 handler.go:288] Adding GroupVersion cert-manager.io v1 to ResourceManager 2025-12-08T17:55:27.558686079+00:00 stderr F I1208 17:55:27.558586 12 controller.go:231] Updating CRD OpenAPI spec because uiplugins.observability.openshift.io changed 2025-12-08T17:55:27.575558402+00:00 stderr F I1208 17:55:27.573463 12 controller.go:237] Updating CRD OpenAPI spec because issuers.cert-manager.io changed 2025-12-08T17:55:27.575558402+00:00 stderr F I1208 17:55:27.574272 12 handler.go:288] Adding GroupVersion cert-manager.io v1 to ResourceManager 2025-12-08T17:55:27.594223034+00:00 stderr F I1208 17:55:27.594073 12 controller.go:237] Updating CRD OpenAPI spec because certmanagers.operator.openshift.io changed 2025-12-08T17:55:27.594621945+00:00 stderr F I1208 17:55:27.594538 12 handler.go:288] Adding GroupVersion operator.openshift.io v1alpha1 to ResourceManager 2025-12-08T17:55:27.610333126+00:00 stderr F I1208 17:55:27.610232 12 controller.go:231] Updating CRD OpenAPI spec because clusterissuers.cert-manager.io changed 2025-12-08T17:55:27.619527838+00:00 stderr F I1208 17:55:27.619218 12 handler.go:288] Adding GroupVersion operator.openshift.io v1alpha1 to ResourceManager 2025-12-08T17:55:27.619527838+00:00 stderr F I1208 17:55:27.619294 12 controller.go:237] Updating CRD OpenAPI spec because istiocsrs.operator.openshift.io changed 2025-12-08T17:55:27.642318843+00:00 stderr F I1208 17:55:27.639205 12 controller.go:231] Updating CRD OpenAPI spec because uiplugins.observability.openshift.io changed 2025-12-08T17:55:27.642318843+00:00 stderr F I1208 17:55:27.641014 12 controller.go:231] Updating CRD OpenAPI spec because observabilityinstallers.observability.openshift.io changed 
2025-12-08T17:55:27.693774376+00:00 stderr F I1208 17:55:27.693207 12 controller.go:231] Updating CRD OpenAPI spec because perses.perses.dev changed 2025-12-08T17:55:27.711447911+00:00 stderr F I1208 17:55:27.711186 12 controller.go:231] Updating CRD OpenAPI spec because certmanagers.operator.openshift.io changed 2025-12-08T17:55:27.724190671+00:00 stderr F I1208 17:55:27.724080 12 controller.go:231] Updating CRD OpenAPI spec because istiocsrs.operator.openshift.io changed 2025-12-08T17:55:27.771116798+00:00 stderr F I1208 17:55:27.769962 12 controller.go:231] Updating CRD OpenAPI spec because issuers.cert-manager.io changed 2025-12-08T17:55:27.841405366+00:00 stderr F I1208 17:55:27.841278 12 controller.go:231] Updating CRD OpenAPI spec because perses.perses.dev changed 2025-12-08T17:55:27.886361851+00:00 stderr F I1208 17:55:27.882939 12 controller.go:231] Updating CRD OpenAPI spec because persesdashboards.perses.dev changed 2025-12-08T17:55:27.906699219+00:00 stderr F I1208 17:55:27.905919 12 controller.go:231] Updating CRD OpenAPI spec because persesdashboards.perses.dev changed 2025-12-08T17:55:28.481146411+00:00 stderr F I1208 17:55:28.480987 12 controller.go:231] Updating CRD OpenAPI spec because persesdatasources.perses.dev changed 2025-12-08T17:55:28.668418930+00:00 stderr F I1208 17:55:28.668280 12 controller.go:231] Updating CRD OpenAPI spec because persesdatasources.perses.dev changed 2025-12-08T17:55:30.069785443+00:00 stderr F W1208 17:55:30.066727 12 dispatcher.go:205] Failed calling webhook, failing open elastic-es-validation-v1.k8s.elastic.co: failed calling webhook "elastic-es-validation-v1.k8s.elastic.co": failed to call webhook: Post "https://elastic-operator-service.service-telemetry.svc:443/validate-elasticsearch-k8s-elastic-co-v1-elasticsearch?timeout=10s": no endpoints available for service "elastic-operator-service" 2025-12-08T17:55:30.069785443+00:00 stderr F E1208 17:55:30.066773 12 dispatcher.go:213] "Unhandled Error" err="failed calling webhook \"elastic-es-validation-v1.k8s.elastic.co\": failed to call webhook: Post \"https://elastic-operator-service.service-telemetry.svc:443/validate-elasticsearch-k8s-elastic-co-v1-elasticsearch?timeout=10s\": no endpoints available for service \"elastic-operator-service\"" logger="UnhandledError" 2025-12-08T17:55:30.069785443+00:00 stderr F I1208 17:55:30.068995 12 controller.go:667] quota admission added evaluator for: elasticsearches.elasticsearch.k8s.elastic.co 2025-12-08T17:55:30.069785443+00:00 stderr F I1208 17:55:30.069049 12 controller.go:667] quota admission added evaluator for: elasticsearches.elasticsearch.k8s.elastic.co 2025-12-08T17:55:35.008217482+00:00 stderr F I1208 17:55:35.008067 12 cacher.go:847] cacher (clusterserviceversions.operators.coreos.com): 1 objects queued in incoming channel. 
2025-12-08T17:55:35.311410452+00:00 stderr F I1208 17:55:35.311237 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=6 seatDemandAvg=0.12038375606993268 seatDemandStdev=0.6071010395298224 seatDemandSmoothed=4.119611441190096 fairFrac=2.2796127562642368 currentCL=6 concurrencyDenominator=6 backstop=false 2025-12-08T17:55:36.291018532+00:00 stderr F I1208 17:55:36.290785 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.rhobs changed 2025-12-08T17:55:36.791859134+00:00 stderr F I1208 17:55:36.791344 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.rhobs changed 2025-12-08T17:55:36.871506510+00:00 stderr F I1208 17:55:36.870521 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.rhobs changed 2025-12-08T17:55:36.880464466+00:00 stderr F I1208 17:55:36.880245 12 controller.go:231] Updating CRD OpenAPI spec because monitoringstacks.monitoring.rhobs changed 2025-12-08T17:55:36.946807367+00:00 stderr F I1208 17:55:36.946676 12 controller.go:231] Updating CRD OpenAPI spec because monitoringstacks.monitoring.rhobs changed 2025-12-08T17:55:36.948572275+00:00 stderr F I1208 17:55:36.948458 12 controller.go:231] Updating CRD OpenAPI spec because podmonitors.monitoring.rhobs changed 2025-12-08T17:55:37.007922054+00:00 stderr F I1208 17:55:37.006756 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.rhobs changed 2025-12-08T17:55:37.106174720+00:00 stderr F I1208 17:55:37.106059 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.rhobs changed 2025-12-08T17:55:37.202222845+00:00 stderr F I1208 17:55:37.201764 12 controller.go:231] Updating CRD OpenAPI spec because podmonitors.monitoring.rhobs changed 2025-12-08T17:55:37.204946620+00:00 stderr F I1208 17:55:37.204460 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.rhobs changed 2025-12-08T17:55:37.659140663+00:00 stderr F I1208 17:55:37.654836 12 controller.go:231] Updating CRD OpenAPI spec because prometheusagents.monitoring.rhobs changed 2025-12-08T17:55:37.965007825+00:00 stderr F I1208 17:55:37.964563 12 controller.go:231] Updating CRD OpenAPI spec because prometheusagents.monitoring.rhobs changed 2025-12-08T17:55:38.311274917+00:00 stderr F I1208 17:55:38.311069 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.rhobs changed 2025-12-08T17:55:38.372017614+00:00 stderr F I1208 17:55:38.371864 12 cacher.go:847] cacher (clusterroles.rbac.authorization.k8s.io): 1 objects queued in incoming channel. 2025-12-08T17:55:38.372017614+00:00 stderr F I1208 17:55:38.371927 12 cacher.go:847] cacher (clusterroles.rbac.authorization.k8s.io): 2 objects queued in incoming channel. 2025-12-08T17:55:38.415410875+00:00 stderr F I1208 17:55:38.415293 12 cacher.go:847] cacher (clusterroles.rbac.authorization.k8s.io): 3 objects queued in incoming channel. 2025-12-08T17:55:38.415410875+00:00 stderr F I1208 17:55:38.415319 12 cacher.go:847] cacher (clusterroles.rbac.authorization.k8s.io): 4 objects queued in incoming channel. 
2025-12-08T17:55:38.619012452+00:00 stderr F I1208 17:55:38.618922 12 controller.go:231] Updating CRD OpenAPI spec because prometheusrules.monitoring.rhobs changed 2025-12-08T17:55:38.782949330+00:00 stderr F I1208 17:55:38.782159 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.rhobs changed 2025-12-08T17:55:38.785220092+00:00 stderr F I1208 17:55:38.782659 12 controller.go:231] Updating CRD OpenAPI spec because prometheusrules.monitoring.rhobs changed 2025-12-08T17:55:39.005107486+00:00 stderr F I1208 17:55:39.004977 12 store.go:1663] "Monitoring resource count at path" resource="certmanagers.operator.openshift.io" path="//operator.openshift.io/certmanagers" 2025-12-08T17:55:39.006798972+00:00 stderr F I1208 17:55:39.006708 12 cacher.go:469] cacher (certmanagers.operator.openshift.io): initialized 2025-12-08T17:55:39.006798972+00:00 stderr F I1208 17:55:39.006751 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1alpha1, Kind=CertManager" reflector="storage/cacher.go:/operator.openshift.io/certmanagers" 2025-12-08T17:55:39.036937009+00:00 stderr F I1208 17:55:39.036811 12 store.go:1663] "Monitoring resource count at path" resource="certificates.cert-manager.io" path="//cert-manager.io/certificates" 2025-12-08T17:55:39.038413709+00:00 stderr F I1208 17:55:39.037960 12 cacher.go:469] cacher (certificates.cert-manager.io): initialized 2025-12-08T17:55:39.038413709+00:00 stderr F I1208 17:55:39.037993 12 reflector.go:430] "Caches populated" type="cert-manager.io/v1, Kind=Certificate" reflector="storage/cacher.go:/cert-manager.io/certificates" 2025-12-08T17:55:39.058807160+00:00 stderr F I1208 17:55:39.058399 12 store.go:1663] "Monitoring resource count at path" resource="istiocsrs.operator.openshift.io" path="//operator.openshift.io/istiocsrs" 2025-12-08T17:55:39.060754852+00:00 stderr F I1208 17:55:39.060263 12 cacher.go:469] cacher (istiocsrs.operator.openshift.io): initialized 2025-12-08T17:55:39.060754852+00:00 stderr F I1208 17:55:39.060291 12 reflector.go:430] "Caches populated" type="operator.openshift.io/v1alpha1, Kind=IstioCSR" reflector="storage/cacher.go:/operator.openshift.io/istiocsrs" 2025-12-08T17:55:39.567664252+00:00 stderr F I1208 17:55:39.567517 12 controller.go:231] Updating CRD OpenAPI spec because scrapeconfigs.monitoring.rhobs changed 2025-12-08T17:55:39.836671973+00:00 stderr F I1208 17:55:39.836340 12 controller.go:231] Updating CRD OpenAPI spec because servicemonitors.monitoring.rhobs changed 2025-12-08T17:55:39.927051104+00:00 stderr F I1208 17:55:39.926920 12 controller.go:231] Updating CRD OpenAPI spec because scrapeconfigs.monitoring.rhobs changed 2025-12-08T17:55:39.936163604+00:00 stderr F I1208 17:55:39.935835 12 controller.go:231] Updating CRD OpenAPI spec because servicemonitors.monitoring.rhobs changed 2025-12-08T17:55:40.419734963+00:00 stderr F I1208 17:55:40.419206 12 controller.go:231] Updating CRD OpenAPI spec because thanosqueriers.monitoring.rhobs changed 2025-12-08T17:55:40.457889750+00:00 stderr F I1208 17:55:40.457382 12 controller.go:231] Updating CRD OpenAPI spec because thanosqueriers.monitoring.rhobs changed 2025-12-08T17:55:41.205252577+00:00 stderr F I1208 17:55:41.205125 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.rhobs changed 2025-12-08T17:55:41.221473403+00:00 stderr F I1208 17:55:41.221087 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/elasticsearch-es-http" clusterIPs={"IPv4":"10.217.4.122"} 2025-12-08T17:55:41.228363401+00:00 
stderr F I1208 17:55:41.227066 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:41.228363401+00:00 stderr F I1208 17:55:41.227100 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:55:41.228363401+00:00 stderr F I1208 17:55:41.227106 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 47.631µs 2025-12-08T17:55:41.233017959+00:00 stderr F I1208 17:55:41.231943 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/elasticsearch-es-internal-http" clusterIPs={"IPv4":"10.217.4.189"} 2025-12-08T17:55:41.452073300+00:00 stderr F I1208 17:55:41.451935 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.rhobs changed 2025-12-08T17:55:41.998125454+00:00 stderr F I1208 17:55:41.998024 12 controller.go:667] quota admission added evaluator for: statefulsets.apps 2025-12-08T17:55:41.998125454+00:00 stderr F I1208 17:55:41.998070 12 controller.go:667] quota admission added evaluator for: statefulsets.apps 2025-12-08T17:55:42.019092138+00:00 stderr F I1208 17:55:42.018977 12 controller.go:667] quota admission added evaluator for: controllerrevisions.apps 2025-12-08T17:55:42.608254196+00:00 stderr F I1208 17:55:42.607750 12 cacher.go:847] cacher (clusterroles.rbac.authorization.k8s.io): 5 objects queued in incoming channel. 2025-12-08T17:55:42.608254196+00:00 stderr F I1208 17:55:42.607772 12 cacher.go:847] cacher (clusterroles.rbac.authorization.k8s.io): 6 objects queued in incoming channel. 2025-12-08T17:55:42.657195678+00:00 stderr F E1208 17:55:42.657068 12 repairip.go:523] "Unhandled Error" err="the IPAddress: 10.217.4.165 for Service obo-prometheus-operator-admission-webhook/openshift-operators has a wrong reference &v1.ParentReference{Group:\"\", Resource:\"services\", Namespace:\"openshift-operators\", Name:\"obo-prometheus-operator-admission-webhook\"}; cleaning up" logger="UnhandledError" 2025-12-08T17:55:42.673120015+00:00 stderr F I1208 17:55:42.667753 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:42.673120015+00:00 stderr F I1208 17:55:42.667799 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:55:42.673120015+00:00 stderr F I1208 17:55:42.667806 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 58.061µs 2025-12-08T17:55:42.677983289+00:00 stderr F I1208 17:55:42.675013 12 ipallocator.go:374] error releasing ip 10.217.4.165 : ipaddresses.networking.k8s.io "10.217.4.165" not found 2025-12-08T17:55:42.717923755+00:00 stderr F I1208 17:55:42.717756 12 alloc.go:328] "allocated clusterIPs" service="cert-manager/cert-manager-cainjector" clusterIPs={"IPv4":"10.217.5.54"} 2025-12-08T17:55:43.042658675+00:00 stderr F I1208 17:55:43.042410 12 alloc.go:328] "allocated clusterIPs" service="cert-manager/cert-manager-webhook" clusterIPs={"IPv4":"10.217.5.149"} 2025-12-08T17:55:43.045417811+00:00 stderr F I1208 17:55:43.045318 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:43.045417811+00:00 stderr F I1208 17:55:43.045343 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:55:43.045417811+00:00 stderr F I1208 17:55:43.045348 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 36.291µs 2025-12-08T17:55:44.421072149+00:00 stderr F E1208 17:55:44.420554 12 repairip.go:523] "Unhandled Error" err="the IPAddress: 10.217.4.42 for Service observability-operator/openshift-operators has a wrong reference 
&v1.ParentReference{Group:\"\", Resource:\"services\", Namespace:\"openshift-operators\", Name:\"observability-operator\"}; cleaning up" logger="UnhandledError" 2025-12-08T17:55:44.425824499+00:00 stderr F I1208 17:55:44.425706 12 ipallocator.go:374] error releasing ip 10.217.4.42 : ipaddresses.networking.k8s.io "10.217.4.42" not found 2025-12-08T17:55:44.425922782+00:00 stderr F I1208 17:55:44.425708 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:44.425975913+00:00 stderr F I1208 17:55:44.425943 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:55:44.425975913+00:00 stderr F I1208 17:55:44.425957 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 254.808µs 2025-12-08T17:55:44.487926233+00:00 stderr F I1208 17:55:44.487664 12 controller.go:231] Updating CRD OpenAPI spec because observabilityinstallers.observability.openshift.io changed 2025-12-08T17:55:44.521978658+00:00 stderr F I1208 17:55:44.519350 12 controller.go:231] Updating CRD OpenAPI spec because observabilityinstallers.observability.openshift.io changed 2025-12-08T17:55:44.532359523+00:00 stderr F I1208 17:55:44.531671 12 controller.go:231] Updating CRD OpenAPI spec because uiplugins.observability.openshift.io changed 2025-12-08T17:55:44.553998736+00:00 stderr F I1208 17:55:44.553564 12 controller.go:231] Updating CRD OpenAPI spec because uiplugins.observability.openshift.io changed 2025-12-08T17:55:44.649226799+00:00 stderr F I1208 17:55:44.649029 12 controller.go:231] Updating CRD OpenAPI spec because perses.perses.dev changed 2025-12-08T17:55:44.732140754+00:00 stderr F I1208 17:55:44.732025 12 controller.go:231] Updating CRD OpenAPI spec because perses.perses.dev changed 2025-12-08T17:55:44.849109454+00:00 stderr F I1208 17:55:44.848993 12 controller.go:231] Updating CRD OpenAPI spec because persesdashboards.perses.dev changed 2025-12-08T17:55:44.869594936+00:00 stderr F I1208 17:55:44.869481 12 controller.go:231] Updating CRD OpenAPI spec because persesdashboards.perses.dev changed 2025-12-08T17:55:45.312203511+00:00 stderr F I1208 17:55:45.312054 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=3 seatDemandAvg=0.013142892809745323 seatDemandStdev=0.12329575640887097 seatDemandSmoothed=4.0279984669747515 fairFrac=2.2796127562642368 currentCL=3 concurrencyDenominator=3 backstop=false 2025-12-08T17:55:45.447700899+00:00 stderr F I1208 17:55:45.447420 12 controller.go:231] Updating CRD OpenAPI spec because persesdatasources.perses.dev changed 2025-12-08T17:55:45.476677644+00:00 stderr F I1208 17:55:45.476565 12 controller.go:231] Updating CRD OpenAPI spec because persesdatasources.perses.dev changed 2025-12-08T17:55:50.233822099+00:00 stderr F I1208 17:55:50.233714 12 alloc.go:328] "allocated clusterIPs" service="cert-manager/cert-manager" clusterIPs={"IPv4":"10.217.5.213"} 2025-12-08T17:55:50.238289041+00:00 stderr F I1208 17:55:50.238189 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:55:50.238289041+00:00 stderr F I1208 17:55:50.238216 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:55:50.238289041+00:00 stderr F I1208 17:55:50.238222 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 40.121µs 2025-12-08T17:55:52.345815392+00:00 stderr F I1208 17:55:52.345654 12 store.go:1663] "Monitoring resource count at path" resource="challenges.acme.cert-manager.io" path="//acme.cert-manager.io/challenges" 
2025-12-08T17:55:52.348217968+00:00 stderr F I1208 17:55:52.348130 12 cacher.go:469] cacher (challenges.acme.cert-manager.io): initialized 2025-12-08T17:55:52.348217968+00:00 stderr F I1208 17:55:52.348187 12 reflector.go:430] "Caches populated" type="acme.cert-manager.io/v1, Kind=Challenge" reflector="storage/cacher.go:/acme.cert-manager.io/challenges" 2025-12-08T17:55:52.362290554+00:00 stderr F I1208 17:55:52.362152 12 store.go:1663] "Monitoring resource count at path" resource="orders.acme.cert-manager.io" path="//acme.cert-manager.io/orders" 2025-12-08T17:55:52.363213589+00:00 stderr F I1208 17:55:52.363092 12 cacher.go:469] cacher (orders.acme.cert-manager.io): initialized 2025-12-08T17:55:52.363213589+00:00 stderr F I1208 17:55:52.363154 12 reflector.go:430] "Caches populated" type="acme.cert-manager.io/v1, Kind=Order" reflector="storage/cacher.go:/acme.cert-manager.io/orders" 2025-12-08T17:55:52.388538014+00:00 stderr F I1208 17:55:52.388361 12 store.go:1663] "Monitoring resource count at path" resource="certificaterequests.cert-manager.io" path="//cert-manager.io/certificaterequests" 2025-12-08T17:55:52.389955922+00:00 stderr F I1208 17:55:52.389718 12 cacher.go:469] cacher (certificaterequests.cert-manager.io): initialized 2025-12-08T17:55:52.389955922+00:00 stderr F I1208 17:55:52.389748 12 reflector.go:430] "Caches populated" type="cert-manager.io/v1, Kind=CertificateRequest" reflector="storage/cacher.go:/cert-manager.io/certificaterequests" 2025-12-08T17:55:52.408314027+00:00 stderr F I1208 17:55:52.408129 12 store.go:1663] "Monitoring resource count at path" resource="issuers.cert-manager.io" path="//cert-manager.io/issuers" 2025-12-08T17:55:52.409778797+00:00 stderr F I1208 17:55:52.409651 12 cacher.go:469] cacher (issuers.cert-manager.io): initialized 2025-12-08T17:55:52.409778797+00:00 stderr F I1208 17:55:52.409699 12 reflector.go:430] "Caches populated" type="cert-manager.io/v1, Kind=Issuer" reflector="storage/cacher.go:/cert-manager.io/issuers" 2025-12-08T17:55:52.491765137+00:00 stderr F I1208 17:55:52.491646 12 store.go:1663] "Monitoring resource count at path" resource="clusterissuers.cert-manager.io" path="//cert-manager.io/clusterissuers" 2025-12-08T17:55:52.492907598+00:00 stderr F I1208 17:55:52.492792 12 cacher.go:469] cacher (clusterissuers.cert-manager.io): initialized 2025-12-08T17:55:52.492907598+00:00 stderr F I1208 17:55:52.492828 12 reflector.go:430] "Caches populated" type="cert-manager.io/v1, Kind=ClusterIssuer" reflector="storage/cacher.go:/cert-manager.io/clusterissuers" 2025-12-08T17:55:55.312631071+00:00 stderr F I1208 17:55:55.312434 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.0028955730845125114 seatDemandStdev=0.07648497463468533 seatDemandSmoothed=3.9371802548318735 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:56:05.199144584+00:00 stderr F I1208 17:56:05.198548 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:56:05.199144584+00:00 stderr F I1208 17:56:05.198578 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:56:05.199144584+00:00 stderr F I1208 17:56:05.198590 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 42.811µs 2025-12-08T17:56:05.313308557+00:00 stderr F I1208 17:56:05.313120 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.001300866345649167 seatDemandStdev=0.03604405765726057 
seatDemandSmoothed=3.847484042222807 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:56:05.392301785+00:00 stderr F I1208 17:56:05.392189 12 controller.go:231] Updating CRD OpenAPI spec because consolesamples.console.openshift.io changed 2025-12-08T17:56:05.392471689+00:00 stderr F I1208 17:56:05.392418 12 controller.go:231] Updating CRD OpenAPI spec because operatorconditions.operators.coreos.com changed 2025-12-08T17:56:05.392471689+00:00 stderr F I1208 17:56:05.392438 12 controller.go:231] Updating CRD OpenAPI spec because projects.config.openshift.io changed 2025-12-08T17:56:05.392601823+00:00 stderr F I1208 17:56:05.392558 12 controller.go:231] Updating CRD OpenAPI spec because servicemonitors.monitoring.coreos.com changed 2025-12-08T17:56:05.392632644+00:00 stderr F I1208 17:56:05.392602 12 controller.go:231] Updating CRD OpenAPI spec because elasticsearchautoscalers.autoscaling.k8s.elastic.co changed 2025-12-08T17:56:05.392820479+00:00 stderr F I1208 17:56:05.392767 12 controller.go:231] Updating CRD OpenAPI spec because apiservers.config.openshift.io changed 2025-12-08T17:56:05.393528378+00:00 stderr F I1208 17:56:05.393467 12 controller.go:231] Updating CRD OpenAPI spec because gatewayclasses.gateway.networking.k8s.io changed 2025-12-08T17:56:05.393528378+00:00 stderr F I1208 17:56:05.393491 12 controller.go:231] Updating CRD OpenAPI spec because ipamclaims.k8s.cni.cncf.io changed 2025-12-08T17:56:05.393641451+00:00 stderr F I1208 17:56:05.393578 12 controller.go:231] Updating CRD OpenAPI spec because machinehealthchecks.machine.openshift.io changed 2025-12-08T17:56:05.393782025+00:00 stderr F I1208 17:56:05.393718 12 controller.go:231] Updating CRD OpenAPI spec because metal3remediations.infrastructure.cluster.x-k8s.io changed 2025-12-08T17:56:05.393919769+00:00 stderr F I1208 17:56:05.393846 12 controller.go:231] Updating CRD OpenAPI spec because prometheusrules.monitoring.coreos.com changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394545 12 controller.go:231] Updating CRD OpenAPI spec because rangeallocations.security.internal.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394580 12 controller.go:231] Updating CRD OpenAPI spec because certmanagers.operator.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394589 12 controller.go:231] Updating CRD OpenAPI spec because containerruntimeconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394598 12 controller.go:231] Updating CRD OpenAPI spec because ingresses.config.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394606 12 controller.go:231] Updating CRD OpenAPI spec because machineconfignodes.machineconfiguration.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394618 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigpools.machineconfiguration.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394629 12 controller.go:231] Updating CRD OpenAPI spec because machineosconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394654 12 controller.go:231] Updating CRD OpenAPI spec because network-attachment-definitions.k8s.cni.cncf.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394666 12 controller.go:231] Updating CRD OpenAPI spec because 
clusterversions.config.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394673 12 controller.go:231] Updating CRD OpenAPI spec because consoleplugins.console.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394681 12 controller.go:231] Updating CRD OpenAPI spec because ingresscontrollers.operator.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394688 12 controller.go:231] Updating CRD OpenAPI spec because kubecontrollermanagers.operator.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394698 12 controller.go:231] Updating CRD OpenAPI spec because openshiftapiservers.operator.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394706 12 controller.go:231] Updating CRD OpenAPI spec because elasticmapsservers.maps.k8s.elastic.co changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394752 12 controller.go:231] Updating CRD OpenAPI spec because issuers.cert-manager.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394797 12 controller.go:231] Updating CRD OpenAPI spec because catalogsources.operators.coreos.com changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394805 12 controller.go:231] Updating CRD OpenAPI spec because controlplanemachinesets.machine.openshift.io changed 2025-12-08T17:56:05.395081091+00:00 stderr F I1208 17:56:05.394976 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigurations.operator.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395136 12 controller.go:231] Updating CRD OpenAPI spec because machinesets.machine.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395181 12 controller.go:231] Updating CRD OpenAPI spec because metal3remediationtemplates.infrastructure.cluster.x-k8s.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395189 12 controller.go:231] Updating CRD OpenAPI spec because scrapeconfigs.monitoring.rhobs changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395195 12 controller.go:231] Updating CRD OpenAPI spec because clustercsidrivers.operator.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395212 12 controller.go:231] Updating CRD OpenAPI spec because openshiftcontrollermanagers.operator.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395235 12 controller.go:231] Updating CRD OpenAPI spec because userdefinednetworks.k8s.ovn.org changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395316 12 controller.go:231] Updating CRD OpenAPI spec because consoleclidownloads.console.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395327 12 controller.go:231] Updating CRD OpenAPI spec because csisnapshotcontrollers.operator.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395374 12 controller.go:231] Updating CRD OpenAPI spec because imagecontentpolicies.config.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395533 12 controller.go:231] Updating CRD OpenAPI spec because imagetagmirrorsets.config.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395616 12 controller.go:231] Updating CRD OpenAPI spec because schedulers.config.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395661 12 controller.go:231] 
Updating CRD OpenAPI spec because podmonitors.monitoring.rhobs changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395819 12 controller.go:231] Updating CRD OpenAPI spec because uiplugins.observability.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395838 12 controller.go:231] Updating CRD OpenAPI spec because apirequestcounts.apiserver.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395923 12 controller.go:231] Updating CRD OpenAPI spec because etcds.operator.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.395938 12 controller.go:231] Updating CRD OpenAPI spec because imagedigestmirrorsets.config.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396106 12 controller.go:231] Updating CRD OpenAPI spec because operatorhubs.config.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396192 12 controller.go:231] Updating CRD OpenAPI spec because operatorpkis.network.operator.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396299 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.coreos.com changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396311 12 controller.go:231] Updating CRD OpenAPI spec because subscriptions.operators.coreos.com changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396397 12 controller.go:231] Updating CRD OpenAPI spec because kibanas.kibana.k8s.elastic.co changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396561 12 controller.go:231] Updating CRD OpenAPI spec because authentications.operator.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396645 12 controller.go:231] Updating CRD OpenAPI spec because controllerconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396699 12 controller.go:231] Updating CRD OpenAPI spec because imagepolicies.config.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396775 12 controller.go:231] Updating CRD OpenAPI spec because machines.machine.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396965 12 controller.go:231] Updating CRD OpenAPI spec because podnetworkconnectivitychecks.controlplane.operator.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396983 12 controller.go:231] Updating CRD OpenAPI spec because thanosqueriers.monitoring.rhobs changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.396995 12 controller.go:231] Updating CRD OpenAPI spec because certificates.cert-manager.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.397102 12 controller.go:231] Updating CRD OpenAPI spec because istiocsrs.operator.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.397223 12 controller.go:231] Updating CRD OpenAPI spec because clusteruserdefinednetworks.k8s.ovn.org changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.397302 12 controller.go:231] Updating CRD OpenAPI spec because consoleexternalloglinks.console.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.397372 12 controller.go:231] Updating CRD OpenAPI spec because consolelinks.console.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.397493 12 controller.go:231] Updating CRD OpenAPI 
spec because dnses.config.openshift.io changed 2025-12-08T17:56:05.397546509+00:00 stderr F I1208 17:56:05.397505 12 controller.go:231] Updating CRD OpenAPI spec because proxies.config.openshift.io changed 2025-12-08T17:56:05.397666232+00:00 stderr F I1208 17:56:05.397601 12 controller.go:231] Updating CRD OpenAPI spec because apmservers.apm.k8s.elastic.co changed 2025-12-08T17:56:05.397666232+00:00 stderr F I1208 17:56:05.397630 12 controller.go:231] Updating CRD OpenAPI spec because orders.acme.cert-manager.io changed 2025-12-08T17:56:05.397817926+00:00 stderr F I1208 17:56:05.397762 12 controller.go:231] Updating CRD OpenAPI spec because storagestates.migration.k8s.io changed 2025-12-08T17:56:05.397967800+00:00 stderr F I1208 17:56:05.397920 12 controller.go:231] Updating CRD OpenAPI spec because servicemonitors.monitoring.rhobs changed 2025-12-08T17:56:05.398247068+00:00 stderr F I1208 17:56:05.398075 12 controller.go:231] Updating CRD OpenAPI spec because persesdatasources.perses.dev changed 2025-12-08T17:56:05.398247068+00:00 stderr F I1208 17:56:05.398175 12 controller.go:231] Updating CRD OpenAPI spec because authentications.config.openshift.io changed 2025-12-08T17:56:05.398247068+00:00 stderr F I1208 17:56:05.398199 12 controller.go:231] Updating CRD OpenAPI spec because clusterserviceversions.operators.coreos.com changed 2025-12-08T17:56:05.398247068+00:00 stderr F I1208 17:56:05.398205 12 controller.go:231] Updating CRD OpenAPI spec because securitycontextconstraints.security.openshift.io changed 2025-12-08T17:56:05.398267018+00:00 stderr F I1208 17:56:05.398237 12 controller.go:231] Updating CRD OpenAPI spec because logstashes.logstash.k8s.elastic.co changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398272 12 controller.go:231] Updating CRD OpenAPI spec because challenges.acme.cert-manager.io changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398336 12 controller.go:231] Updating CRD OpenAPI spec because dnsrecords.ingress.operator.openshift.io changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398350 12 controller.go:231] Updating CRD OpenAPI spec because kubestorageversionmigrators.operator.openshift.io changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398399 12 controller.go:231] Updating CRD OpenAPI spec because machineosbuilds.machineconfiguration.openshift.io changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398431 12 controller.go:231] Updating CRD OpenAPI spec because beats.beat.k8s.elastic.co changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398445 12 controller.go:231] Updating CRD OpenAPI spec because consolequickstarts.console.openshift.io changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398454 12 controller.go:231] Updating CRD OpenAPI spec because consoleyamlsamples.console.openshift.io changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398465 12 controller.go:231] Updating CRD OpenAPI spec because operatorgroups.operators.coreos.com changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398507 12 controller.go:231] Updating CRD OpenAPI spec because storageversionmigrations.migration.k8s.io changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398523 12 controller.go:231] Updating CRD OpenAPI spec because consoles.operator.openshift.io changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398600 12 controller.go:231] Updating CRD OpenAPI spec because images.config.openshift.io 
changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398619 12 controller.go:231] Updating CRD OpenAPI spec because kubeschedulers.operator.openshift.io changed 2025-12-08T17:56:05.398806563+00:00 stderr F I1208 17:56:05.398669 12 controller.go:231] Updating CRD OpenAPI spec because projecthelmchartrepositories.helm.openshift.io changed 2025-12-08T17:56:05.399087020+00:00 stderr F I1208 17:56:05.398835 12 controller.go:231] Updating CRD OpenAPI spec because egressqoses.k8s.ovn.org changed 2025-12-08T17:56:05.399087020+00:00 stderr F I1208 17:56:05.398852 12 controller.go:231] Updating CRD OpenAPI spec because ippools.whereabouts.cni.cncf.io changed 2025-12-08T17:56:05.399087020+00:00 stderr F I1208 17:56:05.398860 12 controller.go:231] Updating CRD OpenAPI spec because egressrouters.network.operator.openshift.io changed 2025-12-08T17:56:05.399087020+00:00 stderr F I1208 17:56:05.398900 12 controller.go:231] Updating CRD OpenAPI spec because imagepruners.imageregistry.operator.openshift.io changed 2025-12-08T17:56:05.399087020+00:00 stderr F I1208 17:56:05.398954 12 controller.go:231] Updating CRD OpenAPI spec because clusterissuers.cert-manager.io changed 2025-12-08T17:56:05.399087020+00:00 stderr F I1208 17:56:05.399014 12 controller.go:231] Updating CRD OpenAPI spec because imagecontentsourcepolicies.operator.openshift.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399098 12 controller.go:231] Updating CRD OpenAPI spec because consolenotifications.console.openshift.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399140 12 controller.go:231] Updating CRD OpenAPI spec because ipaddressclaims.ipam.cluster.x-k8s.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399242 12 controller.go:231] Updating CRD OpenAPI spec because kubeletconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399257 12 controller.go:231] Updating CRD OpenAPI spec because nodes.config.openshift.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399378 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.coreos.com changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399425 12 controller.go:231] Updating CRD OpenAPI spec because consoles.config.openshift.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399456 12 controller.go:231] Updating CRD OpenAPI spec because dnses.operator.openshift.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399528 12 controller.go:231] Updating CRD OpenAPI spec because egressfirewalls.k8s.ovn.org changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399557 12 controller.go:231] Updating CRD OpenAPI spec because httproutes.gateway.networking.k8s.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399601 12 controller.go:231] Updating CRD OpenAPI spec because storages.operator.openshift.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399630 12 controller.go:231] Updating CRD OpenAPI spec because machineautoscalers.autoscaling.openshift.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399679 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigs.machineconfiguration.openshift.io changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399737 12 controller.go:231] Updating CRD OpenAPI spec because servicecas.operator.openshift.io changed 
2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399790 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.rhobs changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399820 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.rhobs changed 2025-12-08T17:56:05.399997696+00:00 stderr F I1208 17:56:05.399898 12 controller.go:231] Updating CRD OpenAPI spec because prometheusagents.monitoring.rhobs changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.399994 12 controller.go:231] Updating CRD OpenAPI spec because observabilityinstallers.observability.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400054 12 controller.go:231] Updating CRD OpenAPI spec because adminpolicybasedexternalroutes.k8s.ovn.org changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400063 12 controller.go:231] Updating CRD OpenAPI spec because configs.samples.operator.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400141 12 controller.go:231] Updating CRD OpenAPI spec because helmchartrepositories.helm.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400166 12 controller.go:231] Updating CRD OpenAPI spec because adminnetworkpolicies.policy.networking.k8s.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400239 12 controller.go:231] Updating CRD OpenAPI spec because featuregates.config.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400284 12 controller.go:231] Updating CRD OpenAPI spec because grpcroutes.gateway.networking.k8s.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400344 12 controller.go:231] Updating CRD OpenAPI spec because installplans.operators.coreos.com changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400369 12 controller.go:231] Updating CRD OpenAPI spec because operators.operators.coreos.com changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400447 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.coreos.com changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400484 12 controller.go:231] Updating CRD OpenAPI spec because enterprisesearches.enterprisesearch.k8s.elastic.co changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400503 12 controller.go:231] Updating CRD OpenAPI spec because clusterautoscalers.autoscaling.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400542 12 controller.go:231] Updating CRD OpenAPI spec because kubeapiservers.operator.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400556 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.coreos.com changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400567 12 controller.go:231] Updating CRD OpenAPI spec because monitoringstacks.monitoring.rhobs changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400613 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.rhobs changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400649 12 controller.go:231] Updating CRD OpenAPI spec because stackconfigpolicies.stackconfigpolicy.k8s.elastic.co changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400663 12 controller.go:231] Updating CRD OpenAPI spec because builds.config.openshift.io changed 
2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400689 12 controller.go:231] Updating CRD OpenAPI spec because infrastructures.config.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400709 12 controller.go:231] Updating CRD OpenAPI spec because networks.operator.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400810 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.coreos.com changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400821 12 controller.go:231] Updating CRD OpenAPI spec because egressips.k8s.ovn.org changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400861 12 controller.go:231] Updating CRD OpenAPI spec because ipaddresses.ipam.cluster.x-k8s.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.400973 12 controller.go:231] Updating CRD OpenAPI spec because prometheusrules.monitoring.rhobs changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401017 12 controller.go:231] Updating CRD OpenAPI spec because clusteroperators.config.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401034 12 controller.go:231] Updating CRD OpenAPI spec because egressservices.k8s.ovn.org changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401102 12 controller.go:231] Updating CRD OpenAPI spec because perses.perses.dev changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401113 12 controller.go:231] Updating CRD OpenAPI spec because certificaterequests.cert-manager.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401222 12 controller.go:231] Updating CRD OpenAPI spec because alertingrules.monitoring.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401264 12 controller.go:231] Updating CRD OpenAPI spec because baselineadminnetworkpolicies.policy.networking.k8s.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401297 12 controller.go:231] Updating CRD OpenAPI spec because configs.imageregistry.operator.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401349 12 controller.go:231] Updating CRD OpenAPI spec because configs.operator.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401366 12 controller.go:231] Updating CRD OpenAPI spec because nodeslicepools.whereabouts.cni.cncf.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401435 12 controller.go:231] Updating CRD OpenAPI spec because overlappingrangeipreservations.whereabouts.cni.cncf.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401446 12 controller.go:231] Updating CRD OpenAPI spec because networks.config.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401486 12 controller.go:231] Updating CRD OpenAPI spec because rolebindingrestrictions.authorization.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401536 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.rhobs changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401551 12 controller.go:231] Updating CRD OpenAPI spec because persesdashboards.perses.dev changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401602 12 controller.go:231] Updating CRD OpenAPI spec because elasticsearches.elasticsearch.k8s.elastic.co changed 2025-12-08T17:56:05.404067247+00:00 stderr F 
I1208 17:56:05.401613 12 controller.go:231] Updating CRD OpenAPI spec because oauths.config.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401809 12 controller.go:231] Updating CRD OpenAPI spec because pinnedimagesets.machineconfiguration.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401832 12 controller.go:231] Updating CRD OpenAPI spec because referencegrants.gateway.networking.k8s.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401942 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.rhobs changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401957 12 controller.go:231] Updating CRD OpenAPI spec because olmconfigs.operators.coreos.com changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.401988 12 controller.go:231] Updating CRD OpenAPI spec because clusterimagepolicies.config.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.402010 12 controller.go:231] Updating CRD OpenAPI spec because gateways.gateway.networking.k8s.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.402049 12 controller.go:231] Updating CRD OpenAPI spec because podmonitors.monitoring.coreos.com changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.402102 12 controller.go:231] Updating CRD OpenAPI spec because agents.agent.k8s.elastic.co changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.402145 12 controller.go:231] Updating CRD OpenAPI spec because alertrelabelconfigs.monitoring.openshift.io changed 2025-12-08T17:56:05.404067247+00:00 stderr F I1208 17:56:05.402183 12 controller.go:231] Updating CRD OpenAPI spec because clusterresourcequotas.quota.openshift.io changed 2025-12-08T17:56:07.065635420+00:00 stderr F I1208 17:56:07.065504 12 controller.go:667] quota admission added evaluator for: catalogsources.operators.coreos.com 2025-12-08T17:56:07.078543615+00:00 stderr F I1208 17:56:07.078403 12 controller.go:667] quota admission added evaluator for: networkpolicies.networking.k8s.io 2025-12-08T17:56:07.078543615+00:00 stderr F I1208 17:56:07.078483 12 controller.go:667] quota admission added evaluator for: networkpolicies.networking.k8s.io 2025-12-08T17:56:07.923594432+00:00 stderr F I1208 17:56:07.880640 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/infrawatch-operators" clusterIPs={"IPv4":"10.217.4.230"} 2025-12-08T17:56:07.923594432+00:00 stderr F I1208 17:56:07.881757 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:56:07.923594432+00:00 stderr F I1208 17:56:07.881794 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:56:07.923594432+00:00 stderr F I1208 17:56:07.881800 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 54.581µs 2025-12-08T17:56:15.314404655+00:00 stderr F I1208 17:56:15.313727 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.0040188256398213575 seatDemandStdev=0.08682926127937088 seatDemandSmoothed=3.761081415250824 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:56:38.555774033+00:00 stderr F I1208 17:56:38.553288 12 controller.go:237] Updating CRD OpenAPI spec because interconnects.interconnectedcloud.github.io changed 2025-12-08T17:56:38.555774033+00:00 stderr F I1208 17:56:38.553394 12 handler.go:288] Adding GroupVersion interconnectedcloud.github.io v1alpha1 to 
ResourceManager 2025-12-08T17:56:39.571112936+00:00 stderr F I1208 17:56:39.570990 12 controller.go:237] Updating CRD OpenAPI spec because servicetelemetrys.infra.watch changed 2025-12-08T17:56:39.571112936+00:00 stderr F I1208 17:56:39.570996 12 handler.go:288] Adding GroupVersion infra.watch v1beta1 to ResourceManager 2025-12-08T17:56:39.579399512+00:00 stderr F I1208 17:56:39.579273 12 cacher.go:847] cacher (clusterserviceversions.operators.coreos.com): 2 objects queued in incoming channel. 2025-12-08T17:56:40.351790137+00:00 stderr F I1208 17:56:40.350729 12 controller.go:231] Updating CRD OpenAPI spec because interconnects.interconnectedcloud.github.io changed 2025-12-08T17:56:40.950982885+00:00 stderr F I1208 17:56:40.950860 12 controller.go:237] Updating CRD OpenAPI spec because smartgateways.smartgateway.infra.watch changed 2025-12-08T17:56:40.951146939+00:00 stderr F I1208 17:56:40.951110 12 handler.go:288] Adding GroupVersion smartgateway.infra.watch v2 to ResourceManager 2025-12-08T17:56:41.585016211+00:00 stderr F I1208 17:56:41.584158 12 controller.go:231] Updating CRD OpenAPI spec because servicetelemetrys.infra.watch changed 2025-12-08T17:56:42.786092088+00:00 stderr F I1208 17:56:42.784895 12 controller.go:231] Updating CRD OpenAPI spec because smartgateways.smartgateway.infra.watch changed 2025-12-08T17:56:43.012668238+00:00 stderr F I1208 17:56:43.012196 12 store.go:1663] "Monitoring resource count at path" resource="interconnects.interconnectedcloud.github.io" path="//interconnectedcloud.github.io/interconnects" 2025-12-08T17:56:43.013784767+00:00 stderr F I1208 17:56:43.013663 12 cacher.go:469] cacher (interconnects.interconnectedcloud.github.io): initialized 2025-12-08T17:56:43.013784767+00:00 stderr F I1208 17:56:43.013689 12 reflector.go:430] "Caches populated" type="interconnectedcloud.github.io/v1alpha1, Kind=Interconnect" reflector="storage/cacher.go:/interconnectedcloud.github.io/interconnects" 2025-12-08T17:56:43.030488812+00:00 stderr F I1208 17:56:43.030377 12 controller.go:231] Updating CRD OpenAPI spec because interconnects.interconnectedcloud.github.io changed 2025-12-08T17:56:43.100409826+00:00 stderr F I1208 17:56:43.099834 12 controller.go:231] Updating CRD OpenAPI spec because interconnects.interconnectedcloud.github.io changed 2025-12-08T17:56:44.379959579+00:00 stderr F I1208 17:56:44.379838 12 store.go:1663] "Monitoring resource count at path" resource="servicetelemetrys.infra.watch" path="//infra.watch/servicetelemetrys" 2025-12-08T17:56:44.381144941+00:00 stderr F I1208 17:56:44.381087 12 cacher.go:469] cacher (servicetelemetrys.infra.watch): initialized 2025-12-08T17:56:44.381144941+00:00 stderr F I1208 17:56:44.381107 12 reflector.go:430] "Caches populated" type="infra.watch/v1beta1, Kind=ServiceTelemetry" reflector="storage/cacher.go:/infra.watch/servicetelemetrys" 2025-12-08T17:56:44.405584838+00:00 stderr F I1208 17:56:44.405492 12 controller.go:231] Updating CRD OpenAPI spec because servicetelemetrys.infra.watch changed 2025-12-08T17:56:44.474473105+00:00 stderr F I1208 17:56:44.474379 12 controller.go:231] Updating CRD OpenAPI spec because servicetelemetrys.infra.watch changed 2025-12-08T17:56:45.315373987+00:00 stderr F I1208 17:56:45.315235 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=7 seatDemandAvg=0.056332626420425205 seatDemandStdev=0.3491783108146452 seatDemandSmoothed=3.5220950850798785 fairFrac=2.2796127562642368 currentCL=7 concurrencyDenominator=7 backstop=false 
2025-12-08T17:56:46.740454156+00:00 stderr F I1208 17:56:46.740354 12 store.go:1663] "Monitoring resource count at path" resource="smartgateways.smartgateway.infra.watch" path="//smartgateway.infra.watch/smartgateways" 2025-12-08T17:56:46.742128319+00:00 stderr F I1208 17:56:46.742075 12 cacher.go:469] cacher (smartgateways.smartgateway.infra.watch): initialized 2025-12-08T17:56:46.742161240+00:00 stderr F I1208 17:56:46.742133 12 reflector.go:430] "Caches populated" type="smartgateway.infra.watch/v2, Kind=SmartGateway" reflector="storage/cacher.go:/smartgateway.infra.watch/smartgateways" 2025-12-08T17:56:46.753586498+00:00 stderr F I1208 17:56:46.753492 12 controller.go:231] Updating CRD OpenAPI spec because smartgateways.smartgateway.infra.watch changed 2025-12-08T17:56:46.803678494+00:00 stderr F I1208 17:56:46.803567 12 controller.go:231] Updating CRD OpenAPI spec because smartgateways.smartgateway.infra.watch changed 2025-12-08T17:56:50.566866627+00:00 stderr F I1208 17:56:50.566762 12 controller.go:231] Updating CRD OpenAPI spec because interconnects.interconnectedcloud.github.io changed 2025-12-08T17:56:50.615442233+00:00 stderr F I1208 17:56:50.615334 12 controller.go:231] Updating CRD OpenAPI spec because interconnects.interconnectedcloud.github.io changed 2025-12-08T17:56:51.817809783+00:00 stderr F I1208 17:56:51.817683 12 controller.go:231] Updating CRD OpenAPI spec because servicetelemetrys.infra.watch changed 2025-12-08T17:56:51.954117589+00:00 stderr F I1208 17:56:51.953290 12 controller.go:231] Updating CRD OpenAPI spec because servicetelemetrys.infra.watch changed 2025-12-08T17:56:54.823888027+00:00 stderr F I1208 17:56:54.823767 12 controller.go:231] Updating CRD OpenAPI spec because smartgateways.smartgateway.infra.watch changed 2025-12-08T17:56:54.940504039+00:00 stderr F I1208 17:56:54.940376 12 controller.go:231] Updating CRD OpenAPI spec because smartgateways.smartgateway.infra.watch changed 2025-12-08T17:56:55.317243355+00:00 stderr F I1208 17:56:55.315611 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0016265110431322519 seatDemandStdev=0.040297214605463996 seatDemandSmoothed=3.442051143812959 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:56:57.610767235+00:00 stderr F W1208 17:56:57.610540 12 watcher.go:338] watch chan error: etcdserver: mvcc: required revision has been compacted 2025-12-08T17:57:05.316500086+00:00 stderr F I1208 17:57:05.315826 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.0016995779425957984 seatDemandStdev=0.05568451042970999 seatDemandSmoothed=3.364203801537824 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:57:12.254340468+00:00 stderr F I1208 17:57:12.254181 12 controller.go:667] quota admission added evaluator for: servicetelemetrys.infra.watch 2025-12-08T17:57:12.254340468+00:00 stderr F I1208 17:57:12.254229 12 controller.go:667] quota admission added evaluator for: servicetelemetrys.infra.watch 2025-12-08T17:57:19.032924968+00:00 stderr F I1208 17:57:19.032055 12 controller.go:667] quota admission added evaluator for: issuers.cert-manager.io 2025-12-08T17:57:19.032924968+00:00 stderr F I1208 17:57:19.032161 12 controller.go:667] quota admission added evaluator for: issuers.cert-manager.io 2025-12-08T17:57:19.964579758+00:00 stderr F I1208 17:57:19.964435 12 controller.go:667] quota admission added evaluator for: 
certificates.cert-manager.io 2025-12-08T17:57:19.964579758+00:00 stderr F I1208 17:57:19.964481 12 controller.go:667] quota admission added evaluator for: certificates.cert-manager.io 2025-12-08T17:57:20.325649195+00:00 stderr F I1208 17:57:20.325446 12 controller.go:667] quota admission added evaluator for: certificaterequests.cert-manager.io 2025-12-08T17:57:20.325649195+00:00 stderr F I1208 17:57:20.325524 12 controller.go:667] quota admission added evaluator for: certificaterequests.cert-manager.io 2025-12-08T17:57:21.844029407+00:00 stderr F I1208 17:57:21.843861 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:57:21.844029407+00:00 stderr F I1208 17:57:21.844003 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:57:21.844090939+00:00 stderr F I1208 17:57:21.844024 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 168.674µs 2025-12-08T17:57:28.261718123+00:00 stderr F I1208 17:57:28.261569 12 controller.go:667] quota admission added evaluator for: interconnects.interconnectedcloud.github.io 2025-12-08T17:57:28.261718123+00:00 stderr F I1208 17:57:28.261665 12 controller.go:667] quota admission added evaluator for: interconnects.interconnectedcloud.github.io 2025-12-08T17:57:28.269003643+00:00 stderr F E1208 17:57:28.268850 12 fieldmanager.go:155] "[SHOULD NOT HAPPEN] failed to update managedFields" err="failed to convert new object (service-telemetry/default-interconnect; interconnectedcloud.github.io/v1alpha1, Kind=Interconnect) to smd typed: .status.conditions[0].transitionTime: expected map, got &{2025-12-08T17:57:28Z}" versionKind="interconnectedcloud.github.io/v1alpha1, Kind=Interconnect" namespace="service-telemetry" name="default-interconnect" 2025-12-08T17:57:28.541999324+00:00 stderr F I1208 17:57:28.541459 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/default-interconnect" clusterIPs={"IPv4":"10.217.4.193"} 2025-12-08T17:57:28.546860720+00:00 stderr F I1208 17:57:28.546732 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:57:28.546860720+00:00 stderr F I1208 17:57:28.546756 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:57:28.546860720+00:00 stderr F I1208 17:57:28.546761 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 35.611µs 2025-12-08T17:57:37.388626980+00:00 stderr F I1208 17:57:37.388461 12 controller.go:667] quota admission added evaluator for: prometheuses.monitoring.rhobs 2025-12-08T17:57:37.388626980+00:00 stderr F I1208 17:57:37.388556 12 controller.go:667] quota admission added evaluator for: prometheuses.monitoring.rhobs 2025-12-08T17:57:39.479044009+00:00 stderr F I1208 17:57:39.478754 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/default-prometheus-proxy" clusterIPs={"IPv4":"10.217.4.221"} 2025-12-08T17:57:39.496076299+00:00 stderr F I1208 17:57:39.491518 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:57:39.496076299+00:00 stderr F I1208 17:57:39.491557 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:57:39.496076299+00:00 stderr F I1208 17:57:39.491565 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 54.871µs 2025-12-08T17:57:45.318386430+00:00 stderr F I1208 17:57:45.318152 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.002111437266315972 seatDemandStdev=0.045901841999928326 seatDemandSmoothed=3.0761446810068525 
fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:57:46.257816490+00:00 stderr F I1208 17:57:46.256661 12 controller.go:667] quota admission added evaluator for: alertmanagers.monitoring.rhobs 2025-12-08T17:57:46.257816490+00:00 stderr F I1208 17:57:46.256745 12 controller.go:667] quota admission added evaluator for: alertmanagers.monitoring.rhobs 2025-12-08T17:57:49.276707105+00:00 stderr F I1208 17:57:49.276415 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/default-prometheus-webhook-snmp" clusterIPs={"IPv4":"10.217.4.247"} 2025-12-08T17:57:49.280768431+00:00 stderr F I1208 17:57:49.278996 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:57:49.280768431+00:00 stderr F I1208 17:57:49.279023 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:57:49.280768431+00:00 stderr F I1208 17:57:49.279029 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 38.961µs 2025-12-08T17:57:55.793657360+00:00 stderr F I1208 17:57:55.793085 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/default-alertmanager-proxy" clusterIPs={"IPv4":"10.217.4.38"} 2025-12-08T17:57:55.798528156+00:00 stderr F I1208 17:57:55.798251 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:57:55.798528156+00:00 stderr F I1208 17:57:55.798366 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:57:55.798528156+00:00 stderr F I1208 17:57:55.798377 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 136.254µs 2025-12-08T17:57:57.769448916+00:00 stderr F I1208 17:57:57.769328 12 controller.go:667] quota admission added evaluator for: smartgateways.smartgateway.infra.watch 2025-12-08T17:57:57.769448916+00:00 stderr F I1208 17:57:57.769411 12 controller.go:667] quota admission added evaluator for: smartgateways.smartgateway.infra.watch 2025-12-08T17:57:59.706599103+00:00 stderr F I1208 17:57:59.706484 12 controller.go:667] quota admission added evaluator for: scrapeconfigs.monitoring.rhobs 2025-12-08T17:57:59.706599103+00:00 stderr F I1208 17:57:59.706534 12 controller.go:667] quota admission added evaluator for: scrapeconfigs.monitoring.rhobs 2025-12-08T17:58:05.318856476+00:00 stderr F I1208 17:58:05.318682 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.007770604758189724 seatDemandStdev=0.12227997380204698 seatDemandSmoothed=2.940427318030233 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:58:07.185407898+00:00 stderr F I1208 17:58:07.185256 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/default-cloud1-coll-meter" clusterIPs={"IPv4":"10.217.4.61"} 2025-12-08T17:58:07.188041746+00:00 stderr F I1208 17:58:07.187950 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:58:07.188041746+00:00 stderr F I1208 17:58:07.187989 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:58:07.188041746+00:00 stderr F I1208 17:58:07.187995 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 51.152µs 2025-12-08T17:58:09.885216326+00:00 stderr F I1208 17:58:09.885092 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/default-cloud1-ceil-meter" clusterIPs={"IPv4":"10.217.4.132"} 2025-12-08T17:58:09.887254638+00:00 stderr F I1208 17:58:09.887126 12 cidrallocator.go:241] syncing ServiceCIDR allocators 
2025-12-08T17:58:09.887254638+00:00 stderr F I1208 17:58:09.887148 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:58:09.887254638+00:00 stderr F I1208 17:58:09.887153 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 29.931µs 2025-12-08T17:58:14.152746303+00:00 stderr F I1208 17:58:14.152211 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/default-cloud1-sens-meter" clusterIPs={"IPv4":"10.217.5.150"} 2025-12-08T17:58:14.160544015+00:00 stderr F I1208 17:58:14.160381 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:58:14.160544015+00:00 stderr F I1208 17:58:14.160411 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:58:14.160544015+00:00 stderr F I1208 17:58:14.160417 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 45.221µs 2025-12-08T17:58:15.320232348+00:00 stderr F I1208 17:58:15.320085 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=2 seatDemandAvg=0.0030960534978667414 seatDemandStdev=0.05873139187825276 seatDemandSmoothed=2.8742195209591883 fairFrac=2.2796127562642368 currentCL=2 concurrencyDenominator=2 backstop=false 2025-12-08T17:58:25.321609309+00:00 stderr F I1208 17:58:25.321006 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.005488778360702892 seatDemandStdev=0.12076078443257447 seatDemandSmoothed=2.8110162119213724 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:58:35.322466718+00:00 stderr F I1208 17:58:35.322321 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=4 seatDemandAvg=0.0026027546834160578 seatDemandStdev=0.0670658154902746 seatDemandSmoothed=2.7479652161611754 fairFrac=2.2796127562642368 currentCL=4 concurrencyDenominator=4 backstop=false 2025-12-08T17:58:45.323811678+00:00 stderr F I1208 17:58:45.323575 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=3 seatDemandAvg=0.003649036143946392 seatDemandStdev=0.09559900585831756 seatDemandSmoothed=2.6870447211555204 fairFrac=2.2796127562642368 currentCL=3 concurrencyDenominator=3 backstop=false 2025-12-08T17:58:55.325560980+00:00 stderr F I1208 17:58:55.324987 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0010692575144972973 seatDemandStdev=0.03268201650548797 seatDemandSmoothed=2.6260189718714027 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:59:05.512231926+00:00 stderr F I1208 17:59:05.512075 12 alloc.go:328] "allocated clusterIPs" service="service-telemetry/qdr-test" clusterIPs={"IPv4":"10.217.4.87"} 2025-12-08T17:59:05.522121296+00:00 stderr F I1208 17:59:05.516182 12 cidrallocator.go:241] syncing ServiceCIDR allocators 2025-12-08T17:59:05.522121296+00:00 stderr F I1208 17:59:05.516220 12 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.217.4.0/23 2025-12-08T17:59:05.522121296+00:00 stderr F I1208 17:59:05.516227 12 cidrallocator.go:243] syncing ServiceCIDR allocators took: 52.972µs 2025-12-08T17:59:24.480917006+00:00 stderr F E1208 17:59:24.479912 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/curl/curl\", Err:(*tls.permanentError)(0xc00d820f50)}: Get 
\"https://192.168.126.11:10250/containerLogs/service-telemetry/curl/curl\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T17:59:24.771585457+00:00 stderr F E1208 17:59:24.771436 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\", Err:(*tls.permanentError)(0xc033d4eed0)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T17:59:25.326478592+00:00 stderr F I1208 17:59:25.326277 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.0067256093050179315 seatDemandStdev=0.13205260024141177 seatDemandSmoothed=2.4541066248987784 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T17:59:35.327593486+00:00 stderr F I1208 17:59:35.327290 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=3 seatDemandAvg=0.002950135487835206 seatDemandStdev=0.06379785575595234 seatDemandSmoothed=2.399197376324713 fairFrac=2.2796127562642368 currentCL=3 concurrencyDenominator=3 backstop=false 2025-12-08T17:59:45.328798090+00:00 stderr F I1208 17:59:45.328597 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0030069679389366976 seatDemandStdev=0.05475332028974046 seatDemandSmoothed=2.3453443232985043 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T17:59:54.923834268+00:00 stderr F E1208 17:59:54.923016 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\", Err:(*tls.permanentError)(0xc0606ae980)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:00:00.152035309+00:00 stderr F I1208 18:00:00.151835 12 controller.go:667] quota admission added evaluator for: cronjobs.batch 2025-12-08T18:00:05.329630108+00:00 stderr F I1208 18:00:05.329368 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.010396223241367174 seatDemandStdev=0.14153582320453598 seatDemandSmoothed=2.242886719206214 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T18:00:15.330813398+00:00 stderr F I1208 18:00:15.330632 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0013358660481977553 seatDemandStdev=0.03652508056252618 seatDemandSmoothed=2.1921711264365173 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T18:00:25.067206430+00:00 stderr F E1208 18:00:25.066845 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\", Err:(*tls.permanentError)(0xc020275710)}: Get 
\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:00:25.332179389+00:00 stderr F I1208 18:00:25.331975 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.006577752320803372 seatDemandStdev=0.12648309435885374 seatDemandSmoothed=2.1448115900021096 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T18:00:45.334104884+00:00 stderr F I1208 18:00:45.333868 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0011600347153226363 seatDemandStdev=0.03403952165912269 seatDemandSmoothed=2.050172643747577 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T18:00:55.252408699+00:00 stderr F E1208 18:00:55.251777 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\", Err:(*tls.permanentError)(0xc046d36470)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:05.393063563+00:00 stderr F I1208 18:01:05.392747 12 controller.go:231] Updating CRD OpenAPI spec because ipamclaims.k8s.cni.cncf.io changed 2025-12-08T18:01:05.397016099+00:00 stderr F I1208 18:01:05.393244 12 controller.go:231] Updating CRD OpenAPI spec because machinehealthchecks.machine.openshift.io changed 2025-12-08T18:01:05.397016099+00:00 stderr F I1208 18:01:05.394384 12 controller.go:231] Updating CRD OpenAPI spec because metal3remediations.infrastructure.cluster.x-k8s.io changed 2025-12-08T18:01:05.397016099+00:00 stderr F I1208 18:01:05.394413 12 controller.go:231] Updating CRD OpenAPI spec because prometheusrules.monitoring.coreos.com changed 2025-12-08T18:01:05.397016099+00:00 stderr F I1208 18:01:05.394428 12 controller.go:231] Updating CRD OpenAPI spec because rangeallocations.security.internal.openshift.io changed 2025-12-08T18:01:05.397016099+00:00 stderr F I1208 18:01:05.396317 12 controller.go:231] Updating CRD OpenAPI spec because certmanagers.operator.openshift.io changed 2025-12-08T18:01:05.397016099+00:00 stderr F I1208 18:01:05.396739 12 controller.go:231] Updating CRD OpenAPI spec because containerruntimeconfigs.machineconfiguration.openshift.io changed 2025-12-08T18:01:05.397016099+00:00 stderr F I1208 18:01:05.396949 12 controller.go:231] Updating CRD OpenAPI spec because ingresses.config.openshift.io changed 2025-12-08T18:01:05.397339057+00:00 stderr F I1208 18:01:05.397227 12 controller.go:231] Updating CRD OpenAPI spec because machineconfignodes.machineconfiguration.openshift.io changed 2025-12-08T18:01:05.397539403+00:00 stderr F I1208 18:01:05.397420 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigpools.machineconfiguration.openshift.io changed 2025-12-08T18:01:05.397752998+00:00 stderr F I1208 18:01:05.397653 12 controller.go:231] Updating CRD OpenAPI spec because machineosconfigs.machineconfiguration.openshift.io changed 2025-12-08T18:01:05.397752998+00:00 stderr F I1208 18:01:05.397716 12 controller.go:231] Updating CRD OpenAPI spec because network-attachment-definitions.k8s.cni.cncf.io changed 
2025-12-08T18:01:05.397925133+00:00 stderr F I1208 18:01:05.397807 12 controller.go:231] Updating CRD OpenAPI spec because clusterversions.config.openshift.io changed 2025-12-08T18:01:05.398336504+00:00 stderr F I1208 18:01:05.398227 12 controller.go:231] Updating CRD OpenAPI spec because consoleplugins.console.openshift.io changed 2025-12-08T18:01:05.398465367+00:00 stderr F I1208 18:01:05.398379 12 controller.go:231] Updating CRD OpenAPI spec because ingresscontrollers.operator.openshift.io changed 2025-12-08T18:01:05.398465367+00:00 stderr F I1208 18:01:05.398418 12 controller.go:231] Updating CRD OpenAPI spec because kubecontrollermanagers.operator.openshift.io changed 2025-12-08T18:01:05.398465367+00:00 stderr F I1208 18:01:05.398435 12 controller.go:231] Updating CRD OpenAPI spec because openshiftapiservers.operator.openshift.io changed 2025-12-08T18:01:05.398518508+00:00 stderr F I1208 18:01:05.398459 12 controller.go:231] Updating CRD OpenAPI spec because elasticmapsservers.maps.k8s.elastic.co changed 2025-12-08T18:01:05.398717504+00:00 stderr F I1208 18:01:05.398624 12 controller.go:231] Updating CRD OpenAPI spec because issuers.cert-manager.io changed 2025-12-08T18:01:05.398800736+00:00 stderr F I1208 18:01:05.398733 12 controller.go:231] Updating CRD OpenAPI spec because catalogsources.operators.coreos.com changed 2025-12-08T18:01:05.398908839+00:00 stderr F I1208 18:01:05.398790 12 controller.go:231] Updating CRD OpenAPI spec because controlplanemachinesets.machine.openshift.io changed 2025-12-08T18:01:05.399023062+00:00 stderr F I1208 18:01:05.398944 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigurations.operator.openshift.io changed 2025-12-08T18:01:05.399075603+00:00 stderr F I1208 18:01:05.399013 12 controller.go:231] Updating CRD OpenAPI spec because machinesets.machine.openshift.io changed 2025-12-08T18:01:05.399188096+00:00 stderr F I1208 18:01:05.399112 12 controller.go:231] Updating CRD OpenAPI spec because metal3remediationtemplates.infrastructure.cluster.x-k8s.io changed 2025-12-08T18:01:05.399188096+00:00 stderr F I1208 18:01:05.399152 12 controller.go:231] Updating CRD OpenAPI spec because scrapeconfigs.monitoring.rhobs changed 2025-12-08T18:01:05.399334330+00:00 stderr F I1208 18:01:05.399251 12 controller.go:231] Updating CRD OpenAPI spec because clustercsidrivers.operator.openshift.io changed 2025-12-08T18:01:05.399334330+00:00 stderr F I1208 18:01:05.399293 12 controller.go:231] Updating CRD OpenAPI spec because openshiftcontrollermanagers.operator.openshift.io changed 2025-12-08T18:01:05.399425902+00:00 stderr F I1208 18:01:05.399360 12 controller.go:231] Updating CRD OpenAPI spec because userdefinednetworks.k8s.ovn.org changed 2025-12-08T18:01:05.399485954+00:00 stderr F I1208 18:01:05.399425 12 controller.go:231] Updating CRD OpenAPI spec because consoleclidownloads.console.openshift.io changed 2025-12-08T18:01:05.399613727+00:00 stderr F I1208 18:01:05.399537 12 controller.go:231] Updating CRD OpenAPI spec because csisnapshotcontrollers.operator.openshift.io changed 2025-12-08T18:01:05.399613727+00:00 stderr F I1208 18:01:05.399552 12 controller.go:231] Updating CRD OpenAPI spec because imagecontentpolicies.config.openshift.io changed 2025-12-08T18:01:05.399613727+00:00 stderr F I1208 18:01:05.399587 12 controller.go:231] Updating CRD OpenAPI spec because imagetagmirrorsets.config.openshift.io changed 2025-12-08T18:01:05.399704569+00:00 stderr F I1208 18:01:05.399639 12 controller.go:231] Updating CRD OpenAPI spec because 
schedulers.config.openshift.io changed 2025-12-08T18:01:05.399757261+00:00 stderr F I1208 18:01:05.399693 12 controller.go:231] Updating CRD OpenAPI spec because podmonitors.monitoring.rhobs changed 2025-12-08T18:01:05.399826164+00:00 stderr F I1208 18:01:05.399762 12 controller.go:231] Updating CRD OpenAPI spec because uiplugins.observability.openshift.io changed 2025-12-08T18:01:05.399826164+00:00 stderr F I1208 18:01:05.399780 12 controller.go:231] Updating CRD OpenAPI spec because apirequestcounts.apiserver.openshift.io changed 2025-12-08T18:01:05.399983488+00:00 stderr F I1208 18:01:05.399862 12 controller.go:231] Updating CRD OpenAPI spec because etcds.operator.openshift.io changed 2025-12-08T18:01:05.399983488+00:00 stderr F I1208 18:01:05.399931 12 controller.go:231] Updating CRD OpenAPI spec because imagedigestmirrorsets.config.openshift.io changed 2025-12-08T18:01:05.400169813+00:00 stderr F I1208 18:01:05.400075 12 controller.go:231] Updating CRD OpenAPI spec because operatorhubs.config.openshift.io changed 2025-12-08T18:01:05.400169813+00:00 stderr F I1208 18:01:05.400127 12 controller.go:231] Updating CRD OpenAPI spec because operatorpkis.network.operator.openshift.io changed 2025-12-08T18:01:05.400169813+00:00 stderr F I1208 18:01:05.400148 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.coreos.com changed 2025-12-08T18:01:05.400247115+00:00 stderr F I1208 18:01:05.400177 12 controller.go:231] Updating CRD OpenAPI spec because subscriptions.operators.coreos.com changed 2025-12-08T18:01:05.400382098+00:00 stderr F I1208 18:01:05.400303 12 controller.go:231] Updating CRD OpenAPI spec because kibanas.kibana.k8s.elastic.co changed 2025-12-08T18:01:05.400511232+00:00 stderr F I1208 18:01:05.400428 12 controller.go:231] Updating CRD OpenAPI spec because authentications.operator.openshift.io changed 2025-12-08T18:01:05.400511232+00:00 stderr F I1208 18:01:05.400465 12 controller.go:231] Updating CRD OpenAPI spec because controllerconfigs.machineconfiguration.openshift.io changed 2025-12-08T18:01:05.400511232+00:00 stderr F I1208 18:01:05.400488 12 controller.go:231] Updating CRD OpenAPI spec because imagepolicies.config.openshift.io changed 2025-12-08T18:01:05.400578923+00:00 stderr F I1208 18:01:05.400505 12 controller.go:231] Updating CRD OpenAPI spec because machines.machine.openshift.io changed 2025-12-08T18:01:05.400691926+00:00 stderr F I1208 18:01:05.400606 12 controller.go:231] Updating CRD OpenAPI spec because podnetworkconnectivitychecks.controlplane.operator.openshift.io changed 2025-12-08T18:01:05.400691926+00:00 stderr F I1208 18:01:05.400666 12 controller.go:231] Updating CRD OpenAPI spec because thanosqueriers.monitoring.rhobs changed 2025-12-08T18:01:05.400842980+00:00 stderr F I1208 18:01:05.400757 12 controller.go:231] Updating CRD OpenAPI spec because certificates.cert-manager.io changed 2025-12-08T18:01:05.400842980+00:00 stderr F I1208 18:01:05.400796 12 controller.go:231] Updating CRD OpenAPI spec because istiocsrs.operator.openshift.io changed 2025-12-08T18:01:05.400993204+00:00 stderr F I1208 18:01:05.400844 12 controller.go:231] Updating CRD OpenAPI spec because clusteruserdefinednetworks.k8s.ovn.org changed 2025-12-08T18:01:05.401082897+00:00 stderr F I1208 18:01:05.401000 12 controller.go:231] Updating CRD OpenAPI spec because consoleexternalloglinks.console.openshift.io changed 2025-12-08T18:01:05.401441966+00:00 stderr F I1208 18:01:05.401343 12 controller.go:231] Updating CRD OpenAPI spec because 
consolelinks.console.openshift.io changed 2025-12-08T18:01:05.401580340+00:00 stderr F I1208 18:01:05.401487 12 controller.go:231] Updating CRD OpenAPI spec because dnses.config.openshift.io changed 2025-12-08T18:01:05.401580340+00:00 stderr F I1208 18:01:05.401522 12 controller.go:231] Updating CRD OpenAPI spec because proxies.config.openshift.io changed 2025-12-08T18:01:05.401762735+00:00 stderr F I1208 18:01:05.401667 12 controller.go:231] Updating CRD OpenAPI spec because apmservers.apm.k8s.elastic.co changed 2025-12-08T18:01:05.402173125+00:00 stderr F I1208 18:01:05.402066 12 controller.go:231] Updating CRD OpenAPI spec because orders.acme.cert-manager.io changed 2025-12-08T18:01:05.402173125+00:00 stderr F I1208 18:01:05.402117 12 controller.go:231] Updating CRD OpenAPI spec because storagestates.migration.k8s.io changed 2025-12-08T18:01:05.402554255+00:00 stderr F I1208 18:01:05.402468 12 controller.go:231] Updating CRD OpenAPI spec because servicemonitors.monitoring.rhobs changed 2025-12-08T18:01:05.402554255+00:00 stderr F I1208 18:01:05.402499 12 controller.go:231] Updating CRD OpenAPI spec because persesdatasources.perses.dev changed 2025-12-08T18:01:05.402554255+00:00 stderr F I1208 18:01:05.402518 12 controller.go:231] Updating CRD OpenAPI spec because authentications.config.openshift.io changed 2025-12-08T18:01:05.402868275+00:00 stderr F I1208 18:01:05.402789 12 controller.go:231] Updating CRD OpenAPI spec because clusterserviceversions.operators.coreos.com changed 2025-12-08T18:01:05.402986238+00:00 stderr F I1208 18:01:05.402920 12 controller.go:231] Updating CRD OpenAPI spec because securitycontextconstraints.security.openshift.io changed 2025-12-08T18:01:05.403048489+00:00 stderr F I1208 18:01:05.403003 12 controller.go:231] Updating CRD OpenAPI spec because logstashes.logstash.k8s.elastic.co changed 2025-12-08T18:01:05.403111231+00:00 stderr F I1208 18:01:05.403058 12 controller.go:231] Updating CRD OpenAPI spec because challenges.acme.cert-manager.io changed 2025-12-08T18:01:05.403340217+00:00 stderr F I1208 18:01:05.403255 12 controller.go:231] Updating CRD OpenAPI spec because dnsrecords.ingress.operator.openshift.io changed 2025-12-08T18:01:05.403340217+00:00 stderr F I1208 18:01:05.403287 12 controller.go:231] Updating CRD OpenAPI spec because kubestorageversionmigrators.operator.openshift.io changed 2025-12-08T18:01:05.403667586+00:00 stderr F I1208 18:01:05.403573 12 controller.go:231] Updating CRD OpenAPI spec because machineosbuilds.machineconfiguration.openshift.io changed 2025-12-08T18:01:05.403734777+00:00 stderr F I1208 18:01:05.403670 12 controller.go:231] Updating CRD OpenAPI spec because beats.beat.k8s.elastic.co changed 2025-12-08T18:01:05.403872931+00:00 stderr F I1208 18:01:05.403806 12 controller.go:231] Updating CRD OpenAPI spec because consolequickstarts.console.openshift.io changed 2025-12-08T18:01:05.403962463+00:00 stderr F I1208 18:01:05.403913 12 controller.go:231] Updating CRD OpenAPI spec because consoleyamlsamples.console.openshift.io changed 2025-12-08T18:01:05.405081943+00:00 stderr F I1208 18:01:05.404968 12 controller.go:231] Updating CRD OpenAPI spec because operatorgroups.operators.coreos.com changed 2025-12-08T18:01:05.405269118+00:00 stderr F I1208 18:01:05.405116 12 controller.go:231] Updating CRD OpenAPI spec because storageversionmigrations.migration.k8s.io changed 2025-12-08T18:01:05.405269118+00:00 stderr F I1208 18:01:05.405214 12 controller.go:231] Updating CRD OpenAPI spec because consoles.operator.openshift.io changed 
2025-12-08T18:01:05.405355010+00:00 stderr F I1208 18:01:05.405285 12 controller.go:231] Updating CRD OpenAPI spec because images.config.openshift.io changed 2025-12-08T18:01:05.405451914+00:00 stderr F I1208 18:01:05.405379 12 controller.go:231] Updating CRD OpenAPI spec because kubeschedulers.operator.openshift.io changed 2025-12-08T18:01:05.405717201+00:00 stderr F I1208 18:01:05.405634 12 controller.go:231] Updating CRD OpenAPI spec because projecthelmchartrepositories.helm.openshift.io changed 2025-12-08T18:01:05.405803983+00:00 stderr F I1208 18:01:05.405745 12 controller.go:231] Updating CRD OpenAPI spec because egressqoses.k8s.ovn.org changed 2025-12-08T18:01:05.405910556+00:00 stderr F I1208 18:01:05.405818 12 controller.go:231] Updating CRD OpenAPI spec because ippools.whereabouts.cni.cncf.io changed 2025-12-08T18:01:05.406037509+00:00 stderr F I1208 18:01:05.405942 12 controller.go:231] Updating CRD OpenAPI spec because egressrouters.network.operator.openshift.io changed 2025-12-08T18:01:05.406037509+00:00 stderr F I1208 18:01:05.405978 12 controller.go:231] Updating CRD OpenAPI spec because imagepruners.imageregistry.operator.openshift.io changed 2025-12-08T18:01:05.406255575+00:00 stderr F I1208 18:01:05.406174 12 controller.go:231] Updating CRD OpenAPI spec because clusterissuers.cert-manager.io changed 2025-12-08T18:01:05.406633855+00:00 stderr F I1208 18:01:05.406538 12 controller.go:231] Updating CRD OpenAPI spec because imagecontentsourcepolicies.operator.openshift.io changed 2025-12-08T18:01:05.406633855+00:00 stderr F I1208 18:01:05.406582 12 controller.go:231] Updating CRD OpenAPI spec because consolenotifications.console.openshift.io changed 2025-12-08T18:01:05.406856500+00:00 stderr F I1208 18:01:05.406781 12 controller.go:231] Updating CRD OpenAPI spec because ipaddressclaims.ipam.cluster.x-k8s.io changed 2025-12-08T18:01:05.408327260+00:00 stderr F I1208 18:01:05.408204 12 controller.go:231] Updating CRD OpenAPI spec because kubeletconfigs.machineconfiguration.openshift.io changed 2025-12-08T18:01:05.408327260+00:00 stderr F I1208 18:01:05.408253 12 controller.go:231] Updating CRD OpenAPI spec because nodes.config.openshift.io changed 2025-12-08T18:01:05.408327260+00:00 stderr F I1208 18:01:05.408276 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.coreos.com changed 2025-12-08T18:01:05.408327260+00:00 stderr F I1208 18:01:05.408290 12 controller.go:231] Updating CRD OpenAPI spec because consoles.config.openshift.io changed 2025-12-08T18:01:05.408327260+00:00 stderr F I1208 18:01:05.408302 12 controller.go:231] Updating CRD OpenAPI spec because dnses.operator.openshift.io changed 2025-12-08T18:01:05.408376721+00:00 stderr F I1208 18:01:05.408314 12 controller.go:231] Updating CRD OpenAPI spec because egressfirewalls.k8s.ovn.org changed 2025-12-08T18:01:05.408376721+00:00 stderr F I1208 18:01:05.408326 12 controller.go:231] Updating CRD OpenAPI spec because httproutes.gateway.networking.k8s.io changed 2025-12-08T18:01:05.408376721+00:00 stderr F I1208 18:01:05.408338 12 controller.go:231] Updating CRD OpenAPI spec because storages.operator.openshift.io changed 2025-12-08T18:01:05.408454243+00:00 stderr F I1208 18:01:05.408370 12 controller.go:231] Updating CRD OpenAPI spec because machineautoscalers.autoscaling.openshift.io changed 2025-12-08T18:01:05.408454243+00:00 stderr F I1208 18:01:05.408395 12 controller.go:231] Updating CRD OpenAPI spec because machineconfigs.machineconfiguration.openshift.io changed 
2025-12-08T18:01:05.408498685+00:00 stderr F I1208 18:01:05.408430 12 controller.go:231] Updating CRD OpenAPI spec because servicecas.operator.openshift.io changed 2025-12-08T18:01:05.408498685+00:00 stderr F I1208 18:01:05.408465 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagers.monitoring.rhobs changed 2025-12-08T18:01:05.408498685+00:00 stderr F I1208 18:01:05.408486 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.rhobs changed 2025-12-08T18:01:05.408579037+00:00 stderr F I1208 18:01:05.408504 12 controller.go:231] Updating CRD OpenAPI spec because prometheusagents.monitoring.rhobs changed 2025-12-08T18:01:05.408579037+00:00 stderr F I1208 18:01:05.408532 12 controller.go:231] Updating CRD OpenAPI spec because observabilityinstallers.observability.openshift.io changed 2025-12-08T18:01:05.408579037+00:00 stderr F I1208 18:01:05.408556 12 controller.go:231] Updating CRD OpenAPI spec because adminpolicybasedexternalroutes.k8s.ovn.org changed 2025-12-08T18:01:05.408637568+00:00 stderr F I1208 18:01:05.408574 12 controller.go:231] Updating CRD OpenAPI spec because configs.samples.operator.openshift.io changed 2025-12-08T18:01:05.408637568+00:00 stderr F I1208 18:01:05.408600 12 controller.go:231] Updating CRD OpenAPI spec because helmchartrepositories.helm.openshift.io changed 2025-12-08T18:01:05.408637568+00:00 stderr F I1208 18:01:05.408617 12 controller.go:231] Updating CRD OpenAPI spec because adminnetworkpolicies.policy.networking.k8s.io changed 2025-12-08T18:01:05.408869744+00:00 stderr F I1208 18:01:05.408782 12 controller.go:231] Updating CRD OpenAPI spec because featuregates.config.openshift.io changed 2025-12-08T18:01:05.409046209+00:00 stderr F I1208 18:01:05.408969 12 controller.go:231] Updating CRD OpenAPI spec because grpcroutes.gateway.networking.k8s.io changed 2025-12-08T18:01:05.409135021+00:00 stderr F I1208 18:01:05.409077 12 controller.go:231] Updating CRD OpenAPI spec because installplans.operators.coreos.com changed 2025-12-08T18:01:05.409238474+00:00 stderr F I1208 18:01:05.409151 12 controller.go:231] Updating CRD OpenAPI spec because operators.operators.coreos.com changed 2025-12-08T18:01:05.409238474+00:00 stderr F I1208 18:01:05.409182 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.coreos.com changed 2025-12-08T18:01:05.409399778+00:00 stderr F I1208 18:01:05.409307 12 controller.go:231] Updating CRD OpenAPI spec because enterprisesearches.enterprisesearch.k8s.elastic.co changed 2025-12-08T18:01:05.409696806+00:00 stderr F I1208 18:01:05.409575 12 controller.go:231] Updating CRD OpenAPI spec because clusterautoscalers.autoscaling.openshift.io changed 2025-12-08T18:01:05.409799249+00:00 stderr F I1208 18:01:05.409721 12 controller.go:231] Updating CRD OpenAPI spec because kubeapiservers.operator.openshift.io changed 2025-12-08T18:01:05.409799249+00:00 stderr F I1208 18:01:05.409760 12 controller.go:231] Updating CRD OpenAPI spec because probes.monitoring.coreos.com changed 2025-12-08T18:01:05.409799249+00:00 stderr F I1208 18:01:05.409771 12 controller.go:231] Updating CRD OpenAPI spec because monitoringstacks.monitoring.rhobs changed 2025-12-08T18:01:05.410031755+00:00 stderr F I1208 18:01:05.409949 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.rhobs changed 2025-12-08T18:01:05.410031755+00:00 stderr F I1208 18:01:05.409970 12 controller.go:231] Updating CRD OpenAPI spec because stackconfigpolicies.stackconfigpolicy.k8s.elastic.co changed 
2025-12-08T18:01:05.410172269+00:00 stderr F I1208 18:01:05.410078 12 controller.go:231] Updating CRD OpenAPI spec because builds.config.openshift.io changed 2025-12-08T18:01:05.410265031+00:00 stderr F I1208 18:01:05.410184 12 controller.go:231] Updating CRD OpenAPI spec because infrastructures.config.openshift.io changed 2025-12-08T18:01:05.410265031+00:00 stderr F I1208 18:01:05.410221 12 controller.go:231] Updating CRD OpenAPI spec because networks.operator.openshift.io changed 2025-12-08T18:01:05.410380934+00:00 stderr F I1208 18:01:05.410287 12 controller.go:231] Updating CRD OpenAPI spec because prometheuses.monitoring.coreos.com changed 2025-12-08T18:01:05.410490337+00:00 stderr F I1208 18:01:05.410398 12 controller.go:231] Updating CRD OpenAPI spec because smartgateways.smartgateway.infra.watch changed 2025-12-08T18:01:05.410622540+00:00 stderr F I1208 18:01:05.410522 12 controller.go:231] Updating CRD OpenAPI spec because egressips.k8s.ovn.org changed 2025-12-08T18:01:05.410622540+00:00 stderr F I1208 18:01:05.410571 12 controller.go:231] Updating CRD OpenAPI spec because ipaddresses.ipam.cluster.x-k8s.io changed 2025-12-08T18:01:05.410751214+00:00 stderr F I1208 18:01:05.410652 12 controller.go:231] Updating CRD OpenAPI spec because prometheusrules.monitoring.rhobs changed 2025-12-08T18:01:05.410751214+00:00 stderr F I1208 18:01:05.410676 12 controller.go:231] Updating CRD OpenAPI spec because interconnects.interconnectedcloud.github.io changed 2025-12-08T18:01:05.410936389+00:00 stderr F I1208 18:01:05.410810 12 controller.go:231] Updating CRD OpenAPI spec because clusteroperators.config.openshift.io changed 2025-12-08T18:01:05.410936389+00:00 stderr F I1208 18:01:05.410837 12 controller.go:231] Updating CRD OpenAPI spec because egressservices.k8s.ovn.org changed 2025-12-08T18:01:05.410936389+00:00 stderr F I1208 18:01:05.410906 12 controller.go:231] Updating CRD OpenAPI spec because perses.perses.dev changed 2025-12-08T18:01:05.411019682+00:00 stderr F I1208 18:01:05.410949 12 controller.go:231] Updating CRD OpenAPI spec because certificaterequests.cert-manager.io changed 2025-12-08T18:01:05.411081763+00:00 stderr F I1208 18:01:05.411016 12 controller.go:231] Updating CRD OpenAPI spec because alertingrules.monitoring.openshift.io changed 2025-12-08T18:01:05.411081763+00:00 stderr F I1208 18:01:05.411032 12 controller.go:231] Updating CRD OpenAPI spec because baselineadminnetworkpolicies.policy.networking.k8s.io changed 2025-12-08T18:01:05.411249938+00:00 stderr F I1208 18:01:05.411166 12 controller.go:231] Updating CRD OpenAPI spec because configs.imageregistry.operator.openshift.io changed 2025-12-08T18:01:05.411324790+00:00 stderr F I1208 18:01:05.411269 12 controller.go:231] Updating CRD OpenAPI spec because configs.operator.openshift.io changed 2025-12-08T18:01:05.411408232+00:00 stderr F I1208 18:01:05.411348 12 controller.go:231] Updating CRD OpenAPI spec because nodeslicepools.whereabouts.cni.cncf.io changed 2025-12-08T18:01:05.411486564+00:00 stderr F I1208 18:01:05.411432 12 controller.go:231] Updating CRD OpenAPI spec because overlappingrangeipreservations.whereabouts.cni.cncf.io changed 2025-12-08T18:01:05.411571156+00:00 stderr F I1208 18:01:05.411506 12 controller.go:231] Updating CRD OpenAPI spec because networks.config.openshift.io changed 2025-12-08T18:01:05.411646848+00:00 stderr F I1208 18:01:05.411592 12 controller.go:231] Updating CRD OpenAPI spec because rolebindingrestrictions.authorization.openshift.io changed 2025-12-08T18:01:05.411733071+00:00 stderr F 
I1208 18:01:05.411670 12 controller.go:231] Updating CRD OpenAPI spec because alertmanagerconfigs.monitoring.rhobs changed 2025-12-08T18:01:05.411810393+00:00 stderr F I1208 18:01:05.411756 12 controller.go:231] Updating CRD OpenAPI spec because persesdashboards.perses.dev changed 2025-12-08T18:01:05.411923356+00:00 stderr F I1208 18:01:05.411831 12 controller.go:231] Updating CRD OpenAPI spec because elasticsearches.elasticsearch.k8s.elastic.co changed 2025-12-08T18:01:05.412107940+00:00 stderr F I1208 18:01:05.412014 12 controller.go:231] Updating CRD OpenAPI spec because oauths.config.openshift.io changed 2025-12-08T18:01:05.412107940+00:00 stderr F I1208 18:01:05.412036 12 controller.go:231] Updating CRD OpenAPI spec because pinnedimagesets.machineconfiguration.openshift.io changed 2025-12-08T18:01:05.412107940+00:00 stderr F I1208 18:01:05.412044 12 controller.go:231] Updating CRD OpenAPI spec because referencegrants.gateway.networking.k8s.io changed 2025-12-08T18:01:05.412107940+00:00 stderr F I1208 18:01:05.412051 12 controller.go:231] Updating CRD OpenAPI spec because thanosrulers.monitoring.rhobs changed 2025-12-08T18:01:05.412107940+00:00 stderr F I1208 18:01:05.412059 12 controller.go:231] Updating CRD OpenAPI spec because olmconfigs.operators.coreos.com changed 2025-12-08T18:01:05.412107940+00:00 stderr F I1208 18:01:05.412065 12 controller.go:231] Updating CRD OpenAPI spec because clusterimagepolicies.config.openshift.io changed 2025-12-08T18:01:05.412107940+00:00 stderr F I1208 18:01:05.412076 12 controller.go:231] Updating CRD OpenAPI spec because gateways.gateway.networking.k8s.io changed 2025-12-08T18:01:05.412107940+00:00 stderr F I1208 18:01:05.412083 12 controller.go:231] Updating CRD OpenAPI spec because podmonitors.monitoring.coreos.com changed 2025-12-08T18:01:05.412131711+00:00 stderr F I1208 18:01:05.412091 12 controller.go:231] Updating CRD OpenAPI spec because agents.agent.k8s.elastic.co changed 2025-12-08T18:01:05.412185733+00:00 stderr F I1208 18:01:05.412108 12 controller.go:231] Updating CRD OpenAPI spec because servicetelemetrys.infra.watch changed 2025-12-08T18:01:05.412185733+00:00 stderr F I1208 18:01:05.412148 12 controller.go:231] Updating CRD OpenAPI spec because alertrelabelconfigs.monitoring.openshift.io changed 2025-12-08T18:01:05.412240914+00:00 stderr F I1208 18:01:05.412178 12 controller.go:231] Updating CRD OpenAPI spec because clusterresourcequotas.quota.openshift.io changed 2025-12-08T18:01:05.412240914+00:00 stderr F I1208 18:01:05.412203 12 controller.go:231] Updating CRD OpenAPI spec because consolesamples.console.openshift.io changed 2025-12-08T18:01:05.412240914+00:00 stderr F I1208 18:01:05.412224 12 controller.go:231] Updating CRD OpenAPI spec because operatorconditions.operators.coreos.com changed 2025-12-08T18:01:05.412322776+00:00 stderr F I1208 18:01:05.412243 12 controller.go:231] Updating CRD OpenAPI spec because projects.config.openshift.io changed 2025-12-08T18:01:05.412322776+00:00 stderr F I1208 18:01:05.412269 12 controller.go:231] Updating CRD OpenAPI spec because servicemonitors.monitoring.coreos.com changed 2025-12-08T18:01:05.412322776+00:00 stderr F I1208 18:01:05.412288 12 controller.go:231] Updating CRD OpenAPI spec because elasticsearchautoscalers.autoscaling.k8s.elastic.co changed 2025-12-08T18:01:05.412322776+00:00 stderr F I1208 18:01:05.412306 12 controller.go:231] Updating CRD OpenAPI spec because apiservers.config.openshift.io changed 2025-12-08T18:01:05.412341667+00:00 stderr F I1208 18:01:05.412322 12 
controller.go:231] Updating CRD OpenAPI spec because gatewayclasses.gateway.networking.k8s.io changed 2025-12-08T18:01:25.336127618+00:00 stderr F I1208 18:01:25.335828 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.0051465821971968554 seatDemandStdev=0.10958239708498793 seatDemandSmoothed=1.8732551078974455 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T18:01:25.447054674+00:00 stderr F E1208 18:01:25.446850 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\", Err:(*tls.permanentError)(0xc05e426620)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:45.336804377+00:00 stderr F I1208 18:01:45.336548 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.000973807366779198 seatDemandStdev=0.031190688770714962 seatDemandSmoothed=1.7910325352943153 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T18:01:57.035113502+00:00 stderr F E1208 18:01:57.034960 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/stf-smoketest-smoke1-pbhxq/smoketest-collectd\", Err:(*tls.permanentError)(0xc063a67450)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/stf-smoketest-smoke1-pbhxq/smoketest-collectd\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:57.302745557+00:00 stderr F E1208 18:01:57.302561 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/stf-smoketest-smoke1-pbhxq/smoketest-ceilometer\", Err:(*tls.permanentError)(0xc0415f5210)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/stf-smoketest-smoke1-pbhxq/smoketest-ceilometer\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:57.576407754+00:00 stderr F E1208 18:01:57.576174 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-interconnect-55bf8d5cb-rwr2k/default-interconnect\", Err:(*tls.permanentError)(0xc062c60e60)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-interconnect-55bf8d5cb-rwr2k/default-interconnect\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:57.866131577+00:00 stderr F E1208 18:01:57.865967 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-meter-smartgateway-787645d794-4zrzx/bridge\", Err:(*tls.permanentError)(0xc06bfb6b60)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-meter-smartgateway-787645d794-4zrzx/bridge\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:58.169907066+00:00 stderr F E1208 
18:01:58.169692 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-meter-smartgateway-787645d794-4zrzx/sg-core\", Err:(*tls.permanentError)(0xc0701adc00)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-meter-smartgateway-787645d794-4zrzx/sg-core\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:58.443754877+00:00 stderr F E1208 18:01:58.443426 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-event-smartgateway-d956b4648-jwkwn/bridge\", Err:(*tls.permanentError)(0xc062beb550)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-event-smartgateway-d956b4648-jwkwn/bridge\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:58.707695855+00:00 stderr F E1208 18:01:58.707240 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-event-smartgateway-d956b4648-jwkwn/sg-core\", Err:(*tls.permanentError)(0xc0631766c0)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-coll-event-smartgateway-d956b4648-jwkwn/sg-core\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:58.957750453+00:00 stderr F E1208 18:01:58.956906 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v/bridge\", Err:(*tls.permanentError)(0xc066dfbea0)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v/bridge\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:59.218818454+00:00 stderr F E1208 18:01:59.218552 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v/sg-core\", Err:(*tls.permanentError)(0xc0475cf5e0)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v/sg-core\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:59.521343339+00:00 stderr F E1208 18:01:59.521171 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk/bridge\", Err:(*tls.permanentError)(0xc040b343d0)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk/bridge\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:01:59.776344798+00:00 stderr F E1208 18:01:59.776185 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", 
URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk/sg-core\", Err:(*tls.permanentError)(0xc04177d490)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk/sg-core\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:00.099764630+00:00 stderr F E1208 18:02:00.099529 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp/bridge\", Err:(*tls.permanentError)(0xc063d96940)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp/bridge\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:00.381944572+00:00 stderr F E1208 18:02:00.381527 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp/sg-core\", Err:(*tls.permanentError)(0xc0701d7a00)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp/sg-core\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:00.620264957+00:00 stderr F E1208 18:02:00.619611 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/smart-gateway-operator-5cd794ff55-w8r45/operator\", Err:(*tls.permanentError)(0xc046275870)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/smart-gateway-operator-5cd794ff55-w8r45/operator\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:00.926492982+00:00 stderr F E1208 18:02:00.925732 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/prometheus-default-0/prometheus\", Err:(*tls.permanentError)(0xc04135d0b0)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/prometheus-default-0/prometheus\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:01.239154536+00:00 stderr F E1208 18:02:01.238673 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/elasticsearch-es-default-0/elasticsearch\", Err:(*tls.permanentError)(0xc04bc4fbb0)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/elasticsearch-es-default-0/elasticsearch\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:01.526147668+00:00 stderr F E1208 18:02:01.525987 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\", Err:(*tls.permanentError)(0xc04d93e320)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn/prometheus-webhook-snmp\": remote error: tls: internal error" 
logger="UnhandledError" 2025-12-08T18:02:01.802503676+00:00 stderr F E1208 18:02:01.802310 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/alertmanager-default-0/alertmanager\", Err:(*tls.permanentError)(0xc04f056740)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/alertmanager-default-0/alertmanager\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:14.287439650+00:00 stderr F E1208 18:02:14.286849 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/service-telemetry-operator-79647f8775-zs8hl/operator\", Err:(*tls.permanentError)(0xc05d1c2470)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/service-telemetry-operator-79647f8775-zs8hl/operator\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:14.541092964+00:00 stderr F E1208 18:02:14.540927 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/smart-gateway-operator-5cd794ff55-w8r45/operator\", Err:(*tls.permanentError)(0xc054b18250)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/smart-gateway-operator-5cd794ff55-w8r45/operator\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:14.804601960+00:00 stderr F E1208 18:02:14.804382 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/service-telemetry/qdr-test/qdr\", Err:(*tls.permanentError)(0xc05733ecd0)}: Get \"https://192.168.126.11:10250/containerLogs/service-telemetry/qdr-test/qdr\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:25.339847802+00:00 stderr F I1208 18:02:25.339130 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.006245702181938751 seatDemandStdev=0.13340967559008335 seatDemandSmoothed=1.637874960133614 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T18:02:45.340727003+00:00 stderr F I1208 18:02:45.340596 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=2 seatDemandAvg=0.0025163294701790787 seatDemandStdev=0.05913492451733157 seatDemandSmoothed=1.5670047959769386 fairFrac=2.2796127562642368 currentCL=2 concurrencyDenominator=2 backstop=false 2025-12-08T18:02:50.545493084+00:00 stderr F E1208 18:02:50.545267 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-must-gather-gctth/must-gather-5cz8j/gather?follow=true×tamps=true\", Err:(*tls.permanentError)(0xc04eb40ee0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-must-gather-gctth/must-gather-5cz8j/gather?follow=true×tamps=true\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:02:55.342384385+00:00 stderr F I1208 18:02:55.341533 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0010399436808592164 seatDemandStdev=0.03223138529445884 seatDemandSmoothed=1.5317289262359013 
fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T18:03:05.341939494+00:00 stderr F I1208 18:03:05.341703 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.008765395281734777 seatDemandStdev=0.1475470976006087 seatDemandSmoothed=1.5000943482687694 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T18:03:15.342803031+00:00 stderr F I1208 18:03:15.342576 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.002566546261502612 seatDemandStdev=0.05059603840015717 seatDemandSmoothed=1.4668149177058059 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T18:03:25.343229485+00:00 stderr F I1208 18:03:25.343099 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.006249252590829742 seatDemandStdev=0.12413000697977772 seatDemandSmoothed=1.4360768975686964 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T18:03:25.355217796+00:00 stderr F E1208 18:03:25.355081 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-dhfht/control-plane-machine-set-operator\", Err:(*tls.permanentError)(0xc00935cb20)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-dhfht/control-plane-machine-set-operator\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:25.517034474+00:00 stderr F E1208 18:03:25.516897 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-machine-api/machine-api-operator-755bb95488-5httz/machine-api-operator\", Err:(*tls.permanentError)(0xc04a21f8f0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-machine-api/machine-api-operator-755bb95488-5httz/machine-api-operator\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:25.538942140+00:00 stderr F E1208 18:03:25.538793 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-machine-api/machine-api-operator-755bb95488-5httz/kube-rbac-proxy\", Err:(*tls.permanentError)(0xc0509c9b00)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-machine-api/machine-api-operator-755bb95488-5httz/kube-rbac-proxy\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:37.468218038+00:00 stderr F E1208 18:03:37.468108 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/cert-manager/cert-manager-858d87f86b-7q2ss/cert-manager-controller\", Err:(*tls.permanentError)(0xc022dd7500)}: Get \"https://192.168.126.11:10250/containerLogs/cert-manager/cert-manager-858d87f86b-7q2ss/cert-manager-controller\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:37.639597422+00:00 stderr F E1208 18:03:37.639474 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", 
URL:\"https://192.168.126.11:10250/containerLogs/cert-manager/cert-manager-cainjector-7dbf76d5c8-fdk5q/cert-manager-cainjector\", Err:(*tls.permanentError)(0xc0247a30d0)}: Get \"https://192.168.126.11:10250/containerLogs/cert-manager/cert-manager-cainjector-7dbf76d5c8-fdk5q/cert-manager-cainjector\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:37.665607099+00:00 stderr F E1208 18:03:37.665471 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/cert-manager/cert-manager-webhook-7894b5b9b4-wdn4b/cert-manager-webhook\", Err:(*tls.permanentError)(0xc022efc2d0)}: Get \"https://192.168.126.11:10250/containerLogs/cert-manager/cert-manager-webhook-7894b5b9b4-wdn4b/cert-manager-webhook\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:45.345270345+00:00 stderr F I1208 18:03:45.344735 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.0021722287623018445 seatDemandStdev=0.04655652676592266 seatDemandSmoothed=1.373929920492961 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T18:03:52.055661618+00:00 stderr F E1208 18:03:52.055496 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/util\", Err:(*tls.permanentError)(0xc04bdfb950)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.197493321+00:00 stderr F E1208 18:03:52.197327 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/util\", Err:(*tls.permanentError)(0xc04466f840)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.220037605+00:00 stderr F E1208 18:03:52.219861 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/pull\", Err:(*tls.permanentError)(0xc043e3a3a0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.234715578+00:00 stderr F E1208 18:03:52.234549 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/pull\", Err:(*tls.permanentError)(0xc04df42aa0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/pull\": remote error: tls: internal error" logger="UnhandledError" 
2025-12-08T18:03:52.381713428+00:00 stderr F E1208 18:03:52.381563 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/util\", Err:(*tls.permanentError)(0xc04a8cec20)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.396835584+00:00 stderr F E1208 18:03:52.396700 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/pull\", Err:(*tls.permanentError)(0xc04f702970)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.415633808+00:00 stderr F E1208 18:03:52.415509 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/extract\", Err:(*tls.permanentError)(0xc04b9060f0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj/extract\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.556735300+00:00 stderr F E1208 18:03:52.556600 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/util\", Err:(*tls.permanentError)(0xc042e2f340)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.668918447+00:00 stderr F E1208 18:03:52.668785 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/util\", Err:(*tls.permanentError)(0xc04b0b4640)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.713856442+00:00 stderr F E1208 18:03:52.713700 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/pull\", Err:(*tls.permanentError)(0xc065b8ca40)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.728297998+00:00 stderr F E1208 18:03:52.728163 12 status.go:71] "Unhandled 
Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/pull\", Err:(*tls.permanentError)(0xc04b0b4d30)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.880147419+00:00 stderr F E1208 18:03:52.880024 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/pull\", Err:(*tls.permanentError)(0xc04b0b5b00)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.913594935+00:00 stderr F E1208 18:03:52.913445 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/util\", Err:(*tls.permanentError)(0xc04c277ab0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:52.936435077+00:00 stderr F E1208 18:03:52.936285 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/extract\", Err:(*tls.permanentError)(0xc0477529b0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5/extract\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.058948072+00:00 stderr F E1208 18:03:53.058321 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/util\", Err:(*tls.permanentError)(0xc046b8e910)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.190386275+00:00 stderr F E1208 18:03:53.190271 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/util\", Err:(*tls.permanentError)(0xc046b8f920)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.194130745+00:00 stderr F E1208 18:03:53.194038 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", 
URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/pull\", Err:(*tls.permanentError)(0xc04bdeeb90)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.252374896+00:00 stderr F E1208 18:03:53.252235 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/pull\", Err:(*tls.permanentError)(0xc04b01ece0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.411032670+00:00 stderr F E1208 18:03:53.410914 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/util\", Err:(*tls.permanentError)(0xc04b8d11b0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.412932071+00:00 stderr F E1208 18:03:53.412834 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/pull\", Err:(*tls.permanentError)(0xc04acc3320)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.453680393+00:00 stderr F E1208 18:03:53.453534 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/extract\", Err:(*tls.permanentError)(0xc0217d0d20)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj/extract\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.593159522+00:00 stderr F E1208 18:03:53.593027 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/util\", Err:(*tls.permanentError)(0xc04acc3ce0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.731512220+00:00 stderr F E1208 18:03:53.731370 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", 
URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/util\", Err:(*tls.permanentError)(0xc0455e6950)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.757367774+00:00 stderr F E1208 18:03:53.757236 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/pull\", Err:(*tls.permanentError)(0xc050045a10)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.772691094+00:00 stderr F E1208 18:03:53.772550 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/pull\", Err:(*tls.permanentError)(0xc0455e7820)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.898122796+00:00 stderr F E1208 18:03:53.894241 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/pull\", Err:(*tls.permanentError)(0xc04b794160)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/pull\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.898122796+00:00 stderr F E1208 18:03:53.895104 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/util\", Err:(*tls.permanentError)(0xc04a94f010)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/util\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:53.929418585+00:00 stderr F E1208 18:03:53.928052 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/extract\", Err:(*tls.permanentError)(0xc051060ca0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f/extract\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.067760273+00:00 stderr F E1208 18:03:54.067614 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", 
URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-utilities\", Err:(*tls.permanentError)(0xc05aa1e7e0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-utilities\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.252011282+00:00 stderr F E1208 18:03:54.251895 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-content\", Err:(*tls.permanentError)(0xc04f252380)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-content\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.252061863+00:00 stderr F E1208 18:03:54.251994 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-content\", Err:(*tls.permanentError)(0xc04ea6e500)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-content\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.289423605+00:00 stderr F E1208 18:03:54.289279 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-utilities\", Err:(*tls.permanentError)(0xc04d495e80)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-utilities\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.460197212+00:00 stderr F E1208 18:03:54.460060 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-utilities\", Err:(*tls.permanentError)(0xc04ea6fb20)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-utilities\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.477373064+00:00 stderr F E1208 18:03:54.477248 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-content\", Err:(*tls.permanentError)(0xc04ea5a950)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/extract-content\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.502167977+00:00 stderr F E1208 18:03:54.502014 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/registry-server\", Err:(*tls.permanentError)(0xc04a14a0b0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/certified-operators-58d6l/registry-server\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.611315393+00:00 stderr F E1208 18:03:54.611185 12 status.go:71] 
"Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-utilities\", Err:(*tls.permanentError)(0xc0626e84a0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-utilities\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.761412407+00:00 stderr F E1208 18:03:54.761274 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-content\", Err:(*tls.permanentError)(0xc05d79c0f0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-content\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.763740119+00:00 stderr F E1208 18:03:54.763632 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-utilities\", Err:(*tls.permanentError)(0xc05fe580e0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-utilities\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.776678956+00:00 stderr F E1208 18:03:54.776550 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-content\", Err:(*tls.permanentError)(0xc05284ca60)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-content\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.933898290+00:00 stderr F E1208 18:03:54.933741 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-utilities\", Err:(*tls.permanentError)(0xc062a88240)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-utilities\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.959206279+00:00 stderr F E1208 18:03:54.959081 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/marketplace-operator-547dbd544d-6bbtn/marketplace-operator\", Err:(*tls.permanentError)(0xc062a893e0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/marketplace-operator-547dbd544d-6bbtn/marketplace-operator\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.961939002+00:00 stderr F E1208 18:03:54.961818 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-content\", Err:(*tls.permanentError)(0xc04f252fa0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/extract-content\": 
remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:54.962551049+00:00 stderr F E1208 18:03:54.962483 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/registry-server\", Err:(*tls.permanentError)(0xc05284dd40)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/community-operators-zdvxg/registry-server\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:55.120380639+00:00 stderr F E1208 18:03:55.120263 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-utilities\", Err:(*tls.permanentError)(0xc05cb4e2c0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-utilities\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:55.276500894+00:00 stderr F E1208 18:03:55.276301 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-content\", Err:(*tls.permanentError)(0xc043e88a20)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-content\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:55.281046595+00:00 stderr F E1208 18:03:55.280950 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-content\", Err:(*tls.permanentError)(0xc062837890)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-content\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:55.284199641+00:00 stderr F E1208 18:03:55.284131 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-utilities\", Err:(*tls.permanentError)(0xc0415f8e20)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-utilities\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:55.415660294+00:00 stderr F E1208 18:03:55.415522 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-utilities\", Err:(*tls.permanentError)(0xc0415f9a10)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-utilities\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:55.440256103+00:00 stderr F E1208 18:03:55.440101 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-content\", Err:(*tls.permanentError)(0xc0624ea980)}: Get 
\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/extract-content\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:55.468657544+00:00 stderr F E1208 18:03:55.468531 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/registry-server\", Err:(*tls.permanentError)(0xc043cc4ac0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-marketplace/redhat-operators-xpnf9/registry-server\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:03:56.986538892+00:00 stderr F W1208 18:03:56.986372 12 watcher.go:338] watch chan error: etcdserver: mvcc: required revision has been compacted 2025-12-08T18:04:07.131339454+00:00 stderr F E1208 18:04:07.131158 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-operators/obo-prometheus-operator-86648f486b-4j9kn/prometheus-operator\", Err:(*tls.permanentError)(0xc05cdbe910)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-operators/obo-prometheus-operator-86648f486b-4j9kn/prometheus-operator\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:07.268901372+00:00 stderr F E1208 18:04:07.268765 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm/prometheus-operator-admission-webhook\", Err:(*tls.permanentError)(0xc05463eab0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm/prometheus-operator-admission-webhook\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:07.304140086+00:00 stderr F E1208 18:04:07.304023 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t/prometheus-operator-admission-webhook\", Err:(*tls.permanentError)(0xc05df7b1a0)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t/prometheus-operator-admission-webhook\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:07.440223435+00:00 stderr F E1208 18:04:07.440042 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-operators/observability-operator-78c97476f4-mg4b2/operator\", Err:(*tls.permanentError)(0xc05cc24210)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-operators/observability-operator-78c97476f4-mg4b2/operator\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:07.476705113+00:00 stderr F E1208 18:04:07.476514 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-operators/perses-operator-68bdb49cbf-m2cdr/perses-operator\", Err:(*tls.permanentError)(0xc05cf67750)}: Get 
\"https://192.168.126.11:10250/containerLogs/openshift-operators/perses-operator-68bdb49cbf-m2cdr/perses-operator\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:25.349665128+00:00 stderr F I1208 18:04:25.349089 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=5 seatDemandAvg=0.006064909558314343 seatDemandStdev=0.12335797515955865 seatDemandSmoothed=1.2579607644380462 fairFrac=2.2796127562642368 currentCL=5 concurrencyDenominator=5 backstop=false 2025-12-08T18:04:45.350603598+00:00 stderr F I1208 18:04:45.350427 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=2 seatDemandAvg=0.0009822579858792366 seatDemandStdev=0.033031373226230364 seatDemandSmoothed=1.2037589282442682 fairFrac=2.2796127562642368 currentCL=2 concurrencyDenominator=2 backstop=false 2025-12-08T18:04:50.559753674+00:00 stderr F E1208 18:04:50.559362 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &url.Error{Op:\"Get\", URL:\"https://192.168.126.11:10250/containerLogs/openshift-must-gather-gctth/must-gather-5cz8j/gather?timestamps=true\", Err:(*tls.permanentError)(0xc05f177730)}: Get \"https://192.168.126.11:10250/containerLogs/openshift-must-gather-gctth/must-gather-5cz8j/gather?timestamps=true\": remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:50.716542252+00:00 stderr F E1208 18:04:50.716372 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:50.729785294+00:00 stderr F E1208 18:04:50.729627 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:50.755646039+00:00 stderr F E1208 18:04:50.755492 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:50.764427322+00:00 stderr F E1208 18:04:50.764308 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:50.777137580+00:00 stderr F E1208 18:04:50.776992 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:50.789354303+00:00 stderr F E1208 18:04:50.789230 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:50.806901978+00:00 stderr F E1208 18:04:50.806711 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: 
&errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:50.820703725+00:00 stderr F E1208 18:04:50.820425 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:50.966260325+00:00 stderr F E1208 18:04:50.965496 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:50.978561772+00:00 stderr F E1208 18:04:50.978418 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:51.001782927+00:00 stderr F E1208 18:04:51.001681 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:51.012379438+00:00 stderr F E1208 18:04:51.012309 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:51.027180481+00:00 stderr F E1208 18:04:51.027059 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:51.040717520+00:00 stderr F E1208 18:04:51.040602 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:51.058547822+00:00 stderr F E1208 18:04:51.058375 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:51.071569488+00:00 stderr F E1208 18:04:51.071420 12 status.go:71] "Unhandled Error" err="apiserver received an error that is not an metav1.Status: &errors.errorString{s:\"error dialing backend: remote error: tls: internal error\"}: error dialing backend: remote error: tls: internal error" logger="UnhandledError" 2025-12-08T18:04:55.350890656+00:00 stderr F I1208 18:04:55.350715 12 apf_controller.go:493] "Update CurrentCL" plName="exempt" seatDemandHighWatermark=1 seatDemandAvg=0.000541501761435135 seatDemandStdev=0.023263889126229467 seatDemandSmoothed=1.1766199968850664 fairFrac=2.2796127562642368 currentCL=1 concurrencyDenominator=1 backstop=false 2025-12-08T18:04:56.124355378+00:00 stderr F I1208 
18:04:56.123424 12 node_authorizer.go:224] "NODE DENY" err="node 'crc' cannot get unknown pod openshift-must-gather-gctth/must-gather-5cz8j" 2025-12-08T18:04:56.480752551+00:00 stderr F I1208 18:04:56.480593 12 httplog.go:93] system:serviceaccount:openshift-apiserver:openshift-apiserver-sa[system:serviceaccounts,system:serviceaccounts:openshift-apiserver,system:authenticated] is impersonating system:serviceaccount:kube-system:namespace-controller[system:serviceaccounts,system:serviceaccounts:kube-system,system:authenticated] 2025-12-08T18:04:56.485937169+00:00 stderr F I1208 18:04:56.485850 12 httplog.go:93] system:serviceaccount:openshift-apiserver:openshift-apiserver-sa[system:serviceaccounts,system:serviceaccounts:openshift-apiserver,system:authenticated] is impersonating system:serviceaccount:kube-system:namespace-controller[system:serviceaccounts,system:serviceaccounts:kube-system,system:authenticated] 2025-12-08T18:04:56.715065144+00:00 stderr F I1208 18:04:56.714945 12 httplog.go:93] system:serviceaccount:openshift-apiserver:openshift-apiserver-sa[system:serviceaccounts,system:serviceaccounts:openshift-apiserver,system:authenticated] is impersonating system:serviceaccount:kube-system:namespace-controller[system:serviceaccounts,system:serviceaccounts:kube-system,system:authenticated] 2025-12-08T18:04:56.719807110+00:00 stderr F I1208 18:04:56.719704 12 httplog.go:93] system:serviceaccount:openshift-apiserver:openshift-apiserver-sa[system:serviceaccounts,system:serviceaccounts:openshift-apiserver,system:authenticated] is impersonating system:serviceaccount:kube-system:namespace-controller[system:serviceaccounts,system:serviceaccounts:kube-system,system:authenticated]
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/setup/0.log
2025-12-08T17:46:03.438061553+00:00 stdout F Fixing audit permissions ... 2025-12-08T17:46:03.449547977+00:00 stdout F Acquiring exclusive lock /var/log/kube-apiserver/.lock ...
2025-12-08T17:46:03.451349231+00:00 stdout F flock: getting lock took 0.000006 seconds
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-check-endpoints/0.log
2025-12-08T17:46:04.986441787+00:00 stderr F W1208 17:46:04.986188 1 cmd.go:257] Using insecure, self-signed certificates 2025-12-08T17:46:04.986686595+00:00 stderr F I1208 17:46:04.986578 1 crypto.go:594] Generating new CA for check-endpoints-signer@1765215964 cert, and key in /tmp/serving-cert-260175964/serving-signer.crt, /tmp/serving-cert-260175964/serving-signer.key 2025-12-08T17:46:04.986686595+00:00 stderr F Validity period of the certificate for "check-endpoints-signer@1765215964" is unset, resetting to 43800h0m0s! 2025-12-08T17:46:05.760544113+00:00 stderr F I1208 17:46:05.760455 1 observer_polling.go:159] Starting file observer 2025-12-08T17:46:05.760747419+00:00 stderr F I1208 17:46:05.760721 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:46:05.760747419+00:00 stderr F I1208 17:46:05.760739 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:46:05.760747419+00:00 stderr F I1208 17:46:05.760744 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:46:05.760759550+00:00 stderr F I1208 17:46:05.760748 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:46:05.760759550+00:00 stderr F I1208 17:46:05.760753 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:46:10.772350576+00:00 stderr F I1208 17:46:10.772284 1 builder.go:304] check-endpoints version v0.0.0-unknown-c3d9642-c3d9642 2025-12-08T17:46:10.773576903+00:00 stderr F I1208 17:46:10.773533 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/tmp/serving-cert-260175964/tls.crt::/tmp/serving-cert-260175964/tls.key" 2025-12-08T17:46:11.259850449+00:00 stderr F I1208 17:46:11.259779 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:46:11.261777346+00:00 stderr F I1208 17:46:11.261713 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:46:11.261777346+00:00 stderr F I1208 17:46:11.261735 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:46:11.261805817+00:00 stderr F I1208 17:46:11.261774 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400 2025-12-08T17:46:11.261805817+00:00 stderr F I1208 17:46:11.261783 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200 2025-12-08T17:46:11.266166838+00:00 stderr F I1208 17:46:11.266092 1 secure_serving.go:57] Forcing use of 
http/1.1 only 2025-12-08T17:46:11.266166838+00:00 stderr F W1208 17:46:11.266127 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:46:11.266166838+00:00 stderr F I1208 17:46:11.266127 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:46:11.266166838+00:00 stderr F W1208 17:46:11.266133 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:46:11.266166838+00:00 stderr F W1208 17:46:11.266142 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:46:11.266166838+00:00 stderr F W1208 17:46:11.266145 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:46:11.266166838+00:00 stderr F W1208 17:46:11.266148 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:46:11.266166838+00:00 stderr F W1208 17:46:11.266151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:46:11.269658673+00:00 stderr F I1208 17:46:11.269599 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:46:11.269658673+00:00 stderr F I1208 17:46:11.269597 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:46:11.269728455+00:00 stderr F I1208 17:46:11.269671 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:46:11.269728455+00:00 stderr F I1208 17:46:11.269671 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:46:11.269808457+00:00 stderr F I1208 17:46:11.269781 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:46:11.269868879+00:00 stderr F I1208 17:46:11.269847 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:46:11.270088525+00:00 stderr F I1208 17:46:11.270065 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/tmp/serving-cert-260175964/tls.crt::/tmp/serving-cert-260175964/tls.key" 2025-12-08T17:46:11.270177988+00:00 stderr F I1208 17:46:11.270139 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-260175964/tls.crt::/tmp/serving-cert-260175964/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"check-endpoints-signer@1765215964\" (2025-12-08 17:46:04 +0000 UTC to 2025-12-08 17:46:05 +0000 UTC (now=2025-12-08 17:46:11.270099366 +0000 UTC))" 2025-12-08T17:46:11.270527209+00:00 stderr F I1208 17:46:11.270489 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215971\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215970\" (2025-12-08 16:46:10 +0000 UTC to 2028-12-08 16:46:10 +0000 UTC (now=2025-12-08 17:46:11.270456447 +0000 UTC))" 2025-12-08T17:46:11.270527209+00:00 stderr F I1208 17:46:11.270520 1 secure_serving.go:211] Serving securely on [::]:17697 
2025-12-08T17:46:11.270552160+00:00 stderr F I1208 17:46:11.270544 1 genericapiserver.go:696] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:46:11.270598222+00:00 stderr F I1208 17:46:11.270564 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:46:11.270950062+00:00 stderr F I1208 17:46:11.270909 1 base_controller.go:76] Waiting for caches to sync for CheckEndpointsTimeToStart 2025-12-08T17:46:11.271731805+00:00 stderr F I1208 17:46:11.271702 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:11.271867319+00:00 stderr F I1208 17:46:11.271715 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:11.272330403+00:00 stderr F I1208 17:46:11.272296 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:11.370226871+00:00 stderr F I1208 17:46:11.370164 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:46:11.370354945+00:00 stderr F I1208 17:46:11.370274 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:46:11.370354945+00:00 stderr F I1208 17:46:11.370232 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:46:11.370647054+00:00 stderr F I1208 17:46:11.370613 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:46:11.370585032 +0000 UTC))" 2025-12-08T17:46:11.370647054+00:00 stderr F I1208 17:46:11.370638 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:46:11.370629003 +0000 UTC))" 2025-12-08T17:46:11.370675565+00:00 stderr F I1208 17:46:11.370652 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:46:11.370643464 +0000 UTC))" 2025-12-08T17:46:11.370675565+00:00 stderr F I1208 17:46:11.370664 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:46:11.370657084 +0000 UTC))" 2025-12-08T17:46:11.370688925+00:00 stderr F I1208 17:46:11.370681 1 tlsconfig.go:181] "Loaded client CA" index=4 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:46:11.370668004 +0000 UTC))" 2025-12-08T17:46:11.370721066+00:00 stderr F I1208 17:46:11.370694 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:46:11.370685845 +0000 UTC))" 2025-12-08T17:46:11.370721066+00:00 stderr F I1208 17:46:11.370715 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:46:11.370705606 +0000 UTC))" 2025-12-08T17:46:11.370734846+00:00 stderr F I1208 17:46:11.370728 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:46:11.370719606 +0000 UTC))" 2025-12-08T17:46:11.370763997+00:00 stderr F I1208 17:46:11.370744 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:46:11.370732476 +0000 UTC))" 2025-12-08T17:46:11.370780258+00:00 stderr F I1208 17:46:11.370762 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:46:11.370755057 +0000 UTC))" 2025-12-08T17:46:11.370978954+00:00 stderr F I1208 17:46:11.370953 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-260175964/tls.crt::/tmp/serving-cert-260175964/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"check-endpoints-signer@1765215964\" (2025-12-08 17:46:04 +0000 UTC to 2025-12-08 17:46:05 +0000 UTC (now=2025-12-08 17:46:11.370941423 +0000 UTC))" 2025-12-08T17:46:11.371146599+00:00 stderr F I1208 17:46:11.371119 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215971\" [serving] 
validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215970\" (2025-12-08 16:46:10 +0000 UTC to 2028-12-08 16:46:10 +0000 UTC (now=2025-12-08 17:46:11.371092137 +0000 UTC))" 2025-12-08T17:46:11.371287264+00:00 stderr F I1208 17:46:11.371263 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:46:11.371252763 +0000 UTC))" 2025-12-08T17:46:11.371287264+00:00 stderr F I1208 17:46:11.371281 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:46:11.371274003 +0000 UTC))" 2025-12-08T17:46:11.371300054+00:00 stderr F I1208 17:46:11.371293 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:46:11.371285874 +0000 UTC))" 2025-12-08T17:46:11.371329665+00:00 stderr F I1208 17:46:11.371309 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:46:11.371297564 +0000 UTC))" 2025-12-08T17:46:11.371340505+00:00 stderr F I1208 17:46:11.371326 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:46:11.371318075 +0000 UTC))" 2025-12-08T17:46:11.371350556+00:00 stderr F I1208 17:46:11.371340 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:46:11.371332255 +0000 UTC))" 2025-12-08T17:46:11.371367586+00:00 stderr F I1208 17:46:11.371353 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC 
(now=2025-12-08 17:46:11.371344786 +0000 UTC))" 2025-12-08T17:46:11.371377697+00:00 stderr F I1208 17:46:11.371365 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:46:11.371357776 +0000 UTC))" 2025-12-08T17:46:11.371387667+00:00 stderr F I1208 17:46:11.371378 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:46:11.371369676 +0000 UTC))" 2025-12-08T17:46:11.371397597+00:00 stderr F I1208 17:46:11.371391 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:46:11.371384257 +0000 UTC))" 2025-12-08T17:46:11.371409987+00:00 stderr F I1208 17:46:11.371404 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:46:11.371395817 +0000 UTC))" 2025-12-08T17:46:11.371567412+00:00 stderr F I1208 17:46:11.371544 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/tmp/serving-cert-260175964/tls.crt::/tmp/serving-cert-260175964/tls.key" certDetail="\"localhost\" [serving] validServingFor=[localhost] issuer=\"check-endpoints-signer@1765215964\" (2025-12-08 17:46:04 +0000 UTC to 2025-12-08 17:46:05 +0000 UTC (now=2025-12-08 17:46:11.371535751 +0000 UTC))" 2025-12-08T17:46:11.371701316+00:00 stderr F I1208 17:46:11.371678 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215971\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215970\" (2025-12-08 16:46:10 +0000 UTC to 2028-12-08 16:46:10 +0000 UTC (now=2025-12-08 17:46:11.371669185 +0000 UTC))" 2025-12-08T17:46:11.406215232+00:00 stderr F I1208 17:46:11.406144 1 reflector.go:430] "Caches populated" type="*v1.CustomResourceDefinition" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:11.471300685+00:00 stderr F I1208 17:46:11.471214 1 base_controller.go:82] Caches are synced for CheckEndpointsTimeToStart 2025-12-08T17:46:11.471300685+00:00 stderr F I1208 17:46:11.471270 1 base_controller.go:119] Starting #1 worker of CheckEndpointsTimeToStart controller ... 
2025-12-08T17:46:11.471402578+00:00 stderr F I1208 17:46:11.471372 1 base_controller.go:76] Waiting for caches to sync for CheckEndpointsStop 2025-12-08T17:46:11.471402578+00:00 stderr F I1208 17:46:11.471389 1 base_controller.go:82] Caches are synced for CheckEndpointsStop 2025-12-08T17:46:11.471402578+00:00 stderr F I1208 17:46:11.471397 1 base_controller.go:119] Starting #1 worker of CheckEndpointsStop controller ... 2025-12-08T17:46:11.471455810+00:00 stderr F I1208 17:46:11.471428 1 base_controller.go:181] Shutting down CheckEndpointsTimeToStart ... 2025-12-08T17:46:11.472131421+00:00 stderr F I1208 17:46:11.472074 1 base_controller.go:76] Waiting for caches to sync for check-endpoints 2025-12-08T17:46:11.472194113+00:00 stderr F I1208 17:46:11.472140 1 base_controller.go:123] Shutting down worker of CheckEndpointsTimeToStart controller ... 2025-12-08T17:46:11.472204613+00:00 stderr F I1208 17:46:11.472196 1 base_controller.go:113] All CheckEndpointsTimeToStart workers have been terminated 2025-12-08T17:46:11.473782070+00:00 stderr F I1208 17:46:11.473704 1 reflector.go:430] "Caches populated" type="*v1alpha1.PodNetworkConnectivityCheck" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:11.479013667+00:00 stderr F I1208 17:46:11.478920 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:11.573042280+00:00 stderr F I1208 17:46:11.572961 1 base_controller.go:82] Caches are synced for check-endpoints 2025-12-08T17:46:11.573042280+00:00 stderr F I1208 17:46:11.572997 1 base_controller.go:119] Starting #1 worker of check-endpoints controller ...
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-insecure-readyz/0.log
2025-12-08T17:46:04.761689252+00:00 stderr F I1208 17:46:04.761515 1 readyz.go:111] Listening on 0.0.0.0:6080
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-regeneration-controller/0.log
2025-12-08T17:46:04.520644306+00:00 stderr F W1208 17:46:04.520533 1 cmd.go:257] Using insecure, self-signed certificates 2025-12-08T17:46:04.520844552+00:00 stderr F I1208 17:46:04.520688 1 crypto.go:594] Generating new CA for cert-regeneration-controller-signer@1765215964 cert, and key in /tmp/serving-cert-292974847/serving-signer.crt, /tmp/serving-cert-292974847/serving-signer.key 2025-12-08T17:46:04.520844552+00:00 stderr F Validity period of the certificate for "cert-regeneration-controller-signer@1765215964" is unset, resetting to 43800h0m0s! 2025-12-08T17:46:05.385855977+00:00 stderr F I1208 17:46:05.385784 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:46:05.386528587+00:00 stderr F I1208 17:46:05.386504 1 observer_polling.go:159] Starting file observer 2025-12-08T17:46:05.386593179+00:00 stderr F I1208 17:46:05.386555 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:46:05.386593179+00:00 stderr F I1208 17:46:05.386573 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:46:05.386593179+00:00 stderr F I1208 17:46:05.386579 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:46:05.386593179+00:00 stderr F I1208 17:46:05.386584 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:46:05.386593179+00:00 stderr F I1208 17:46:05.386589 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:46:10.398422252+00:00 stderr F I1208 17:46:10.398343 1 builder.go:304] cert-regeneration-controller version v0.0.0-unknown-c3d9642-c3d9642 2025-12-08T17:46:10.405122654+00:00 stderr F I1208 17:46:10.405070 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:46:10.405742862+00:00 stderr F I1208 17:46:10.405703 1 leaderelection.go:257] attempting to acquire leader lease openshift-kube-apiserver/cert-regeneration-controller-lock... 
2025-12-08T17:46:10.411867606+00:00 stderr F I1208 17:46:10.411821 1 leaderelection.go:271] successfully acquired lease openshift-kube-apiserver/cert-regeneration-controller-lock 2025-12-08T17:46:10.412286728+00:00 stderr F I1208 17:46:10.412143 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-kube-apiserver", Name:"cert-regeneration-controller-lock", UID:"a52d9256-5b5f-401d-8892-664b6804f54f", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"38873", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' crc_da986ad6-cf8d-4e17-ae5d-44ec1919059e became leader 2025-12-08T17:46:10.413350331+00:00 stderr F I1208 17:46:10.413320 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:46:10.417304439+00:00 stderr F I1208 17:46:10.417246 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.417583037+00:00 stderr F I1208 17:46:10.417533 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.417671540+00:00 stderr F I1208 17:46:10.417599 1 cmd.go:126] FeatureGates initialized: knownFeatureGates=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass 
VolumeGroupSnapshot] 2025-12-08T17:46:10.417739462+00:00 stderr F I1208 17:46:10.417643 1 event.go:377] Event(v1.ObjectReference{Kind:"Node", Namespace:"openshift-kube-apiserver", Name:"crc", UID:"23216ff3-032e-49af-af7e-1d23d5907b59", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:46:10.417954258+00:00 stderr F I1208 17:46:10.417919 1 certrotationcontroller.go:148] Setting monthPeriod to 720h0m0s, yearPeriod to 8760h0m0s, tenMonthPeriod to 7008h0m0s 2025-12-08T17:46:10.424086922+00:00 stderr F I1208 17:46:10.423914 1 cabundlesyncer.go:82] Starting CA bundle controller 2025-12-08T17:46:10.424086922+00:00 stderr F I1208 17:46:10.423953 1 shared_informer.go:350] "Waiting for caches to sync" controller="CABundleController" 
2025-12-08T17:46:10.424317399+00:00 stderr F I1208 17:46:10.424292 1 certrotationcontroller.go:919] Starting CertRotation 2025-12-08T17:46:10.424317399+00:00 stderr F I1208 17:46:10.424307 1 certrotationcontroller.go:884] Waiting for CertRotation 2025-12-08T17:46:10.428415702+00:00 stderr F I1208 17:46:10.428371 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.428641299+00:00 stderr F I1208 17:46:10.428453 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.430199756+00:00 stderr F I1208 17:46:10.430172 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.431564187+00:00 stderr F I1208 17:46:10.430992 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.431629569+00:00 stderr F I1208 17:46:10.431033 1 reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.432225126+00:00 stderr F I1208 17:46:10.432097 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.444160665+00:00 stderr F I1208 17:46:10.444093 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.448907537+00:00 stderr F I1208 17:46:10.448842 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.459162065+00:00 stderr F I1208 17:46:10.459108 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.469764554+00:00 stderr F I1208 17:46:10.469637 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=kubeapiservers" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524278 1 shared_informer.go:357] "Caches are synced" controller="CABundleController" 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524399 1 servicehostname.go:46] syncing servicenetwork hostnames: [10.217.4.1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local openshift openshift.default openshift.default.svc openshift.default.svc.cluster.local] 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524447 1 externalloadbalancer.go:27] syncing external loadbalancer hostnames: api.crc.testing 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524455 1 internalloadbalancer.go:27] syncing internal loadbalancer hostnames: api-int.crc.testing 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524461 1 certrotationcontroller.go:902] Finished waiting for CertRotation 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524506 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524512 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524519 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 
2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524560 1 servicehostname.go:46] syncing servicenetwork hostnames: [10.217.4.1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local openshift openshift.default openshift.default.svc openshift.default.svc.cluster.local] 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524599 1 externalloadbalancer.go:27] syncing external loadbalancer hostnames: api.crc.testing 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524615 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524621 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524645491+00:00 stderr F I1208 17:46:10.524626 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:46:10.524713253+00:00 stderr F I1208 17:46:10.524641 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524713253+00:00 stderr F I1208 17:46:10.524647 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524713253+00:00 stderr F I1208 17:46:10.524651 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:46:10.524713253+00:00 stderr F I1208 17:46:10.524665 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524713253+00:00 stderr F I1208 17:46:10.524670 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524713253+00:00 stderr F I1208 17:46:10.524674 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:46:10.524713253+00:00 stderr F I1208 17:46:10.524694 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524713253+00:00 stderr F I1208 17:46:10.524701 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524713253+00:00 stderr F I1208 17:46:10.524705 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:46:10.524753414+00:00 stderr F I1208 17:46:10.524726 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524753414+00:00 stderr F I1208 17:46:10.524737 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524753414+00:00 stderr F I1208 17:46:10.524742 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:46:10.524765364+00:00 stderr F I1208 17:46:10.524759 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524775084+00:00 stderr F I1208 17:46:10.524765 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524775084+00:00 stderr F I1208 17:46:10.524769 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:46:10.524808825+00:00 stderr F I1208 17:46:10.524788 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524808825+00:00 stderr F I1208 17:46:10.524797 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524808825+00:00 stderr F I1208 17:46:10.524802 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 
2025-12-08T17:46:10.524855987+00:00 stderr F I1208 17:46:10.524828 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524855987+00:00 stderr F I1208 17:46:10.524838 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524855987+00:00 stderr F I1208 17:46:10.524842 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:46:10.524867787+00:00 stderr F I1208 17:46:10.524857 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524867787+00:00 stderr F I1208 17:46:10.524862 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524894778+00:00 stderr F I1208 17:46:10.524867 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:46:10.524909638+00:00 stderr F I1208 17:46:10.524901 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524919179+00:00 stderr F I1208 17:46:10.524908 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524919179+00:00 stderr F I1208 17:46:10.524912 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:46:10.524955820+00:00 stderr F I1208 17:46:10.524931 1 base_controller.go:76] Waiting for caches to sync for CertRotationController 2025-12-08T17:46:10.524955820+00:00 stderr F I1208 17:46:10.524940 1 base_controller.go:82] Caches are synced for CertRotationController 2025-12-08T17:46:10.524955820+00:00 stderr F I1208 17:46:10.524945 1 base_controller.go:119] Starting #1 worker of CertRotationController controller ... 2025-12-08T17:56:10.429202886+00:00 stderr F I1208 17:56:10.429137 1 externalloadbalancer.go:27] syncing external loadbalancer hostnames: api.crc.testing 2025-12-08T17:56:10.432588749+00:00 stderr F I1208 17:56:10.432527 1 servicehostname.go:46] syncing servicenetwork hostnames: [10.217.4.1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local openshift openshift.default openshift.default.svc openshift.default.svc.cluster.local] ././@LongLink0000644000000000000000000000026700000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-syncer/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000755000175000017500000000000015115611521033043 5ustar zuulzuul././@LongLink0000644000000000000000000000027400000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-syncer/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000644000175000017500000001562215115611513033054 0ustar zuulzuul2025-12-08T17:46:04.224723384+00:00 stderr F I1208 17:46:04.224605 1 base_controller.go:76] Waiting for caches to sync for CertSyncController 2025-12-08T17:46:04.224723384+00:00 stderr F I1208 17:46:04.224701 1 observer_polling.go:159] Starting file observer 2025-12-08T17:46:10.325025859+00:00 stderr F I1208 17:46:10.324938 1 base_controller.go:82] Caches are synced for CertSyncController 2025-12-08T17:46:10.325025859+00:00 stderr F I1208 17:46:10.324986 1 base_controller.go:119] 
Starting #1 worker of CertSyncController controller ... 2025-12-08T17:46:10.325119652+00:00 stderr F I1208 17:46:10.325066 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true} {control-plane-node-kubeconfig false} {check-endpoints-kubeconfig false}] 2025-12-08T17:46:10.325414270+00:00 stderr F I1208 17:46:10.325365 1 certsync_controller.go:178] Syncing secrets: [{aggregator-client false} {localhost-serving-cert-certkey false} {service-network-serving-certkey false} {external-loadbalancer-serving-certkey false} {internal-loadbalancer-serving-certkey false} {bound-service-account-signing-key false} {control-plane-node-admin-client-cert-key false} {check-endpoints-client-cert-key false} {kubelet-client false} {node-kubeconfigs false} {user-serving-cert true} {user-serving-cert-000 true} {user-serving-cert-001 true} {user-serving-cert-002 true} {user-serving-cert-003 true} {user-serving-cert-004 true} {user-serving-cert-005 true} {user-serving-cert-006 true} {user-serving-cert-007 true} {user-serving-cert-008 true} {user-serving-cert-009 true}] 2025-12-08T17:56:10.241927847+00:00 stderr F I1208 17:56:10.241327 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true} {control-plane-node-kubeconfig false} {check-endpoints-kubeconfig false}] 2025-12-08T17:56:10.241927847+00:00 stderr F I1208 17:56:10.241683 1 certsync_controller.go:178] Syncing secrets: [{aggregator-client false} {localhost-serving-cert-certkey false} {service-network-serving-certkey false} {external-loadbalancer-serving-certkey false} {internal-loadbalancer-serving-certkey false} {bound-service-account-signing-key false} {control-plane-node-admin-client-cert-key false} {check-endpoints-client-cert-key false} {kubelet-client false} {node-kubeconfigs false} {user-serving-cert true} {user-serving-cert-000 true} {user-serving-cert-001 true} {user-serving-cert-002 true} {user-serving-cert-003 true} {user-serving-cert-004 true} {user-serving-cert-005 true} {user-serving-cert-006 true} {user-serving-cert-007 true} {user-serving-cert-008 true} {user-serving-cert-009 true}] 2025-12-08T17:56:10.243973693+00:00 stderr F I1208 17:56:10.242253 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true} {control-plane-node-kubeconfig false} {check-endpoints-kubeconfig false}] 2025-12-08T17:56:10.243973693+00:00 stderr F I1208 17:56:10.242463 1 certsync_controller.go:178] Syncing secrets: [{aggregator-client false} {localhost-serving-cert-certkey false} {service-network-serving-certkey false} {external-loadbalancer-serving-certkey false} {internal-loadbalancer-serving-certkey false} {bound-service-account-signing-key false} {control-plane-node-admin-client-cert-key false} {check-endpoints-client-cert-key false} {kubelet-client false} {node-kubeconfigs false} {user-serving-cert true} {user-serving-cert-000 true} {user-serving-cert-001 true} {user-serving-cert-002 true} {user-serving-cert-003 true} {user-serving-cert-004 true} {user-serving-cert-005 true} {user-serving-cert-006 true} {user-serving-cert-007 true} {user-serving-cert-008 true} {user-serving-cert-009 true}] 2025-12-08T17:56:10.274704227+00:00 stderr F I1208 17:56:10.274600 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true} {control-plane-node-kubeconfig false} {check-endpoints-kubeconfig false}] 
2025-12-08T17:56:10.275128718+00:00 stderr F I1208 17:56:10.275066 1 certsync_controller.go:178] Syncing secrets: [{aggregator-client false} {localhost-serving-cert-certkey false} {service-network-serving-certkey false} {external-loadbalancer-serving-certkey false} {internal-loadbalancer-serving-certkey false} {bound-service-account-signing-key false} {control-plane-node-admin-client-cert-key false} {check-endpoints-client-cert-key false} {kubelet-client false} {node-kubeconfigs false} {user-serving-cert true} {user-serving-cert-000 true} {user-serving-cert-001 true} {user-serving-cert-002 true} {user-serving-cert-003 true} {user-serving-cert-004 true} {user-serving-cert-005 true} {user-serving-cert-006 true} {user-serving-cert-007 true} {user-serving-cert-008 true} {user-serving-cert-009 true}] 2025-12-08T17:56:10.276270859+00:00 stderr F I1208 17:56:10.276224 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true} {control-plane-node-kubeconfig false} {check-endpoints-kubeconfig false}] 2025-12-08T17:56:10.276555537+00:00 stderr F I1208 17:56:10.276497 1 certsync_controller.go:178] Syncing secrets: [{aggregator-client false} {localhost-serving-cert-certkey false} {service-network-serving-certkey false} {external-loadbalancer-serving-certkey false} {internal-loadbalancer-serving-certkey false} {bound-service-account-signing-key false} {control-plane-node-admin-client-cert-key false} {check-endpoints-client-cert-key false} {kubelet-client false} {node-kubeconfigs false} {user-serving-cert true} {user-serving-cert-000 true} {user-serving-cert-001 true} {user-serving-cert-002 true} {user-serving-cert-003 true} {user-serving-cert-004 true} {user-serving-cert-005 true} {user-serving-cert-006 true} {user-serving-cert-007 true} {user-serving-cert-008 true} {user-serving-cert-009 true}] 2025-12-08T17:56:10.277346739+00:00 stderr F I1208 17:56:10.277299 1 certsync_controller.go:74] Syncing configmaps: [{aggregator-client-ca false} {client-ca false} {trusted-ca-bundle true} {control-plane-node-kubeconfig false} {check-endpoints-kubeconfig false}] 2025-12-08T17:56:10.277675118+00:00 stderr F I1208 17:56:10.277606 1 certsync_controller.go:178] Syncing secrets: [{aggregator-client false} {localhost-serving-cert-certkey false} {service-network-serving-certkey false} {external-loadbalancer-serving-certkey false} {internal-loadbalancer-serving-certkey false} {bound-service-account-signing-key false} {control-plane-node-admin-client-cert-key false} {check-endpoints-client-cert-key false} {kubelet-client false} {node-kubeconfigs false} {user-serving-cert true} {user-serving-cert-000 true} {user-serving-cert-001 true} {user-serving-cert-002 true} {user-serving-cert-003 true} {user-serving-cert-004 true} {user-serving-cert-005 true} {user-serving-cert-006 true} {user-serving-cert-007 true} {user-serving-cert-008 true} {user-serving-cert-009 true}] ././@LongLink0000644000000000000000000000026000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node0000755000175000017500000000000015115611514033064 5ustar zuulzuul././@LongLink0000644000000000000000000000027000000000000011602 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/webhook/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node0000755000175000017500000000000015115611523033064 5ustar zuulzuul././@LongLink0000644000000000000000000000027500000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/webhook/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node0000644000175000017500000000716215115611514033074 0ustar zuulzuul2025-12-08T17:44:02.074585423+00:00 stderr F + [[ -f /env/_master ]] 2025-12-08T17:44:02.074585423+00:00 stderr F + ho_enable=--enable-hybrid-overlay 2025-12-08T17:44:02.075207200+00:00 stderr F ++ date '+%m%d %H:%M:%S.%N' 2025-12-08T17:44:02.079810686+00:00 stderr F + echo 'I1208 17:44:02.079063856 - network-node-identity - start webhook' 2025-12-08T17:44:02.079887948+00:00 stdout F I1208 17:44:02.079063856 - network-node-identity - start webhook 2025-12-08T17:44:02.079931609+00:00 stderr F + exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 --webhook-cert-dir=/etc/webhook-cert --webhook-host=127.0.0.1 --webhook-port=9743 --enable-hybrid-overlay --enable-interconnect --disable-approver --extra-allowed-user=system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane --wait-for-kubernetes-api=200s --pod-admission-conditions=/var/run/ovnkube-identity-config/additional-pod-admission-cond.json --loglevel=2 2025-12-08T17:44:02.237252270+00:00 stderr F I1208 17:44:02.236976 1 ovnkubeidentity.go:133] Config: {kubeconfig: apiServer:https://api-int.crc.testing:6443 logLevel:2 port:9743 host:127.0.0.1 certDir:/etc/webhook-cert metricsAddress:0 leaseNamespace: enableInterconnect:true enableHybridOverlay:true disableWebhook:false disableApprover:true waitForKAPIDuration:200000000000 localKAPIPort:6443 extraAllowedUsers:{slice:[system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane] separator:{sep: disabled:false customized:false} hasBeenSet:true keepSpace:false} csrAcceptanceConditionFile: csrAcceptanceConditions:[] podAdmissionConditionFile:/var/run/ovnkube-identity-config/additional-pod-admission-cond.json podAdmissionConditions:[]} 2025-12-08T17:44:02.237252270+00:00 stderr F W1208 17:44:02.237223 1 client_config.go:667] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 
2025-12-08T17:44:02.238477034+00:00 stderr F I1208 17:44:02.238443 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:02.238477034+00:00 stderr F I1208 17:44:02.238463 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:02.238477034+00:00 stderr F I1208 17:44:02.238468 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:02.238477034+00:00 stderr F I1208 17:44:02.238472 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:02.238498565+00:00 stderr F I1208 17:44:02.238476 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:02.247671335+00:00 stderr F I1208 17:44:02.247621 1 ovnkubeidentity.go:352] Waiting for caches to sync 2025-12-08T17:44:02.261269846+00:00 stderr F I1208 17:44:02.261164 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:02.350481888+00:00 stderr F I1208 17:44:02.350431 1 certwatcher.go:211] "Updated current TLS certificate" logger="controller-runtime.certwatcher" 2025-12-08T17:44:02.355529586+00:00 stderr F I1208 17:44:02.350993 1 certwatcher.go:133] "Starting certificate poll+watcher" logger="controller-runtime.certwatcher" interval="10s" 2025-12-08T17:44:02.355529586+00:00 stderr F I1208 17:44:02.351301 1 ovnkubeidentity.go:431] Starting the webhook server 2025-12-08T17:46:35.774270624+00:00 stderr F I1208 17:46:35.774153 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" ././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/approver/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node0000755000175000017500000000000015115611523033064 5ustar zuulzuul././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/approver/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node0000644000175000017500000002760515115611514033100 0ustar zuulzuul2025-12-08T17:44:02.197616159+00:00 stderr F + [[ -f /env/_master ]] 2025-12-08T17:44:02.198043110+00:00 stderr F ++ date '+%m%d %H:%M:%S.%N' 2025-12-08T17:44:02.200590560+00:00 stdout F I1208 17:44:02.200153688 - network-node-identity - start approver 2025-12-08T17:44:02.200608851+00:00 stderr F + echo 'I1208 17:44:02.200153688 - network-node-identity - start approver' 2025-12-08T17:44:02.200608851+00:00 stderr F + exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 --disable-webhook --csr-acceptance-conditions=/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json --loglevel=4 2025-12-08T17:44:02.248444515+00:00 stderr F I1208 17:44:02.248321 1 ovnkubeidentity.go:133] Config: {kubeconfig: apiServer:https://api-int.crc.testing:6443 logLevel:4 port:9443 host:localhost certDir: metricsAddress:0 leaseNamespace: enableInterconnect:false enableHybridOverlay:false disableWebhook:true disableApprover:false waitForKAPIDuration:0 
localKAPIPort:6443 extraAllowedUsers:{slice:[] separator:{sep: disabled:false customized:false} hasBeenSet:false keepSpace:false} csrAcceptanceConditionFile:/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json csrAcceptanceConditions:[] podAdmissionConditionFile: podAdmissionConditions:[]} 2025-12-08T17:44:02.248444515+00:00 stderr F W1208 17:44:02.248416 1 client_config.go:667] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 2025-12-08T17:44:02.250058570+00:00 stderr F I1208 17:44:02.250027 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:02.250058570+00:00 stderr F I1208 17:44:02.250048 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:02.250058570+00:00 stderr F I1208 17:44:02.250053 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:02.250078970+00:00 stderr F I1208 17:44:02.250057 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:02.250078970+00:00 stderr F I1208 17:44:02.250060 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:02.251778006+00:00 stderr F I1208 17:44:02.251741 1 ovnkubeidentity.go:472] Starting certificate signing request approver 2025-12-08T17:44:02.251921210+00:00 stderr F I1208 17:44:02.251899 1 leaderelection.go:257] attempting to acquire leader lease openshift-network-node-identity/ovnkube-identity... 2025-12-08T17:44:02.264022791+00:00 stderr F I1208 17:44:02.263717 1 leaderelection.go:271] successfully acquired lease openshift-network-node-identity/ovnkube-identity 2025-12-08T17:44:02.264189055+00:00 stderr F I1208 17:44:02.264138 1 recorder.go:104] "crc_0000a3a4-7d0f-458a-8ee6-1ae77da57101 became leader" logger="events" type="Normal" object={"kind":"Lease","namespace":"openshift-network-node-identity","name":"ovnkube-identity","uid":"fee3ec17-3b17-44dd-a09d-51f732ff7195","apiVersion":"coordination.k8s.io/v1","resourceVersion":"36390"} reason="LeaderElection" 2025-12-08T17:44:02.265289145+00:00 stderr F I1208 17:44:02.264469 1 controller.go:246] "Starting EventSource" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" source="kind source: *v1.CertificateSigningRequest" 2025-12-08T17:44:02.272808270+00:00 stderr F I1208 17:44:02.272733 1 reflector.go:357] "Starting reflector" logger="controller-runtime.cache" type="*v1.CertificateSigningRequest" resyncPeriod="9h43m49.315681528s" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:44:02.272808270+00:00 stderr F I1208 17:44:02.272756 1 reflector.go:403] "Listing and watching" logger="controller-runtime.cache" type="*v1.CertificateSigningRequest" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:44:02.274398183+00:00 stderr F I1208 17:44:02.274366 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.CertificateSigningRequest" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:44:02.373682392+00:00 stderr F I1208 17:44:02.373592 1 controller.go:186] "Starting Controller" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" 2025-12-08T17:44:02.373682392+00:00 stderr F I1208 17:44:02.373634 1 
controller.go:195] "Starting workers" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" worker count=1 2025-12-08T17:44:02.374285148+00:00 stderr F I1208 17:44:02.373991 1 approver.go:230] Finished syncing CSR csr-rlqr8 for unknown node in 17.371µs 2025-12-08T17:44:02.374285148+00:00 stderr F I1208 17:44:02.374082 1 approver.go:230] Finished syncing CSR csr-5pdpj for unknown node in 14.29µs 2025-12-08T17:44:02.374285148+00:00 stderr F I1208 17:44:02.374104 1 approver.go:230] Finished syncing CSR csr-852t7 for unknown node in 12.07µs 2025-12-08T17:44:02.374285148+00:00 stderr F I1208 17:44:02.374156 1 approver.go:230] Finished syncing CSR csr-qcbqt for unknown node in 11.55µs 2025-12-08T17:44:08.915004370+00:00 stderr F I1208 17:44:08.914864 1 recorder.go:104] "CSR \"csr-xwrbp\" has been approved" logger="events" type="Normal" object={"kind":"CertificateSigningRequest","name":"csr-xwrbp"} reason="CSRApproved" 2025-12-08T17:44:08.921400024+00:00 stderr F I1208 17:44:08.921324 1 approver.go:230] Finished syncing CSR csr-xwrbp for crc node in 6.888147ms 2025-12-08T17:44:08.922219116+00:00 stderr F I1208 17:44:08.921486 1 approver.go:230] Finished syncing CSR csr-xwrbp for unknown node in 90.082µs 2025-12-08T17:44:08.931257783+00:00 stderr F I1208 17:44:08.931197 1 approver.go:230] Finished syncing CSR csr-xwrbp for unknown node in 96.892µs 2025-12-08T17:44:14.466714824+00:00 stderr F I1208 17:44:14.466647 1 recorder.go:104] "CSR \"csr-5xvnm\" has been approved" logger="events" type="Normal" object={"kind":"CertificateSigningRequest","name":"csr-5xvnm"} reason="CSRApproved" 2025-12-08T17:44:14.470603789+00:00 stderr F I1208 17:44:14.470540 1 approver.go:230] Finished syncing CSR csr-5xvnm for crc node in 4.276047ms 2025-12-08T17:44:14.470711842+00:00 stderr F I1208 17:44:14.470670 1 approver.go:230] Finished syncing CSR csr-5xvnm for unknown node in 63.252µs 2025-12-08T17:44:14.476464439+00:00 stderr F I1208 17:44:14.476366 1 approver.go:230] Finished syncing CSR csr-5xvnm for unknown node in 69.422µs 2025-12-08T17:45:52.348123020+00:00 stderr F I1208 17:45:52.348076 1 reflector.go:946] "Watch close" logger="controller-runtime.cache" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" type="*v1.CertificateSigningRequest" totalItems=11 2025-12-08T17:45:52.348928744+00:00 stderr F I1208 17:45:52.348904 1 reflector.go:518] "Watch failed - backing off" logger="controller-runtime.cache" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" type="*v1.CertificateSigningRequest" err="Get \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests?allowWatchBookmarks=true&resourceVersion=38583&timeoutSeconds=325&watch=true\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:45:53.161904246+00:00 stderr F I1208 17:45:53.161788 1 reflector.go:518] "Watch failed - backing off" logger="controller-runtime.cache" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" type="*v1.CertificateSigningRequest" err="Get \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests?allowWatchBookmarks=true&resourceVersion=38583&timeoutSeconds=527&watch=true\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:45:56.343563436+00:00 stderr F I1208 17:45:56.343466 1 reflector.go:518] "Watch failed - backing off" logger="controller-runtime.cache" 
reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" type="*v1.CertificateSigningRequest" err="Get \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests?allowWatchBookmarks=true&resourceVersion=38583&timeoutSeconds=416&watch=true\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:46:00.501643145+00:00 stderr F I1208 17:46:00.501574 1 reflector.go:518] "Watch failed - backing off" logger="controller-runtime.cache" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" type="*v1.CertificateSigningRequest" err="Get \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests?allowWatchBookmarks=true&resourceVersion=38583&timeoutSeconds=339&watch=true\": dial tcp 38.102.83.243:6443: connect: connection refused" 2025-12-08T17:46:02.315818358+00:00 stderr F E1208 17:46:02.315665 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-network-node-identity/leases/ovnkube-identity?timeout=15s": dial tcp 38.102.83.243:6443: connect: connection refused, falling back to slow path 2025-12-08T17:46:02.316603602+00:00 stderr F E1208 17:46:02.316541 1 leaderelection.go:436] error retrieving resource lock openshift-network-node-identity/ovnkube-identity: Get "https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/openshift-network-node-identity/leases/ovnkube-identity?timeout=15s": dial tcp 38.102.83.243:6443: connect: connection refused 2025-12-08T17:46:07.019959905+00:00 stderr F I1208 17:46:07.019829 1 reflector.go:543] "Watch closed" logger="controller-runtime.cache" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" type="*v1.CertificateSigningRequest" err="too old resource version: 38583 (38840)" 2025-12-08T17:46:22.990429338+00:00 stderr F I1208 17:46:22.990310 1 reflector.go:403] "Listing and watching" logger="controller-runtime.cache" type="*v1.CertificateSigningRequest" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:46:22.993157040+00:00 stderr F I1208 17:46:22.993073 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.CertificateSigningRequest" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:46:22.993505920+00:00 stderr F I1208 17:46:22.993426 1 approver.go:230] Finished syncing CSR csr-5pdpj for unknown node in 152.814µs 2025-12-08T17:46:22.993554322+00:00 stderr F I1208 17:46:22.993533 1 approver.go:230] Finished syncing CSR csr-5xvnm for unknown node in 34.071µs 2025-12-08T17:46:22.993637424+00:00 stderr F I1208 17:46:22.993594 1 approver.go:230] Finished syncing CSR csr-852t7 for unknown node in 37.361µs 2025-12-08T17:46:22.993841100+00:00 stderr F I1208 17:46:22.993800 1 approver.go:230] Finished syncing CSR csr-qcbqt for unknown node in 28.021µs 2025-12-08T17:46:22.994023525+00:00 stderr F I1208 17:46:22.993982 1 approver.go:230] Finished syncing CSR csr-rlqr8 for unknown node in 69.672µs 2025-12-08T17:46:22.994156079+00:00 stderr F I1208 17:46:22.994117 1 approver.go:230] Finished syncing CSR csr-xwrbp for unknown node in 24.761µs 2025-12-08T17:53:48.994866013+00:00 stderr F I1208 17:53:48.994749 1 reflector.go:946] "Watch close" logger="controller-runtime.cache" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" type="*v1.CertificateSigningRequest" totalItems=9 
2025-12-08T18:02:20.997489223+00:00 stderr F I1208 18:02:20.997420 1 reflector.go:946] "Watch close" logger="controller-runtime.cache" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" type="*v1.CertificateSigningRequest" totalItems=10 ././@LongLink0000644000000000000000000000026700000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca-o0000755000175000017500000000000015115611513032724 5ustar zuulzuul././@LongLink0000644000000000000000000000031300000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1/service-ca-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca-o0000755000175000017500000000000015115611521032723 5ustar zuulzuul././@LongLink0000644000000000000000000000032000000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1/service-ca-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca-o0000644000175000017500000013735515115611513032744 0ustar zuulzuul2025-12-08T17:44:20.493966708+00:00 stderr F I1208 17:44:20.489858 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:44:20.493966708+00:00 stderr F I1208 17:44:20.491054 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 
2025-12-08T17:44:20.493966708+00:00 stderr F I1208 17:44:20.493485 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:20.493966708+00:00 stderr F I1208 17:44:20.493497 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:20.493966708+00:00 stderr F I1208 17:44:20.493502 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:20.493966708+00:00 stderr F I1208 17:44:20.493506 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:20.493966708+00:00 stderr F I1208 17:44:20.493620 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:20.725778532+00:00 stderr F I1208 17:44:20.725088 1 builder.go:304] service-ca-operator version - 2025-12-08T17:44:20.732966377+00:00 stderr F I1208 17:44:20.732921 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:21.643950367+00:00 stderr F I1208 17:44:21.640116 1 requestheader_controller.go:255] Loaded a new request header values for RequestHeaderAuthRequestController 2025-12-08T17:44:21.668916978+00:00 stderr F I1208 17:44:21.668830 1 maxinflight.go:139] "Initialized nonMutatingChan" len=400 2025-12-08T17:44:21.668916978+00:00 stderr F I1208 17:44:21.668864 1 maxinflight.go:145] "Initialized mutatingChan" len=200 2025-12-08T17:44:21.668916978+00:00 stderr F I1208 17:44:21.668906 1 maxinflight.go:116] "Set denominator for readonly requests" limit=400 2025-12-08T17:44:21.668953859+00:00 stderr F I1208 17:44:21.668913 1 maxinflight.go:120] "Set denominator for mutating requests" limit=200 2025-12-08T17:44:21.696052897+00:00 stderr F I1208 17:44:21.695966 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:21.696052897+00:00 stderr F W1208 17:44:21.695993 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:21.696052897+00:00 stderr F W1208 17:44:21.695997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:21.696052897+00:00 stderr F W1208 17:44:21.696001 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:21.696052897+00:00 stderr F W1208 17:44:21.696005 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:21.696052897+00:00 stderr F W1208 17:44:21.696007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:21.696052897+00:00 stderr F W1208 17:44:21.696010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 
2025-12-08T17:44:21.696200291+00:00 stderr F I1208 17:44:21.696125 1 genericapiserver.go:535] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete 2025-12-08T17:44:21.701776094+00:00 stderr F I1208 17:44:21.701502 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-service-ca-operator.svc\" [serving] validServingFor=[metrics.openshift-service-ca-operator.svc,metrics.openshift-service-ca-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:44:21.701467735 +0000 UTC))" 2025-12-08T17:44:21.704310313+00:00 stderr F I1208 17:44:21.702983 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:21.704310313+00:00 stderr F I1208 17:44:21.703313 1 leaderelection.go:257] attempting to acquire leader lease openshift-service-ca-operator/service-ca-operator-lock... 2025-12-08T17:44:21.704310313+00:00 stderr F I1208 17:44:21.703660 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:21.704310313+00:00 stderr F I1208 17:44:21.704026 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 2025-12-08T17:44:21.705851835+00:00 stderr F I1208 17:44:21.705803 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:21.705851835+00:00 stderr F I1208 17:44:21.705823 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file 2025-12-08T17:44:21.706090882+00:00 stderr F I1208 17:44:21.706047 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:21.710482751+00:00 stderr F I1208 17:44:21.709375 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.710482751+00:00 stderr F I1208 17:44:21.709586 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.710482751+00:00 stderr F I1208 17:44:21.709760 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215861\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215860\" (2025-12-08 16:44:20 +0000 UTC to 2026-12-08 16:44:20 +0000 UTC (now=2025-12-08 17:44:21.701730682 +0000 UTC))" 2025-12-08T17:44:21.710482751+00:00 stderr F I1208 17:44:21.709801 1 secure_serving.go:213] Serving securely on [::]:8443 2025-12-08T17:44:21.710482751+00:00 stderr F I1208 17:44:21.709823 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:21.710482751+00:00 stderr F I1208 17:44:21.709827 1 genericapiserver.go:685] [graceful-termination] waiting for shutdown to be initiated 2025-12-08T17:44:21.710482751+00:00 stderr F I1208 17:44:21.709845 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 
2025-12-08T17:44:21.710482751+00:00 stderr F I1208 17:44:21.709868 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController 2025-12-08T17:44:21.712642870+00:00 stderr F I1208 17:44:21.712597 1 leaderelection.go:271] successfully acquired lease openshift-service-ca-operator/service-ca-operator-lock 2025-12-08T17:44:21.713103412+00:00 stderr F I1208 17:44:21.713065 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.716400432+00:00 stderr F I1208 17:44:21.715743 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-service-ca-operator", Name:"service-ca-operator-lock", UID:"180a5908-6ee2-4a7b-84b9-a1c6b03b4038", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37196", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' service-ca-operator-5b9c976747-cdz4v_c57fc46e-3317-4356-aac2-cb3ec4d3d99a became leader 2025-12-08T17:44:21.723153197+00:00 stderr F I1208 17:44:21.723108 1 starter.go:111] Fetching FeatureGates 2025-12-08T17:44:21.724109563+00:00 stderr F I1208 17:44:21.724024 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:21.741062885+00:00 stderr F I1208 17:44:21.738511 1 reflector.go:376] Caches populated for *v1.ClusterVersion from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.755936582+00:00 stderr F I1208 17:44:21.754244 1 reflector.go:376] Caches populated for *v1.FeatureGate from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.755936582+00:00 stderr F I1208 17:44:21.755002 1 starter.go:160] Setting signing certificate lifetime to 18960h0m0s, minimum trust duration to 9480h0m0s 2025-12-08T17:44:21.760901426+00:00 stderr F I1208 17:44:21.756969 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-service-ca-operator", Name:"service-ca-operator", UID:"1703c560-9cd5-4273-a6b7-22510bce9318", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", 
"ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:21.760901426+00:00 stderr F I1208 17:44:21.758177 1 base_controller.go:76] Waiting for caches to sync for resource-sync 2025-12-08T17:44:21.760901426+00:00 stderr F I1208 17:44:21.758887 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_service-ca 2025-12-08T17:44:21.760901426+00:00 stderr F I1208 17:44:21.759427 1 reflector.go:376] Caches populated for *v1.ServiceAccount from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.763457916+00:00 stderr F I1208 17:44:21.761441 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:21.763501867+00:00 stderr F I1208 17:44:21.763484 1 base_controller.go:76] Waiting for caches to sync for ServiceCAOperator 2025-12-08T17:44:21.766009006+00:00 stderr F I1208 17:44:21.763824 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.766009006+00:00 stderr F I1208 17:44:21.764310 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.766009006+00:00 stderr F I1208 17:44:21.764556 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.766009006+00:00 stderr F I1208 17:44:21.764587 1 reflector.go:376] Caches populated for *v1.Deployment from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.766009006+00:00 stderr F I1208 17:44:21.765394 1 reflector.go:376] Caches populated for *v1.Infrastructure from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.769895152+00:00 stderr F I1208 17:44:21.766599 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.769895152+00:00 stderr F I1208 17:44:21.767892 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.809519 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.810442 1 reflector.go:376] Caches 
populated for *v1.ClusterOperator from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.810968 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.810991 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.811372 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:21.811335352 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.811396 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:21.811383263 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.811416 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:21.811405514 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.811439 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:21.811421324 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.811457 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:21.811444755 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.811478 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:21.811463845 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.811494 1 tlsconfig.go:181] "Loaded client CA" index=6 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:21.811483176 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.811714 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-service-ca-operator.svc\" [serving] validServingFor=[metrics.openshift-service-ca-operator.svc,metrics.openshift-service-ca-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:44:21.811697882 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.811947 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215861\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215860\" (2025-12-08 16:44:20 +0000 UTC to 2026-12-08 16:44:20 +0000 UTC (now=2025-12-08 17:44:21.811930279 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.812304 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:21.812285288 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.812326 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:21.812314009 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.812342 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:21.81233148 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.812357 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:21.8123471 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.812375 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 
certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:21.812363811 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.812400 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:21.812380821 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr F I1208 17:44:21.812417 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:21.812405302 +0000 UTC))" 2025-12-08T17:44:21.815047754+00:00 stderr P I1208 17:44:21.812434 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC 2025-12-08T17:44:21.815338782+00:00 stderr F to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:21.812422942 +0000 UTC))" 2025-12-08T17:44:21.815338782+00:00 stderr F I1208 17:44:21.812612 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-service-ca-operator.svc\" [serving] validServingFor=[metrics.openshift-service-ca-operator.svc,metrics.openshift-service-ca-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:44:21.812597037 +0000 UTC))" 2025-12-08T17:44:21.815338782+00:00 stderr F I1208 17:44:21.812783 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215861\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215860\" (2025-12-08 16:44:20 +0000 UTC to 2026-12-08 16:44:20 +0000 UTC (now=2025-12-08 17:44:21.812768531 +0000 UTC))" 2025-12-08T17:44:21.815338782+00:00 stderr F I1208 17:44:21.813739 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.815456325+00:00 stderr F I1208 17:44:21.815434 1 reflector.go:376] Caches populated for *v1.Namespace from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.816017630+00:00 stderr F I1208 17:44:21.815978 1 shared_informer.go:320] Caches are synced for RequestHeaderAuthRequestController 2025-12-08T17:44:21.860559145+00:00 stderr F I1208 17:44:21.824568 1 reflector.go:376] Caches populated for *v1.ServiceCA from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.861132310+00:00 stderr F I1208 17:44:21.860668 1 
base_controller.go:82] Caches are synced for StatusSyncer_service-ca 2025-12-08T17:44:21.861191652+00:00 stderr F I1208 17:44:21.861172 1 base_controller.go:119] Starting #1 worker of StatusSyncer_service-ca controller ... 2025-12-08T17:44:21.864459871+00:00 stderr F I1208 17:44:21.864421 1 base_controller.go:82] Caches are synced for ServiceCAOperator 2025-12-08T17:44:21.864459871+00:00 stderr F I1208 17:44:21.864436 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:21.864459871+00:00 stderr F I1208 17:44:21.864451 1 base_controller.go:119] Starting #1 worker of ServiceCAOperator controller ... 2025-12-08T17:44:21.864477941+00:00 stderr F I1208 17:44:21.864462 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 2025-12-08T17:44:21.876140139+00:00 stderr F I1208 17:44:21.872660 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:21.975605683+00:00 stderr F I1208 17:44:21.961150 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:22.164714242+00:00 stderr F I1208 17:44:22.162785 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:44:22.260189976+00:00 stderr F I1208 17:44:22.260124 1 base_controller.go:82] Caches are synced for resource-sync 2025-12-08T17:44:22.260189976+00:00 stderr F I1208 17:44:22.260182 1 base_controller.go:119] Starting #1 worker of resource-sync controller ... 2025-12-08T17:44:23.830391946+00:00 stderr F I1208 17:44:23.816004 1 status_controller.go:229] clusteroperator/service-ca diff {"status":{"conditions":[{"lastTransitionTime":"2025-11-02T07:52:03Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:44:23Z","message":"Progressing: All service-ca-operator deployments updated","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:52:05Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:05Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:23.858091581+00:00 stderr F I1208 17:44:23.845303 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-service-ca-operator", Name:"service-ca-operator", UID:"1703c560-9cd5-4273-a6b7-22510bce9318", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/service-ca changed: Progressing changed from True to False ("Progressing: All service-ca-operator deployments updated") 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.606079 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.606014324 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607133 1 tlsconfig.go:181] "Loaded client CA" index=1 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.607116054 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607150 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.607140204 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607171 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.607156745 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607190 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.607178205 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607212 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.607196006 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607231 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.607219146 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607249 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.607237707 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607270 1 tlsconfig.go:181] "Loaded client CA" index=8 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.607257847 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607295 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.607279278 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607620 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-service-ca-operator.svc\" [serving] validServingFor=[metrics.openshift-service-ca-operator.svc,metrics.openshift-service-ca-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:44:30.607605497 +0000 UTC))" 2025-12-08T17:44:30.612866290+00:00 stderr F I1208 17:44:30.607791 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215861\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215860\" (2025-12-08 16:44:20 +0000 UTC to 2026-12-08 16:44:20 +0000 UTC (now=2025-12-08 17:44:30.607777461 +0000 UTC))" 2025-12-08T17:45:16.041764290+00:00 stderr F I1208 17:45:16.041579 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.041554724 +0000 UTC))" 2025-12-08T17:45:16.041764290+00:00 stderr F I1208 17:45:16.041733 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.041722509 +0000 UTC))" 2025-12-08T17:45:16.041764290+00:00 stderr F I1208 17:45:16.041748 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.041738859 +0000 UTC))" 2025-12-08T17:45:16.041764290+00:00 stderr F I1208 17:45:16.041758 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 
certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.04175171 +0000 UTC))" 2025-12-08T17:45:16.041816322+00:00 stderr F I1208 17:45:16.041769 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.04176202 +0000 UTC))" 2025-12-08T17:45:16.041816322+00:00 stderr F I1208 17:45:16.041780 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.04177338 +0000 UTC))" 2025-12-08T17:45:16.041816322+00:00 stderr F I1208 17:45:16.041794 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.041784241 +0000 UTC))" 2025-12-08T17:45:16.041816322+00:00 stderr F I1208 17:45:16.041809 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.041799241 +0000 UTC))" 2025-12-08T17:45:16.041846112+00:00 stderr F I1208 17:45:16.041826 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.041814162 +0000 UTC))" 2025-12-08T17:45:16.041855123+00:00 stderr F I1208 17:45:16.041845 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.041832902 +0000 UTC))" 2025-12-08T17:45:16.041927625+00:00 stderr F I1208 17:45:16.041863 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 
certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.041851003 +0000 UTC))" 2025-12-08T17:45:16.042089799+00:00 stderr F I1208 17:45:16.042053 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-service-ca-operator.svc\" [serving] validServingFor=[metrics.openshift-service-ca-operator.svc,metrics.openshift-service-ca-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:24 +0000 UTC to 2027-11-02 07:52:25 +0000 UTC (now=2025-12-08 17:45:16.042033418 +0000 UTC))" 2025-12-08T17:45:16.042213163+00:00 stderr F I1208 17:45:16.042194 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215861\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215860\" (2025-12-08 16:44:20 +0000 UTC to 2026-12-08 16:44:20 +0000 UTC (now=2025-12-08 17:45:16.042176232 +0000 UTC))" 2025-12-08T17:46:21.727597133+00:00 stderr F E1208 17:46:21.726849 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-service-ca-operator/leases/service-ca-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:21.728582443+00:00 stderr F E1208 17:46:21.728512 1 leaderelection.go:436] error retrieving resource lock openshift-service-ca-operator/service-ca-operator-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-service-ca-operator/leases/service-ca-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:22.263283402+00:00 stderr F E1208 17:46:22.263213 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.271153448+00:00 stderr F E1208 17:46:22.271100 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.290994983+00:00 stderr F E1208 17:46:22.290773 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.314846220+00:00 stderr F E1208 17:46:22.314766 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.358696186+00:00 stderr F E1208 17:46:22.358608 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection 
refused" 2025-12-08T17:46:22.441391428+00:00 stderr F E1208 17:46:22.441330 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.604500414+00:00 stderr F E1208 17:46:22.604394 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:22.928362285+00:00 stderr F E1208 17:46:22.928276 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.571699706+00:00 stderr F E1208 17:46:23.571378 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.855094497+00:00 stderr F E1208 17:46:24.854739 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.418241562+00:00 stderr F E1208 17:46:27.418174 1 base_controller.go:279] "Unhandled Error" err="resource-sync reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/servicecas/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:56.980746723+00:00 stderr F I1208 17:46:56.979659 1 reflector.go:376] Caches populated for *v1.Namespace from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:00.186663722+00:00 stderr F I1208 17:47:00.185948 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:02.665639758+00:00 stderr F I1208 17:47:02.665559 1 reflector.go:376] Caches populated for *v1.FeatureGate from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:02.763417366+00:00 stderr F I1208 17:47:02.763338 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:04.938065811+00:00 stderr F I1208 17:47:04.937293 1 reflector.go:376] Caches populated for *v1.ServiceAccount from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:05.039346709+00:00 stderr F I1208 17:47:05.039283 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:08.017164678+00:00 stderr F I1208 17:47:08.017070 1 reflector.go:376] Caches populated for *v1.ClusterOperator from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:14.442987278+00:00 stderr F I1208 17:47:14.442370 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:21.880285568+00:00 stderr F I1208 17:47:21.876203 1 reflector.go:376] Caches populated for *v1.Secret from 
k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:33.546796959+00:00 stderr F I1208 17:47:33.545816 1 reflector.go:376] Caches populated for *v1.Infrastructure from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:37.984763721+00:00 stderr F I1208 17:47:37.984090 1 reflector.go:376] Caches populated for *v1.Deployment from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:39.979282516+00:00 stderr F I1208 17:47:39.978509 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:43.782150527+00:00 stderr F I1208 17:47:43.782102 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:47.081916620+00:00 stderr F I1208 17:47:47.081322 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:50.071829460+00:00 stderr F I1208 17:47:50.071741 1 reflector.go:376] Caches populated for *v1.ClusterVersion from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:50.307450277+00:00 stderr F I1208 17:47:50.307039 1 reflector.go:376] Caches populated for *v1.ServiceCA from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:53.222182693+00:00 stderr F I1208 17:47:53.221754 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:54.801906779+00:00 stderr F I1208 17:47:54.801755 1 reflector.go:376] Caches populated for *v1.Secret from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:47:55.773959118+00:00 stderr F I1208 17:47:55.773812 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:48:01.877805783+00:00 stderr F I1208 17:48:01.877031 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251 2025-12-08T17:48:03.646773364+00:00 stderr F I1208 17:48:03.645859 1 reflector.go:376] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.32.2/tools/cache/reflector.go:251
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm_b0b7331f-5f3a-41e7-84d0-64a9aa478c60/prometheus-operator-admission-webhook/0.log
2025-12-08T17:55:34.433937164+00:00 stdout F ts=2025-12-08T17:55:34.433674967Z level=info caller=/workspace/internal/goruntime/cpu.go:27 msg="Updating GOMAXPROCS=1: using minimum allowed GOMAXPROCS" 2025-12-08T17:55:34.439493277+00:00 stdout F ts=2025-12-08T17:55:34.439421905Z level=warn caller=/workspace/pkg/server/server.go:158 msg="server TLS client verification disabled" client_ca_file=/etc/tls/private/tls-ca.crt err="stat /etc/tls/private/tls-ca.crt: no such file or directory" 2025-12-08T17:55:34.509587210+00:00 stdout F ts=2025-12-08T17:55:34.509436666Z level=info caller=/workspace/pkg/server/server.go:295 msg="starting secure server" address=[::]:8443 http2=false 2025-12-08T17:55:34.509714334+00:00 stderr F I1208 17:55:34.509588 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:55:34.510529276+00:00 stderr F I1208 17:55:34.510500 1 dynamic_serving_content.go:135] "Starting controller" name="servingCert::/tmp/k8s-webhook-server/serving-certs/tls.crt::/tmp/k8s-webhook-server/serving-certs/tls.key"
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/machine-config-controller/0.log
2025-12-08T17:44:23.640353102+00:00 stderr F I1208 17:44:23.639486 1 start.go:61] Version: 89b561f0 (f587a1bfbaba518cc1d49ad6300e29eeb9c38cec) 2025-12-08T17:44:23.642382537+00:00 stderr F I1208 17:44:23.640855 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.
2025-12-08T17:44:23.642382537+00:00 stderr F I1208 17:44:23.640978 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:23.642382537+00:00 stderr F I1208 17:44:23.640987 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:44:23.642382537+00:00 stderr F I1208 17:44:23.640993 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:23.642382537+00:00 stderr F I1208 17:44:23.640997 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:23.642382537+00:00 stderr F I1208 17:44:23.641001 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:23.828158174+00:00 stderr F I1208 17:44:23.826601 1 leaderelection.go:257] attempting to acquire leader lease openshift-machine-config-operator/machine-config-controller... 2025-12-08T17:44:23.879575538+00:00 stderr F I1208 17:44:23.879212 1 leaderelection.go:271] successfully acquired lease openshift-machine-config-operator/machine-config-controller 2025-12-08T17:44:23.931344939+00:00 stderr F I1208 17:44:23.931286 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:23.935078541+00:00 stderr F I1208 17:44:23.931523 1 metrics.go:92] Registering Prometheus metrics 2025-12-08T17:44:23.935078541+00:00 stderr F I1208 17:44:23.931602 1 metrics.go:99] Starting metrics listener on 127.0.0.1:8797 2025-12-08T17:44:24.001137603+00:00 stderr F I1208 17:44:23.998905 1 certrotation_controller.go:173] MCS CA/TLS cert rotator not added 2025-12-08T17:44:24.019973487+00:00 stderr F I1208 17:44:24.018030 1 reflector.go:430] "Caches populated" type="*v1.ContainerRuntimeConfig" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.046047208+00:00 stderr F I1208 17:44:24.042079 1 reflector.go:430] "Caches populated" type="*v1.MachineOSBuild" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.052112563+00:00 stderr F I1208 17:44:24.052009 1 reflector.go:430] "Caches populated" type="*v1.MachineConfigPool" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.052896385+00:00 stderr F I1208 17:44:24.052650 1 reflector.go:430] "Caches populated" type="*v1.Image" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.053900022+00:00 stderr F I1208 17:44:24.053016 1 reflector.go:430] "Caches populated" type="*v1.ImageDigestMirrorSet" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.053900022+00:00 stderr F I1208 17:44:24.053398 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.053900022+00:00 stderr F I1208 17:44:24.053572 1 reflector.go:430] "Caches populated" type="*v1.ImageTagMirrorSet" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.053900022+00:00 stderr F I1208 17:44:24.053760 1 reflector.go:430] "Caches populated" type="*v1alpha1.ImageContentSourcePolicy" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.054894189+00:00 stderr F E1208 17:44:24.053982 1 
template_controller.go:245] "Unhandled Error" err="couldn't get ControllerConfig on dependency callback &%!w(errors.StatusError=errors.StatusError{ErrStatus:v1.Status{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ListMeta:v1.ListMeta{SelfLink:\"\", ResourceVersion:\"\", Continue:\"\", RemainingItemCount:(*int64)(nil)}, Status:\"Failure\", Message:\"controllerconfig.machineconfiguration.openshift.io \\\"machine-config-controller\\\" not found\", Reason:\"NotFound\", Details:(*v1.StatusDetails)(0xc000c2ade0), Code:404}})" 2025-12-08T17:44:24.054894189+00:00 stderr F I1208 17:44:24.054308 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.054894189+00:00 stderr F I1208 17:44:24.054586 1 reflector.go:430] "Caches populated" type="*v1.Scheduler" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.054894189+00:00 stderr F E1208 17:44:24.054697 1 node_controller.go:605] getting scheduler config failed: cluster scheduler couldn't be found 2025-12-08T17:44:24.054973021+00:00 stderr F I1208 17:44:24.054934 1 reflector.go:430] "Caches populated" type="*v1.MachineOSConfig" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.055599358+00:00 stderr F I1208 17:44:24.055051 1 template_controller.go:146] Re-syncing ControllerConfig due to secret pull-secret change 2025-12-08T17:44:24.056247277+00:00 stderr F I1208 17:44:24.056057 1 reflector.go:430] "Caches populated" type="*v1.KubeletConfig" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.059941907+00:00 stderr F I1208 17:44:24.057691 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.059941907+00:00 stderr F I1208 17:44:24.058365 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.072254282+00:00 stderr F I1208 17:44:24.071942 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.072254282+00:00 stderr F I1208 17:44:24.072163 1 reflector.go:430] "Caches populated" type="*v1.ControllerConfig" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.110601389+00:00 stderr F I1208 17:44:24.109511 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.110601389+00:00 stderr F I1208 17:44:24.109590 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.129355601+00:00 stderr F I1208 17:44:24.124377 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.139255100+00:00 stderr F I1208 17:44:24.138840 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:44:24.143267540+00:00 stderr F I1208 17:44:24.142137 1 featuregates.go:112] FeatureGates initialized: enabled=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity 
BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks], disabled=[AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:24.143267540+00:00 stderr F I1208 17:44:24.142741 1 event.go:377] Event(v1.ObjectReference{Kind:"Node", Namespace:"openshift-machine-config-operator", Name:"crc", UID:"23216ff3-032e-49af-af7e-1d23d5907b59", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", 
"AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:24.144017200+00:00 stderr F I1208 17:44:24.143695 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.144017200+00:00 stderr F I1208 17:44:24.143991 1 reflector.go:430] "Caches populated" type="*v1.MachineConfiguration" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.150738693+00:00 stderr F I1208 17:44:24.148705 1 reflector.go:430] "Caches populated" type="*v1.PinnedImageSet" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.150738693+00:00 stderr F I1208 17:44:24.149409 1 certrotation_controller.go:192] Starting machineconfigcontroller-certrotationcontroller 2025-12-08T17:44:24.150738693+00:00 stderr F I1208 17:44:24.149417 1 certrotation_controller.go:180] Waiting for machineconfigcontroller-certrotationcontroller 2025-12-08T17:44:24.150738693+00:00 stderr F I1208 17:44:24.149426 1 certrotation_controller.go:188] Finished waiting for machineconfigcontroller-certrotationcontroller 2025-12-08T17:44:24.150738693+00:00 stderr F I1208 17:44:24.149430 1 certrotation_controller.go:198] No cert rotators needed, shutting down 2025-12-08T17:44:24.150738693+00:00 stderr F I1208 17:44:24.149434 1 certrotation_controller.go:199] Shutting down machineconfigcontroller-certrotationcontroller 2025-12-08T17:44:24.150738693+00:00 stderr F I1208 17:44:24.149584 1 container_runtime_config_controller.go:234] addded image policy observers with sigstore featuregate enabled 2025-12-08T17:44:24.150738693+00:00 stderr F I1208 17:44:24.150617 1 drain_controller.go:178] Starting MachineConfigController-DrainController 
2025-12-08T17:44:24.152894582+00:00 stderr F I1208 17:44:24.151154 1 reflector.go:430] "Caches populated" type="*v1.APIServer" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.152894582+00:00 stderr F I1208 17:44:24.151331 1 kubelet_config_controller.go:221] Re-syncing all kubelet config controller generated MachineConfigs due to apiServer cluster change 2025-12-08T17:44:24.152894582+00:00 stderr F I1208 17:44:24.151672 1 template_controller.go:198] Re-syncing ControllerConfig due to apiServer cluster change 2025-12-08T17:44:24.152894582+00:00 stderr F I1208 17:44:24.152362 1 reflector.go:430] "Caches populated" type="*v1.ClusterImagePolicy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.173303110+00:00 stderr F I1208 17:44:24.173248 1 reflector.go:430] "Caches populated" type="*v1.MachineConfig" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.228362261+00:00 stderr F I1208 17:44:24.227922 1 reflector.go:430] "Caches populated" type="*v1.ImagePolicy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:44:24.246896477+00:00 stderr F I1208 17:44:24.246848 1 pinned_image_set.go:115] Starting MachineConfigController-PinnedImageSetController 2025-12-08T17:44:24.253180928+00:00 stderr F I1208 17:44:24.252103 1 node_controller.go:264] Starting MachineConfigController-NodeController 2025-12-08T17:44:24.253180928+00:00 stderr F I1208 17:44:24.252135 1 template_controller.go:294] Starting MachineConfigController-TemplateController 2025-12-08T17:44:24.258288727+00:00 stderr F I1208 17:44:24.254915 1 kubelet_config_controller.go:200] Starting MachineConfigController-KubeletConfigController 2025-12-08T17:44:24.258288727+00:00 stderr F I1208 17:44:24.255074 1 container_runtime_config_controller.go:244] Starting MachineConfigController-ContainerRuntimeConfigController 2025-12-08T17:44:24.258288727+00:00 stderr F I1208 17:44:24.255239 1 render_controller.go:155] Starting MachineConfigController-RenderController 2025-12-08T17:44:24.717736320+00:00 stderr F I1208 17:44:24.717509 1 kubelet_config_features.go:125] Applied FeatureSet cluster on MachineConfigPool master 2025-12-08T17:44:24.829980911+00:00 stderr F I1208 17:44:24.829926 1 kubelet_config_nodes.go:162] Applied Node configuration 97-master-generated-kubelet on MachineConfigPool master 2025-12-08T17:44:25.078941672+00:00 stderr F I1208 17:44:25.077828 1 kubelet_config_features.go:125] Applied FeatureSet cluster on MachineConfigPool worker 2025-12-08T17:44:25.492915275+00:00 stderr F I1208 17:44:25.486683 1 kubelet_config_nodes.go:162] Applied Node configuration 97-worker-generated-kubelet on MachineConfigPool worker 2025-12-08T17:44:26.474031396+00:00 stderr F I1208 17:44:26.473241 1 kubelet_config_features.go:125] Applied FeatureSet cluster on MachineConfigPool worker 2025-12-08T17:44:27.059264820+00:00 stderr F I1208 17:44:27.059212 1 kubelet_config_features.go:125] Applied FeatureSet cluster on MachineConfigPool master 2025-12-08T17:44:29.059020347+00:00 stderr F I1208 17:44:29.058970 1 status.go:325] Degraded Machine: crc and Degraded Reason: unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file "/var/lib/kubelet/config.json" 2025-12-08T17:44:29.172189633+00:00 stderr F W1208 17:44:29.172110 1 render_controller.go:673] 
OSImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" for MachineConfig rendered-master-d582710c680b4cd4536e11249c7e09e9 is set using a tag instead of a digest. It is highly recommended to use a digest 2025-12-08T17:44:29.348638486+00:00 stderr F E1208 17:44:29.348206 1 render_controller.go:475] Error syncing Generated MCFG: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "master": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:44:29.360479379+00:00 stderr F E1208 17:44:29.360266 1 render_controller.go:497] Error updating MachineConfigPool master: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "master": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:44:29.360479379+00:00 stderr F I1208 17:44:29.360288 1 render_controller.go:408] Error syncing machineconfigpool master: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "master": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:44:29.460261811+00:00 stderr F W1208 17:44:29.459472 1 render_controller.go:673] OSImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" for MachineConfig rendered-master-d582710c680b4cd4536e11249c7e09e9 is set using a tag instead of a digest. It is highly recommended to use a digest 2025-12-08T17:44:34.088577177+00:00 stderr F I1208 17:44:34.088522 1 status.go:325] Degraded Machine: crc and Degraded Reason: unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file "/var/lib/kubelet/config.json" 2025-12-08T17:45:13.078656111+00:00 stderr F I1208 17:45:13.078076 1 kubelet_config_controller.go:221] Re-syncing all kubelet config controller generated MachineConfigs due to apiServer cluster change 2025-12-08T17:45:13.079556936+00:00 stderr F I1208 17:45:13.078120 1 template_controller.go:198] Re-syncing ControllerConfig due to apiServer cluster change 2025-12-08T17:45:13.247939311+00:00 stderr F I1208 17:45:13.247862 1 kubelet_config_nodes.go:162] Applied Node configuration 97-master-generated-kubelet on MachineConfigPool master 2025-12-08T17:45:13.362921412+00:00 stderr F I1208 17:45:13.362851 1 kubelet_config_nodes.go:162] Applied Node configuration 97-worker-generated-kubelet on MachineConfigPool worker 2025-12-08T17:45:13.487668322+00:00 stderr F I1208 17:45:13.487566 1 kubelet_config_features.go:125] Applied FeatureSet cluster on MachineConfigPool master 2025-12-08T17:45:14.088592553+00:00 stderr F I1208 17:45:14.088528 1 kubelet_config_features.go:125] Applied FeatureSet cluster on MachineConfigPool worker 2025-12-08T17:46:23.892042651+00:00 stderr F E1208 17:46:23.891000 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-machine-config-operator/leases/machine-config-controller": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:23.893397271+00:00 stderr F E1208 17:46:23.893319 1 leaderelection.go:436] error retrieving resource lock openshift-machine-config-operator/machine-config-controller: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-machine-config-operator/leases/machine-config-controller": dial tcp 
10.217.4.1:443: connect: connection refused 2025-12-08T17:47:04.135319031+00:00 stderr F I1208 17:47:04.135221 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:09.651659852+00:00 stderr F I1208 17:47:09.651308 1 reflector.go:430] "Caches populated" type="*v1.MachineConfigPool" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:47:10.352927987+00:00 stderr F I1208 17:47:10.352582 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:14.448228133+00:00 stderr F I1208 17:47:14.448151 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:14.678679598+00:00 stderr F I1208 17:47:14.678613 1 status.go:325] Degraded Machine: crc and Degraded Reason: unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file "/var/lib/kubelet/config.json" 2025-12-08T17:47:14.736585271+00:00 stderr F W1208 17:47:14.736519 1 render_controller.go:673] OSImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" for MachineConfig rendered-master-d582710c680b4cd4536e11249c7e09e9 is set using a tag instead of a digest. It is highly recommended to use a digest 2025-12-08T17:47:15.498073742+00:00 stderr F I1208 17:47:15.497998 1 reflector.go:430] "Caches populated" type="*v1.MachineConfig" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:47:16.000781086+00:00 stderr F I1208 17:47:16.000455 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:17.855861282+00:00 stderr F I1208 17:47:17.855809 1 reflector.go:430] "Caches populated" type="*v1.ImageTagMirrorSet" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:17.929228532+00:00 stderr F I1208 17:47:17.929124 1 reflector.go:430] "Caches populated" type="*v1.ContainerRuntimeConfig" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:47:20.182534585+00:00 stderr F I1208 17:47:20.182135 1 reflector.go:430] "Caches populated" type="*v1alpha1.ImageContentSourcePolicy" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125" 2025-12-08T17:47:20.304802953+00:00 stderr F I1208 17:47:20.304712 1 reflector.go:430] "Caches populated" type="*v1.Scheduler" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:20.566214663+00:00 stderr F W1208 17:47:20.565866 1 render_controller.go:673] OSImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" for MachineConfig rendered-master-842a93c7bb3e86c26c29ba8a7f596b70 is set using a tag instead of a digest. 
It is highly recommended to use a digest 2025-12-08T17:47:20.652061844+00:00 stderr F I1208 17:47:20.651268 1 render_controller.go:584] Generated machineconfig rendered-master-842a93c7bb3e86c26c29ba8a7f596b70 from 11 configs: [{MachineConfig 00-master machineconfiguration.openshift.io/v1 } {MachineConfig 01-master-container-runtime machineconfiguration.openshift.io/v1 } {MachineConfig 01-master-kubelet machineconfiguration.openshift.io/v1 } {MachineConfig 97-master-generated-kubelet machineconfiguration.openshift.io/v1 } {MachineConfig 98-master-generated-kubelet machineconfiguration.openshift.io/v1 } {MachineConfig 99-master-generated-registries machineconfiguration.openshift.io/v1 } {MachineConfig 99-master-ssh machineconfiguration.openshift.io/v1 } {MachineConfig 99-node-sizing-for-crc machineconfiguration.openshift.io/v1 } {MachineConfig 99-openshift-machineconfig-master-console machineconfiguration.openshift.io/v1 } {MachineConfig 99-openshift-machineconfig-master-dummy-networks machineconfiguration.openshift.io/v1 } {MachineConfig custom-image machineconfiguration.openshift.io/v1 }] 2025-12-08T17:47:20.652061844+00:00 stderr F I1208 17:47:20.651472 1 event.go:377] Event(v1.ObjectReference{Kind:"MachineConfig", Namespace:"openshift-machine-config-operator", Name:"rendered-master-842a93c7bb3e86c26c29ba8a7f596b70", UID:"", APIVersion:"machineconfiguration.openshift.io/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OSImageURLOverridden' OSImageURL was overridden via machineconfig in rendered-master-842a93c7bb3e86c26c29ba8a7f596b70 (was: is: image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest) 2025-12-08T17:47:20.652061844+00:00 stderr F I1208 17:47:20.651518 1 event.go:377] Event(v1.ObjectReference{Kind:"MachineConfigPool", Namespace:"openshift-machine-config-operator", Name:"master", UID:"3b9df6d6-bacd-4862-b99f-10ec7fcf29ac", APIVersion:"machineconfiguration.openshift.io/v1", ResourceVersion:"38019", FieldPath:""}): type: 'Normal' reason: 'RenderedConfigGenerated' rendered-master-842a93c7bb3e86c26c29ba8a7f596b70 successfully generated (release version: 4.20.1, controller version: f587a1bfbaba518cc1d49ad6300e29eeb9c38cec) 2025-12-08T17:47:20.656923628+00:00 stderr F I1208 17:47:20.655482 1 render_controller.go:584] Generated machineconfig rendered-worker-3dcc16ccd3a3eea0254ec40e36d4bfbe from 7 configs: [{MachineConfig 00-worker machineconfiguration.openshift.io/v1 } {MachineConfig 01-worker-container-runtime machineconfiguration.openshift.io/v1 } {MachineConfig 01-worker-kubelet machineconfiguration.openshift.io/v1 } {MachineConfig 97-worker-generated-kubelet machineconfiguration.openshift.io/v1 } {MachineConfig 98-worker-generated-kubelet machineconfiguration.openshift.io/v1 } {MachineConfig 99-worker-generated-registries machineconfiguration.openshift.io/v1 } {MachineConfig 99-worker-ssh machineconfiguration.openshift.io/v1 }] 2025-12-08T17:47:20.656923628+00:00 stderr F I1208 17:47:20.656284 1 event.go:377] Event(v1.ObjectReference{Kind:"MachineConfigPool", Namespace:"openshift-machine-config-operator", Name:"worker", UID:"633fcfae-03e0-4a3a-8d5c-de9a658e82f6", APIVersion:"machineconfiguration.openshift.io/v1", ResourceVersion:"32065", FieldPath:""}): type: 'Normal' reason: 'RenderedConfigGenerated' rendered-worker-3dcc16ccd3a3eea0254ec40e36d4bfbe successfully generated (release version: 4.20.1, controller version: f587a1bfbaba518cc1d49ad6300e29eeb9c38cec) 2025-12-08T17:47:20.663450883+00:00 stderr F I1208 17:47:20.662768 
1 render_controller.go:610] Pool master: now targeting: rendered-master-842a93c7bb3e86c26c29ba8a7f596b70 2025-12-08T17:47:20.663710831+00:00 stderr F I1208 17:47:20.663573 1 render_controller.go:610] Pool worker: now targeting: rendered-worker-3dcc16ccd3a3eea0254ec40e36d4bfbe 2025-12-08T17:47:25.666380611+00:00 stderr F I1208 17:47:25.665615 1 status.go:273] Pool worker: All nodes are updated with MachineConfig rendered-worker-3dcc16ccd3a3eea0254ec40e36d4bfbe 2025-12-08T17:47:25.678218903+00:00 stderr F I1208 17:47:25.678160 1 event.go:377] Event(v1.ObjectReference{Kind:"MachineConfigPool", Namespace:"openshift-machine-config-operator", Name:"worker", UID:"633fcfae-03e0-4a3a-8d5c-de9a658e82f6", APIVersion:"machineconfiguration.openshift.io/v1", ResourceVersion:"39100", FieldPath:""}): type: 'Normal' reason: 'Completed' Pool worker has completed update to MachineConfig rendered-worker-3dcc16ccd3a3eea0254ec40e36d4bfbe 2025-12-08T17:47:25.688208768+00:00 stderr F E1208 17:47:25.688081 1 pinned_image_set.go:350] Error syncing pinned image sets: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:25.690020235+00:00 stderr F I1208 17:47:25.689961 1 node_controller.go:676] Pool master: node crc: changed taints 2025-12-08T17:47:25.693410142+00:00 stderr F I1208 17:47:25.693367 1 status.go:325] Degraded Machine: crc and Degraded Reason: unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file "/var/lib/kubelet/config.json" 2025-12-08T17:47:25.695987123+00:00 stderr F E1208 17:47:25.694852 1 pinned_image_set.go:373] Error updating MachineConfigPool worker: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:25.695987123+00:00 stderr F I1208 17:47:25.694916 1 pinned_image_set.go:299] Error syncing machineconfigpool worker: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:25.739607646+00:00 stderr F W1208 17:47:25.739517 1 render_controller.go:673] OSImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" for MachineConfig rendered-master-842a93c7bb3e86c26c29ba8a7f596b70 is set using a tag instead of a digest. 
It is highly recommended to use a digest 2025-12-08T17:47:25.795303529+00:00 stderr F E1208 17:47:25.795223 1 render_controller.go:475] Error syncing Generated MCFG: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:25.799707548+00:00 stderr F E1208 17:47:25.799660 1 render_controller.go:497] Error updating MachineConfigPool worker: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:25.799707548+00:00 stderr F I1208 17:47:25.799681 1 render_controller.go:408] Error syncing machineconfigpool worker: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:25.811456708+00:00 stderr F E1208 17:47:25.811058 1 render_controller.go:475] Error syncing Generated MCFG: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "master": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:25.816208978+00:00 stderr F E1208 17:47:25.816169 1 render_controller.go:497] Error updating MachineConfigPool master: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "master": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:25.816268229+00:00 stderr F I1208 17:47:25.816253 1 render_controller.go:408] Error syncing machineconfigpool master: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "master": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:25.880501032+00:00 stderr F W1208 17:47:25.880428 1 render_controller.go:673] OSImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" for MachineConfig rendered-master-842a93c7bb3e86c26c29ba8a7f596b70 is set using a tag instead of a digest. 
It is highly recommended to use a digest 2025-12-08T17:47:30.695657358+00:00 stderr F I1208 17:47:30.695150 1 status.go:325] Degraded Machine: crc and Degraded Reason: unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file "/var/lib/kubelet/config.json" 2025-12-08T17:47:40.284933109+00:00 stderr F I1208 17:47:40.284091 1 reflector.go:430] "Caches populated" type="*v1.ControllerConfig" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:47:41.825940708+00:00 stderr F I1208 17:47:41.825804 1 reflector.go:430] "Caches populated" type="*v1.ClusterImagePolicy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:42.531641783+00:00 stderr F I1208 17:47:42.531547 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:42.534564535+00:00 stderr F I1208 17:47:42.533260 1 template_controller.go:146] Re-syncing ControllerConfig due to secret pull-secret change 2025-12-08T17:47:47.312339574+00:00 stderr F I1208 17:47:47.312270 1 reflector.go:430] "Caches populated" type="*v1.Image" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:47.391820045+00:00 stderr F I1208 17:47:47.391741 1 reflector.go:430] "Caches populated" type="*v1.MachineOSBuild" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:47:48.868786680+00:00 stderr F I1208 17:47:48.868719 1 reflector.go:430] "Caches populated" type="*v1.MachineOSConfig" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:47:51.788359325+00:00 stderr F I1208 17:47:51.788242 1 reflector.go:430] "Caches populated" type="*v1.ImagePolicy" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:47:56.688952741+00:00 stderr F I1208 17:47:56.688693 1 reflector.go:430] "Caches populated" type="*v1.PinnedImageSet" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:47:57.878783381+00:00 stderr F I1208 17:47:57.878726 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:58.065634584+00:00 stderr F I1208 17:47:58.065563 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:48:00.670111973+00:00 stderr F I1208 17:48:00.669607 1 reflector.go:430] "Caches populated" type="*v1.MachineConfiguration" reflector="github.com/openshift/client-go/operator/informers/externalversions/factory.go:125" 2025-12-08T17:48:02.801957263+00:00 stderr F I1208 17:48:02.801865 1 reflector.go:430] "Caches populated" type="*v1.KubeletConfig" reflector="github.com/openshift/client-go/machineconfiguration/informers/externalversions/factory.go:125" 2025-12-08T17:48:03.749379899+00:00 stderr F I1208 17:48:03.748494 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:48:04.456798972+00:00 stderr F I1208 17:48:04.456727 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 
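The repeated "Operation cannot be fulfilled ... the object has been modified" errors above and below are ordinary optimistic-concurrency conflicts on the MachineConfigPool objects: the write raced another writer and must be retried against the latest resourceVersion, which is why the controller requeues the sync rather than failing. For illustration only, a minimal sketch of the standard client-go retry-on-conflict pattern; the namespace, ConfigMap name, and annotation key are hypothetical stand-ins, not objects from this cluster.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/util/retry"
)

func main() {
	// Assumes the program runs inside the cluster; use clientcmd instead for kubeconfig-based access.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// RetryOnConflict re-runs the closure whenever the update fails with a
	// 409 Conflict ("the object has been modified"), so every attempt reads
	// the latest resourceVersion before mutating and writing it back.
	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
		cm, getErr := client.CoreV1().ConfigMaps("demo").Get(context.TODO(), "demo-config", metav1.GetOptions{})
		if getErr != nil {
			return getErr
		}
		if cm.Annotations == nil {
			cm.Annotations = map[string]string{}
		}
		cm.Annotations["example.io/touched"] = "true" // hypothetical change
		_, updateErr := client.CoreV1().ConfigMaps("demo").Update(context.TODO(), cm, metav1.UpdateOptions{})
		return updateErr
	})
	if err != nil {
		fmt.Println("update failed after retries:", err)
	}
}
```

Because the controller follows this pattern, the conflict lines in this log are typically transient: the same pool sync is retried and later reports success.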
2025-12-08T17:48:09.397437684+00:00 stderr F I1208 17:48:09.397049 1 reflector.go:430] "Caches populated" type="*v1.ImageDigestMirrorSet" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:48:12.827816142+00:00 stderr F I1208 17:48:12.827316 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:48:14.577457568+00:00 stderr F I1208 17:48:14.576097 1 reflector.go:430] "Caches populated" type="*v1.APIServer" reflector="github.com/openshift/client-go/config/informers/externalversions/factory.go:125" 2025-12-08T17:48:16.535224072+00:00 stderr F I1208 17:48:16.534705 1 node_controller.go:1120] Pool master is paused and will not update. 2025-12-08T17:48:16.542812931+00:00 stderr F I1208 17:48:16.542758 1 status.go:325] Degraded Machine: crc and Degraded Reason: unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file "/var/lib/kubelet/config.json" 2025-12-08T17:48:16.587134942+00:00 stderr F W1208 17:48:16.587068 1 render_controller.go:673] OSImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" for MachineConfig rendered-master-842a93c7bb3e86c26c29ba8a7f596b70 is set using a tag instead of a digest. It is highly recommended to use a digest 2025-12-08T17:48:16.662294196+00:00 stderr F E1208 17:48:16.662248 1 render_controller.go:475] Error syncing Generated MCFG: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "master": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:48:16.666618587+00:00 stderr F E1208 17:48:16.666591 1 render_controller.go:497] Error updating MachineConfigPool master: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "master": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:48:16.666661219+00:00 stderr F I1208 17:48:16.666649 1 render_controller.go:408] Error syncing machineconfigpool master: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "master": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:48:16.724740795+00:00 stderr F W1208 17:48:16.724666 1 render_controller.go:673] OSImageURL "image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest" for MachineConfig rendered-master-842a93c7bb3e86c26c29ba8a7f596b70 is set using a tag instead of a digest. 
It is highly recommended to use a digest 2025-12-08T17:48:17.044396576+00:00 stderr F E1208 17:48:17.043765 1 pinned_image_set.go:350] Error syncing pinned image sets: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:48:17.051473521+00:00 stderr F E1208 17:48:17.051423 1 pinned_image_set.go:373] Error updating MachineConfigPool worker: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:48:17.051575714+00:00 stderr F I1208 17:48:17.051553 1 pinned_image_set.go:299] Error syncing machineconfigpool worker: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:48:17.161297094+00:00 stderr F E1208 17:48:17.161217 1 render_controller.go:475] Error syncing Generated MCFG: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:48:17.165213373+00:00 stderr F E1208 17:48:17.165178 1 render_controller.go:497] Error updating MachineConfigPool worker: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:48:17.165213373+00:00 stderr F I1208 17:48:17.165195 1 render_controller.go:408] Error syncing machineconfigpool worker: Operation cannot be fulfilled on machineconfigpools.machineconfiguration.openshift.io "worker": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:48:21.556362309+00:00 stderr F I1208 17:48:21.556309 1 status.go:325] Degraded Machine: crc and Degraded Reason: unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file "/var/lib/kubelet/config.json" ././@LongLink0000644000000000000000000000032000000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000755000175000017500000000000015115611521032775 5ustar zuulzuul././@LongLink0000644000000000000000000000032500000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-conf0000644000175000017500000000227415115611514033006 0ustar zuulzuul2025-12-08T17:44:23.482979659+00:00 stderr F W1208 17:44:23.482586 1 deprecated.go:66] 2025-12-08T17:44:23.482979659+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:23.482979659+00:00 stderr F 2025-12-08T17:44:23.482979659+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 
2025-12-08T17:44:23.482979659+00:00 stderr F 2025-12-08T17:44:23.482979659+00:00 stderr F =============================================== 2025-12-08T17:44:23.482979659+00:00 stderr F 2025-12-08T17:44:23.483319388+00:00 stderr F I1208 17:44:23.483306 1 kube-rbac-proxy.go:532] Reading config file: /etc/kube-rbac-proxy/config-file.yaml 2025-12-08T17:44:23.483967756+00:00 stderr F I1208 17:44:23.483950 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:23.485004265+00:00 stderr F I1208 17:44:23.484977 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:23.485405026+00:00 stderr F I1208 17:44:23.485388 1 kube-rbac-proxy.go:397] Starting TCP socket on 0.0.0.0:9001 2025-12-08T17:44:23.485859718+00:00 stderr F I1208 17:44:23.485847 1 kube-rbac-proxy.go:404] Listening securely on 0.0.0.0:9001 ././@LongLink0000644000000000000000000000024600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000755000175000017500000000000015115611514033023 5ustar zuulzuul././@LongLink0000644000000000000000000000026500000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000755000175000017500000000000015115611521033021 5ustar zuulzuul././@LongLink0000644000000000000000000000027200000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000644000175000017500000050605415115611514033037 0ustar zuulzuul2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846809 1 feature_gate.go:328] unrecognized feature gate: Example 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846908 1 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846913 1 feature_gate.go:328] unrecognized feature gate: InsightsConfig 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846916 1 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846919 1 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846922 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846924 1 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846927 1 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846930 1 feature_gate.go:328] unrecognized feature gate: GatewayAPI 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846932 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846934 1 feature_gate.go:328] unrecognized feature 
gate: MachineAPIOperatorDisableMachineHealthCheckController 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846937 1 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846940 1 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846949 1 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata 2025-12-08T17:42:24.846962519+00:00 stderr F W1208 17:42:24.846951 1 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846955 1 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846960 1 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846963 1 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846965 1 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846968 1 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846970 1 feature_gate.go:328] unrecognized feature gate: NewOLM 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846973 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImages 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846975 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846977 1 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846980 1 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846982 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846984 1 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846987 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846989 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846992 1 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846994 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846996 1 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.846999 1 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847001 1 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847004 1 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements 
2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847006 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847009 1 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847011 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847013 1 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847016 1 feature_gate.go:328] unrecognized feature gate: DualReplica 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847020 1 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847023 1 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847026 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847028 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847030 1 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847033 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847035 1 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847038 1 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847041 1 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847043 1 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847046 1 feature_gate.go:328] unrecognized feature gate: Example2 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847048 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847050 1 feature_gate.go:328] unrecognized feature gate: ShortCertRotation 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847052 1 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847054 1 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847057 1 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847060 1 feature_gate.go:328] unrecognized feature gate: SignatureStores 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847062 1 feature_gate.go:328] unrecognized feature gate: GatewayAPIController 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847064 1 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847066 1 feature_gate.go:328] unrecognized feature gate: UpgradeStatus 
2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847068 1 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847071 1 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847073 1 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847075 1 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847077 1 feature_gate.go:328] unrecognized feature gate: DNSNameResolver 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847080 1 feature_gate.go:328] unrecognized feature gate: OVNObservability 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847082 1 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847084 1 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847086 1 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847088 1 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847091 1 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847093 1 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847095 1 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847098 1 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847100 1 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix 2025-12-08T17:42:24.847111365+00:00 stderr F W1208 17:42:24.847103 1 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS 2025-12-08T17:42:24.847146867+00:00 stderr F W1208 17:42:24.847105 1 feature_gate.go:328] unrecognized feature gate: PinnedImages 2025-12-08T17:42:24.847146867+00:00 stderr F W1208 17:42:24.847112 1 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController 2025-12-08T17:42:24.847146867+00:00 stderr F W1208 17:42:24.847115 1 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform 2025-12-08T17:42:24.847146867+00:00 stderr F W1208 17:42:24.847117 1 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation 2025-12-08T17:42:24.847146867+00:00 stderr F W1208 17:42:24.847120 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall 2025-12-08T17:42:24.847146867+00:00 stderr F W1208 17:42:24.847122 1 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration 2025-12-08T17:42:24.847146867+00:00 stderr F W1208 17:42:24.847125 1 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting 2025-12-08T17:42:24.847146867+00:00 stderr F W1208 17:42:24.847127 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDC 2025-12-08T17:42:24.847146867+00:00 stderr F W1208 17:42:24.847129 1 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation 2025-12-08T17:42:24.847146867+00:00 stderr F W1208 17:42:24.847132 1 
feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider 2025-12-08T17:42:24.847552324+00:00 stderr F I1208 17:42:24.847518 1 flags.go:64] FLAG: --allow-metric-labels="[]" 2025-12-08T17:42:24.847552324+00:00 stderr F I1208 17:42:24.847538 1 flags.go:64] FLAG: --allow-metric-labels-manifest="" 2025-12-08T17:42:24.847552324+00:00 stderr F I1208 17:42:24.847544 1 flags.go:64] FLAG: --authentication-kubeconfig="/etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig" 2025-12-08T17:42:24.847552324+00:00 stderr F I1208 17:42:24.847548 1 flags.go:64] FLAG: --authentication-skip-lookup="false" 2025-12-08T17:42:24.847561704+00:00 stderr F I1208 17:42:24.847552 1 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="10s" 2025-12-08T17:42:24.847561704+00:00 stderr F I1208 17:42:24.847557 1 flags.go:64] FLAG: --authentication-tolerate-lookup-failure="true" 2025-12-08T17:42:24.847569955+00:00 stderr F I1208 17:42:24.847560 1 flags.go:64] FLAG: --authorization-always-allow-paths="[/healthz,/readyz,/livez]" 2025-12-08T17:42:24.847569955+00:00 stderr F I1208 17:42:24.847565 1 flags.go:64] FLAG: --authorization-kubeconfig="/etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig" 2025-12-08T17:42:24.847577445+00:00 stderr F I1208 17:42:24.847569 1 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="10s" 2025-12-08T17:42:24.847577445+00:00 stderr F I1208 17:42:24.847573 1 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="10s" 2025-12-08T17:42:24.847584885+00:00 stderr F I1208 17:42:24.847576 1 flags.go:64] FLAG: --bind-address="0.0.0.0" 2025-12-08T17:42:24.847584885+00:00 stderr F I1208 17:42:24.847580 1 flags.go:64] FLAG: --cert-dir="/var/run/kubernetes" 2025-12-08T17:42:24.847592526+00:00 stderr F I1208 17:42:24.847584 1 flags.go:64] FLAG: --client-ca-file="" 2025-12-08T17:42:24.847592526+00:00 stderr F I1208 17:42:24.847587 1 flags.go:64] FLAG: --config="/etc/kubernetes/static-pod-resources/configmaps/config/config.yaml" 2025-12-08T17:42:24.847599946+00:00 stderr F I1208 17:42:24.847590 1 flags.go:64] FLAG: --contention-profiling="true" 2025-12-08T17:42:24.847599946+00:00 stderr F I1208 17:42:24.847593 1 flags.go:64] FLAG: --disable-http2-serving="false" 2025-12-08T17:42:24.847599946+00:00 stderr F I1208 17:42:24.847596 1 flags.go:64] FLAG: --disabled-metrics="[]" 2025-12-08T17:42:24.847607836+00:00 stderr F I1208 17:42:24.847600 1 flags.go:64] FLAG: --emulated-version="[]" 2025-12-08T17:42:24.847685550+00:00 stderr F I1208 17:42:24.847603 1 flags.go:64] FLAG: 
--feature-gates=":AWSClusterHostedDNS=false,:AWSClusterHostedDNSInstall=false,:AWSDedicatedHosts=false,:AWSServiceLBNetworkSecurityGroup=false,:AdditionalRoutingCapabilities=true,:AdminNetworkPolicy=true,:AlibabaPlatform=true,:AutomatedEtcdBackup=false,:AzureClusterHostedDNSInstall=false,:AzureDedicatedHosts=false,:AzureMultiDisk=false,:AzureWorkloadIdentity=true,:BootImageSkewEnforcement=false,:BootcNodeManagement=false,:BuildCSIVolumes=true,:CPMSMachineNamePrefix=true,:ClusterAPIInstall=false,:ClusterAPIInstallIBMCloud=false,:ClusterMonitoringConfig=false,:ClusterVersionOperatorConfiguration=false,:ConsolePluginContentSecurityPolicy=true,:DNSNameResolver=false,:DualReplica=false,:DyanmicServiceEndpointIBMCloud=false,:DynamicResourceAllocation=false,:EtcdBackendQuota=false,:EventedPLEG=false,:Example=false,:Example2=false,:ExternalOIDC=false,:ExternalOIDCWithUIDAndExtraClaimMappings=false,:ExternalSnapshotMetadata=false,:GCPClusterHostedDNS=false,:GCPClusterHostedDNSInstall=false,:GCPCustomAPIEndpoints=false,:GCPCustomAPIEndpointsInstall=false,:GatewayAPI=true,:GatewayAPIController=true,:HighlyAvailableArbiter=true,:ImageModeStatusReporting=false,:ImageStreamImportMode=false,:ImageVolume=true,:IngressControllerDynamicConfigurationManager=false,:IngressControllerLBSubnetsAWS=true,:InsightsConfig=false,:InsightsConfigAPI=false,:InsightsOnDemandDataGather=false,:IrreconcilableMachineConfig=false,:KMSEncryptionProvider=false,:KMSv1=true,:MachineAPIMigration=false,:MachineAPIOperatorDisableMachineHealthCheckController=false,:MachineConfigNodes=true,:ManagedBootImages=true,:ManagedBootImagesAWS=true,:ManagedBootImagesAzure=false,:ManagedBootImagesvSphere=false,:MaxUnavailableStatefulSet=false,:MetricsCollectionProfiles=true,:MinimumKubeletVersion=false,:MixedCPUsAllocation=false,:MultiArchInstallAzure=false,:MultiDiskSetup=false,:MutatingAdmissionPolicy=false,:NetworkDiagnosticsConfig=true,:NetworkLiveMigration=true,:NetworkSegmentation=true,:NewOLM=true,:NewOLMCatalogdAPIV1Metas=false,:NewOLMOwnSingleNamespace=false,:NewOLMPreflightPermissionChecks=false,:NewOLMWebhookProviderOpenshiftServiceCA=false,:NoRegistryClusterOperations=false,:NodeSwap=false,:NutanixMultiSubnets=false,:OVNObservability=false,:OpenShiftPodSecurityAdmission=false,:PinnedImages=true,:PreconfiguredUDNAddresses=false,:ProcMountType=true,:RouteAdvertisements=true,:RouteExternalCertificate=true,:SELinuxMount=false,:ServiceAccountTokenNodeBinding=true,:SetEIPForNLBIngressController=true,:ShortCertRotation=false,:SignatureStores=false,:SigstoreImageVerification=true,:SigstoreImageVerificationPKI=false,:StoragePerformantSecurityPolicy=true,:TranslateStreamCloseWebsocketRequests=false,:UpgradeStatus=true,:UserNamespacesPodSecurityStandards=true,:UserNamespacesSupport=true,:VSphereConfigurableMaxAllowedBlockVolumesPerNode=false,:VSphereHostVMGroupZonal=false,:VSphereMixedNodeEnv=false,:VSphereMultiDisk=true,:VSphereMultiNetworks=true,:VolumeAttributesClass=false,:VolumeGroupSnapshot=false" 2025-12-08T17:42:24.847685550+00:00 stderr F I1208 17:42:24.847671 1 flags.go:64] FLAG: --help="false" 2025-12-08T17:42:24.847685550+00:00 stderr F I1208 17:42:24.847674 1 flags.go:64] FLAG: --http2-max-streams-per-connection="0" 2025-12-08T17:42:24.847685550+00:00 stderr F I1208 17:42:24.847679 1 flags.go:64] FLAG: --kube-api-burst="100" 2025-12-08T17:42:24.847698720+00:00 stderr F I1208 17:42:24.847683 1 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" 2025-12-08T17:42:24.847698720+00:00 stderr F I1208 
17:42:24.847687 1 flags.go:64] FLAG: --kube-api-qps="50" 2025-12-08T17:42:24.847698720+00:00 stderr F I1208 17:42:24.847692 1 flags.go:64] FLAG: --kubeconfig="" 2025-12-08T17:42:24.847698720+00:00 stderr F I1208 17:42:24.847695 1 flags.go:64] FLAG: --leader-elect="true" 2025-12-08T17:42:24.847720081+00:00 stderr F I1208 17:42:24.847705 1 flags.go:64] FLAG: --leader-elect-lease-duration="15s" 2025-12-08T17:42:24.847720081+00:00 stderr F I1208 17:42:24.847713 1 flags.go:64] FLAG: --leader-elect-renew-deadline="10s" 2025-12-08T17:42:24.847720081+00:00 stderr F I1208 17:42:24.847716 1 flags.go:64] FLAG: --leader-elect-resource-lock="leases" 2025-12-08T17:42:24.847728091+00:00 stderr F I1208 17:42:24.847720 1 flags.go:64] FLAG: --leader-elect-resource-name="kube-scheduler" 2025-12-08T17:42:24.847728091+00:00 stderr F I1208 17:42:24.847722 1 flags.go:64] FLAG: --leader-elect-resource-namespace="kube-system" 2025-12-08T17:42:24.847735452+00:00 stderr F I1208 17:42:24.847725 1 flags.go:64] FLAG: --leader-elect-retry-period="2s" 2025-12-08T17:42:24.847735452+00:00 stderr F I1208 17:42:24.847729 1 flags.go:64] FLAG: --log-flush-frequency="5s" 2025-12-08T17:42:24.847743012+00:00 stderr F I1208 17:42:24.847732 1 flags.go:64] FLAG: --log-json-info-buffer-size="0" 2025-12-08T17:42:24.847743012+00:00 stderr F I1208 17:42:24.847738 1 flags.go:64] FLAG: --log-json-split-stream="false" 2025-12-08T17:42:24.847750602+00:00 stderr F I1208 17:42:24.847740 1 flags.go:64] FLAG: --log-text-info-buffer-size="0" 2025-12-08T17:42:24.847750602+00:00 stderr F I1208 17:42:24.847744 1 flags.go:64] FLAG: --log-text-split-stream="false" 2025-12-08T17:42:24.847750602+00:00 stderr F I1208 17:42:24.847747 1 flags.go:64] FLAG: --logging-format="text" 2025-12-08T17:42:24.847765643+00:00 stderr F I1208 17:42:24.847750 1 flags.go:64] FLAG: --master="" 2025-12-08T17:42:24.847765643+00:00 stderr F I1208 17:42:24.847752 1 flags.go:64] FLAG: --permit-address-sharing="false" 2025-12-08T17:42:24.847765643+00:00 stderr F I1208 17:42:24.847755 1 flags.go:64] FLAG: --permit-port-sharing="false" 2025-12-08T17:42:24.847765643+00:00 stderr F I1208 17:42:24.847758 1 flags.go:64] FLAG: --pod-max-in-unschedulable-pods-duration="5m0s" 2025-12-08T17:42:24.847765643+00:00 stderr F I1208 17:42:24.847761 1 flags.go:64] FLAG: --profiling="true" 2025-12-08T17:42:24.847774433+00:00 stderr F I1208 17:42:24.847764 1 flags.go:64] FLAG: --requestheader-allowed-names="[]" 2025-12-08T17:42:24.847774433+00:00 stderr F I1208 17:42:24.847767 1 flags.go:64] FLAG: --requestheader-client-ca-file="" 2025-12-08T17:42:24.847774433+00:00 stderr F I1208 17:42:24.847770 1 flags.go:64] FLAG: --requestheader-extra-headers-prefix="[x-remote-extra-]" 2025-12-08T17:42:24.847782154+00:00 stderr F I1208 17:42:24.847774 1 flags.go:64] FLAG: --requestheader-group-headers="[x-remote-group]" 2025-12-08T17:42:24.847782154+00:00 stderr F I1208 17:42:24.847778 1 flags.go:64] FLAG: --requestheader-uid-headers="[]" 2025-12-08T17:42:24.847789834+00:00 stderr F I1208 17:42:24.847781 1 flags.go:64] FLAG: --requestheader-username-headers="[x-remote-user]" 2025-12-08T17:42:24.847789834+00:00 stderr F I1208 17:42:24.847785 1 flags.go:64] FLAG: --secure-port="10259" 2025-12-08T17:42:24.847797024+00:00 stderr F I1208 17:42:24.847787 1 flags.go:64] FLAG: --show-hidden-metrics-for-version="" 2025-12-08T17:42:24.847797024+00:00 stderr F I1208 17:42:24.847791 1 flags.go:64] FLAG: --tls-cert-file="/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt" 2025-12-08T17:42:24.847806355+00:00 
stderr F I1208 17:42:24.847794 1 flags.go:64] FLAG: --tls-cipher-suites="[TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256]" 2025-12-08T17:42:24.847813395+00:00 stderr F I1208 17:42:24.847803 1 flags.go:64] FLAG: --tls-min-version="VersionTLS12" 2025-12-08T17:42:24.847813395+00:00 stderr F I1208 17:42:24.847807 1 flags.go:64] FLAG: --tls-private-key-file="/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:42:24.847820935+00:00 stderr F I1208 17:42:24.847811 1 flags.go:64] FLAG: --tls-sni-cert-key="[]" 2025-12-08T17:42:24.847820935+00:00 stderr F I1208 17:42:24.847814 1 flags.go:64] FLAG: --unsupported-kube-api-over-localhost="false" 2025-12-08T17:42:24.847828296+00:00 stderr F I1208 17:42:24.847817 1 flags.go:64] FLAG: --v="2" 2025-12-08T17:42:24.847828296+00:00 stderr F I1208 17:42:24.847822 1 flags.go:64] FLAG: --version="false" 2025-12-08T17:42:24.847835646+00:00 stderr F I1208 17:42:24.847828 1 flags.go:64] FLAG: --vmodule="" 2025-12-08T17:42:24.847835646+00:00 stderr F I1208 17:42:24.847832 1 flags.go:64] FLAG: --write-config-to="" 2025-12-08T17:42:24.850494680+00:00 stderr F W1208 17:42:24.850444 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImages 2025-12-08T17:42:24.850494680+00:00 stderr F W1208 17:42:24.850474 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerification 2025-12-08T17:42:24.850494680+00:00 stderr F W1208 17:42:24.850478 1 feature_gate.go:328] unrecognized feature gate: NewOLMPreflightPermissionChecks 2025-12-08T17:42:24.850494680+00:00 stderr F W1208 17:42:24.850482 1 feature_gate.go:328] unrecognized feature gate: VSphereMixedNodeEnv 2025-12-08T17:42:24.850494680+00:00 stderr F W1208 17:42:24.850486 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAWS 2025-12-08T17:42:24.850494680+00:00 stderr F W1208 17:42:24.850489 1 feature_gate.go:328] unrecognized feature gate: DyanmicServiceEndpointIBMCloud 2025-12-08T17:42:24.850526802+00:00 stderr F W1208 17:42:24.850493 1 feature_gate.go:328] unrecognized feature gate: SigstoreImageVerificationPKI 2025-12-08T17:42:24.850526802+00:00 stderr F W1208 17:42:24.850497 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDCWithUIDAndExtraClaimMappings 2025-12-08T17:42:24.850526802+00:00 stderr F W1208 17:42:24.850500 1 feature_gate.go:328] unrecognized feature gate: NewOLMWebhookProviderOpenshiftServiceCA 2025-12-08T17:42:24.850526802+00:00 stderr F W1208 17:42:24.850503 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNS 2025-12-08T17:42:24.850526802+00:00 stderr F W1208 17:42:24.850506 1 feature_gate.go:328] unrecognized feature gate: ImageStreamImportMode 2025-12-08T17:42:24.850526802+00:00 stderr F W1208 17:42:24.850510 1 feature_gate.go:328] unrecognized feature gate: VSphereConfigurableMaxAllowedBlockVolumesPerNode 2025-12-08T17:42:24.850526802+00:00 stderr F W1208 17:42:24.850514 1 feature_gate.go:328] unrecognized feature gate: ClusterVersionOperatorConfiguration 2025-12-08T17:42:24.850526802+00:00 stderr F W1208 17:42:24.850517 1 feature_gate.go:328] unrecognized feature gate: RouteAdvertisements 2025-12-08T17:42:24.850526802+00:00 stderr F W1208 17:42:24.850520 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpoints 
2025-12-08T17:42:24.850536742+00:00 stderr F W1208 17:42:24.850525 1 feature_gate.go:328] unrecognized feature gate: HighlyAvailableArbiter 2025-12-08T17:42:24.850536742+00:00 stderr F W1208 17:42:24.850528 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstallIBMCloud 2025-12-08T17:42:24.850536742+00:00 stderr F W1208 17:42:24.850532 1 feature_gate.go:328] unrecognized feature gate: IngressControllerDynamicConfigurationManager 2025-12-08T17:42:24.850544723+00:00 stderr F W1208 17:42:24.850535 1 feature_gate.go:328] unrecognized feature gate: DualReplica 2025-12-08T17:42:24.850551943+00:00 stderr F W1208 17:42:24.850541 1 feature_gate.go:351] Setting GA feature gate ServiceAccountTokenNodeBinding=true. It will be removed in a future release. 2025-12-08T17:42:24.850551943+00:00 stderr F W1208 17:42:24.850548 1 feature_gate.go:328] unrecognized feature gate: BuildCSIVolumes 2025-12-08T17:42:24.850559823+00:00 stderr F W1208 17:42:24.850553 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiNetworks 2025-12-08T17:42:24.850559823+00:00 stderr F W1208 17:42:24.850556 1 feature_gate.go:328] unrecognized feature gate: GCPCustomAPIEndpointsInstall 2025-12-08T17:42:24.850567433+00:00 stderr F W1208 17:42:24.850560 1 feature_gate.go:328] unrecognized feature gate: IrreconcilableMachineConfig 2025-12-08T17:42:24.850567433+00:00 stderr F W1208 17:42:24.850564 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesvSphere 2025-12-08T17:42:24.850574994+00:00 stderr F W1208 17:42:24.850569 1 feature_gate.go:328] unrecognized feature gate: MultiArchInstallAzure 2025-12-08T17:42:24.850582174+00:00 stderr F W1208 17:42:24.850573 1 feature_gate.go:328] unrecognized feature gate: EtcdBackendQuota 2025-12-08T17:42:24.850582174+00:00 stderr F W1208 17:42:24.850578 1 feature_gate.go:328] unrecognized feature gate: PreconfiguredUDNAddresses 2025-12-08T17:42:24.850589564+00:00 stderr F W1208 17:42:24.850581 1 feature_gate.go:328] unrecognized feature gate: NetworkDiagnosticsConfig 2025-12-08T17:42:24.850589564+00:00 stderr F W1208 17:42:24.850585 1 feature_gate.go:328] unrecognized feature gate: Example2 2025-12-08T17:42:24.850596985+00:00 stderr F W1208 17:42:24.850589 1 feature_gate.go:328] unrecognized feature gate: ManagedBootImagesAzure 2025-12-08T17:42:24.850596985+00:00 stderr F W1208 17:42:24.850593 1 feature_gate.go:328] unrecognized feature gate: ShortCertRotation 2025-12-08T17:42:24.850607655+00:00 stderr F W1208 17:42:24.850597 1 feature_gate.go:328] unrecognized feature gate: AutomatedEtcdBackup 2025-12-08T17:42:24.850607655+00:00 stderr F W1208 17:42:24.850600 1 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNSInstall 2025-12-08T17:42:24.850607655+00:00 stderr F W1208 17:42:24.850604 1 feature_gate.go:328] unrecognized feature gate: AzureWorkloadIdentity 2025-12-08T17:42:24.850615576+00:00 stderr F W1208 17:42:24.850607 1 feature_gate.go:328] unrecognized feature gate: SignatureStores 2025-12-08T17:42:24.850615576+00:00 stderr F W1208 17:42:24.850611 1 feature_gate.go:328] unrecognized feature gate: GatewayAPIController 2025-12-08T17:42:24.850622926+00:00 stderr F W1208 17:42:24.850614 1 feature_gate.go:328] unrecognized feature gate: AzureMultiDisk 2025-12-08T17:42:24.850622926+00:00 stderr F W1208 17:42:24.850618 1 feature_gate.go:328] unrecognized feature gate: UpgradeStatus 2025-12-08T17:42:24.850631436+00:00 stderr F W1208 17:42:24.850622 1 feature_gate.go:328] unrecognized feature gate: OpenShiftPodSecurityAdmission 
2025-12-08T17:42:24.850631436+00:00 stderr F W1208 17:42:24.850626 1 feature_gate.go:328] unrecognized feature gate: BootcNodeManagement 2025-12-08T17:42:24.850641317+00:00 stderr F W1208 17:42:24.850630 1 feature_gate.go:328] unrecognized feature gate: AWSServiceLBNetworkSecurityGroup 2025-12-08T17:42:24.850641317+00:00 stderr F W1208 17:42:24.850634 1 feature_gate.go:328] unrecognized feature gate: MetricsCollectionProfiles 2025-12-08T17:42:24.850641317+00:00 stderr F W1208 17:42:24.850638 1 feature_gate.go:328] unrecognized feature gate: DNSNameResolver 2025-12-08T17:42:24.850650967+00:00 stderr F W1208 17:42:24.850643 1 feature_gate.go:328] unrecognized feature gate: OVNObservability 2025-12-08T17:42:24.850650967+00:00 stderr F W1208 17:42:24.850647 1 feature_gate.go:328] unrecognized feature gate: VSphereHostVMGroupZonal 2025-12-08T17:42:24.850660167+00:00 stderr F W1208 17:42:24.850653 1 feature_gate.go:328] unrecognized feature gate: AzureClusterHostedDNSInstall 2025-12-08T17:42:24.850668808+00:00 stderr F W1208 17:42:24.850657 1 feature_gate.go:328] unrecognized feature gate: ClusterMonitoringConfig 2025-12-08T17:42:24.850668808+00:00 stderr F W1208 17:42:24.850662 1 feature_gate.go:328] unrecognized feature gate: MachineAPIMigration 2025-12-08T17:42:24.850668808+00:00 stderr F W1208 17:42:24.850665 1 feature_gate.go:328] unrecognized feature gate: NewOLMCatalogdAPIV1Metas 2025-12-08T17:42:24.850677738+00:00 stderr F W1208 17:42:24.850668 1 feature_gate.go:328] unrecognized feature gate: NutanixMultiSubnets 2025-12-08T17:42:24.850677738+00:00 stderr F W1208 17:42:24.850672 1 feature_gate.go:328] unrecognized feature gate: AWSClusterHostedDNS 2025-12-08T17:42:24.850686589+00:00 stderr F W1208 17:42:24.850676 1 feature_gate.go:328] unrecognized feature gate: NoRegistryClusterOperations 2025-12-08T17:42:24.850686589+00:00 stderr F W1208 17:42:24.850679 1 feature_gate.go:328] unrecognized feature gate: CPMSMachineNamePrefix 2025-12-08T17:42:24.850686589+00:00 stderr F W1208 17:42:24.850682 1 feature_gate.go:328] unrecognized feature gate: IngressControllerLBSubnetsAWS 2025-12-08T17:42:24.850695619+00:00 stderr F W1208 17:42:24.850686 1 feature_gate.go:328] unrecognized feature gate: PinnedImages 2025-12-08T17:42:24.850695619+00:00 stderr F W1208 17:42:24.850689 1 feature_gate.go:328] unrecognized feature gate: SetEIPForNLBIngressController 2025-12-08T17:42:24.850704219+00:00 stderr F W1208 17:42:24.850694 1 feature_gate.go:328] unrecognized feature gate: AlibabaPlatform 2025-12-08T17:42:24.850704219+00:00 stderr F W1208 17:42:24.850697 1 feature_gate.go:328] unrecognized feature gate: NetworkSegmentation 2025-12-08T17:42:24.850704219+00:00 stderr F W1208 17:42:24.850700 1 feature_gate.go:328] unrecognized feature gate: ClusterAPIInstall 2025-12-08T17:42:24.850718480+00:00 stderr F W1208 17:42:24.850704 1 feature_gate.go:328] unrecognized feature gate: NetworkLiveMigration 2025-12-08T17:42:24.850718480+00:00 stderr F W1208 17:42:24.850708 1 feature_gate.go:328] unrecognized feature gate: ImageModeStatusReporting 2025-12-08T17:42:24.850718480+00:00 stderr F W1208 17:42:24.850711 1 feature_gate.go:328] unrecognized feature gate: ExternalOIDC 2025-12-08T17:42:24.850718480+00:00 stderr F W1208 17:42:24.850714 1 feature_gate.go:328] unrecognized feature gate: MixedCPUsAllocation 2025-12-08T17:42:24.850728420+00:00 stderr F W1208 17:42:24.850718 1 feature_gate.go:328] unrecognized feature gate: KMSEncryptionProvider 2025-12-08T17:42:24.850728420+00:00 stderr F W1208 17:42:24.850722 1 
feature_gate.go:328] unrecognized feature gate: Example 2025-12-08T17:42:24.850728420+00:00 stderr F W1208 17:42:24.850725 1 feature_gate.go:328] unrecognized feature gate: BootImageSkewEnforcement 2025-12-08T17:42:24.850737881+00:00 stderr F W1208 17:42:24.850728 1 feature_gate.go:328] unrecognized feature gate: InsightsConfig 2025-12-08T17:42:24.850737881+00:00 stderr F W1208 17:42:24.850732 1 feature_gate.go:328] unrecognized feature gate: AWSDedicatedHosts 2025-12-08T17:42:24.850746631+00:00 stderr F W1208 17:42:24.850735 1 feature_gate.go:328] unrecognized feature gate: MachineConfigNodes 2025-12-08T17:42:24.850746631+00:00 stderr F W1208 17:42:24.850740 1 feature_gate.go:328] unrecognized feature gate: VSphereMultiDisk 2025-12-08T17:42:24.850746631+00:00 stderr F W1208 17:42:24.850743 1 feature_gate.go:328] unrecognized feature gate: InsightsConfigAPI 2025-12-08T17:42:24.850755982+00:00 stderr F W1208 17:42:24.850749 1 feature_gate.go:328] unrecognized feature gate: AdditionalRoutingCapabilities 2025-12-08T17:42:24.850755982+00:00 stderr F W1208 17:42:24.850752 1 feature_gate.go:328] unrecognized feature gate: GatewayAPI 2025-12-08T17:42:24.850764532+00:00 stderr F W1208 17:42:24.850756 1 feature_gate.go:328] unrecognized feature gate: GCPClusterHostedDNSInstall 2025-12-08T17:42:24.850764532+00:00 stderr F W1208 17:42:24.850760 1 feature_gate.go:328] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController 2025-12-08T17:42:24.850772732+00:00 stderr F W1208 17:42:24.850763 1 feature_gate.go:328] unrecognized feature gate: AdminNetworkPolicy 2025-12-08T17:42:24.850772732+00:00 stderr F W1208 17:42:24.850766 1 feature_gate.go:328] unrecognized feature gate: InsightsOnDemandDataGather 2025-12-08T17:42:24.850772732+00:00 stderr F W1208 17:42:24.850769 1 feature_gate.go:328] unrecognized feature gate: ExternalSnapshotMetadata 2025-12-08T17:42:24.850780493+00:00 stderr F W1208 17:42:24.850773 1 feature_gate.go:328] unrecognized feature gate: VolumeGroupSnapshot 2025-12-08T17:42:24.850787743+00:00 stderr F W1208 17:42:24.850778 1 feature_gate.go:349] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
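The long run of "unrecognized feature gate" warnings above is expected: the cluster-wide --feature-gates list shown earlier includes OpenShift-specific gates that the upstream kube-scheduler binary never registered, so it logs them and moves on. A minimal sketch, using k8s.io/component-base/featuregate, of how a component only recognizes gates it has explicitly added; the gate names here are made up for illustration.

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	fg := featuregate.NewFeatureGate()

	// A binary only knows the gates it registers itself.
	if err := fg.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"ExampleSchedulerGate": {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}

	// Setting a registered gate works as usual...
	if err := fg.Set("ExampleSchedulerGate=true"); err != nil {
		panic(err)
	}
	fmt.Println("ExampleSchedulerGate enabled:", fg.Enabled("ExampleSchedulerGate"))

	// ...while a gate this binary never registered is not accepted, which is
	// the condition the scheduler surfaces as "unrecognized feature gate".
	if err := fg.Set("ClusterOnlyGate=true"); err != nil {
		fmt.Println("unknown gate:", err)
	}
}
```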
2025-12-08T17:42:24.850787743+00:00 stderr F W1208 17:42:24.850784 1 feature_gate.go:328] unrecognized feature gate: MultiDiskSetup 2025-12-08T17:42:24.850797183+00:00 stderr F W1208 17:42:24.850791 1 feature_gate.go:328] unrecognized feature gate: AzureDedicatedHosts 2025-12-08T17:42:24.850804424+00:00 stderr F W1208 17:42:24.850795 1 feature_gate.go:328] unrecognized feature gate: NewOLMOwnSingleNamespace 2025-12-08T17:42:24.850804424+00:00 stderr F W1208 17:42:24.850799 1 feature_gate.go:328] unrecognized feature gate: ConsolePluginContentSecurityPolicy 2025-12-08T17:42:24.850811594+00:00 stderr F W1208 17:42:24.850802 1 feature_gate.go:328] unrecognized feature gate: NewOLM 2025-12-08T17:42:24.855616101+00:00 stderr F I1208 17:42:24.855571 1 dynamic_serving_content.go:116] "Loaded a new cert/key pair" name="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:42:25.269282516+00:00 stderr F W1208 17:42:25.269206 1 authentication.go:397] Error looking up in-cluster authentication configuration: Get "https://api-int.crc.testing:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication": dial tcp 38.102.83.243:6443: connect: connection refused 2025-12-08T17:42:25.269282516+00:00 stderr F W1208 17:42:25.269248 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous. 2025-12-08T17:42:25.269282516+00:00 stderr F W1208 17:42:25.269260 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false 2025-12-08T17:42:25.291069873+00:00 stderr F I1208 17:42:25.290924 1 framework.go:399] "the scheduler starts to work with those plugins" 
Plugins={"PreEnqueue":{"Enabled":[{"Name":"SchedulingGates","Weight":0},{"Name":"DefaultPreemption","Weight":0}],"Disabled":null},"QueueSort":{"Enabled":[{"Name":"PrioritySort","Weight":0}],"Disabled":null},"PreFilter":{"Enabled":[{"Name":"NodeAffinity","Weight":0},{"Name":"NodePorts","Weight":0},{"Name":"NodeResourcesFit","Weight":0},{"Name":"VolumeRestrictions","Weight":0},{"Name":"NodeVolumeLimits","Weight":0},{"Name":"VolumeBinding","Weight":0},{"Name":"VolumeZone","Weight":0},{"Name":"PodTopologySpread","Weight":0},{"Name":"InterPodAffinity","Weight":0}],"Disabled":null},"Filter":{"Enabled":[{"Name":"NodeUnschedulable","Weight":0},{"Name":"NodeName","Weight":0},{"Name":"TaintToleration","Weight":0},{"Name":"NodeAffinity","Weight":0},{"Name":"NodePorts","Weight":0},{"Name":"NodeResourcesFit","Weight":0},{"Name":"VolumeRestrictions","Weight":0},{"Name":"NodeVolumeLimits","Weight":0},{"Name":"VolumeBinding","Weight":0},{"Name":"VolumeZone","Weight":0},{"Name":"PodTopologySpread","Weight":0},{"Name":"InterPodAffinity","Weight":0}],"Disabled":null},"PostFilter":{"Enabled":[{"Name":"DefaultPreemption","Weight":0}],"Disabled":null},"PreScore":{"Enabled":[{"Name":"TaintToleration","Weight":0},{"Name":"NodeAffinity","Weight":0},{"Name":"NodeResourcesFit","Weight":0},{"Name":"VolumeBinding","Weight":0},{"Name":"PodTopologySpread","Weight":0},{"Name":"InterPodAffinity","Weight":0},{"Name":"NodeResourcesBalancedAllocation","Weight":0}],"Disabled":null},"Score":{"Enabled":[{"Name":"TaintToleration","Weight":3},{"Name":"NodeAffinity","Weight":2},{"Name":"NodeResourcesFit","Weight":1},{"Name":"VolumeBinding","Weight":1},{"Name":"PodTopologySpread","Weight":2},{"Name":"InterPodAffinity","Weight":2},{"Name":"NodeResourcesBalancedAllocation","Weight":1},{"Name":"ImageLocality","Weight":1}],"Disabled":null},"Reserve":{"Enabled":[{"Name":"VolumeBinding","Weight":0}],"Disabled":null},"Permit":{"Enabled":null,"Disabled":null},"PreBind":{"Enabled":[{"Name":"VolumeBinding","Weight":0}],"Disabled":null},"Bind":{"Enabled":[{"Name":"DefaultBinder","Weight":0}],"Disabled":null},"PostBind":{"Enabled":null,"Disabled":null},"MultiPoint":{"Enabled":null,"Disabled":null}} 2025-12-08T17:42:25.299799438+00:00 stderr F I1208 17:42:25.299746 1 configfile.go:94] "Using component config" config=< 2025-12-08T17:42:25.299799438+00:00 stderr F apiVersion: kubescheduler.config.k8s.io/v1 2025-12-08T17:42:25.299799438+00:00 stderr F clientConnection: 2025-12-08T17:42:25.299799438+00:00 stderr F acceptContentTypes: "" 2025-12-08T17:42:25.299799438+00:00 stderr F burst: 100 2025-12-08T17:42:25.299799438+00:00 stderr F contentType: application/vnd.kubernetes.protobuf 2025-12-08T17:42:25.299799438+00:00 stderr F kubeconfig: /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig 2025-12-08T17:42:25.299799438+00:00 stderr F qps: 50 2025-12-08T17:42:25.299799438+00:00 stderr F enableContentionProfiling: false 2025-12-08T17:42:25.299799438+00:00 stderr F enableProfiling: false 2025-12-08T17:42:25.299799438+00:00 stderr F kind: KubeSchedulerConfiguration 2025-12-08T17:42:25.299799438+00:00 stderr F leaderElection: 2025-12-08T17:42:25.299799438+00:00 stderr F leaderElect: true 2025-12-08T17:42:25.299799438+00:00 stderr F leaseDuration: 2m17s 2025-12-08T17:42:25.299799438+00:00 stderr F renewDeadline: 1m47s 2025-12-08T17:42:25.299799438+00:00 stderr F resourceLock: leases 2025-12-08T17:42:25.299799438+00:00 stderr F resourceName: kube-scheduler 2025-12-08T17:42:25.299799438+00:00 stderr F resourceNamespace: 
openshift-kube-scheduler 2025-12-08T17:42:25.299799438+00:00 stderr F retryPeriod: 26s 2025-12-08T17:42:25.299799438+00:00 stderr F parallelism: 16 2025-12-08T17:42:25.299799438+00:00 stderr F percentageOfNodesToScore: 0 2025-12-08T17:42:25.299799438+00:00 stderr F podInitialBackoffSeconds: 1 2025-12-08T17:42:25.299799438+00:00 stderr F podMaxBackoffSeconds: 10 2025-12-08T17:42:25.299799438+00:00 stderr F profiles: 2025-12-08T17:42:25.299799438+00:00 stderr F - pluginConfig: 2025-12-08T17:42:25.299799438+00:00 stderr F - args: 2025-12-08T17:42:25.299799438+00:00 stderr F apiVersion: kubescheduler.config.k8s.io/v1 2025-12-08T17:42:25.299799438+00:00 stderr F kind: DefaultPreemptionArgs 2025-12-08T17:42:25.299799438+00:00 stderr F minCandidateNodesAbsolute: 100 2025-12-08T17:42:25.299799438+00:00 stderr F minCandidateNodesPercentage: 10 2025-12-08T17:42:25.299799438+00:00 stderr F name: DefaultPreemption 2025-12-08T17:42:25.299799438+00:00 stderr F - args: 2025-12-08T17:42:25.299799438+00:00 stderr F apiVersion: kubescheduler.config.k8s.io/v1 2025-12-08T17:42:25.299799438+00:00 stderr F hardPodAffinityWeight: 1 2025-12-08T17:42:25.299799438+00:00 stderr F ignorePreferredTermsOfExistingPods: false 2025-12-08T17:42:25.299799438+00:00 stderr F kind: InterPodAffinityArgs 2025-12-08T17:42:25.299799438+00:00 stderr F name: InterPodAffinity 2025-12-08T17:42:25.299799438+00:00 stderr F - args: 2025-12-08T17:42:25.299799438+00:00 stderr F apiVersion: kubescheduler.config.k8s.io/v1 2025-12-08T17:42:25.299799438+00:00 stderr F kind: NodeAffinityArgs 2025-12-08T17:42:25.299799438+00:00 stderr F name: NodeAffinity 2025-12-08T17:42:25.299799438+00:00 stderr F - args: 2025-12-08T17:42:25.299799438+00:00 stderr F apiVersion: kubescheduler.config.k8s.io/v1 2025-12-08T17:42:25.299799438+00:00 stderr F kind: NodeResourcesBalancedAllocationArgs 2025-12-08T17:42:25.299799438+00:00 stderr F resources: 2025-12-08T17:42:25.299799438+00:00 stderr F - name: cpu 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 1 2025-12-08T17:42:25.299799438+00:00 stderr F - name: memory 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 1 2025-12-08T17:42:25.299799438+00:00 stderr F name: NodeResourcesBalancedAllocation 2025-12-08T17:42:25.299799438+00:00 stderr F - args: 2025-12-08T17:42:25.299799438+00:00 stderr F apiVersion: kubescheduler.config.k8s.io/v1 2025-12-08T17:42:25.299799438+00:00 stderr F kind: NodeResourcesFitArgs 2025-12-08T17:42:25.299799438+00:00 stderr F scoringStrategy: 2025-12-08T17:42:25.299799438+00:00 stderr F resources: 2025-12-08T17:42:25.299799438+00:00 stderr F - name: cpu 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 1 2025-12-08T17:42:25.299799438+00:00 stderr F - name: memory 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 1 2025-12-08T17:42:25.299799438+00:00 stderr F type: LeastAllocated 2025-12-08T17:42:25.299799438+00:00 stderr F name: NodeResourcesFit 2025-12-08T17:42:25.299799438+00:00 stderr F - args: 2025-12-08T17:42:25.299799438+00:00 stderr F apiVersion: kubescheduler.config.k8s.io/v1 2025-12-08T17:42:25.299799438+00:00 stderr F defaultingType: System 2025-12-08T17:42:25.299799438+00:00 stderr F kind: PodTopologySpreadArgs 2025-12-08T17:42:25.299799438+00:00 stderr F name: PodTopologySpread 2025-12-08T17:42:25.299799438+00:00 stderr F - args: 2025-12-08T17:42:25.299799438+00:00 stderr F apiVersion: kubescheduler.config.k8s.io/v1 2025-12-08T17:42:25.299799438+00:00 stderr F bindTimeoutSeconds: 600 2025-12-08T17:42:25.299799438+00:00 stderr F kind: VolumeBindingArgs 
2025-12-08T17:42:25.299799438+00:00 stderr F name: VolumeBinding 2025-12-08T17:42:25.299799438+00:00 stderr F plugins: 2025-12-08T17:42:25.299799438+00:00 stderr F bind: {} 2025-12-08T17:42:25.299799438+00:00 stderr F filter: {} 2025-12-08T17:42:25.299799438+00:00 stderr F multiPoint: 2025-12-08T17:42:25.299799438+00:00 stderr F enabled: 2025-12-08T17:42:25.299799438+00:00 stderr F - name: SchedulingGates 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F - name: PrioritySort 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F - name: NodeUnschedulable 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F - name: NodeName 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F - name: TaintToleration 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 3 2025-12-08T17:42:25.299799438+00:00 stderr F - name: NodeAffinity 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 2 2025-12-08T17:42:25.299799438+00:00 stderr F - name: NodePorts 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F - name: NodeResourcesFit 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 1 2025-12-08T17:42:25.299799438+00:00 stderr F - name: VolumeRestrictions 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F - name: NodeVolumeLimits 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F - name: VolumeBinding 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F - name: VolumeZone 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F - name: PodTopologySpread 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 2 2025-12-08T17:42:25.299799438+00:00 stderr F - name: InterPodAffinity 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 2 2025-12-08T17:42:25.299799438+00:00 stderr F - name: DefaultPreemption 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F - name: NodeResourcesBalancedAllocation 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 1 2025-12-08T17:42:25.299799438+00:00 stderr F - name: ImageLocality 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 1 2025-12-08T17:42:25.299799438+00:00 stderr F - name: DefaultBinder 2025-12-08T17:42:25.299799438+00:00 stderr F weight: 0 2025-12-08T17:42:25.299799438+00:00 stderr F permit: {} 2025-12-08T17:42:25.299799438+00:00 stderr F postBind: {} 2025-12-08T17:42:25.299799438+00:00 stderr F postFilter: {} 2025-12-08T17:42:25.299799438+00:00 stderr F preBind: {} 2025-12-08T17:42:25.299799438+00:00 stderr F preEnqueue: {} 2025-12-08T17:42:25.299799438+00:00 stderr F preFilter: {} 2025-12-08T17:42:25.299799438+00:00 stderr F preScore: {} 2025-12-08T17:42:25.299799438+00:00 stderr F queueSort: {} 2025-12-08T17:42:25.299799438+00:00 stderr F reserve: {} 2025-12-08T17:42:25.299799438+00:00 stderr F score: {} 2025-12-08T17:42:25.299799438+00:00 stderr F schedulerName: default-scheduler 2025-12-08T17:42:25.299799438+00:00 stderr F > 2025-12-08T17:42:25.302169439+00:00 stderr F I1208 17:42:25.302133 1 server.go:176] "Starting Kubernetes Scheduler" version="v1.33.5" 2025-12-08T17:42:25.302217442+00:00 stderr F I1208 17:42:25.302203 1 server.go:178] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" 
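Every record in these pod logs uses the CRI container log framing: an RFC3339Nano timestamp, the stream (stdout or stderr), a tag (F for a complete line, P for a partial one), and then the message, as in the "Starting Kubernetes Scheduler" line above. A minimal, dependency-free sketch of splitting one such line into its fields:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// criLine holds the four fields of a CRI-formatted container log record.
type criLine struct {
	Timestamp time.Time
	Stream    string // "stdout" or "stderr"
	Tag       string // "F" = full line, "P" = partial line
	Message   string
}

func parseCRILine(line string) (criLine, error) {
	// The first three fields are space-delimited; everything after them is the message.
	parts := strings.SplitN(line, " ", 4)
	if len(parts) != 4 {
		return criLine{}, fmt.Errorf("unexpected format: %q", line)
	}
	ts, err := time.Parse(time.RFC3339Nano, parts[0])
	if err != nil {
		return criLine{}, err
	}
	return criLine{Timestamp: ts, Stream: parts[1], Tag: parts[2], Message: parts[3]}, nil
}

func main() {
	l, err := parseCRILine(`2025-12-08T17:42:25.302169439+00:00 stderr F I1208 17:42:25.302133       1 server.go:176] "Starting Kubernetes Scheduler" version="v1.33.5"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(l.Timestamp.UTC(), l.Stream, l.Tag, l.Message)
}
```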
2025-12-08T17:42:25.304583163+00:00 stderr F I1208 17:42:25.304524 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:42:25.304995051+00:00 stderr F I1208 17:42:25.304950 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" 2025-12-08T17:42:25.305056204+00:00 stderr F I1208 17:42:25.305012 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:42:25.305937052+00:00 stderr F E1208 17:42:25.305843 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ConfigMap: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap" 2025-12-08T17:42:25.306184062+00:00 stderr F I1208 17:42:25.306157 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"scheduler.openshift-kube-scheduler.svc\" [serving] validServingFor=[scheduler.openshift-kube-scheduler.svc,scheduler.openshift-kube-scheduler.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:42:25.30612563 +0000 UTC))" 2025-12-08T17:42:25.306472695+00:00 stderr F I1208 17:42:25.306441 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215745\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215745\" (2025-12-08 16:42:24 +0000 UTC to 2028-12-08 16:42:24 +0000 UTC (now=2025-12-08 17:42:25.306427943 +0000 UTC))" 2025-12-08T17:42:25.306534727+00:00 stderr F I1208 17:42:25.306519 1 secure_serving.go:211] Serving securely on [::]:10259 2025-12-08T17:42:25.306600010+00:00 stderr F I1208 17:42:25.306566 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:42:25.308412978+00:00 stderr F E1208 17:42:25.308335 1 reflector.go:200] "Failed to watch" err="failed to list *v1.PersistentVolume: Get \"https://api-int.crc.testing:6443/api/v1/persistentvolumes?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume" 2025-12-08T17:42:25.308508232+00:00 stderr F E1208 17:42:25.308464 1 reflector.go:200] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: Get \"https://api-int.crc.testing:6443/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim" 2025-12-08T17:42:25.308613177+00:00 stderr F E1208 17:42:25.308576 1 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: 
connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" 2025-12-08T17:42:25.308632168+00:00 stderr F E1208 17:42:25.308598 1 reflector.go:200] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: Get \"https://api-int.crc.testing:6443/apis/policy/v1/poddisruptionbudgets?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget" 2025-12-08T17:42:25.308795405+00:00 stderr F E1208 17:42:25.308759 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ReplicationController: Get \"https://api-int.crc.testing:6443/api/v1/replicationcontrollers?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController" 2025-12-08T17:42:25.308943061+00:00 stderr F E1208 17:42:25.308900 1 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csistoragecapacities?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity" 2025-12-08T17:42:25.309042655+00:00 stderr F E1208 17:42:25.308992 1 reflector.go:200] "Failed to watch" err="failed to list *v1.StatefulSet: Get \"https://api-int.crc.testing:6443/apis/apps/v1/statefulsets?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet" 2025-12-08T17:42:25.309126289+00:00 stderr F E1208 17:42:25.309087 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Namespace: Get \"https://api-int.crc.testing:6443/api/v1/namespaces?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace" 2025-12-08T17:42:25.309183151+00:00 stderr F E1208 17:42:25.309149 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ReplicaSet: Get \"https://api-int.crc.testing:6443/apis/apps/v1/replicasets?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet" 2025-12-08T17:42:25.309183151+00:00 stderr F E1208 17:42:25.309158 1 reflector.go:200] "Failed to watch" err="failed to list *v1.VolumeAttachment: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/volumeattachments?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment" 2025-12-08T17:42:25.309256464+00:00 stderr F E1208 17:42:25.309219 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" 2025-12-08T17:42:25.309316327+00:00 stderr F E1208 17:42:25.309272 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: Get 
\"https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" 2025-12-08T17:42:25.309557377+00:00 stderr F E1208 17:42:25.309503 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Pod: Get \"https://api-int.crc.testing:6443/api/v1/pods?fieldSelector=status.phase%21%3DSucceeded%2Cstatus.phase%21%3DFailed&limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod" 2025-12-08T17:42:25.309557377+00:00 stderr F E1208 17:42:25.309521 1 reflector.go:200] "Failed to watch" err="failed to list *v1.StorageClass: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass" 2025-12-08T17:42:25.309848510+00:00 stderr F E1208 17:42:25.309799 1 reflector.go:200] "Failed to watch" err="failed to list *v1.CSINode: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode" 2025-12-08T17:42:26.143532182+00:00 stderr F E1208 17:42:26.143399 1 reflector.go:200] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: Get \"https://api-int.crc.testing:6443/apis/policy/v1/poddisruptionbudgets?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget" 2025-12-08T17:42:26.152191766+00:00 stderr F E1208 17:42:26.152116 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" 2025-12-08T17:42:26.165002026+00:00 stderr F E1208 17:42:26.164894 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" 2025-12-08T17:42:26.172710603+00:00 stderr F E1208 17:42:26.172598 1 reflector.go:200] "Failed to watch" err="failed to list *v1.VolumeAttachment: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/volumeattachments?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment" 2025-12-08T17:42:26.175734293+00:00 stderr F E1208 17:42:26.175692 1 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" 2025-12-08T17:42:26.183818949+00:00 stderr F E1208 17:42:26.183760 1 
reflector.go:200] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: Get \"https://api-int.crc.testing:6443/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim" 2025-12-08T17:42:26.185507494+00:00 stderr F E1208 17:42:26.185462 1 reflector.go:200] "Failed to watch" err="failed to list *v1.StatefulSet: Get \"https://api-int.crc.testing:6443/apis/apps/v1/statefulsets?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet" 2025-12-08T17:42:26.197709926+00:00 stderr F E1208 17:42:26.197633 1 reflector.go:200] "Failed to watch" err="failed to list *v1.PersistentVolume: Get \"https://api-int.crc.testing:6443/api/v1/persistentvolumes?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume" 2025-12-08T17:42:26.235285952+00:00 stderr F E1208 17:42:26.235219 1 reflector.go:200] "Failed to watch" err="failed to list *v1.StorageClass: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.243:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass" 2025-12-08T17:42:36.300113131+00:00 stderr F I1208 17:42:36.298600 1 trace.go:236] Trace[550148175]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:26.294) (total time: 10003ms): 2025-12-08T17:42:36.300113131+00:00 stderr F Trace[550148175]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/apps/v1/replicasets?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10003ms (17:42:36.297) 2025-12-08T17:42:36.300113131+00:00 stderr F Trace[550148175]: [10.003403731s] [10.003403731s] END 2025-12-08T17:42:36.300113131+00:00 stderr F E1208 17:42:36.298678 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ReplicaSet: Get \"https://api-int.crc.testing:6443/apis/apps/v1/replicasets?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet" 2025-12-08T17:42:36.386331213+00:00 stderr F I1208 17:42:36.386189 1 trace.go:236] Trace[1812282877]: "Reflector ListAndWatch" name:runtime/asm_amd64.s:1700 (08-Dec-2025 17:42:26.384) (total time: 10001ms): 2025-12-08T17:42:36.386331213+00:00 stderr F Trace[1812282877]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (17:42:36.386) 2025-12-08T17:42:36.386331213+00:00 stderr F Trace[1812282877]: [10.001614643s] [10.001614643s] END 2025-12-08T17:42:36.386331213+00:00 stderr F E1208 17:42:36.386222 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ConfigMap: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap" 
2025-12-08T17:42:36.484100070+00:00 stderr F I1208 17:42:36.484041 1 trace.go:236] Trace[903536028]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:26.482) (total time: 10001ms): 2025-12-08T17:42:36.484100070+00:00 stderr F Trace[903536028]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/namespaces?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (17:42:36.483) 2025-12-08T17:42:36.484100070+00:00 stderr F Trace[903536028]: [10.001356843s] [10.001356843s] END 2025-12-08T17:42:36.484100070+00:00 stderr F E1208 17:42:36.484075 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Namespace: Get \"https://api-int.crc.testing:6443/api/v1/namespaces?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace" 2025-12-08T17:42:36.607588699+00:00 stderr F I1208 17:42:36.607536 1 trace.go:236] Trace[930252774]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:26.606) (total time: 10001ms): 2025-12-08T17:42:36.607588699+00:00 stderr F Trace[930252774]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (17:42:36.607) 2025-12-08T17:42:36.607588699+00:00 stderr F Trace[930252774]: [10.001349309s] [10.001349309s] END 2025-12-08T17:42:36.607588699+00:00 stderr F E1208 17:42:36.607565 1 reflector.go:200] "Failed to watch" err="failed to list *v1.CSINode: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode" 2025-12-08T17:42:36.656995857+00:00 stderr F I1208 17:42:36.656943 1 trace.go:236] Trace[1413565900]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:26.656) (total time: 10000ms): 2025-12-08T17:42:36.656995857+00:00 stderr F Trace[1413565900]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/pods?fieldSelector=status.phase%21%3DSucceeded%2Cstatus.phase%21%3DFailed&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10000ms (17:42:36.656) 2025-12-08T17:42:36.656995857+00:00 stderr F Trace[1413565900]: [10.00090493s] [10.00090493s] END 2025-12-08T17:42:36.656995857+00:00 stderr F E1208 17:42:36.656972 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Pod: Get \"https://api-int.crc.testing:6443/api/v1/pods?fieldSelector=status.phase%21%3DSucceeded%2Cstatus.phase%21%3DFailed&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod" 2025-12-08T17:42:36.688608509+00:00 stderr F I1208 17:42:36.688553 1 trace.go:236] Trace[1861616050]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:26.687) (total time: 10000ms): 2025-12-08T17:42:36.688608509+00:00 stderr F Trace[1861616050]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csistoragecapacities?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10000ms (17:42:36.688) 2025-12-08T17:42:36.688608509+00:00 stderr F Trace[1861616050]: [10.000889969s] [10.000889969s] END 2025-12-08T17:42:36.688608509+00:00 stderr F E1208 17:42:36.688584 1 reflector.go:200] "Failed to 
watch" err="failed to list *v1.CSIStorageCapacity: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csistoragecapacities?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity" 2025-12-08T17:42:36.761305292+00:00 stderr F I1208 17:42:36.761184 1 trace.go:236] Trace[296083212]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:26.760) (total time: 10001ms): 2025-12-08T17:42:36.761305292+00:00 stderr F Trace[296083212]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/replicationcontrollers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (17:42:36.761) 2025-12-08T17:42:36.761305292+00:00 stderr F Trace[296083212]: [10.001071081s] [10.001071081s] END 2025-12-08T17:42:36.761305292+00:00 stderr F E1208 17:42:36.761244 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ReplicationController: Get \"https://api-int.crc.testing:6443/api/v1/replicationcontrollers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController" 2025-12-08T17:42:38.791719756+00:00 stderr F I1208 17:42:38.791606 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:38.998329651+00:00 stderr F I1208 17:42:38.998192 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:39.034595371+00:00 stderr F I1208 17:42:39.034480 1 reflector.go:430] "Caches populated" type="*v1.CSINode" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:39.110437479+00:00 stderr F I1208 17:42:39.110313 1 reflector.go:430] "Caches populated" type="*v1.CSIStorageCapacity" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:39.551290834+00:00 stderr F I1208 17:42:39.551159 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="runtime/asm_amd64.s:1700" 2025-12-08T17:42:39.605553354+00:00 stderr F I1208 17:42:39.605482 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:42:39.605869122+00:00 stderr F I1208 17:42:39.605829 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:42:39.605812241 +0000 UTC))" 2025-12-08T17:42:39.605869122+00:00 stderr F I1208 17:42:39.605854 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:42:39.605849262 +0000 UTC))" 2025-12-08T17:42:39.605920424+00:00 stderr F I1208 17:42:39.605864 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:42:39.605860492 +0000 UTC))" 2025-12-08T17:42:39.605920424+00:00 stderr F I1208 17:42:39.605890 1 tlsconfig.go:181] 
"Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:42:39.605871713 +0000 UTC))" 2025-12-08T17:42:39.605920424+00:00 stderr F I1208 17:42:39.605903 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:42:39.605897033 +0000 UTC))" 2025-12-08T17:42:39.605920424+00:00 stderr F I1208 17:42:39.605913 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:42:39.605909584 +0000 UTC))" 2025-12-08T17:42:39.605946705+00:00 stderr F I1208 17:42:39.605923 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:42:39.605919964 +0000 UTC))" 2025-12-08T17:42:39.606139271+00:00 stderr F I1208 17:42:39.606106 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"scheduler.openshift-kube-scheduler.svc\" [serving] validServingFor=[scheduler.openshift-kube-scheduler.svc,scheduler.openshift-kube-scheduler.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:42:39.60609949 +0000 UTC))" 2025-12-08T17:42:39.606269064+00:00 stderr F I1208 17:42:39.606241 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215745\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215745\" (2025-12-08 16:42:24 +0000 UTC to 2028-12-08 16:42:24 +0000 UTC (now=2025-12-08 17:42:39.606233803 +0000 UTC))" 2025-12-08T17:42:42.237967258+00:00 stderr F I1208 17:42:42.237381 1 trace.go:236] Trace[1711618524]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:28.060) (total time: 14177ms): 2025-12-08T17:42:42.237967258+00:00 stderr F Trace[1711618524]: ---"Objects listed" error: 14177ms (17:42:42.237) 2025-12-08T17:42:42.237967258+00:00 stderr F Trace[1711618524]: [14.177114097s] [14.177114097s] END 2025-12-08T17:42:42.237967258+00:00 stderr F I1208 17:42:42.237413 1 reflector.go:430] "Caches populated" type="*v1.PersistentVolume" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:42.237967258+00:00 stderr F I1208 17:42:42.237946 1 trace.go:236] Trace[1591449693]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:28.103) (total time: 14134ms): 2025-12-08T17:42:42.237967258+00:00 stderr F Trace[1591449693]: ---"Objects listed" error: 14134ms 
(17:42:42.237) 2025-12-08T17:42:42.237967258+00:00 stderr F Trace[1591449693]: [14.134692506s] [14.134692506s] END 2025-12-08T17:42:42.238045120+00:00 stderr F I1208 17:42:42.237958 1 reflector.go:430] "Caches populated" type="*v1.PersistentVolumeClaim" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:42.238251366+00:00 stderr F I1208 17:42:42.238207 1 trace.go:236] Trace[233068351]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:28.387) (total time: 13850ms): 2025-12-08T17:42:42.238251366+00:00 stderr F Trace[233068351]: ---"Objects listed" error: 13850ms (17:42:42.238) 2025-12-08T17:42:42.238251366+00:00 stderr F Trace[233068351]: [13.850939447s] [13.850939447s] END 2025-12-08T17:42:42.238251366+00:00 stderr F I1208 17:42:42.238227 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:42.238736569+00:00 stderr F I1208 17:42:42.238682 1 trace.go:236] Trace[568568347]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:28.517) (total time: 13720ms): 2025-12-08T17:42:42.238736569+00:00 stderr F Trace[568568347]: ---"Objects listed" error: 13720ms (17:42:42.238) 2025-12-08T17:42:42.238736569+00:00 stderr F Trace[568568347]: [13.720931741s] [13.720931741s] END 2025-12-08T17:42:42.238746759+00:00 stderr F I1208 17:42:42.238729 1 reflector.go:430] "Caches populated" type="*v1.StatefulSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:42.239444589+00:00 stderr F I1208 17:42:42.238763 1 node_tree.go:65] "Added node to NodeTree" node="crc" zone="" 2025-12-08T17:42:42.239444589+00:00 stderr F I1208 17:42:42.239210 1 trace.go:236] Trace[166554217]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:29.324) (total time: 12914ms): 2025-12-08T17:42:42.239444589+00:00 stderr F Trace[166554217]: ---"Objects listed" error: 12914ms (17:42:42.239) 2025-12-08T17:42:42.239444589+00:00 stderr F Trace[166554217]: [12.914670232s] [12.914670232s] END 2025-12-08T17:42:42.239444589+00:00 stderr F I1208 17:42:42.239230 1 reflector.go:430] "Caches populated" type="*v1.PodDisruptionBudget" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:42.239444589+00:00 stderr F I1208 17:42:42.239241 1 trace.go:236] Trace[1724885711]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:29.115) (total time: 13123ms): 2025-12-08T17:42:42.239444589+00:00 stderr F Trace[1724885711]: ---"Objects listed" error: 13123ms (17:42:42.239) 2025-12-08T17:42:42.239444589+00:00 stderr F Trace[1724885711]: [13.123465308s] [13.123465308s] END 2025-12-08T17:42:42.239444589+00:00 stderr F I1208 17:42:42.239252 1 reflector.go:430] "Caches populated" type="*v1.VolumeAttachment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:42.239708516+00:00 stderr F I1208 17:42:42.239688 1 trace.go:236] Trace[395559046]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:29.206) (total time: 13033ms): 2025-12-08T17:42:42.239708516+00:00 stderr F Trace[395559046]: ---"Objects listed" error: 13033ms (17:42:42.239) 2025-12-08T17:42:42.239708516+00:00 stderr F Trace[395559046]: [13.033167512s] [13.033167512s] END 2025-12-08T17:42:42.239765867+00:00 stderr F I1208 17:42:42.239750 1 reflector.go:430] "Caches populated" type="*v1.StorageClass" reflector="k8s.io/client-go/informers/factory.go:160" 
2025-12-08T17:42:42.239860220+00:00 stderr F I1208 17:42:42.239825 1 trace.go:236] Trace[140875455]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:29.068) (total time: 13170ms): 2025-12-08T17:42:42.239860220+00:00 stderr F Trace[140875455]: ---"Objects listed" error: 13170ms (17:42:42.239) 2025-12-08T17:42:42.239860220+00:00 stderr F Trace[140875455]: [13.170939657s] [13.170939657s] END 2025-12-08T17:42:42.239860220+00:00 stderr F I1208 17:42:42.239850 1 reflector.go:430] "Caches populated" type="*v1.CSIDriver" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:42.249734319+00:00 stderr F I1208 17:42:42.249669 1 trace.go:236] Trace[708691382]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (08-Dec-2025 17:42:29.059) (total time: 13189ms): 2025-12-08T17:42:42.249734319+00:00 stderr F Trace[708691382]: ---"Objects listed" error: 13189ms (17:42:42.249) 2025-12-08T17:42:42.249734319+00:00 stderr F Trace[708691382]: [13.189771219s] [13.189771219s] END 2025-12-08T17:42:42.249734319+00:00 stderr F I1208 17:42:42.249708 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:43.344976154+00:00 stderr F I1208 17:42:43.342958 1 reflector.go:430] "Caches populated" type="*v1.ReplicaSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:43.483613996+00:00 stderr F I1208 17:42:43.483533 1 reflector.go:430] "Caches populated" type="*v1.ReplicationController" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:42:43.508684790+00:00 stderr F I1208 17:42:43.508609 1 leaderelection.go:257] attempting to acquire leader lease openshift-kube-scheduler/kube-scheduler... 2025-12-08T17:42:43.516187934+00:00 stderr F I1208 17:42:43.516146 1 leaderelection.go:271] successfully acquired lease openshift-kube-scheduler/kube-scheduler 2025-12-08T17:42:43.517315925+00:00 stderr F I1208 17:42:43.517284 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-apiserver/apiserver-9ddfb9f55-8h8fl" 2025-12-08T17:42:43.517500820+00:00 stderr F I1208 17:42:43.517478 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-apiserver/apiserver-9ddfb9f55-8h8fl" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
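[editor's note] The leaderelection.go lines above show this scheduler instance acquiring the openshift-kube-scheduler/kube-scheduler lock once its caches are populated. With the default leases lock, that election is backed by a coordination.k8s.io Lease object; a sketch of such an object is shown below, where the name and namespace come from the log line and everything under spec is an illustrative placeholder, not a value observed in this cluster.

    # Illustrative only: spec values are placeholders.
    apiVersion: coordination.k8s.io/v1
    kind: Lease
    metadata:
      name: kube-scheduler
      namespace: openshift-kube-scheduler
    spec:
      holderIdentity: kube-scheduler-pod_example-uid
      leaseDurationSeconds: 137
      renewTime: "2025-12-08T17:42:43.516000Z"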
2025-12-08T17:42:43.520539593+00:00 stderr F I1208 17:42:43.520474 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-machine-config-operator/machine-config-daemon-8vxnt" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:42:43.520815630+00:00 stderr F I1208 17:42:43.520769 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-dns/node-resolver-vk6p6" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:42:43.894090102+00:00 stderr F E1208 17:42:43.893998 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-apiserver/apiserver-9ddfb9f55-8h8fl" 2025-12-08T17:42:43.894612046+00:00 stderr F I1208 17:42:43.894548 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-controller-manager/controller-manager-65b6cccf98-6wjgz" 2025-12-08T17:42:43.894627316+00:00 stderr F I1208 17:42:43.894604 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-controller-manager/controller-manager-65b6cccf98-6wjgz" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:43.900921669+00:00 stderr F I1208 17:42:43.899028 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-multus/multus-dlvbf" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:42:43.900921669+00:00 stderr F I1208 17:42:43.899224 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-multus/multus-additional-cni-plugins-lq9nf" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:42:44.263918150+00:00 stderr F E1208 17:42:44.263781 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-controller-manager/controller-manager-65b6cccf98-6wjgz" 2025-12-08T17:42:44.264178687+00:00 stderr F I1208 17:42:44.264153 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-machine-api/machine-api-operator-755bb95488-5httz" 2025-12-08T17:42:44.264212228+00:00 stderr F I1208 17:42:44.264185 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-machine-api/machine-api-operator-755bb95488-5httz" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:44.269916333+00:00 stderr F I1208 17:42:44.269758 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-ovn-kubernetes/ovnkube-node-wr4x4" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:42:44.645724975+00:00 stderr F E1208 17:42:44.645165 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-machine-api/machine-api-operator-755bb95488-5httz" 2025-12-08T17:42:44.645724975+00:00 stderr F I1208 17:42:44.645371 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-oauth-apiserver/apiserver-8596bd845d-rdv9c" 2025-12-08T17:42:44.645724975+00:00 stderr F I1208 17:42:44.645400 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-oauth-apiserver/apiserver-8596bd845d-rdv9c" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:45.009084526+00:00 stderr F E1208 17:42:45.008502 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-oauth-apiserver/apiserver-8596bd845d-rdv9c" 2025-12-08T17:42:45.009084526+00:00 stderr F I1208 17:42:45.008799 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-qkg2q" 2025-12-08T17:42:45.009084526+00:00 stderr F I1208 17:42:45.008850 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-qkg2q" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:45.370144344+00:00 stderr F E1208 17:42:45.370065 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-qkg2q" 2025-12-08T17:42:45.371666516+00:00 stderr F I1208 17:42:45.370301 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-q6lj7" 2025-12-08T17:42:45.371666516+00:00 stderr F I1208 17:42:45.370325 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-q6lj7" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
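[editor's note] In the entries above, only node-level DaemonSet pods (machine-config-daemon, node-resolver, multus, ovnkube-node) bind to node "crc", while Deployment-managed pods stay pending with "untolerated taint {node.kubernetes.io/unreachable: }". That split is consistent with the DaemonSet pods carrying tolerations for node-condition taints (many such DaemonSets tolerate all taints via a bare operator: Exists). A minimal sketch of the kind of pod-spec toleration involved is shown below; the taint key comes from the scheduler's error message, and whether a given workload should tolerate an unreachable node is a separate question.

    # Pod-spec fragment, sketch only.
    tolerations:
    - key: node.kubernetes.io/unreachable
      operator: Exists
      effect: NoSchedule
    # Broad form used by many node-level DaemonSets (tolerates every taint):
    # tolerations:
    # - operator: Exists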
2025-12-08T17:42:45.378287887+00:00 stderr F I1208 17:42:45.378228 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-image-registry/node-ca-pvtml" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:42:45.748616668+00:00 stderr F E1208 17:42:45.748554 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-q6lj7" 2025-12-08T17:42:45.748959287+00:00 stderr F I1208 17:42:45.748928 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-cluster-machine-approver/machine-approver-54c688565-487qx" 2025-12-08T17:42:45.749058130+00:00 stderr F I1208 17:42:45.749029 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-cluster-machine-approver/machine-approver-54c688565-487qx" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:46.103456987+00:00 stderr F E1208 17:42:46.103398 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-cluster-machine-approver/machine-approver-54c688565-487qx" 2025-12-08T17:42:46.103701814+00:00 stderr F I1208 17:42:46.103671 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-5scww" 2025-12-08T17:42:46.103790556+00:00 stderr F I1208 17:42:46.103764 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-5scww" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:46.465315537+00:00 stderr F E1208 17:42:46.465230 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-5scww" 2025-12-08T17:42:46.465481362+00:00 stderr F I1208 17:42:46.465430 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-authentication/oauth-openshift-66458b6674-ztdrc" 2025-12-08T17:42:46.465513423+00:00 stderr F I1208 17:42:46.465487 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-authentication/oauth-openshift-66458b6674-ztdrc" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:46.846029452+00:00 stderr F E1208 17:42:46.845955 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-authentication/oauth-openshift-66458b6674-ztdrc" 2025-12-08T17:42:46.846177556+00:00 stderr F I1208 17:42:46.846143 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-console-operator/console-operator-67c89758df-79mps" 2025-12-08T17:42:46.846202157+00:00 stderr F I1208 17:42:46.846183 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-console-operator/console-operator-67c89758df-79mps" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:47.198695582+00:00 stderr F E1208 17:42:47.198630 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-console-operator/console-operator-67c89758df-79mps" 2025-12-08T17:42:47.198801055+00:00 stderr F I1208 17:42:47.198763 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-sft9f" 2025-12-08T17:42:47.198801055+00:00 stderr F I1208 17:42:47.198787 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-sft9f" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:47.576190229+00:00 stderr F E1208 17:42:47.576119 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-sft9f" 2025-12-08T17:42:47.576486637+00:00 stderr F I1208 17:42:47.576419 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-2cnx5" 2025-12-08T17:42:47.576580830+00:00 stderr F I1208 17:42:47.576552 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-2cnx5" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:47.989967645+00:00 stderr F E1208 17:42:47.989842 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-2cnx5" 2025-12-08T17:42:47.990210622+00:00 stderr F I1208 17:42:47.990154 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-config-operator/openshift-config-operator-5777786469-v69x6" 2025-12-08T17:42:47.990235932+00:00 stderr F I1208 17:42:47.990214 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-config-operator/openshift-config-operator-5777786469-v69x6" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:48.343337534+00:00 stderr F E1208 17:42:48.343262 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-config-operator/openshift-config-operator-5777786469-v69x6" 2025-12-08T17:42:48.343521820+00:00 stderr F I1208 17:42:48.343489 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-console/console-64d44f6ddf-dhfvx" 2025-12-08T17:42:48.343534450+00:00 stderr F I1208 17:42:48.343524 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-console/console-64d44f6ddf-dhfvx" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:48.765650054+00:00 stderr F E1208 17:42:48.765575 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-console/console-64d44f6ddf-dhfvx" 2025-12-08T17:42:48.765829558+00:00 stderr F I1208 17:42:48.765768 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-console/downloads-747b44746d-x7wvx" 2025-12-08T17:42:48.765856959+00:00 stderr F I1208 17:42:48.765842 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-console/downloads-747b44746d-x7wvx" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:49.118559520+00:00 stderr F E1208 17:42:49.118465 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-console/downloads-747b44746d-x7wvx" 2025-12-08T17:42:49.118791656+00:00 stderr F I1208 17:42:49.118762 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-rwgjl" 2025-12-08T17:42:49.118910059+00:00 stderr F I1208 17:42:49.118850 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-rwgjl" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:49.485509749+00:00 stderr F E1208 17:42:49.485417 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-rwgjl" 2025-12-08T17:42:49.485801287+00:00 stderr F I1208 17:42:49.485760 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-image-registry/image-registry-66587d64c8-s6hn4" 2025-12-08T17:42:49.486095465+00:00 stderr F I1208 17:42:49.485809 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-image-registry/image-registry-66587d64c8-s6hn4" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:49.855499272+00:00 stderr F E1208 17:42:49.855400 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-image-registry/image-registry-66587d64c8-s6hn4" 2025-12-08T17:42:49.855635105+00:00 stderr F I1208 17:42:49.855595 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-dns-operator/dns-operator-799b87ffcd-9b988" 2025-12-08T17:42:49.855667906+00:00 stderr F I1208 17:42:49.855639 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-dns-operator/dns-operator-799b87ffcd-9b988" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:50.218263687+00:00 stderr F E1208 17:42:50.218155 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-dns-operator/dns-operator-799b87ffcd-9b988" 2025-12-08T17:42:50.218479623+00:00 stderr F I1208 17:42:50.218419 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-etcd-operator/etcd-operator-69b85846b6-k26tc" 2025-12-08T17:42:50.218498923+00:00 stderr F I1208 17:42:50.218462 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-etcd-operator/etcd-operator-69b85846b6-k26tc" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:50.581229337+00:00 stderr F E1208 17:42:50.581118 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-etcd-operator/etcd-operator-69b85846b6-k26tc" 2025-12-08T17:42:50.581373351+00:00 stderr F I1208 17:42:50.581319 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-2pwhz" 2025-12-08T17:42:50.581393431+00:00 stderr F I1208 17:42:50.581368 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-2pwhz" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:50.938382990+00:00 stderr F E1208 17:42:50.938247 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-2pwhz" 2025-12-08T17:42:50.938438861+00:00 stderr F I1208 17:42:50.938422 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-m5ltz" 2025-12-08T17:42:50.938494013+00:00 stderr F I1208 17:42:50.938450 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-m5ltz" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:51.314847828+00:00 stderr F E1208 17:42:51.314779 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-m5ltz" 2025-12-08T17:42:51.315108965+00:00 stderr F I1208 17:42:51.315006 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-6gkgz" 2025-12-08T17:42:51.315108965+00:00 stderr F I1208 17:42:51.315097 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-6gkgz" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:51.694620358+00:00 stderr F E1208 17:42:51.694553 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-6gkgz" 2025-12-08T17:42:51.694761192+00:00 stderr F I1208 17:42:51.694737 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-6lgwk" 2025-12-08T17:42:51.694851404+00:00 stderr F I1208 17:42:51.694777 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-6lgwk" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:52.047095332+00:00 stderr F E1208 17:42:52.046981 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-6lgwk" 2025-12-08T17:42:52.047172734+00:00 stderr F I1208 17:42:52.047154 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-bhk9x" 2025-12-08T17:42:52.047228656+00:00 stderr F I1208 17:42:52.047186 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-bhk9x" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:52.404904572+00:00 stderr F E1208 17:42:52.404796 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-bhk9x" 2025-12-08T17:42:52.405069746+00:00 stderr F I1208 17:42:52.405020 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-gvb6q" 2025-12-08T17:42:52.405171039+00:00 stderr F I1208 17:42:52.405111 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-gvb6q" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:52.768682694+00:00 stderr F E1208 17:42:52.768601 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-gvb6q" 2025-12-08T17:42:52.768954252+00:00 stderr F I1208 17:42:52.768836 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-ingress/router-default-68cf44c8b8-rscz2" 2025-12-08T17:42:52.769009473+00:00 stderr F I1208 17:42:52.768975 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-ingress/router-default-68cf44c8b8-rscz2" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:53.146129890+00:00 stderr F E1208 17:42:53.146018 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-ingress/router-default-68cf44c8b8-rscz2" 2025-12-08T17:42:53.146333396+00:00 stderr F I1208 17:42:53.146267 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-p88k2" 2025-12-08T17:42:53.146357036+00:00 stderr F I1208 17:42:53.146326 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-p88k2" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:53.515807094+00:00 stderr F E1208 17:42:53.515709 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-p88k2" 2025-12-08T17:42:53.515991689+00:00 stderr F I1208 17:42:53.515939 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-4g75z" 2025-12-08T17:42:53.516014490+00:00 stderr F I1208 17:42:53.515987 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-4g75z" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:53.900556418+00:00 stderr F E1208 17:42:53.900433 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-4g75z" 2025-12-08T17:42:53.900638511+00:00 stderr F I1208 17:42:53.900622 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-multus/multus-admission-controller-69db94689b-v9sxk" 2025-12-08T17:42:53.900840656+00:00 stderr F I1208 17:42:53.900651 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-multus/multus-admission-controller-69db94689b-v9sxk" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:54.260253821+00:00 stderr F E1208 17:42:54.260099 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-multus/multus-admission-controller-69db94689b-v9sxk" 2025-12-08T17:42:54.260451536+00:00 stderr F I1208 17:42:54.260346 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-5pp5q" 2025-12-08T17:42:54.260451536+00:00 stderr F I1208 17:42:54.260392 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-5pp5q" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:54.626347056+00:00 stderr F E1208 17:42:54.626278 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-5pp5q" 2025-12-08T17:42:54.626479050+00:00 stderr F I1208 17:42:54.626424 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-marketplace/marketplace-operator-547dbd544d-85wdh" 2025-12-08T17:42:54.626479050+00:00 stderr F I1208 17:42:54.626452 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-marketplace/marketplace-operator-547dbd544d-85wdh" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:54.984344912+00:00 stderr F E1208 17:42:54.984229 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-marketplace/marketplace-operator-547dbd544d-85wdh" 2025-12-08T17:42:54.984478345+00:00 stderr F I1208 17:42:54.984418 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-dhfht" 2025-12-08T17:42:54.984478345+00:00 stderr F I1208 17:42:54.984465 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-dhfht" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:55.328387876+00:00 stderr F E1208 17:42:55.328227 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-dhfht" 2025-12-08T17:42:55.328493089+00:00 stderr F I1208 17:42:55.328379 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-bl822" 2025-12-08T17:42:55.328493089+00:00 stderr F I1208 17:42:55.328402 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-bl822" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:55.673238183+00:00 stderr F E1208 17:42:55.673135 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-bl822" 2025-12-08T17:42:55.673351256+00:00 stderr F I1208 17:42:55.673300 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-d8qsj" 2025-12-08T17:42:55.673351256+00:00 stderr F I1208 17:42:55.673329 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-d8qsj" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:56.029532911+00:00 stderr F E1208 17:42:56.029445 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-d8qsj" 2025-12-08T17:42:56.029712366+00:00 stderr F I1208 17:42:56.029640 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-4kjg6" 2025-12-08T17:42:56.029712366+00:00 stderr F I1208 17:42:56.029676 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-4kjg6" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:56.413392201+00:00 stderr F E1208 17:42:56.413281 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-4kjg6" 2025-12-08T17:42:56.413468863+00:00 stderr F I1208 17:42:56.413436 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-ggh59" 2025-12-08T17:42:56.413486984+00:00 stderr F I1208 17:42:56.413466 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-ggh59" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:56.796723458+00:00 stderr F E1208 17:42:56.796635 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-ggh59" 2025-12-08T17:42:56.797078017+00:00 stderr F I1208 17:42:56.797029 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-service-ca/service-ca-74545575db-d69qv" 2025-12-08T17:42:56.797102438+00:00 stderr F I1208 17:42:56.797076 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-service-ca/service-ca-74545575db-d69qv" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:56.802951647+00:00 stderr F I1208 17:42:56.802530 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-x68jp" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:42:57.150719833+00:00 stderr F E1208 17:42:57.150617 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-service-ca/service-ca-74545575db-d69qv" 2025-12-08T17:42:57.150804316+00:00 stderr F I1208 17:42:57.150779 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-cdz4v" 2025-12-08T17:42:57.150834606+00:00 stderr F I1208 17:42:57.150810 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-cdz4v" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 2025-12-08T17:42:57.499815236+00:00 stderr F E1208 17:42:57.499504 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-cdz4v" 2025-12-08T17:42:57.499815236+00:00 stderr F I1208 17:42:57.499647 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-operator-lifecycle-manager/collect-profiles-29420250-qhrfp" 2025-12-08T17:42:57.499815236+00:00 stderr F I1208 17:42:57.499671 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-operator-lifecycle-manager/collect-profiles-29420250-qhrfp" err="0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling." 
2025-12-08T17:42:57.858732976+00:00 stderr F E1208 17:42:57.858650 1 schedule_one.go:1095] "Error updating pod" err="Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" pod="openshift-operator-lifecycle-manager/collect-profiles-29420250-qhrfp" 2025-12-08T17:42:57.864071762+00:00 stderr F I1208 17:42:57.863982 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-multus/network-metrics-daemon-54w78" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:10.073157050+00:00 stderr F I1208 17:44:10.073075 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-sft9f" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.745637808+00:00 stderr F I1208 17:44:15.745559 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-q6lj7" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.745759892+00:00 stderr F I1208 17:44:15.745731 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operator-lifecycle-manager/collect-profiles-29420250-qhrfp" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.745773742+00:00 stderr F I1208 17:44:15.745766 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-apiserver/apiserver-9ddfb9f55-8h8fl" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.745825563+00:00 stderr F I1208 17:44:15.745793 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-controller-manager/controller-manager-65b6cccf98-6wjgz" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.745837834+00:00 stderr F I1208 17:44:15.745827 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-service-ca/service-ca-74545575db-d69qv" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.745893865+00:00 stderr F I1208 17:44:15.745849 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-oauth-apiserver/apiserver-8596bd845d-rdv9c" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.745957517+00:00 stderr F I1208 17:44:15.745921 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-qkg2q" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.745957517+00:00 stderr F I1208 17:44:15.745951 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-bhk9x" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.745997278+00:00 stderr F I1208 17:44:15.745974 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-p88k2" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746009648+00:00 stderr F I1208 17:44:15.746002 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-4g75z" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746046259+00:00 stderr F I1208 17:44:15.746023 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-5pp5q" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746056480+00:00 stderr F I1208 
17:44:15.746049 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-dhfht" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746092671+00:00 stderr F I1208 17:44:15.746070 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-ingress/router-default-68cf44c8b8-rscz2" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746123241+00:00 stderr F I1208 17:44:15.746100 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-machine-api/machine-api-operator-755bb95488-5httz" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746157392+00:00 stderr F I1208 17:44:15.746135 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-d8qsj" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746186903+00:00 stderr F I1208 17:44:15.746165 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-bl822" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746222834+00:00 stderr F I1208 17:44:15.746201 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-6lgwk" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746262455+00:00 stderr F I1208 17:44:15.746239 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-rwgjl" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746297646+00:00 stderr F I1208 17:44:15.746276 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-cluster-machine-approver/machine-approver-54c688565-487qx" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746336097+00:00 stderr F I1208 17:44:15.746314 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-ggh59" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746371688+00:00 stderr F I1208 17:44:15.746350 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-console/downloads-747b44746d-x7wvx" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746409969+00:00 stderr F I1208 17:44:15.746388 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-dns-operator/dns-operator-799b87ffcd-9b988" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746449230+00:00 stderr F I1208 17:44:15.746427 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-multus/multus-admission-controller-69db94689b-v9sxk" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746483601+00:00 stderr F I1208 17:44:15.746463 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-cdz4v" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746515502+00:00 stderr F I1208 17:44:15.746496 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-image-registry/image-registry-66587d64c8-s6hn4" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746547353+00:00 stderr F I1208 17:44:15.746529 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-gvb6q" node="crc" evaluatedNodes=1 feasibleNodes=1 
2025-12-08T17:44:15.746579664+00:00 stderr F I1208 17:44:15.746560 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/marketplace-operator-547dbd544d-85wdh" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.746610715+00:00 stderr F I1208 17:44:15.746591 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-4kjg6" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.765802 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-authentication/oauth-openshift-66458b6674-ztdrc" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.765852 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-2cnx5" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.765906 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-dns/dns-default-c5tbq" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.765929 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-config-operator/openshift-config-operator-5777786469-v69x6" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.765944 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-2pwhz" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.765962 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-6gkgz" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.765980 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-m5ltz" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.765995 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-console-operator/console-operator-67c89758df-79mps" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.766011 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-etcd-operator/etcd-operator-69b85846b6-k26tc" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.766026 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-console/console-64d44f6ddf-dhfvx" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.768557383+00:00 stderr F I1208 17:44:15.766040 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-5scww" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.780080368+00:00 stderr F I1208 17:44:15.780024 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-machine-config-operator/machine-config-server-psb45" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.827124571+00:00 stderr F I1208 17:44:15.825569 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-multus/cni-sysctl-allowlist-ds-bdhnb" node="crc" evaluatedNodes=1 
feasibleNodes=1 2025-12-08T17:44:15.832363494+00:00 stderr F I1208 17:44:15.832317 1 schedule_one.go:314] "Successfully bound pod to node" pod="hostpath-provisioner/csi-hostpathplugin-qrls7" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:15.923482660+00:00 stderr F I1208 17:44:15.922999 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-ingress-canary/ingress-canary-psjrr" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:21.890656916+00:00 stderr F I1208 17:44:21.889658 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/community-operators-r22jf" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:22.074059958+00:00 stderr F I1208 17:44:22.073119 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/certified-operators-lxwl6" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:22.281203509+00:00 stderr F I1208 17:44:22.279138 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/community-operators-sb7gg" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:22.470921394+00:00 stderr F I1208 17:44:22.469301 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/certified-operators-n5vp7" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:24.081625039+00:00 stderr F I1208 17:44:24.081570 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/redhat-marketplace-rvglb" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:24.481532457+00:00 stderr F I1208 17:44:24.481484 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/redhat-marketplace-6m6rs" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:25.085151292+00:00 stderr F I1208 17:44:25.084994 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/redhat-operators-zfv6j" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:25.509620600+00:00 stderr F I1208 17:44:25.509500 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/redhat-operators-w7jrs" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.604485 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.604459611 +0000 UTC))" 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.604525 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.604518313 +0000 UTC))" 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.604539 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.604534013 +0000 UTC))" 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.604554 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 
certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.604547763 +0000 UTC))" 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.604568 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.604562074 +0000 UTC))" 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.604581 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.604576384 +0000 UTC))" 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.604596 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.604591215 +0000 UTC))" 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.604611 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.604605255 +0000 UTC))" 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.604627 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.604621995 +0000 UTC))" 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.604857 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"scheduler.openshift-kube-scheduler.svc\" [serving] validServingFor=[scheduler.openshift-kube-scheduler.svc,scheduler.openshift-kube-scheduler.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:44:30.604847321 +0000 UTC))" 2025-12-08T17:44:30.609339514+00:00 stderr F I1208 17:44:30.605071 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215745\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215745\" (2025-12-08 16:42:24 +0000 UTC to 2028-12-08 16:42:24 +0000 UTC (now=2025-12-08 17:44:30.605060807 +0000 UTC))" 2025-12-08T17:45:00.125969603+00:00 stderr F I1208 17:45:00.125894 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operator-lifecycle-manager/collect-profiles-29420265-vsxwc" node="crc" evaluatedNodes=1 
feasibleNodes=1 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040376 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.04035148 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040406 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.040399702 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040417 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.040412922 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040428 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.040424432 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040438 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.040434453 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040449 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.040445163 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040460 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.040456483 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040471 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.040467654 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040481 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 
+0000 UTC (now=2025-12-08 17:45:16.040477814 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040492 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.040488884 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040651 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key" certDetail="\"scheduler.openshift-kube-scheduler.svc\" [serving] validServingFor=[scheduler.openshift-kube-scheduler.svc,scheduler.openshift-kube-scheduler.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:15 +0000 UTC to 2027-11-02 07:52:16 +0000 UTC (now=2025-12-08 17:45:16.040645078 +0000 UTC))" 2025-12-08T17:45:16.042468260+00:00 stderr F I1208 17:45:16.040786 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215745\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215745\" (2025-12-08 16:42:24 +0000 UTC to 2028-12-08 16:42:24 +0000 UTC (now=2025-12-08 17:45:16.040778622 +0000 UTC))" 2025-12-08T17:45:22.135924120+00:00 stderr F I1208 17:45:22.135818 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-authentication/oauth-openshift-57ffdf54dd-5dg99" 2025-12-08T17:45:22.135924120+00:00 stderr F I1208 17:45:22.135854 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-authentication/oauth-openshift-57ffdf54dd-5dg99" err="0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules. preemption: 0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules." 
2025-12-08T17:45:47.564212137+00:00 stderr F I1208 17:45:47.563706 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-authentication/oauth-openshift-57ffdf54dd-5dg99" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:46:06.142598151+00:00 stderr F I1208 17:46:06.142478 1 cert_rotation.go:92] "Certificate rotation detected, shutting down client connections to start using new credentials" logger="tls-transport-cache" 2025-12-08T17:46:07.587234783+00:00 stderr F I1208 17:46:07.587068 1 cert_rotation.go:92] "Certificate rotation detected, shutting down client connections to start using new credentials" logger="tls-transport-cache" 2025-12-08T17:46:21.502895788+00:00 stderr F I1208 17:46:21.502792 1 reflector.go:430] "Caches populated" type="*v1.ReplicaSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:23.286541216+00:00 stderr F I1208 17:46:23.286446 1 reflector.go:430] "Caches populated" type="*v1.VolumeAttachment" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:23.496147438+00:00 stderr F I1208 17:46:23.493905 1 reflector.go:430] "Caches populated" type="*v1.CSIStorageCapacity" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:24.589469694+00:00 stderr F I1208 17:46:24.589381 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:25.343861368+00:00 stderr F I1208 17:46:25.343757 1 reflector.go:430] "Caches populated" type="*v1.PersistentVolumeClaim" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:26.380930426+00:00 stderr F I1208 17:46:26.380240 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:26.855730278+00:00 stderr F I1208 17:46:26.855669 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:27.288343033+00:00 stderr F I1208 17:46:27.288282 1 reflector.go:430] "Caches populated" type="*v1.CSINode" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:28.541583330+00:00 stderr F I1208 17:46:28.541530 1 reflector.go:430] "Caches populated" type="*v1.PodDisruptionBudget" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:28.624050895+00:00 stderr F I1208 17:46:28.623949 1 reflector.go:430] "Caches populated" type="*v1.PersistentVolume" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:29.037078023+00:00 stderr F I1208 17:46:29.036999 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:29.158940280+00:00 stderr F I1208 17:46:29.157963 1 reflector.go:430] "Caches populated" type="*v1.StatefulSet" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:31.123930861+00:00 stderr F I1208 17:46:31.123837 1 reflector.go:430] "Caches populated" type="*v1.CSIDriver" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:31.410285336+00:00 stderr F I1208 17:46:31.410220 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="runtime/asm_amd64.s:1700" 2025-12-08T17:46:31.949343476+00:00 stderr F I1208 17:46:31.949266 1 reflector.go:430] "Caches populated" type="*v1.StorageClass" reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:46:34.081184794+00:00 stderr F I1208 17:46:34.081078 1 reflector.go:430] "Caches populated" type="*v1.ReplicationController" 
reflector="k8s.io/client-go/informers/factory.go:160" 2025-12-08T17:47:22.093924873+00:00 stderr F I1208 17:47:22.093853 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-controller-manager/controller-manager-6cd9c44569-vhg58" 2025-12-08T17:47:22.094032207+00:00 stderr F I1208 17:47:22.094014 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-controller-manager/controller-manager-6cd9c44569-vhg58" err="0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules. preemption: 0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules." 2025-12-08T17:47:22.209000456+00:00 stderr F I1208 17:47:22.206572 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-route-controller-manager/route-controller-manager-6975b9f87f-8vkdj" 2025-12-08T17:47:22.209000456+00:00 stderr F I1208 17:47:22.206617 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-route-controller-manager/route-controller-manager-6975b9f87f-8vkdj" err="0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules. preemption: 0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules." 2025-12-08T17:47:22.557362331+00:00 stderr F I1208 17:47:22.557263 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-route-controller-manager/route-controller-manager-6975b9f87f-8vkdj" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:47:22.589267136+00:00 stderr F I1208 17:47:22.588671 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-controller-manager/controller-manager-6cd9c44569-vhg58" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:47:23.283401797+00:00 stderr F I1208 17:47:23.283339 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-controller-manager/controller-manager-5cb6f9d449-mjxkv" 2025-12-08T17:47:23.283401797+00:00 stderr F I1208 17:47:23.283380 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-controller-manager/controller-manager-5cb6f9d449-mjxkv" err="0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules. preemption: 0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules." 2025-12-08T17:47:24.422277307+00:00 stderr F I1208 17:47:24.422219 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-controller-manager/controller-manager-5cb6f9d449-mjxkv" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:48:03.284306988+00:00 stderr F I1208 17:48:03.283991 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-route-controller-manager/route-controller-manager-7dd6d6d8c8-wfznc" 2025-12-08T17:48:03.284306988+00:00 stderr F I1208 17:48:03.284036 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-route-controller-manager/route-controller-manager-7dd6d6d8c8-wfznc" err="0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules. preemption: 0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules." 
2025-12-08T17:48:03.749180112+00:00 stderr F I1208 17:48:03.748676 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-route-controller-manager/route-controller-manager-7dd6d6d8c8-wfznc" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:48:09.372629623+00:00 stderr F I1208 17:48:09.372567 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/marketplace-operator-547dbd544d-6bbtn" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:48:11.154839485+00:00 stderr F I1208 17:48:11.154763 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/certified-operators-58d6l" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:48:12.150509109+00:00 stderr F I1208 17:48:12.150081 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/redhat-marketplace-xp5vr" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:48:13.552414745+00:00 stderr F I1208 17:48:13.552341 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/redhat-operators-xpnf9" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:48:14.548221153+00:00 stderr F I1208 17:48:14.548148 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/community-operators-zdvxg" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:53:41.185133465+00:00 stderr F I1208 17:53:41.185011 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-lfp2m" 2025-12-08T17:53:41.185133465+00:00 stderr F I1208 17:53:41.185059 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-lfp2m" err="0/1 nodes are available: 1 node(s) didn't have free ports for the requested pod ports. preemption: 0/1 nodes are available: 1 node(s) didn't have free ports for the requested pod ports." 
2025-12-08T17:53:41.365642673+00:00 stderr F I1208 17:53:41.365579 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-lfp2m" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:53:41.720982955+00:00 stderr F I1208 17:53:41.720465 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-ovn-kubernetes/ovnkube-node-gpg4k" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:54:28.425402443+00:00 stderr F I1208 17:54:28.423844 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/certified-operators-tkpnz" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:54:41.496025965+00:00 stderr F I1208 17:54:41.495896 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/redhat-operators-hl4hq" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:54:51.190646528+00:00 stderr F I1208 17:54:51.190466 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-image-registry/image-registry-5d9d95bf5b-cmjbz" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:54:56.136556759+00:00 stderr F I1208 17:54:56.136397 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:04.136567419+00:00 stderr F I1208 17:55:04.135713 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:04.533643134+00:00 stderr F I1208 17:55:04.531382 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:15.610509092+00:00 stderr F I1208 17:55:15.609992 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operators/obo-prometheus-operator-86648f486b-4j9kn" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:15.730639255+00:00 stderr F I1208 17:55:15.729898 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:15.763801047+00:00 stderr F I1208 17:55:15.762347 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:15.922144579+00:00 stderr F I1208 17:55:15.921791 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operators/observability-operator-78c97476f4-mg4b2" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:16.187461278+00:00 stderr F I1208 17:55:16.187352 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operators/perses-operator-68bdb49cbf-m2cdr" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:16.770060217+00:00 stderr F I1208 17:55:16.768182 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/elastic-operator-c9c86658-4qchz" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:29.786983703+00:00 stderr F I1208 17:55:29.786888 1 schedule_one.go:314] "Successfully bound pod to node" pod="cert-manager-operator/cert-manager-operator-controller-manager-64c74584c4-qtkx9" node="crc" 
evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:40.662708600+00:00 stderr F I1208 17:55:40.661291 1 schedule_one.go:314] "Successfully bound pod to node" pod="cert-manager/cert-manager-webhook-7894b5b9b4-wdn4b" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:42.042534642+00:00 stderr F I1208 17:55:42.041955 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/elasticsearch-es-default-0" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:43.172916039+00:00 stderr F I1208 17:55:43.172712 1 schedule_one.go:314] "Successfully bound pod to node" pod="cert-manager/cert-manager-cainjector-7dbf76d5c8-fdk5q" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:55:59.985516164+00:00 stderr F I1208 17:55:59.985459 1 schedule_one.go:314] "Successfully bound pod to node" pod="cert-manager/cert-manager-858d87f86b-7q2ss" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:56:07.484828143+00:00 stderr F I1208 17:56:07.484021 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/infrawatch-operators-xmhcm" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:56:12.684272885+00:00 stderr F I1208 17:56:12.684184 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/infrawatch-operators-tv99j" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:56:27.946841537+00:00 stderr F I1208 17:56:27.946437 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:56:28.725782648+00:00 stderr F I1208 17:56:28.725471 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:56:29.524826219+00:00 stderr F I1208 17:56:29.524760 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:56:39.596323083+00:00 stderr F I1208 17:56:39.592495 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/interconnect-operator-78b9bd8798-456sz" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:56:41.042488492+00:00 stderr F I1208 17:56:41.042408 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/service-telemetry-operator-79647f8775-zs8hl" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:56:42.320658519+00:00 stderr F I1208 17:56:42.318974 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/smart-gateway-operator-5cd794ff55-w8r45" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:57:28.548682148+00:00 stderr F I1208 17:57:28.548598 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/default-interconnect-55bf8d5cb-76n5w" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:57:38.574486110+00:00 stderr F I1208 17:57:38.574409 1 binder.go:715] "All PVCs for pod are bound" pod="service-telemetry/prometheus-default-0" 2025-12-08T17:57:38.579155850+00:00 stderr F I1208 17:57:38.579026 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/prometheus-default-0" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:57:48.450179203+00:00 stderr F I1208 17:57:48.444269 1 schedule_one.go:314] "Successfully bound pod to node" 
pod="service-telemetry/default-snmp-webhook-6774d8dfbc-75fxn" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:57:52.340258565+00:00 stderr F I1208 17:57:52.340179 1 binder.go:715] "All PVCs for pod are bound" pod="service-telemetry/alertmanager-default-0" 2025-12-08T17:57:52.344946716+00:00 stderr F I1208 17:57:52.344896 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/alertmanager-default-0" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:58:06.122232909+00:00 stderr F I1208 17:58:06.120343 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/default-cloud1-coll-meter-smartgateway-787645d794-4zrzx" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:58:08.902110657+00:00 stderr F I1208 17:58:08.901933 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:58:12.995859563+00:00 stderr F I1208 17:58:12.995783 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:58:20.099480230+00:00 stderr F I1208 17:58:20.099416 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/default-cloud1-coll-event-smartgateway-d956b4648-jwkwn" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:58:21.425774668+00:00 stderr F I1208 17:58:21.418335 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:58:34.383269834+00:00 stderr F I1208 17:58:34.383200 1 preemption.go:264] "No preemption candidate is found; preemption is not helpful for scheduling" pod="service-telemetry/default-interconnect-55bf8d5cb-rwr2k" 2025-12-08T17:58:34.383269834+00:00 stderr F I1208 17:58:34.383238 1 schedule_one.go:1044] "Unable to schedule pod; no fit; waiting" pod="service-telemetry/default-interconnect-55bf8d5cb-rwr2k" err="0/1 nodes are available: 1 node(s) didn't match pod anti-affinity rules. preemption: 0/1 nodes are available: 1 No preemption victims found for incoming pod." 
2025-12-08T17:58:34.743434572+00:00 stderr F I1208 17:58:34.743354 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/default-interconnect-55bf8d5cb-rwr2k" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:59:05.505335404+00:00 stderr F I1208 17:59:05.505157 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/qdr-test" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:59:14.153757137+00:00 stderr F I1208 17:59:14.153615 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/stf-smoketest-smoke1-pbhxq" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:59:14.579507618+00:00 stderr F I1208 17:59:14.579421 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/curl" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T17:59:51.683848733+00:00 stderr F I1208 17:59:51.683724 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/community-operators-jlbqc" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T18:00:00.165579715+00:00 stderr F I1208 18:00:00.165458 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-operator-lifecycle-manager/collect-profiles-29420280-hxvtb" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T18:01:18.236707788+00:00 stderr F I1208 18:01:18.236625 1 schedule_one.go:314] "Successfully bound pod to node" pod="service-telemetry/infrawatch-operators-b88kp" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T18:02:40.541847050+00:00 stderr F I1208 18:02:40.541545 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-must-gather-gctth/must-gather-5cz8j" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T18:04:31.732158413+00:00 stderr F I1208 18:04:31.732037 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/certified-operators-p8pz8" node="crc" evaluatedNodes=1 feasibleNodes=1 2025-12-08T18:04:42.450251409+00:00 stderr F I1208 18:04:42.450143 1 schedule_one.go:314] "Successfully bound pod to node" pod="openshift-marketplace/redhat-operators-5gtms" node="crc" evaluatedNodes=1 feasibleNodes=1 ././@LongLink0000644000000000000000000000031100000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-recovery-controller/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000755000175000017500000000000015115611521033021 5ustar zuulzuul././@LongLink0000644000000000000000000000031600000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-recovery-controller/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000644000175000017500000002350015115611514033025 0ustar zuulzuul2025-12-08T17:42:25.075537346+00:00 stderr F + timeout 3m /bin/bash -exuo pipefail -c 'while [ -n "$(ss -Htanop \( sport = 11443 \))" ]; do sleep 1; done' 2025-12-08T17:42:25.079753757+00:00 stderr F ++ ss -Htanop '(' sport = 11443 ')' 2025-12-08T17:42:25.088720892+00:00 stderr F + '[' -n '' ']' 2025-12-08T17:42:25.089785409+00:00 stderr F + exec cluster-kube-scheduler-operator cert-recovery-controller 
--kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/kube-scheduler-cert-syncer-kubeconfig/kubeconfig --namespace=openshift-kube-scheduler --listen=0.0.0.0:11443 -v=2 2025-12-08T17:42:25.161235870+00:00 stderr F W1208 17:42:25.161108 1 cmd.go:257] Using insecure, self-signed certificates 2025-12-08T17:42:25.161343555+00:00 stderr F I1208 17:42:25.161317 1 crypto.go:594] Generating new CA for cert-recovery-controller-signer@1765215745 cert, and key in /tmp/serving-cert-87551787/serving-signer.crt, /tmp/serving-cert-87551787/serving-signer.key 2025-12-08T17:42:25.161343555+00:00 stderr F Validity period of the certificate for "cert-recovery-controller-signer@1765215745" is unset, resetting to 157680000000000000 years! 2025-12-08T17:42:25.611320681+00:00 stderr F I1208 17:42:25.611257 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:42:25.612086498+00:00 stderr F I1208 17:42:25.612023 1 observer_polling.go:159] Starting file observer 2025-12-08T17:42:25.612104078+00:00 stderr F I1208 17:42:25.612087 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:42:25.612104078+00:00 stderr F I1208 17:42:25.612098 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:42:25.612113218+00:00 stderr F I1208 17:42:25.612103 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:42:25.612113218+00:00 stderr F I1208 17:42:25.612108 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true 2025-12-08T17:42:25.612121508+00:00 stderr F I1208 17:42:25.612112 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:42:25.613525954+00:00 stderr F W1208 17:42:25.613487 1 builder.go:272] unable to get owner reference (falling back to namespace): Get "https://localhost:6443/api/v1/namespaces/openshift-kube-scheduler/pods": dial tcp [::1]:6443: connect: connection refused 2025-12-08T17:42:25.613621204+00:00 stderr F I1208 17:42:25.613598 1 builder.go:304] cert-recovery-controller version v0.0.0-master+$Format:%H$-$Format:%H$ 2025-12-08T17:42:25.614320192+00:00 stderr F W1208 17:42:25.614288 1 builder.go:364] unable to get control plane topology, using HA cluster values for leader election: Get "https://localhost:6443/apis/config.openshift.io/v1/infrastructures/cluster": dial tcp [::1]:6443: connect: connection refused 2025-12-08T17:42:25.614451611+00:00 stderr F I1208 17:42:25.614401 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"openshift-kube-scheduler", Name:"openshift-kube-scheduler", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ControlPlaneTopology' unable to get control plane topology, using HA cluster values for leader election: Get "https://localhost:6443/apis/config.openshift.io/v1/infrastructures/cluster": dial tcp [::1]:6443: connect: connection refused 2025-12-08T17:42:25.615282218+00:00 stderr F I1208 17:42:25.615243 1 leaderelection.go:257] attempting to acquire leader lease openshift-kube-scheduler/cert-recovery-controller-lock... 
2025-12-08T17:42:25.615656117+00:00 stderr F E1208 17:42:25.615619 1 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://localhost:6443/api/v1/namespaces/openshift-kube-scheduler/events\": dial tcp [::1]:6443: connect: connection refused" event="&Event{ObjectMeta:{openshift-kube-scheduler.187f4e5e3d434ec6 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Namespace,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:ControlPlaneTopology,Message:unable to get control plane topology, using HA cluster values for leader election: Get \"https://localhost:6443/apis/config.openshift.io/v1/infrastructures/cluster\": dial tcp [::1]:6443: connect: connection refused,Source:EventSource{Component:cert-recovery-controller,Host:,},FirstTimestamp:2025-12-08 17:42:25.614237382 +0000 UTC m=+0.519992043,LastTimestamp:2025-12-08 17:42:25.614237382 +0000 UTC m=+0.519992043,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:cert-recovery-controller,ReportingInstance:,}" 2025-12-08T17:42:25.615844816+00:00 stderr F E1208 17:42:25.615816 1 leaderelection.go:436] error retrieving resource lock openshift-kube-scheduler/cert-recovery-controller-lock: Get "https://localhost:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-scheduler/leases/cert-recovery-controller-lock?timeout=1m47s": dial tcp [::1]:6443: connect: connection refused 2025-12-08T17:43:21.307866643+00:00 stderr F I1208 17:43:21.307769 1 leaderelection.go:271] successfully acquired lease openshift-kube-scheduler/cert-recovery-controller-lock 2025-12-08T17:43:21.308154031+00:00 stderr F I1208 17:43:21.308087 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-kube-scheduler", Name:"cert-recovery-controller-lock", UID:"206effe2-6ff2-459d-9797-0e93d6570b44", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"36306", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' crc_7fb41bff-d77b-4646-a2c4-34857487186d became leader 2025-12-08T17:43:21.310108003+00:00 stderr F I1208 17:43:21.310040 1 base_controller.go:76] Waiting for caches to sync for kube-scheduler 2025-12-08T17:43:21.313934879+00:00 stderr F I1208 17:43:21.313868 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=kubeschedulers" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:43:21.314327849+00:00 stderr F I1208 17:43:21.314277 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:43:21.318318168+00:00 stderr F I1208 17:43:21.318252 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:43:21.318650228+00:00 stderr F I1208 17:43:21.318597 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:43:21.335864787+00:00 stderr F I1208 17:43:21.335753 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:43:21.410699108+00:00 stderr F I1208 17:43:21.410587 1 base_controller.go:82] Caches are synced for kube-scheduler 2025-12-08T17:43:21.410699108+00:00 stderr F I1208 17:43:21.410653 1 base_controller.go:119] Starting #1 worker of kube-scheduler 
controller ... 2025-12-08T17:44:26.782973963+00:00 stderr F I1208 17:44:26.782919 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"openshift-kube-scheduler", Name:"openshift-kube-scheduler", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/kube-scheduler-client-cert-key -n openshift-kube-scheduler because it changed 2025-12-08T17:44:31.603792680+00:00 stderr F I1208 17:44:31.603702 1 event.go:377] Event(v1.ObjectReference{Kind:"Namespace", Namespace:"openshift-kube-scheduler", Name:"openshift-kube-scheduler", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'SecretUpdated' Updated Secret/kube-scheduler-client-cert-key -n openshift-kube-scheduler because it changed 2025-12-08T17:45:57.342524321+00:00 stderr F E1208 17:45:57.342428 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://localhost:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-scheduler/leases/cert-recovery-controller-lock?timeout=1m47s": dial tcp [::1]:6443: connect: connection refused, falling back to slow path 2025-12-08T17:45:57.344358956+00:00 stderr F E1208 17:45:57.344295 1 leaderelection.go:436] error retrieving resource lock openshift-kube-scheduler/cert-recovery-controller-lock: Get "https://localhost:6443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-scheduler/leases/cert-recovery-controller-lock?timeout=1m47s": dial tcp [::1]:6443: connect: connection refused 2025-12-08T17:46:26.665324302+00:00 stderr F I1208 17:46:26.665226 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:29.187980971+00:00 stderr F I1208 17:46:29.187002 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:29.575215005+00:00 stderr F I1208 17:46:29.575122 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:31.558739652+00:00 stderr F I1208 17:46:31.558649 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:35.327410051+00:00 stderr F I1208 17:46:35.327353 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=kubeschedulers" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" ././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/wait-for-host-port/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000755000175000017500000000000015115611521033021 5ustar zuulzuul././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/wait-for-host-port/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000644000175000017500000000012515115611514033023 0ustar zuulzuul2025-12-08T17:42:24.373274553+00:00 stdout P Waiting for port :10259 to be released. 
././@LongLink0000644000000000000000000000030100000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-cert-syncer/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000755000175000017500000000000015115611521033021 5ustar zuulzuul././@LongLink0000644000000000000000000000030600000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-cert-syncer/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000644000175000017500000002073415115611521033031 0ustar zuulzuul2025-12-08T17:42:25.146258507+00:00 stderr F I1208 17:42:25.145913 1 observer_polling.go:159] Starting file observer 2025-12-08T17:42:25.146416754+00:00 stderr F I1208 17:42:25.146249 1 base_controller.go:76] Waiting for caches to sync for CertSyncController 2025-12-08T17:42:25.150453487+00:00 stderr F E1208 17:42:25.150008 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ConfigMap: Get \"https://localhost:6443/api/v1/namespaces/openshift-kube-scheduler/configmaps?limit=500&resourceVersion=0\": dial tcp [::1]:6443: connect: connection refused" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ConfigMap" 2025-12-08T17:42:25.150516309+00:00 stderr F E1208 17:42:25.150488 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Secret: Ge********** \"https://localhost:6443/api/v1/namespaces/openshift-kube-scheduler/secrets?limit=500&resourceVersion=0\": dial tcp [::1]:6443: connect: connection refused" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.Secret" 2025-12-08T17:42:25.974826299+00:00 stderr F E1208 17:42:25.974750 1 reflector.go:200] "Failed to watch" err="failed to list *v1.Secret: Ge********** \"https://localhost:6443/api/v1/namespaces/openshift-kube-scheduler/secrets?limit=500&resourceVersion=0\": dial tcp [::1]:6443: connect: connection refused" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.Secret" 2025-12-08T17:42:26.068100313+00:00 stderr F E1208 17:42:26.068012 1 reflector.go:200] "Failed to watch" err="failed to list *v1.ConfigMap: Get \"https://localhost:6443/api/v1/namespaces/openshift-kube-scheduler/configmaps?limit=500&resourceVersion=0\": dial tcp [::1]:6443: connect: connection refused" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" type="*v1.ConfigMap" 2025-12-08T17:42:42.448993484+00:00 stderr F I1208 17:42:42.447984 1 base_controller.go:82] Caches are synced for CertSyncController 2025-12-08T17:42:42.448993484+00:00 stderr F I1208 17:42:42.448051 1 base_controller.go:119] Starting #1 worker of CertSyncController controller ... 
2025-12-08T17:42:42.448993484+00:00 stderr F I1208 17:42:42.448132 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:42:42.448993484+00:00 stderr F I1208 17:42:42.448146 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:44:26.780837914+00:00 stderr F I1208 17:44:26.780760 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:44:26.780837914+00:00 stderr F I1208 17:44:26.780800 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:44:26.787625480+00:00 stderr F I1208 17:44:26.786859 1 certsync_controller.go:260] Creating directory "/etc/kubernetes/static-pod-certs/secrets/kube-scheduler-client-cert-key" ... 2025-12-08T17:44:26.787625480+00:00 stderr F I1208 17:44:26.786930 1 certsync_controller.go:274] Writing secret manifest "/etc/kubernetes/static-pod-certs/secrets/kube-scheduler-client-cert-key/tls.crt" ... 2025-12-08T17:44:26.787625480+00:00 stderr F I1208 17:44:26.787247 1 certsync_controller.go:274] Writing secret manifest "/etc/kubernetes/static-pod-certs/secrets/kube-scheduler-client-cert-key/tls.key" ... 2025-12-08T17:44:26.787818315+00:00 stderr F I1208 17:44:26.787781 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-scheduler", Name:"openshift-kube-scheduler-crc", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CertificateUpdated' Wrote updated secret: op**********ey 2025-12-08T17:44:28.823440340+00:00 stderr F I1208 17:44:28.823391 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:44:28.823440340+00:00 stderr F I1208 17:44:28.823410 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:44:28.835181501+00:00 stderr F I1208 17:44:28.834425 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:44:28.835181501+00:00 stderr F I1208 17:44:28.834447 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:44:28.848121253+00:00 stderr F I1208 17:44:28.845112 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:44:28.848121253+00:00 stderr F I1208 17:44:28.845132 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:44:28.861059417+00:00 stderr F I1208 17:44:28.860247 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:44:28.861059417+00:00 stderr F I1208 17:44:28.860269 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:44:28.912432978+00:00 stderr F I1208 17:44:28.912125 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:44:28.912432978+00:00 stderr F I1208 17:44:28.912155 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:44:28.924400784+00:00 stderr F I1208 17:44:28.924331 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:44:28.924400784+00:00 stderr F I1208 17:44:28.924345 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:44:31.601069856+00:00 stderr F I1208 17:44:31.600982 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:44:31.601069856+00:00 stderr F I1208 17:44:31.601005 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:44:31.606061012+00:00 stderr F I1208 17:44:31.603243 1 certsync_controller.go:260] Creating directory 
"/etc/kubernetes/static-pod-certs/secrets/kube-scheduler-client-cert-key" ... 2025-12-08T17:44:31.606061012+00:00 stderr F I1208 17:44:31.603273 1 certsync_controller.go:274] Writing secret manifest "/etc/kubernetes/static-pod-certs/secrets/kube-scheduler-client-cert-key/tls.crt" ... 2025-12-08T17:44:31.606061012+00:00 stderr F I1208 17:44:31.603448 1 certsync_controller.go:274] Writing secret manifest "/etc/kubernetes/static-pod-certs/secrets/kube-scheduler-client-cert-key/tls.key" ... 2025-12-08T17:44:31.606061012+00:00 stderr F I1208 17:44:31.604183 1 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"openshift-kube-scheduler", Name:"openshift-kube-scheduler-crc", UID:"", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'CertificateUpdated' Wrote updated secret: op**********ey 2025-12-08T17:46:22.953393706+00:00 stderr F I1208 17:46:22.952694 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:46:22.953393706+00:00 stderr F I1208 17:46:22.953279 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:46:22.953476999+00:00 stderr F I1208 17:46:22.953441 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:46:22.953476999+00:00 stderr F I1208 17:46:22.953451 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:46:30.452195638+00:00 stderr F I1208 17:46:30.451754 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:46:30.452195638+00:00 stderr F I1208 17:46:30.451775 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:56:22.953443338+00:00 stderr F I1208 17:56:22.953306 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:56:22.953443338+00:00 stderr F I1208 17:56:22.953360 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:56:22.953710575+00:00 stderr F I1208 17:56:22.953614 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:56:22.953710575+00:00 stderr F I1208 17:56:22.953646 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:56:30.452952637+00:00 stderr F I1208 17:56:30.452841 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:56:30.452952637+00:00 stderr F I1208 17:56:30.452872 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] 2025-12-08T17:56:30.453162752+00:00 stderr F I1208 17:56:30.453127 1 certsync_controller.go:74] Syncing configmaps: [] 2025-12-08T17:56:30.453162752+00:00 stderr F I1208 17:56:30.453143 1 certsync_controller.go:178] Syncing secrets: [{kube-scheduler-client-cert-key false}] ././@LongLink0000644000000000000000000000025300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611514033102 5ustar zuulzuul././@LongLink0000644000000000000000000000030000000000000011574 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33/default-interconnect/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611521033100 5ustar zuulzuul././@LongLink0000644000000000000000000000030500000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33/default-interconnect/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000003061015115611514033104 0ustar zuulzuul2025-12-08T17:58:37.861961977+00:00 stdout F Waiting for IP address... 2025-12-08T17:58:37.873040479+00:00 stdout F cat /etc/qpid-dispatch/sasl-users//guest | saslpasswd2 -c -p -u default-interconnect guest -f /tmp/qdrouterd.sasldb 2025-12-08T17:58:38.011923999+00:00 stderr F 2025-12-08 17:58:38.011810 +0000 SERVER (info) Container Name: default-interconnect-55bf8d5cb-rwr2k 2025-12-08T17:58:38.011969751+00:00 stderr F 2025-12-08 17:58:38.011943 +0000 ROUTER (info) Router started in Interior mode, area=0 id=default-interconnect-55bf8d5cb-rwr2k 2025-12-08T17:58:38.011969751+00:00 stderr F 2025-12-08 17:58:38.011951 +0000 ROUTER (info) Version: Red Hat AMQ Interconnect 1.10.9 (qpid-dispatch 1.14.0) 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012549 +0000 ROUTER (info) Router Engine Instantiated: id=default-interconnect-55bf8d5cb-rwr2k instance=1765216718 max_routers=128 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012716 +0000 ROUTER_CORE (info) Core module present but disabled: edge_router 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012751 +0000 ROUTER_CORE (info) Core module present but disabled: core_test_hooks 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012776 +0000 ROUTER_CORE (info) Core module enabled: edge_addr_tracking 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012784 +0000 ROUTER_CORE (info) Core module enabled: address_lookup_server 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012789 +0000 ROUTER_CORE (info) Core module enabled: address_lookup_client 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012815 +0000 ROUTER_CORE (info) Stuck delivery detection: Scan interval: 30 seconds, Delivery age threshold: 10 seconds 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012821 +0000 ROUTER_CORE (info) Core module enabled: stuck_delivery_detection 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012930 +0000 ROUTER_CORE (info) Core module enabled: mobile_sync 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012944 +0000 ROUTER_CORE (info) Streaming link scrubber: Scan interval: 30 seconds, max free pool: 128 links 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012956 +0000 ROUTER_CORE (info) Core module enabled: streaming_link_scruber 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012961 +0000 ROUTER_CORE (info) Router Core thread running. 
0/default-interconnect-55bf8d5cb-rwr2k 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012967 +0000 ROUTER_CORE (info) In-process subscription M/$management 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012979 +0000 ROUTER_CORE (info) In-process subscription L/$management 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012985 +0000 ROUTER_CORE (info) In-process subscription L/qdrouter 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.012999 +0000 ROUTER_CORE (info) In-process subscription T/qdrouter 2025-12-08T17:58:38.013058179+00:00 stderr F 2025-12-08 17:58:38.013005 +0000 ROUTER_CORE (info) In-process subscription L/qdhello 2025-12-08T17:58:38.013102740+00:00 stderr F 2025-12-08 17:58:38.013050 +0000 ROUTER_CORE (info) In-process subscription L/qdrouter.ma 2025-12-08T17:58:38.013102740+00:00 stderr F 2025-12-08 17:58:38.013059 +0000 ROUTER_CORE (info) In-process subscription T/qdrouter.ma 2025-12-08T17:58:38.029953754+00:00 stderr F 2025-12-08 17:58:38.026474 +0000 AGENT (info) Activating management agent on $_management_internal 2025-12-08T17:58:38.029953754+00:00 stderr F 2025-12-08 17:58:38.026540 +0000 ROUTER_CORE (info) In-process subscription L/$_management_internal 2025-12-08T17:58:38.029953754+00:00 stderr F 2025-12-08 17:58:38.027440 +0000 POLICY (info) Policy configured maxConnections: 65535, policyDir: '',access rules enabled: 'false', use hostname patterns: 'false' 2025-12-08T17:58:38.029953754+00:00 stderr F 2025-12-08 17:58:38.028372 +0000 POLICY (info) Policy fallback defaultVhost is defined: '$default' 2025-12-08T17:58:38.029953754+00:00 stderr F 2025-12-08 17:58:38.028841 +0000 CONN_MGR (info) Created SSL Profile with name openstack 2025-12-08T17:58:38.029953754+00:00 stderr F 2025-12-08 17:58:38.029725 +0000 CONN_MGR (info) Created SSL Profile with name inter-router 2025-12-08T17:58:38.038943922+00:00 stderr F 2025-12-08 17:58:38.037716 +0000 CONN_MGR (info) Configured Listener: 127.0.0.1:5672 proto=any, role=normal 2025-12-08T17:58:38.039497876+00:00 stderr F 2025-12-08 17:58:38.039416 +0000 CONN_MGR (info) Configured Listener: :8888 proto=any, role=normal, http 2025-12-08T17:58:38.039718152+00:00 stderr F 2025-12-08 17:58:38.039603 +0000 SERVER (info) HTTP server thread running 2025-12-08T17:58:38.040055651+00:00 stderr F 2025-12-08 17:58:38.039864 +0000 SERVER (notice) Listening for HTTP on :8888 2025-12-08T17:58:38.041333134+00:00 stderr F 2025-12-08 17:58:38.041299 +0000 CONN_MGR (info) Configured Listener: :55671 proto=any, role=inter-router, sslProfile=inter-router 2025-12-08T17:58:38.043064720+00:00 stderr F 2025-12-08 17:58:38.043024 +0000 CONN_MGR (info) Configured Listener: :5671 proto=any, role=edge, sslProfile=openstack 2025-12-08T17:58:38.044654742+00:00 stderr F 2025-12-08 17:58:38.044614 +0000 CONN_MGR (info) Configured Listener: :5673 proto=any, role=edge 2025-12-08T17:58:38.045888174+00:00 stderr F 2025-12-08 17:58:38.045577 +0000 SERVER (notice) Operational, 4 Threads Running (process ID 1) 2025-12-08T17:58:38.045888174+00:00 stderr F 2025-12-08 17:58:38.045744 +0000 SERVER (notice) Process VmSize 279.70 MiB (31.34 GiB available memory) 2025-12-08T17:58:38.046014348+00:00 stderr F 2025-12-08 17:58:38.045959 +0000 SERVER (notice) Listening on 127.0.0.1:5672 2025-12-08T17:58:38.046271874+00:00 stderr F 2025-12-08 17:58:38.046218 +0000 SERVER (notice) Listening on :55671 2025-12-08T17:58:38.046281585+00:00 stderr F 2025-12-08 17:58:38.046265 +0000 SERVER (notice) Listening on :5671 
2025-12-08T17:58:38.046348916+00:00 stderr F 2025-12-08 17:58:38.046318 +0000 SERVER (notice) Listening on :5673 2025-12-08T17:58:50.056460739+00:00 stderr F 2025-12-08 17:58:50.055045 +0000 SERVER (info) [C1] Accepted connection to :5673 from 10.217.0.77:52792 2025-12-08T17:58:50.063004043+00:00 stderr F 2025-12-08 17:58:50.059977 +0000 ROUTER (info) [C1] Connection Opened: dir=in host=10.217.0.77:52792 vhost= encrypted=no auth=ANONYMOUS user=anonymous container_id=bridge-3c8 props= 2025-12-08T17:58:50.063004043+00:00 stderr F 2025-12-08 17:58:50.061090 +0000 ROUTER_CORE (info) [C1][L8] Link attached: dir=out source={/collectd/cloud1-telemetry expire:sess} target={ expire:sess} 2025-12-08T17:58:51.236243744+00:00 stderr F 2025-12-08 17:58:51.236166 +0000 SERVER (info) [C2] Accepted connection to :5673 from 10.217.0.78:49706 2025-12-08T17:58:51.237450716+00:00 stderr F 2025-12-08 17:58:51.237415 +0000 ROUTER (info) [C2] Connection Opened: dir=in host=10.217.0.78:49706 vhost= encrypted=no auth=ANONYMOUS user=anonymous container_id=bridge-ee props= 2025-12-08T17:58:51.238136683+00:00 stderr F 2025-12-08 17:58:51.238079 +0000 ROUTER_CORE (info) [C2][L9] Link attached: dir=out source={/anycast/ceilometer/cloud1-metering.sample expire:sess} target={ expire:sess} 2025-12-08T17:58:53.116682634+00:00 stderr F 2025-12-08 17:58:53.115557 +0000 SERVER (info) [C3] Accepted connection to :5673 from 10.217.0.80:32824 2025-12-08T17:58:53.118900693+00:00 stderr F 2025-12-08 17:58:53.118329 +0000 ROUTER (info) [C3] Connection Opened: dir=in host=10.217.0.80:32824 vhost= encrypted=no auth=ANONYMOUS user=anonymous container_id=bridge-349 props= 2025-12-08T17:58:53.118900693+00:00 stderr F 2025-12-08 17:58:53.118552 +0000 ROUTER_CORE (info) [C3][L10] Link attached: dir=out source={/collectd/cloud1-notify expire:sess} target={ expire:sess} 2025-12-08T17:58:54.026926884+00:00 stderr F 2025-12-08 17:58:54.026842 +0000 SERVER (info) [C4] Accepted connection to :5673 from 10.217.0.79:51094 2025-12-08T17:58:54.027976941+00:00 stderr F 2025-12-08 17:58:54.027924 +0000 ROUTER (info) [C4] Connection Opened: dir=in host=10.217.0.79:51094 vhost= encrypted=no auth=ANONYMOUS user=anonymous container_id=bridge-349 props= 2025-12-08T17:58:54.028157106+00:00 stderr F 2025-12-08 17:58:54.028010 +0000 ROUTER_CORE (info) [C4][L11] Link attached: dir=out source={/sensubility/cloud1-telemetry expire:sess} target={ expire:sess} 2025-12-08T17:58:55.227612878+00:00 stderr F 2025-12-08 17:58:55.227498 +0000 SERVER (info) [C5] Accepted connection to :5673 from 10.217.0.81:39602 2025-12-08T17:58:55.229092298+00:00 stderr F 2025-12-08 17:58:55.229010 +0000 ROUTER (info) [C5] Connection Opened: dir=in host=10.217.0.81:39602 vhost= encrypted=no auth=ANONYMOUS user=anonymous container_id=bridge-3cd props= 2025-12-08T17:58:55.229724984+00:00 stderr F 2025-12-08 17:58:55.229666 +0000 ROUTER_CORE (info) [C5][L12] Link attached: dir=out source={/anycast/ceilometer/cloud1-event.sample expire:sess} target={ expire:sess} 2025-12-08T17:59:13.215243742+00:00 stderr F 2025-12-08 17:59:13.213016 +0000 SERVER (info) [C6] Accepted connection to :5671 from 10.217.0.83:36392 2025-12-08T17:59:13.280312608+00:00 stderr F 2025-12-08 17:59:13.280184 +0000 ROUTER (info) [C6] Connection Opened: dir=in host=10.217.0.83:36392 vhost= encrypted=TLSv1/SSLv3 auth=PLAIN user=guest@default-interconnect container_id=qdr-test.smoketest props={:product="qpid-dispatch-router", :version="1.19.0", :"qd.conn-id"=1, :"qd.annotations-version"=1} 
2025-12-08T17:59:13.283241115+00:00 stderr F 2025-12-08 17:59:13.283167 +0000 ROUTER_CORE (info) [C6][L13] Link attached: dir=in source={ expire:sess} target={_$qd.edge_heartbeat expire:link} 2025-12-08T17:59:13.283474141+00:00 stderr F 2025-12-08 17:59:13.283366 +0000 ROUTER_CORE (info) [C6][L14] Link attached: dir=in source={ expire:sess} target={_$qd.addr_lookup expire:link} 2025-12-08T17:59:13.283474141+00:00 stderr F 2025-12-08 17:59:13.283414 +0000 ROUTER_CORE (info) [C6][L15] Link attached: dir=out source={ expire:link} target={ expire:sess} 2025-12-08T17:59:13.283474141+00:00 stderr F 2025-12-08 17:59:13.283430 +0000 ROUTER_CORE (info) [C6][L16] Link attached: dir=in source={ expire:sess} target={ expire:sess} 2025-12-08T17:59:13.283474141+00:00 stderr F 2025-12-08 17:59:13.283460 +0000 ROUTER_CORE (info) [C6][L17] Link attached: dir=out source={qdr-test.smoketest expire:link caps::"qd.router-edge-downlink"} target={ expire:sess caps::"qd.router-edge-downlink"} 2025-12-08T17:59:13.283551623+00:00 stderr F 2025-12-08 17:59:13.283518 +0000 ROUTER_CORE (info) [C6][L18] Link attached: dir=out source={_$qd.edge_addr_tracking expire:link} target={ expire:sess} 2025-12-08T17:59:13.283582454+00:00 stderr F 2025-12-08 17:59:13.283560 +0000 ROUTER_CORE (info) [C6][L19] Link attached: dir=in source={ expire:sess} target={$management expire:link} 2025-12-08T17:59:13.283598575+00:00 stderr F 2025-12-08 17:59:13.283582 +0000 ROUTER_CORE (info) [C6][L20] Link attached: dir=out source={ expire:link} target={ expire:sess} 2025-12-08T17:59:30.652947573+00:00 stderr F 2025-12-08 17:59:30.650212 +0000 ROUTER_CORE (info) [C6][L21] Link attached: dir=in source={ expire:sess} target={sensubility/cloud1-telemetry expire:link} 2025-12-08T17:59:30.948866353+00:00 stderr F 2025-12-08 17:59:30.948771 +0000 ROUTER_CORE (info) [C6][L22] Link attached: dir=in source={ expire:sess} target={anycast/ceilometer/cloud1-event.sample expire:link} 2025-12-08T17:59:31.020948062+00:00 stderr F 2025-12-08 17:59:31.020341 +0000 ROUTER_CORE (info) [C6][L23] Link attached: dir=in source={ expire:sess} target={anycast/ceilometer/cloud1-metering.sample expire:link} 2025-12-08T17:59:31.157433209+00:00 stderr F 2025-12-08 17:59:31.156975 +0000 ROUTER_CORE (info) [C6][L22] Link cleanup deferred after IO processing: del=7 presett=0 psdrop=0 acc=7 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:31.157433209+00:00 stderr F 2025-12-08 17:59:31.157022 +0000 ROUTER_CORE (info) [C6][L23] Link cleanup deferred after IO processing: del=3 presett=0 psdrop=0 acc=3 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no 2025-12-08T17:59:40.072093150+00:00 stderr F 2025-12-08 17:59:40.070158 +0000 ROUTER_CORE (info) [C6][L13] Link blocked with zero credit for 27 seconds 2025-12-08T17:59:58.173369564+00:00 stderr F 2025-12-08 17:59:58.173287 +0000 ROUTER_CORE (info) [C6][L21] Link detached: del=10 presett=0 psdrop=0 acc=10 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no ././@LongLink0000644000000000000000000000023500000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_aler0000755000175000017500000000000015115611513033125 5ustar zuulzuul././@LongLink0000644000000000000000000000025200000000000011602 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/alertmanager/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_aler0000755000175000017500000000000015115611521033124 5ustar zuulzuul././@LongLink0000644000000000000000000000025700000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/alertmanager/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_aler0000644000175000017500000000325515115611513033134 0ustar zuulzuul2025-12-08T17:58:11.733303441+00:00 stderr F time=2025-12-08T17:58:11.733Z level=INFO source=main.go:192 msg="Starting Alertmanager" version="(version=0.29.0, branch=HEAD, revision=2f0cff51fd1cc761eeb671db43736341ca2ab511)" 2025-12-08T17:58:11.733303441+00:00 stderr F time=2025-12-08T17:58:11.733Z level=INFO source=main.go:193 msg="Build context" build_context="(go=go1.25.3, platform=linux/amd64, user=root@f4d6cb29d2f5, date=20251104-13:09:23, tags=netgo)" 2025-12-08T17:58:11.771538089+00:00 stderr F time=2025-12-08T17:58:11.771Z level=INFO source=coordinator.go:112 msg="Loading configuration file" component=configuration file=/etc/alertmanager/config_out/alertmanager.env.yaml 2025-12-08T17:58:11.771814146+00:00 stderr F time=2025-12-08T17:58:11.771Z level=INFO source=coordinator.go:125 msg="Completed loading of configuration file" component=configuration file=/etc/alertmanager/config_out/alertmanager.env.yaml 2025-12-08T17:58:11.776839046+00:00 stderr F time=2025-12-08T17:58:11.776Z level=INFO source=tls_config.go:346 msg="Listening on" address=127.0.0.1:9093 2025-12-08T17:58:11.776971740+00:00 stderr F time=2025-12-08T17:58:11.776Z level=INFO source=tls_config.go:385 msg="TLS is disabled." 
http2=false address=127.0.0.1:9093 2025-12-08T17:58:19.714455519+00:00 stderr F time=2025-12-08T17:58:19.714Z level=INFO source=coordinator.go:112 msg="Loading configuration file" component=configuration file=/etc/alertmanager/config_out/alertmanager.env.yaml 2025-12-08T17:58:19.714603983+00:00 stderr F time=2025-12-08T17:58:19.714Z level=INFO source=coordinator.go:125 msg="Completed loading of configuration file" component=configuration file=/etc/alertmanager/config_out/alertmanager.env.yaml ././@LongLink0000644000000000000000000000026200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/init-config-reloader/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_aler0000755000175000017500000000000015115611521033124 5ustar zuulzuul././@LongLink0000644000000000000000000000026700000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/init-config-reloader/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_aler0000644000175000017500000000130015115611513033121 0ustar zuulzuul2025-12-08T17:58:05.309599046+00:00 stdout F ts=2025-12-08T17:58:05.309497603Z level=info caller=/workspace/cmd/prometheus-config-reloader/main.go:148 msg="Starting prometheus-config-reloader" version="(version=1.24, branch=, revision=unknown)" build_context="(go=go1.24.6 (Red Hat 1.24.6-1.el9_6), platform=linux/amd64, user=, date=20251110-21:03:26, tags=unknown)" 2025-12-08T17:58:05.409321984+00:00 stdout F ts=2025-12-08T17:58:05.409248292Z level=info caller=/workspace/internal/goruntime/cpu.go:27 msg="Updating GOMAXPROCS=1: using minimum allowed GOMAXPROCS" 2025-12-08T17:58:05.511181296+00:00 stdout F level=info ts=2025-12-08T17:58:05.511084003Z caller=reloader.go:282 msg="reloading via HTTP" ././@LongLink0000644000000000000000000000025500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/config-reloader/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_aler0000755000175000017500000000000015115611520033123 5ustar zuulzuul././@LongLink0000644000000000000000000000026200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/config-reloader/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_aler0000644000175000017500000000430615115611513033132 0ustar zuulzuul2025-12-08T17:58:18.909308310+00:00 stdout F ts=2025-12-08T17:58:18.909162876Z level=info caller=/workspace/cmd/prometheus-config-reloader/main.go:148 msg="Starting prometheus-config-reloader" version="(version=1.24, branch=, revision=unknown)" build_context="(go=go1.24.6 (Red Hat 1.24.6-1.el9_6), platform=linux/amd64, user=, date=20251110-21:03:26, tags=unknown)" 2025-12-08T17:58:18.909763801+00:00 stdout F ts=2025-12-08T17:58:18.90971872Z level=info caller=/workspace/internal/goruntime/cpu.go:27 msg="Updating GOMAXPROCS=1: using minimum allowed GOMAXPROCS" 2025-12-08T17:58:19.509722517+00:00 stdout F level=info ts=2025-12-08T17:58:19.509568563Z 
caller=reloader.go:282 msg="reloading via HTTP" 2025-12-08T17:58:19.609513217+00:00 stdout F ts=2025-12-08T17:58:19.608927922Z level=info caller=/workspace/cmd/prometheus-config-reloader/main.go:202 msg="Starting web server for metrics" listen=localhost:8080 2025-12-08T17:58:19.609661041+00:00 stdout F ts=2025-12-08T17:58:19.609614249Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/github.com/prometheus/exporter-toolkit@v0.14.1/web/tls_config.go:346 msg="Listening on" address=127.0.0.1:8080 2025-12-08T17:58:19.609715442+00:00 stdout F ts=2025-12-08T17:58:19.609690731Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/github.com/prometheus/exporter-toolkit@v0.14.1/web/tls_config.go:385 msg="TLS is disabled." http2=false address=127.0.0.1:8080 2025-12-08T17:58:19.809224438+00:00 stdout F level=info ts=2025-12-08T17:58:19.809132386Z caller=reloader.go:548 msg="Reload triggered" cfg_in=/etc/alertmanager/config/alertmanager.yaml.gz cfg_out=/etc/alertmanager/config_out/alertmanager.env.yaml cfg_dirs= watched_dirs="/etc/alertmanager/config, /etc/alertmanager/secrets/default-alertmanager-proxy-tls, /etc/alertmanager/secrets/default-session-secret" 2025-12-08T17:58:19.809278570+00:00 stdout F level=info ts=2025-12-08T17:58:19.809248359Z caller=reloader.go:330 msg="started watching config file and directories for changes" cfg=/etc/alertmanager/config/alertmanager.yaml.gz cfgDirs= out=/etc/alertmanager/config_out/alertmanager.env.yaml dirs=/etc/alertmanager/config,/etc/alertmanager/secrets/default-alertmanager-proxy-tls,/etc/alertmanager/secrets/default-session-secret ././@LongLink0000644000000000000000000000025100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/oauth-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_aler0000755000175000017500000000000015115611520033123 5ustar zuulzuul././@LongLink0000644000000000000000000000025600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/oauth-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_aler0000644000175000017500000000271715115611513033136 0ustar zuulzuul2025-12-08T17:58:14.035018751+00:00 stderr F 2025/12/08 17:58:14 provider.go:129: Defaulting client-id to system:serviceaccount:service-telemetry:alertmanager-stf 2025-12-08T17:58:14.035018751+00:00 stderr F 2025/12/08 17:58:14 provider.go:134: Defaulting client-secret to service account token /var/run/secrets/kubernetes.io/serviceaccount/token 2025-12-08T17:58:14.038929381+00:00 stderr F 2025/12/08 17:58:14 provider.go:358: Delegation of authentication and authorization to OpenShift is enabled for bearer tokens and client certificates. 
2025-12-08T17:58:14.061000002+00:00 stderr F 2025/12/08 17:58:14 oauthproxy.go:210: mapping path "/" => upstream "http://localhost:9093/" 2025-12-08T17:58:14.061000002+00:00 stderr F 2025/12/08 17:58:14 oauthproxy.go:237: OAuthProxy configured for Client ID: system:serviceaccount:service-telemetry:alertmanager-stf 2025-12-08T17:58:14.061000002+00:00 stderr F 2025/12/08 17:58:14 oauthproxy.go:247: Cookie settings: name:_oauth_proxy secure(https):true httponly:true expiry:168h0m0s domain: samesite: refresh:disabled 2025-12-08T17:58:14.061494324+00:00 stderr F 2025/12/08 17:58:14 http.go:64: HTTP: listening on 127.0.0.1:4180 2025-12-08T17:58:14.062059360+00:00 stderr F 2025/12/08 17:58:14 http.go:110: HTTPS: listening on [::]:9095 2025-12-08T17:58:14.062059360+00:00 stderr F I1208 17:58:14.061947 1 dynamic_serving_content.go:135] "Starting controller" name="serving::/etc/tls/private/tls.crt::/etc/tls/private/tls.key" ././@LongLink0000644000000000000000000000030300000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_ob0000755000175000017500000000000015115611513033145 5ustar zuulzuul././@LongLink0000644000000000000000000000035100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5/prometheus-operator-admission-webhook/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_ob0000755000175000017500000000000015115611520033143 5ustar zuulzuul././@LongLink0000644000000000000000000000035600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5/prometheus-operator-admission-webhook/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_ob0000644000175000017500000000204515115611513033150 0ustar zuulzuul2025-12-08T17:55:34.613547543+00:00 stdout F ts=2025-12-08T17:55:34.612856054Z level=info caller=/workspace/internal/goruntime/cpu.go:27 msg="Updating GOMAXPROCS=1: using minimum allowed GOMAXPROCS" 2025-12-08T17:55:34.614842668+00:00 stdout F ts=2025-12-08T17:55:34.614770326Z level=warn caller=/workspace/pkg/server/server.go:158 msg="server TLS client verification disabled" client_ca_file=/etc/tls/private/tls-ca.crt err="stat /etc/tls/private/tls-ca.crt: no such file or directory" 2025-12-08T17:55:34.619359962+00:00 stdout F ts=2025-12-08T17:55:34.617964953Z level=info caller=/workspace/pkg/server/server.go:295 msg="starting secure server" address=[::]:8443 http2=false 2025-12-08T17:55:34.619383343+00:00 stderr F I1208 17:55:34.618064 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:55:34.621902602+00:00 stderr F I1208 17:55:34.621021 1 dynamic_serving_content.go:135] "Starting controller" name="servingCert::/tmp/k8s-webhook-server/serving-certs/tls.crt::/tmp/k8s-webhook-server/serving-certs/tls.key" ././@LongLink0000644000000000000000000000032100000000000011577 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-m0000755000175000017500000000000015115611514033065 5ustar zuulzuul././@LongLink0000644000000000000000000000036700000000000011611 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd/openshift-controller-manager-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-m0000755000175000017500000000000015115611523033065 5ustar zuulzuul././@LongLink0000644000000000000000000000037400000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd/openshift-controller-manager-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-m0000644000175000017500000063230315115611514033076 0ustar zuulzuul2025-12-08T17:44:23.097951238+00:00 stderr F I1208 17:44:23.095397 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:44:23.097951238+00:00 stderr F I1208 17:44:23.097182 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:23.098706578+00:00 stderr F I1208 17:44:23.098651 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:23.252096292+00:00 stderr F I1208 17:44:23.243709 1 builder.go:304] openshift-controller-manager-operator version 4.20.0-202510211040.p2.gaa455c0.assembly.stream.el9-aa455c0-aa455c043152123595c2b4f72e02279aad9dd48a 2025-12-08T17:44:24.225933144+00:00 stderr F I1208 17:44:24.221786 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:24.225933144+00:00 stderr F W1208 17:44:24.222413 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:24.225933144+00:00 stderr F W1208 17:44:24.222418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:24.225933144+00:00 stderr F W1208 17:44:24.222422 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:24.225933144+00:00 stderr F W1208 17:44:24.222428 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:24.225933144+00:00 stderr F W1208 17:44:24.222431 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:24.225933144+00:00 stderr F W1208 17:44:24.222434 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 
2025-12-08T17:44:24.233915302+00:00 stderr F I1208 17:44:24.230806 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:24.234419306+00:00 stderr F I1208 17:44:24.234394 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:24.234491948+00:00 stderr F I1208 17:44:24.234471 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:24.241579152+00:00 stderr F I1208 17:44:24.234867 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:24.241579152+00:00 stderr F I1208 17:44:24.238620 1 leaderelection.go:257] attempting to acquire leader lease openshift-controller-manager-operator/openshift-controller-manager-operator-lock... 2025-12-08T17:44:24.241579152+00:00 stderr F I1208 17:44:24.234994 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:24.241579152+00:00 stderr F I1208 17:44:24.235018 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:24.241579152+00:00 stderr F I1208 17:44:24.235035 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:24.241579152+00:00 stderr F I1208 17:44:24.239175 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:24.241579152+00:00 stderr F I1208 17:44:24.235103 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:24.241579152+00:00 stderr F I1208 17:44:24.239205 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:24.269921425+00:00 stderr F I1208 17:44:24.267194 1 leaderelection.go:271] successfully acquired lease openshift-controller-manager-operator/openshift-controller-manager-operator-lock 2025-12-08T17:44:24.276683089+00:00 stderr F I1208 17:44:24.276595 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator-lock", UID:"226cec6b-557d-4fe6-b9ee-b69fbbe3d47b", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37480", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' openshift-controller-manager-operator-686468bdd5-m5ltz_5e9c4556-1e83-49da-a480-cd415c3019d2 became leader 2025-12-08T17:44:24.312022143+00:00 stderr F I1208 17:44:24.303567 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:24.313576726+00:00 stderr F I1208 17:44:24.313536 1 starter.go:117] FeatureGates initialized: knownFeatureGates=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding 
SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:24.313833133+00:00 stderr F I1208 17:44:24.313789 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", 
"DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:24.341346972+00:00 stderr F I1208 17:44:24.341028 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:24.341389744+00:00 stderr F I1208 17:44:24.341352 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:24.348070926+00:00 stderr F I1208 17:44:24.342912 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:24.427049221+00:00 stderr F I1208 17:44:24.422356 1 base_controller.go:76] Waiting for caches to sync for ImagePullSecretCleanupController 2025-12-08T17:44:24.431024430+00:00 stderr F I1208 17:44:24.430975 1 base_controller.go:76] Waiting for caches to sync for OpenshiftControllerManagerStaticResources-StaticResources 2025-12-08T17:44:24.431024430+00:00 stderr F I1208 17:44:24.431009 1 operator.go:145] Starting OpenShiftControllerManagerOperator 2025-12-08T17:44:24.431117452+00:00 stderr F I1208 17:44:24.431096 1 base_controller.go:76] Waiting for caches to sync for openshift-controller-manager 2025-12-08T17:44:24.431129742+00:00 stderr F I1208 17:44:24.431115 1 base_controller.go:76] Waiting for caches to sync for ConfigObserver 2025-12-08T17:44:24.431137793+00:00 stderr F I1208 17:44:24.431130 1 base_controller.go:76] Waiting for caches to sync for UserCAObservationController 2025-12-08T17:44:24.431200244+00:00 stderr F I1208 17:44:24.431181 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:24.431235655+00:00 stderr F I1208 17:44:24.431189 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_openshift-controller-manager 2025-12-08T17:44:24.557044226+00:00 stderr F I1208 17:44:24.544045 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:24.557044226+00:00 stderr F I1208 17:44:24.544080 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 
2025-12-08T17:44:24.557044226+00:00 stderr F I1208 17:44:24.544671 1 base_controller.go:82] Caches are synced for UserCAObservationController 2025-12-08T17:44:24.557044226+00:00 stderr F I1208 17:44:24.544691 1 base_controller.go:119] Starting #1 worker of UserCAObservationController controller ... 2025-12-08T17:44:24.557044226+00:00 stderr F I1208 17:44:24.545316 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.578918253+00:00 stderr F I1208 17:44:24.578511 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.580013523+00:00 stderr F I1208 17:44:24.579993 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.583664913+00:00 stderr F I1208 17:44:24.583475 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.604406258+00:00 stderr F I1208 17:44:24.604352 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.635086915+00:00 stderr F I1208 17:44:24.634843 1 base_controller.go:82] Caches are synced for ConfigObserver 2025-12-08T17:44:24.635086915+00:00 stderr F I1208 17:44:24.634890 1 base_controller.go:119] Starting #1 worker of ConfigObserver controller ... 2025-12-08T17:44:24.635086915+00:00 stderr F I1208 17:44:24.634921 1 base_controller.go:82] Caches are synced for OpenshiftControllerManagerStaticResources-StaticResources 2025-12-08T17:44:24.635086915+00:00 stderr F I1208 17:44:24.634926 1 base_controller.go:119] Starting #1 worker of OpenshiftControllerManagerStaticResources-StaticResources controller ... 2025-12-08T17:44:24.635086915+00:00 stderr F I1208 17:44:24.634938 1 base_controller.go:82] Caches are synced for openshift-controller-manager 2025-12-08T17:44:24.635086915+00:00 stderr F I1208 17:44:24.634947 1 base_controller.go:119] Starting #1 worker of openshift-controller-manager controller ... 2025-12-08T17:44:24.635146017+00:00 stderr F I1208 17:44:24.635121 1 base_controller.go:82] Caches are synced for StatusSyncer_openshift-controller-manager 2025-12-08T17:44:24.635146017+00:00 stderr F I1208 17:44:24.635142 1 base_controller.go:119] Starting #1 worker of StatusSyncer_openshift-controller-manager controller ... 
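
Editor's note: the "Waiting for caches to sync" / "Caches are synced" / "Caches populated" lines above come from the standard client-go informer startup pattern. Below is a minimal sketch of that pattern, assuming a kubeconfig path and a Secrets informer purely for illustration; it is not the operator's code.

// cachesync.go - illustrative sketch of the client-go "wait for caches to
// sync" pattern that produces log lines like the ones above. The kubeconfig
// path and resync period are assumptions for the example.
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed kubeconfig location for the example.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	defer close(stopCh)

	// Shared informer for Secrets, comparable to the *v1.Secret reflectors
	// reported as "Caches populated" in the log.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	secretInformer := factory.Core().V1().Secrets().Informer()

	factory.Start(stopCh)
	fmt.Println("Waiting for caches to sync ...")
	if !cache.WaitForCacheSync(stopCh, secretInformer.HasSynced) {
		panic("caches did not sync before stop")
	}
	fmt.Println("Caches are synced; controller workers can start")
}
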
2025-12-08T17:44:24.658331610+00:00 stderr F I1208 17:44:24.658268 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.772792982+00:00 stderr F I1208 17:44:24.770514 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:24.808508735+00:00 stderr F I1208 17:44:24.807038 1 status_controller.go:229] clusteroperator/openshift-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-11-02T07:51:57Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:01Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:57Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:24.824924733+00:00 stderr F I1208 17:44:24.821019 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-controller-manager changed: Progressing changed from True to False ("All is well"),Available changed from False to True ("All is well") 2025-12-08T17:44:24.824924733+00:00 stderr F I1208 17:44:24.823151 1 base_controller.go:82] Caches are synced for ImagePullSecretCleanupController 2025-12-08T17:44:24.824924733+00:00 stderr F I1208 17:44:24.823173 1 base_controller.go:119] Starting #1 worker of ImagePullSecretCleanupController controller ... 
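
Editor's note: the status_controller diff above is a JSON fragment describing the clusteroperator conditions (Degraded, Progressing, Available, Upgradeable). The sketch below decodes a shortened copy of that fragment into plain local structs (deliberately not the openshift/api types) and lists each condition; it is illustrative only.

// conditions.go - illustrative only. It decodes a trimmed copy of the
// clusteroperator status diff printed by status_controller.go above into
// simplified local structs and lists each condition.
package main

import (
	"encoding/json"
	"fmt"
)

// Local, simplified shapes for the JSON fragment in the log.
type condition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
	LastTransitionTime string `json:"lastTransitionTime"`
}

type statusDiff struct {
	Status struct {
		Conditions []condition `json:"conditions"`
	} `json:"status"`
}

func main() {
	// Shortened copy of the diff from the log (two conditions omitted).
	raw := `{"status":{"conditions":[
		{"lastTransitionTime":"2025-11-02T07:51:57Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},
		{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},
		{"lastTransitionTime":"2025-12-08T17:44:24Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"}]}}`

	var diff statusDiff
	if err := json.Unmarshal([]byte(raw), &diff); err != nil {
		panic(err)
	}
	for _, c := range diff.Status.Conditions {
		fmt.Printf("%-12s %-6s %s (%s)\n", c.Type, c.Status, c.Reason, c.LastTransitionTime)
	}
}
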
2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.609787 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.609725645 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610079 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.610066864 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610105 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.610086575 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610118 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.610109575 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610131 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.610122195 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610145 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.610136166 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610158 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.610149186 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610172 1 tlsconfig.go:181] "Loaded client CA" index=7 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.610163177 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610188 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.610176077 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610206 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.610195747 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610400 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-controller-manager-operator.svc\" [serving] validServingFor=[metrics.openshift-controller-manager-operator.svc,metrics.openshift-controller-manager-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:44:30.610386693 +0000 UTC))" 2025-12-08T17:44:30.611090153+00:00 stderr F I1208 17:44:30.610555 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215864\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:23 +0000 UTC to 2028-12-08 16:44:23 +0000 UTC (now=2025-12-08 17:44:30.610544367 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.047071 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.047019177 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.047978 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:45:16.047957402 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.047998 1 tlsconfig.go:181] "Loaded client CA" index=2 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.047986673 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.048015 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.048003954 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.048038 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.048022204 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.048059 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.048044085 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.048077 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.048064355 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.048097 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.048083896 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.048116 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.048103626 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.048147 1 tlsconfig.go:181] "Loaded client CA" index=9 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.048133027 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.048167 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.048154438 +0000 UTC))" 2025-12-08T17:45:16.048462956+00:00 stderr F I1208 17:45:16.048450 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-controller-manager-operator.svc\" [serving] validServingFor=[metrics.openshift-controller-manager-operator.svc,metrics.openshift-controller-manager-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:11 +0000 UTC to 2027-11-02 07:52:12 +0000 UTC (now=2025-12-08 17:45:16.048432945 +0000 UTC))" 2025-12-08T17:45:16.050480733+00:00 stderr F I1208 17:45:16.048650 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215864\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215863\" (2025-12-08 16:44:23 +0000 UTC to 2028-12-08 16:44:23 +0000 UTC (now=2025-12-08 17:45:16.048635501 +0000 UTC))" 2025-12-08T17:46:24.291681446+00:00 stderr F E1208 17:46:24.290982 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-controller-manager-operator/leases/openshift-controller-manager-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:24.292772929+00:00 stderr F E1208 17:46:24.292709 1 leaderelection.go:436] error retrieving resource lock openshift-controller-manager-operator/openshift-controller-manager-operator-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-controller-manager-operator/leases/openshift-controller-manager-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:24.638527126+00:00 stderr F E1208 17:46:24.638460 1 base_controller.go:279] "Unhandled Error" err="openshift-controller-manager reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.639075374+00:00 stderr F E1208 17:46:24.639022 1 base_controller.go:279] "Unhandled Error" err="ConfigObserver reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.648197507+00:00 stderr F E1208 17:46:24.647793 1 base_controller.go:279] "Unhandled Error" err="openshift-controller-manager reconciliation failed: unable to get 
operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.648451495+00:00 stderr F E1208 17:46:24.648393 1 base_controller.go:279] "Unhandled Error" err="ConfigObserver reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.660836796+00:00 stderr F E1208 17:46:24.660753 1 base_controller.go:279] "Unhandled Error" err="openshift-controller-manager reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.661493916+00:00 stderr F E1208 17:46:24.661435 1 base_controller.go:279] "Unhandled Error" err="ConfigObserver reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.682599129+00:00 stderr F E1208 17:46:24.682520 1 base_controller.go:279] "Unhandled Error" err="openshift-controller-manager reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.684304521+00:00 stderr F E1208 17:46:24.684244 1 base_controller.go:279] "Unhandled Error" err="ConfigObserver reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.685456685+00:00 stderr P E1208 17:46:24.685386 1 base_controller.go:279] "Unhandled Error" err="OpenshiftControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/openshift-controller-manager/informer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/informer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, 
\"assets/openshift-controller-manager/leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-svc.yaml\" (string): Get 
\"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/services/route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/serviceaccounts/openshift-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/services/controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/ 2025-12-08T17:46:24.685505717+00:00 stderr F apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-role.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-rolebinding.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:24.842115847+00:00 stderr P E1208 17:46:24.841989 1 base_controller.go:279] "Unhandled Error" err="OpenshiftControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/openshift-controller-manager/informer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/informer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-role.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-rolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/services/route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/serviceaccounts/openshift-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/services/controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/ 2025-12-08T17:46:24.842194800+00:00 stderr F 
apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-role.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-rolebinding.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:25.041237244+00:00 stderr F E1208 17:46:25.041162 1 base_controller.go:279] "Unhandled Error" err="openshift-controller-manager reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.239679321+00:00 stderr F E1208 17:46:25.239617 1 base_controller.go:279] "Unhandled Error" err="ConfigObserver reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.442513378+00:00 stderr P E1208 17:46:25.442409 1 base_controller.go:279] "Unhandled Error" err="OpenshiftControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/openshift-controller-manager/informer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/informer-clusterrolebinding.yaml\" 
(string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: 
connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/services/route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/serviceaccounts/openshift-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/services/controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, 
\"assets/openshift-controller-manager/servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/ 2025-12-08T17:46:25.442596391+00:00 stderr F apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-role.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-rolebinding.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:25.639094250+00:00 stderr F E1208 17:46:25.639022 1 base_controller.go:279] "Unhandled Error" err="openshift-controller-manager reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.839192705+00:00 stderr F E1208 17:46:25.839138 1 base_controller.go:279] "Unhandled Error" err="ConfigObserver reconciliation failed: unable to get operator 
configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.040487148+00:00 stderr P E1208 17:46:26.040421 1 base_controller.go:279] "Unhandled Error" err="OpenshiftControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/openshift-controller-manager/informer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/informer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ns.yaml\" 
(string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/services/route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-rolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/serviceaccounts/openshift-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/services/controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/ 2025-12-08T17:46:26.040549809+00:00 stderr F apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-role.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-rolebinding.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to get operator configuration: Get 
\"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:26.239850171+00:00 stderr F E1208 17:46:26.239771 1 base_controller.go:279] "Unhandled Error" err="openshift-controller-manager reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.438318379+00:00 stderr F E1208 17:46:26.438267 1 base_controller.go:279] "Unhandled Error" err="ConfigObserver reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.640590410+00:00 stderr P E1208 17:46:26.640502 1 base_controller.go:279] "Unhandled Error" err="OpenshiftControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/openshift-controller-manager/informer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/informer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, 
\"assets/openshift-controller-manager/route-controller-manager-leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/services/route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: 
connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/serviceaccounts/openshift-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/services/controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/ 2025-12-08T17:46:26.640651192+00:00 stderr F apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-role.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-rolebinding.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:26.838993045+00:00 stderr F E1208 17:46:26.838948 1 base_controller.go:279] "Unhandled Error" err="openshift-controller-manager reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.039528655+00:00 stderr F E1208 17:46:27.039471 1 base_controller.go:279] "Unhandled Error" err="ConfigObserver reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.239861818+00:00 stderr P E1208 17:46:27.239775 1 base_controller.go:279] "Unhandled Error" err="OpenshiftControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/openshift-controller-manager/informer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/informer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection 
refused, \"assets/openshift-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/services/route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrole.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/serviceaccounts/openshift-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/services/controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/ 2025-12-08T17:46:27.239925500+00:00 stderr F apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:deployer\": 
dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-role.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-rolebinding.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:27.483926023+00:00 stderr F E1208 17:46:27.483755 1 base_controller.go:279] "Unhandled Error" err="openshift-controller-manager reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.682735011+00:00 stderr F E1208 17:46:27.682665 1 base_controller.go:279] "Unhandled Error" err="ConfigObserver reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.845085244+00:00 stderr P E1208 17:46:27.844976 1 base_controller.go:279] "Unhandled Error" err="OpenshiftControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/openshift-controller-manager/informer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/informer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-role.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/services/route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/serviceaccounts/openshift-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/services/controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/prometheus-k8s\": dial tcp 
10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/ 2025-12-08T17:46:27.845161896+00:00 stderr F apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-role.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-rolebinding.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:28.561143187+00:00 stderr P E1208 17:46:28.560893 1 base_controller.go:279] "Unhandled Error" err="OpenshiftControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/openshift-controller-manager/informer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/informer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, 
\"assets/openshift-controller-manager/tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-role.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/services/route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/serviceaccounts/openshift-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/services/controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/prometheus-k8s\": dial tcp 
10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/ 2025-12-08T17:46:28.561501778+00:00 stderr F apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-role.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-rolebinding.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:28.768580684+00:00 stderr F E1208 17:46:28.767144 1 base_controller.go:279] "Unhandled Error" err="openshift-controller-manager reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.966548405+00:00 stderr F E1208 17:46:28.966082 1 base_controller.go:279] "Unhandled Error" err="ConfigObserver reconciliation failed: unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:29.301369425+00:00 stderr P E1208 17:46:29.301262 1 base_controller.go:279] "Unhandled Error" err="OpenshiftControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/openshift-controller-manager/informer-clusterrole.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/informer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-leader-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/system:openshift:leader-locking-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: connection refused, 
\"assets/openshift-controller-manager/route-controller-manager-separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-route-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-route-controller-manager/services/route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-tokenreview-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:tokenreview-openshift-route-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/route-controller-manager-ingress-to-route-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:ingress-to-route-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/roles/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/separate-sa-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings/system:openshift:sa-creating-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/serviceaccounts/openshift-controller-manager-sa\": dial tcp 10.217.4.1:443: connect: 
connection refused, \"assets/openshift-controller-manager/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-controller-manager/services/controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/roles/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/servicemonitor-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-controller-manager/rolebindings/prometheus-k8s\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/ 2025-12-08T17:46:29.301456038+00:00 stderr F apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/buildconfigstatus-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:update-buildconfig-status\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/deployer-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:deployer\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrole.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterroles/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/image-trigger-controller-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:openshift-controller-manager:image-trigger-controller\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-role.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/openshift-controller-manager/old-leader-rolebinding.yaml\" (string): Delete \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-openshift-controller-manager\": dial tcp 10.217.4.1:443: connect: connection refused, unable to get operator configuration: Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/openshiftcontrollermanagers/cluster\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:55.648573156+00:00 stderr F I1208 17:46:55.647916 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:00.621347345+00:00 stderr F I1208 17:47:00.620838 1 reflector.go:430] "Caches populated" 
type="*v1.ClusterOperator" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:01.556197473+00:00 stderr F I1208 17:47:01.555669 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:02.701641971+00:00 stderr F I1208 17:47:02.701575 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:03.142308752+00:00 stderr F I1208 17:47:03.142042 1 reflector.go:430] "Caches populated" type="*v1.Build" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:03.185376868+00:00 stderr F I1208 17:47:03.185313 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:03.255847196+00:00 stderr P I1208 17:47:03.255286 1 core.go:352] ConfigMap "openshift-controller-manager/client-ca" changes: {"data":{"ca-bundle.crt":"-----BEGIN CERTIFICATE-----\nMIIFWzCCA0OgAwIBAgIUGCGM8Q3O0omYhECixt5AvIY+d4owDQYJKoZIhvcNAQEL\nBQAwPTESMBAGA1UECwwJb3BlbnNoaWZ0MScwJQYDVQQDDB5hZG1pbi1rdWJlY29u\nZmlnLXNpZ25lci1jdXN0b20wHhcNMjUxMjA4MTc0NTA5WhcNMzUxMjA2MTc0NTA5\nWjA9MRIwEAYDVQQLDAlvcGVuc2hpZnQxJzAlBgNVBAMMHmFkbWluLWt1YmVjb25m\naWctc2lnbmVyLWN1c3RvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB\nANpMZjkbItUcAdd+J4eSy87mk6EfZSZD8WMH+LJpdHgeZFUqJZfH8qVDAn3QyEkE\nPn1W+6zV/EUmgAx76hvaAfYq9U7ic5RYK8jJt2j6Tb0SMG+/kvaEohwCnX5GDSek\nzzSRKc6aHZwjkR3d4QpY8BOzMx8lBIIl/px2xsw3QGtihaeBbnYa7CcbWznR/V0b\nfJ/o/oMd5okhZtJZkc0w6o4codNaSIFu1MbPPBCK6OwVfoD43uq+y/Wcinv3M1sw\nKKFaW9gaMFAkStevvcQcFFSSRej8CuZK+o2H+2OxTVi19P4WmIDn9A22MPrlIGno\nOcQPfFayfIczLMiUNe6bjueCMkVEIfTszMKUALNlzHPQ1W15CC3Bqg4xqnRL9JpL\nE1DBQwhuq4lvAxFItsJhQCagWlHgyinbVZHOB/QS+RZ4Vo2DcIkTcXRxZ7KUz/mj\nitF8kCdDz6aUiPeDNGm2M4fKBdWqrgHLUqfATGq3Qh545HpZ6QqYffvLLNLuKxM0\nim+qD5wCgoJPROitdK5plsPfe/C4zjoYc7oFKlXM389DNj0KxwRvMUE6kZoptjUo\nd676JxYQF3XrZnIpZ+PlIqXt2R+ahpuz0BvBMAlwrqEhDP9CsCHx8sRXNrw3OSSp\n9LZ5CRFampF4RoHWikdd8uybWY05f7Eis/o2gEPJrCUnAgMBAAGjUzBRMB0GA1Ud\nDgQWBBScadKZBJR7Ydm+yfS6UsTlrSxg1DAfBgNVHSMEGDAWgBScadKZBJR7Ydm+\nyfS6UsTlrSxg1DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBp\nOh3vzzZhIkTOOVScU0gxIIl6mpSRScsJkK3TfSdvQWCrNlYJmPcO1aD/YAJv6fmT\nfih+NkBD00qrrkzcdOsaDJXsT2oaB4h8VJQrTDcal3V4F3jWS8j0bn8QukgwWtIq\nevP2sTS8oIzx59k/e2EhjdCgDjnTEBDajXfn9UXRjh+3ZHqmFtZYdz/uZWmBxeLK\n6Kqi0GtFwP6dfylQzg1IXB0C4D2xqVoHEimKIrBQyak8RmDKzleRxvIOUSj3o5DM\nVEyajcJQ6XaD+IwMGh1/DVLxN4uTMbMZDwv+gWl3TvK++f+TSSTMhy+92A2WecDO\nPNLD7xiX5wc6ge5Dh9AzzoOW3tP1iiB9Y0iCmxuj4SUhR0hfgQlRY6sxF40E8xWO\nNNQYbDo+rEwE7frnykHMfqclzJ/a8ax3+lzfM4CvYOmj97909M+2pc0d8Dnbkg75\nncxbob8nQ2UTmQ4nu3qFCZ+5ssDtQaDBXCzSbrSUiFpYtZ1vDZMXcBcoPtri29Ih\ndhUSPKLUHmHzvcEK1n8PPRcfKHjES8s0ankZfnKkcU11Yjhx8eeKUT+s2Iq+Tl6e\ndHKDccvC42BF9X0NarLfvMJcrQu1mPjBYs6WX9a2v9uvg0DG4OAZnu6oQ+gw25LT\n31bGdVPDcEEpNdcMmlY/LsOs00DNYQmW8rdLjfhjrQ==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDXTCCAkWgAwIBAgIIHuh6I9HTH+QwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM1WhcNMjYx\nMDI4MDgxNzM2WjAmMSQwIgYDVQQDDBtrdWJlLWNzci1zaWduZXJfQDE3NjIwNzE0\nNTUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb91UVWMxGg+Q7+o+5\ntt2z/Oq+7Mc1SNGJ4oF0Y99WM1Y7tIo5NOKWiIXE16rGuzY4+D9O53fh14ngzJm3\nWh3GjCGvr8/2W7J1BblXwuKwDuRl+OfJvyPtETUeME42Y9V6XP/B60iaaEYhm5t7\nTDf7TmmjEpqeWix43KqHnpcW9Zr8tM+tHLrGwcHnb+z3LvGkQA7mbXsaHiuryCVx\nudxQYNKWgtAkw3OOtuVyJ2gGD7iYVni1jg7nc9ZhQOYBoYRbAw3zh36CY50dZA89\nhDKYsVVourd9xfAdwSmrcgtsVo1X0ucCpEEUYKEz3/udgk+Dgf2hy3flIMcg9kcI\nS8xzAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0G\nA1UdDgQWBBSGMLijow+n1t5asMgXimhNhoMUvDAfBgNVHSMEGDAWgBQwDwrhoAwS\n6q2GA5CnoIQZVVim5jANBgkqhkiG9w0BAQsFAAOCAQEAhDT7ncsDiLG3QyVtffPp\nTjWscZowuHbVwUjwpZt5OxQFonSJxNCnqrj4MlIqqxraH5nnsrW5FqWyWWeMmXpz\nbFkiVhPFCVGjmRN9V1LXjHDc4lufe3ixq+MvgtU1YL/WJBmUxxw5dPifTT15IApJ\n6stLJ0QtHBNeEIIroUFpDB+O7OJYZ85ed6o6gT4n/v9nxBaLsZNpO2TzaWfI0Bst\nFEoPsfPKgBvwg9+2GijlP/VyKmP2gFdFm25PWeROU1VZzPrEhaOliO+/YXHt32YU\nJkxTF/smrLzxRRbb507cuvWEilzud93YbHmAhAj1h0CpDdzjxYDr5zRYJSP7BV+6\nUA==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDiTCCAnGgAwIBAgIIOsA0z3vOTSgwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM0WhcNMjcx\nMDIzMDgxNzM1WjBSMVAwTgYDVQQDDEdvcGVuc2hpZnQta3ViZS1jb250cm9sbGVy\nLW1hbmFnZXItb3BlcmF0b3JfY3NyLXNpZ25lci1zaWduZXJAMTc2MjA3MTQ1NTCC\nASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPY8aATxrjWpRKfnFU7BgmD1\nkNEst9RVY3fzGsARc5tF3x+zhl9BbsoJ9NAncCj+KnkAIYk4pJMU6BHIWIakpvqV\nLsnlPM45VU8ocwOWb5Z7g88YdqHGRWeZqZt/rPXmH/846iVGDstB0YQWgKKKK97X\nvjKMsq9ALSVj8gRWai7B7MVP/bZ4FgeqsYq6zIH9XKdPO8ev20qffrob4nmLGHdJ\nikAliwIy6nkVYrATKOS8t56votMD7xuFQ8rM6uQ0YVejA5rE/Tmq4/NsFpfFfkCP\nMU0vO2XwqCBV81XKD00SmW9MXIxbTq5lU9YjjE1sDFREtN4uZL4nEDa2+wnvIxEC\nAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O\nBBYEFDAPCuGgDBLqrYYDkKeghBlVWKbmMB8GA1UdIwQYMBaAFDAPCuGgDBLqrYYD\nkKeghBlVWKbmMA0GCSqGSIb3DQEBCwUAA4IBAQDS31/o3/4/9DBLFVcLvIwOQCK1\n0ZwJUa/8iK5GaMlMH6cNfvVZBrb4SwTVx0WXI5wrIXrlvYc+PtXL0MJeyIJmMpoU\nRyQAJZsh8cckeQjghV2Pf7wMfEbHudKTp8uoQDUBntkfNhJa4pPxmNWuhOrlvdB5\nEF/6IGviKAdSy0jcNpscvD3W0oSpCYRW0Ki/25LaFvIqP2Xy/cNJlWhzJWqZbK6k\nR9I4knhvIv/JYmppOVXw1rEvP+8Pn8UF2oSfFXcN5W+j4YIIhrAUnjAbaflpyX8k\nAUEKdtgVNNe7RlW9nQrhO8GqFJItitBbNtVuSkw9XIlQ0gc40E7mgB7Mjnbu\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDQjCCAiqgAwIBAgIICN23rtJ7PrYwDQYJKoZIhvcNAQELBQAwPzESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSkwJwYDVQQDEyBrdWJlLWFwaXNlcnZlci10by1rdWJlbGV0\nLXNpZ25lcjAeFw0yNTExMDIwNzM0MTFaFw0yNjExMDIwNzM0MTFaMD8xEjAQBgNV\nBAsTCW9wZW5zaGlmdDEpMCcGA1UEAxMga3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxl\ndC1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDzRlKEKNEy\nCyd+PPBkdzQZd3BeUb0b2qi1h8fbUiSNENrOpafKxxAHcr3a4KWB6sKhi28r14mF\nxcJ2Yb92/jLpkS15p629AUrGXxKuL8QtkBsY3dH0CqKMBedO6oxodva9Avc+3DMI\nvvYBJFy+4on/0JbM54fduvDmcEmhBtgRItK3Z87VbhemVPj7uDi9EV381uRMlmq4\ncgtD5mfS1yeRu0ut5IIr7/PN1G+93slLGQkHveqWlsFrDYd8Qm5PqirRBYy+18RC\nmEuNirFX3yPrEGwMvRlJyFia0RKuK69bFL2vduI5Wu7h/6VKP0/vEpEYqI6bJYoV\nbUjA2vqrV/1VAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD\nAQH/MB0GA1UdDgQWBBRNj9dsSIhfwKZ491Q/XZYt2lRWLzANBgkqhkiG9w0BAQsF\nAAOCAQEANmt7a5N+zch83D5+YeC4VQoFvpkHPKEYYT0EM/ykjch3453RmQWQEwkj\nVvstV1U16YpnEI61l1s347RHi64SwtlwV6tiNCpopDF2u3Bb+eiJqrlIC69EFnZE\n1426AVmZZq3sWu3eKB0HgT5u6B1rErSTl3c4hK4SiDsWWlVktBSN0BS4cD+urSAF\nc673/wLKCjq2I+9i3Wv2K7Ton3w5oaETE7lgQyImbKOVhJhFrPGu9fKXaeWlyXGY\nj7tz68vNTvecRynKrmzUJ9BBMfAXTrCowitzjBjanFitgXK4DnQMkb+8lv2Txb/n\nkB7RzcFDyIVd3g5XWBujR3fkQFWsNQ==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDlTCCAn2gAwIBAgIIV5i/4m8WRp0wDQYJKoZIhvcNAQELBQAwWDFWMFQGA1UE\nAwxNb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtYXBpc2Vy\ndmVyLXRvLWt1YmVsZXQtc2lnbmVyQDE3NjUyMTU4NjMwHhcNMjUxMjA4MTc0NDIz\nWhcNMjYxMjA4MTc0NDI0WjBYMVYwVAYDVQQDDE1vcGVuc2hpZnQta3ViZS1hcGlz\nZXJ2ZXItb3BlcmF0b3Jfa3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxldC1zaWduZXJA\nMTc2NTIxNTg2MzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMQk29Lc\nviCBF+ZN194ChQgHyYo1iN6wW+jZDEEIQpkmIfgrPnEOPL8+9d3SN92BqqYGdwnp\n5TdyDJBFBjrM8iKKvrq6x+EcyQJU6/Q+41bpPSLsziclImlDUUE29OYj6poxfNi1\nQBeFL1q4j9/ks+AfMnpjEbiGjxjJ8cV8++3NERSB1jJLft1rYcnQvgBuE64jqipO\nbNczVjMjcq0g+H+qpZknHlFueBqi5F/Nj/hC7QZbS96VThCxM123zqORBAfU5Fj0\ndMk3XqYTM1mpfyQHihtlyG3vsPXI/CBZgno6CI+KuXZJ46IjNNmiImyVJNKe7tXS\niWxbKKtEZHAvMcsCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQF\nMAMBAf8wHQYDVR0OBBYEFKzukDY3Odtmp3C+ncqem+63g2NuMB8GA1UdIwQYMBaA\nFKzukDY3Odtmp3C+ncqem+63g2NuMA0GCSqGSIb3DQEBCwUAA4IBAQBC2NLbh36L\n05mNq0+V4avx/2/xXvih+RtebPhiF8w8WG7WWRiIlK/yn8+iToFX/07+HWbBSK3g\nu5Yqac0eh8iKLkG+eIFiXpZR4B4Ha3ZRoU4N6dBMohIChZNugHGtjhfFjDpjFY8N\n9jMoZmTtjtK7RW2tu1qRyJcNSk8ou6nYNo/fB9PHWP5E12cWdg2ZQyESq+zE2dFo\n/dNjvb2y+GneObWzG9nclr6L7f6jI4LSOujO9ZA28xW4lf2EmosQ2HOeun48vA3O\n0C9lO1/SqcPkA6TtMHsoXZDSRv+mH62ugEZkDn8lgOizTm3l+jU9UA4RSvRD7ghR\nuXScj56hVynp\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDNDCCAhygAwIBAgIILvKlXd2YBKIwDQYJKoZIhvcNAQELBQAwODESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVy\nMB4XDTI1MTEwMjA3MzQxMFoXDTI2MTEwMjA3MzQxMFowODESMBAGA1UECxMJb3Bl\nbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOnFEdxnqqE7l+5NxN82TBBycC2F\n5cUboCcUVr211lubqmjCusnJzzz+6rKpO8uFqqGeu4C7rGpudOaK52IZhG0WP8b7\n0raQhnM4DV8t2SHDV4GhFUiuNE+b4FJrZ6jiljQo2g9ZeeCZgdmmBrIFHBXDFzEc\nA1RPScqOtvBbbH064Zd267gOmVPJnWmxDXo6X/RGYCm1YUS6FQ2WWpl707ComvgZ\nAvWGSA4H1sZirMQ+ug3bctkLb+SiXUzf+tLnGIHPeqDNfMrNUhxBl6dDhlMbUIzY\nrVxgDD8y3i7eg5i5HG8yntl8epgs2gn47wfavfjLqATBlciJPZY6Qv3BFQIDAQAB\no0IwQDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\nWi6wmZ50AVdwxaiO3WjTvp82+sEwDQYJKoZIhvcNAQELBQADggEBAA4siMWwOGL5\nCiCxbiJsuKMbCLlFR6JaD43FWnUCZDoe7np7W76Oh4py4Zht7XlZKotcXrrfRYVO\ndVht66PCbSujy375p/B6c3isG4h/1cNSGDm1uhAkHXGZ88S2wSjKT5YJ/HUAkvyj\nadQgZeO7Q60YBSDE/67Ldq1zqvBrMF2k8pF49p1AdAtf4OSDzIaGGPUQJTFExA0E\n03 
2025-12-08T17:47:03.256031693+00:00 stderr F xMlOPNhYZ8MgFT2XE6nRT74lCAfK9krAsZLtFuAtp/14t013PD0FqTTQRUmuSj\nO6pJKDTH8tZ3ieXxSzRR+j4p5hkHaehgQVyUbwiw8WVXkd6NcWR6yQcSeqIsTSCD\neMDdTmmKyuo=\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDhzCCAm+gAwIBAgIIcuJfJWKJ/NEwDQYJKoZIhvcNAQELBQAwUTFPME0GA1UE\nAwxGb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtY29udHJv\nbC1wbGFuZS1zaWduZXJAMTc2NTIxNTg2MzAeFw0yNTEyMDgxNzQ0MjNaFw0yNjAy\nMDYxNzQ0MjRaMFExTzBNBgNVBAMMRm9wZW5zaGlmdC1rdWJlLWFwaXNlcnZlci1v\ncGVyYXRvcl9rdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyQDE3NjUyMTU4NjMwggEi\nMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC86U3wU8BYhyUyqYM3Vuy/Kvfu\nQlUYxAqiVxA50CLa488sawRtfVN03+NKfPtzoj6xg5nYxR0a/+IP95n2YltFsU5k\nyumfMqcMWP1gZeUuqq0tHgy/GYvD4uF2IWLRMYMdYrsbJlOPWRCnRfWtXN7LJHAY\nBQwKW01c7MOm8AMOT5sGCw7z1GwROdLkjebZSAWeWP+uho5ubO7R9yFVrMJGzBum\nXUceaUrjiVyDCVdMBMttbZtjYYwW1NqDl4P4CgtW+CRONRTW8FNDdldzjm2fo/HL\n/frz934yfHA6c6xDWRI4+BEKJpecqxBUoC6xeGNdPd3KFmqPFRp7N/oERpH5AgMB\nAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW\nBBRrr6Rca+ABepsTcpAiBlwqNlkzNzAfBgNVHSMEGDAWgBRrr6Rca+ABepsTcpAi\nBlwqNlkzNzANBgkqhkiG9w0BAQsFAAOCAQEArD1l55HNxEi+lDb8LV+9Zzmb+gxB\nDq27GP6pZD+v8cHdoet3SgTFXeYKrd/Aw34+ZJceKPQrhoLtGkl+UW9T50ymZmVx\nENwuX+8e/OxAYAcKZdAwlCmPBV2A+puager7UZ6cE35W22ZqqijJ3J+nB7BmCtQ7\nqooWmH+OcHkw9Eoa8BbWCAH8nItf7bglCui0yQb4MCbrGMCHOVKwInTpI2biAdb6\nvQwXe1ofL4bVZt0eiPk2tuhljglLjV23q/aaFqTXC7T6UIKtb0olqNjGO10Aasew\ntAxUmbhL/uOz2X2JztYbjYPfVWbeUefTtX8tXV8oqflB6auskk/m2wMUbw==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDSDCCAjCgAwIBAgIIV//3Qv2OxM4wDQYJKoZIhvcNAQELBQAwQjESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSwwKgYDVQQDEyNrdWJlbGV0LWJvb3RzdHJhcC1rdWJlY29u\nZmlnLXNpZ25lcjAeFw0yNTExMDIwNzM0MDhaFw0zNTEwMzEwNzM0MDhaMEIxEjAQ\nBgNVBAsTCW9wZW5zaGlmdDEsMCoGA1UEAxMja3ViZWxldC1ib290c3RyYXAta3Vi\nZWNvbmZpZy1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDc\nZBn9WZ5wLPck6+g3f4p0NBGyE6LaXnWavXx9m0sVdTVQooknndmKufeYkXGZ5Lb+\nfbMAp/6swgSJ1DjdBj06rCqJEZfdZl3uZoD/Th4ha2Phl12bXaNYLiuOu5BOZ3UW\n08y1Wab9Y9zc0o4Z71pHH4o9TH3QPNT6BqAz4kkgD6t1r/R7E7lrZbx+7e+0JBAW\nRgufaFOX1AYU5B4+pSM21eJY7oP1P9I4DMeeJW39opCCHAuUQgHpOV1YPtRqEPJ4\n9matas8qm5qIMIPbGEGFckSJqgny9YCfHaLezJtZMIHJgz5LW4H91gQCGvfSbLtH\nYxYO/PcTXQCnDYNwqf29AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBSCg7Y8QkzypoNAqYVPNK5YWMVIXzANBgkqhkiG\n9w0BAQsFAAOCAQEAsP1NR5ZgC7F5FvoU2CXla4FufdeYTk4bjjNoFJMqnvI5gcY6\nJDxP1Rl2YBHCpRxngtBFxw3Xoe8UnBKzZWsS5wLUYRloprhGVBSLM0vqJJOvP7M0\njt3SLuB7h0dG2GO9yQ4y10+xVWxP5Os9wcbQcRgTQKL3gHmCq4aQN1cqJSxyJ/ut\nlbfYlM/xBcfLMY5Leeas6y2FPCFIEONh1U9FJZlF3YkhPp+XD7aePtC4tJqsokMc\nP80IwPn54aDT9akRPsOteB6C+xSAz2TlfWaJ/l/x9yXK+HJrRhMartqyN511SeEd\nDpNcMW9qPTjJzBj+N3f0ZfvbTmhVSvV65ZEtAw==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDhTCCAm2gAwIBAgIIfksp5LDEMMgwDQYJKoZIhvcNAQELBQAwUDFOMEwGA1UE\nAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX25vZGUtc3lzdGVt\nLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MB4XDTI1MTEwMjA3NTEyN1oXDTI4MTEw\nMTA3NTEyOFowUDFOMEwGA1UEAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9w\nZXJhdG9yX25vZGUtc3lzdGVtLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA402A+B9GaYmzBVLJEyrGvytCY0Vb\nvvIKHW2kxl9IFN3+LNZHW/mKeJfz/hBTm2bs+6uRCDlMSyONDlVUWVsuE+q0+F42\n0n3VyxWRSrDZ2ur5oNxmoBSsHRM+PxccQ6X3JTZyO397LHNOzxAs/Es+St8A8sbY\nGLc1lNqeOLvwAOT5d2PrFlYCAfXYs/UVIaio846jidKKN1f8Z6W5pgdAHuTXbyBQ\nLDQh6s43TBPhww1KszmcwURjEBDCT6KlhsM/quMd9XlMU0ZEAMf6XxsqvW8ia8C8\nF+RNAaGkwmiS4qZ+hJ4KIUnWM84j+bsyNBqlHFKi1e7LsKRyjnQ288FqIQIDAQAB\no2MwYTAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\n9O+dX0K7dzD13bshuR70sHbUAeAwHwYDVR0jBBgwFoAU9O+dX0K7dzD13bshuR70\nsHbUAeAwDQYJKoZIhvcNAQELBQADggEBADPRGSn1U/YwBUkpU7vTzsxqLaVJW21o\n6hV/W2IjjGNGqp6c2kSH/3ZGSEjNwIJqKRFpC2gmTPgAqnC4nDosOHx5F5HXTmrU\n1l2Ivcm1Ep+t/zBgNHjBi3yommx8n2iTTdakpQaq7/u1s0I4UiRqXydjoGXp7H1C\naAsmRlK8ovgEAWzItjeMBzy65wqiStPBK+XAIddqznHCxrRyH5xk3HcnyMG4GDWl\nrogdK8yTGCuZVCvGfe9Hwm8tyYrxDRNvRLTc0ssTonAwnR/7IzaVVc9Pp0svCynJ\n6VX3FGhgWwDVWeajj8yrXeR42az/Rr1TAAOZtJMW+4hIkaU0/+msvgw=\n-----END CERTIFICATE-----\n"},"metadata":{"creationTimestamp":"2025-11-02T07:51:50Z","managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:ca-bundle.crt":{}},"f:metadata":{"f:annotations":{".":{},"f:openshift.io/owning-component":{}}}},"manager":"cluster-kube-apiserver-operator","operation":"Update","time":"2025-12-08T17:45:16Z"}],"resourceVersion":null,"uid":"e7bc0b2c-2af6-488e-bf6f-25875798350a"}} 2025-12-08T17:47:03.256239129+00:00 stderr F I1208 17:47:03.256125 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapUpdated' Updated ConfigMap/client-ca -n openshift-controller-manager: 2025-12-08T17:47:03.256239129+00:00 stderr F cause by changes in data.ca-bundle.crt 2025-12-08T17:47:03.286006926+00:00 stderr P I1208 17:47:03.285519 1 core.go:352] ConfigMap "openshift-route-controller-manager/client-ca" changes: {"data":{"ca-bundle.crt":"-----BEGIN 
CERTIFICATE-----\nMIIFWzCCA0OgAwIBAgIUGCGM8Q3O0omYhECixt5AvIY+d4owDQYJKoZIhvcNAQEL\nBQAwPTESMBAGA1UECwwJb3BlbnNoaWZ0MScwJQYDVQQDDB5hZG1pbi1rdWJlY29u\nZmlnLXNpZ25lci1jdXN0b20wHhcNMjUxMjA4MTc0NTA5WhcNMzUxMjA2MTc0NTA5\nWjA9MRIwEAYDVQQLDAlvcGVuc2hpZnQxJzAlBgNVBAMMHmFkbWluLWt1YmVjb25m\naWctc2lnbmVyLWN1c3RvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB\nANpMZjkbItUcAdd+J4eSy87mk6EfZSZD8WMH+LJpdHgeZFUqJZfH8qVDAn3QyEkE\nPn1W+6zV/EUmgAx76hvaAfYq9U7ic5RYK8jJt2j6Tb0SMG+/kvaEohwCnX5GDSek\nzzSRKc6aHZwjkR3d4QpY8BOzMx8lBIIl/px2xsw3QGtihaeBbnYa7CcbWznR/V0b\nfJ/o/oMd5okhZtJZkc0w6o4codNaSIFu1MbPPBCK6OwVfoD43uq+y/Wcinv3M1sw\nKKFaW9gaMFAkStevvcQcFFSSRej8CuZK+o2H+2OxTVi19P4WmIDn9A22MPrlIGno\nOcQPfFayfIczLMiUNe6bjueCMkVEIfTszMKUALNlzHPQ1W15CC3Bqg4xqnRL9JpL\nE1DBQwhuq4lvAxFItsJhQCagWlHgyinbVZHOB/QS+RZ4Vo2DcIkTcXRxZ7KUz/mj\nitF8kCdDz6aUiPeDNGm2M4fKBdWqrgHLUqfATGq3Qh545HpZ6QqYffvLLNLuKxM0\nim+qD5wCgoJPROitdK5plsPfe/C4zjoYc7oFKlXM389DNj0KxwRvMUE6kZoptjUo\nd676JxYQF3XrZnIpZ+PlIqXt2R+ahpuz0BvBMAlwrqEhDP9CsCHx8sRXNrw3OSSp\n9LZ5CRFampF4RoHWikdd8uybWY05f7Eis/o2gEPJrCUnAgMBAAGjUzBRMB0GA1Ud\nDgQWBBScadKZBJR7Ydm+yfS6UsTlrSxg1DAfBgNVHSMEGDAWgBScadKZBJR7Ydm+\nyfS6UsTlrSxg1DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBp\nOh3vzzZhIkTOOVScU0gxIIl6mpSRScsJkK3TfSdvQWCrNlYJmPcO1aD/YAJv6fmT\nfih+NkBD00qrrkzcdOsaDJXsT2oaB4h8VJQrTDcal3V4F3jWS8j0bn8QukgwWtIq\nevP2sTS8oIzx59k/e2EhjdCgDjnTEBDajXfn9UXRjh+3ZHqmFtZYdz/uZWmBxeLK\n6Kqi0GtFwP6dfylQzg1IXB0C4D2xqVoHEimKIrBQyak8RmDKzleRxvIOUSj3o5DM\nVEyajcJQ6XaD+IwMGh1/DVLxN4uTMbMZDwv+gWl3TvK++f+TSSTMhy+92A2WecDO\nPNLD7xiX5wc6ge5Dh9AzzoOW3tP1iiB9Y0iCmxuj4SUhR0hfgQlRY6sxF40E8xWO\nNNQYbDo+rEwE7frnykHMfqclzJ/a8ax3+lzfM4CvYOmj97909M+2pc0d8Dnbkg75\nncxbob8nQ2UTmQ4nu3qFCZ+5ssDtQaDBXCzSbrSUiFpYtZ1vDZMXcBcoPtri29Ih\ndhUSPKLUHmHzvcEK1n8PPRcfKHjES8s0ankZfnKkcU11Yjhx8eeKUT+s2Iq+Tl6e\ndHKDccvC42BF9X0NarLfvMJcrQu1mPjBYs6WX9a2v9uvg0DG4OAZnu6oQ+gw25LT\n31bGdVPDcEEpNdcMmlY/LsOs00DNYQmW8rdLjfhjrQ==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDXTCCAkWgAwIBAgIIHuh6I9HTH+QwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM1WhcNMjYx\nMDI4MDgxNzM2WjAmMSQwIgYDVQQDDBtrdWJlLWNzci1zaWduZXJfQDE3NjIwNzE0\nNTUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb91UVWMxGg+Q7+o+5\ntt2z/Oq+7Mc1SNGJ4oF0Y99WM1Y7tIo5NOKWiIXE16rGuzY4+D9O53fh14ngzJm3\nWh3GjCGvr8/2W7J1BblXwuKwDuRl+OfJvyPtETUeME42Y9V6XP/B60iaaEYhm5t7\nTDf7TmmjEpqeWix43KqHnpcW9Zr8tM+tHLrGwcHnb+z3LvGkQA7mbXsaHiuryCVx\nudxQYNKWgtAkw3OOtuVyJ2gGD7iYVni1jg7nc9ZhQOYBoYRbAw3zh36CY50dZA89\nhDKYsVVourd9xfAdwSmrcgtsVo1X0ucCpEEUYKEz3/udgk+Dgf2hy3flIMcg9kcI\nS8xzAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0G\nA1UdDgQWBBSGMLijow+n1t5asMgXimhNhoMUvDAfBgNVHSMEGDAWgBQwDwrhoAwS\n6q2GA5CnoIQZVVim5jANBgkqhkiG9w0BAQsFAAOCAQEAhDT7ncsDiLG3QyVtffPp\nTjWscZowuHbVwUjwpZt5OxQFonSJxNCnqrj4MlIqqxraH5nnsrW5FqWyWWeMmXpz\nbFkiVhPFCVGjmRN9V1LXjHDc4lufe3ixq+MvgtU1YL/WJBmUxxw5dPifTT15IApJ\n6stLJ0QtHBNeEIIroUFpDB+O7OJYZ85ed6o6gT4n/v9nxBaLsZNpO2TzaWfI0Bst\nFEoPsfPKgBvwg9+2GijlP/VyKmP2gFdFm25PWeROU1VZzPrEhaOliO+/YXHt32YU\nJkxTF/smrLzxRRbb507cuvWEilzud93YbHmAhAj1h0CpDdzjxYDr5zRYJSP7BV+6\nUA==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDiTCCAnGgAwIBAgIIOsA0z3vOTSgwDQYJKoZIhvcNAQELBQAwUjFQME4GA1UE\nAwxHb3BlbnNoaWZ0LWt1YmUtY29udHJvbGxlci1tYW5hZ2VyLW9wZXJhdG9yX2Nz\nci1zaWduZXItc2lnbmVyQDE3NjIwNzE0NTUwHhcNMjUxMTAyMDgxNzM0WhcNMjcx\nMDIzMDgxNzM1WjBSMVAwTgYDVQQDDEdvcGVuc2hpZnQta3ViZS1jb250cm9sbGVy\nLW1hbmFnZXItb3BlcmF0b3JfY3NyLXNpZ25lci1zaWduZXJAMTc2MjA3MTQ1NTCC\nASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPY8aATxrjWpRKfnFU7BgmD1\nkNEst9RVY3fzGsARc5tF3x+zhl9BbsoJ9NAncCj+KnkAIYk4pJMU6BHIWIakpvqV\nLsnlPM45VU8ocwOWb5Z7g88YdqHGRWeZqZt/rPXmH/846iVGDstB0YQWgKKKK97X\nvjKMsq9ALSVj8gRWai7B7MVP/bZ4FgeqsYq6zIH9XKdPO8ev20qffrob4nmLGHdJ\nikAliwIy6nkVYrATKOS8t56votMD7xuFQ8rM6uQ0YVejA5rE/Tmq4/NsFpfFfkCP\nMU0vO2XwqCBV81XKD00SmW9MXIxbTq5lU9YjjE1sDFREtN4uZL4nEDa2+wnvIxEC\nAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O\nBBYEFDAPCuGgDBLqrYYDkKeghBlVWKbmMB8GA1UdIwQYMBaAFDAPCuGgDBLqrYYD\nkKeghBlVWKbmMA0GCSqGSIb3DQEBCwUAA4IBAQDS31/o3/4/9DBLFVcLvIwOQCK1\n0ZwJUa/8iK5GaMlMH6cNfvVZBrb4SwTVx0WXI5wrIXrlvYc+PtXL0MJeyIJmMpoU\nRyQAJZsh8cckeQjghV2Pf7wMfEbHudKTp8uoQDUBntkfNhJa4pPxmNWuhOrlvdB5\nEF/6IGviKAdSy0jcNpscvD3W0oSpCYRW0Ki/25LaFvIqP2Xy/cNJlWhzJWqZbK6k\nR9I4knhvIv/JYmppOVXw1rEvP+8Pn8UF2oSfFXcN5W+j4YIIhrAUnjAbaflpyX8k\nAUEKdtgVNNe7RlW9nQrhO8GqFJItitBbNtVuSkw9XIlQ0gc40E7mgB7Mjnbu\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDQjCCAiqgAwIBAgIICN23rtJ7PrYwDQYJKoZIhvcNAQELBQAwPzESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSkwJwYDVQQDEyBrdWJlLWFwaXNlcnZlci10by1rdWJlbGV0\nLXNpZ25lcjAeFw0yNTExMDIwNzM0MTFaFw0yNjExMDIwNzM0MTFaMD8xEjAQBgNV\nBAsTCW9wZW5zaGlmdDEpMCcGA1UEAxMga3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxl\ndC1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDzRlKEKNEy\nCyd+PPBkdzQZd3BeUb0b2qi1h8fbUiSNENrOpafKxxAHcr3a4KWB6sKhi28r14mF\nxcJ2Yb92/jLpkS15p629AUrGXxKuL8QtkBsY3dH0CqKMBedO6oxodva9Avc+3DMI\nvvYBJFy+4on/0JbM54fduvDmcEmhBtgRItK3Z87VbhemVPj7uDi9EV381uRMlmq4\ncgtD5mfS1yeRu0ut5IIr7/PN1G+93slLGQkHveqWlsFrDYd8Qm5PqirRBYy+18RC\nmEuNirFX3yPrEGwMvRlJyFia0RKuK69bFL2vduI5Wu7h/6VKP0/vEpEYqI6bJYoV\nbUjA2vqrV/1VAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD\nAQH/MB0GA1UdDgQWBBRNj9dsSIhfwKZ491Q/XZYt2lRWLzANBgkqhkiG9w0BAQsF\nAAOCAQEANmt7a5N+zch83D5+YeC4VQoFvpkHPKEYYT0EM/ykjch3453RmQWQEwkj\nVvstV1U16YpnEI61l1s347RHi64SwtlwV6tiNCpopDF2u3Bb+eiJqrlIC69EFnZE\n1426AVmZZq3sWu3eKB0HgT5u6B1rErSTl3c4hK4SiDsWWlVktBSN0BS4cD+urSAF\nc673/wLKCjq2I+9i3Wv2K7Ton3w5oaETE7lgQyImbKOVhJhFrPGu9fKXaeWlyXGY\nj7tz68vNTvecRynKrmzUJ9BBMfAXTrCowitzjBjanFitgXK4DnQMkb+8lv2Txb/n\nkB7RzcFDyIVd3g5XWBujR3fkQFWsNQ==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDlTCCAn2gAwIBAgIIV5i/4m8WRp0wDQYJKoZIhvcNAQELBQAwWDFWMFQGA1UE\nAwxNb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtYXBpc2Vy\ndmVyLXRvLWt1YmVsZXQtc2lnbmVyQDE3NjUyMTU4NjMwHhcNMjUxMjA4MTc0NDIz\nWhcNMjYxMjA4MTc0NDI0WjBYMVYwVAYDVQQDDE1vcGVuc2hpZnQta3ViZS1hcGlz\nZXJ2ZXItb3BlcmF0b3Jfa3ViZS1hcGlzZXJ2ZXItdG8ta3ViZWxldC1zaWduZXJA\nMTc2NTIxNTg2MzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMQk29Lc\nviCBF+ZN194ChQgHyYo1iN6wW+jZDEEIQpkmIfgrPnEOPL8+9d3SN92BqqYGdwnp\n5TdyDJBFBjrM8iKKvrq6x+EcyQJU6/Q+41bpPSLsziclImlDUUE29OYj6poxfNi1\nQBeFL1q4j9/ks+AfMnpjEbiGjxjJ8cV8++3NERSB1jJLft1rYcnQvgBuE64jqipO\nbNczVjMjcq0g+H+qpZknHlFueBqi5F/Nj/hC7QZbS96VThCxM123zqORBAfU5Fj0\ndMk3XqYTM1mpfyQHihtlyG3vsPXI/CBZgno6CI+KuXZJ46IjNNmiImyVJNKe7tXS\niWxbKKtEZHAvMcsCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQF\nMAMBAf8wHQYDVR0OBBYEFKzukDY3Odtmp3C+ncqem+63g2NuMB8GA1UdIwQYMBaA\nFKzukDY3Odtmp3C+ncqem+63g2NuMA0GCSqGSIb3DQEBCwUAA4IBAQBC2NLbh36L\n05mNq0+V4avx/2/xXvih+RtebPhiF8w8WG7WWRiIlK/yn8+iToFX/07+HWbBSK3g\nu5Yqac0eh8iKLkG+eIFiXpZR4B4Ha3ZRoU4N6dBMohIChZNugHGtjhfFjDpjFY8N\n9jMoZmTtjtK7RW2tu1qRyJcNSk8ou6nYNo/fB9PHWP5E12cWdg2ZQyESq+zE2dFo\n/dNjvb2y+GneObWzG9nclr6L7f6jI4LSOujO9ZA28xW4lf2EmosQ2HOeun48vA3O\n0C9lO1/SqcPkA6TtMHsoXZDSRv+mH62ugEZkDn8lgOizTm3l+jU9UA4RSvRD7ghR\nuXScj56hVynp\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDNDCCAhygAwIBAgIILvKlXd2YBKIwDQYJKoZIhvcNAQELBQAwODESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVy\nMB4XDTI1MTEwMjA3MzQxMFoXDTI2MTEwMjA3MzQxMFowODESMBAGA1UECxMJb3Bl\nbnNoaWZ0MSIwIAYDVQQDExlrdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOnFEdxnqqE7l+5NxN82TBBycC2F\n5cUboCcUVr211lubqmjCusnJzzz+6rKpO8uFqqGeu4C7rGpudOaK52IZhG0WP8b7\n0raQhnM4DV8t2SHDV4GhFUiuNE+b4FJrZ6jiljQo2g9ZeeCZgdmmBrIFHBXDFzEc\nA1RPScqOtvBbbH064Zd267gOmVPJnWmxDXo6X/RGYCm1YUS6FQ2WWpl707ComvgZ\nAvWGSA4H1sZirMQ+ug3bctkLb+SiXUzf+tLnGIHPeqDNfMrNUhxBl6dDhlMbUIzY\nrVxgDD8y3i7eg5i5HG8yntl8epgs2gn47wfavfjLqATBlciJPZY6Qv3BFQIDAQAB\no0IwQDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\nWi6wmZ50AVdwxaiO3WjTvp82+sEwDQYJKoZIhvcNAQELBQADggEBAA4siMWwOGL5\nCiCxbiJsuKMbCLlFR6JaD43FWnUCZDoe7np7W76Oh4py4Zht7XlZKotcXrrfRYVO\ndVht66PCbSujy375p/B6c3isG4h/1cNSGDm1uhAkHXGZ88S2wSjKT5YJ/HUAkvyj\nadQgZeO7Q60YBSDE/67Ldq1zqvBrMF2k8pF49p1AdAtf4OSDzIaGGPUQJTFExA 2025-12-08T17:47:03.286099509+00:00 stderr F 0E\n03xMlOPNhYZ8MgFT2XE6nRT74lCAfK9krAsZLtFuAtp/14t013PD0FqTTQRUmuSj\nO6pJKDTH8tZ3ieXxSzRR+j4p5hkHaehgQVyUbwiw8WVXkd6NcWR6yQcSeqIsTSCD\neMDdTmmKyuo=\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDhzCCAm+gAwIBAgIIcuJfJWKJ/NEwDQYJKoZIhvcNAQELBQAwUTFPME0GA1UE\nAwxGb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX2t1YmUtY29udHJv\nbC1wbGFuZS1zaWduZXJAMTc2NTIxNTg2MzAeFw0yNTEyMDgxNzQ0MjNaFw0yNjAy\nMDYxNzQ0MjRaMFExTzBNBgNVBAMMRm9wZW5zaGlmdC1rdWJlLWFwaXNlcnZlci1v\ncGVyYXRvcl9rdWJlLWNvbnRyb2wtcGxhbmUtc2lnbmVyQDE3NjUyMTU4NjMwggEi\nMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC86U3wU8BYhyUyqYM3Vuy/Kvfu\nQlUYxAqiVxA50CLa488sawRtfVN03+NKfPtzoj6xg5nYxR0a/+IP95n2YltFsU5k\nyumfMqcMWP1gZeUuqq0tHgy/GYvD4uF2IWLRMYMdYrsbJlOPWRCnRfWtXN7LJHAY\nBQwKW01c7MOm8AMOT5sGCw7z1GwROdLkjebZSAWeWP+uho5ubO7R9yFVrMJGzBum\nXUceaUrjiVyDCVdMBMttbZtjYYwW1NqDl4P4CgtW+CRONRTW8FNDdldzjm2fo/HL\n/frz934yfHA6c6xDWRI4+BEKJpecqxBUoC6xeGNdPd3KFmqPFRp7N/oERpH5AgMB\nAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW\nBBRrr6Rca+ABepsTcpAiBlwqNlkzNzAfBgNVHSMEGDAWgBRrr6Rca+ABepsTcpAi\nBlwqNlkzNzANBgkqhkiG9w0BAQsFAAOCAQEArD1l55HNxEi+lDb8LV+9Zzmb+gxB\nDq27GP6pZD+v8cHdoet3SgTFXeYKrd/Aw34+ZJceKPQrhoLtGkl+UW9T50ymZmVx\nENwuX+8e/OxAYAcKZdAwlCmPBV2A+puager7UZ6cE35W22ZqqijJ3J+nB7BmCtQ7\nqooWmH+OcHkw9Eoa8BbWCAH8nItf7bglCui0yQb4MCbrGMCHOVKwInTpI2biAdb6\nvQwXe1ofL4bVZt0eiPk2tuhljglLjV23q/aaFqTXC7T6UIKtb0olqNjGO10Aasew\ntAxUmbhL/uOz2X2JztYbjYPfVWbeUefTtX8tXV8oqflB6auskk/m2wMUbw==\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDSDCCAjCgAwIBAgIIV//3Qv2OxM4wDQYJKoZIhvcNAQELBQAwQjESMBAGA1UE\nCxMJb3BlbnNoaWZ0MSwwKgYDVQQDEyNrdWJlbGV0LWJvb3RzdHJhcC1rdWJlY29u\nZmlnLXNpZ25lcjAeFw0yNTExMDIwNzM0MDhaFw0zNTEwMzEwNzM0MDhaMEIxEjAQ\nBgNVBAsTCW9wZW5zaGlmdDEsMCoGA1UEAxMja3ViZWxldC1ib290c3RyYXAta3Vi\nZWNvbmZpZy1zaWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDc\nZBn9WZ5wLPck6+g3f4p0NBGyE6LaXnWavXx9m0sVdTVQooknndmKufeYkXGZ5Lb+\nfbMAp/6swgSJ1DjdBj06rCqJEZfdZl3uZoD/Th4ha2Phl12bXaNYLiuOu5BOZ3UW\n08y1Wab9Y9zc0o4Z71pHH4o9TH3QPNT6BqAz4kkgD6t1r/R7E7lrZbx+7e+0JBAW\nRgufaFOX1AYU5B4+pSM21eJY7oP1P9I4DMeeJW39opCCHAuUQgHpOV1YPtRqEPJ4\n9matas8qm5qIMIPbGEGFckSJqgny9YCfHaLezJtZMIHJgz5LW4H91gQCGvfSbLtH\nYxYO/PcTXQCnDYNwqf29AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBSCg7Y8QkzypoNAqYVPNK5YWMVIXzANBgkqhkiG\n9w0BAQsFAAOCAQEAsP1NR5ZgC7F5FvoU2CXla4FufdeYTk4bjjNoFJMqnvI5gcY6\nJDxP1Rl2YBHCpRxngtBFxw3Xoe8UnBKzZWsS5wLUYRloprhGVBSLM0vqJJOvP7M0\njt3SLuB7h0dG2GO9yQ4y10+xVWxP5Os9wcbQcRgTQKL3gHmCq4aQN1cqJSxyJ/ut\nlbfYlM/xBcfLMY5Leeas6y2FPCFIEONh1U9FJZlF3YkhPp+XD7aePtC4tJqsokMc\nP80IwPn54aDT9akRPsOteB6C+xSAz2TlfWaJ/l/x9yXK+HJrRhMartqyN511SeEd\nDpNcMW9qPTjJzBj+N3f0ZfvbTmhVSvV65ZEtAw==\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIDhTCCAm2gAwIBAgIIfksp5LDEMMgwDQYJKoZIhvcNAQELBQAwUDFOMEwGA1UE\nAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9wZXJhdG9yX25vZGUtc3lzdGVt\nLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MB4XDTI1MTEwMjA3NTEyN1oXDTI4MTEw\nMTA3NTEyOFowUDFOMEwGA1UEAwxFb3BlbnNoaWZ0LWt1YmUtYXBpc2VydmVyLW9w\nZXJhdG9yX25vZGUtc3lzdGVtLWFkbWluLXNpZ25lckAxNzYyMDY5ODg3MIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA402A+B9GaYmzBVLJEyrGvytCY0Vb\nvvIKHW2kxl9IFN3+LNZHW/mKeJfz/hBTm2bs+6uRCDlMSyONDlVUWVsuE+q0+F42\n0n3VyxWRSrDZ2ur5oNxmoBSsHRM+PxccQ6X3JTZyO397LHNOzxAs/Es+St8A8sbY\nGLc1lNqeOLvwAOT5d2PrFlYCAfXYs/UVIaio846jidKKN1f8Z6W5pgdAHuTXbyBQ\nLDQh6s43TBPhww1KszmcwURjEBDCT6KlhsM/quMd9XlMU0ZEAMf6XxsqvW8ia8C8\nF+RNAaGkwmiS4qZ+hJ4KIUnWM84j+bsyNBqlHFKi1e7LsKRyjnQ288FqIQIDAQAB\no2MwYTAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\n9O+dX0K7dzD13bshuR70sHbUAeAwHwYDVR0jBBgwFoAU9O+dX0K7dzD13bshuR70\nsHbUAeAwDQYJKoZIhvcNAQELBQADggEBADPRGSn1U/YwBUkpU7vTzsxqLaVJW21o\n6hV/W2IjjGNGqp6c2kSH/3ZGSEjNwIJqKRFpC2gmTPgAqnC4nDosOHx5F5HXTmrU\n1l2Ivcm1Ep+t/zBgNHjBi3yommx8n2iTTdakpQaq7/u1s0I4UiRqXydjoGXp7H1C\naAsmRlK8ovgEAWzItjeMBzy65wqiStPBK+XAIddqznHCxrRyH5xk3HcnyMG4GDWl\nrogdK8yTGCuZVCvGfe9Hwm8tyYrxDRNvRLTc0ssTonAwnR/7IzaVVc9Pp0svCynJ\n6VX3FGhgWwDVWeajj8yrXeR42az/Rr1TAAOZtJMW+4hIkaU0/+msvgw=\n-----END CERTIFICATE-----\n"},"metadata":{"creationTimestamp":"2025-11-02T07:51:50Z","managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:ca-bundle.crt":{}},"f:metadata":{"f:annotations":{".":{},"f:openshift.io/owning-component":{}}}},"manager":"cluster-kube-apiserver-operator","operation":"Update","time":"2025-12-08T17:45:16Z"}],"resourceVersion":null,"uid":"e7bc0b2c-2af6-488e-bf6f-25875798350a"}} 2025-12-08T17:47:03.287840144+00:00 stderr F I1208 17:47:03.287745 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapUpdated' Updated ConfigMap/client-ca -n openshift-route-controller-manager: 2025-12-08T17:47:03.287840144+00:00 stderr F cause by changes in data.ca-bundle.crt 2025-12-08T17:47:03.292319415+00:00 stderr F I1208 17:47:03.292228 1 apps.go:155] Deployment "openshift-controller-manager/controller-manager" changes: 
{"metadata":{"annotations":{"operator.openshift.io/spec-hash":"06da8043501bad9ee4b5112f0f0d4005fb95b27e977f6923b28772a025c590fb"}},"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"metadata":{"annotations":{"configmaps/client-ca":"39015"}},"spec":{"containers":[{"args":["--config=/var/run/configmaps/config/config.yaml","-v=2"],"command":["openshift-controller-manager","start"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2","imagePullPolicy":"IfNotPresent","livenessProbe":{"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30},"name":"controller-manager","ports":[{"containerPort":8443}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"}},"resources":{"requests":{"cpu":"100m","memory":"100Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/config","name":"config"},{"mountPath":"/var/run/configmaps/client-ca","name":"client-ca"},{"mountPath":"/var/run/secrets/serving-cert","name":"serving-cert"},{"mountPath":"/etc/pki/ca-trust/extracted/pem","name":"proxy-ca-bundles"},{"mountPath":"/tmp","name":"tmp"}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"terminationGracePeriodSeconds":null,"volumes":[{"configMap":{"name":"config"},"name":"config"},{"configMap":{"name":"client-ca"},"name":"client-ca"},{"name":"serving-cert","secret":{"secretName":"serving-cert"}},{"configMap":{"items":[{"key":"ca-bundle.crt","path":"tls-ca-bundle.pem"}],"name":"openshift-global-ca"},"name":"proxy-ca-bundles"},{"emptyDir":{},"name":"tmp"}]}}}} 2025-12-08T17:47:03.303347621+00:00 stderr F I1208 17:47:03.303205 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:47:03.304183418+00:00 stderr F I1208 17:47:03.303991 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/controller-manager -n openshift-controller-manager because it changed 2025-12-08T17:47:03.309990841+00:00 stderr F I1208 17:47:03.309710 1 apps.go:155] Deployment "openshift-route-controller-manager/route-controller-manager" changes: 
{"metadata":{"annotations":{"operator.openshift.io/spec-hash":"f6566d56e2a6b887f8436b7dfcf2a2f54aa99f8c436c6e52894c2d3ed2a39cd2"}},"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"metadata":{"annotations":{"configmaps/client-ca":"39017"}},"spec":{"containers":[{"args":["--config=/var/run/configmaps/config/config.yaml","-v=2"],"command":["route-controller-manager","start"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095","imagePullPolicy":"IfNotPresent","livenessProbe":{"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30},"name":"route-controller-manager","ports":[{"containerPort":8443}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"}},"resources":{"requests":{"cpu":"100m","memory":"100Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/config","name":"config"},{"mountPath":"/var/run/configmaps/client-ca","name":"client-ca"},{"mountPath":"/var/run/secrets/serving-cert","name":"serving-cert"},{"mountPath":"/tmp","name":"tmp"}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"terminationGracePeriodSeconds":null,"volumes":[{"configMap":{"name":"config"},"name":"config"},{"configMap":{"name":"client-ca"},"name":"client-ca"},{"name":"serving-cert","secret":{"secretName":"serving-cert"}},{"emptyDir":{},"name":"tmp"}]}}}} 2025-12-08T17:47:03.318971394+00:00 stderr F I1208 17:47:03.318915 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:47:03.319353325+00:00 stderr F I1208 17:47:03.319277 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/route-controller-manager -n openshift-route-controller-manager because it changed 2025-12-08T17:47:03.477718431+00:00 stderr F I1208 17:47:03.477674 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:05.436531843+00:00 stderr F I1208 17:47:05.435542 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:05.827597923+00:00 stderr F I1208 17:47:05.827534 1 reflector.go:430] "Caches populated" type="*v1.ClusterRole" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:06.525078848+00:00 stderr F I1208 17:47:06.524999 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:06.548833407+00:00 stderr F I1208 17:47:06.548780 1 core.go:352] ConfigMap "openshift-controller-manager/config" changes: {"apiVersion":"v1","data":{"openshift-controller-manager.client-ca.configmap":"orAruQ=="},"kind":"ConfigMap","metadata":{"creationTimestamp":null,"managedFields":null,"resourceVersion":null,"uid":null}} 
2025-12-08T17:47:06.549007432+00:00 stderr F I1208 17:47:06.548960 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapUpdated' Updated ConfigMap/config -n openshift-controller-manager: 2025-12-08T17:47:06.549007432+00:00 stderr F cause by changes in data.openshift-controller-manager.client-ca.configmap 2025-12-08T17:47:06.562429735+00:00 stderr F I1208 17:47:06.562383 1 core.go:352] ConfigMap "openshift-route-controller-manager/config" changes: {"apiVersion":"v1","data":{"openshift-route-controller-manager.client-ca.configmap":"orAruQ=="},"kind":"ConfigMap","metadata":{"creationTimestamp":null,"managedFields":null,"resourceVersion":null,"uid":null}} 2025-12-08T17:47:06.563063825+00:00 stderr F I1208 17:47:06.562682 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'ConfigMapUpdated' Updated ConfigMap/config -n openshift-route-controller-manager: 2025-12-08T17:47:06.563063825+00:00 stderr F cause by changes in data.openshift-route-controller-manager.client-ca.configmap 2025-12-08T17:47:06.585920354+00:00 stderr F I1208 17:47:06.584800 1 apps.go:155] Deployment "openshift-controller-manager/controller-manager" changes: {"metadata":{"annotations":{"operator.openshift.io/spec-hash":"dc6bc21b862057818fe7b9bf70532cd9b57e5a76e0f5268fed27eba8bed55cf3"}},"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"metadata":{"annotations":{"configmaps/config":"39028"}},"spec":{"containers":[{"args":["--config=/var/run/configmaps/config/config.yaml","-v=2"],"command":["openshift-controller-manager","start"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2","imagePullPolicy":"IfNotPresent","livenessProbe":{"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30},"name":"controller-manager","ports":[{"containerPort":8443}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"}},"resources":{"requests":{"cpu":"100m","memory":"100Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/config","name":"config"},{"mountPath":"/var/run/configmaps/client-ca","name":"client-ca"},{"mountPath":"/var/run/secrets/serving-cert","name":"serving-cert"},{"mountPath":"/etc/pki/ca-trust/extracted/pem","name":"proxy-ca-bundles"},{"mountPath":"/tmp","name":"tmp"}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"terminationGracePeriodSeconds":null,"volumes":[{"configMap":{"name":"config"},"name":"config"},{"configMap":{"name":"client-ca"},"name":"client-ca"},{"name":"serving-cert","secret":{"secretName":"serving-cert"}},{"configMap":{"items":[{"key":"ca-bundle.crt","path":"tls-ca-bundle.pem"}],"name":"openshift-global-ca"},"name":"proxy-ca-bundles"},{"emptyDir":{},"name":"tmp"}]}}}} 
2025-12-08T17:47:06.596168447+00:00 stderr F I1208 17:47:06.595154 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:47:06.596168447+00:00 stderr F I1208 17:47:06.595918 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/controller-manager -n openshift-controller-manager because it changed 2025-12-08T17:47:06.600976389+00:00 stderr F I1208 17:47:06.599313 1 apps.go:155] Deployment "openshift-route-controller-manager/route-controller-manager" changes: {"metadata":{"annotations":{"operator.openshift.io/spec-hash":"bfc781c4875bd16b1ca04b47b92551b795136fde90a974eac7e0876d45b8b757"}},"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"metadata":{"annotations":{"configmaps/config":"39030"}},"spec":{"containers":[{"args":["--config=/var/run/configmaps/config/config.yaml","-v=2"],"command":["route-controller-manager","start"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095","imagePullPolicy":"IfNotPresent","livenessProbe":{"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30},"name":"route-controller-manager","ports":[{"containerPort":8443}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"}},"resources":{"requests":{"cpu":"100m","memory":"100Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/config","name":"config"},{"mountPath":"/var/run/configmaps/client-ca","name":"client-ca"},{"mountPath":"/var/run/secrets/serving-cert","name":"serving-cert"},{"mountPath":"/tmp","name":"tmp"}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"terminationGracePeriodSeconds":null,"volumes":[{"configMap":{"name":"config"},"name":"config"},{"configMap":{"name":"client-ca"},"name":"client-ca"},{"name":"serving-cert","secret":{"secretName":"serving-cert"}},{"emptyDir":{},"name":"tmp"}]}}}} 2025-12-08T17:47:06.606148550+00:00 stderr F I1208 17:47:06.605782 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:47:06.607941797+00:00 stderr F I1208 17:47:06.606457 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/route-controller-manager -n openshift-route-controller-manager because it changed 2025-12-08T17:47:06.793961023+00:00 stderr F I1208 17:47:06.793918 1 core.go:352] ConfigMap "openshift-controller-manager/config" changes: 
{"apiVersion":"v1","data":{"openshift-controller-manager.client-ca.configmap":"orAruQ=="},"kind":"ConfigMap","metadata":{"creationTimestamp":null,"managedFields":null,"resourceVersion":null,"uid":null}} 2025-12-08T17:47:06.794098308+00:00 stderr F I1208 17:47:06.794069 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ConfigMapUpdateFailed' Failed to update ConfigMap/config -n openshift-controller-manager: Operation cannot be fulfilled on configmaps "config": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:06.993266077+00:00 stderr F I1208 17:47:06.993216 1 core.go:352] ConfigMap "openshift-route-controller-manager/config" changes: {"apiVersion":"v1","data":{"openshift-route-controller-manager.client-ca.configmap":"orAruQ=="},"kind":"ConfigMap","metadata":{"creationTimestamp":null,"managedFields":null,"resourceVersion":null,"uid":null}} 2025-12-08T17:47:06.993520905+00:00 stderr F I1208 17:47:06.993466 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ConfigMapUpdateFailed' Failed to update ConfigMap/config -n openshift-route-controller-manager: Operation cannot be fulfilled on configmaps "config": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:07.114211204+00:00 stderr F I1208 17:47:07.114100 1 apps.go:155] Deployment "openshift-controller-manager/controller-manager" changes: 
{"metadata":{"annotations":{"operator.openshift.io/spec-hash":"4f62bf34abd506a2b46af209932a88dd4b43aadba6998cdbb15afc8287ad205f"}},"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"metadata":{"annotations":{"configmaps/config":null}},"spec":{"containers":[{"args":["--config=/var/run/configmaps/config/config.yaml","-v=2"],"command":["openshift-controller-manager","start"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2","imagePullPolicy":"IfNotPresent","livenessProbe":{"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30},"name":"controller-manager","ports":[{"containerPort":8443}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"}},"resources":{"requests":{"cpu":"100m","memory":"100Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/config","name":"config"},{"mountPath":"/var/run/configmaps/client-ca","name":"client-ca"},{"mountPath":"/var/run/secrets/serving-cert","name":"serving-cert"},{"mountPath":"/etc/pki/ca-trust/extracted/pem","name":"proxy-ca-bundles"},{"mountPath":"/tmp","name":"tmp"}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"terminationGracePeriodSeconds":null,"volumes":[{"configMap":{"name":"config"},"name":"config"},{"configMap":{"name":"client-ca"},"name":"client-ca"},{"name":"serving-cert","secret":{"secretName":"serving-cert"}},{"configMap":{"items":[{"key":"ca-bundle.crt","path":"tls-ca-bundle.pem"}],"name":"openshift-global-ca"},"name":"proxy-ca-bundles"},{"emptyDir":{},"name":"tmp"}]}}}} 2025-12-08T17:47:07.156107843+00:00 stderr F I1208 17:47:07.156054 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:47:07.156852197+00:00 stderr F I1208 17:47:07.156780 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/controller-manager -n openshift-controller-manager because it changed 2025-12-08T17:47:07.194772021+00:00 stderr F I1208 17:47:07.194677 1 apps.go:155] Deployment "openshift-route-controller-manager/route-controller-manager" changes: 
{"metadata":{"annotations":{"operator.openshift.io/spec-hash":"16c35ef0bb727b5aff0b37cbe1205db40758832fb8ebd2ebc908df957fb7aba1"}},"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"metadata":{"annotations":{"configmaps/config":null}},"spec":{"containers":[{"args":["--config=/var/run/configmaps/config/config.yaml","-v=2"],"command":["route-controller-manager","start"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095","imagePullPolicy":"IfNotPresent","livenessProbe":{"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30},"name":"route-controller-manager","ports":[{"containerPort":8443}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"}},"resources":{"requests":{"cpu":"100m","memory":"100Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/config","name":"config"},{"mountPath":"/var/run/configmaps/client-ca","name":"client-ca"},{"mountPath":"/var/run/secrets/serving-cert","name":"serving-cert"},{"mountPath":"/tmp","name":"tmp"}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"terminationGracePeriodSeconds":null,"volumes":[{"configMap":{"name":"config"},"name":"config"},{"configMap":{"name":"client-ca"},"name":"client-ca"},{"name":"serving-cert","secret":{"secretName":"serving-cert"}},{"emptyDir":{},"name":"tmp"}]}}}} 2025-12-08T17:47:07.239739166+00:00 stderr F I1208 17:47:07.239652 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:47:07.240107078+00:00 stderr F I1208 17:47:07.240031 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/route-controller-manager -n openshift-route-controller-manager because it changed 2025-12-08T17:47:07.247947115+00:00 stderr F E1208 17:47:07.247863 1 operator.go:174] "Unhandled Error" err="key failed with : Operation cannot be fulfilled on openshiftcontrollermanagers.operator.openshift.io \"cluster\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:07.473206355+00:00 stderr F I1208 17:47:07.473157 1 core.go:352] ConfigMap "openshift-controller-manager/config" changes: {"apiVersion":"v1","data":{"openshift-controller-manager.client-ca.configmap":"orAruQ=="},"kind":"ConfigMap","metadata":{"creationTimestamp":null,"managedFields":null,"resourceVersion":null,"uid":null}} 2025-12-08T17:47:07.473357310+00:00 stderr F I1208 17:47:07.473305 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ConfigMapUpdateFailed' Failed to update ConfigMap/config -n openshift-controller-manager: Operation cannot be fulfilled on configmaps "config": 
the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:07.674194702+00:00 stderr F I1208 17:47:07.674149 1 core.go:352] ConfigMap "openshift-route-controller-manager/config" changes: {"apiVersion":"v1","data":{"openshift-route-controller-manager.client-ca.configmap":"orAruQ=="},"kind":"ConfigMap","metadata":{"creationTimestamp":null,"managedFields":null,"resourceVersion":null,"uid":null}} 2025-12-08T17:47:07.674429470+00:00 stderr F I1208 17:47:07.674355 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ConfigMapUpdateFailed' Failed to update ConfigMap/config -n openshift-route-controller-manager: Operation cannot be fulfilled on configmaps "config": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:07.794871681+00:00 stderr F I1208 17:47:07.794769 1 apps.go:155] Deployment "openshift-controller-manager/controller-manager" changes: {"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"spec":{"containers":[{"args":["--config=/var/run/configmaps/config/config.yaml","-v=2"],"command":["openshift-controller-manager","start"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2","imagePullPolicy":"IfNotPresent","livenessProbe":{"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30},"name":"controller-manager","ports":[{"containerPort":8443}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"}},"resources":{"requests":{"cpu":"100m","memory":"100Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/config","name":"config"},{"mountPath":"/var/run/configmaps/client-ca","name":"client-ca"},{"mountPath":"/var/run/secrets/serving-cert","name":"serving-cert"},{"mountPath":"/etc/pki/ca-trust/extracted/pem","name":"proxy-ca-bundles"},{"mountPath":"/tmp","name":"tmp"}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"terminationGracePeriodSeconds":null,"volumes":[{"configMap":{"name":"config"},"name":"config"},{"configMap":{"name":"client-ca"},"name":"client-ca"},{"name":"serving-cert","secret":{"secretName":"serving-cert"}},{"configMap":{"items":[{"key":"ca-bundle.crt","path":"tls-ca-bundle.pem"}],"name":"openshift-global-ca"},"name":"proxy-ca-bundles"},{"emptyDir":{},"name":"tmp"}]}}}} 2025-12-08T17:47:07.840188098+00:00 stderr F I1208 17:47:07.840076 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/controller-manager -n openshift-controller-manager because it changed 2025-12-08T17:47:07.874770846+00:00 stderr F I1208 17:47:07.874700 1 apps.go:155] Deployment "openshift-route-controller-manager/route-controller-manager" changes: 
{"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"spec":{"containers":[{"args":["--config=/var/run/configmaps/config/config.yaml","-v=2"],"command":["route-controller-manager","start"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095","imagePullPolicy":"IfNotPresent","livenessProbe":{"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30},"name":"route-controller-manager","ports":[{"containerPort":8443}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"}},"resources":{"requests":{"cpu":"100m","memory":"100Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/config","name":"config"},{"mountPath":"/var/run/configmaps/client-ca","name":"client-ca"},{"mountPath":"/var/run/secrets/serving-cert","name":"serving-cert"},{"mountPath":"/tmp","name":"tmp"}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"terminationGracePeriodSeconds":null,"volumes":[{"configMap":{"name":"config"},"name":"config"},{"configMap":{"name":"client-ca"},"name":"client-ca"},{"name":"serving-cert","secret":{"secretName":"serving-cert"}},{"emptyDir":{},"name":"tmp"}]}}}} 2025-12-08T17:47:07.918239185+00:00 stderr F I1208 17:47:07.918143 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/route-controller-manager -n openshift-route-controller-manager because it changed 2025-12-08T17:47:10.687248921+00:00 stderr F I1208 17:47:10.686756 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:10.801859029+00:00 stderr F I1208 17:47:10.801784 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:11.945444688+00:00 stderr F I1208 17:47:11.945371 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:13.465424956+00:00 stderr F I1208 17:47:13.465356 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:13.573281970+00:00 stderr F I1208 17:47:13.573214 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:14.661287230+00:00 stderr F I1208 17:47:14.661239 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:17.267199482+00:00 stderr F I1208 17:47:17.266619 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:19.318349450+00:00 stderr F I1208 17:47:19.317929 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 
2025-12-08T17:47:20.666934983+00:00 stderr F I1208 17:47:20.666847 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:21.093469000+00:00 stderr F I1208 17:47:21.092844 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:21.455560598+00:00 stderr F I1208 17:47:21.455447 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:21.665745535+00:00 stderr F I1208 17:47:21.665256 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:22.489927269+00:00 stderr F I1208 17:47:22.486438 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:23.231458712+00:00 stderr F I1208 17:47:23.231132 1 core.go:352] ConfigMap "openshift-route-controller-manager/config" changes: {"apiVersion":"v1","data":{"openshift-route-controller-manager.client-ca.configmap":"orAruQ=="},"kind":"ConfigMap","metadata":{"creationTimestamp":null,"managedFields":null,"resourceVersion":null,"uid":null}} 2025-12-08T17:47:23.231605456+00:00 stderr F I1208 17:47:23.231575 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ConfigMapUpdateFailed' Failed to update ConfigMap/config -n openshift-route-controller-manager: Operation cannot be fulfilled on configmaps "config": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:23.240303350+00:00 stderr F I1208 17:47:23.240249 1 apps.go:155] Deployment "openshift-controller-manager/controller-manager" changes: 
{"metadata":{"annotations":{"operator.openshift.io/spec-hash":"dc6bc21b862057818fe7b9bf70532cd9b57e5a76e0f5268fed27eba8bed55cf3"}},"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"metadata":{"annotations":{"configmaps/config":"39028"}},"spec":{"containers":[{"args":["--config=/var/run/configmaps/config/config.yaml","-v=2"],"command":["openshift-controller-manager","start"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2","imagePullPolicy":"IfNotPresent","livenessProbe":{"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30},"name":"controller-manager","ports":[{"containerPort":8443}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"}},"resources":{"requests":{"cpu":"100m","memory":"100Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/config","name":"config"},{"mountPath":"/var/run/configmaps/client-ca","name":"client-ca"},{"mountPath":"/var/run/secrets/serving-cert","name":"serving-cert"},{"mountPath":"/etc/pki/ca-trust/extracted/pem","name":"proxy-ca-bundles"},{"mountPath":"/tmp","name":"tmp"}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"terminationGracePeriodSeconds":null,"volumes":[{"configMap":{"name":"config"},"name":"config"},{"configMap":{"name":"client-ca"},"name":"client-ca"},{"name":"serving-cert","secret":{"secretName":"serving-cert"}},{"configMap":{"items":[{"key":"ca-bundle.crt","path":"tls-ca-bundle.pem"}],"name":"openshift-global-ca"},"name":"proxy-ca-bundles"},{"emptyDir":{},"name":"tmp"}]}}}} 2025-12-08T17:47:23.245480583+00:00 stderr F I1208 17:47:23.245428 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:47:23.246479514+00:00 stderr F I1208 17:47:23.246430 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'DeploymentUpdated' Updated Deployment.apps/controller-manager -n openshift-controller-manager because it changed 2025-12-08T17:47:24.446461299+00:00 stderr F I1208 17:47:24.445936 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:26.850920799+00:00 stderr F I1208 17:47:26.850269 1 reflector.go:430] "Caches populated" type="*v1.OpenShiftControllerManager" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:26.858464097+00:00 stderr F I1208 17:47:26.858380 1 status_controller.go:229] clusteroperator/openshift-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-11-02T07:51:57Z","message":"RouteControllerManagerDegraded: \"route-controller-manager\" \"configmap\": Operation cannot be fulfilled on configmaps \"config\": the object has been modified; please apply your changes to the latest version and try again\nRouteControllerManagerDegraded: 
","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:47:26Z","message":"Progressing: deployment/controller-manager: observed generation is 18, desired generation is 19\nProgressing: deployment/controller-manager: no available replica found\nRouteControllerManagerProgressing: deployment/route-controller-manager: no available replica found","reason":"RouteControllerManager_DesiredStateNotYetAchieved::_DesiredStateNotYetAchieved","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:47:26Z","message":"Available: no openshift controller manager deployment pods available on any node\nRouteControllerManagerAvailable: no route controller manager deployment pods available on any node","reason":"RouteControllerManager_NoPodsAvailable::_NoPodsAvailable","status":"False","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:01Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:57Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:26.866588913+00:00 stderr F I1208 17:47:26.866493 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-controller-manager changed: Degraded message changed from "All is well" to "RouteControllerManagerDegraded: \"route-controller-manager\" \"configmap\": Operation cannot be fulfilled on configmaps \"config\": the object has been modified; please apply your changes to the latest version and try again\nRouteControllerManagerDegraded: ",Progressing changed from False to True ("Progressing: deployment/controller-manager: observed generation is 18, desired generation is 19\nProgressing: deployment/controller-manager: no available replica found\nRouteControllerManagerProgressing: deployment/route-controller-manager: no available replica found"),Available changed from True to False ("Available: no openshift controller manager deployment pods available on any node\nRouteControllerManagerAvailable: no route controller manager deployment pods available on any node") 2025-12-08T17:47:31.054573717+00:00 stderr F I1208 17:47:31.054163 1 reflector.go:430] "Caches populated" type="*v1.Image" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:34.554498450+00:00 stderr F I1208 17:47:34.552860 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:35.882900807+00:00 stderr F I1208 17:47:35.882361 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:39.634928937+00:00 stderr F I1208 17:47:39.634106 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:43.232106593+00:00 stderr F I1208 17:47:43.231999 1 core.go:352] ConfigMap "openshift-route-controller-manager/config" changes: {"apiVersion":"v1","data":{"openshift-route-controller-manager.client-ca.configmap":"orAruQ=="},"kind":"ConfigMap","metadata":{"creationTimestamp":null,"managedFields":null,"resourceVersion":null,"uid":null}} 2025-12-08T17:47:43.232276068+00:00 stderr F I1208 
17:47:43.232204 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'ConfigMapUpdateFailed' Failed to update ConfigMap/config -n openshift-route-controller-manager: Operation cannot be fulfilled on configmaps "config": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:47:43.258965728+00:00 stderr F I1208 17:47:43.258558 1 status_controller.go:229] clusteroperator/openshift-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-11-02T07:51:57Z","message":"RouteControllerManagerDegraded: \"route-controller-manager\" \"configmap\": Operation cannot be fulfilled on configmaps \"config\": the object has been modified; please apply your changes to the latest version and try again\nRouteControllerManagerDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:47:43Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:47:43Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:01Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:57Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:43.268900781+00:00 stderr F I1208 17:47:43.268792 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-controller-manager changed: Progressing changed from True to False ("All is well"),Available changed from False to True ("All is well") 2025-12-08T17:47:45.864259059+00:00 stderr F I1208 17:47:45.863971 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:46.069713878+00:00 stderr F I1208 17:47:46.069635 1 reflector.go:430] "Caches populated" type="*v1.Network" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:46.372626313+00:00 stderr F I1208 17:47:46.372584 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:46.460447687+00:00 stderr F I1208 17:47:46.460376 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:46.918575529+00:00 stderr F I1208 17:47:46.918489 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:47.720937776+00:00 stderr F I1208 17:47:47.718530 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:48.138997686+00:00 stderr F I1208 17:47:48.138864 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:48.241580146+00:00 stderr F I1208 
17:47:48.241526 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:48.766007254+00:00 stderr F I1208 17:47:48.765950 1 reflector.go:430] "Caches populated" type="*v1.Proxy" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:49.774272394+00:00 stderr F I1208 17:47:49.773834 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:52.865080218+00:00 stderr F I1208 17:47:52.864588 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:58.770609693+00:00 stderr F I1208 17:47:58.770083 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:02.511939919+00:00 stderr F I1208 17:48:02.511521 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:03.224678633+00:00 stderr F I1208 17:48:03.224343 1 apps.go:155] Deployment "openshift-route-controller-manager/route-controller-manager" changes: {"metadata":{"annotations":{"operator.openshift.io/spec-hash":"bfc781c4875bd16b1ca04b47b92551b795136fde90a974eac7e0876d45b8b757"}},"spec":{"progressDeadlineSeconds":null,"revisionHistoryLimit":null,"template":{"metadata":{"annotations":{"configmaps/config":"39030"}},"spec":{"containers":[{"args":["--config=/var/run/configmaps/config/config.yaml","-v=2"],"command":["route-controller-manager","start"],"env":[{"name":"POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}}],"image":"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095","imagePullPolicy":"IfNotPresent","livenessProbe":{"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30},"name":"route-controller-manager","ports":[{"containerPort":8443}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"healthz","port":8443,"scheme":"HTTPS"}},"resources":{"requests":{"cpu":"100m","memory":"100Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true},"terminationMessagePolicy":"FallbackToLogsOnError","volumeMounts":[{"mountPath":"/var/run/configmaps/config","name":"config"},{"mountPath":"/var/run/configmaps/client-ca","name":"client-ca"},{"mountPath":"/var/run/secrets/serving-cert","name":"serving-cert"},{"mountPath":"/tmp","name":"tmp"}]}],"dnsPolicy":null,"restartPolicy":null,"schedulerName":null,"serviceAccount":null,"terminationGracePeriodSeconds":null,"volumes":[{"configMap":{"name":"config"},"name":"config"},{"configMap":{"name":"client-ca"},"name":"client-ca"},{"name":"serving-cert","secret":{"secretName":"serving-cert"}},{"emptyDir":{},"name":"tmp"}]}}}} 2025-12-08T17:48:03.230169559+00:00 stderr F I1208 17:48:03.230123 1 warnings.go:110] "Warning: spec.template.spec.nodeSelector[node-role.kubernetes.io/master]: use \"node-role.kubernetes.io/control-plane\" instead" 2025-12-08T17:48:03.231121429+00:00 stderr F I1208 17:48:03.231093 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 
'DeploymentUpdated' Updated Deployment.apps/route-controller-manager -n openshift-route-controller-manager because it changed 2025-12-08T17:48:03.246991908+00:00 stderr F I1208 17:48:03.246781 1 status_controller.go:229] clusteroperator/openshift-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-11-02T07:51:57Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:48:03Z","message":"RouteControllerManagerProgressing: deployment/route-controller-manager: observed generation is 16, desired generation is 17","reason":"RouteControllerManager_DesiredStateNotYetAchieved","status":"True","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:47:43Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:01Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:57Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:48:03.259980591+00:00 stderr F I1208 17:48:03.259889 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-controller-manager changed: Degraded message changed from "RouteControllerManagerDegraded: \"route-controller-manager\" \"configmap\": Operation cannot be fulfilled on configmaps \"config\": the object has been modified; please apply your changes to the latest version and try again\nRouteControllerManagerDegraded: " to "All is well",Progressing changed from False to True ("RouteControllerManagerProgressing: deployment/route-controller-manager: observed generation is 16, desired generation is 17") 2025-12-08T17:48:04.129475858+00:00 stderr F I1208 17:48:04.129092 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:09.290528490+00:00 stderr F I1208 17:48:09.289996 1 reflector.go:430] "Caches populated" type="*v1.Deployment" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:14.988190965+00:00 stderr F I1208 17:48:14.987654 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:23.256823467+00:00 stderr F I1208 17:48:23.256259 1 status_controller.go:229] clusteroperator/openshift-controller-manager diff {"status":{"conditions":[{"lastTransitionTime":"2025-11-02T07:51:57Z","message":"All is well","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-12-08T17:48:23Z","message":"All is well","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-12-08T17:47:43Z","message":"All is well","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:01Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:51:57Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:48:23.267767869+00:00 stderr F I1208 17:48:23.267705 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", 
Namespace:"openshift-controller-manager-operator", Name:"openshift-controller-manager-operator", UID:"5effb0d2-94d8-48b7-8c69-e538f7848429", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/openshift-controller-manager changed: Progressing changed from True to False ("All is well") ././@LongLink0000644000000000000000000000022400000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-defa0000755000175000017500000000000015115611514032775 5ustar zuulzuul././@LongLink0000644000000000000000000000023000000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/dns/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-defa0000755000175000017500000000000015115611521032773 5ustar zuulzuul././@LongLink0000644000000000000000000000023500000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/dns/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-defa0000644000175000017500000010253515115611514033005 0ustar zuulzuul2025-12-08T17:44:24.187714403+00:00 stdout F .:5353 2025-12-08T17:44:24.187714403+00:00 stdout F hostname.bind.:5353 2025-12-08T17:44:24.187831486+00:00 stdout F [INFO] plugin/reload: Running configuration SHA512 = c40f1fac74a6633c6b1943fe251ad80adf3d5bd9b35c9e7d9b72bc260c5e2455f03e403e3b79d32f0936ff27e81ff6d07c68a95724b1c2c23510644372976718 2025-12-08T17:44:24.187839726+00:00 stdout F CoreDNS-1.11.3 2025-12-08T17:44:24.187839726+00:00 stdout F linux/amd64, go1.24.4 (Red Hat 1.24.4-2.el9) X:strictfipsruntime, 2025-12-08T17:45:06.912018185+00:00 stdout F [INFO] 10.217.0.38:46181 - 22429 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.000768773s 2025-12-08T17:45:06.912018185+00:00 stdout F [INFO] 10.217.0.38:55086 - 30237 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.000812154s 2025-12-08T17:45:25.070957117+00:00 stdout F [INFO] 10.217.0.38:36938 - 47237 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.000848284s 2025-12-08T17:45:25.070957117+00:00 stdout F [INFO] 10.217.0.38:56771 - 44781 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.00106182s 2025-12-08T17:45:47.881140930+00:00 stdout F [INFO] 10.217.0.38:60344 - 41302 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.000842235s 2025-12-08T17:45:47.881140930+00:00 stdout F [INFO] 10.217.0.38:48524 - 3280 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.000966648s 2025-12-08T17:46:25.077453931+00:00 stdout F [INFO] 10.217.0.38:35431 - 41899 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002771573s 2025-12-08T17:46:25.077610676+00:00 stdout F [INFO] 10.217.0.38:54372 - 21628 "A IN thanos-querier.openshift-monitoring.svc. 
udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002706691s 2025-12-08T17:47:25.072587709+00:00 stdout F [INFO] 10.217.0.38:40752 - 27178 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001187147s 2025-12-08T17:47:25.072587709+00:00 stdout F [INFO] 10.217.0.38:59489 - 47450 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001368823s 2025-12-08T17:47:46.183314613+00:00 stdout F [INFO] 10.217.0.22:47562 - 36710 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000604829s 2025-12-08T17:47:46.183314613+00:00 stdout F [INFO] 10.217.0.22:38151 - 47086 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000730823s 2025-12-08T17:47:46.228934309+00:00 stdout F [INFO] 10.217.0.22:58404 - 56044 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000804455s 2025-12-08T17:47:46.228934309+00:00 stdout F [INFO] 10.217.0.22:54076 - 51857 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000813345s 2025-12-08T17:47:47.233660227+00:00 stdout F [INFO] 10.217.0.22:53087 - 36821 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000724622s 2025-12-08T17:47:47.233836963+00:00 stdout F [INFO] 10.217.0.22:55962 - 61880 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000798365s 2025-12-08T17:47:49.238465157+00:00 stdout F [INFO] 10.217.0.22:39533 - 21340 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000652211s 2025-12-08T17:47:49.238465157+00:00 stdout F [INFO] 10.217.0.22:45970 - 20475 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00063039s 2025-12-08T17:48:25.074470821+00:00 stdout F [INFO] 10.217.0.38:40635 - 50344 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001558407s 2025-12-08T17:48:25.074470821+00:00 stdout F [INFO] 10.217.0.38:48274 - 5192 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.00197827s 2025-12-08T17:48:31.728726270+00:00 stdout F [INFO] 10.217.0.38:58404 - 63371 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001404642s 2025-12-08T17:48:31.728726270+00:00 stdout F [INFO] 10.217.0.38:47338 - 43835 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001426522s 2025-12-08T17:49:25.082340616+00:00 stdout F [INFO] 10.217.0.38:47749 - 2872 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.00378756s 2025-12-08T17:49:25.082521832+00:00 stdout F [INFO] 10.217.0.38:54430 - 17976 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.003829101s 2025-12-08T17:50:25.077975574+00:00 stdout F [INFO] 10.217.0.38:43401 - 16363 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002588956s 2025-12-08T17:50:25.077975574+00:00 stdout F [INFO] 10.217.0.38:46082 - 19272 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002974284s 2025-12-08T17:52:25.079239281+00:00 stdout F [INFO] 10.217.0.38:32950 - 18793 "AAAA IN thanos-querier.openshift-monitoring.svc. 
udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002316912s 2025-12-08T17:52:25.079239281+00:00 stdout F [INFO] 10.217.0.38:40407 - 57904 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.00227043s 2025-12-08T17:53:25.078332516+00:00 stdout F [INFO] 10.217.0.38:54716 - 59728 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002388255s 2025-12-08T17:53:25.078470290+00:00 stdout F [INFO] 10.217.0.38:48363 - 28655 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002987192s 2025-12-08T17:54:25.080114247+00:00 stdout F [INFO] 10.217.0.38:51658 - 11374 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.00403952s 2025-12-08T17:54:25.080187319+00:00 stdout F [INFO] 10.217.0.38:49184 - 42375 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.004180973s 2025-12-08T17:55:22.907870512+00:00 stdout F [INFO] 10.217.0.22:34014 - 50481 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00111129s 2025-12-08T17:55:22.907870512+00:00 stdout F [INFO] 10.217.0.22:33433 - 41246 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000581236s 2025-12-08T17:55:23.917685621+00:00 stdout F [INFO] 10.217.0.22:54801 - 5418 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000641858s 2025-12-08T17:55:23.919175571+00:00 stdout F [INFO] 10.217.0.22:46807 - 27499 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001893181s 2025-12-08T17:55:25.076185039+00:00 stdout F [INFO] 10.217.0.38:44158 - 17041 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.000989587s 2025-12-08T17:55:25.076332883+00:00 stdout F [INFO] 10.217.0.38:58738 - 30477 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001323976s 2025-12-08T17:55:25.923337956+00:00 stdout F [INFO] 10.217.0.22:41528 - 59104 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000432282s 2025-12-08T17:55:25.923508680+00:00 stdout F [INFO] 10.217.0.22:58829 - 2889 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000541024s 2025-12-08T17:55:42.099623928+00:00 stdout F [INFO] 10.217.0.22:59030 - 8011 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000501793s 2025-12-08T17:55:42.099623928+00:00 stdout F [INFO] 10.217.0.22:53902 - 7920 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000621137s 2025-12-08T17:55:43.108345707+00:00 stdout F [INFO] 10.217.0.22:34718 - 3152 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00073286s 2025-12-08T17:55:43.108345707+00:00 stdout F [INFO] 10.217.0.22:41424 - 41186 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000707949s 2025-12-08T17:55:45.111916025+00:00 stdout F [INFO] 10.217.0.22:58433 - 23022 "A IN cluster-monitoring-operator.openshift-monitoring.svc. 
udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000680778s 2025-12-08T17:55:45.111916025+00:00 stdout F [INFO] 10.217.0.22:42616 - 28988 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00071355s 2025-12-08T17:55:51.256978574+00:00 stdout F [INFO] 10.217.0.22:37068 - 20361 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000849693s 2025-12-08T17:55:51.256978574+00:00 stdout F [INFO] 10.217.0.22:45159 - 40247 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000912945s 2025-12-08T17:55:52.262435284+00:00 stdout F [INFO] 10.217.0.22:39631 - 22264 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00110176s 2025-12-08T17:55:52.262435284+00:00 stdout F [INFO] 10.217.0.22:59825 - 15765 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001210223s 2025-12-08T17:55:54.266053242+00:00 stdout F [INFO] 10.217.0.22:47995 - 35141 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000651868s 2025-12-08T17:55:54.266053242+00:00 stdout F [INFO] 10.217.0.22:55618 - 11231 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00076238s 2025-12-08T17:56:00.426662888+00:00 stdout F [INFO] 10.217.0.22:35531 - 7347 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000950685s 2025-12-08T17:56:00.426662888+00:00 stdout F [INFO] 10.217.0.22:35528 - 9235 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001071059s 2025-12-08T17:56:01.431653245+00:00 stdout F [INFO] 10.217.0.22:42988 - 55668 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00074012s 2025-12-08T17:56:01.431653245+00:00 stdout F [INFO] 10.217.0.22:39198 - 6789 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000920005s 2025-12-08T17:56:03.442194754+00:00 stdout F [INFO] 10.217.0.22:48234 - 53680 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00073004s 2025-12-08T17:56:03.442194754+00:00 stdout F [INFO] 10.217.0.22:45489 - 1506 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000786692s 2025-12-08T17:56:04.562266879+00:00 stdout F [INFO] 10.217.0.22:41210 - 21373 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000567315s 2025-12-08T17:56:04.562266879+00:00 stdout F [INFO] 10.217.0.22:39904 - 3725 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000690958s 2025-12-08T17:56:05.565432666+00:00 stdout F [INFO] 10.217.0.22:58226 - 52418 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000542915s 2025-12-08T17:56:05.565485067+00:00 stdout F [INFO] 10.217.0.22:46677 - 12373 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000592087s 2025-12-08T17:56:07.569172478+00:00 stdout F [INFO] 10.217.0.22:54195 - 24940 "A IN cluster-monitoring-operator.openshift-monitoring.svc. 
udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000580465s 2025-12-08T17:56:07.569214529+00:00 stdout F [INFO] 10.217.0.22:52301 - 15978 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000894864s 2025-12-08T17:56:07.913763973+00:00 stdout F [INFO] 10.217.0.22:50863 - 50191 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000471153s 2025-12-08T17:56:07.913831965+00:00 stdout F [INFO] 10.217.0.22:55782 - 62365 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000439122s 2025-12-08T17:56:08.918026329+00:00 stdout F [INFO] 10.217.0.22:50976 - 50676 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000676148s 2025-12-08T17:56:08.918122092+00:00 stdout F [INFO] 10.217.0.22:35273 - 60404 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000649728s 2025-12-08T17:56:10.923551980+00:00 stdout F [INFO] 10.217.0.22:37392 - 47740 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000547775s 2025-12-08T17:56:10.923551980+00:00 stdout F [INFO] 10.217.0.22:46034 - 638 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00073939s 2025-12-08T17:56:25.080777712+00:00 stdout F [INFO] 10.217.0.38:52901 - 4774 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001335747s 2025-12-08T17:56:25.080777712+00:00 stdout F [INFO] 10.217.0.38:46540 - 9303 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001324957s 2025-12-08T17:57:13.536518910+00:00 stdout F [INFO] 10.217.0.22:47917 - 60255 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000601047s 2025-12-08T17:57:13.536518910+00:00 stdout F [INFO] 10.217.0.22:37289 - 29707 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001390817s 2025-12-08T17:57:14.542275893+00:00 stdout F [INFO] 10.217.0.22:33929 - 53043 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000669317s 2025-12-08T17:57:14.542336164+00:00 stdout F [INFO] 10.217.0.22:55500 - 43741 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00079505s 2025-12-08T17:57:16.546218089+00:00 stdout F [INFO] 10.217.0.22:45820 - 65482 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00042278s 2025-12-08T17:57:16.546218089+00:00 stdout F [INFO] 10.217.0.22:41040 - 27991 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000524034s 2025-12-08T17:57:16.623096935+00:00 stdout F [INFO] 10.217.0.22:56312 - 43596 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000541474s 2025-12-08T17:57:16.623331451+00:00 stdout F [INFO] 10.217.0.22:46138 - 22061 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000674238s 2025-12-08T17:57:17.629839542+00:00 stdout F [INFO] 10.217.0.22:58259 - 26239 "A IN cluster-monitoring-operator.openshift-monitoring.svc. 
udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000726519s 2025-12-08T17:57:17.630023307+00:00 stdout F [INFO] 10.217.0.22:53391 - 38954 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000922844s 2025-12-08T17:57:19.635229427+00:00 stdout F [INFO] 10.217.0.22:57329 - 16419 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000538164s 2025-12-08T17:57:19.635467443+00:00 stdout F [INFO] 10.217.0.22:59694 - 34452 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001034916s 2025-12-08T17:57:25.078572910+00:00 stdout F [INFO] 10.217.0.38:46206 - 33422 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001025286s 2025-12-08T17:57:25.078572910+00:00 stdout F [INFO] 10.217.0.38:35934 - 53200 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.00117567s 2025-12-08T17:57:28.607369829+00:00 stdout F [INFO] 10.217.0.22:58125 - 44287 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000595626s 2025-12-08T17:57:28.607369829+00:00 stdout F [INFO] 10.217.0.22:57932 - 30094 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000887374s 2025-12-08T17:57:29.613131891+00:00 stdout F [INFO] 10.217.0.22:53204 - 40501 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000474343s 2025-12-08T17:57:29.613131891+00:00 stdout F [INFO] 10.217.0.22:49859 - 2335 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000538384s 2025-12-08T17:57:31.618489965+00:00 stdout F [INFO] 10.217.0.22:46169 - 49429 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000422411s 2025-12-08T17:57:31.618525166+00:00 stdout F [INFO] 10.217.0.22:41495 - 16389 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000426991s 2025-12-08T17:57:37.634928116+00:00 stdout F [INFO] 10.217.0.22:55097 - 5951 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.004016384s 2025-12-08T17:57:37.634928116+00:00 stdout F [INFO] 10.217.0.22:46303 - 18641 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.004853334s 2025-12-08T17:57:38.642446526+00:00 stdout F [INFO] 10.217.0.22:37092 - 9469 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000564564s 2025-12-08T17:57:38.642446526+00:00 stdout F [INFO] 10.217.0.22:43454 - 34231 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000539914s 2025-12-08T17:57:40.647733584+00:00 stdout F [INFO] 10.217.0.22:54786 - 26032 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000823202s 2025-12-08T17:57:40.647799276+00:00 stdout F [INFO] 10.217.0.22:33653 - 19029 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001340635s 2025-12-08T17:57:40.740646346+00:00 stdout F [INFO] 10.217.0.22:53805 - 58901 "A IN cluster-monitoring-operator.openshift-monitoring.svc. 
udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000432471s 2025-12-08T17:57:40.740646346+00:00 stdout F [INFO] 10.217.0.22:52927 - 60174 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000530853s 2025-12-08T17:57:41.744339426+00:00 stdout F [INFO] 10.217.0.22:44251 - 8689 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000693497s 2025-12-08T17:57:41.746058261+00:00 stdout F [INFO] 10.217.0.22:38339 - 20567 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000576845s 2025-12-08T17:57:43.750074496+00:00 stdout F [INFO] 10.217.0.22:58391 - 59105 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000723848s 2025-12-08T17:57:43.750074496+00:00 stdout F [INFO] 10.217.0.22:37807 - 41132 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00079366s 2025-12-08T17:57:45.988079938+00:00 stdout F [INFO] 10.217.0.22:38167 - 4765 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000583075s 2025-12-08T17:57:45.988079938+00:00 stdout F [INFO] 10.217.0.22:56550 - 20351 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000528944s 2025-12-08T17:57:46.991824431+00:00 stdout F [INFO] 10.217.0.22:37316 - 49706 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000678167s 2025-12-08T17:57:46.991824431+00:00 stdout F [INFO] 10.217.0.22:50386 - 448 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000868482s 2025-12-08T17:57:49.009086609+00:00 stdout F [INFO] 10.217.0.22:48662 - 6366 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.002218846s 2025-12-08T17:57:49.009180681+00:00 stdout F [INFO] 10.217.0.22:39839 - 45670 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.002817042s 2025-12-08T17:57:49.128351421+00:00 stdout F [INFO] 10.217.0.22:49720 - 1229 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000827161s 2025-12-08T17:57:49.128378011+00:00 stdout F [INFO] 10.217.0.22:53089 - 27482 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000847362s 2025-12-08T17:57:50.137076172+00:00 stdout F [INFO] 10.217.0.22:50941 - 49870 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000340469s 2025-12-08T17:57:50.137076172+00:00 stdout F [INFO] 10.217.0.22:58590 - 44029 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000628586s 2025-12-08T17:57:52.141588520+00:00 stdout F [INFO] 10.217.0.22:45754 - 16481 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000679688s 2025-12-08T17:57:52.141782725+00:00 stdout F [INFO] 10.217.0.22:36012 - 7492 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000735848s 2025-12-08T17:57:52.307580090+00:00 stdout F [INFO] 10.217.0.22:44667 - 19194 "A IN cluster-monitoring-operator.openshift-monitoring.svc. 
udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001013746s 2025-12-08T17:57:52.307610291+00:00 stdout F [INFO] 10.217.0.22:60792 - 5209 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001020856s 2025-12-08T17:57:53.312834921+00:00 stdout F [INFO] 10.217.0.22:48891 - 38995 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00072035s 2025-12-08T17:57:53.312834921+00:00 stdout F [INFO] 10.217.0.22:41199 - 7141 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000697209s 2025-12-08T17:57:55.315872452+00:00 stdout F [INFO] 10.217.0.22:32977 - 62353 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000579345s 2025-12-08T17:57:55.316176849+00:00 stdout F [INFO] 10.217.0.22:38610 - 7231 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000881292s 2025-12-08T17:57:55.911471656+00:00 stdout F [INFO] 10.217.0.22:36355 - 60753 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001240352s 2025-12-08T17:57:55.911526867+00:00 stdout F [INFO] 10.217.0.22:40140 - 44164 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001441837s 2025-12-08T17:57:56.916334546+00:00 stdout F [INFO] 10.217.0.22:46414 - 26204 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000624236s 2025-12-08T17:57:56.916408038+00:00 stdout F [INFO] 10.217.0.22:39877 - 37009 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000686498s 2025-12-08T17:57:58.922200239+00:00 stdout F [INFO] 10.217.0.22:42798 - 58178 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00077458s 2025-12-08T17:57:58.922200239+00:00 stdout F [INFO] 10.217.0.22:54206 - 51057 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000948515s 2025-12-08T17:57:58.996110030+00:00 stdout F [INFO] 10.217.0.22:55897 - 15216 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000533704s 2025-12-08T17:57:58.996110030+00:00 stdout F [INFO] 10.217.0.22:44869 - 31862 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000638766s 2025-12-08T17:58:00.000370136+00:00 stdout F [INFO] 10.217.0.22:52283 - 13564 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000955055s 2025-12-08T17:58:00.000461568+00:00 stdout F [INFO] 10.217.0.22:45678 - 10509 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001056657s 2025-12-08T17:58:02.008772574+00:00 stdout F [INFO] 10.217.0.22:56973 - 23658 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000995376s 2025-12-08T17:58:02.008982879+00:00 stdout F [INFO] 10.217.0.22:33425 - 4530 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001645343s 2025-12-08T17:58:07.277562500+00:00 stdout F [INFO] 10.217.0.22:37900 - 23960 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. 
udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000722899s 2025-12-08T17:58:07.277627631+00:00 stdout F [INFO] 10.217.0.22:54723 - 6355 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.0007654s 2025-12-08T17:58:08.282131523+00:00 stdout F [INFO] 10.217.0.22:60555 - 10576 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000593735s 2025-12-08T17:58:08.282182774+00:00 stdout F [INFO] 10.217.0.22:52572 - 60042 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000759949s 2025-12-08T17:58:10.290309986+00:00 stdout F [INFO] 10.217.0.22:59926 - 22784 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00078074s 2025-12-08T17:58:10.290309986+00:00 stdout F [INFO] 10.217.0.22:51538 - 57747 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000921663s 2025-12-08T17:58:10.385959418+00:00 stdout F [INFO] 10.217.0.22:46813 - 38447 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000486522s 2025-12-08T17:58:10.385959418+00:00 stdout F [INFO] 10.217.0.22:39585 - 65174 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000462081s 2025-12-08T17:58:11.387732060+00:00 stdout F [INFO] 10.217.0.22:38315 - 30120 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000493192s 2025-12-08T17:58:11.387732060+00:00 stdout F [INFO] 10.217.0.22:46215 - 58203 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000530324s 2025-12-08T17:58:13.398600932+00:00 stdout F [INFO] 10.217.0.22:38073 - 58369 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000544175s 2025-12-08T17:58:13.398600932+00:00 stdout F [INFO] 10.217.0.22:44134 - 17717 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000793731s 2025-12-08T17:58:14.308664203+00:00 stdout F [INFO] 10.217.0.22:47665 - 48319 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000586835s 2025-12-08T17:58:14.308664203+00:00 stdout F [INFO] 10.217.0.22:39960 - 5518 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000874423s 2025-12-08T17:58:15.317928768+00:00 stdout F [INFO] 10.217.0.22:50681 - 16097 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000947445s 2025-12-08T17:58:15.317997820+00:00 stdout F [INFO] 10.217.0.22:55881 - 6132 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001056778s 2025-12-08T17:58:17.322000794+00:00 stdout F [INFO] 10.217.0.22:45301 - 52967 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000788031s 2025-12-08T17:58:17.322044235+00:00 stdout F [INFO] 10.217.0.22:38783 - 3059 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000815391s 2025-12-08T17:58:17.341892498+00:00 stdout F [INFO] 10.217.0.22:44996 - 63630 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. 
udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000423031s 2025-12-08T17:58:17.341892498+00:00 stdout F [INFO] 10.217.0.22:35842 - 4704 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000417171s 2025-12-08T17:58:18.345639131+00:00 stdout F [INFO] 10.217.0.22:34043 - 51074 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000919785s 2025-12-08T17:58:18.345677702+00:00 stdout F [INFO] 10.217.0.22:57715 - 59382 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001084389s 2025-12-08T17:58:20.349933493+00:00 stdout F [INFO] 10.217.0.22:55617 - 63740 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000630286s 2025-12-08T17:58:20.349933493+00:00 stdout F [INFO] 10.217.0.22:52724 - 984 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00077005s 2025-12-08T17:58:25.081723429+00:00 stdout F [INFO] 10.217.0.38:34604 - 4719 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001179551s 2025-12-08T17:58:25.081723429+00:00 stdout F [INFO] 10.217.0.38:37344 - 43309 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001353565s 2025-12-08T17:59:05.615779265+00:00 stdout F [INFO] 10.217.0.22:57098 - 59814 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001219342s 2025-12-08T17:59:05.615779265+00:00 stdout F [INFO] 10.217.0.22:56680 - 50127 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.001328985s 2025-12-08T17:59:06.620056143+00:00 stdout F [INFO] 10.217.0.22:51537 - 52771 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000923654s 2025-12-08T17:59:06.620056143+00:00 stdout F [INFO] 10.217.0.22:45314 - 41469 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000921034s 2025-12-08T17:59:08.625622671+00:00 stdout F [INFO] 10.217.0.22:34134 - 13858 "A IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.00073772s 2025-12-08T17:59:08.625622671+00:00 stdout F [INFO] 10.217.0.22:50443 - 10842 "AAAA IN cluster-monitoring-operator.openshift-monitoring.svc. udp 81 false 1232" NXDOMAIN qr,rd,ra 70 0.000852493s 2025-12-08T17:59:25.081115895+00:00 stdout F [INFO] 10.217.0.38:50893 - 26729 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.000574435s 2025-12-08T17:59:25.081957876+00:00 stdout F [INFO] 10.217.0.38:34353 - 9005 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002098505s 2025-12-08T18:00:25.086684243+00:00 stdout F [INFO] 10.217.0.38:55449 - 17130 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002915967s 2025-12-08T18:00:25.086684243+00:00 stdout F [INFO] 10.217.0.38:53485 - 8371 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002703031s 2025-12-08T18:01:25.083352334+00:00 stdout F [INFO] 10.217.0.38:53427 - 55876 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001297245s 2025-12-08T18:01:25.083352334+00:00 stdout F [INFO] 10.217.0.38:47614 - 9869 "A IN thanos-querier.openshift-monitoring.svc. 
udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001237234s 2025-12-08T18:02:25.087914124+00:00 stdout F [INFO] 10.217.0.38:42751 - 11979 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.00300653s 2025-12-08T18:02:25.088066909+00:00 stdout F [INFO] 10.217.0.38:33335 - 49145 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.003512295s 2025-12-08T18:03:25.084864719+00:00 stdout F [INFO] 10.217.0.38:36479 - 52997 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002007264s 2025-12-08T18:03:25.084933060+00:00 stdout F [INFO] 10.217.0.38:55249 - 56891 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.002461086s 2025-12-08T18:04:25.088952384+00:00 stdout F [INFO] 10.217.0.38:39455 - 28551 "A IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001435829s 2025-12-08T18:04:25.089090987+00:00 stdout F [INFO] 10.217.0.38:40034 - 18351 "AAAA IN thanos-querier.openshift-monitoring.svc. udp 68 false 1232" NXDOMAIN qr,rd,ra 57 0.001545531s ././@LongLink0000644000000000000000000000024400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-defa0000755000175000017500000000000015115611521032773 5ustar zuulzuul././@LongLink0000644000000000000000000000025100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-defa0000644000175000017500000000202015115611514032771 0ustar zuulzuul2025-12-08T17:44:24.516500151+00:00 stderr F W1208 17:44:24.516376 1 deprecated.go:66] 2025-12-08T17:44:24.516500151+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:24.516500151+00:00 stderr F 2025-12-08T17:44:24.516500151+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 
2025-12-08T17:44:24.516500151+00:00 stderr F 2025-12-08T17:44:24.516500151+00:00 stderr F =============================================== 2025-12-08T17:44:24.516500151+00:00 stderr F 2025-12-08T17:44:24.517275581+00:00 stderr F I1208 17:44:24.517079 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:24.518172957+00:00 stderr F I1208 17:44:24.518157 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:24.518620479+00:00 stderr F I1208 17:44:24.518606 1 kube-rbac-proxy.go:397] Starting TCP socket on :9154 2025-12-08T17:44:24.519096171+00:00 stderr F I1208 17:44:24.519082 1 kube-rbac-proxy.go:404] Listening securely on :9154 ././@LongLink0000644000000000000000000000024200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-cana0000755000175000017500000000000015115611514033022 5ustar zuulzuul././@LongLink0000644000000000000000000000027300000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86/serve-healthcheck-canary/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-cana0000755000175000017500000000000015115611521033020 5ustar zuulzuul././@LongLink0000644000000000000000000000030000000000000011574 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86/serve-healthcheck-canary/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-cana0000644000175000017500000000330215115611514033022 0ustar zuulzuul2025-12-08T17:44:22.959804169+00:00 stdout F serving TLS on 8888 2025-12-08T17:44:22.959804169+00:00 stdout F serving TLS on 8443 2025-12-08T17:45:35.105947916+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:46:35.108575543+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:47:35.104481173+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:48:35.118659964+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:49:35.116578315+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:50:35.112108819+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:51:35.105695483+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:52:35.113011231+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:53:35.128909054+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:54:35.115063059+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:55:35.118964291+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:56:35.108518973+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:57:35.111924328+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:58:35.118488166+00:00 stdout F Serving canary healthcheck request 2025-12-08T17:59:35.105485902+00:00 stdout F Serving canary healthcheck request 2025-12-08T18:00:35.118943228+00:00 stdout F Serving canary healthcheck request 2025-12-08T18:01:35.119057178+00:00 stdout F Serving canary healthcheck request 2025-12-08T18:02:35.114748449+00:00 stdout F Serving canary healthcheck request 2025-12-08T18:03:35.116614302+00:00 stdout F Serving 
canary healthcheck request 2025-12-08T18:04:35.121844228+00:00 stdout F Serving canary healthcheck request
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420265-vsxwc_3ec0e45e-87cc-4b67-b137-ac7179bf7d74/collect-profiles/0.log
2025-12-08T17:45:01.042181267+00:00 stderr F time="2025-12-08T17:45:01Z" level=info msg="Successfully created configMap openshift-operator-lifecycle-manager/olm-operator-heap-qvfbx" 2025-12-08T17:45:01.079316361+00:00 stderr F time="2025-12-08T17:45:01Z" level=info msg="Successfully created configMap openshift-operator-lifecycle-manager/catalog-operator-heap-jb2b2" 2025-12-08T17:45:01.084368471+00:00 stderr F time="2025-12-08T17:45:01Z" level=info msg="Successfully deleted configMap openshift-operator-lifecycle-manager/catalog-operator-heap-9rg4c" 2025-12-08T17:45:01.087908610+00:00 stderr F time="2025-12-08T17:45:01Z" level=info msg="Successfully deleted configMap openshift-operator-lifecycle-manager/olm-operator-heap-mn8zt"
home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-54f497555d-gvb6q_a52d6e07-c08e-4424-8a3f-50052c311604/kube-scheduler-operator-container/0.log
2025-12-08T17:44:21.706369879+00:00 stderr F I1208 17:44:21.697571 1 cmd.go:253] Using service-serving-cert provided certificates 2025-12-08T17:44:21.706369879+00:00 stderr F I1208 17:44:21.698233 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}. 2025-12-08T17:44:21.706369879+00:00 stderr F I1208 17:44:21.702710 1 observer_polling.go:159] Starting file observer 2025-12-08T17:44:21.725538562+00:00 stderr F I1208 17:44:21.724578 1 builder.go:304] openshift-cluster-kube-scheduler-operator version 4.20.0-202510211040.p2.g58cbd29.assembly.stream.el9-58cbd29-58cbd296eecc61c0871739588ae65af9c05e87a6 2025-12-08T17:44:22.290391949+00:00 stderr F I1208 17:44:22.288004 1 secure_serving.go:57] Forcing use of http/1.1 only 2025-12-08T17:44:22.290391949+00:00 stderr F W1208 17:44:22.288485 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:22.290391949+00:00 stderr F W1208 17:44:22.288489 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected. 2025-12-08T17:44:22.290391949+00:00 stderr F W1208 17:44:22.288493 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected. 2025-12-08T17:44:22.290391949+00:00 stderr F W1208 17:44:22.288495 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected. 2025-12-08T17:44:22.290391949+00:00 stderr F W1208 17:44:22.288498 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected. 2025-12-08T17:44:22.290391949+00:00 stderr F W1208 17:44:22.288500 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected. 2025-12-08T17:44:22.290391949+00:00 stderr F I1208 17:44:22.290349 1 builder.go:446] detected SingleReplicaTopologyMode, the original leader election has been altered for the default SingleReplicaTopology 2025-12-08T17:44:22.319385791+00:00 stderr F I1208 17:44:22.319085 1 leaderelection.go:257] attempting to acquire leader lease openshift-kube-scheduler-operator/openshift-cluster-kube-scheduler-operator-lock...
2025-12-08T17:44:22.326330100+00:00 stderr F I1208 17:44:22.326296 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController 2025-12-08T17:44:22.332774245+00:00 stderr F I1208 17:44:22.332736 1 shared_informer.go:350] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:22.332833288+00:00 stderr F I1208 17:44:22.326323 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:22.332930650+00:00 stderr F I1208 17:44:22.332909 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:22.332962221+00:00 stderr F I1208 17:44:22.326352 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:22.332980892+00:00 stderr F I1208 17:44:22.332674 1 secure_serving.go:211] Serving securely on [::]:8443 2025-12-08T17:44:22.333050294+00:00 stderr F I1208 17:44:22.332693 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" 2025-12-08T17:44:22.333178907+00:00 stderr F I1208 17:44:22.332709 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" 2025-12-08T17:44:22.333255689+00:00 stderr F I1208 17:44:22.333035 1 shared_informer.go:350] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:22.355918437+00:00 stderr F I1208 17:44:22.353534 1 leaderelection.go:271] successfully acquired lease openshift-kube-scheduler-operator/openshift-cluster-kube-scheduler-operator-lock 2025-12-08T17:44:22.363021680+00:00 stderr F I1208 17:44:22.360921 1 event.go:377] Event(v1.ObjectReference{Kind:"Lease", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-cluster-kube-scheduler-operator-lock", UID:"0678f1a7-b3ec-4b60-852e-c0a882031145", APIVersion:"coordination.k8s.io/v1", ResourceVersion:"37238", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' openshift-kube-scheduler-operator-54f497555d-gvb6q_3e54bcea-a03a-4b91-995e-c7ba7ed922f1 became leader 2025-12-08T17:44:22.363021680+00:00 stderr F I1208 17:44:22.361549 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:22.377760563+00:00 stderr F I1208 17:44:22.373187 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", 
"SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", "BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:22.377760563+00:00 stderr F I1208 17:44:22.373240 1 starter.go:90] FeatureGates initialized: knownFeatureGates=[AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AzureWorkloadIdentity BuildCSIVolumes CPMSMachineNamePrefix ConsolePluginContentSecurityPolicy GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageVolume IngressControllerLBSubnetsAWS KMSv1 MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MetricsCollectionProfiles NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM PinnedImages ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SigstoreImageVerification StoragePerformantSecurityPolicy UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereMultiDisk VSphereMultiNetworks AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk BootImageSkewEnforcement BootcNodeManagement ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall ImageModeStatusReporting ImageStreamImportMode 
IngressControllerDynamicConfigurationManager InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PreconfiguredUDNAddresses SELinuxMount ShortCertRotation SignatureStores SigstoreImageVerificationPKI TranslateStreamCloseWebsocketRequests VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VolumeAttributesClass VolumeGroupSnapshot] 2025-12-08T17:44:22.435613911+00:00 stderr F I1208 17:44:22.435232 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" 2025-12-08T17:44:22.435613911+00:00 stderr F I1208 17:44:22.435286 1 shared_informer.go:357] "Caches are synced" controller="RequestHeaderAuthRequestController" 2025-12-08T17:44:22.435613911+00:00 stderr F I1208 17:44:22.435371 1 shared_informer.go:357] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" 2025-12-08T17:44:22.476912697+00:00 stderr F I1208 17:44:22.474610 1 base_controller.go:76] Waiting for caches to sync for kube-controller-manager-RemoveStaleConditions 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.530478 1 base_controller.go:76] Waiting for caches to sync for MissingStaticPodController 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.530811 1 base_controller.go:76] Waiting for caches to sync for ConfigObserver 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.531257 1 base_controller.go:76] Waiting for caches to sync for kube-scheduler 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.531322 1 base_controller.go:76] Waiting for caches to sync for TargetConfigController 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.531344 1 base_controller.go:76] Waiting for caches to sync for GuardController 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.531840 1 base_controller.go:76] Waiting for caches to sync for RevisionController 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.532031 1 base_controller.go:76] Waiting for caches to sync for Installer 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.532049 1 base_controller.go:76] Waiting for caches to sync for kube-scheduler-InstallerState 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.532073 1 base_controller.go:76] Waiting for caches to sync for kube-scheduler-StaticPodState 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.532087 1 base_controller.go:76] Waiting for caches to sync for PruneController 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.532109 1 base_controller.go:76] Waiting for caches to sync for kube-scheduler-Node 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.532196 1 base_controller.go:76] Waiting for caches to sync for BackingResourceController-StaticResources 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.532217 1 base_controller.go:76] Waiting for caches to sync for 
cluster-kube-scheduler-operator-UnsupportedConfigOverrides 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.532230 1 base_controller.go:76] Waiting for caches to sync for LoggingSyncer 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.534941 1 base_controller.go:76] Waiting for caches to sync for KubeControllerManagerStaticResources-StaticResources 2025-12-08T17:44:22.538700033+00:00 stderr F I1208 17:44:22.538298 1 base_controller.go:76] Waiting for caches to sync for StatusSyncer_kube-scheduler 2025-12-08T17:44:22.633056787+00:00 stderr F I1208 17:44:22.633003 1 base_controller.go:82] Caches are synced for LoggingSyncer 2025-12-08T17:44:22.633205211+00:00 stderr F I1208 17:44:22.633177 1 base_controller.go:119] Starting #1 worker of LoggingSyncer controller ... 2025-12-08T17:44:22.634464105+00:00 stderr F I1208 17:44:22.633961 1 base_controller.go:82] Caches are synced for RevisionController 2025-12-08T17:44:22.634464105+00:00 stderr F I1208 17:44:22.633977 1 base_controller.go:119] Starting #1 worker of RevisionController controller ... 2025-12-08T17:44:22.634464105+00:00 stderr F I1208 17:44:22.634006 1 base_controller.go:82] Caches are synced for PruneController 2025-12-08T17:44:22.634464105+00:00 stderr F I1208 17:44:22.634012 1 base_controller.go:119] Starting #1 worker of PruneController controller ... 2025-12-08T17:44:22.638379302+00:00 stderr F I1208 17:44:22.636959 1 base_controller.go:82] Caches are synced for kube-scheduler-Node 2025-12-08T17:44:22.638379302+00:00 stderr F I1208 17:44:22.636994 1 base_controller.go:119] Starting #1 worker of kube-scheduler-Node controller ... 2025-12-08T17:44:22.638421183+00:00 stderr F I1208 17:44:22.638385 1 base_controller.go:82] Caches are synced for cluster-kube-scheduler-operator-UnsupportedConfigOverrides 2025-12-08T17:44:22.638430573+00:00 stderr F I1208 17:44:22.638423 1 base_controller.go:119] Starting #1 worker of cluster-kube-scheduler-operator-UnsupportedConfigOverrides controller ... 2025-12-08T17:44:22.640052907+00:00 stderr F I1208 17:44:22.638840 1 base_controller.go:82] Caches are synced for StatusSyncer_kube-scheduler 2025-12-08T17:44:22.640052907+00:00 stderr F I1208 17:44:22.638850 1 base_controller.go:119] Starting #1 worker of StatusSyncer_kube-scheduler controller ... 
2025-12-08T17:44:22.640052907+00:00 stderr F I1208 17:44:22.639730 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)","reason":"NodeController_MasterNodesReady","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:22.653330539+00:00 stderr F I1208 17:44:22.653257 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:22.661218395+00:00 stderr F I1208 17:44:22.661095 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)","reason":"NodeController_MasterNodesReady","status":"True","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:22.661491242+00:00 stderr F I1208 17:44:22.661457 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-scheduler changed: Degraded changed from False to True ("NodeControllerDegraded: The master nodes not ready: node \"crc\" not ready since 2025-11-03 09:40:44 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)") 2025-12-08T17:44:22.671617058+00:00 stderr F E1208 17:44:22.671262 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:44:22.675736521+00:00 stderr F I1208 17:44:22.675542 1 base_controller.go:82] Caches are synced for kube-controller-manager-RemoveStaleConditions 2025-12-08T17:44:22.675736521+00:00 stderr F I1208 17:44:22.675560 1 
base_controller.go:119] Starting #1 worker of kube-controller-manager-RemoveStaleConditions controller ... 2025-12-08T17:44:22.677981122+00:00 stderr F I1208 17:44:22.677716 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:22.733782184+00:00 stderr F I1208 17:44:22.730943 1 base_controller.go:82] Caches are synced for ConfigObserver 2025-12-08T17:44:22.733782184+00:00 stderr F I1208 17:44:22.730986 1 base_controller.go:119] Starting #1 worker of ConfigObserver controller ... 2025-12-08T17:44:22.908514980+00:00 stderr F I1208 17:44:22.907858 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:22.934513469+00:00 stderr F I1208 17:44:22.933198 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'MasterNodesReadyChanged' All master nodes are ready 2025-12-08T17:44:22.950561127+00:00 stderr F I1208 17:44:22.949694 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:22.974977693+00:00 stderr F I1208 17:44:22.974695 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-scheduler changed: Degraded changed from True to False ("NodeControllerDegraded: All master nodes are ready") 2025-12-08T17:44:23.080632064+00:00 stderr F I1208 17:44:23.080302 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.133573479+00:00 stderr F I1208 17:44:23.132961 1 base_controller.go:82] Caches are synced for kube-scheduler 2025-12-08T17:44:23.133573479+00:00 stderr F I1208 17:44:23.133556 1 base_controller.go:119] Starting #1 worker of kube-scheduler controller ... 2025-12-08T17:44:23.276936190+00:00 stderr F I1208 17:44:23.276835 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.333009599+00:00 stderr F I1208 17:44:23.331809 1 base_controller.go:82] Caches are synced for TargetConfigController 2025-12-08T17:44:23.333009599+00:00 stderr F I1208 17:44:23.332475 1 base_controller.go:119] Starting #1 worker of TargetConfigController controller ... 
2025-12-08T17:44:23.333565804+00:00 stderr F I1208 17:44:23.333089 1 base_controller.go:82] Caches are synced for BackingResourceController-StaticResources 2025-12-08T17:44:23.333565804+00:00 stderr F I1208 17:44:23.333112 1 base_controller.go:119] Starting #1 worker of BackingResourceController-StaticResources controller ... 2025-12-08T17:44:23.337073460+00:00 stderr F I1208 17:44:23.337033 1 base_controller.go:82] Caches are synced for KubeControllerManagerStaticResources-StaticResources 2025-12-08T17:44:23.337073460+00:00 stderr F I1208 17:44:23.337057 1 base_controller.go:119] Starting #1 worker of KubeControllerManagerStaticResources-StaticResources controller ... 2025-12-08T17:44:23.484446210+00:00 stderr F I1208 17:44:23.483849 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:44:23.544276921+00:00 stderr F I1208 17:44:23.544221 1 base_controller.go:82] Caches are synced for GuardController 2025-12-08T17:44:23.544276921+00:00 stderr F I1208 17:44:23.544246 1 base_controller.go:119] Starting #1 worker of GuardController controller ... 2025-12-08T17:44:23.545769762+00:00 stderr F I1208 17:44:23.545403 1 base_controller.go:82] Caches are synced for Installer 2025-12-08T17:44:23.545769762+00:00 stderr F I1208 17:44:23.545417 1 base_controller.go:119] Starting #1 worker of Installer controller ... 2025-12-08T17:44:23.545769762+00:00 stderr F I1208 17:44:23.545597 1 base_controller.go:82] Caches are synced for kube-scheduler-InstallerState 2025-12-08T17:44:23.545769762+00:00 stderr F I1208 17:44:23.545603 1 base_controller.go:119] Starting #1 worker of kube-scheduler-InstallerState controller ... 2025-12-08T17:44:23.545769762+00:00 stderr F I1208 17:44:23.545614 1 base_controller.go:82] Caches are synced for kube-scheduler-StaticPodState 2025-12-08T17:44:23.545769762+00:00 stderr F I1208 17:44:23.545617 1 base_controller.go:119] Starting #1 worker of kube-scheduler-StaticPodState controller ... 2025-12-08T17:44:23.545799963+00:00 stderr F I1208 17:44:23.545786 1 base_controller.go:82] Caches are synced for MissingStaticPodController 2025-12-08T17:44:23.545799963+00:00 stderr F I1208 17:44:23.545793 1 base_controller.go:119] Starting #1 worker of MissingStaticPodController controller ... 
2025-12-08T17:44:23.677574098+00:00 stderr F I1208 17:44:23.676065 1 request.go:752] "Waited before sending request" delay="1.024069053s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods/revision-pruner-6-crc" 2025-12-08T17:44:24.084348873+00:00 stderr F I1208 17:44:24.083431 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:44:24.084348873+00:00 stderr F -  "", 2025-12-08T17:44:24.084348873+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:44:24.084348873+00:00 stderr F   ) 2025-12-08T17:44:24.679936478+00:00 stderr F I1208 17:44:24.676766 1 request.go:752] "Waited before sending request" delay="1.131027881s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods?labelSelector=app%3Dinstaller" 2025-12-08T17:44:25.088221825+00:00 stderr F I1208 17:44:25.087486 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'PodCreated' Created Pod/revision-pruner-6-crc -n openshift-kube-scheduler because it was missing 2025-12-08T17:44:26.280911438+00:00 stderr F I1208 17:44:26.276943 1 request.go:752] "Waited before sending request" delay="1.18933504s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods/openshift-kube-scheduler-crc" 2025-12-08T17:44:27.875749800+00:00 stderr F I1208 17:44:27.874978 1 request.go:752] "Waited before sending request" delay="1.110305705s" reason="client-side throttling, not priority and fairness" verb="PUT" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/secrets/kube-scheduler-client-cert-key" 2025-12-08T17:44:27.890970266+00:00 stderr F I1208 17:44:27.889651 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'SecretUpdateFailed' Failed to update Secret/kube-scheduler-client-cert-key -n openshift-kube-scheduler: Operation cannot be fulfilled on secrets "kube-scheduler-client-cert-key": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:44:27.905850291+00:00 stderr F I1208 17:44:27.905780 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nResourceSyncControllerDegraded: Operation cannot be fulfilled on secrets \"kube-scheduler-client-cert-key\": the object has been modified; please apply your changes to the latest version and try again","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 
6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:27.918131476+00:00 stderr F I1208 17:44:27.915666 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-scheduler changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready" to "NodeControllerDegraded: All master nodes are ready\nResourceSyncControllerDegraded: Operation cannot be fulfilled on secrets \"kube-scheduler-client-cert-key\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:44:27.923281407+00:00 stderr F I1208 17:44:27.921775 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:27.939012326+00:00 stderr F I1208 17:44:27.935351 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-scheduler changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready\nResourceSyncControllerDegraded: Operation cannot be fulfilled on secrets \"kube-scheduler-client-cert-key\": the object has been modified; please apply your changes to the latest version and try again" to "NodeControllerDegraded: All master nodes are ready" 2025-12-08T17:44:28.875963173+00:00 stderr F I1208 17:44:28.875119 1 request.go:752] "Waited before sending request" delay="1.177786017s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler" 2025-12-08T17:44:29.875405925+00:00 stderr F I1208 17:44:29.875350 1 request.go:752] "Waited before sending request" delay="1.392112273s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/secrets/serving-cert" 2025-12-08T17:44:30.608096321+00:00 stderr F I1208 17:44:30.605232 1 tlsconfig.go:181] "Loaded client CA" index=0 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:44:30.60517276 +0000 UTC))" 2025-12-08T17:44:30.608096321+00:00 stderr F I1208 17:44:30.605748 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 17:44:30.605730616 +0000 UTC))" 2025-12-08T17:44:30.608096321+00:00 stderr F I1208 17:44:30.605766 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:44:30.605754887 +0000 UTC))" 2025-12-08T17:44:30.608096321+00:00 stderr F I1208 17:44:30.605786 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:44:30.605773727 +0000 UTC))" 2025-12-08T17:44:30.608096321+00:00 stderr F I1208 17:44:30.605804 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:44:30.605791108 +0000 UTC))" 2025-12-08T17:44:30.608096321+00:00 stderr F I1208 17:44:30.605824 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:44:30.605811338 +0000 UTC))" 2025-12-08T17:44:30.608096321+00:00 stderr F I1208 17:44:30.605841 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:44:30.605829419 +0000 UTC))" 2025-12-08T17:44:30.608096321+00:00 stderr F I1208 17:44:30.605868 1 tlsconfig.go:181] "Loaded client CA" index=7 
certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.60585525 +0000 UTC))" 2025-12-08T17:44:30.608096321+00:00 stderr F I1208 17:44:30.605904 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:44:30.60589036 +0000 UTC))" 2025-12-08T17:44:30.608096321+00:00 stderr F I1208 17:44:30.605925 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:44:30.605913101 +0000 UTC))" 2025-12-08T17:44:30.608801070+00:00 stderr F I1208 17:44:30.608297 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-kube-scheduler-operator.svc\" [serving] validServingFor=[metrics.openshift-kube-scheduler-operator.svc,metrics.openshift-kube-scheduler-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:14 +0000 UTC to 2027-11-02 07:52:15 +0000 UTC (now=2025-12-08 17:44:30.608275466 +0000 UTC))" 2025-12-08T17:44:30.608801070+00:00 stderr F I1208 17:44:30.608497 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215862\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215862\" (2025-12-08 16:44:21 +0000 UTC to 2028-12-08 16:44:21 +0000 UTC (now=2025-12-08 17:44:30.608476691 +0000 UTC))" 2025-12-08T17:44:31.075045567+00:00 stderr F I1208 17:44:31.074987 1 request.go:752] "Waited before sending request" delay="1.395632078s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods?labelSelector=app%3Dinstaller" 2025-12-08T17:44:32.278145475+00:00 stderr F I1208 17:44:32.278084 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:44:32.278145475+00:00 stderr F -  "", 2025-12-08T17:44:32.278145475+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:44:32.278145475+00:00 stderr F   ) 2025-12-08T17:44:32.480039332+00:00 stderr F I1208 17:44:32.479943 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Warning' reason: 'SecretUpdateFailed' Failed to update Secret/kube-scheduler-client-cert-key -n openshift-kube-scheduler: Operation cannot be 
fulfilled on secrets "kube-scheduler-client-cert-key": the object has been modified; please apply your changes to the latest version and try again 2025-12-08T17:44:32.495936755+00:00 stderr F I1208 17:44:32.493596 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nResourceSyncControllerDegraded: Operation cannot be fulfilled on secrets \"kube-scheduler-client-cert-key\": the object has been modified; please apply your changes to the latest version and try again","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:32.503984474+00:00 stderr F I1208 17:44:32.503802 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-scheduler changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready" to "NodeControllerDegraded: All master nodes are ready\nResourceSyncControllerDegraded: Operation cannot be fulfilled on secrets \"kube-scheduler-client-cert-key\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:44:32.507100099+00:00 stderr F I1208 17:44:32.506839 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:44:32.516313301+00:00 stderr F I1208 17:44:32.515773 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-scheduler changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready\nResourceSyncControllerDegraded: Operation cannot be fulfilled on secrets 
\"kube-scheduler-client-cert-key\": the object has been modified; please apply your changes to the latest version and try again" to "NodeControllerDegraded: All master nodes are ready" 2025-12-08T17:44:33.676230130+00:00 stderr F I1208 17:44:33.675935 1 request.go:752] "Waited before sending request" delay="1.182936918s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa" 2025-12-08T17:44:34.875533873+00:00 stderr F I1208 17:44:34.875482 1 request.go:752] "Waited before sending request" delay="1.192980581s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa" 2025-12-08T17:44:36.678051850+00:00 stderr F I1208 17:44:36.677772 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:44:36.678051850+00:00 stderr F -  "", 2025-12-08T17:44:36.678051850+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:44:36.678051850+00:00 stderr F   ) 2025-12-08T17:44:39.582720888+00:00 stderr F I1208 17:44:39.581160 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:44:39.582720888+00:00 stderr F -  "", 2025-12-08T17:44:39.582720888+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:44:39.582720888+00:00 stderr F   ) 2025-12-08T17:45:06.969542055+00:00 stderr F I1208 17:45:06.965070 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:45:06.969542055+00:00 stderr F -  "", 2025-12-08T17:45:06.969542055+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:45:06.969542055+00:00 stderr F   ) 2025-12-08T17:45:11.938346302+00:00 stderr F I1208 17:45:11.936705 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:45:11.938346302+00:00 stderr F -  "", 2025-12-08T17:45:11.938346302+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:45:11.938346302+00:00 stderr F   ) 2025-12-08T17:45:14.222226291+00:00 stderr F I1208 17:45:14.221583 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:45:14.222226291+00:00 stderr F -  "", 2025-12-08T17:45:14.222226291+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:45:14.222226291+00:00 stderr F   ) 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.039247 1 tlsconfig.go:181] "Loaded client CA" index=0 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:07 +0000 UTC to 2035-10-31 07:34:07 +0000 UTC (now=2025-12-08 17:45:16.039202169 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.039792 1 tlsconfig.go:181] "Loaded client CA" index=1 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-control-plane-signer\" [] issuer=\"\" (2025-11-02 07:34:10 +0000 UTC to 2026-11-02 07:34:10 +0000 UTC (now=2025-12-08 
17:45:16.039777185 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.039808 1 tlsconfig.go:181] "Loaded client CA" index=2 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-apiserver-to-kubelet-signer\" [] issuer=\"\" (2025-11-02 07:34:11 +0000 UTC to 2026-11-02 07:34:11 +0000 UTC (now=2025-12-08 17:45:16.039797665 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.039822 1 tlsconfig.go:181] "Loaded client CA" index=3 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kubelet-bootstrap-kubeconfig-signer\" [] issuer=\"\" (2025-11-02 07:34:08 +0000 UTC to 2035-10-31 07:34:08 +0000 UTC (now=2025-12-08 17:45:16.039813316 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.039837 1 tlsconfig.go:181] "Loaded client CA" index=4 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_node-system-admin-signer@1762069887\" [] issuer=\"\" (2025-11-02 07:51:27 +0000 UTC to 2028-11-01 07:51:28 +0000 UTC (now=2025-12-08 17:45:16.039826836 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.039856 1 tlsconfig.go:181] "Loaded client CA" index=5 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" [] issuer=\"\" (2025-11-02 08:17:34 +0000 UTC to 2027-10-23 08:17:35 +0000 UTC (now=2025-12-08 17:45:16.039842776 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.039899 1 tlsconfig.go:181] "Loaded client CA" index=6 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"kube-csr-signer_@1762071455\" [] issuer=\"openshift-kube-controller-manager-operator_csr-signer-signer@1762071455\" (2025-11-02 08:17:35 +0000 UTC to 2026-10-28 08:17:36 +0000 UTC (now=2025-12-08 17:45:16.039860657 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.039915 1 tlsconfig.go:181] "Loaded client CA" index=7 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-apiserver-to-kubelet-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-12-08 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.039904828 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.039934 1 tlsconfig.go:181] "Loaded client CA" index=8 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_kube-control-plane-signer@1765215863\" [] issuer=\"\" (2025-12-08 17:44:23 +0000 UTC to 2026-02-06 17:44:24 +0000 UTC (now=2025-12-08 17:45:16.039921839 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 
stderr F I1208 17:45:16.039952 1 tlsconfig.go:181] "Loaded client CA" index=9 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"admin-kubeconfig-signer-custom\" [] issuer=\"\" (2025-12-08 17:45:09 +0000 UTC to 2035-12-06 17:45:09 +0000 UTC (now=2025-12-08 17:45:16.039940599 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.039972 1 tlsconfig.go:181] "Loaded client CA" index=10 certName="client-ca::kube-system::extension-apiserver-authentication::client-ca-file,client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" certDetail="\"openshift-kube-apiserver-operator_aggregator-client-signer@1762159055\" [] issuer=\"\" (2025-11-03 08:37:34 +0000 UTC to 2026-11-03 08:37:35 +0000 UTC (now=2025-12-08 17:45:16.03996041 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.040221 1 tlsconfig.go:203] "Loaded serving cert" certName="serving-cert::/var/run/secrets/serving-cert/tls.crt::/var/run/secrets/serving-cert/tls.key" certDetail="\"metrics.openshift-kube-scheduler-operator.svc\" [serving] validServingFor=[metrics.openshift-kube-scheduler-operator.svc,metrics.openshift-kube-scheduler-operator.svc.cluster.local] issuer=\"openshift-service-serving-signer@1762069924\" (2025-11-02 07:52:14 +0000 UTC to 2027-11-02 07:52:15 +0000 UTC (now=2025-12-08 17:45:16.040206126 +0000 UTC))" 2025-12-08T17:45:16.040844364+00:00 stderr F I1208 17:45:16.040454 1 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1765215862\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1765215862\" (2025-12-08 16:44:21 +0000 UTC to 2028-12-08 16:44:21 +0000 UTC (now=2025-12-08 17:45:16.040440313 +0000 UTC))" 2025-12-08T17:45:17.006000290+00:00 stderr F I1208 17:45:17.005904 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:45:17.006000290+00:00 stderr F -  "", 2025-12-08T17:45:17.006000290+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:45:17.006000290+00:00 stderr F   ) 2025-12-08T17:45:23.338534502+00:00 stderr F I1208 17:45:23.337812 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:45:23.338534502+00:00 stderr F -  "", 2025-12-08T17:45:23.338534502+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:45:23.338534502+00:00 stderr F   ) 2025-12-08T17:46:22.379309215+00:00 stderr F E1208 17:46:22.378678 1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-scheduler-operator/leases/openshift-cluster-kube-scheduler-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused, falling back to slow path 2025-12-08T17:46:22.380642944+00:00 stderr F E1208 17:46:22.380582 1 leaderelection.go:436] error retrieving resource lock openshift-kube-scheduler-operator/openshift-cluster-kube-scheduler-operator-lock: Get "https://10.217.4.1:443/apis/coordination.k8s.io/v1/namespaces/openshift-kube-scheduler-operator/leases/openshift-cluster-kube-scheduler-operator-lock?timeout=4m0s": dial tcp 10.217.4.1:443: connect: connection refused 2025-12-08T17:46:23.340639380+00:00 stderr F I1208 17:46:23.340561 1 
annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:46:23.340639380+00:00 stderr F -  "", 2025-12-08T17:46:23.340639380+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:46:23.340639380+00:00 stderr F   ) 2025-12-08T17:46:23.345471765+00:00 stderr F E1208 17:46:23.345419 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-scheduler-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:23.350424504+00:00 stderr F E1208 17:46:23.350328 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:23.353766644+00:00 stderr F E1208 17:46:23.353720 1 base_controller.go:279] "Unhandled Error" err="KubeControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/openshift-kube-scheduler-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/localhost-recovery-client-crb.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler-recovery\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/localhost-recovery-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/localhost-recovery-client\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeControllerManagerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=KubeControllerManagerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:23.543366255+00:00 stderr F E1208 17:46:23.543001 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-scheduler-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:23.741521512+00:00 stderr F I1208 17:46:23.741056 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:46:23.741521512+00:00 stderr F -  "", 2025-12-08T17:46:23.741521512+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:46:23.741521512+00:00 stderr F   ) 2025-12-08T17:46:24.144447806+00:00 stderr F E1208 17:46:24.144350 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-scheduler-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=kube-scheduler-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.342398978+00:00 stderr F E1208 17:46:24.342333 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:24.544683440+00:00 stderr F E1208 17:46:24.544575 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-scheduler-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for 
operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:25.142206465+00:00 stderr F E1208 17:46:25.142139 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-scheduler-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=kube-scheduler-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.340004282+00:00 stderr F E1208 17:46:25.339937 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:25.542600943+00:00 stderr F E1208 17:46:25.542563 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-scheduler-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:26.142593322+00:00 stderr F E1208 17:46:26.142491 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-scheduler-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=kube-scheduler-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.339557655+00:00 stderr F E1208 17:46:26.339467 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:26.543440534+00:00 stderr F E1208 17:46:26.543347 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-scheduler-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager 
\"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:26.943837072+00:00 stderr F E1208 17:46:26.943328 1 base_controller.go:279] "Unhandled Error" err="KubeControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/openshift-kube-scheduler-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/localhost-recovery-client-crb.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler-recovery\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/localhost-recovery-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/localhost-recovery-client\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"KubeControllerManagerStaticResources-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=KubeControllerManagerStaticResources-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:27.141090203+00:00 stderr F E1208 17:46:27.141000 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-scheduler-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=kube-scheduler-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.340904450+00:00 stderr F E1208 17:46:27.340065 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-InstallerState 
reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:27.543689838+00:00 stderr F E1208 17:46:27.543594 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-scheduler-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:27.742977619+00:00 stderr F E1208 17:46:27.742897 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: Put \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.141749049+00:00 stderr F E1208 17:46:28.141674 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-scheduler-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=kube-scheduler-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.340424872+00:00 stderr F E1208 17:46:28.340354 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:28.542980382+00:00 stderr F E1208 17:46:28.542903 1 base_controller.go:279] "Unhandled Error" err="BackingResourceController-StaticResources reconciliation failed: [\"manifests/installer-sa.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/installer-sa\": dial tcp 10.217.4.1:443: connect: connection refused, \"manifests/installer-cluster-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:openshift-kube-scheduler-installer\": dial tcp 10.217.4.1:443: connect: connection refused, unable to ApplyStatus for operator using fieldManager \"BackingResourceController-StaticResources\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=BackingResourceController-StaticResources&force=true\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:28.741030796+00:00 stderr F I1208 17:46:28.740278 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:46:28.741030796+00:00 stderr F -  "", 2025-12-08T17:46:28.741030796+00:00 stderr F +  "kube-scheduler", 
2025-12-08T17:46:28.741030796+00:00 stderr F   ) 2025-12-08T17:46:29.142190147+00:00 stderr F E1208 17:46:29.142089 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-StaticPodState reconciliation failed: unable to ApplyStatus for operator using fieldManager \"kube-scheduler-StaticPodState\": Patch \"https://10.217.4.1:443/apis/operator.openshift.io/v1/kubeschedulers/cluster/status?fieldManager=kube-scheduler-StaticPodState&force=true\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:29.340069357+00:00 stderr F E1208 17:46:29.339954 1 base_controller.go:279] "Unhandled Error" err="kube-scheduler-InstallerState reconciliation failed: Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods?labelSelector=app%3Dinstaller\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:46:30.753894043+00:00 stderr F E1208 17:46:30.753790 1 base_controller.go:279] "Unhandled Error" err="KubeControllerManagerStaticResources-StaticResources reconciliation failed: [\"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused, \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused]" 2025-12-08T17:46:32.165212805+00:00 stderr F I1208 17:46:32.165134 1 helpers.go:264] lister was stale at resourceVersion=38172, live get showed resourceVersion=38926 2025-12-08T17:46:32.177910207+00:00 stderr F E1208 17:46:32.176742 1 base_controller.go:279] "Unhandled Error" err="TargetConfigController reconciliation failed: synthetic requeue request" 2025-12-08T17:46:32.542262443+00:00 stderr F I1208 17:46:32.542172 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:46:32.542262443+00:00 stderr F -  "", 2025-12-08T17:46:32.542262443+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:46:32.542262443+00:00 stderr F   ) 2025-12-08T17:46:58.065925583+00:00 stderr F I1208 17:46:58.065087 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:46:58.071021343+00:00 stderr F I1208 17:46:58.070821 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for 
serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:46:58.071021343+00:00 stderr F -  "", 2025-12-08T17:46:58.071021343+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:46:58.071021343+00:00 stderr F   ) 2025-12-08T17:46:58.084070944+00:00 stderr F I1208 17:46:58.084039 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:46:58.084070944+00:00 stderr F -  "", 2025-12-08T17:46:58.084070944+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:46:58.084070944+00:00 stderr F   ) 2025-12-08T17:47:00.955719780+00:00 stderr F I1208 17:47:00.955225 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:07.684179366+00:00 stderr F I1208 17:47:07.683400 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:07.689236826+00:00 stderr F I1208 17:47:07.689171 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:07.689236826+00:00 stderr F -  "", 2025-12-08T17:47:07.689236826+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:07.689236826+00:00 stderr F   ) 2025-12-08T17:47:08.086052317+00:00 stderr F I1208 17:47:08.084736 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:08.086052317+00:00 stderr F -  "", 2025-12-08T17:47:08.086052317+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:08.086052317+00:00 stderr F   ) 2025-12-08T17:47:09.441385853+00:00 stderr F I1208 17:47:09.441029 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:12.414229355+00:00 stderr F I1208 17:47:12.413863 1 reflector.go:430] "Caches populated" type="operator.openshift.io/v1, Resource=kubeschedulers" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:12.415644949+00:00 stderr F I1208 17:47:12.415614 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection 
refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/serviceaccount-ca\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.427699110+00:00 stderr F I1208 17:47:12.427618 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-scheduler changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready" to "NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/serviceaccount-ca\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:12.429160825+00:00 stderr F I1208 17:47:12.429125 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:12.429160825+00:00 stderr F -  "", 2025-12-08T17:47:12.429160825+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:12.429160825+00:00 stderr F   ) 2025-12-08T17:47:12.431530710+00:00 stderr F I1208 17:47:12.431292 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: 
\"configmap/serviceaccount-ca\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.435564036+00:00 stderr F E1208 17:47:12.435523 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:12.441612957+00:00 stderr F I1208 17:47:12.441575 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/serviceaccount-ca\": Get 
\"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.446081648+00:00 stderr F E1208 17:47:12.446034 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:12.449159354+00:00 stderr F I1208 17:47:12.449093 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/serviceaccount-ca\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": 
dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.457137366+00:00 stderr F E1208 17:47:12.456753 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:12.459642495+00:00 stderr F I1208 17:47:12.459602 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/serviceaccount-ca\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": dial tcp 10.217.4.1:443: connect: connection 
refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.463605319+00:00 stderr F E1208 17:47:12.463576 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:12.506338355+00:00 stderr F I1208 17:47:12.506290 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/serviceaccount-ca\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": dial tcp 10.217.4.1:443: connect: connection 
refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.511330231+00:00 stderr F E1208 17:47:12.511283 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:12.594273353+00:00 stderr F I1208 17:47:12.594196 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/serviceaccount-ca\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": dial tcp 10.217.4.1:443: connect: connection 
refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.599872779+00:00 stderr F E1208 17:47:12.599781 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:12.763540481+00:00 stderr F I1208 17:47:12.763436 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/serviceaccount-ca\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": dial tcp 10.217.4.1:443: connect: connection 
refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:12.772096290+00:00 stderr F E1208 17:47:12.771954 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:13.016897396+00:00 stderr F I1208 17:47:13.016815 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:13.094721647+00:00 stderr F I1208 17:47:13.094677 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/serviceaccount-ca\": Get 
\"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": dial tcp 10.217.4.1:443: connect: connection refused","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:13.101688255+00:00 stderr F E1208 17:47:13.101657 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:13.334216265+00:00 stderr F I1208 17:47:13.334128 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:13.631158683+00:00 stderr F I1208 17:47:13.631083 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 
6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:13.636710497+00:00 stderr F E1208 17:47:13.636648 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:13.744696997+00:00 stderr F I1208 17:47:13.744582 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: ","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 
2025-12-08T17:47:13.750636784+00:00 stderr F E1208 17:47:13.749750 1 base_controller.go:279] "Unhandled Error" err="StatusSyncer_kube-scheduler reconciliation failed: Operation cannot be fulfilled on clusteroperators.config.openshift.io \"kube-scheduler\": the object has been modified; please apply your changes to the latest version and try again" 2025-12-08T17:47:13.943452434+00:00 stderr F I1208 17:47:13.943382 1 reflector.go:430] "Caches populated" type="*v1.PodDisruptionBudget" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:14.618815654+00:00 stderr F I1208 17:47:14.618251 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:14.618815654+00:00 stderr F -  "", 2025-12-08T17:47:14.618815654+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:14.618815654+00:00 stderr F   ) 2025-12-08T17:47:14.815300989+00:00 stderr F I1208 17:47:14.815221 1 request.go:752] "Waited before sending request" delay="1.178402026s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/pods/openshift-kube-scheduler-crc" 2025-12-08T17:47:15.815980539+00:00 stderr F I1208 17:47:15.815288 1 request.go:752] "Waited before sending request" delay="1.196930379s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca" 2025-12-08T17:47:16.422018776+00:00 stderr F I1208 17:47:16.421941 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:17.022390126+00:00 stderr F I1208 17:47:17.022305 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:19.017156430+00:00 stderr F I1208 17:47:19.017090 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:19.616265859+00:00 stderr F I1208 17:47:19.615840 1 request.go:752] "Waited before sending request" delay="1.023664624s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler-operator/secrets?resourceVersion=38696" 2025-12-08T17:47:19.618916713+00:00 stderr F I1208 17:47:19.618766 1 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:20.419228075+00:00 stderr F I1208 17:47:20.418312 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:20.419228075+00:00 stderr F -  "", 2025-12-08T17:47:20.419228075+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:20.419228075+00:00 stderr F   ) 2025-12-08T17:47:23.191645609+00:00 stderr F I1208 17:47:23.190871 1 reflector.go:430] "Caches populated" type="*v1.APIServer" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:23.346751431+00:00 stderr F I1208 17:47:23.346705 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:23.346751431+00:00 stderr F -  "", 2025-12-08T17:47:23.346751431+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:23.346751431+00:00 
stderr F   ) 2025-12-08T17:47:25.503993929+00:00 stderr F I1208 17:47:25.503580 1 reflector.go:430] "Caches populated" type="*v1.ClusterVersion" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:29.550112958+00:00 stderr F I1208 17:47:29.549333 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:31.847227979+00:00 stderr F I1208 17:47:31.847143 1 reflector.go:430] "Caches populated" type="*v1.RoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:32.876057045+00:00 stderr F I1208 17:47:32.875162 1 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:34.342829547+00:00 stderr F I1208 17:47:34.342609 1 reflector.go:430] "Caches populated" type="*v1.FeatureGate" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:34.347192165+00:00 stderr F I1208 17:47:34.347115 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:34.347192165+00:00 stderr F -  "", 2025-12-08T17:47:34.347192165+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:34.347192165+00:00 stderr F   ) 2025-12-08T17:47:34.842499917+00:00 stderr F I1208 17:47:34.842391 1 reflector.go:430] "Caches populated" type="*v1.Role" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:36.735404224+00:00 stderr F I1208 17:47:36.734826 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:36.735799626+00:00 stderr F I1208 17:47:36.735728 1 status_controller.go:229] clusteroperator/kube-scheduler diff {"status":{"conditions":[{"lastTransitionTime":"2025-12-08T17:44:22Z","message":"NodeControllerDegraded: All master nodes are ready","reason":"AsExpected","status":"False","type":"Degraded"},{"lastTransitionTime":"2025-11-02T08:13:36Z","message":"NodeInstallerProgressing: 1 node is at revision 6","reason":"AsExpected","status":"False","type":"Progressing"},{"lastTransitionTime":"2025-11-02T07:56:14Z","message":"StaticPodsAvailable: 1 nodes are active; 1 node is at revision 6","reason":"AsExpected","status":"True","type":"Available"},{"lastTransitionTime":"2025-11-02T07:52:02Z","message":"All is well","reason":"AsExpected","status":"True","type":"Upgradeable"},{"lastTransitionTime":"2025-11-02T07:52:02Z","reason":"NoData","status":"Unknown","type":"EvaluationConditionsDetected"}]}} 2025-12-08T17:47:36.747522105+00:00 stderr F I1208 17:47:36.744570 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-kube-scheduler-operator", Name:"openshift-kube-scheduler-operator", UID:"c3ff943a-b570-4a98-8388-1f8a3280a85a", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/kube-scheduler changed: Degraded message changed from "NodeControllerDegraded: All master nodes are ready\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/ns.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/leader-election-rolebinding.yaml\" (string): Get 
\"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/system:openshift:leader-locking-kube-scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/scheduler-clusterrolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:operator:kube-scheduler:public-2\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-role.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/roles/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/policyconfigmap-rolebinding.yaml\" (string): Get \"https://10.217.4.1:443/apis/rbac.authorization.k8s.io/v1/namespaces/openshift-kube-scheduler/rolebindings/system:openshift:sa-listing-configmaps\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \"assets/kube-scheduler/svc.yaml\" (string): Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/services/scheduler\": dial tcp 10.217.4.1:443: connect: connection refused\nKubeControllerManagerStaticResourcesDegraded: \nTargetConfigControllerDegraded: \"configmap\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/config\": dial tcp 10.217.4.1:443: connect: connection refused\nTargetConfigControllerDegraded: \"configmap/serviceaccount-ca\": Get \"https://10.217.4.1:443/api/v1/namespaces/openshift-kube-scheduler/configmaps/serviceaccount-ca\": dial tcp 10.217.4.1:443: connect: connection refused" to "NodeControllerDegraded: All master nodes are ready" 2025-12-08T17:47:37.795921387+00:00 stderr F I1208 17:47:37.795800 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:37.799750057+00:00 stderr F I1208 17:47:37.799701 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:37.799750057+00:00 stderr F -  "", 2025-12-08T17:47:37.799750057+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:37.799750057+00:00 stderr F   ) 2025-12-08T17:47:39.373711274+00:00 stderr F I1208 17:47:39.373092 1 reflector.go:430] "Caches populated" type="*v1.Scheduler" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:39.376692858+00:00 stderr F I1208 17:47:39.376622 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:39.376692858+00:00 stderr F -  "", 2025-12-08T17:47:39.376692858+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:39.376692858+00:00 stderr F   ) 2025-12-08T17:47:42.292485244+00:00 stderr F I1208 17:47:42.292400 1 reflector.go:430] "Caches populated" type="*v1.Namespace" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:42.297097439+00:00 stderr F I1208 17:47:42.296813 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:42.297097439+00:00 stderr F -  "", 2025-12-08T17:47:42.297097439+00:00 stderr F +  "kube-scheduler", 
2025-12-08T17:47:42.297097439+00:00 stderr F   ) 2025-12-08T17:47:49.444673128+00:00 stderr F I1208 17:47:49.444168 1 reflector.go:430] "Caches populated" type="*v1.Infrastructure" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:49.447742464+00:00 stderr F I1208 17:47:49.447668 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:49.447742464+00:00 stderr F -  "", 2025-12-08T17:47:49.447742464+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:49.447742464+00:00 stderr F   ) 2025-12-08T17:47:50.872748472+00:00 stderr F I1208 17:47:50.871266 1 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:52.170755612+00:00 stderr F I1208 17:47:52.170695 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:52.175474131+00:00 stderr F I1208 17:47:52.175406 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:52.175474131+00:00 stderr F -  "", 2025-12-08T17:47:52.175474131+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:52.175474131+00:00 stderr F   ) 2025-12-08T17:47:52.195521352+00:00 stderr F I1208 17:47:52.195432 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:47:52.195521352+00:00 stderr F -  "", 2025-12-08T17:47:52.195521352+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:47:52.195521352+00:00 stderr F   ) 2025-12-08T17:47:54.960990741+00:00 stderr F I1208 17:47:54.960871 1 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:56.644149966+00:00 stderr F I1208 17:47:56.644095 1 reflector.go:430] "Caches populated" type="*v1.ClusterRoleBinding" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:47:56.887847129+00:00 stderr F I1208 17:47:56.887742 1 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:06.257574365+00:00 stderr F I1208 17:48:06.257076 1 reflector.go:430] "Caches populated" type="*v1.ServiceAccount" reflector="k8s.io/client-go@v0.33.2/tools/cache/reflector.go:285" 2025-12-08T17:48:06.262761343+00:00 stderr F I1208 17:48:06.262709 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:48:06.262761343+00:00 stderr F -  "", 2025-12-08T17:48:06.262761343+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:48:06.262761343+00:00 stderr F   ) 2025-12-08T17:48:06.858077414+00:00 stderr F I1208 17:48:06.858004 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:48:06.858077414+00:00 stderr F -  "", 2025-12-08T17:48:06.858077414+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:48:06.858077414+00:00 stderr F   ) 2025-12-08T17:48:23.346526671+00:00 stderr F I1208 17:48:23.346012 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:48:23.346526671+00:00 stderr F -  "", 
2025-12-08T17:48:23.346526671+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:48:23.346526671+00:00 stderr F   ) 2025-12-08T17:49:23.350057771+00:00 stderr F I1208 17:49:23.349411 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:49:23.350057771+00:00 stderr F -  "", 2025-12-08T17:49:23.350057771+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:49:23.350057771+00:00 stderr F   ) 2025-12-08T17:50:23.346555855+00:00 stderr F I1208 17:50:23.345912 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:50:23.346555855+00:00 stderr F -  "", 2025-12-08T17:50:23.346555855+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:50:23.346555855+00:00 stderr F   ) 2025-12-08T17:51:23.347747751+00:00 stderr F I1208 17:51:23.347142 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:51:23.347747751+00:00 stderr F -  "", 2025-12-08T17:51:23.347747751+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:51:23.347747751+00:00 stderr F   ) 2025-12-08T17:52:23.350922861+00:00 stderr F I1208 17:52:23.350286 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:52:23.350922861+00:00 stderr F -  "", 2025-12-08T17:52:23.350922861+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:52:23.350922861+00:00 stderr F   ) 2025-12-08T17:53:23.349357445+00:00 stderr F I1208 17:53:23.348551 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:53:23.349357445+00:00 stderr F -  "", 2025-12-08T17:53:23.349357445+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:53:23.349357445+00:00 stderr F   ) 2025-12-08T17:54:23.349107524+00:00 stderr F I1208 17:54:23.348554 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:54:23.349107524+00:00 stderr F -  "", 2025-12-08T17:54:23.349107524+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:54:23.349107524+00:00 stderr F   ) 2025-12-08T17:55:23.356780054+00:00 stderr F I1208 17:55:23.356008 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:55:23.356780054+00:00 stderr F -  "", 2025-12-08T17:55:23.356780054+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:55:23.356780054+00:00 stderr F   ) 2025-12-08T17:56:23.362090121+00:00 stderr F I1208 17:56:23.361586 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:56:23.362090121+00:00 stderr F -  "", 2025-12-08T17:56:23.362090121+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:56:23.362090121+00:00 stderr F   ) 2025-12-08T17:56:58.076083631+00:00 stderr F I1208 17:56:58.075446 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:56:58.076083631+00:00 stderr F -  "", 2025-12-08T17:56:58.076083631+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:56:58.076083631+00:00 stderr F   ) 2025-12-08T17:56:58.287092946+00:00 stderr F I1208 17:56:58.287040 1 annotations.go:45] Updating 
"openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:56:58.287092946+00:00 stderr F -  "", 2025-12-08T17:56:58.287092946+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:56:58.287092946+00:00 stderr F   ) 2025-12-08T17:57:07.694286844+00:00 stderr F I1208 17:57:07.693577 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:07.694286844+00:00 stderr F -  "", 2025-12-08T17:57:07.694286844+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:07.694286844+00:00 stderr F   ) 2025-12-08T17:57:09.295040034+00:00 stderr F I1208 17:57:09.293965 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:09.295040034+00:00 stderr F -  "", 2025-12-08T17:57:09.295040034+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:09.295040034+00:00 stderr F   ) 2025-12-08T17:57:13.027838723+00:00 stderr F I1208 17:57:13.026833 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:13.027838723+00:00 stderr F -  "", 2025-12-08T17:57:13.027838723+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:13.027838723+00:00 stderr F   ) 2025-12-08T17:57:13.047963908+00:00 stderr F I1208 17:57:13.047879 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:13.047963908+00:00 stderr F -  "", 2025-12-08T17:57:13.047963908+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:13.047963908+00:00 stderr F   ) 2025-12-08T17:57:17.034848334+00:00 stderr F I1208 17:57:17.033984 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:17.034848334+00:00 stderr F -  "", 2025-12-08T17:57:17.034848334+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:17.034848334+00:00 stderr F   ) 2025-12-08T17:57:18.628857609+00:00 stderr F I1208 17:57:18.628301 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:18.628857609+00:00 stderr F -  "", 2025-12-08T17:57:18.628857609+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:18.628857609+00:00 stderr F   ) 2025-12-08T17:57:20.827764371+00:00 stderr F I1208 17:57:20.827695 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:20.827764371+00:00 stderr F -  "", 2025-12-08T17:57:20.827764371+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:20.827764371+00:00 stderr F   ) 2025-12-08T17:57:23.359178985+00:00 stderr F I1208 17:57:23.358707 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:23.359178985+00:00 stderr F -  "", 2025-12-08T17:57:23.359178985+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:23.359178985+00:00 stderr F   ) 2025-12-08T17:57:34.348665360+00:00 stderr F I1208 17:57:34.348055 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:34.348665360+00:00 stderr F -  "", 2025-12-08T17:57:34.348665360+00:00 stderr F +  
"kube-scheduler", 2025-12-08T17:57:34.348665360+00:00 stderr F   ) 2025-12-08T17:57:37.800374202+00:00 stderr F I1208 17:57:37.799813 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:37.800374202+00:00 stderr F -  "", 2025-12-08T17:57:37.800374202+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:37.800374202+00:00 stderr F   ) 2025-12-08T17:57:37.826072207+00:00 stderr F I1208 17:57:37.825994 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:37.826072207+00:00 stderr F -  "", 2025-12-08T17:57:37.826072207+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:37.826072207+00:00 stderr F   ) 2025-12-08T17:57:39.380281006+00:00 stderr F I1208 17:57:39.379836 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:39.380281006+00:00 stderr F -  "", 2025-12-08T17:57:39.380281006+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:39.380281006+00:00 stderr F   ) 2025-12-08T17:57:42.299799623+00:00 stderr F I1208 17:57:42.299195 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:42.299799623+00:00 stderr F -  "", 2025-12-08T17:57:42.299799623+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:42.299799623+00:00 stderr F   ) 2025-12-08T17:57:49.449974454+00:00 stderr F I1208 17:57:49.449379 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:49.449974454+00:00 stderr F -  "", 2025-12-08T17:57:49.449974454+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:49.449974454+00:00 stderr F   ) 2025-12-08T17:57:52.180721982+00:00 stderr F I1208 17:57:52.180375 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:52.180721982+00:00 stderr F -  "", 2025-12-08T17:57:52.180721982+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:52.180721982+00:00 stderr F   ) 2025-12-08T17:57:52.762965821+00:00 stderr F I1208 17:57:52.762360 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:57:52.762965821+00:00 stderr F -  "", 2025-12-08T17:57:52.762965821+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:57:52.762965821+00:00 stderr F   ) 2025-12-08T17:58:06.263923511+00:00 stderr F I1208 17:58:06.262498 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:58:06.263923511+00:00 stderr F -  "", 2025-12-08T17:58:06.263923511+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:58:06.263923511+00:00 stderr F   ) 2025-12-08T17:58:07.264779549+00:00 stderr F I1208 17:58:07.264307 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:58:07.264779549+00:00 stderr F -  "", 2025-12-08T17:58:07.264779549+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:58:07.264779549+00:00 stderr F   ) 2025-12-08T17:58:23.356181931+00:00 stderr F I1208 17:58:23.355446 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for 
serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:58:23.356181931+00:00 stderr F -  "", 2025-12-08T17:58:23.356181931+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:58:23.356181931+00:00 stderr F   ) 2025-12-08T17:59:23.356623855+00:00 stderr F I1208 17:59:23.355817 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T17:59:23.356623855+00:00 stderr F -  "", 2025-12-08T17:59:23.356623855+00:00 stderr F +  "kube-scheduler", 2025-12-08T17:59:23.356623855+00:00 stderr F   ) 2025-12-08T18:00:23.357232494+00:00 stderr F I1208 18:00:23.356656 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T18:00:23.357232494+00:00 stderr F -  "", 2025-12-08T18:00:23.357232494+00:00 stderr F +  "kube-scheduler", 2025-12-08T18:00:23.357232494+00:00 stderr F   ) 2025-12-08T18:01:23.357459524+00:00 stderr F I1208 18:01:23.356818 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T18:01:23.357459524+00:00 stderr F -  "", 2025-12-08T18:01:23.357459524+00:00 stderr F +  "kube-scheduler", 2025-12-08T18:01:23.357459524+00:00 stderr F   ) 2025-12-08T18:02:23.358673702+00:00 stderr F I1208 18:02:23.358063 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T18:02:23.358673702+00:00 stderr F -  "", 2025-12-08T18:02:23.358673702+00:00 stderr F +  "kube-scheduler", 2025-12-08T18:02:23.358673702+00:00 stderr F   ) 2025-12-08T18:03:23.357051625+00:00 stderr F I1208 18:03:23.356446 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T18:03:23.357051625+00:00 stderr F -  "", 2025-12-08T18:03:23.357051625+00:00 stderr F +  "kube-scheduler", 2025-12-08T18:03:23.357051625+00:00 stderr F   ) 2025-12-08T18:04:23.358841340+00:00 stderr F I1208 18:04:23.358209 1 annotations.go:45] Updating "openshift.io/owning-component" annotation for serviceaccount-ca/openshift-kube-scheduler, diff:   string( 2025-12-08T18:04:23.358841340+00:00 stderr F -  "", 2025-12-08T18:04:23.358841340+00:00 stderr F +  "kube-scheduler", 2025-12-08T18:04:23.358841340+00:00 stderr F   ) ././@LongLink0000644000000000000000000000025400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611513033101 5ustar zuulzuul././@LongLink0000644000000000000000000000030400000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301/prometheus-webhook-snmp/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000755000175000017500000000000015115611520033077 5ustar zuulzuul././@LongLink0000644000000000000000000000031100000000000011576 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301/prometheus-webhook-snmp/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_defa0000644000175000017500000000747215115611513033115 0ustar zuulzuul2025-12-08T17:57:58.437690007+00:00 stderr F DEBUG:prometheus_webhook_snmp.utils:Configuration settings: {"debug": true, "snmp_host": "192.168.24.254", "snmp_port": 162, "snmp_community": "public", "snmp_retries": 5, "snmp_timeout": 1, "alert_oid_label": "oid", "trap_oid_prefix": "1.3.6.1.4.1.50495.15", "trap_default_oid": "1.3.6.1.4.1.50495.15.1.2.1", "trap_default_severity": "", "host": "0.0.0.0", "port": 9099, "metrics": false, "cert": "", "key": ""} 2025-12-08T17:57:58.437921063+00:00 stderr F INFO:cherrypy.error:[08/Dec/2025:17:57:58] ENGINE Listening for SIGTERM. 2025-12-08T17:57:58.437963834+00:00 stderr F INFO:cherrypy.error:[08/Dec/2025:17:57:58] ENGINE Listening for SIGHUP. 2025-12-08T17:57:58.438016135+00:00 stderr F INFO:cherrypy.error:[08/Dec/2025:17:57:58] ENGINE Listening for SIGUSR1. 2025-12-08T17:57:58.438028825+00:00 stderr F INFO:cherrypy.error:[08/Dec/2025:17:57:58] ENGINE Bus STARTING 2025-12-08T17:57:58.541427468+00:00 stderr F INFO:cherrypy.error:[08/Dec/2025:17:57:58] ENGINE Serving on http://0.0.0.0:9099 2025-12-08T17:57:58.541461589+00:00 stderr F INFO:cherrypy.error:[08/Dec/2025:17:57:58] ENGINE Bus STARTED 2025-12-08T17:59:46.965814880+00:00 stderr F DEBUG:prometheus_webhook_snmp.utils:Receiving notification: {'receiver': 'snmp_wh', 'status': 'firing', 'alerts': [{'status': 'firing', 'labels': {'alertname': 'smoketest', 'severity': 'warning'}, 'annotations': {}, 'startsAt': '2025-12-08T17:59:14Z', 'endsAt': '0001-01-01T00:00:00Z', 'generatorURL': '', 'fingerprint': '47fa8d406c7155f6'}], 'groupLabels': {}, 'commonLabels': {'alertname': 'smoketest', 'severity': 'warning'}, 'commonAnnotations': {}, 'externalURL': 'http://alertmanager-default-0:9093', 'version': '4', 'groupKey': '{}:{}', 'truncatedAlerts': 0} 2025-12-08T17:59:46.966233890+00:00 stderr F DEBUG:asyncio:Using selector: EpollSelector 2025-12-08T17:59:47.100662256+00:00 stderr F DEBUG:prometheus_webhook_snmp.utils:Sending SNMP trap: {'oid': '1.3.6.1.4.1.50495.15.1.2.1', 'alertname': 'smoketest', 'status': 'firing', 'severity': 'warning', 'instance': None, 'job': None, 'description': None, 'labels': {}, 'timestamp': 1765216754, 'rawdata': {'status': 'firing', 'labels': {}, 'annotations': {}, 'startsAt': '2025-12-08T17:59:14Z', 'endsAt': '0001-01-01T00:00:00Z', 'generatorURL': '', 'fingerprint': '47fa8d406c7155f6'}} 2025-12-08T17:59:47.102021452+00:00 stderr F INFO:cherrypy.access.140421634863936:10.217.0.76 - - [08/Dec/2025:17:59:47] "POST / HTTP/1.1" 200 - "" "Alertmanager/0.29.0" 2025-12-08T18:04:46.966721636+00:00 stderr F DEBUG:prometheus_webhook_snmp.utils:Receiving notification: {'receiver': 'snmp_wh', 'status': 'resolved', 'alerts': [{'status': 'resolved', 'labels': {'alertname': 'smoketest', 'severity': 'warning'}, 'annotations': {}, 'startsAt': '2025-12-08T17:59:14Z', 'endsAt': '2025-12-08T18:04:16.951100843Z', 'generatorURL': '', 'fingerprint': '47fa8d406c7155f6'}], 'groupLabels': {}, 'commonLabels': {'alertname': 'smoketest', 'severity': 'warning'}, 'commonAnnotations': {}, 'externalURL': 'http://alertmanager-default-0:9093', 'version': '4', 'groupKey': '{}:{}', 'truncatedAlerts': 0} 2025-12-08T18:04:46.967208990+00:00 stderr F 
DEBUG:asyncio:Using selector: EpollSelector 2025-12-08T18:04:47.107654444+00:00 stderr F DEBUG:prometheus_webhook_snmp.utils:Sending SNMP trap: {'oid': '1.3.6.1.4.1.50495.15.1.2.1', 'alertname': 'smoketest', 'status': 'resolved', 'severity': 'warning', 'instance': None, 'job': None, 'description': None, 'labels': {}, 'timestamp': 1765217056, 'rawdata': {'status': 'resolved', 'labels': {}, 'annotations': {}, 'startsAt': '2025-12-08T17:59:14Z', 'endsAt': '2025-12-08T18:04:16.951100843Z', 'generatorURL': '', 'fingerprint': '47fa8d406c7155f6'}} 2025-12-08T18:04:47.108762093+00:00 stderr F INFO:cherrypy.access.140421634863936:10.217.0.76 - - [08/Dec/2025:18:04:47] "POST / HTTP/1.1" 200 - "" "Alertmanager/0.29.0" ././@LongLink0000644000000000000000000000026100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_ob0000755000175000017500000000000015115611514033146 5ustar zuulzuul././@LongLink0000644000000000000000000000030500000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd/prometheus-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_ob0000755000175000017500000000000015115611521033144 5ustar zuulzuul././@LongLink0000644000000000000000000000031200000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd/prometheus-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_ob0000644000175000017500000010557615115611514033166 0ustar zuulzuul2025-12-08T17:55:34.922254863+00:00 stdout F ts=2025-12-08T17:55:34.921887193Z level=info caller=/workspace/cmd/operator/main.go:219 msg="Starting Prometheus Operator" version="(version=, branch=, revision=unknown)" build_context="(go=go1.24.6 (Red Hat 1.24.6-1.el9_6), platform=linux/amd64, user=, date=, tags=unknown)" feature_gates="PrometheusAgentDaemonSet=false,PrometheusShardRetentionPolicy=false,PrometheusTopologySharding=false,StatusForConfigurationResources=false" 2025-12-08T17:55:34.922254863+00:00 stdout F ts=2025-12-08T17:55:34.92212444Z level=info caller=/workspace/cmd/operator/main.go:220 msg="Operator's configuration" watch_referenced_objects_in_all_namespaces=false controller_id="" enable_config_reloader_probes=false 2025-12-08T17:55:34.922747597+00:00 stdout F ts=2025-12-08T17:55:34.922581252Z level=info caller=/workspace/internal/goruntime/cpu.go:27 msg="Updating GOMAXPROCS=1: using minimum allowed GOMAXPROCS" 2025-12-08T17:55:34.923678943+00:00 stdout F ts=2025-12-08T17:55:34.923450427Z level=info caller=/workspace/cmd/operator/main.go:234 msg="Namespaces filtering configuration " config="{allow_list=\"\",deny_list=\"\",prometheus_allow_list=\"\",alertmanager_allow_list=\"\",alertmanagerconfig_allow_list=\"\",thanosruler_allow_list=\"\"}" 2025-12-08T17:55:35.017517217+00:00 stdout F ts=2025-12-08T17:55:35.01726249Z level=info caller=/workspace/cmd/operator/main.go:275 msg="connection established" kubernetes_version=1.33.5 2025-12-08T17:55:35.035298515+00:00 stdout F 
ts=2025-12-08T17:55:35.035218963Z level=info caller=/workspace/cmd/operator/main.go:360 msg="Kubernetes API capabilities" endpointslices=true 2025-12-08T17:55:35.239110468+00:00 stdout F ts=2025-12-08T17:55:35.238917122Z level=info caller=/workspace/pkg/server/server.go:293 msg="starting insecure server" address=[::]:8080 2025-12-08T17:55:35.240164177+00:00 stdout F ts=2025-12-08T17:55:35.239283082Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:35.240979639+00:00 stdout F ts=2025-12-08T17:55:35.240175997Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheusagent 2025-12-08T17:55:35.242221063+00:00 stdout F ts=2025-12-08T17:55:35.240952048Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=alertmanager 2025-12-08T17:55:35.243012315+00:00 stdout F ts=2025-12-08T17:55:35.242960884Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=thanos 2025-12-08T17:55:36.331001889+00:00 stdout F ts=2025-12-08T17:55:36.330923457Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 2025-12-08T17:55:36.331001889+00:00 stdout F ts=2025-12-08T17:55:36.330964098Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:36.417323008+00:00 stdout F ts=2025-12-08T17:55:36.417238796Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheusagent 2025-12-08T17:55:36.417323008+00:00 stdout F ts=2025-12-08T17:55:36.417291557Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheusagent 2025-12-08T17:55:36.417323008+00:00 stdout F ts=2025-12-08T17:55:36.417298687Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheusagent 2025-12-08T17:55:36.417323008+00:00 stdout F ts=2025-12-08T17:55:36.417308138Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheusagent 2025-12-08T17:55:36.417323008+00:00 stdout F ts=2025-12-08T17:55:36.417313548Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheusagent 2025-12-08T17:55:36.417370929+00:00 stdout F ts=2025-12-08T17:55:36.417320918Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheusagent 2025-12-08T17:55:36.512034777+00:00 stdout F ts=2025-12-08T17:55:36.511833111Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=thanos 2025-12-08T17:55:36.512034777+00:00 stdout F 
ts=2025-12-08T17:55:36.511912144Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=thanos 2025-12-08T17:55:36.512034777+00:00 stdout F ts=2025-12-08T17:55:36.511922894Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=thanos 2025-12-08T17:55:36.512034777+00:00 stdout F ts=2025-12-08T17:55:36.511944374Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=thanos 2025-12-08T17:55:36.512034777+00:00 stdout F ts=2025-12-08T17:55:36.511952575Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=thanos 2025-12-08T17:55:36.512034777+00:00 stdout F ts=2025-12-08T17:55:36.511965605Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=thanos 2025-12-08T17:55:36.512034777+00:00 stdout F ts=2025-12-08T17:55:36.511972755Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=thanos 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630728273Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630787835Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630797115Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630807806Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630814966Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630825066Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630832386Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630842467Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630849237Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 
2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630862547Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630869307Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630898498Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630915469Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=alertmanager 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630931189Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=alertmanager 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.630938409Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=alertmanager 2025-12-08T17:55:36.631938476+00:00 stdout F ts=2025-12-08T17:55:36.63095492Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=alertmanager 2025-12-08T17:55:36.634996160+00:00 stdout F ts=2025-12-08T17:55:36.632616976Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheusagent 2025-12-08T17:55:36.634996160+00:00 stdout F ts=2025-12-08T17:55:36.632650257Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheusagent 2025-12-08T17:55:36.634996160+00:00 stdout F ts=2025-12-08T17:55:36.632673297Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheusagent 2025-12-08T17:55:36.634996160+00:00 stdout F ts=2025-12-08T17:55:36.632685187Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheusagent 2025-12-08T17:55:36.714183084+00:00 stdout F ts=2025-12-08T17:55:36.511983416Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=thanos 2025-12-08T17:55:36.714183084+00:00 stdout F ts=2025-12-08T17:55:36.714154553Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=thanos 2025-12-08T17:55:36.714222285+00:00 stdout F ts=2025-12-08T17:55:36.714188434Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=thanos 2025-12-08T17:55:36.714222285+00:00 stdout F ts=2025-12-08T17:55:36.714194394Z level=info 
caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=thanos 2025-12-08T17:55:36.714222285+00:00 stdout F ts=2025-12-08T17:55:36.714202474Z level=info caller=/workspace/pkg/thanos/operator.go:317 msg="successfully synced all caches" component=thanos-controller 2025-12-08T17:55:36.909971876+00:00 stdout F ts=2025-12-08T17:55:36.909827632Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 2025-12-08T17:55:36.909971876+00:00 stdout F ts=2025-12-08T17:55:36.909906414Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:36.909971876+00:00 stdout F ts=2025-12-08T17:55:36.909916025Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 2025-12-08T17:55:36.909971876+00:00 stdout F ts=2025-12-08T17:55:36.909929115Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:36.909971876+00:00 stdout F ts=2025-12-08T17:55:36.909935185Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 2025-12-08T17:55:36.911170599+00:00 stdout F ts=2025-12-08T17:55:36.909943475Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheus 2025-12-08T17:55:36.911170599+00:00 stdout F ts=2025-12-08T17:55:36.911148538Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheus 2025-12-08T17:55:36.911170599+00:00 stdout F ts=2025-12-08T17:55:36.911163008Z level=info caller=/workspace/pkg/prometheus/server/operator.go:446 msg="successfully synced all caches" component=prometheus-controller 2025-12-08T17:55:37.012864399+00:00 stdout F ts=2025-12-08T17:55:37.012034826Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheusagent 2025-12-08T17:55:37.012864399+00:00 stdout F ts=2025-12-08T17:55:37.012106668Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheusagent 2025-12-08T17:55:37.012864399+00:00 stdout F ts=2025-12-08T17:55:37.012140959Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheusagent 2025-12-08T17:55:37.012864399+00:00 stdout F ts=2025-12-08T17:55:37.01215247Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheusagent 2025-12-08T17:55:37.012864399+00:00 stdout F ts=2025-12-08T17:55:37.01215931Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheusagent 2025-12-08T17:55:37.012864399+00:00 stdout F ts=2025-12-08T17:55:37.01217367Z 
level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheusagent 2025-12-08T17:55:37.012864399+00:00 stdout F ts=2025-12-08T17:55:37.01218129Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheusagent 2025-12-08T17:55:37.012864399+00:00 stdout F ts=2025-12-08T17:55:37.012191251Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=prometheusagent 2025-12-08T17:55:37.012864399+00:00 stdout F ts=2025-12-08T17:55:37.012198081Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=prometheusagent 2025-12-08T17:55:37.012864399+00:00 stdout F ts=2025-12-08T17:55:37.012206431Z level=info caller=/workspace/pkg/prometheus/agent/operator.go:490 msg="successfully synced all caches" component=prometheusagent-controller 2025-12-08T17:55:37.033067684+00:00 stdout F ts=2025-12-08T17:55:37.031091329Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=alertmanager 2025-12-08T17:55:37.033067684+00:00 stdout F ts=2025-12-08T17:55:37.031168812Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=alertmanager 2025-12-08T17:55:37.033067684+00:00 stdout F ts=2025-12-08T17:55:37.031179062Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=alertmanager 2025-12-08T17:55:37.033067684+00:00 stdout F ts=2025-12-08T17:55:37.031190522Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=alertmanager 2025-12-08T17:55:37.033067684+00:00 stdout F ts=2025-12-08T17:55:37.031198112Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=alertmanager 2025-12-08T17:55:37.033067684+00:00 stdout F ts=2025-12-08T17:55:37.031210803Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=alertmanager 2025-12-08T17:55:37.033067684+00:00 stdout F ts=2025-12-08T17:55:37.031220033Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=alertmanager 2025-12-08T17:55:37.033067684+00:00 stdout F ts=2025-12-08T17:55:37.031230563Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:349 msg="Waiting for caches to sync" controller=alertmanager 2025-12-08T17:55:37.033067684+00:00 stdout F ts=2025-12-08T17:55:37.031237533Z level=info caller=/cachi2/output/deps/gomod/pkg/mod/k8s.io/client-go@v0.34.1/tools/cache/shared_informer.go:356 msg="Caches are synced" controller=alertmanager 2025-12-08T17:55:37.033067684+00:00 stdout F ts=2025-12-08T17:55:37.031247114Z level=info caller=/workspace/pkg/alertmanager/operator.go:369 msg="successfully synced all caches" component=alertmanager-controller 
2025-12-08T17:57:37.396617587+00:00 stdout F ts=2025-12-08T17:57:37.396106924Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:37.608984216+00:00 stdout F ts=2025-12-08T17:57:37.541693956Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:37.909698417+00:00 stdout F ts=2025-12-08T17:57:37.909574274Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:39.718331433+00:00 stdout F ts=2025-12-08T17:57:39.717655396Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:41.289573032+00:00 stdout F ts=2025-12-08T17:57:41.283820274Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:41.352121059+00:00 stdout F ts=2025-12-08T17:57:41.352070027Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:43.841286173+00:00 stdout F ts=2025-12-08T17:57:43.840851192Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:43.876733230+00:00 stdout F ts=2025-12-08T17:57:43.876626126Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:45.301064372+00:00 stdout F ts=2025-12-08T17:57:45.300365794Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:46.260330985+00:00 stdout F ts=2025-12-08T17:57:46.260229292Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:57:46.274735708+00:00 stdout F ts=2025-12-08T17:57:46.274677225Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:46.421976693+00:00 stdout F ts=2025-12-08T17:57:46.421810239Z level=info caller=/workspace/pkg/alertmanager/operator.go:724 msg="StatefulSet not found" component=alertmanager-controller key=service-telemetry/alertmanager-default 2025-12-08T17:57:46.431348495+00:00 stdout F ts=2025-12-08T17:57:46.430956345Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:57:46.512166344+00:00 stdout F ts=2025-12-08T17:57:46.511523757Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:46.640702406+00:00 stdout F ts=2025-12-08T17:57:46.640183853Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:57:50.152414639+00:00 stdout F ts=2025-12-08T17:57:50.150231832Z level=info 
caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:57:50.154358348+00:00 stdout F ts=2025-12-08T17:57:50.153724672Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:50.234412568+00:00 stdout F ts=2025-12-08T17:57:50.234345076Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:57:50.313948503+00:00 stdout F ts=2025-12-08T17:57:50.312572678Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:55.849711698+00:00 stdout F ts=2025-12-08T17:57:55.84900427Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:57:55.850656784+00:00 stdout F ts=2025-12-08T17:57:55.850604932Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:58.676869449+00:00 stdout F ts=2025-12-08T17:57:58.676308374Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:57:58.682970407+00:00 stdout F ts=2025-12-08T17:57:58.682631408Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:58.719402878+00:00 stdout F ts=2025-12-08T17:57:58.719326486Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:57:58.722418946+00:00 stdout F ts=2025-12-08T17:57:58.722355185Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:59.711164061+00:00 stdout F ts=2025-12-08T17:57:59.711099269Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:57:59.817775126+00:00 stdout F ts=2025-12-08T17:57:59.817705105Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:57:59.922780310+00:00 stdout F ts=2025-12-08T17:57:59.92240788Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:00.019914541+00:00 stdout F ts=2025-12-08T17:58:00.019768667Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:01.099049872+00:00 stdout F ts=2025-12-08T17:58:01.09898711Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:01.100991392+00:00 stdout F ts=2025-12-08T17:58:01.1009329Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller 
key=service-telemetry/default 2025-12-08T17:58:02.013181798+00:00 stdout F ts=2025-12-08T17:58:02.012716046Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:02.021334909+00:00 stdout F ts=2025-12-08T17:58:02.017941421Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:02.214803630+00:00 stdout F ts=2025-12-08T17:58:02.214739308Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:02.317345699+00:00 stdout F ts=2025-12-08T17:58:02.317038621Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:03.537742492+00:00 stdout F ts=2025-12-08T17:58:03.537074115Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:03.721796449+00:00 stdout F ts=2025-12-08T17:58:03.721717537Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:03.835671901+00:00 stdout F ts=2025-12-08T17:58:03.83560555Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:05.091424387+00:00 stdout F ts=2025-12-08T17:58:05.090625067Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:05.094967229+00:00 stdout F ts=2025-12-08T17:58:05.093026779Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:07.321532966+00:00 stdout F ts=2025-12-08T17:58:07.320729705Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:07.424965469+00:00 stdout F ts=2025-12-08T17:58:07.424782264Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:07.524462271+00:00 stdout F ts=2025-12-08T17:58:07.524189344Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:07.643102927+00:00 stdout F ts=2025-12-08T17:58:07.640267454Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:07.825345857+00:00 stdout F ts=2025-12-08T17:58:07.825045219Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:08.017347860+00:00 stdout F ts=2025-12-08T17:58:08.015731808Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:08.029242157+00:00 stdout F ts=2025-12-08T17:58:08.029161885Z 
level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:09.992582621+00:00 stdout F ts=2025-12-08T17:58:09.992277743Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:09.994842950+00:00 stdout F ts=2025-12-08T17:58:09.994658965Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:11.307826354+00:00 stdout F ts=2025-12-08T17:58:11.307251419Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:11.311604402+00:00 stdout F ts=2025-12-08T17:58:11.311541361Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:12.066807830+00:00 stdout F ts=2025-12-08T17:58:12.066440291Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:12.072690593+00:00 stdout F ts=2025-12-08T17:58:12.07261686Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:14.420668718+00:00 stdout F ts=2025-12-08T17:58:14.42000453Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:14.423845140+00:00 stdout F ts=2025-12-08T17:58:14.423760028Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:19.194005068+00:00 stdout F ts=2025-12-08T17:58:19.191101993Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:19.194276105+00:00 stdout F ts=2025-12-08T17:58:19.194156372Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:20.456241530+00:00 stdout F ts=2025-12-08T17:58:20.455326677Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:20.458532200+00:00 stdout F ts=2025-12-08T17:58:20.458487469Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:32.581262089+00:00 stdout F ts=2025-12-08T17:58:32.580795797Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default 2025-12-08T17:58:32.582432340+00:00 stdout F ts=2025-12-08T17:58:32.582356798Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default 2025-12-08T17:58:37.154651862+00:00 stdout F ts=2025-12-08T17:58:37.154010816Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" 
component=alertmanager-controller key=service-telemetry/default
2025-12-08T17:58:37.158162453+00:00 stdout F ts=2025-12-08T17:58:37.158102371Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default
2025-12-08T17:59:01.001948275+00:00 stdout F ts=2025-12-08T17:59:01.001326918Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default
2025-12-08T17:59:01.006049133+00:00 stdout F ts=2025-12-08T17:59:01.00596206Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default
2025-12-08T17:59:01.210319016+00:00 stdout F ts=2025-12-08T17:59:01.210231564Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default
2025-12-08T17:59:01.317753387+00:00 stdout F ts=2025-12-08T17:59:01.317676755Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default
2025-12-08T17:59:01.517926754+00:00 stdout F ts=2025-12-08T17:59:01.51739812Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default
2025-12-08T17:59:01.709013770+00:00 stdout F ts=2025-12-08T17:59:01.622856299Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default
2025-12-08T17:59:01.921543421+00:00 stdout F ts=2025-12-08T17:59:01.921422998Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default
2025-12-08T17:59:05.350287578+00:00 stdout F ts=2025-12-08T17:59:05.349912838Z level=info caller=/workspace/pkg/alertmanager/operator.go:605 msg="sync alertmanager" component=alertmanager-controller key=service-telemetry/default
2025-12-08T17:59:05.352991489+00:00 stdout F ts=2025-12-08T17:59:05.352928227Z level=info caller=/workspace/pkg/prometheus/server/operator.go:843 msg="sync prometheus" component=prometheus-controller key=service-telemetry/default
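The entries above come from the prometheus-operator pod: its two controllers are periodically reconciling the default Prometheus and Alertmanager objects in the service-telemetry namespace, and the earlier "StatefulSet not found" message indicates the alertmanager-default StatefulSet did not exist yet when that sync ran. As a hedged spot-check (not part of the captured job output; it assumes an authenticated oc session against the same cluster and that the prometheus-operator CRDs are installed), the reconciled objects and the StatefulSets the operator manages could be listed with:

  # confirm the custom resources the controllers are syncing
  oc -n service-telemetry get prometheus/default alertmanager/default
  # confirm the StatefulSets created from them (names follow the operator's <kind>-<name> convention)
  oc -n service-telemetry get statefulset/prometheus-default statefulset/alertmanager-default

If both StatefulSets report their replicas ready, the repeated sync messages above are routine reconcile loops rather than a sign of trouble.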
[log file: home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/extract/0.log]
2025-12-08T17:55:12.596621967+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Using in-cluster kube client config"
2025-12-08T17:55:12.608167487+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/acme.cert-manager.io_challenges.yaml
2025-12-08T17:55:12.613071808+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/acme.cert-manager.io_orders.yaml
2025-12-08T17:55:12.613800389+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/cert-manager-operator-controller-manager-metrics-service_v1_service.yaml
2025-12-08T17:55:12.614659002+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/cert-manager-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml
2025-12-08T17:55:12.615491984+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/cert-manager-operator.clusterserviceversion.yaml
2025-12-08T17:55:12.622701118+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/cert-manager.io_certificaterequests.yaml
2025-12-08T17:55:12.630566800+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/cert-manager.io_certificates.yaml
2025-12-08T17:55:12.631966567+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/cert-manager.io_clusterissuers.yaml
2025-12-08T17:55:12.648040080+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/cert-manager.io_issuers.yaml
2025-12-08T17:55:12.656342744+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/operator.openshift.io_certmanagers.yaml
2025-12-08T17:55:12.658105851+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/manifests/operator.openshift.io_istiocsrs.yaml
2025-12-08T17:55:12.667794472+00:00 stderr F time="2025-12-08T17:55:12Z" level=info msg="Reading file" file=/bundle/metadata/annotations.yaml
[log file: home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/pull/0.log]
2025-12-08T17:55:11.732708797+00:00 stdout F skipping a dir without errors: /
2025-12-08T17:55:11.732708797+00:00
stdout F skipping a dir without errors: /afs 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /boot 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /bundle 2025-12-08T17:55:11.733221430+00:00 stdout F skipping all files in the dir: /dev 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /etc 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /etc/X11 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /etc/X11/applnk 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /etc/X11/fontpath.d 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /etc/X11/xinit 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /etc/X11/xinit/xinitrc.d 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /etc/X11/xinit/xinput.d 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /etc/alternatives 2025-12-08T17:55:11.733221430+00:00 stdout F skipping a dir without errors: /etc/bash_completion.d 2025-12-08T17:55:11.733263551+00:00 stdout F skipping a dir without errors: /etc/crypto-policies 2025-12-08T17:55:11.733310713+00:00 stdout F skipping a dir without errors: /etc/crypto-policies/back-ends 2025-12-08T17:55:11.733435576+00:00 stdout F skipping a dir without errors: /etc/crypto-policies/local.d 2025-12-08T17:55:11.733462787+00:00 stdout F skipping a dir without errors: /etc/crypto-policies/policies 2025-12-08T17:55:11.733496808+00:00 stdout F skipping a dir without errors: /etc/crypto-policies/policies/modules 2025-12-08T17:55:11.733529938+00:00 stdout F skipping a dir without errors: /etc/crypto-policies/state 2025-12-08T17:55:11.733589201+00:00 stdout F skipping a dir without errors: /etc/default 2025-12-08T17:55:11.733626732+00:00 stdout F skipping a dir without errors: /etc/dnf 2025-12-08T17:55:11.733658733+00:00 stdout F skipping a dir without errors: /etc/dnf/aliases.d 2025-12-08T17:55:11.733695874+00:00 stdout F skipping a dir without errors: /etc/dnf/modules.d 2025-12-08T17:55:11.733727805+00:00 stdout F skipping a dir without errors: /etc/dnf/modules.defaults.d 2025-12-08T17:55:11.733759266+00:00 stdout F skipping a dir without errors: /etc/dnf/plugins 2025-12-08T17:55:11.733793646+00:00 stdout F skipping a dir without errors: /etc/dnf/protected.d 2025-12-08T17:55:11.733908450+00:00 stdout F skipping a dir without errors: /etc/dnf/vars 2025-12-08T17:55:11.734031923+00:00 stdout F skipping a dir without errors: /etc/fonts 2025-12-08T17:55:11.734068094+00:00 stdout F skipping a dir without errors: /etc/fonts/conf.d 2025-12-08T17:55:11.734137146+00:00 stdout F skipping a dir without errors: /etc/gcrypt 2025-12-08T17:55:11.734182577+00:00 stdout F skipping a dir without errors: /etc/gnupg 2025-12-08T17:55:11.734247509+00:00 stdout F skipping a dir without errors: /etc/gss 2025-12-08T17:55:11.734285640+00:00 stdout F skipping a dir without errors: /etc/gss/mech.d 2025-12-08T17:55:11.734379072+00:00 stdout F skipping a dir without errors: /etc/issue.d 2025-12-08T17:55:11.734449184+00:00 stdout F skipping a dir without errors: /etc/krb5.conf.d 2025-12-08T17:55:11.734531176+00:00 stdout F skipping a dir without errors: /etc/ld.so.conf.d 2025-12-08T17:55:11.734590158+00:00 stdout F skipping a dir without errors: /etc/libreport 2025-12-08T17:55:11.734630059+00:00 stdout F skipping a dir without errors: /etc/libreport/events 2025-12-08T17:55:11.734669040+00:00 stdout F 
skipping a dir without errors: /etc/libreport/events.d 2025-12-08T17:55:11.734722091+00:00 stdout F skipping a dir without errors: /etc/libreport/plugins 2025-12-08T17:55:11.734760742+00:00 stdout F skipping a dir without errors: /etc/libreport/workflows.d 2025-12-08T17:55:11.734868745+00:00 stdout F skipping a dir without errors: /etc/logrotate.d 2025-12-08T17:55:11.734976858+00:00 stdout F skipping a dir without errors: /etc/motd.d 2025-12-08T17:55:11.735093651+00:00 stdout F skipping a dir without errors: /etc/openldap 2025-12-08T17:55:11.735132862+00:00 stdout F skipping a dir without errors: /etc/openldap/certs 2025-12-08T17:55:11.735186264+00:00 stdout F skipping a dir without errors: /etc/opt 2025-12-08T17:55:11.735246605+00:00 stdout F skipping a dir without errors: /etc/pkcs11 2025-12-08T17:55:11.735285286+00:00 stdout F skipping a dir without errors: /etc/pkcs11/modules 2025-12-08T17:55:11.735333577+00:00 stdout F skipping a dir without errors: /etc/pki 2025-12-08T17:55:11.735375609+00:00 stdout F skipping a dir without errors: /etc/pki/ca-trust 2025-12-08T17:55:11.735442960+00:00 stdout F skipping a dir without errors: /etc/pki/ca-trust/extracted 2025-12-08T17:55:11.735500032+00:00 stdout F skipping a dir without errors: /etc/pki/ca-trust/extracted/edk2 2025-12-08T17:55:11.735569024+00:00 stdout F skipping a dir without errors: /etc/pki/ca-trust/extracted/java 2025-12-08T17:55:11.735639496+00:00 stdout F skipping a dir without errors: /etc/pki/ca-trust/extracted/openssl 2025-12-08T17:55:11.735714368+00:00 stdout F skipping a dir without errors: /etc/pki/ca-trust/extracted/pem 2025-12-08T17:55:11.736016976+00:00 stdout F skipping a dir without errors: /etc/pki/ca-trust/extracted/pem/directory-hash 2025-12-08T17:55:11.742624944+00:00 stdout F skipping a dir without errors: /etc/pki/ca-trust/source 2025-12-08T17:55:11.742624944+00:00 stdout F skipping a dir without errors: /etc/pki/ca-trust/source/anchors 2025-12-08T17:55:11.742624944+00:00 stdout F skipping a dir without errors: /etc/pki/ca-trust/source/blocklist 2025-12-08T17:55:11.742674045+00:00 stdout F skipping a dir without errors: /etc/pki/entitlement 2025-12-08T17:55:11.742711606+00:00 stdout F skipping a dir without errors: /etc/pki/java 2025-12-08T17:55:11.742787318+00:00 stdout F skipping a dir without errors: /etc/pki/product 2025-12-08T17:55:11.742787318+00:00 stdout F skipping a dir without errors: /etc/pki/product-default 2025-12-08T17:55:11.742892551+00:00 stdout F skipping a dir without errors: /etc/pki/rpm-gpg 2025-12-08T17:55:11.742979553+00:00 stdout F skipping a dir without errors: /etc/pki/swid 2025-12-08T17:55:11.743019014+00:00 stdout F skipping a dir without errors: /etc/pki/swid/CA 2025-12-08T17:55:11.743063916+00:00 stdout F skipping a dir without errors: /etc/pki/swid/CA/redhat.com 2025-12-08T17:55:11.743124647+00:00 stdout F skipping a dir without errors: /etc/pki/tls 2025-12-08T17:55:11.743220850+00:00 stdout F skipping a dir without errors: /etc/pki/tls/certs 2025-12-08T17:55:11.743253641+00:00 stdout F skipping a dir without errors: /etc/pki/tls/misc 2025-12-08T17:55:11.743311502+00:00 stdout F skipping a dir without errors: /etc/pki/tls/openssl.d 2025-12-08T17:55:11.743353503+00:00 stdout F skipping a dir without errors: /etc/pki/tls/private 2025-12-08T17:55:11.743391404+00:00 stdout F skipping a dir without errors: /etc/pm 2025-12-08T17:55:11.743430495+00:00 stdout F skipping a dir without errors: /etc/pm/config.d 2025-12-08T17:55:11.743470436+00:00 stdout F skipping a dir without errors: 
/etc/pm/power.d 2025-12-08T17:55:11.743502667+00:00 stdout F skipping a dir without errors: /etc/pm/sleep.d 2025-12-08T17:55:11.743546059+00:00 stdout F skipping a dir without errors: /etc/popt.d 2025-12-08T17:55:11.743581639+00:00 stdout F skipping a dir without errors: /etc/profile.d 2025-12-08T17:55:11.743720683+00:00 stdout F skipping a dir without errors: /etc/rpm 2025-12-08T17:55:11.743755634+00:00 stdout F skipping a dir without errors: /etc/rwtab.d 2025-12-08T17:55:11.743801195+00:00 stdout F skipping a dir without errors: /etc/sasl2 2025-12-08T17:55:11.743808685+00:00 stdout F skipping a dir without errors: /etc/selinux 2025-12-08T17:55:11.743898198+00:00 stdout F skipping a dir without errors: /etc/skel 2025-12-08T17:55:11.743978600+00:00 stdout F skipping a dir without errors: /etc/ssl 2025-12-08T17:55:11.744006751+00:00 stdout F skipping a dir without errors: /etc/statetab.d 2025-12-08T17:55:11.744059122+00:00 stdout F skipping a dir without errors: /etc/swid 2025-12-08T17:55:11.744087413+00:00 stdout F skipping a dir without errors: /etc/swid/swidtags.d 2025-12-08T17:55:11.744124014+00:00 stdout F skipping a dir without errors: /etc/sysconfig 2025-12-08T17:55:11.744171885+00:00 stdout F skipping a dir without errors: /etc/terminfo 2025-12-08T17:55:11.744233057+00:00 stdout F skipping a dir without errors: /etc/xdg 2025-12-08T17:55:11.744233057+00:00 stdout F skipping a dir without errors: /etc/xdg/autostart 2025-12-08T17:55:11.744265558+00:00 stdout F skipping a dir without errors: /etc/yum.repos.d 2025-12-08T17:55:11.744319649+00:00 stdout F skipping a dir without errors: /home 2025-12-08T17:55:11.744356280+00:00 stdout F skipping a dir without errors: /licenses 2025-12-08T17:55:11.744406591+00:00 stdout F skipping a dir without errors: /manifests 2025-12-08T17:55:11.744529835+00:00 stdout F skipping a dir without errors: /media 2025-12-08T17:55:11.744566826+00:00 stdout F skipping a dir without errors: /metadata 2025-12-08T17:55:11.744688409+00:00 stdout F skipping a dir without errors: /mnt 2025-12-08T17:55:11.744724280+00:00 stdout F skipping a dir without errors: /opt 2025-12-08T17:55:11.744771622+00:00 stdout F skipping all files in the dir: /proc 2025-12-08T17:55:11.744811523+00:00 stdout F skipping a dir without errors: /root 2025-12-08T17:55:11.744928596+00:00 stdout F skipping a dir without errors: /root/buildinfo 2025-12-08T17:55:11.744960827+00:00 stdout F skipping a dir without errors: /root/buildinfo/content_manifests 2025-12-08T17:55:11.745010568+00:00 stdout F skipping a dir without errors: /run 2025-12-08T17:55:11.745068300+00:00 stdout F skipping a dir without errors: /run/blkid 2025-12-08T17:55:11.745068300+00:00 stdout F skipping a dir without errors: /run/lock 2025-12-08T17:55:11.745110921+00:00 stdout F skipping a dir without errors: /run/motd.d 2025-12-08T17:55:11.745152412+00:00 stdout F skipping a dir without errors: /run/secrets 2025-12-08T17:55:11.745152412+00:00 stdout F skipping a dir without errors: /run/secrets/kubernetes.io 2025-12-08T17:55:11.745182843+00:00 stdout F skipping a dir without errors: /run/secrets/kubernetes.io/serviceaccount 2025-12-08T17:55:11.745209134+00:00 stdout F skipping a dir without errors: /run/secrets/kubernetes.io/serviceaccount/..2025_12_08_17_55_04.1544202792 2025-12-08T17:55:11.745275515+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm 2025-12-08T17:55:11.745303006+00:00 stdout F skipping a dir without errors: /run/secrets/rhsm/ca 2025-12-08T17:55:11.745329947+00:00 stdout F skipping a dir without 
errors: /run/secrets/rhsm/syspurpose 2025-12-08T17:55:11.745365928+00:00 stdout F skipping a dir without errors: /run/setrans 2025-12-08T17:55:11.745406779+00:00 stdout F skipping a dir without errors: /srv 2025-12-08T17:55:11.745434390+00:00 stdout F skipping all files in the dir: /sys 2025-12-08T17:55:11.745469231+00:00 stdout F skipping a dir without errors: /tests 2025-12-08T17:55:11.745495701+00:00 stdout F skipping a dir without errors: /tests/scorecard 2025-12-08T17:55:11.745540072+00:00 stdout F skipping a dir without errors: /tmp 2025-12-08T17:55:11.745580973+00:00 stdout F skipping a dir without errors: /usr 2025-12-08T17:55:11.745663596+00:00 stdout F skipping a dir without errors: /usr/bin 2025-12-08T17:55:11.747348320+00:00 stdout F skipping a dir without errors: /usr/games 2025-12-08T17:55:11.747365881+00:00 stdout F skipping a dir without errors: /usr/include 2025-12-08T17:55:11.747410652+00:00 stdout F skipping a dir without errors: /usr/lib 2025-12-08T17:55:11.747508305+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id 2025-12-08T17:55:11.747508305+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/00 2025-12-08T17:55:11.747573747+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/01 2025-12-08T17:55:11.747573747+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/03 2025-12-08T17:55:11.747637569+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/04 2025-12-08T17:55:11.747679420+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/06 2025-12-08T17:55:11.747752572+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/07 2025-12-08T17:55:11.747768633+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/08 2025-12-08T17:55:11.747841505+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/0d 2025-12-08T17:55:11.747850545+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/0e 2025-12-08T17:55:11.747920407+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/11 2025-12-08T17:55:11.747952168+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/12 2025-12-08T17:55:11.747995759+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/13 2025-12-08T17:55:11.748043540+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/15 2025-12-08T17:55:11.748061410+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/16 2025-12-08T17:55:11.748115122+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/17 2025-12-08T17:55:11.748155703+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/19 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/1a 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/1b 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/1c 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/1f 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/21 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/22 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/23 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/25 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/26 2025-12-08T17:55:11.749065667+00:00 
stdout F skipping a dir without errors: /usr/lib/.build-id/27 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/28 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/29 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/2a 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/2c 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/2e 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/2f 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/31 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/32 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/33 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/34 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/36 2025-12-08T17:55:11.749065667+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/38 2025-12-08T17:55:11.749116788+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/3a 2025-12-08T17:55:11.749159349+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/3b 2025-12-08T17:55:11.749210681+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/3f 2025-12-08T17:55:11.749263502+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/41 2025-12-08T17:55:11.749302643+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/42 2025-12-08T17:55:11.749339684+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/43 2025-12-08T17:55:11.749407046+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/44 2025-12-08T17:55:11.749446377+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/45 2025-12-08T17:55:11.749503219+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/49 2025-12-08T17:55:11.749541270+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/4a 2025-12-08T17:55:11.749577951+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/4b 2025-12-08T17:55:11.749624782+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/4c 2025-12-08T17:55:11.749677893+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/4d 2025-12-08T17:55:11.749731685+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/4e 2025-12-08T17:55:11.749787196+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/4f 2025-12-08T17:55:11.749833367+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/53 2025-12-08T17:55:11.749907789+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/54 2025-12-08T17:55:11.749945700+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/55 2025-12-08T17:55:11.749991742+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/57 2025-12-08T17:55:11.750043043+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/58 2025-12-08T17:55:11.750079194+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/5a 2025-12-08T17:55:11.750124105+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/5b 2025-12-08T17:55:11.750161226+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/5c 2025-12-08T17:55:11.750198487+00:00 stdout F skipping a dir 
without errors: /usr/lib/.build-id/5d 2025-12-08T17:55:11.750253919+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/5f 2025-12-08T17:55:11.750314220+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/60 2025-12-08T17:55:11.750350381+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/61 2025-12-08T17:55:11.750393883+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/62 2025-12-08T17:55:11.750431754+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/63 2025-12-08T17:55:11.750467915+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/64 2025-12-08T17:55:11.750517307+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/65 2025-12-08T17:55:11.750563508+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/66 2025-12-08T17:55:11.750599839+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/68 2025-12-08T17:55:11.750636210+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/69 2025-12-08T17:55:11.750680501+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/6c 2025-12-08T17:55:11.750724442+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/6e 2025-12-08T17:55:11.750761173+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/6f 2025-12-08T17:55:11.750807024+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/71 2025-12-08T17:55:11.750887076+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/72 2025-12-08T17:55:11.750960528+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/74 2025-12-08T17:55:11.751042601+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/75 2025-12-08T17:55:11.751082762+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/76 2025-12-08T17:55:11.751138113+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/77 2025-12-08T17:55:11.751184414+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/78 2025-12-08T17:55:11.751227585+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/7a 2025-12-08T17:55:11.751264506+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/7d 2025-12-08T17:55:11.751316818+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/7e 2025-12-08T17:55:11.751369829+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/7f 2025-12-08T17:55:11.751413600+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/80 2025-12-08T17:55:11.751450841+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/82 2025-12-08T17:55:11.751495933+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/83 2025-12-08T17:55:11.751552114+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/86 2025-12-08T17:55:11.751589185+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/87 2025-12-08T17:55:11.751634646+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/88 2025-12-08T17:55:11.751675747+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/89 2025-12-08T17:55:11.751731189+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/8b 2025-12-08T17:55:11.751776070+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/8c 2025-12-08T17:55:11.751836472+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/8e 2025-12-08T17:55:11.751849702+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/8f 2025-12-08T17:55:11.751914474+00:00 stdout F skipping a dir without errors: 
/usr/lib/.build-id/90 2025-12-08T17:55:11.751962285+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/93 2025-12-08T17:55:11.752002166+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/94 2025-12-08T17:55:11.752078098+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/95 2025-12-08T17:55:11.752252823+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/97 2025-12-08T17:55:11.752252823+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/98 2025-12-08T17:55:11.752252823+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/99 2025-12-08T17:55:11.752252823+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/9b 2025-12-08T17:55:11.752296784+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/9c 2025-12-08T17:55:11.752332925+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/9d 2025-12-08T17:55:11.752383316+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/9e 2025-12-08T17:55:11.752436188+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/9f 2025-12-08T17:55:11.752473339+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/a0 2025-12-08T17:55:11.752519480+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/a3 2025-12-08T17:55:11.752575331+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/a4 2025-12-08T17:55:11.752591732+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/a5 2025-12-08T17:55:11.752646073+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/a6 2025-12-08T17:55:11.752660143+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/a7 2025-12-08T17:55:11.752712445+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/a8 2025-12-08T17:55:11.752762296+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/aa 2025-12-08T17:55:11.752807167+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/ac 2025-12-08T17:55:11.752853549+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/ad 2025-12-08T17:55:11.752953661+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/ae 2025-12-08T17:55:11.752968252+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/af 2025-12-08T17:55:11.753011523+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/b0 2025-12-08T17:55:11.753070124+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/b2 2025-12-08T17:55:11.753115416+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/b3 2025-12-08T17:55:11.753164568+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/b4 2025-12-08T17:55:11.753290101+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/b6 2025-12-08T17:55:11.753290101+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/b7 2025-12-08T17:55:11.753290101+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/b8 2025-12-08T17:55:11.753309902+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/b9 2025-12-08T17:55:11.753361793+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/ba 2025-12-08T17:55:11.753396334+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/bb 2025-12-08T17:55:11.753440455+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/bc 2025-12-08T17:55:11.753518457+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/bd 2025-12-08T17:55:11.753571389+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/be 
2025-12-08T17:55:11.753615040+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/c0 2025-12-08T17:55:11.753651761+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/c2 2025-12-08T17:55:11.753688892+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/c5 2025-12-08T17:55:11.753723913+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/c6 2025-12-08T17:55:11.753774714+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/c9 2025-12-08T17:55:11.753814355+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/cb 2025-12-08T17:55:11.753856546+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/cc 2025-12-08T17:55:11.753971139+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/ce 2025-12-08T17:55:11.754018021+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/cf 2025-12-08T17:55:11.754063722+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/d0 2025-12-08T17:55:11.754116743+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/d1 2025-12-08T17:55:11.754164904+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/d2 2025-12-08T17:55:11.754208576+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/d3 2025-12-08T17:55:11.754243737+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/d4 2025-12-08T17:55:11.754280067+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/d5 2025-12-08T17:55:11.754331019+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/d6 2025-12-08T17:55:11.754369090+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/d8 2025-12-08T17:55:11.754405061+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/d9 2025-12-08T17:55:11.754442552+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/da 2025-12-08T17:55:11.754502173+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/db 2025-12-08T17:55:11.754559975+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/dc 2025-12-08T17:55:11.754595346+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/dd 2025-12-08T17:55:11.754639507+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/de 2025-12-08T17:55:11.754674778+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/df 2025-12-08T17:55:11.754717649+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/e0 2025-12-08T17:55:11.754761280+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/e1 2025-12-08T17:55:11.754817292+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/e2 2025-12-08T17:55:11.754865333+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/e3 2025-12-08T17:55:11.754926755+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/e6 2025-12-08T17:55:11.755013247+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/e7 2025-12-08T17:55:11.755096129+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/e8 2025-12-08T17:55:11.755122600+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/e9 2025-12-08T17:55:11.755166641+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/ea 2025-12-08T17:55:11.755204752+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/eb 2025-12-08T17:55:11.755282134+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/ec 2025-12-08T17:55:11.755318595+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/ed 
2025-12-08T17:55:11.755353466+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/ef 2025-12-08T17:55:11.755394917+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/f0 2025-12-08T17:55:11.755470699+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/f1 2025-12-08T17:55:11.755519500+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/f4 2025-12-08T17:55:11.755557271+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/f7 2025-12-08T17:55:11.755594582+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/f8 2025-12-08T17:55:11.755639444+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/fa 2025-12-08T17:55:11.755683185+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/fd 2025-12-08T17:55:11.755727056+00:00 stdout F skipping a dir without errors: /usr/lib/.build-id/fe 2025-12-08T17:55:11.755770047+00:00 stdout F skipping a dir without errors: /usr/lib/debug 2025-12-08T17:55:11.755798658+00:00 stdout F skipping a dir without errors: /usr/lib/debug/.dwz 2025-12-08T17:55:11.755868950+00:00 stdout F skipping a dir without errors: /usr/lib/debug/usr 2025-12-08T17:55:11.755931411+00:00 stdout F skipping a dir without errors: /usr/lib/debug/usr/bin 2025-12-08T17:55:11.755959833+00:00 stdout F skipping a dir without errors: /usr/lib/debug/usr/lib 2025-12-08T17:55:11.755988114+00:00 stdout F skipping a dir without errors: /usr/lib/debug/usr/lib64 2025-12-08T17:55:11.756017135+00:00 stdout F skipping a dir without errors: /usr/lib/debug/usr/sbin 2025-12-08T17:55:11.756043885+00:00 stdout F skipping a dir without errors: /usr/lib/games 2025-12-08T17:55:11.756070346+00:00 stdout F skipping a dir without errors: /usr/lib/locale 2025-12-08T17:55:11.756106177+00:00 stdout F skipping a dir without errors: /usr/lib/locale/C.utf8 2025-12-08T17:55:11.756182639+00:00 stdout F skipping a dir without errors: /usr/lib/locale/C.utf8/LC_MESSAGES 2025-12-08T17:55:11.756278522+00:00 stdout F skipping a dir without errors: /usr/lib/modules 2025-12-08T17:55:11.756314493+00:00 stdout F skipping a dir without errors: /usr/lib/motd.d 2025-12-08T17:55:11.756356884+00:00 stdout F skipping a dir without errors: /usr/lib/rpm 2025-12-08T17:55:11.756384464+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/fileattrs 2025-12-08T17:55:11.756411425+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/lua 2025-12-08T17:55:11.756468157+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/macros.d 2025-12-08T17:55:11.756534368+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform 2025-12-08T17:55:11.756563979+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/aarch64-linux 2025-12-08T17:55:11.756611230+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/alpha-linux 2025-12-08T17:55:11.756649241+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/alphaev5-linux 2025-12-08T17:55:11.756700243+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/alphaev56-linux 2025-12-08T17:55:11.756739284+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/alphaev6-linux 2025-12-08T17:55:11.756777455+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/alphaev67-linux 2025-12-08T17:55:11.756815136+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/alphapca56-linux 2025-12-08T17:55:11.756853967+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/amd64-linux 2025-12-08T17:55:11.756916189+00:00 
stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv3l-linux 2025-12-08T17:55:11.756952330+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv4b-linux 2025-12-08T17:55:11.756988330+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv4l-linux 2025-12-08T17:55:11.757026771+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv5tejl-linux 2025-12-08T17:55:11.757072533+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv5tel-linux 2025-12-08T17:55:11.757111454+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv5tl-linux 2025-12-08T17:55:11.757158445+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv6hl-linux 2025-12-08T17:55:11.757195906+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv6l-linux 2025-12-08T17:55:11.757234507+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv7hl-linux 2025-12-08T17:55:11.757272648+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv7hnl-linux 2025-12-08T17:55:11.757310409+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv7l-linux 2025-12-08T17:55:11.757350000+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv8hl-linux 2025-12-08T17:55:11.757388591+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/armv8l-linux 2025-12-08T17:55:11.757426852+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/athlon-linux 2025-12-08T17:55:11.757477523+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/geode-linux 2025-12-08T17:55:11.757516684+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/i386-linux 2025-12-08T17:55:11.757560976+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/i486-linux 2025-12-08T17:55:11.757598367+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/i586-linux 2025-12-08T17:55:11.757637058+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/i686-linux 2025-12-08T17:55:11.757676889+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ia32e-linux 2025-12-08T17:55:11.757716300+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ia64-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/m68k-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/mips-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/mips64-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/mips64el-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/mips64r6-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/mips64r6el-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/mipsel-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/mipsr6-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/mipsr6el-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/noarch-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/pentium3-linux 
2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/pentium4-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppc-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppc32dy4-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppc64-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppc64iseries-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppc64le-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppc64p7-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppc64pseries-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppc8260-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppc8560-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppciseries-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/ppcpseries-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/riscv64-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/s390-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/s390x-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/sh-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/sh3-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/sh4-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/sh4a-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/sparc-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/sparc64-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/sparc64v-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/sparcv8-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/sparcv9-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/sparcv9v-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/rpm/platform/x86_64-linux 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/swidtag 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/swidtag/redhat.com 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/sysctl.d 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/sysimage 2025-12-08T17:55:11.759526079+00:00 stdout F skipping a dir without errors: /usr/lib/tmpfiles.d 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: 
/usr/lib64/X11 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/audit 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/bpf 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/engines-3 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/fipscheck 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/games 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/gawk 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/gconv 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/gconv/gconv-modules.d 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/gio 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/gio/modules 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/girepository-1.0 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/krb5 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/krb5/plugins 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/krb5/plugins/authdata 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/krb5/plugins/kdb 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/krb5/plugins/libkrb5 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/krb5/plugins/preauth 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/krb5/plugins/tls 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/libdnf 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/libdnf/plugins 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/libpeas-1.0 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/libpeas-1.0/loaders 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/lua 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/lua/5.4 2025-12-08T17:55:11.762960501+00:00 stdout F skipping a dir without errors: /usr/lib64/ossl-modules 2025-12-08T17:55:11.763254529+00:00 stdout F skipping a dir without errors: /usr/lib64/pkcs11 2025-12-08T17:55:11.763254529+00:00 stdout F skipping a dir without errors: /usr/lib64/pm-utils 2025-12-08T17:55:11.763254529+00:00 stdout F skipping a dir without errors: /usr/lib64/pm-utils/module.d 2025-12-08T17:55:11.763254529+00:00 stdout F skipping a dir without errors: /usr/lib64/pm-utils/power.d 2025-12-08T17:55:11.763254529+00:00 stdout F skipping a dir without errors: /usr/lib64/pm-utils/sleep.d 2025-12-08T17:55:11.763254529+00:00 stdout F skipping a dir without errors: /usr/lib64/rpm-plugins 2025-12-08T17:55:11.763254529+00:00 stdout F skipping a dir without errors: /usr/lib64/sasl2 2025-12-08T17:55:11.763254529+00:00 stdout F skipping a dir without errors: /usr/lib64/security 2025-12-08T17:55:11.763331661+00:00 stdout F skipping a dir without errors: /usr/libexec 2025-12-08T17:55:11.763331661+00:00 stdout F skipping a dir without errors: /usr/libexec/awk 2025-12-08T17:55:11.763409713+00:00 stdout F skipping a dir without errors: 
/usr/libexec/coreutils 2025-12-08T17:55:11.763409713+00:00 stdout F skipping a dir without errors: /usr/libexec/getconf 2025-12-08T17:55:11.763566407+00:00 stdout F skipping a dir without errors: /usr/libexec/openldap 2025-12-08T17:55:11.763566407+00:00 stdout F skipping a dir without errors: /usr/libexec/p11-kit 2025-12-08T17:55:11.763643769+00:00 stdout F skipping a dir without errors: /usr/libexec/selinux 2025-12-08T17:55:11.763643769+00:00 stdout F skipping a dir without errors: /usr/local 2025-12-08T17:55:11.763721591+00:00 stdout F skipping a dir without errors: /usr/local/bin 2025-12-08T17:55:11.763721591+00:00 stdout F skipping a dir without errors: /usr/local/etc 2025-12-08T17:55:11.763721591+00:00 stdout F skipping a dir without errors: /usr/local/games 2025-12-08T17:55:11.763731832+00:00 stdout F skipping a dir without errors: /usr/local/include 2025-12-08T17:55:11.763799813+00:00 stdout F skipping a dir without errors: /usr/local/lib 2025-12-08T17:55:11.763799813+00:00 stdout F skipping a dir without errors: /usr/local/lib64 2025-12-08T17:55:11.763808384+00:00 stdout F skipping a dir without errors: /usr/local/lib64/bpf 2025-12-08T17:55:11.763886566+00:00 stdout F skipping a dir without errors: /usr/local/libexec 2025-12-08T17:55:11.763886566+00:00 stdout F skipping a dir without errors: /usr/local/sbin 2025-12-08T17:55:11.763897846+00:00 stdout F skipping a dir without errors: /usr/local/share 2025-12-08T17:55:11.763954837+00:00 stdout F skipping a dir without errors: /usr/local/share/applications 2025-12-08T17:55:11.763954837+00:00 stdout F skipping a dir without errors: /usr/local/share/info 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man1 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man1x 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man2 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man2x 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man3 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man3x 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man4 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man4x 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man5 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man5x 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man6 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man6x 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man7 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man7x 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man8 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man8x 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/man9 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: 
/usr/local/share/man/man9x 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/share/man/mann 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/local/src 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/sbin 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/X11 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/aclocal 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/appdata 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/applications 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/augeas 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/augeas/lenses 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/awk 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/backgrounds 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/bash-completion 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/bash-completion/completions 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/bash-completion/helpers 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/buildinfo 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/DEFAULT 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/FIPS 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/FUTURE 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/LEGACY 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/back-ends 2025-12-08T17:55:11.766277040+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/back-ends/DEFAULT 2025-12-08T17:55:11.766456565+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/back-ends/FIPS 2025-12-08T17:55:11.766603569+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/back-ends/FUTURE 2025-12-08T17:55:11.766754463+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/back-ends/LEGACY 2025-12-08T17:55:11.767015520+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/policies 2025-12-08T17:55:11.767099172+00:00 stdout F skipping a dir without errors: /usr/share/crypto-policies/policies/modules 2025-12-08T17:55:11.767208056+00:00 stdout F skipping a dir without errors: /usr/share/desktop-directories 2025-12-08T17:55:11.767243617+00:00 stdout F skipping a dir without errors: /usr/share/dict 2025-12-08T17:55:11.767270857+00:00 stdout F skipping a dir without errors: /usr/share/doc 2025-12-08T17:55:11.767295968+00:00 stdout F skipping a dir without errors: /usr/share/empty 2025-12-08T17:55:11.767326019+00:00 stdout F skipping a dir without errors: /usr/share/file 2025-12-08T17:55:11.767366250+00:00 stdout F skipping a dir without errors: /usr/share/fontconfig 
2025-12-08T17:55:11.767402771+00:00 stdout F skipping a dir without errors: /usr/share/fontconfig/conf.avail 2025-12-08T17:55:11.767459962+00:00 stdout F skipping a dir without errors: /usr/share/fonts 2025-12-08T17:55:11.767495873+00:00 stdout F skipping a dir without errors: /usr/share/fonts/dejavu-sans-fonts 2025-12-08T17:55:11.767614697+00:00 stdout F skipping a dir without errors: /usr/share/games 2025-12-08T17:55:11.767652408+00:00 stdout F skipping a dir without errors: /usr/share/gcc-11 2025-12-08T17:55:11.767676338+00:00 stdout F skipping a dir without errors: /usr/share/gcc-11/python 2025-12-08T17:55:11.767707019+00:00 stdout F skipping a dir without errors: /usr/share/gcc-11/python/libstdcxx 2025-12-08T17:55:11.767747640+00:00 stdout F skipping a dir without errors: /usr/share/gcc-11/python/libstdcxx/__pycache__ 2025-12-08T17:55:11.767796101+00:00 stdout F skipping a dir without errors: /usr/share/gcc-11/python/libstdcxx/v6 2025-12-08T17:55:11.767840513+00:00 stdout F skipping a dir without errors: /usr/share/gcc-11/python/libstdcxx/v6/__pycache__ 2025-12-08T17:55:11.767988427+00:00 stdout F skipping a dir without errors: /usr/share/gdb 2025-12-08T17:55:11.768001937+00:00 stdout F skipping a dir without errors: /usr/share/gdb/auto-load 2025-12-08T17:55:11.768036148+00:00 stdout F skipping a dir without errors: /usr/share/gdb/auto-load/usr 2025-12-08T17:55:11.768066379+00:00 stdout F skipping a dir without errors: /usr/share/gdb/auto-load/usr/lib64 2025-12-08T17:55:11.768113160+00:00 stdout F skipping a dir without errors: /usr/share/gdb/auto-load/usr/lib64/__pycache__ 2025-12-08T17:55:11.768159191+00:00 stdout F skipping a dir without errors: /usr/share/glib-2.0 2025-12-08T17:55:11.768191242+00:00 stdout F skipping a dir without errors: /usr/share/glib-2.0/schemas 2025-12-08T17:55:11.768221263+00:00 stdout F skipping a dir without errors: /usr/share/gnome 2025-12-08T17:55:11.768251894+00:00 stdout F skipping a dir without errors: /usr/share/gnupg 2025-12-08T17:55:11.768300615+00:00 stdout F skipping a dir without errors: /usr/share/help 2025-12-08T17:55:11.768338546+00:00 stdout F skipping a dir without errors: /usr/share/i18n 2025-12-08T17:55:11.768371737+00:00 stdout F skipping a dir without errors: /usr/share/i18n/charmaps 2025-12-08T17:55:11.768405038+00:00 stdout F skipping a dir without errors: /usr/share/i18n/locales 2025-12-08T17:55:11.768445229+00:00 stdout F skipping a dir without errors: /usr/share/icons 2025-12-08T17:55:11.768492570+00:00 stdout F skipping a dir without errors: /usr/share/idl 2025-12-08T17:55:11.768526421+00:00 stdout F skipping a dir without errors: /usr/share/info 2025-12-08T17:55:11.768566672+00:00 stdout F skipping a dir without errors: /usr/share/libgpg-error 2025-12-08T17:55:11.768613433+00:00 stdout F skipping a dir without errors: /usr/share/libreport 2025-12-08T17:55:11.768642754+00:00 stdout F skipping a dir without errors: /usr/share/libreport/conf.d 2025-12-08T17:55:11.768666085+00:00 stdout F skipping a dir without errors: /usr/share/libreport/conf.d/plugins 2025-12-08T17:55:11.768683835+00:00 stdout F skipping a dir without errors: /usr/share/libreport/events 2025-12-08T17:55:11.768728606+00:00 stdout F skipping a dir without errors: /usr/share/libreport/workflows 2025-12-08T17:55:11.768801258+00:00 stdout F skipping a dir without errors: /usr/share/licenses 2025-12-08T17:55:11.768831729+00:00 stdout F skipping a dir without errors: /usr/share/licenses/alternatives 2025-12-08T17:55:11.768870830+00:00 stdout F skipping a dir without 
errors: /usr/share/licenses/audit-libs 2025-12-08T17:55:11.768920611+00:00 stdout F skipping a dir without errors: /usr/share/licenses/bash 2025-12-08T17:55:11.768962622+00:00 stdout F skipping a dir without errors: /usr/share/licenses/bzip2-libs 2025-12-08T17:55:11.769019554+00:00 stdout F skipping a dir without errors: /usr/share/licenses/coreutils-single 2025-12-08T17:55:11.769064225+00:00 stdout F skipping a dir without errors: /usr/share/licenses/crypto-policies 2025-12-08T17:55:11.769123377+00:00 stdout F skipping a dir without errors: /usr/share/licenses/cyrus-sasl-lib 2025-12-08T17:55:11.769164578+00:00 stdout F skipping a dir without errors: /usr/share/licenses/dejavu-sans-fonts 2025-12-08T17:55:11.769202209+00:00 stdout F skipping a dir without errors: /usr/share/licenses/dnf 2025-12-08T17:55:11.769263620+00:00 stdout F skipping a dir without errors: /usr/share/licenses/file-libs 2025-12-08T17:55:11.769304131+00:00 stdout F skipping a dir without errors: /usr/share/licenses/gawk 2025-12-08T17:55:11.769367583+00:00 stdout F skipping a dir without errors: /usr/share/licenses/gdbm-libs 2025-12-08T17:55:11.769407974+00:00 stdout F skipping a dir without errors: /usr/share/licenses/glib2 2025-12-08T17:55:11.769444725+00:00 stdout F skipping a dir without errors: /usr/share/licenses/glibc 2025-12-08T17:55:11.769508527+00:00 stdout F skipping a dir without errors: /usr/share/licenses/gmp 2025-12-08T17:55:11.769577939+00:00 stdout F skipping a dir without errors: /usr/share/licenses/gnupg2 2025-12-08T17:55:11.769613290+00:00 stdout F skipping a dir without errors: /usr/share/licenses/gnutls 2025-12-08T17:55:11.769674421+00:00 stdout F skipping a dir without errors: /usr/share/licenses/gobject-introspection 2025-12-08T17:55:11.769730063+00:00 stdout F skipping a dir without errors: /usr/share/licenses/gpgme 2025-12-08T17:55:11.769791354+00:00 stdout F skipping a dir without errors: /usr/share/licenses/grep 2025-12-08T17:55:11.769828635+00:00 stdout F skipping a dir without errors: /usr/share/licenses/json-c 2025-12-08T17:55:11.769889687+00:00 stdout F skipping a dir without errors: /usr/share/licenses/json-glib 2025-12-08T17:55:11.769930018+00:00 stdout F skipping a dir without errors: /usr/share/licenses/keyutils-libs 2025-12-08T17:55:11.769973100+00:00 stdout F skipping a dir without errors: /usr/share/licenses/krb5-libs 2025-12-08T17:55:11.770010751+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libarchive 2025-12-08T17:55:11.770050732+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libassuan 2025-12-08T17:55:11.770096253+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libcap 2025-12-08T17:55:11.770132194+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libcap-ng 2025-12-08T17:55:11.770170355+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libcom_err 2025-12-08T17:55:11.770219807+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libcurl-minimal 2025-12-08T17:55:11.770255858+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libdnf 2025-12-08T17:55:11.770296519+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libevent 2025-12-08T17:55:11.770331690+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libffi 2025-12-08T17:55:11.770373251+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libgcc 2025-12-08T17:55:11.770454953+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libgcrypt 
2025-12-08T17:55:11.770494184+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libgpg-error 2025-12-08T17:55:11.770541665+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libidn2 2025-12-08T17:55:11.770611207+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libksba 2025-12-08T17:55:11.770680909+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libmodulemd 2025-12-08T17:55:11.770710940+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libmount 2025-12-08T17:55:11.770773891+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libnghttp2 2025-12-08T17:55:11.770782962+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libpeas 2025-12-08T17:55:11.770856794+00:00 stdout F skipping a dir without errors: /usr/share/licenses/librepo 2025-12-08T17:55:11.770916255+00:00 stdout F skipping a dir without errors: /usr/share/licenses/librhsm 2025-12-08T17:55:11.770995707+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libselinux 2025-12-08T17:55:11.771036318+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libsemanage 2025-12-08T17:55:11.771083660+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libsepol 2025-12-08T17:55:11.771118671+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libsigsegv 2025-12-08T17:55:11.771163172+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libsmartcols 2025-12-08T17:55:11.771224293+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libsolv 2025-12-08T17:55:11.771245954+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libtasn1 2025-12-08T17:55:11.771306426+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libunistring 2025-12-08T17:55:11.771357407+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libusbx 2025-12-08T17:55:11.771399028+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libuuid 2025-12-08T17:55:11.771442189+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libverto 2025-12-08T17:55:11.771478890+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libxcrypt 2025-12-08T17:55:11.771533762+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libxml2 2025-12-08T17:55:11.771570353+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libyaml 2025-12-08T17:55:11.771606424+00:00 stdout F skipping a dir without errors: /usr/share/licenses/libzstd 2025-12-08T17:55:11.771651935+00:00 stdout F skipping a dir without errors: /usr/share/licenses/microdnf 2025-12-08T17:55:11.771688986+00:00 stdout F skipping a dir without errors: /usr/share/licenses/mpfr 2025-12-08T17:55:11.771751067+00:00 stdout F skipping a dir without errors: /usr/share/licenses/ncurses-base 2025-12-08T17:55:11.771788798+00:00 stdout F skipping a dir without errors: /usr/share/licenses/nettle 2025-12-08T17:55:11.771834090+00:00 stdout F skipping a dir without errors: /usr/share/licenses/npth 2025-12-08T17:55:11.771884701+00:00 stdout F skipping a dir without errors: /usr/share/licenses/openldap 2025-12-08T17:55:11.771930982+00:00 stdout F skipping a dir without errors: /usr/share/licenses/openssl-libs 2025-12-08T17:55:11.771967863+00:00 stdout F skipping a dir without errors: /usr/share/licenses/p11-kit 2025-12-08T17:55:11.772004794+00:00 stdout F skipping a dir without errors: /usr/share/licenses/pcre 2025-12-08T17:55:11.772062196+00:00 stdout F skipping a dir without errors: 
/usr/share/licenses/pcre2-syntax 2025-12-08T17:55:11.772101997+00:00 stdout F skipping a dir without errors: /usr/share/licenses/popt 2025-12-08T17:55:11.772138308+00:00 stdout F skipping a dir without errors: /usr/share/licenses/readline 2025-12-08T17:55:11.772182939+00:00 stdout F skipping a dir without errors: /usr/share/licenses/rpm 2025-12-08T17:55:11.772219160+00:00 stdout F skipping a dir without errors: /usr/share/licenses/sed 2025-12-08T17:55:11.772258821+00:00 stdout F skipping a dir without errors: /usr/share/licenses/setup 2025-12-08T17:55:11.772296462+00:00 stdout F skipping a dir without errors: /usr/share/licenses/shadow-utils 2025-12-08T17:55:11.772345183+00:00 stdout F skipping a dir without errors: /usr/share/licenses/systemd 2025-12-08T17:55:11.772381404+00:00 stdout F skipping a dir without errors: /usr/share/licenses/tzdata 2025-12-08T17:55:11.772429105+00:00 stdout F skipping a dir without errors: /usr/share/licenses/xz-libs 2025-12-08T17:55:11.772470517+00:00 stdout F skipping a dir without errors: /usr/share/licenses/zlib 2025-12-08T17:55:11.772510798+00:00 stdout F skipping a dir without errors: /usr/share/locale 2025-12-08T17:55:11.772721503+00:00 stdout F skipping a dir without errors: /usr/share/lua 2025-12-08T17:55:11.772721503+00:00 stdout F skipping a dir without errors: /usr/share/lua/5.4 2025-12-08T17:55:11.772721503+00:00 stdout F skipping a dir without errors: /usr/share/man 2025-12-08T17:55:11.772721503+00:00 stdout F skipping a dir without errors: /usr/share/man/man0p 2025-12-08T17:55:11.772721503+00:00 stdout F skipping a dir without errors: /usr/share/man/man1 2025-12-08T17:55:11.772721503+00:00 stdout F skipping a dir without errors: /usr/share/man/man1p 2025-12-08T17:55:11.772721503+00:00 stdout F skipping a dir without errors: /usr/share/man/man1x 2025-12-08T17:55:11.772743964+00:00 stdout F skipping a dir without errors: /usr/share/man/man2 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man2x 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man3 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man3p 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man3x 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man4 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man4x 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man5 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man5x 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man6 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man6x 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man7 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man7x 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man8 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man8x 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man9 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/man/man9x 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: 
/usr/share/man/mann 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/metainfo 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/mime-info 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/misc 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/omf 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/p11-kit 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/p11-kit/modules 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/pixmaps 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/pki 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/pki/ca-trust-legacy 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/pki/ca-trust-source 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/pki/ca-trust-source/anchors 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/pki/ca-trust-source/blocklist 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/sounds 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/tabset 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/A 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/E 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/a 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/b 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/c 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/d 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/e 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/g 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/h 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/j 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/k 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/l 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/m 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/n 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/p 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/r 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/s 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/t 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/v 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/terminfo/w 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: 
/usr/share/terminfo/x 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/themes 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/wayland-sessions 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/share/xsessions 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/src 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/src/debug 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /usr/src/kernels 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /util 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /var 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /var/adm 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /var/cache 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /var/db 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /var/empty 2025-12-08T17:55:11.776101095+00:00 stdout F skipping a dir without errors: /var/ftp 2025-12-08T17:55:11.776158866+00:00 stdout F skipping a dir without errors: /var/games 2025-12-08T17:55:11.776158866+00:00 stdout F skipping a dir without errors: /var/kerberos 2025-12-08T17:55:11.776158866+00:00 stdout F skipping a dir without errors: /var/kerberos/krb5 2025-12-08T17:55:11.776208178+00:00 stdout F skipping a dir without errors: /var/kerberos/krb5/user 2025-12-08T17:55:11.776208178+00:00 stdout F skipping a dir without errors: /var/lib 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/lib/alternatives 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/lib/dnf 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/lib/games 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/lib/misc 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/lib/rpm 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/lib/rpm-state 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/lib/selinux 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/lib/selinux/tmp 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/local 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/log 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/nis 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/opt 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/preserve 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/spool 2025-12-08T17:55:11.776680900+00:00 stdout F skipping a dir without errors: /var/spool/lpd 2025-12-08T17:55:11.776718631+00:00 stdout F skipping a dir without errors: /var/spool/mail 2025-12-08T17:55:11.776750342+00:00 stdout F skipping a dir without errors: /var/tmp 2025-12-08T17:55:11.776757462+00:00 stdout F skipping a dir without errors: /var/yp 2025-12-08T17:55:11.776785823+00:00 stdout F &{metadata/annotations.yaml manifests/} ././@LongLink0000644000000000000000000000031700000000000011604 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/util/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000755000175000017500000000000015115611521033075 5ustar zuulzuul././@LongLink0000644000000000000000000000032400000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/util/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_0000644000175000017500000000010715115611514033077 0ustar zuulzuul2025-12-08T17:55:05.380836303+00:00 stdout F '/bin/cpb' -> '/util/cpb' ././@LongLink0000644000000000000000000000023300000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-regist0000755000175000017500000000000015115611513033024 5ustar zuulzuul././@LongLink0000644000000000000000000000024300000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00/node-ca/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-regist0000755000175000017500000000000015115611521033023 5ustar zuulzuul././@LongLink0000644000000000000000000000025000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00/node-ca/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-regist0000644000175000017500000003074015115611513033032 0ustar zuulzuul2025-12-08T17:44:02.332460867+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:44:02.343338454+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:44:02.349616565+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:44:02.358540448+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:44:02.366920257+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:44:02.373155448+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:45:02.388856808+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:45:02.398653931+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:45:02.408472304+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:45:02.425762184+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:45:02.429293493+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:45:02.434936010+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:46:02.449457339+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:46:02.461311455+00:00 stdout F image-registry.openshift-image-registry.svc..5000 
2025-12-08T17:46:02.471077327+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:46:02.481943233+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:46:02.486283164+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:46:02.491409648+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:47:02.508758809+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:47:02.518483045+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:47:02.527216639+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:47:02.540406965+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:47:02.544294537+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:47:02.549813101+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:48:02.564616343+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:48:02.572281694+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:48:02.580111831+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:48:02.590296520+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:48:02.594371113+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:48:02.598187399+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:49:02.607777407+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:49:02.614142593+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:49:02.622815015+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:49:02.634102224+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:49:02.638432635+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:49:02.642066919+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:50:02.656412876+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:50:02.662742582+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:50:02.668593588+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:50:02.677924470+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:50:02.681369814+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:50:02.685029282+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:51:02.696347852+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:51:02.703232182+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:51:02.708282718+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:51:02.715545808+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:51:02.719974877+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:51:02.724180938+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:52:02.736668734+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 
2025-12-08T17:52:02.744907363+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:52:02.753721237+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:52:02.763928440+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:52:02.768220109+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:52:02.773312270+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:53:02.788927033+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:53:02.798472297+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:53:02.807507268+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:53:02.821971174+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:53:02.826341291+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:53:02.832058983+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:54:02.848315835+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:54:02.857834493+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:54:02.866839448+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:54:02.878148196+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:54:02.882651598+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:54:02.887788698+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:55:02.903133455+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:55:02.912519818+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:55:02.920392830+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:55:02.936069721+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:55:02.942356890+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:55:02.948927458+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:56:02.964685001+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:56:02.972787564+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:56:02.980528346+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:56:02.994214792+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:56:03.000057742+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:56:03.005072110+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:57:03.018224892+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:57:03.024153297+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:57:03.029205179+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:57:03.036366216+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:57:03.039806055+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:57:03.042821124+00:00 stdout F image-registry.openshift-image-registry.svc:5000 
2025-12-08T17:58:03.052092679+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:58:03.064823118+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:58:03.071928663+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:58:03.083856161+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:58:03.087560866+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:58:03.090261486+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T17:59:03.102293800+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:59:03.111418591+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T17:59:03.119039631+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T17:59:03.128776309+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T17:59:03.134432548+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T17:59:03.137212091+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T18:00:03.147786741+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T18:00:03.154378814+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T18:00:03.161489692+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T18:00:03.170481718+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T18:00:03.174554244+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T18:00:03.179206517+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T18:01:03.191168330+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T18:01:03.197616012+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T18:01:03.204671410+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T18:01:03.214913314+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T18:01:03.218614802+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T18:01:03.222924286+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T18:02:03.238238264+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T18:02:03.246153085+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T18:02:03.254618360+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T18:02:03.266985399+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T18:02:03.271441598+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T18:02:03.274425918+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T18:03:03.285298376+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T18:03:03.291259865+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T18:03:03.296477455+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T18:03:03.303558815+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T18:03:03.306510784+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 
2025-12-08T18:03:03.309172575+00:00 stdout F image-registry.openshift-image-registry.svc:5000 2025-12-08T18:04:03.319197124+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T18:04:03.325136762+00:00 stdout F image-registry.openshift-image-registry.svc..5000 2025-12-08T18:04:03.334787137+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local..5000 2025-12-08T18:04:03.343544449+00:00 stdout F default-route-openshift-image-registry.apps-crc.testing 2025-12-08T18:04:03.346720075+00:00 stdout F image-registry.openshift-image-registry.svc.cluster.local:5000 2025-12-08T18:04:03.349670183+00:00 stdout F image-registry.openshift-image-registry.svc:5000 ././@LongLink0000644000000000000000000000025100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator0000755000175000017500000000000015115611513033064 5ustar zuulzuul././@LongLink0000644000000000000000000000026600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/dns-operator/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator0000755000175000017500000000000015115611520033062 5ustar zuulzuul././@LongLink0000644000000000000000000000027300000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/dns-operator/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator0000644000175000017500000003347515115611513033102 0ustar zuulzuul2025-12-08T17:44:22.373302781+00:00 stderr F I1208 17:44:22.372426 1 simple_featuregate_reader.go:171] Starting feature-gate-detector 2025-12-08T17:44:22.416869850+00:00 stderr F I1208 17:44:22.415800 1 event.go:377] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-dns-operator", Name:"dns-operator", UID:"", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BuildCSIVolumes", "CPMSMachineNamePrefix", "ConsolePluginContentSecurityPolicy", "GatewayAPI", "GatewayAPIController", "HighlyAvailableArbiter", "ImageVolume", "IngressControllerLBSubnetsAWS", "KMSv1", "MachineConfigNodes", "ManagedBootImages", "ManagedBootImagesAWS", "MetricsCollectionProfiles", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NetworkSegmentation", "NewOLM", "PinnedImages", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SetEIPForNLBIngressController", "SigstoreImageVerification", "StoragePerformantSecurityPolicy", "UpgradeStatus", "UserNamespacesPodSecurityStandards", "UserNamespacesSupport", "VSphereMultiDisk", "VSphereMultiNetworks"}, Disabled:[]v1.FeatureGateName{"AWSClusterHostedDNS", "AWSClusterHostedDNSInstall", "AWSDedicatedHosts", "AWSServiceLBNetworkSecurityGroup", "AutomatedEtcdBackup", "AzureClusterHostedDNSInstall", "AzureDedicatedHosts", "AzureMultiDisk", 
"BootImageSkewEnforcement", "BootcNodeManagement", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "ClusterVersionOperatorConfiguration", "DNSNameResolver", "DualReplica", "DyanmicServiceEndpointIBMCloud", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "Example2", "ExternalOIDC", "ExternalOIDCWithUIDAndExtraClaimMappings", "ExternalSnapshotMetadata", "GCPClusterHostedDNS", "GCPClusterHostedDNSInstall", "GCPCustomAPIEndpoints", "GCPCustomAPIEndpointsInstall", "ImageModeStatusReporting", "ImageStreamImportMode", "IngressControllerDynamicConfigurationManager", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "IrreconcilableMachineConfig", "KMSEncryptionProvider", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "ManagedBootImagesAzure", "ManagedBootImagesvSphere", "MaxUnavailableStatefulSet", "MinimumKubeletVersion", "MixedCPUsAllocation", "MultiArchInstallAzure", "MultiDiskSetup", "MutatingAdmissionPolicy", "NewOLMCatalogdAPIV1Metas", "NewOLMOwnSingleNamespace", "NewOLMPreflightPermissionChecks", "NewOLMWebhookProviderOpenshiftServiceCA", "NoRegistryClusterOperations", "NodeSwap", "NutanixMultiSubnets", "OVNObservability", "OpenShiftPodSecurityAdmission", "PreconfiguredUDNAddresses", "SELinuxMount", "ShortCertRotation", "SignatureStores", "SigstoreImageVerificationPKI", "TranslateStreamCloseWebsocketRequests", "VSphereConfigurableMaxAllowedBlockVolumesPerNode", "VSphereHostVMGroupZonal", "VSphereMixedNodeEnv", "VolumeAttributesClass", "VolumeGroupSnapshot"}} 2025-12-08T17:44:22.416869850+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="FeatureGates initializedknownFeatures[AWSClusterHostedDNS AWSClusterHostedDNSInstall AWSDedicatedHosts AWSServiceLBNetworkSecurityGroup AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AutomatedEtcdBackup AzureClusterHostedDNSInstall AzureDedicatedHosts AzureMultiDisk AzureWorkloadIdentity BootImageSkewEnforcement BootcNodeManagement BuildCSIVolumes CPMSMachineNamePrefix ClusterAPIInstall ClusterAPIInstallIBMCloud ClusterMonitoringConfig ClusterVersionOperatorConfiguration ConsolePluginContentSecurityPolicy DNSNameResolver DualReplica DyanmicServiceEndpointIBMCloud DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example Example2 ExternalOIDC ExternalOIDCWithUIDAndExtraClaimMappings ExternalSnapshotMetadata GCPClusterHostedDNS GCPClusterHostedDNSInstall GCPCustomAPIEndpoints GCPCustomAPIEndpointsInstall GatewayAPI GatewayAPIController HighlyAvailableArbiter ImageModeStatusReporting ImageStreamImportMode ImageVolume IngressControllerDynamicConfigurationManager IngressControllerLBSubnetsAWS InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather IrreconcilableMachineConfig KMSEncryptionProvider KMSv1 MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController MachineConfigNodes ManagedBootImages ManagedBootImagesAWS ManagedBootImagesAzure ManagedBootImagesvSphere MaxUnavailableStatefulSet MetricsCollectionProfiles MinimumKubeletVersion MixedCPUsAllocation MultiArchInstallAzure MultiDiskSetup MutatingAdmissionPolicy NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM NewOLMCatalogdAPIV1Metas NewOLMOwnSingleNamespace NewOLMPreflightPermissionChecks NewOLMWebhookProviderOpenshiftServiceCA NoRegistryClusterOperations NodeSwap NutanixMultiSubnets OVNObservability OpenShiftPodSecurityAdmission PinnedImages PreconfiguredUDNAddresses ProcMountType RouteAdvertisements RouteExternalCertificate 
SELinuxMount ServiceAccountTokenNodeBinding SetEIPForNLBIngressController ShortCertRotation SignatureStores SigstoreImageVerification SigstoreImageVerificationPKI StoragePerformantSecurityPolicy TranslateStreamCloseWebsocketRequests UpgradeStatus UserNamespacesPodSecurityStandards UserNamespacesSupport VSphereConfigurableMaxAllowedBlockVolumesPerNode VSphereHostVMGroupZonal VSphereMixedNodeEnv VSphereMultiDisk VSphereMultiNetworks VolumeAttributesClass VolumeGroupSnapshot]" 2025-12-08T17:44:22.422075361+00:00 stderr F 2025-12-08T17:44:22Z INFO controller-runtime.metrics Starting metrics server 2025-12-08T17:44:22.422493163+00:00 stderr F 2025-12-08T17:44:22Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": "127.0.0.1:60000", "secure": false} 2025-12-08T17:44:22.422649908+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting EventSource {"controller": "dns_controller", "source": "kind source: *v1.DNS"} 2025-12-08T17:44:22.422704839+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting EventSource {"controller": "dns_controller", "source": "kind source: *v1.DaemonSet"} 2025-12-08T17:44:22.422747940+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting EventSource {"controller": "dns_controller", "source": "kind source: *v1.Service"} 2025-12-08T17:44:22.422780541+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting EventSource {"controller": "dns_controller", "source": "kind source: *v1.ConfigMap"} 2025-12-08T17:44:22.422804692+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting EventSource {"controller": "dns_controller", "source": "kind source: *v1.ConfigMap"} 2025-12-08T17:44:22.422834433+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting EventSource {"controller": "dns_controller", "source": "kind source: *v1.Node"} 2025-12-08T17:44:22.422860453+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting Controller {"controller": "dns_controller"} 2025-12-08T17:44:22.424328753+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting EventSource {"controller": "status_controller", "source": "kind source: *v1.DNS"} 2025-12-08T17:44:22.424328753+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting EventSource {"controller": "status_controller", "source": "kind source: *v1.DaemonSet"} 2025-12-08T17:44:22.424328753+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting EventSource {"controller": "status_controller", "source": "kind source: *v1.ClusterOperator"} 2025-12-08T17:44:22.424328753+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting Controller {"controller": "status_controller"} 2025-12-08T17:44:22.670967800+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting workers {"controller": "status_controller", "worker count": 1} 2025-12-08T17:44:22.873083153+00:00 stderr F 2025-12-08T17:44:22Z INFO Starting workers {"controller": "dns_controller", "worker count": 1} 2025-12-08T17:44:22.873083153+00:00 stderr F time="2025-12-08T17:44:22Z" level=info msg="reconciling request: /default" 2025-12-08T17:44:23.074102017+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="updated DNS default status: old: v1.DNSStatus{ClusterIP:\"10.217.4.10\", ClusterDomain:\"cluster.local\", Conditions:[]v1.OperatorCondition{v1.OperatorCondition{Type:\"Degraded\", Status:\"True\", LastTransitionTime:time.Date(2025, time.November, 3, 9, 40, 47, 0, time.Local), Reason:\"NoDNSPodsDesired\", Message:\"No DNS pods are desired; this could mean all nodes are tainted or unschedulable.\"}, v1.OperatorCondition{Type:\"Progressing\", Status:\"True\", LastTransitionTime:time.Date(2025, time.November, 3, 9, 40, 48, 0, time.Local), 
Reason:\"Reconciling\", Message:\"Have 0 available node-resolver pods, want 1.\"}, v1.OperatorCondition{Type:\"Available\", Status:\"False\", LastTransitionTime:time.Date(2025, time.November, 3, 9, 40, 47, 0, time.Local), Reason:\"NoDaemonSetPods\", Message:\"The DNS daemonset has no pods available.\"}, v1.OperatorCondition{Type:\"Upgradeable\", Status:\"True\", LastTransitionTime:time.Date(2025, time.November, 2, 7, 55, 11, 0, time.Local), Reason:\"AsExpected\", Message:\"DNS Operator can be upgraded\"}}}, new: v1.DNSStatus{ClusterIP:\"10.217.4.10\", ClusterDomain:\"cluster.local\", Conditions:[]v1.OperatorCondition{v1.OperatorCondition{Type:\"Degraded\", Status:\"True\", LastTransitionTime:time.Date(2025, time.November, 3, 9, 40, 47, 0, time.Local), Reason:\"NoDNSPodsAvailable\", Message:\"No DNS pods are available.\"}, v1.OperatorCondition{Type:\"Progressing\", Status:\"True\", LastTransitionTime:time.Date(2025, time.November, 3, 9, 40, 48, 0, time.Local), Reason:\"Reconciling\", Message:\"Have 0 available DNS pods, want 1.\"}, v1.OperatorCondition{Type:\"Available\", Status:\"False\", LastTransitionTime:time.Date(2025, time.November, 3, 9, 40, 47, 0, time.Local), Reason:\"NoDaemonSetPods\", Message:\"The DNS daemonset has no pods available.\"}, v1.OperatorCondition{Type:\"Upgradeable\", Status:\"True\", LastTransitionTime:time.Date(2025, time.November, 2, 7, 55, 11, 0, time.Local), Reason:\"AsExpected\", Message:\"DNS Operator can be upgraded\"}}}" 2025-12-08T17:44:23.094040990+00:00 stderr F time="2025-12-08T17:44:23Z" level=info msg="reconciling request: /default" 2025-12-08T17:44:35.625650425+00:00 stderr F time="2025-12-08T17:44:35Z" level=info msg="reconciling request: /default" 2025-12-08T17:44:44.476654492+00:00 stderr F time="2025-12-08T17:44:44Z" level=info msg="reconciling request: /default" 2025-12-08T17:44:44.528027851+00:00 stderr F time="2025-12-08T17:44:44Z" level=info msg="updated DNS default status: old: v1.DNSStatus{ClusterIP:\"10.217.4.10\", ClusterDomain:\"cluster.local\", Conditions:[]v1.OperatorCondition{v1.OperatorCondition{Type:\"Degraded\", Status:\"True\", LastTransitionTime:time.Date(2025, time.November, 3, 9, 40, 47, 0, time.Local), Reason:\"NoDNSPodsAvailable\", Message:\"No DNS pods are available.\"}, v1.OperatorCondition{Type:\"Progressing\", Status:\"True\", LastTransitionTime:time.Date(2025, time.November, 3, 9, 40, 48, 0, time.Local), Reason:\"Reconciling\", Message:\"Have 0 available DNS pods, want 1.\"}, v1.OperatorCondition{Type:\"Available\", Status:\"False\", LastTransitionTime:time.Date(2025, time.November, 3, 9, 40, 47, 0, time.Local), Reason:\"NoDaemonSetPods\", Message:\"The DNS daemonset has no pods available.\"}, v1.OperatorCondition{Type:\"Upgradeable\", Status:\"True\", LastTransitionTime:time.Date(2025, time.November, 2, 7, 55, 11, 0, time.Local), Reason:\"AsExpected\", Message:\"DNS Operator can be upgraded\"}}}, new: v1.DNSStatus{ClusterIP:\"10.217.4.10\", ClusterDomain:\"cluster.local\", Conditions:[]v1.OperatorCondition{v1.OperatorCondition{Type:\"Degraded\", Status:\"False\", LastTransitionTime:time.Date(2025, time.December, 8, 17, 44, 44, 0, time.Local), Reason:\"AsExpected\", Message:\"Enough DNS pods are available, and the DNS service has a cluster IP address.\"}, v1.OperatorCondition{Type:\"Progressing\", Status:\"False\", LastTransitionTime:time.Date(2025, time.December, 8, 17, 44, 44, 0, time.Local), Reason:\"AsExpected\", Message:\"All DNS and node-resolver pods are available, and the DNS service has a cluster IP address.\"}, 
v1.OperatorCondition{Type:\"Available\", Status:\"True\", LastTransitionTime:time.Date(2025, time.December, 8, 17, 44, 44, 0, time.Local), Reason:\"AsExpected\", Message:\"The DNS daemonset has available pods, and the DNS service has a cluster IP address.\"}, v1.OperatorCondition{Type:\"Upgradeable\", Status:\"True\", LastTransitionTime:time.Date(2025, time.November, 2, 7, 55, 11, 0, time.Local), Reason:\"AsExpected\", Message:\"DNS Operator can be upgraded\"}}}" 2025-12-08T17:44:44.530535391+00:00 stderr F time="2025-12-08T17:44:44Z" level=info msg="reconciling request: /default" 2025-12-08T17:45:11.928699884+00:00 stderr F time="2025-12-08T17:45:11Z" level=info msg="reconciling request: /default" 2025-12-08T17:45:14.215924876+00:00 stderr F time="2025-12-08T17:45:14Z" level=info msg="reconciling request: /default" 2025-12-08T17:46:22.440925624+00:00 stderr F time="2025-12-08T17:46:22Z" level=error msg="failed to ensure default dns Get \"https://10.217.4.1:443/apis/operator.openshift.io/v1/dnses/default\": dial tcp 10.217.4.1:443: connect: connection refused" 2025-12-08T17:47:04.514238160+00:00 stderr F time="2025-12-08T17:47:04Z" level=info msg="reconciling request: /default" 2025-12-08T17:47:31.126488650+00:00 stderr F time="2025-12-08T17:47:31Z" level=info msg="reconciling request: /default" 2025-12-08T17:47:31.169455093+00:00 stderr F time="2025-12-08T17:47:31Z" level=info msg="reconciling request: /default" 2025-12-08T17:47:39.010503021+00:00 stderr F time="2025-12-08T17:47:39Z" level=info msg="reconciling request: /default" 2025-12-08T17:47:46.256458635+00:00 stderr F time="2025-12-08T17:47:46Z" level=info msg="reconciling request: /default" 2025-12-08T17:47:46.301806143+00:00 stderr F time="2025-12-08T17:47:46Z" level=info msg="reconciling request: /default" 2025-12-08T17:47:51.087096240+00:00 stderr F time="2025-12-08T17:47:51Z" level=info msg="reconciling request: /default" ././@LongLink0000644000000000000000000000027100000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator0000755000175000017500000000000015115611520033062 5ustar zuulzuul././@LongLink0000644000000000000000000000027600000000000011610 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator0000644000175000017500000000202015115611513033060 0ustar zuulzuul2025-12-08T17:44:24.081061693+00:00 stderr F W1208 17:44:24.077860 1 deprecated.go:66] 2025-12-08T17:44:24.081061693+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:24.081061693+00:00 stderr F 2025-12-08T17:44:24.081061693+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 
2025-12-08T17:44:24.081061693+00:00 stderr F 2025-12-08T17:44:24.081061693+00:00 stderr F =============================================== 2025-12-08T17:44:24.081061693+00:00 stderr F 2025-12-08T17:44:24.081061693+00:00 stderr F I1208 17:44:24.080640 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:24.094922152+00:00 stderr F I1208 17:44:24.094764 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:24.096951476+00:00 stderr F I1208 17:44:24.096043 1 kube-rbac-proxy.go:397] Starting TCP socket on :9393 2025-12-08T17:44:24.096951476+00:00 stderr F I1208 17:44:24.096787 1 kube-rbac-proxy.go:404] Listening securely on :9393 ././@LongLink0000644000000000000000000000024300000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000755000175000017500000000000015115611514033023 5ustar zuulzuul././@LongLink0000644000000000000000000000025200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6/pruner/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000755000175000017500000000000015115611523033023 5ustar zuulzuul././@LongLink0000644000000000000000000000025700000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6/pruner/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-schedul0000644000175000017500000000355415115611514033034 0ustar zuulzuul2025-12-08T17:44:27.541258567+00:00 stderr F I1208 17:44:27.537536 1 cmd.go:39] &{ true {false} prune true map[cert-dir:0xc0008dd7c0 max-eligible-revision:0xc0008dd540 protected-revisions:0xc0008dd5e0 resource-dir:0xc0008dd680 static-pod-name:0xc0008dd720 v:0xc0008ddea0] [0xc0008ddea0 0xc0008dd540 0xc0008dd5e0 0xc0008dd680 0xc0008dd7c0 0xc0008dd720] [] map[cert-dir:0xc0008dd7c0 help:0xc0008ea320 log-flush-frequency:0xc0008dde00 max-eligible-revision:0xc0008dd540 protected-revisions:0xc0008dd5e0 resource-dir:0xc0008dd680 static-pod-name:0xc0008dd720 v:0xc0008ddea0 vmodule:0xc0008ea000] [0xc0008dd540 0xc0008dd5e0 0xc0008dd680 0xc0008dd720 0xc0008dd7c0 0xc0008dde00 0xc0008ddea0 0xc0008ea000 0xc0008ea320] [0xc0008dd7c0 0xc0008ea320 0xc0008dde00 0xc0008dd540 0xc0008dd5e0 0xc0008dd680 0xc0008dd720 0xc0008ddea0 0xc0008ea000] map[104:0xc0008ea320 118:0xc0008ddea0] [] -1 0 0xc0008d6480 true 0x242b060 []} 2025-12-08T17:44:27.541258567+00:00 stderr F I1208 17:44:27.537844 1 cmd.go:40] (*prune.PruneOptions)(0xc0008cc820)({ 2025-12-08T17:44:27.541258567+00:00 stderr F MaxEligibleRevision: (int) 6, 2025-12-08T17:44:27.541258567+00:00 stderr F ProtectedRevisions: ([]int) (len=5 cap=5) { 2025-12-08T17:44:27.541258567+00:00 stderr F (int) 2, 2025-12-08T17:44:27.541258567+00:00 stderr F (int) 3, 2025-12-08T17:44:27.541258567+00:00 stderr F (int) 4, 2025-12-08T17:44:27.541258567+00:00 stderr F (int) 5, 2025-12-08T17:44:27.541258567+00:00 stderr F (int) 6 2025-12-08T17:44:27.541258567+00:00 stderr F }, 2025-12-08T17:44:27.541258567+00:00 stderr F ResourceDir: (string) (len=36) "/etc/kubernetes/static-pod-resources", 2025-12-08T17:44:27.541258567+00:00 
stderr F CertDir: (string) (len=20) "kube-scheduler-certs", 2025-12-08T17:44:27.541258567+00:00 stderr F StaticPodName: (string) (len=18) "kube-scheduler-pod" 2025-12-08T17:44:27.541258567+00:00 stderr F }) ././@LongLink0000644000000000000000000000024400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000755000175000017500000000000015115611513033044 5ustar zuulzuul././@LongLink0000644000000000000000000000025300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00/pruner/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000755000175000017500000000000015115611520033042 5ustar zuulzuul././@LongLink0000644000000000000000000000026000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00/pruner/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserv0000644000175000017500000000355615115611513033057 0ustar zuulzuul2025-12-08T17:44:29.517762529+00:00 stderr F I1208 17:44:29.515675 1 cmd.go:39] &{ true {false} prune true map[cert-dir:0xc0003c0aa0 max-eligible-revision:0xc0003c00a0 protected-revisions:0xc0003c06e0 resource-dir:0xc0003c0820 static-pod-name:0xc0003c0960 v:0xc000309220] [0xc000309220 0xc0003c00a0 0xc0003c06e0 0xc0003c0820 0xc0003c0aa0 0xc0003c0960] [] map[cert-dir:0xc0003c0aa0 help:0xc0003f46e0 log-flush-frequency:0xc0003090e0 max-eligible-revision:0xc0003c00a0 protected-revisions:0xc0003c06e0 resource-dir:0xc0003c0820 static-pod-name:0xc0003c0960 v:0xc000309220 vmodule:0xc000309360] [0xc0003c00a0 0xc0003c06e0 0xc0003c0820 0xc0003c0960 0xc0003c0aa0 0xc0003090e0 0xc000309220 0xc000309360 0xc0003f46e0] [0xc0003c0aa0 0xc0003f46e0 0xc0003090e0 0xc0003c00a0 0xc0003c06e0 0xc0003c0820 0xc0003c0960 0xc000309220 0xc000309360] map[104:0xc0003f46e0 118:0xc000309220] [] -1 0 0xc0000384e0 true 0xae3c00 []} 2025-12-08T17:44:29.517762529+00:00 stderr F I1208 17:44:29.515809 1 cmd.go:40] (*prune.PruneOptions)(0xc000638050)({ 2025-12-08T17:44:29.517762529+00:00 stderr F MaxEligibleRevision: (int) 11, 2025-12-08T17:44:29.517762529+00:00 stderr F ProtectedRevisions: ([]int) (len=5 cap=5) { 2025-12-08T17:44:29.517762529+00:00 stderr F (int) 7, 2025-12-08T17:44:29.517762529+00:00 stderr F (int) 8, 2025-12-08T17:44:29.517762529+00:00 stderr F (int) 9, 2025-12-08T17:44:29.517762529+00:00 stderr F (int) 10, 2025-12-08T17:44:29.517762529+00:00 stderr F (int) 11 2025-12-08T17:44:29.517762529+00:00 stderr F }, 2025-12-08T17:44:29.517762529+00:00 stderr F ResourceDir: (string) (len=36) "/etc/kubernetes/static-pod-resources", 2025-12-08T17:44:29.517762529+00:00 stderr F CertDir: (string) (len=20) "kube-apiserver-certs", 2025-12-08T17:44:29.517762529+00:00 stderr F StaticPodName: (string) (len=18) "kube-apiserver-pod" 2025-12-08T17:44:29.517762529+00:00 stderr F }) ././@LongLink0000644000000000000000000000025200000000000011602 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-mana0000755000175000017500000000000015115611513032746 5ustar zuulzuul././@LongLink0000644000000000000000000000030200000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f/cert-manager-cainjector/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-mana0000755000175000017500000000000015115611520032744 5ustar zuulzuul././@LongLink0000644000000000000000000000030700000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f/cert-manager-cainjector/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-mana0000644000175000017500000003467015115611513032762 0ustar zuulzuul2025-12-08T17:55:57.775261265+00:00 stderr F I1208 17:55:57.775020 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:55:57.775261265+00:00 stderr F I1208 17:55:57.775150 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:55:57.775261265+00:00 stderr F I1208 17:55:57.775156 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:55:57.775261265+00:00 stderr F I1208 17:55:57.775162 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:55:57.802920194+00:00 stderr F I1208 17:55:57.797251 1 setup.go:119] "Registering a reconciler for injectable" logger="cert-manager" kind="mutatingwebhookconfiguration" 2025-12-08T17:55:57.802920194+00:00 stderr F I1208 17:55:57.798428 1 setup.go:119] "Registering a reconciler for injectable" logger="cert-manager" kind="validatingwebhookconfiguration" 2025-12-08T17:55:57.802920194+00:00 stderr F I1208 17:55:57.798484 1 setup.go:119] "Registering a reconciler for injectable" logger="cert-manager" kind="apiservice" 2025-12-08T17:55:57.802920194+00:00 stderr F I1208 17:55:57.799296 1 setup.go:119] "Registering a reconciler for injectable" logger="cert-manager" kind="customresourcedefinition" 2025-12-08T17:55:57.802920194+00:00 stderr F I1208 17:55:57.799401 1 server.go:208] "Starting metrics server" logger="cert-manager.controller-runtime.metrics" 2025-12-08T17:55:57.802920194+00:00 stderr F I1208 17:55:57.799757 1 server.go:247] "Serving metrics server" logger="cert-manager.controller-runtime.metrics" bindAddress="0.0.0.0:9402" secure=false 2025-12-08T17:55:57.820037223+00:00 stderr F I1208 17:55:57.806006 1 reflector.go:376] Caches populated for *v1.MutatingWebhookConfiguration from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:55:57.820037223+00:00 stderr F I1208 17:55:57.807953 1 reflector.go:376] Caches populated for *v1.ValidatingWebhookConfiguration from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:55:57.820037223+00:00 stderr F I1208 17:55:57.813478 1 reflector.go:376] Caches populated for *v1.APIService from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:55:58.368025299+00:00 
stderr F I1208 17:55:58.367912 1 reflector.go:376] Caches populated for *v1.CustomResourceDefinition from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:55:58.405956461+00:00 stderr F I1208 17:55:58.405087 1 leaderelection.go:257] attempting to acquire leader lease kube-system/cert-manager-cainjector-leader-election... 2025-12-08T17:55:58.410650880+00:00 stderr F I1208 17:55:58.410583 1 leaderelection.go:271] successfully acquired lease kube-system/cert-manager-cainjector-leader-election 2025-12-08T17:55:58.410915487+00:00 stderr F I1208 17:55:58.410868 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="mutatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="MutatingWebhookConfiguration" source="kind source: *v1.MutatingWebhookConfiguration" 2025-12-08T17:55:58.410931777+00:00 stderr F I1208 17:55:58.410919 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="mutatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="MutatingWebhookConfiguration" source="kind source: *v1.PartialObjectMetadata" 2025-12-08T17:55:58.410947878+00:00 stderr F I1208 17:55:58.410924 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="customresourcedefinition" controllerGroup="apiextensions.k8s.io" controllerKind="CustomResourceDefinition" source="kind source: *v1.CustomResourceDefinition" 2025-12-08T17:55:58.410947878+00:00 stderr F I1208 17:55:58.410929 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="mutatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="MutatingWebhookConfiguration" source="kind source: *v1.PartialObjectMetadata" 2025-12-08T17:55:58.410947878+00:00 stderr F I1208 17:55:58.410939 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="mutatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="MutatingWebhookConfiguration" source="kind source: *v1.Certificate" 2025-12-08T17:55:58.410980049+00:00 stderr F I1208 17:55:58.410942 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="customresourcedefinition" controllerGroup="apiextensions.k8s.io" controllerKind="CustomResourceDefinition" source="kind source: *v1.PartialObjectMetadata" 2025-12-08T17:55:58.410980049+00:00 stderr F I1208 17:55:58.410947 1 controller.go:183] "Starting Controller" logger="cert-manager" controller="mutatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="MutatingWebhookConfiguration" 2025-12-08T17:55:58.410980049+00:00 stderr F I1208 17:55:58.410954 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="customresourcedefinition" controllerGroup="apiextensions.k8s.io" controllerKind="CustomResourceDefinition" source="kind source: *v1.PartialObjectMetadata" 2025-12-08T17:55:58.410980049+00:00 stderr F I1208 17:55:58.410965 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="customresourcedefinition" controllerGroup="apiextensions.k8s.io" controllerKind="CustomResourceDefinition" source="kind source: *v1.Certificate" 2025-12-08T17:55:58.410980049+00:00 stderr F I1208 17:55:58.410975 1 controller.go:183] "Starting Controller" logger="cert-manager" controller="customresourcedefinition" controllerGroup="apiextensions.k8s.io" controllerKind="CustomResourceDefinition" 2025-12-08T17:55:58.411025020+00:00 stderr F I1208 
17:55:58.410923 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="apiservice" controllerGroup="apiregistration.k8s.io" controllerKind="APIService" source="kind source: *v1.APIService" 2025-12-08T17:55:58.411035090+00:00 stderr F I1208 17:55:58.411019 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="apiservice" controllerGroup="apiregistration.k8s.io" controllerKind="APIService" source="kind source: *v1.PartialObjectMetadata" 2025-12-08T17:55:58.411043830+00:00 stderr F I1208 17:55:58.411031 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="apiservice" controllerGroup="apiregistration.k8s.io" controllerKind="APIService" source="kind source: *v1.PartialObjectMetadata" 2025-12-08T17:55:58.411052591+00:00 stderr F I1208 17:55:58.411042 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="apiservice" controllerGroup="apiregistration.k8s.io" controllerKind="APIService" source="kind source: *v1.Certificate" 2025-12-08T17:55:58.411060261+00:00 stderr F I1208 17:55:58.411051 1 controller.go:183] "Starting Controller" logger="cert-manager" controller="apiservice" controllerGroup="apiregistration.k8s.io" controllerKind="APIService" 2025-12-08T17:55:58.411113052+00:00 stderr F I1208 17:55:58.411067 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="validatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="ValidatingWebhookConfiguration" source="kind source: *v1.ValidatingWebhookConfiguration" 2025-12-08T17:55:58.411162774+00:00 stderr F I1208 17:55:58.411119 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="validatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="ValidatingWebhookConfiguration" source="kind source: *v1.PartialObjectMetadata" 2025-12-08T17:55:58.411162774+00:00 stderr F I1208 17:55:58.411148 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="validatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="ValidatingWebhookConfiguration" source="kind source: *v1.PartialObjectMetadata" 2025-12-08T17:55:58.411180914+00:00 stderr F I1208 17:55:58.411167 1 controller.go:175] "Starting EventSource" logger="cert-manager" controller="validatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="ValidatingWebhookConfiguration" source="kind source: *v1.Certificate" 2025-12-08T17:55:58.411208475+00:00 stderr F I1208 17:55:58.411183 1 controller.go:183] "Starting Controller" logger="cert-manager" controller="validatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="ValidatingWebhookConfiguration" 2025-12-08T17:55:58.411409910+00:00 stderr F I1208 17:55:58.410832 1 recorder.go:104] "cert-manager-cainjector-7dbf76d5c8-fdk5q_a0c0e52e-38fe-492f-a68e-5d699ecb144d became leader" logger="cert-manager.events" type="Normal" object={"kind":"Lease","namespace":"kube-system","name":"cert-manager-cainjector-leader-election","uid":"9c264768-878f-4a16-88ec-2912104b1c57","apiVersion":"coordination.k8s.io/v1","resourceVersion":"43358"} reason="LeaderElection" 2025-12-08T17:55:58.415225805+00:00 stderr F I1208 17:55:58.415192 1 reflector.go:376] Caches populated for *v1.Certificate from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:55:58.427811071+00:00 stderr F I1208 17:55:58.427763 1 reflector.go:376] Caches populated for 
*v1.PartialObjectMetadata from pkg/mod/k8s.io/client-go@v0.32.0/tools/cache/reflector.go:251 2025-12-08T17:55:58.519556088+00:00 stderr F I1208 17:55:58.519469 1 controller.go:217] "Starting workers" logger="cert-manager" controller="customresourcedefinition" controllerGroup="apiextensions.k8s.io" controllerKind="CustomResourceDefinition" worker count=1 2025-12-08T17:55:58.519920558+00:00 stderr F I1208 17:55:58.519857 1 controller.go:217] "Starting workers" logger="cert-manager" controller="mutatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="MutatingWebhookConfiguration" worker count=1 2025-12-08T17:55:58.520718550+00:00 stderr F I1208 17:55:58.520654 1 controller.go:217] "Starting workers" logger="cert-manager" controller="validatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="ValidatingWebhookConfiguration" worker count=1 2025-12-08T17:55:58.520824123+00:00 stderr F I1208 17:55:58.520779 1 controller.go:217] "Starting workers" logger="cert-manager" controller="apiservice" controllerGroup="apiregistration.k8s.io" controllerKind="APIService" worker count=1 2025-12-08T17:55:58.529028658+00:00 stderr F I1208 17:55:58.528811 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="validatingwebhookconfiguration" kind="validatingwebhookconfiguration" name="cert-manager-webhook" 2025-12-08T17:55:58.529202753+00:00 stderr F I1208 17:55:58.529151 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="mutatingwebhookconfiguration" kind="mutatingwebhookconfiguration" name="cert-manager-webhook" 2025-12-08T17:55:58.534095807+00:00 stderr F I1208 17:55:58.533990 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="customresourcedefinition" kind="customresourcedefinition" name="certificaterequests.cert-manager.io" 2025-12-08T17:55:58.535041523+00:00 stderr F E1208 17:55:58.534968 1 reconciler.go:137] "unable to update target object with new CA data" err="Operation cannot be fulfilled on mutatingwebhookconfigurations.admissionregistration.k8s.io \"cert-manager-webhook\": the object has been modified; please apply your changes to the latest version and try again" logger="cert-manager" kind="mutatingwebhookconfiguration" kind="mutatingwebhookconfiguration" name="cert-manager-webhook" 2025-12-08T17:55:58.535120495+00:00 stderr F I1208 17:55:58.535068 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="validatingwebhookconfiguration" kind="validatingwebhookconfiguration" name="cert-manager-webhook" 2025-12-08T17:55:58.535363701+00:00 stderr F E1208 17:55:58.535192 1 controller.go:316] "Reconciler error" err="Operation cannot be fulfilled on mutatingwebhookconfigurations.admissionregistration.k8s.io \"cert-manager-webhook\": the object has been modified; please apply your changes to the latest version and try again" logger="cert-manager" controller="mutatingwebhookconfiguration" controllerGroup="admissionregistration.k8s.io" controllerKind="MutatingWebhookConfiguration" MutatingWebhookConfiguration="cert-manager-webhook" namespace="" name="cert-manager-webhook" reconcileID="c2652956-57db-42d6-8afe-f2aef6ccdd6a" 2025-12-08T17:55:58.540944095+00:00 stderr F I1208 17:55:58.540910 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="mutatingwebhookconfiguration" kind="mutatingwebhookconfiguration" name="cert-manager-webhook" 2025-12-08T17:55:58.547986078+00:00 stderr F I1208 17:55:58.547662 1 reconciler.go:141] "Updated object" logger="cert-manager" 
kind="mutatingwebhookconfiguration" kind="mutatingwebhookconfiguration" name="cert-manager-webhook" 2025-12-08T17:55:58.915112772+00:00 stderr F I1208 17:55:58.915041 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="customresourcedefinition" kind="customresourcedefinition" name="clusterissuers.cert-manager.io" 2025-12-08T17:55:59.017237674+00:00 stderr F I1208 17:55:59.017177 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="customresourcedefinition" kind="customresourcedefinition" name="issuers.cert-manager.io" 2025-12-08T17:55:59.044531433+00:00 stderr F I1208 17:55:59.044437 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="customresourcedefinition" kind="customresourcedefinition" name="certificates.cert-manager.io" 2025-12-08T17:55:59.145906995+00:00 stderr F I1208 17:55:59.145532 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="customresourcedefinition" kind="customresourcedefinition" name="challenges.acme.cert-manager.io" 2025-12-08T17:55:59.164764522+00:00 stderr F I1208 17:55:59.164656 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="customresourcedefinition" kind="customresourcedefinition" name="orders.acme.cert-manager.io" 2025-12-08T17:55:59.185139631+00:00 stderr F I1208 17:55:59.185038 1 reconciler.go:141] "Updated object" logger="cert-manager" kind="customresourcedefinition" kind="customresourcedefinition" name="certificaterequests.cert-manager.io" ././@LongLink0000644000000000000000000000027000000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-mach0000755000175000017500000000000015115611514033037 5ustar zuulzuul././@LongLink0000644000000000000000000000032400000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/machine-approver-controller/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-mach0000755000175000017500000000000015115611521033035 5ustar zuulzuul././@LongLink0000644000000000000000000000033100000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/machine-approver-controller/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-mach0000644000175000017500000017761315115611514033060 0ustar zuulzuul2025-12-08T17:44:23.942799772+00:00 stderr F W1208 17:44:23.942325 1 client_config.go:667] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 2025-12-08T17:44:23.943084579+00:00 stderr F W1208 17:44:23.943033 1 client_config.go:667] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 
2025-12-08T17:44:23.944298763+00:00 stderr F I1208 17:44:23.944101 1 main.go:153] setting up manager
2025-12-08T17:44:23.947198662+00:00 stderr F I1208 17:44:23.944475 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
2025-12-08T17:44:23.947198662+00:00 stderr F I1208 17:44:23.944491 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
2025-12-08T17:44:23.947198662+00:00 stderr F I1208 17:44:23.944497 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
2025-12-08T17:44:23.947198662+00:00 stderr F I1208 17:44:23.944508 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
2025-12-08T17:44:23.947198662+00:00 stderr F I1208 17:44:23.944511 1 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
2025-12-08T17:44:24.003781795+00:00 stderr F I1208 17:44:24.003726 1 main.go:178] registering components
2025-12-08T17:44:24.003781795+00:00 stderr F I1208 17:44:24.003754 1 main.go:180] setting up scheme
2025-12-08T17:44:24.004170506+00:00 stderr F I1208 17:44:24.004153 1 main.go:218] setting up controllers
2025-12-08T17:44:24.004362341+00:00 stderr F I1208 17:44:24.004187 1 config.go:33] using default as failed to load config /var/run/configmaps/config/config.yaml: open /var/run/configmaps/config/config.yaml: no such file or directory
2025-12-08T17:44:24.004362341+00:00 stderr F I1208 17:44:24.004201 1 config.go:23] machine approver config: {NodeClientCert:{Disabled:false}}
2025-12-08T17:44:24.005988105+00:00 stderr F I1208 17:44:24.005354 1 main.go:244] starting the cmd
2025-12-08T17:44:24.010038836+00:00 stderr F I1208 17:44:24.009609 1 server.go:208] "Starting metrics server" logger="controller-runtime.metrics"
2025-12-08T17:44:24.013017667+00:00 stderr F I1208 17:44:24.010380 1 server.go:247] "Serving metrics server" logger="controller-runtime.metrics" bindAddress="127.0.0.1:9191" secure=false
2025-12-08T17:44:24.020907492+00:00 stderr F I1208 17:44:24.020651 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.CertificateSigningRequest" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114"
2025-12-08T17:44:24.108841240+00:00 stderr F I1208 17:44:24.107119 1 leaderelection.go:257] attempting to acquire leader lease openshift-cluster-machine-approver/cluster-machine-approver-leader...
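The leaderelection.go messages above are controller-runtime's lease-based leader election: the manager acquires a coordination.k8s.io Lease before its controllers start their workers. A minimal sketch, with a placeholder lease ID and namespace rather than the machine-approver's actual options, of how a manager is typically configured for this:

// Sketch: enable lease-based leader election on a controller-runtime manager.
// "example-leader" and "example-namespace" are illustrative placeholders.
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		LeaderElection:          true,
		LeaderElectionID:        "example-leader",    // lease name (placeholder)
		LeaderElectionNamespace: "example-namespace", // lease namespace (placeholder)
	})
	if err != nil {
		panic(err)
	}
	// Controllers registered on mgr only begin reconciling once the lease is held.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}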
2025-12-08T17:44:24.129928006+00:00 stderr F I1208 17:44:24.125841 1 leaderelection.go:271] successfully acquired lease openshift-cluster-machine-approver/cluster-machine-approver-leader 2025-12-08T17:44:24.129928006+00:00 stderr F I1208 17:44:24.126415 1 recorder.go:104] "crc_3a332019-925a-40b5-afe2-79b527bd7e10 became leader" logger="events" type="Normal" object={"kind":"Lease","namespace":"openshift-cluster-machine-approver","name":"cluster-machine-approver-leader","uid":"a2b07e0d-b34f-407c-bad9-eef38a0db37d","apiVersion":"coordination.k8s.io/v1","resourceVersion":"37475"} reason="LeaderElection" 2025-12-08T17:44:24.129928006+00:00 stderr F I1208 17:44:24.126693 1 controller.go:246] "Starting EventSource" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" source="kind source: *v1.ConfigMap" 2025-12-08T17:44:24.129928006+00:00 stderr F I1208 17:44:24.128467 1 status.go:100] Starting cluster operator status controller 2025-12-08T17:44:24.129928006+00:00 stderr F I1208 17:44:24.129192 1 controller.go:246] "Starting EventSource" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" source="kind source: *v1.CertificateSigningRequest" 2025-12-08T17:44:24.132143457+00:00 stderr F I1208 17:44:24.131925 1 reflector.go:430] "Caches populated" type="*v1.ClusterOperator" reflector="github.com/openshift/cluster-machine-approver/status.go:102" 2025-12-08T17:44:24.411949419+00:00 stderr F I1208 17:44:24.411520 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.ConfigMap" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:44:24.439250253+00:00 stderr F I1208 17:44:24.438836 1 controller.go:186] "Starting Controller" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" 2025-12-08T17:44:24.439250253+00:00 stderr F I1208 17:44:24.438974 1 controller.go:195] "Starting workers" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" worker count=10 2025-12-08T17:44:24.439250253+00:00 stderr F I1208 17:44:24.439028 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:24.590243542+00:00 stderr F I1208 17:44:24.589588 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:24.593921953+00:00 stderr F I1208 17:44:24.592565 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:24.595376722+00:00 stderr F I1208 17:44:24.595038 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:24.595376722+00:00 stderr F I1208 17:44:24.595058 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:24.595376722+00:00 stderr F E1208 17:44:24.595172 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:24.595376722+00:00 stderr F I1208 17:44:24.595182 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:24.606794293+00:00 stderr F I1208 17:44:24.605580 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:24.606794293+00:00 stderr F E1208 17:44:24.605645 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine 
for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="5aa86f2c-2373-4db2-b68c-1e7489352926" 2025-12-08T17:44:24.613417395+00:00 stderr F I1208 17:44:24.612615 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:24.684007900+00:00 stderr F I1208 17:44:24.680670 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:24.686987321+00:00 stderr F I1208 17:44:24.686123 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:24.690090736+00:00 stderr F I1208 17:44:24.690051 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:24.690132457+00:00 stderr F I1208 17:44:24.690120 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:24.690164868+00:00 stderr F E1208 17:44:24.690153 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:24.690195638+00:00 stderr F I1208 17:44:24.690183 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:24.699045400+00:00 stderr F I1208 17:44:24.698290 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:24.699045400+00:00 stderr F E1208 17:44:24.698555 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="bbd6f959-5eac-4396-bc98-414f7bb4651b" 2025-12-08T17:44:24.711248863+00:00 stderr F I1208 17:44:24.709467 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:24.737347415+00:00 stderr F I1208 17:44:24.737266 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:24.740137621+00:00 stderr F I1208 17:44:24.740095 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:24.741596580+00:00 stderr F I1208 17:44:24.741549 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:24.741596580+00:00 stderr F I1208 17:44:24.741584 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:24.741596580+00:00 stderr F E1208 17:44:24.741591 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:24.741618721+00:00 stderr F I1208 17:44:24.741597 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:24.746648788+00:00 stderr F I1208 17:44:24.746601 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:24.746687709+00:00 stderr F E1208 17:44:24.746674 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="6666c256-ae45-49a5-adf9-9e2079a74844" 2025-12-08T17:44:24.768943916+00:00 stderr F I1208 17:44:24.766980 1 controller.go:165] Reconciling CSR: 
csr-9nbg7 2025-12-08T17:44:24.839473370+00:00 stderr F I1208 17:44:24.839385 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:24.850422769+00:00 stderr F I1208 17:44:24.849671 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:24.851150538+00:00 stderr F I1208 17:44:24.851108 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:24.851150538+00:00 stderr F I1208 17:44:24.851128 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:24.851169949+00:00 stderr F E1208 17:44:24.851149 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:24.851169949+00:00 stderr F I1208 17:44:24.851156 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:24.858808858+00:00 stderr F I1208 17:44:24.855750 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:24.858808858+00:00 stderr F E1208 17:44:24.855803 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="ab8def16-8024-48aa-8f40-cac32de87841" 2025-12-08T17:44:24.896615199+00:00 stderr F I1208 17:44:24.896076 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:24.948946286+00:00 stderr F I1208 17:44:24.948823 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:24.952071241+00:00 stderr F I1208 17:44:24.950861 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:24.954968441+00:00 stderr F I1208 17:44:24.954926 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:24.954968441+00:00 stderr F I1208 17:44:24.954948 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:24.954968441+00:00 stderr F E1208 17:44:24.954956 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:24.955000432+00:00 stderr F I1208 17:44:24.954964 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:24.961926721+00:00 stderr F I1208 17:44:24.961338 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:24.961926721+00:00 stderr F E1208 17:44:24.961387 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="fbb1245e-e5fa-479b-acc5-9f654244c40c" 2025-12-08T17:44:25.046475447+00:00 stderr F I1208 17:44:25.043962 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:25.146579407+00:00 stderr F I1208 17:44:25.146511 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:25.156022645+00:00 stderr F I1208 17:44:25.154601 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:25.163193431+00:00 stderr F I1208 17:44:25.159232 1 
csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:25.163193431+00:00 stderr F I1208 17:44:25.159256 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:25.163193431+00:00 stderr F E1208 17:44:25.159265 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:25.163193431+00:00 stderr F I1208 17:44:25.159272 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:25.163193431+00:00 stderr F I1208 17:44:25.162706 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:25.163193431+00:00 stderr F E1208 17:44:25.162752 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="6dbe58d8-fb6d-41ea-ba76-543c0c3aacbb" 2025-12-08T17:44:25.325073756+00:00 stderr F I1208 17:44:25.323750 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:25.421996710+00:00 stderr F I1208 17:44:25.419374 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:25.428661262+00:00 stderr F I1208 17:44:25.428307 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:25.442972242+00:00 stderr F I1208 17:44:25.439621 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:25.442972242+00:00 stderr F I1208 17:44:25.439656 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:25.442972242+00:00 stderr F E1208 17:44:25.439665 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:25.442972242+00:00 stderr F I1208 17:44:25.439672 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:25.456038928+00:00 stderr F I1208 17:44:25.453590 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:25.456038928+00:00 stderr F E1208 17:44:25.453655 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="b58711be-beb8-4bf4-9daf-e752af5b25de" 2025-12-08T17:44:25.776922601+00:00 stderr F I1208 17:44:25.775111 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:25.796963508+00:00 stderr F I1208 17:44:25.796330 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:25.802269592+00:00 stderr F I1208 17:44:25.802159 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:25.804780291+00:00 stderr F I1208 17:44:25.804732 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:25.804780291+00:00 stderr F I1208 17:44:25.804757 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:25.804780291+00:00 stderr F E1208 17:44:25.804765 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target 
machine for node "crc" 2025-12-08T17:44:25.804780291+00:00 stderr F I1208 17:44:25.804772 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:25.813216181+00:00 stderr F I1208 17:44:25.812211 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:25.813216181+00:00 stderr F E1208 17:44:25.812263 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="e8b1059e-d5dc-455e-92cc-8130f2e65615" 2025-12-08T17:44:26.452944651+00:00 stderr F I1208 17:44:26.452403 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:26.484262875+00:00 stderr F I1208 17:44:26.484156 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:26.487009280+00:00 stderr F I1208 17:44:26.486899 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:26.492295654+00:00 stderr F I1208 17:44:26.492223 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:26.492295654+00:00 stderr F I1208 17:44:26.492265 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:26.492295654+00:00 stderr F E1208 17:44:26.492273 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:26.492295654+00:00 stderr F I1208 17:44:26.492281 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:26.496196511+00:00 stderr F I1208 17:44:26.495385 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:26.496196511+00:00 stderr F E1208 17:44:26.495431 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="e1a97f94-2c63-42ef-aca3-7fa0db54cee2" 2025-12-08T17:44:27.775631389+00:00 stderr F I1208 17:44:27.775547 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:27.797954668+00:00 stderr F I1208 17:44:27.797676 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:27.799408228+00:00 stderr F I1208 17:44:27.799329 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:27.803021636+00:00 stderr F I1208 17:44:27.801389 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:27.803021636+00:00 stderr F I1208 17:44:27.801411 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:27.803021636+00:00 stderr F E1208 17:44:27.801417 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:27.803021636+00:00 stderr F I1208 17:44:27.801424 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:27.806761549+00:00 stderr F I1208 17:44:27.806692 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:27.806815440+00:00 stderr 
F E1208 17:44:27.806783 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="b2f3c297-888d-42dc-9c79-ffd74bce4a9c" 2025-12-08T17:44:30.370273394+00:00 stderr F I1208 17:44:30.367789 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:30.386212498+00:00 stderr F I1208 17:44:30.385541 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:30.387496593+00:00 stderr F I1208 17:44:30.387461 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:30.389117277+00:00 stderr F I1208 17:44:30.388976 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:30.389117277+00:00 stderr F I1208 17:44:30.388996 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:30.389117277+00:00 stderr F E1208 17:44:30.389006 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:30.389117277+00:00 stderr F I1208 17:44:30.389013 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:30.393056565+00:00 stderr F I1208 17:44:30.392895 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:30.393245820+00:00 stderr F E1208 17:44:30.393217 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="d9401380-11c3-4c23-a7bc-5f7ba3714af2" 2025-12-08T17:44:35.513434054+00:00 stderr F I1208 17:44:35.513368 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:35.527324003+00:00 stderr F I1208 17:44:35.527260 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:35.529304126+00:00 stderr F I1208 17:44:35.529254 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:35.530242662+00:00 stderr F I1208 17:44:35.530198 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:35.530242662+00:00 stderr F I1208 17:44:35.530213 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:35.530242662+00:00 stderr F E1208 17:44:35.530219 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:35.530242662+00:00 stderr F I1208 17:44:35.530225 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:35.532119443+00:00 stderr F I1208 17:44:35.532070 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:35.532155804+00:00 stderr F E1208 17:44:35.532136 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" 
namespace="" name="csr-9nbg7" reconcileID="7eddf93d-a2d6-4983-821c-e62a5c10f771" 2025-12-08T17:44:45.772736355+00:00 stderr F I1208 17:44:45.772692 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:44:45.784641326+00:00 stderr F I1208 17:44:45.784600 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:44:45.786212271+00:00 stderr F I1208 17:44:45.786184 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:44:45.787358002+00:00 stderr F I1208 17:44:45.787329 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:44:45.787358002+00:00 stderr F I1208 17:44:45.787351 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:44:45.787376073+00:00 stderr F E1208 17:44:45.787360 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:44:45.787376073+00:00 stderr F I1208 17:44:45.787370 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:44:45.789680637+00:00 stderr F I1208 17:44:45.789652 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:44:45.789811150+00:00 stderr F E1208 17:44:45.789704 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="e30d00e8-f265-4776-b6bd-80dc29f15565" 2025-12-08T17:45:06.270484514+00:00 stderr F I1208 17:45:06.270414 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:45:06.290943153+00:00 stderr F I1208 17:45:06.290870 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:45:06.293964347+00:00 stderr F I1208 17:45:06.293928 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:45:06.295303855+00:00 stderr F I1208 17:45:06.295280 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:45:06.295337856+00:00 stderr F I1208 17:45:06.295328 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:45:06.295363557+00:00 stderr F E1208 17:45:06.295354 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:45:06.295388748+00:00 stderr F I1208 17:45:06.295379 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:45:06.298155135+00:00 stderr F I1208 17:45:06.298137 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:45:06.298246107+00:00 stderr F E1208 17:45:06.298233 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="b972758d-8d58-474d-9c55-7352b7142436" 2025-12-08T17:45:47.259384507+00:00 stderr F I1208 17:45:47.259335 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:45:47.284514872+00:00 stderr F I1208 17:45:47.284436 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 
2025-12-08T17:45:47.295059839+00:00 stderr F I1208 17:45:47.294980 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:45:47.298234124+00:00 stderr F I1208 17:45:47.298192 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:45:47.298321846+00:00 stderr F I1208 17:45:47.298295 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:45:47.298393128+00:00 stderr F E1208 17:45:47.298366 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:45:47.298468551+00:00 stderr F I1208 17:45:47.298440 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:45:47.302090110+00:00 stderr F I1208 17:45:47.302033 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:45:47.302114851+00:00 stderr F E1208 17:45:47.302101 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="f372b24e-95dd-4eba-a8a1-9243333ba20f" 2025-12-08T17:46:31.684866228+00:00 stderr F I1208 17:46:31.684782 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.CertificateSigningRequest" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:46:31.685051423+00:00 stderr F I1208 17:46:31.684994 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:46:31.705501527+00:00 stderr F I1208 17:46:31.705431 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:46:31.708796156+00:00 stderr F I1208 17:46:31.708772 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:46:31.710845027+00:00 stderr F I1208 17:46:31.710717 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:46:31.710845027+00:00 stderr F I1208 17:46:31.710781 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:46:31.710845027+00:00 stderr F E1208 17:46:31.710792 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:46:31.710845027+00:00 stderr F I1208 17:46:31.710804 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:46:31.714051973+00:00 stderr F I1208 17:46:31.714014 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:46:31.714101125+00:00 stderr F E1208 17:46:31.714074 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="c44f12c1-8bf4-4d63-b360-96136654727a" 2025-12-08T17:46:31.771436556+00:00 stderr F I1208 17:46:31.771381 1 reflector.go:430] "Caches populated" logger="controller-runtime.cache" type="*v1.ConfigMap" reflector="sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go:114" 2025-12-08T17:46:31.829338204+00:00 stderr F I1208 17:46:31.829277 1 reflector.go:430] "Caches populated" 
type="*v1.ClusterOperator" reflector="github.com/openshift/cluster-machine-approver/status.go:102" 2025-12-08T17:47:09.226139177+00:00 stderr F I1208 17:47:09.226061 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:47:09.280011092+00:00 stderr F I1208 17:47:09.278696 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:47:09.283794451+00:00 stderr F I1208 17:47:09.283754 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:47:09.286813346+00:00 stderr F I1208 17:47:09.286761 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:47:09.286813346+00:00 stderr F I1208 17:47:09.286790 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:47:09.286813346+00:00 stderr F E1208 17:47:09.286804 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:47:09.286833607+00:00 stderr F I1208 17:47:09.286815 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:47:09.289747058+00:00 stderr F I1208 17:47:09.289721 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:47:09.289815490+00:00 stderr F E1208 17:47:09.289789 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="aa59f869-c310-4ce0-a9ad-95b639db5c8d" 2025-12-08T17:52:36.970797984+00:00 stderr F I1208 17:52:36.970682 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T17:52:36.993530361+00:00 stderr F I1208 17:52:36.993460 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T17:52:36.996570552+00:00 stderr F I1208 17:52:36.996515 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:52:36.998348679+00:00 stderr F I1208 17:52:36.998293 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:52:36.998348679+00:00 stderr F I1208 17:52:36.998323 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:52:36.998348679+00:00 stderr F E1208 17:52:36.998334 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T17:52:36.998387080+00:00 stderr F I1208 17:52:36.998348 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:52:37.002374287+00:00 stderr F I1208 17:52:37.002264 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T17:52:37.002374287+00:00 stderr F E1208 17:52:37.002355 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="93055645-a405-44d7-986a-e4da83c910e0" 2025-12-08T17:59:12.449856440+00:00 stderr F I1208 17:59:12.449772 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:12.464017464+00:00 stderr F I1208 17:59:12.463853 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 
2025-12-08T17:59:12.466198221+00:00 stderr F I1208 17:59:12.466161 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:12.467275139+00:00 stderr F I1208 17:59:12.467240 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:12.467275139+00:00 stderr F I1208 17:59:12.467261 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:12.467275139+00:00 stderr F E1208 17:59:12.467268 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:12.467293260+00:00 stderr F I1208 17:59:12.467276 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:12.469985901+00:00 stderr F I1208 17:59:12.469951 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:12.470006051+00:00 stderr F E1208 17:59:12.469991 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="9e95dca0-4fe8-4746-8f86-c5a0ba72d1d5" 2025-12-08T17:59:12.475227019+00:00 stderr F I1208 17:59:12.475190 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:12.487403601+00:00 stderr F I1208 17:59:12.487328 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:12.490262886+00:00 stderr F I1208 17:59:12.490184 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:12.491392075+00:00 stderr F I1208 17:59:12.491355 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:12.491392075+00:00 stderr F I1208 17:59:12.491373 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:12.491392075+00:00 stderr F E1208 17:59:12.491380 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:12.491420076+00:00 stderr F I1208 17:59:12.491388 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:12.493592073+00:00 stderr F I1208 17:59:12.493325 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:12.493592073+00:00 stderr F E1208 17:59:12.493372 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="31271df1-d548-4be4-8bb8-9e0a7cd73524" 2025-12-08T17:59:12.503667058+00:00 stderr F I1208 17:59:12.503597 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:12.534921853+00:00 stderr F I1208 17:59:12.534828 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:12.537724287+00:00 stderr F I1208 17:59:12.537682 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:12.539820531+00:00 stderr F I1208 17:59:12.539629 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:12.539820531+00:00 stderr F I1208 
17:59:12.539659 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:12.539820531+00:00 stderr F E1208 17:59:12.539670 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:12.539820531+00:00 stderr F I1208 17:59:12.539683 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:12.542152973+00:00 stderr F I1208 17:59:12.542116 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:12.542210085+00:00 stderr F E1208 17:59:12.542183 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="6cf14751-30df-4750-bad6-3abd955e60d6" 2025-12-08T17:59:12.562654603+00:00 stderr F I1208 17:59:12.562559 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:12.579035045+00:00 stderr F I1208 17:59:12.578798 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:12.581424778+00:00 stderr F I1208 17:59:12.581390 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:12.582638201+00:00 stderr F I1208 17:59:12.582450 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:12.582638201+00:00 stderr F I1208 17:59:12.582468 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:12.582638201+00:00 stderr F E1208 17:59:12.582476 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:12.582638201+00:00 stderr F I1208 17:59:12.582500 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:12.585410274+00:00 stderr F I1208 17:59:12.585331 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:12.585410274+00:00 stderr F E1208 17:59:12.585381 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="61b6af7a-ef96-4466-a74b-0ea91fbaf8c8" 2025-12-08T17:59:12.625740256+00:00 stderr F I1208 17:59:12.625662 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:12.640268839+00:00 stderr F I1208 17:59:12.640203 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:12.642390135+00:00 stderr F I1208 17:59:12.642345 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:12.643661568+00:00 stderr F I1208 17:59:12.643590 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:12.643661568+00:00 stderr F I1208 17:59:12.643620 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:12.643661568+00:00 stderr F E1208 17:59:12.643629 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:12.643661568+00:00 stderr F I1208 17:59:12.643638 1 csr_check.go:221] Could not use Machine for serving cert 
authorization: unable to find machine for node 2025-12-08T17:59:12.646946465+00:00 stderr F I1208 17:59:12.646892 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:12.646946465+00:00 stderr F E1208 17:59:12.646935 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="7cea9389-c274-4599-bf92-ab1081eb677e" 2025-12-08T17:59:12.727313783+00:00 stderr F I1208 17:59:12.727244 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:12.744258370+00:00 stderr F I1208 17:59:12.744211 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:12.746550190+00:00 stderr F I1208 17:59:12.746530 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:12.748451920+00:00 stderr F I1208 17:59:12.748181 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:12.748451920+00:00 stderr F I1208 17:59:12.748232 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:12.748451920+00:00 stderr F E1208 17:59:12.748241 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:12.748451920+00:00 stderr F I1208 17:59:12.748250 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:12.751510141+00:00 stderr F I1208 17:59:12.751468 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:12.751559502+00:00 stderr F E1208 17:59:12.751531 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="1c54961a-0c7a-4ce4-808d-a4eba2e1f7cf" 2025-12-08T17:59:12.922226840+00:00 stderr F I1208 17:59:12.922125 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:12.939488615+00:00 stderr F I1208 17:59:12.939389 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:12.941736285+00:00 stderr F I1208 17:59:12.941692 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:12.943774718+00:00 stderr F I1208 17:59:12.943734 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:12.943849500+00:00 stderr F I1208 17:59:12.943828 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:12.943948573+00:00 stderr F E1208 17:59:12.943925 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:12.944010475+00:00 stderr F I1208 17:59:12.943989 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:12.947251510+00:00 stderr F I1208 17:59:12.947203 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:12.947287981+00:00 stderr F E1208 17:59:12.947259 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization 
methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="6590a55e-b0c7-45d2-96d8-7c4b2efd45c8" 2025-12-08T17:59:13.268744942+00:00 stderr F I1208 17:59:13.268648 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:13.286676856+00:00 stderr F I1208 17:59:13.286602 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:13.290960258+00:00 stderr F I1208 17:59:13.290894 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:13.293613688+00:00 stderr F I1208 17:59:13.293544 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:13.293613688+00:00 stderr F I1208 17:59:13.293572 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:13.293613688+00:00 stderr F E1208 17:59:13.293585 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:13.293613688+00:00 stderr F I1208 17:59:13.293595 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:13.297197193+00:00 stderr F I1208 17:59:13.297129 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:13.297236694+00:00 stderr F E1208 17:59:13.297211 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="7b1abe19-193b-47de-8f46-e5d951dd2599" 2025-12-08T17:59:13.938211667+00:00 stderr F I1208 17:59:13.937796 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:13.953086139+00:00 stderr F I1208 17:59:13.951627 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:13.955555454+00:00 stderr F I1208 17:59:13.955520 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:13.957056533+00:00 stderr F I1208 17:59:13.956694 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:13.957056533+00:00 stderr F I1208 17:59:13.956720 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:13.957056533+00:00 stderr F E1208 17:59:13.956729 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:13.957056533+00:00 stderr F I1208 17:59:13.956736 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:13.959643032+00:00 stderr F I1208 17:59:13.959611 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:13.959683863+00:00 stderr F E1208 17:59:13.959658 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="eada62ca-1386-4b74-8a25-372258073b54" 2025-12-08T17:59:15.240264663+00:00 stderr F I1208 17:59:15.240186 1 
controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:15.262697564+00:00 stderr F I1208 17:59:15.262358 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:15.265410936+00:00 stderr F I1208 17:59:15.265346 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:15.267214224+00:00 stderr F I1208 17:59:15.267042 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:15.267214224+00:00 stderr F I1208 17:59:15.267079 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:15.267214224+00:00 stderr F E1208 17:59:15.267090 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:15.267214224+00:00 stderr F I1208 17:59:15.267102 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:15.269774591+00:00 stderr F I1208 17:59:15.269704 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:15.269817762+00:00 stderr F E1208 17:59:15.269774 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="9312d799-77be-49ac-af5b-e1d7f81c6796" 2025-12-08T17:59:17.830062859+00:00 stderr F I1208 17:59:17.829982 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:17.842322812+00:00 stderr F I1208 17:59:17.842258 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:17.844270224+00:00 stderr F I1208 17:59:17.844226 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:17.845517606+00:00 stderr F I1208 17:59:17.845483 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:17.845517606+00:00 stderr F I1208 17:59:17.845499 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:17.845517606+00:00 stderr F E1208 17:59:17.845505 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:17.845517606+00:00 stderr F I1208 17:59:17.845512 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:17.847665703+00:00 stderr F I1208 17:59:17.847608 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:17.847665703+00:00 stderr F E1208 17:59:17.847648 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="a0d18e48-3b76-47ef-9a9b-f4ef4d97b7a4" 2025-12-08T17:59:22.968077764+00:00 stderr F I1208 17:59:22.968019 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:22.993442683+00:00 stderr F I1208 17:59:22.993389 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:22.996864083+00:00 stderr F I1208 17:59:22.996834 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 
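A minimal sketch, assuming standard `oc` tooling and cluster-admin access, of how the pending kubelet serving-cert CSR that the machine-approver keeps rejecting above could be inspected and approved by hand. On this CRC node no Machine object backs the node "crc", so the machine-api fallback fails and the CSR is never auto-approved; the name csr-ghnkj is taken from the log and should be confirmed against the live CSR list before approving:

    # Show all CSRs and their condition; the serving-cert request for node "crc" stays Pending.
    oc get csr
    # Approve it manually so the kubelet on 192.168.126.11:10250 can present a valid serving certificate.
    oc adm certificate approve csr-ghnkj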
2025-12-08T17:59:22.998437875+00:00 stderr F I1208 17:59:22.998158 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:22.998437875+00:00 stderr F I1208 17:59:22.998179 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:22.998437875+00:00 stderr F E1208 17:59:22.998186 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:22.998437875+00:00 stderr F I1208 17:59:22.998195 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:23.001518806+00:00 stderr F I1208 17:59:23.001486 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:23.001566917+00:00 stderr F E1208 17:59:23.001548 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="439248b2-3ce3-46ce-8a83-a311ca24e061" 2025-12-08T17:59:33.242501932+00:00 stderr F I1208 17:59:33.242432 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:33.258057742+00:00 stderr F I1208 17:59:33.257986 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:33.260775954+00:00 stderr F I1208 17:59:33.260719 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:33.262457479+00:00 stderr F I1208 17:59:33.262413 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:33.262457479+00:00 stderr F I1208 17:59:33.262443 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:33.262474429+00:00 stderr F E1208 17:59:33.262459 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:33.262481830+00:00 stderr F I1208 17:59:33.262473 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:33.265537670+00:00 stderr F I1208 17:59:33.265483 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:33.265607422+00:00 stderr F E1208 17:59:33.265564 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="c8835f6c-f9ab-433f-9302-540765a13c78" 2025-12-08T17:59:53.746482947+00:00 stderr F I1208 17:59:53.746001 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T17:59:53.767163870+00:00 stderr F I1208 17:59:53.767102 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T17:59:53.770005615+00:00 stderr F I1208 17:59:53.769960 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T17:59:53.771739091+00:00 stderr F I1208 17:59:53.771567 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T17:59:53.771739091+00:00 stderr F I1208 17:59:53.771629 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T17:59:53.771739091+00:00 stderr F E1208 
17:59:53.771640 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T17:59:53.771739091+00:00 stderr F I1208 17:59:53.771651 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T17:59:53.775116049+00:00 stderr F I1208 17:59:53.775066 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T17:59:53.775183581+00:00 stderr F E1208 17:59:53.775145 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="a83e6f76-3411-422a-8e54-ad02b4260665" 2025-12-08T18:00:34.735955767+00:00 stderr F I1208 18:00:34.735870 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T18:00:34.754055182+00:00 stderr F I1208 18:00:34.753987 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T18:00:34.757048851+00:00 stderr F I1208 18:00:34.757001 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T18:00:34.758559781+00:00 stderr F I1208 18:00:34.758497 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T18:00:34.758559781+00:00 stderr F I1208 18:00:34.758518 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T18:00:34.758559781+00:00 stderr F E1208 18:00:34.758526 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T18:00:34.758559781+00:00 stderr F I1208 18:00:34.758534 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T18:00:34.760965263+00:00 stderr F I1208 18:00:34.760918 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T18:00:34.760989014+00:00 stderr F E1208 18:00:34.760979 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="a6748ad5-aea7-4113-90ae-44685c8be0e3" 2025-12-08T18:01:56.682950665+00:00 stderr F I1208 18:01:56.682142 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T18:01:56.710031466+00:00 stderr F I1208 18:01:56.709953 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T18:01:56.713027146+00:00 stderr F I1208 18:01:56.712979 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T18:01:56.715105711+00:00 stderr F I1208 18:01:56.715052 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T18:01:56.715105711+00:00 stderr F I1208 18:01:56.715079 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T18:01:56.715105711+00:00 stderr F E1208 18:01:56.715087 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T18:01:56.715105711+00:00 stderr F I1208 18:01:56.715094 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T18:01:56.718047919+00:00 stderr F I1208 18:01:56.718005 1 controller.go:286] 
csr-ghnkj: CSR not authorized 2025-12-08T18:01:56.718109511+00:00 stderr F E1208 18:01:56.718073 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="07f6ed0f-2894-4797-b7f2-227298df69dc" 2025-12-08T18:03:32.362747944+00:00 stderr F I1208 18:03:32.362667 1 controller.go:165] Reconciling CSR: csr-9nbg7 2025-12-08T18:03:32.388133695+00:00 stderr F I1208 18:03:32.388052 1 csr_check.go:173] csr-9nbg7: CSR does not appear to be client csr 2025-12-08T18:03:32.390930159+00:00 stderr F I1208 18:03:32.390844 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T18:03:32.393099538+00:00 stderr F I1208 18:03:32.393052 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T18:03:32.393099538+00:00 stderr F I1208 18:03:32.393087 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T18:03:32.393112578+00:00 stderr F E1208 18:03:32.393099 1 csr_check.go:376] csr-9nbg7: Serving Cert: No target machine for node "crc" 2025-12-08T18:03:32.393121888+00:00 stderr F I1208 18:03:32.393112 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T18:03:32.397399974+00:00 stderr F I1208 18:03:32.397366 1 controller.go:286] csr-9nbg7: CSR not authorized 2025-12-08T18:03:32.397491536+00:00 stderr F E1208 18:03:32.397466 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-9nbg7" namespace="" name="csr-9nbg7" reconcileID="51ef1b02-f430-4500-b30e-2fe5dcbe7e77" 2025-12-08T18:04:40.559176387+00:00 stderr F I1208 18:04:40.559125 1 controller.go:165] Reconciling CSR: csr-ghnkj 2025-12-08T18:04:40.579608099+00:00 stderr F I1208 18:04:40.579542 1 csr_check.go:173] csr-ghnkj: CSR does not appear to be client csr 2025-12-08T18:04:40.582550638+00:00 stderr F I1208 18:04:40.582494 1 csr_check.go:563] retrieving serving cert from crc (192.168.126.11:10250) 2025-12-08T18:04:40.584464318+00:00 stderr F I1208 18:04:40.584440 1 csr_check.go:198] Failed to retrieve current serving cert: remote error: tls: internal error 2025-12-08T18:04:40.584511659+00:00 stderr F I1208 18:04:40.584496 1 csr_check.go:218] Falling back to machine-api authorization for crc 2025-12-08T18:04:40.584545720+00:00 stderr F E1208 18:04:40.584535 1 csr_check.go:376] csr-ghnkj: Serving Cert: No target machine for node "crc" 2025-12-08T18:04:40.584571710+00:00 stderr F I1208 18:04:40.584561 1 csr_check.go:221] Could not use Machine for serving cert authorization: unable to find machine for node 2025-12-08T18:04:40.588717251+00:00 stderr F I1208 18:04:40.588683 1 controller.go:286] csr-ghnkj: CSR not authorized 2025-12-08T18:04:40.588912996+00:00 stderr F E1208 18:04:40.588829 1 controller.go:353] "Reconciler error" err="could not reconcile CSR: could not authorize CSR: exhausted all authorization methods: unable to find machine for node" controller="certificatesigningrequest" controllerGroup="certificates.k8s.io" 
controllerKind="CertificateSigningRequest" CertificateSigningRequest="csr-ghnkj" namespace="" name="csr-ghnkj" reconcileID="7a5f7726-aca7-4cd4-bc7c-f267e807a2ea" ././@LongLink0000644000000000000000000000031000000000000011575 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/kube-rbac-proxy/home/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-mach0000755000175000017500000000000015115611521033035 5ustar zuulzuul././@LongLink0000644000000000000000000000031500000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/kube-rbac-proxy/0.loghome/zuul/zuul-output/logs/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-mach0000644000175000017500000002025615115611514033046 0ustar zuulzuul2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.481706 1 flags.go:64] FLAG: --add-dir-header="false" 2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.482250 1 flags.go:64] FLAG: --allow-paths="[]" 2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.482258 1 flags.go:64] FLAG: --alsologtostderr="false" 2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.482262 1 flags.go:64] FLAG: --auth-header-fields-enabled="false" 2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.482265 1 flags.go:64] FLAG: --auth-header-groups-field-name="x-remote-groups" 2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.482270 1 flags.go:64] FLAG: --auth-header-groups-field-separator="|" 2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.482273 1 flags.go:64] FLAG: --auth-header-user-field-name="x-remote-user" 2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.482276 1 flags.go:64] FLAG: --auth-token-audiences="[]" 2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.482282 1 flags.go:64] FLAG: --client-ca-file="" 2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.482285 1 flags.go:64] FLAG: --config-file="/etc/kube-rbac-proxy/config-file.yaml" 2025-12-08T17:44:21.482298257+00:00 stderr F I1208 17:44:21.482289 1 flags.go:64] FLAG: --help="false" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482293 1 flags.go:64] FLAG: --http2-disable="false" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482296 1 flags.go:64] FLAG: --http2-max-concurrent-streams="100" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482305 1 flags.go:64] FLAG: --http2-max-size="262144" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482309 1 flags.go:64] FLAG: --ignore-paths="[]" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482313 1 flags.go:64] FLAG: --insecure-listen-address="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482316 1 flags.go:64] FLAG: --kube-api-burst="0" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482320 1 flags.go:64] FLAG: --kube-api-qps="0" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482325 1 flags.go:64] FLAG: --kubeconfig="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482328 1 flags.go:64] FLAG: --log-backtrace-at="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482331 1 flags.go:64] FLAG: --log-dir="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482334 
1 flags.go:64] FLAG: --log-file="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482337 1 flags.go:64] FLAG: --log-file-max-size="0" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482341 1 flags.go:64] FLAG: --log-flush-frequency="5s" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482350 1 flags.go:64] FLAG: --logtostderr="true" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482354 1 flags.go:64] FLAG: --oidc-ca-file="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482357 1 flags.go:64] FLAG: --oidc-clientID="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482360 1 flags.go:64] FLAG: --oidc-groups-claim="groups" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482362 1 flags.go:64] FLAG: --oidc-groups-prefix="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482365 1 flags.go:64] FLAG: --oidc-issuer="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482368 1 flags.go:64] FLAG: --oidc-sign-alg="[RS256]" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482378 1 flags.go:64] FLAG: --oidc-username-claim="email" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482381 1 flags.go:64] FLAG: --oidc-username-prefix="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482384 1 flags.go:64] FLAG: --one-output="false" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482387 1 flags.go:64] FLAG: --proxy-endpoints-port="0" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482390 1 flags.go:64] FLAG: --secure-listen-address="0.0.0.0:9192" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482393 1 flags.go:64] FLAG: --skip-headers="false" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482396 1 flags.go:64] FLAG: --skip-log-headers="false" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482399 1 flags.go:64] FLAG: --stderrthreshold="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482402 1 flags.go:64] FLAG: --tls-cert-file="/etc/tls/private/tls.crt" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482405 1 flags.go:64] FLAG: --tls-cipher-suites="[TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305]" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482414 1 flags.go:64] FLAG: --tls-min-version="VersionTLS12" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482418 1 flags.go:64] FLAG: --tls-private-key-file="/etc/tls/private/tls.key" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482422 1 flags.go:64] FLAG: --tls-reload-interval="1m0s" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482426 1 flags.go:64] FLAG: --upstream="http://127.0.0.1:9191/" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482440 1 flags.go:64] FLAG: --upstream-ca-file="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482443 1 flags.go:64] FLAG: --upstream-client-cert-file="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482446 1 flags.go:64] FLAG: --upstream-client-key-file="" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482449 1 flags.go:64] FLAG: --upstream-force-h2c="false" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482452 1 flags.go:64] FLAG: --v="3" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482455 1 
flags.go:64] FLAG: --version="false" 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482460 1 flags.go:64] FLAG: --vmodule="" 2025-12-08T17:44:21.482642306+00:00 stderr F W1208 17:44:21.482467 1 deprecated.go:66] 2025-12-08T17:44:21.482642306+00:00 stderr F ==== Removed Flag Warning ====================== 2025-12-08T17:44:21.482642306+00:00 stderr F 2025-12-08T17:44:21.482642306+00:00 stderr F logtostderr is removed in the k8s upstream and has no effect any more. 2025-12-08T17:44:21.482642306+00:00 stderr F 2025-12-08T17:44:21.482642306+00:00 stderr F =============================================== 2025-12-08T17:44:21.482642306+00:00 stderr F 2025-12-08T17:44:21.482642306+00:00 stderr F I1208 17:44:21.482476 1 kube-rbac-proxy.go:532] Reading config file: /etc/kube-rbac-proxy/config-file.yaml 2025-12-08T17:44:21.484154768+00:00 stderr F I1208 17:44:21.483089 1 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false 2025-12-08T17:44:21.484154768+00:00 stderr F I1208 17:44:21.483107 1 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false 2025-12-08T17:44:21.484154768+00:00 stderr F I1208 17:44:21.483115 1 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false 2025-12-08T17:44:21.484154768+00:00 stderr F I1208 17:44:21.483121 1 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false 2025-12-08T17:44:21.484154768+00:00 stderr F I1208 17:44:21.483291 1 kube-rbac-proxy.go:235] Valid token audiences: 2025-12-08T17:44:21.487075038+00:00 stderr F I1208 17:44:21.487037 1 kube-rbac-proxy.go:349] Reading certificate files 2025-12-08T17:44:21.489509283+00:00 stderr F I1208 17:44:21.489471 1 kube-rbac-proxy.go:397] Starting TCP socket on 0.0.0.0:9192 2025-12-08T17:44:21.490058319+00:00 stderr F I1208 17:44:21.490034 1 kube-rbac-proxy.go:404] Listening securely on 0.0.0.0:9192 home/zuul/zuul-output/logs/ci-framework-data/logs/ci_script_000_fetch_openshift.log0000644000175000017500000000035215115610212027612 0ustar zuulzuulWARNING: Using insecure TLS client config. Setting this option is not supported! Login successful. You have access to 65 projects, the list has been suppressed. You can list all projects with 'oc projects' Using project "default". home/zuul/zuul-output/logs/ci-framework-data/logs/ci_script_001_login_into_openshift_internal.log0000644000175000017500000000002115115610223032552 0ustar zuulzuulLogin Succeeded! home/zuul/zuul-output/logs/ci-framework-data/logs/ci_script_000_check_for_oc.log0000644000175000017500000000002215115611276027053 0ustar zuulzuul/home/zuul/bin/oc home/zuul/zuul-output/logs/ci-framework-data/logs/ci_script_000_run_openstack_must_gather.log0000644000175000017500000001044415115611503031726 0ustar zuulzuul[must-gather ] OUT 2025-12-08T18:02:40.489126938Z Using must-gather plug-in image: quay.io/openstack-k8s-operators/openstack-must-gather:latest When opening a support case, bugzilla, or issue please include the following summary data along with any other requested information: ClusterID: ClientVersion: 4.20.5 ClusterVersion: Stable at "4.20.1" ClusterOperators: clusteroperator/machine-config is degraded because Failed to resync 4.20.1 because: error during syncRequiredMachineConfigPools: [context deadline exceeded, error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"")] clusteroperator/operator-lifecycle-manager is not upgradeable because ClusterServiceVersions blocking minor version upgrades to 4.19.0 or higher: - maximum supported OCP version for service-telemetry/smart-gateway-operator.v5.0.1765147433 is 4.18 - maximum supported OCP version for service-telemetry/service-telemetry-operator.v1.5.1765147436 is 4.18 clusteroperator/cloud-credential is missing clusteroperator/cluster-autoscaler is missing clusteroperator/insights is missing clusteroperator/monitoring is missing clusteroperator/storage is missing [must-gather ] OUT 2025-12-08T18:02:40.513415738Z namespace/openshift-must-gather-gctth created [must-gather ] OUT 2025-12-08T18:02:40.519071842Z clusterrolebinding.rbac.authorization.k8s.io/must-gather-vk99s created [must-gather ] OUT 2025-12-08T18:02:40.537001149Z pod for plug-in image quay.io/openstack-k8s-operators/openstack-must-gather:latest created [must-gather-5cz8j] OUT 2025-12-08T18:02:50.54753759Z gather logs unavailable: Get "https://192.168.126.11:10250/containerLogs/openshift-must-gather-gctth/must-gather-5cz8j/gather?follow=true×tamps=true": remote error: tls: internal error [must-gather-5cz8j] OUT 2025-12-08T18:02:50.547900939Z waiting for gather to complete [must-gather-5cz8j] OUT 2025-12-08T18:04:50.552451475Z downloading gather output WARNING: cannot use rsync: rsync not available in container WARNING: cannot use tar: tar not available in container WARNING: cannot use rsync: rsync not available in container WARNING: cannot use tar: tar not available in container [must-gather-5cz8j] OUT 2025-12-08T18:04:51.073320847Z gather output not downloaded: [Get "https://192.168.126.11:10250/containerLogs/openshift-must-gather-gctth/must-gather-5cz8j/gather?timestamps=true": remote error: tls: internal error, No available strategies to copy.] [must-gather-5cz8j] OUT 2025-12-08T18:04:51.073351618Z [must-gather ] OUT 2025-12-08T18:04:51.080283736Z namespace/openshift-must-gather-gctth deleted Reprinting Cluster State: When opening a support case, bugzilla, or issue please include the following summary data along with any other requested information: ClusterID: ClientVersion: 4.20.5 ClusterVersion: Stable at "4.20.1" ClusterOperators: clusteroperator/machine-config is degraded because Failed to resync 4.20.1 because: error during syncRequiredMachineConfigPools: [context deadline exceeded, error MachineConfigPool master is not ready, retrying. 
Status: (pool degraded: true total: 1, ready 0, updated: 0, unavailable: 1, reason: Node crc is reporting: "unexpected on-disk state validating against rendered-master-d582710c680b4cd4536e11249c7e09e9: content mismatch for file \"/var/lib/kubelet/config.json\"")] clusteroperator/operator-lifecycle-manager is not upgradeable because ClusterServiceVersions blocking minor version upgrades to 4.19.0 or higher: - maximum supported OCP version for service-telemetry/smart-gateway-operator.v5.0.1765147433 is 4.18 - maximum supported OCP version for service-telemetry/service-telemetry-operator.v1.5.1765147436 is 4.18 clusteroperator/cloud-credential is missing clusteroperator/cluster-autoscaler is missing clusteroperator/insights is missing clusteroperator/monitoring is missing clusteroperator/storage is missing error: unable to download output from pod must-gather-5cz8j: [Get "https://192.168.126.11:10250/containerLogs/openshift-must-gather-gctth/must-gather-5cz8j/gather?timestamps=true": remote error: tls: internal error, No available strategies to copy.] home/zuul/zuul-output/logs/ci-framework-data/logs/ci_script_000_prepare_root_ssh.log0000644000175000017500000045073015115611512030035 0ustar zuulzuulPseudo-terminal will not be allocated because stdin is not a terminal. Red Hat Enterprise Linux CoreOS 9.6.20251021-0 Part of OpenShift 4.20, RHCOS is a Kubernetes-native operating system managed by the Machine Config Operator (`clusteroperator/machine-config`). WARNING: Direct SSH access to machines is not recommended; instead, make configuration changes via `machineconfig` objects: https://docs.openshift.com/container-platform/4.20/architecture/architecture-rhcos.html --- + test -d /etc/ssh/sshd_config.d/ + sudo sed -ri 's/PermitRootLogin no/PermitRootLogin prohibit-password/' '/etc/ssh/sshd_config.d/*' sed: can't read /etc/ssh/sshd_config.d/*: No such file or directory + true + sudo sed -i 's/PermitRootLogin no/PermitRootLogin prohibit-password/' /etc/ssh/sshd_config + sudo systemctl restart sshd + sudo cp -r .ssh /root/ + sudo chown -R root: /root/.ssh + mkdir -p /tmp/crc-logs-artifacts + sudo cp -av /ostree/deploy/rhcos/var/log/pods /tmp/crc-logs-artifacts/ '/ostree/deploy/rhcos/var/log/pods' -> '/tmp/crc-logs-artifacts/pods' '/ostree/deploy/rhcos/var/log/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675' -> '/tmp/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675' '/ostree/deploy/rhcos/var/log/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/dns' -> '/tmp/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/dns' '/ostree/deploy/rhcos/var/log/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/dns/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/dns/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33' -> 
'/tmp/crc-logs-artifacts/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33/default-interconnect' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33/default-interconnect' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33/default-interconnect/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33/default-interconnect/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a' -> '/tmp/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a' '/ostree/deploy/rhcos/var/log/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/gather' -> '/tmp/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/gather' '/ostree/deploy/rhcos/var/log/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/gather/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/gather/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/copy' -> '/tmp/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/copy' '/ostree/deploy/rhcos/var/log/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/copy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/copy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-resources-copy' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-resources-copy' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-resources-copy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-resources-copy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-rev' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-rev' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-rev/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-rev/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/setup' -> 
'/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/setup' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/setup/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/setup/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-metrics' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-metrics' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-metrics/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-metrics/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcdctl' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcdctl' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcdctl/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcdctl/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-ensure-env-vars' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-ensure-env-vars' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-ensure-env-vars/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-ensure-env-vars/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-readyz' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-readyz' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-readyz/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-readyz/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f/etcd-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f/etcd-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f/etcd-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f/etcd-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f' -> '/tmp/crc-logs-artifacts/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f' '/ostree/deploy/rhcos/var/log/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f/controller-manager' -> '/tmp/crc-logs-artifacts/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f/controller-manager' 
'/ostree/deploy/rhcos/var/log/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f/controller-manager/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f/controller-manager/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756' -> '/tmp/crc-logs-artifacts/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756' '/ostree/deploy/rhcos/var/log/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756/dns-node-resolver' -> '/tmp/crc-logs-artifacts/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756/dns-node-resolver' '/ostree/deploy/rhcos/var/log/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756/dns-node-resolver/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756/dns-node-resolver/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8' -> '/tmp/crc-logs-artifacts/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8' '/ostree/deploy/rhcos/var/log/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8/network-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8/network-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8/network-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8/network-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_elastic-operator-c9c86658-4qchz_1899106f-2682-474e-ad41-4dd00dbc7d4b' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_elastic-operator-c9c86658-4qchz_1899106f-2682-474e-ad41-4dd00dbc7d4b' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_elastic-operator-c9c86658-4qchz_1899106f-2682-474e-ad41-4dd00dbc7d4b/manager' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_elastic-operator-c9c86658-4qchz_1899106f-2682-474e-ad41-4dd00dbc7d4b/manager' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_elastic-operator-c9c86658-4qchz_1899106f-2682-474e-ad41-4dd00dbc7d4b/manager/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_elastic-operator-c9c86658-4qchz_1899106f-2682-474e-ad41-4dd00dbc7d4b/manager/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-console-operator_console-operator-67c89758df-79mps_2e8b3e0b-d963-4522-9a08-71aee0979479' -> '/tmp/crc-logs-artifacts/pods/openshift-console-operator_console-operator-67c89758df-79mps_2e8b3e0b-d963-4522-9a08-71aee0979479' '/ostree/deploy/rhcos/var/log/pods/openshift-console-operator_console-operator-67c89758df-79mps_2e8b3e0b-d963-4522-9a08-71aee0979479/console-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-console-operator_console-operator-67c89758df-79mps_2e8b3e0b-d963-4522-9a08-71aee0979479/console-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-console-operator_console-operator-67c89758df-79mps_2e8b3e0b-d963-4522-9a08-71aee0979479/console-operator/0.log' -> 
'/tmp/crc-logs-artifacts/pods/openshift-console-operator_console-operator-67c89758df-79mps_2e8b3e0b-d963-4522-9a08-71aee0979479/console-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-cert-syncer' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-cert-syncer' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-cert-syncer/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-cert-syncer/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/1.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/2.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/2.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-recovery-controller' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-recovery-controller' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-recovery-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-recovery-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/cluster-policy-controller' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/cluster-policy-controller' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/cluster-policy-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/cluster-policy-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc' -> 
'/tmp/crc-logs-artifacts/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc' '/ostree/deploy/rhcos/var/log/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc/iptables-alerter' -> '/tmp/crc-logs-artifacts/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc/iptables-alerter' '/ostree/deploy/rhcos/var/log/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc/iptables-alerter/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc/iptables-alerter/0.log' '/ostree/deploy/rhcos/var/log/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62' -> '/tmp/crc-logs-artifacts/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62' '/ostree/deploy/rhcos/var/log/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62/cert-manager-controller' -> '/tmp/crc-logs-artifacts/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62/cert-manager-controller' '/ostree/deploy/rhcos/var/log/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62/cert-manager-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62/cert-manager-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6/pruner' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6/pruner' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6/pruner/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6/pruner/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-54f497555d-gvb6q_a52d6e07-c08e-4424-8a3f-50052c311604' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-54f497555d-gvb6q_a52d6e07-c08e-4424-8a3f-50052c311604' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-54f497555d-gvb6q_a52d6e07-c08e-4424-8a3f-50052c311604/kube-scheduler-operator-container' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-54f497555d-gvb6q_a52d6e07-c08e-4424-8a3f-50052c311604/kube-scheduler-operator-container' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-54f497555d-gvb6q_a52d6e07-c08e-4424-8a3f-50052c311604/kube-scheduler-operator-container/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-54f497555d-gvb6q_a52d6e07-c08e-4424-8a3f-50052c311604/kube-scheduler-operator-container/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd' -> 
'/tmp/crc-logs-artifacts/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd' '/ostree/deploy/rhcos/var/log/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd/openshift-controller-manager-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd/openshift-controller-manager-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd/openshift-controller-manager-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd/openshift-controller-manager-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0' -> '/tmp/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0' '/ostree/deploy/rhcos/var/log/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/approver' -> '/tmp/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/approver' '/ostree/deploy/rhcos/var/log/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/approver/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/approver/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/webhook' -> '/tmp/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/webhook' '/ostree/deploy/rhcos/var/log/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/webhook/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/webhook/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_smart-gateway-operator-5cd794ff55-w8r45_88186169-23e9-44fb-a70c-0f6fe06b2800' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_smart-gateway-operator-5cd794ff55-w8r45_88186169-23e9-44fb-a70c-0f6fe06b2800' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_smart-gateway-operator-5cd794ff55-w8r45_88186169-23e9-44fb-a70c-0f6fe06b2800/operator' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_smart-gateway-operator-5cd794ff55-w8r45_88186169-23e9-44fb-a70c-0f6fe06b2800/operator' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_smart-gateway-operator-5cd794ff55-w8r45_88186169-23e9-44fb-a70c-0f6fe06b2800/operator/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_smart-gateway-operator-5cd794ff55-w8r45_88186169-23e9-44fb-a70c-0f6fe06b2800/operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2' 
'/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/nbdb' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/nbdb' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/nbdb/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/nbdb/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kubecfg-setup' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kubecfg-setup' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kubecfg-setup/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kubecfg-setup/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/sbdb' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/sbdb' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/sbdb/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/sbdb/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-controller' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-controller' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovnkube-controller' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovnkube-controller' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovnkube-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovnkube-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-acl-logging' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-acl-logging' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-acl-logging/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-acl-logging/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-node' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-node' 
'/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-node/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-node/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-ovn-metrics' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-ovn-metrics' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-ovn-metrics/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-ovn-metrics/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/northd' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/northd' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/northd/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/northd/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_marketplace-operator-547dbd544d-6bbtn_c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_marketplace-operator-547dbd544d-6bbtn_c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_marketplace-operator-547dbd544d-6bbtn_c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1/marketplace-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_marketplace-operator-547dbd544d-6bbtn_c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1/marketplace-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_marketplace-operator-547dbd544d-6bbtn_c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1/marketplace-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_marketplace-operator-547dbd544d-6bbtn_c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1/marketplace-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/package-server-manager' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/package-server-manager' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/package-server-manager/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/package-server-manager/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/kube-rbac-proxy' -> 
'/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-565b79b866-6gkgz_dbad8204-9790-4f15-a74c-0149d19a4785' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-565b79b866-6gkgz_dbad8204-9790-4f15-a74c-0149d19a4785' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-565b79b866-6gkgz_dbad8204-9790-4f15-a74c-0149d19a4785/kube-storage-version-migrator-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-565b79b866-6gkgz_dbad8204-9790-4f15-a74c-0149d19a4785/kube-storage-version-migrator-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-565b79b866-6gkgz_dbad8204-9790-4f15-a74c-0149d19a4785/kube-storage-version-migrator-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-565b79b866-6gkgz_dbad8204-9790-4f15-a74c-0149d19a4785/kube-storage-version-migrator-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-console_console-64d44f6ddf-dhfvx_a272b1fd-864b-4107-a4fd-6f6ab82a1d34' -> '/tmp/crc-logs-artifacts/pods/openshift-console_console-64d44f6ddf-dhfvx_a272b1fd-864b-4107-a4fd-6f6ab82a1d34' '/ostree/deploy/rhcos/var/log/pods/openshift-console_console-64d44f6ddf-dhfvx_a272b1fd-864b-4107-a4fd-6f6ab82a1d34/console' -> '/tmp/crc-logs-artifacts/pods/openshift-console_console-64d44f6ddf-dhfvx_a272b1fd-864b-4107-a4fd-6f6ab82a1d34/console' '/ostree/deploy/rhcos/var/log/pods/openshift-console_console-64d44f6ddf-dhfvx_a272b1fd-864b-4107-a4fd-6f6ab82a1d34/console/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-console_console-64d44f6ddf-dhfvx_a272b1fd-864b-4107-a4fd-6f6ab82a1d34/console/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_infrawatch-operators-tv99j_020b4835-c362-478d-b714-bb42757ae9e2' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_infrawatch-operators-tv99j_020b4835-c362-478d-b714-bb42757ae9e2' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_infrawatch-operators-tv99j_020b4835-c362-478d-b714-bb42757ae9e2/registry-server' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_infrawatch-operators-tv99j_020b4835-c362-478d-b714-bb42757ae9e2/registry-server' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_infrawatch-operators-tv99j_020b4835-c362-478d-b714-bb42757ae9e2/registry-server/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_infrawatch-operators-tv99j_020b4835-c362-478d-b714-bb42757ae9e2/registry-server/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad' 
'/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/sg-core' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/sg-core' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/sg-core/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/sg-core/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/bridge' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/bridge' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/bridge/2.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/bridge/2.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/bridge/1.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/bridge/1.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/oauth-proxy' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/oauth-proxy' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/oauth-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/oauth-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/pull' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/pull' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/pull/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/pull/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/util' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/util' 
'/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/util/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/util/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/extract' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/extract' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/extract/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/extract/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/extract' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/extract' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/extract/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/extract/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/util' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/util' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/util/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/util/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/pull' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/pull' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/pull/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/pull/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2' -> 
'/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-cert-syncer' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-cert-syncer' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-cert-syncer/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-cert-syncer/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/wait-for-host-port' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/wait-for-host-port' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/wait-for-host-port/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/wait-for-host-port/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-recovery-controller' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-recovery-controller' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-recovery-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-recovery-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462/olm-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462/olm-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462/olm-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462/olm-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7' -> 
'/tmp/crc-logs-artifacts/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7' '/ostree/deploy/rhcos/var/log/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7/download-server' -> '/tmp/crc-logs-artifacts/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7/download-server' '/ostree/deploy/rhcos/var/log/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7/download-server/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7/download-server/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-image-registry_cluster-image-registry-operator-86c45576b9-rwgjl_1cd09f9c-6a6f-438a-a982-082edc35a55c' -> '/tmp/crc-logs-artifacts/pods/openshift-image-registry_cluster-image-registry-operator-86c45576b9-rwgjl_1cd09f9c-6a6f-438a-a982-082edc35a55c' '/ostree/deploy/rhcos/var/log/pods/openshift-image-registry_cluster-image-registry-operator-86c45576b9-rwgjl_1cd09f9c-6a6f-438a-a982-082edc35a55c/cluster-image-registry-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-image-registry_cluster-image-registry-operator-86c45576b9-rwgjl_1cd09f9c-6a6f-438a-a982-082edc35a55c/cluster-image-registry-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-image-registry_cluster-image-registry-operator-86c45576b9-rwgjl_1cd09f9c-6a6f-438a-a982-082edc35a55c/cluster-image-registry-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-image-registry_cluster-image-registry-operator-86c45576b9-rwgjl_1cd09f9c-6a6f-438a-a982-082edc35a55c/cluster-image-registry-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/util' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/util' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/util/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/util/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/pull' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/pull' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/pull/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/pull/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/extract' -> 
'/tmp/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/extract' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/extract/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/extract/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd' -> '/tmp/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd' '/ostree/deploy/rhcos/var/log/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver-check-endpoints' -> '/tmp/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver-check-endpoints' '/ostree/deploy/rhcos/var/log/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver-check-endpoints/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver-check-endpoints/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/fix-audit-permissions' -> '/tmp/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/fix-audit-permissions' '/ostree/deploy/rhcos/var/log/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/fix-audit-permissions/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/fix-audit-permissions/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver' -> '/tmp/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver' '/ostree/deploy/rhcos/var/log/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver/0.log' '/ostree/deploy/rhcos/var/log/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170' -> '/tmp/crc-logs-artifacts/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170' '/ostree/deploy/rhcos/var/log/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170/cert-manager-webhook' -> '/tmp/crc-logs-artifacts/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170/cert-manager-webhook' '/ostree/deploy/rhcos/var/log/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170/cert-manager-webhook/0.log' -> '/tmp/crc-logs-artifacts/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170/cert-manager-webhook/0.log' '/ostree/deploy/rhcos/var/log/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002' -> 
'/tmp/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002' '/ostree/deploy/rhcos/var/log/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/csi-provisioner' -> '/tmp/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/csi-provisioner' '/ostree/deploy/rhcos/var/log/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/csi-provisioner/0.log' -> '/tmp/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/csi-provisioner/0.log' '/ostree/deploy/rhcos/var/log/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/hostpath-provisioner' -> '/tmp/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/hostpath-provisioner' '/ostree/deploy/rhcos/var/log/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/hostpath-provisioner/0.log' -> '/tmp/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/hostpath-provisioner/0.log' '/ostree/deploy/rhcos/var/log/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/node-driver-registrar' -> '/tmp/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/node-driver-registrar' '/ostree/deploy/rhcos/var/log/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/node-driver-registrar/0.log' -> '/tmp/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/node-driver-registrar/0.log' '/ostree/deploy/rhcos/var/log/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/liveness-probe' -> '/tmp/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/liveness-probe' '/ostree/deploy/rhcos/var/log/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/liveness-probe/0.log' -> '/tmp/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/liveness-probe/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6' '/ostree/deploy/rhcos/var/log/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/ingress-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/ingress-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/ingress-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/ingress-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/kube-rbac-proxy' 
'/ostree/deploy/rhcos/var/log/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/oauth-proxy' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/oauth-proxy' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/oauth-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/oauth-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/init-config-reloader' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/init-config-reloader' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/init-config-reloader/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/init-config-reloader/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/prometheus' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/prometheus' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/prometheus/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/prometheus/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/config-reloader' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/config-reloader' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/config-reloader/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/config-reloader/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371' -> '/tmp/crc-logs-artifacts/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371' '/ostree/deploy/rhcos/var/log/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371/authentication-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371/authentication-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371/authentication-operator/0.log' -> 
'/tmp/crc-logs-artifacts/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371/authentication-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/machine-approver-controller' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/machine-approver-controller' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/machine-approver-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/machine-approver-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd/prometheus-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd/prometheus-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd/prometheus-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd/prometheus-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86' '/ostree/deploy/rhcos/var/log/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86/serve-healthcheck-canary' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86/serve-healthcheck-canary' '/ostree/deploy/rhcos/var/log/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86/serve-healthcheck-canary/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86/serve-healthcheck-canary/0.log' 
'/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/machine-config-controller' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/machine-config-controller' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/machine-config-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/machine-config-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b' -> '/tmp/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b' '/ostree/deploy/rhcos/var/log/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-api' -> '/tmp/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-api' '/ostree/deploy/rhcos/var/log/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-api/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-api/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-config-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-config-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-config-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-config-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f' 
'/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/ovnkube-cluster-manager' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/ovnkube-cluster-manager' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/ovnkube-cluster-manager/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/ovnkube-cluster-manager/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b/cluster-version-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b/cluster-version-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b/cluster-version-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b/cluster-version-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440' -> '/tmp/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440' '/ostree/deploy/rhcos/var/log/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440/check-endpoints' -> '/tmp/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440/check-endpoints' '/ostree/deploy/rhcos/var/log/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440/check-endpoints/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440/check-endpoints/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-ceilometer' -> 
'/tmp/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-ceilometer' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-ceilometer/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-ceilometer/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-collectd' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-collectd' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-collectd/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-collectd/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-network-diagnostics_network-check-target-fhkjl_17b87002-b798-480a-8e17-83053d698239' -> '/tmp/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-target-fhkjl_17b87002-b798-480a-8e17-83053d698239' '/ostree/deploy/rhcos/var/log/pods/openshift-network-diagnostics_network-check-target-fhkjl_17b87002-b798-480a-8e17-83053d698239/network-check-target-container' -> '/tmp/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-target-fhkjl_17b87002-b798-480a-8e17-83053d698239/network-check-target-container' '/ostree/deploy/rhcos/var/log/pods/openshift-network-diagnostics_network-check-target-fhkjl_17b87002-b798-480a-8e17-83053d698239/network-check-target-container/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-target-fhkjl_17b87002-b798-480a-8e17-83053d698239/network-check-target-container/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-content' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-content' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-content/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-content/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-utilities' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-utilities' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-utilities/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-utilities/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/registry-server' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/registry-server' 
'/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/registry-server/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/registry-server/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-server-psb45_d549986a-81c9-4cd0-86b0-61e4b6700ddf' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-server-psb45_d549986a-81c9-4cd0-86b0-61e4b6700ddf' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-server-psb45_d549986a-81c9-4cd0-86b0-61e4b6700ddf/machine-config-server' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-server-psb45_d549986a-81c9-4cd0-86b0-61e4b6700ddf/machine-config-server' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-server-psb45_d549986a-81c9-4cd0-86b0-61e4b6700ddf/machine-config-server/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-server-psb45_d549986a-81c9-4cd0-86b0-61e4b6700ddf/machine-config-server/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-network-console_networking-console-plugin-5ff7774fd9-nljh6_6a9ae5f6-97bd-46ac-bafa-ca1b4452a141' -> '/tmp/crc-logs-artifacts/pods/openshift-network-console_networking-console-plugin-5ff7774fd9-nljh6_6a9ae5f6-97bd-46ac-bafa-ca1b4452a141' '/ostree/deploy/rhcos/var/log/pods/openshift-network-console_networking-console-plugin-5ff7774fd9-nljh6_6a9ae5f6-97bd-46ac-bafa-ca1b4452a141/networking-console-plugin' -> '/tmp/crc-logs-artifacts/pods/openshift-network-console_networking-console-plugin-5ff7774fd9-nljh6_6a9ae5f6-97bd-46ac-bafa-ca1b4452a141/networking-console-plugin' '/ostree/deploy/rhcos/var/log/pods/openshift-network-console_networking-console-plugin-5ff7774fd9-nljh6_6a9ae5f6-97bd-46ac-bafa-ca1b4452a141/networking-console-plugin/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-network-console_networking-console-plugin-5ff7774fd9-nljh6_6a9ae5f6-97bd-46ac-bafa-ca1b4452a141/networking-console-plugin/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/setup' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/setup' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/setup/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/setup/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/kube-rbac-proxy-crio' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/kube-rbac-proxy-crio' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/kube-rbac-proxy-crio/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/kube-rbac-proxy-crio/0.log' 
'/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/sg-core' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/sg-core' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/sg-core/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/sg-core/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/bridge' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/bridge' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/bridge/2.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/bridge/2.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/bridge/1.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/bridge/1.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/oauth-proxy' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/oauth-proxy' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/oauth-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/oauth-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1/collect-profiles' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1/collect-profiles' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1/collect-profiles/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1/collect-profiles/0.log' 
'/ostree/deploy/rhcos/var/log/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183' -> '/tmp/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183' '/ostree/deploy/rhcos/var/log/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/dns-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/dns-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/dns-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/dns-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_collect-profiles-29420265-vsxwc_3ec0e45e-87cc-4b67-b137-ac7179bf7d74' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420265-vsxwc_3ec0e45e-87cc-4b67-b137-ac7179bf7d74' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_collect-profiles-29420265-vsxwc_3ec0e45e-87cc-4b67-b137-ac7179bf7d74/collect-profiles' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420265-vsxwc_3ec0e45e-87cc-4b67-b137-ac7179bf7d74/collect-profiles' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_collect-profiles-29420265-vsxwc_3ec0e45e-87cc-4b67-b137-ac7179bf7d74/collect-profiles/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420265-vsxwc_3ec0e45e-87cc-4b67-b137-ac7179bf7d74/collect-profiles/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5/prometheus-operator-admission-webhook' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5/prometheus-operator-admission-webhook' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5/prometheus-operator-admission-webhook/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5/prometheus-operator-admission-webhook/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm_b0b7331f-5f3a-41e7-84d0-64a9aa478c60' -> 
'/tmp/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm_b0b7331f-5f3a-41e7-84d0-64a9aa478c60' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm_b0b7331f-5f3a-41e7-84d0-64a9aa478c60/prometheus-operator-admission-webhook' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm_b0b7331f-5f3a-41e7-84d0-64a9aa478c60/prometheus-operator-admission-webhook' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm_b0b7331f-5f3a-41e7-84d0-64a9aa478c60/prometheus-operator-admission-webhook/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm_b0b7331f-5f3a-41e7-84d0-64a9aa478c60/prometheus-operator-admission-webhook/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0/curl' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0/curl' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0/curl/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0/curl/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-content' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-content' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-content/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-content/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-utilities' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-utilities' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-utilities/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-utilities/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/registry-server' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/registry-server' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/registry-server/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/registry-server/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9' -> 
'/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-utilities' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-utilities' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-utilities/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-utilities/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-content' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-content' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-content/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-content/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/registry-server' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/registry-server' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/registry-server/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/registry-server/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3/qdr' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3/qdr' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3/qdr/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3/qdr/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab/pruner' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab/pruner' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab/pruner/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab/pruner/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440' 
'/ostree/deploy/rhcos/var/log/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/extract' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/extract' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/extract/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/extract/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/pull' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/pull' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/pull/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/pull/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/util' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/util' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/util/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/util/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/sg-core' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/sg-core' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/sg-core/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/sg-core/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/bridge' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/bridge' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/bridge/2.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/bridge/2.log' 
'/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/bridge/1.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/bridge/1.log' '/ostree/deploy/rhcos/var/log/pods/openshift-apiserver-operator_openshift-apiserver-operator-846cbfc458-q6lj7_837f85a8-fff5-46a0-b1d5-2d51271f415a' -> '/tmp/crc-logs-artifacts/pods/openshift-apiserver-operator_openshift-apiserver-operator-846cbfc458-q6lj7_837f85a8-fff5-46a0-b1d5-2d51271f415a' '/ostree/deploy/rhcos/var/log/pods/openshift-apiserver-operator_openshift-apiserver-operator-846cbfc458-q6lj7_837f85a8-fff5-46a0-b1d5-2d51271f415a/openshift-apiserver-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-apiserver-operator_openshift-apiserver-operator-846cbfc458-q6lj7_837f85a8-fff5-46a0-b1d5-2d51271f415a/openshift-apiserver-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-apiserver-operator_openshift-apiserver-operator-846cbfc458-q6lj7_837f85a8-fff5-46a0-b1d5-2d51271f415a/openshift-apiserver-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-apiserver-operator_openshift-apiserver-operator-846cbfc458-q6lj7_837f85a8-fff5-46a0-b1d5-2d51271f415a/openshift-apiserver-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-content' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-content' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-content/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-content/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/registry-server' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/registry-server' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/registry-server/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/registry-server/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-utilities' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-utilities' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-utilities/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-utilities/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c' 
'/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/bridge' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/bridge' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/bridge/1.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/bridge/1.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/bridge/2.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/bridge/2.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/sg-core' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/sg-core' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/sg-core/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/sg-core/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/oauth-proxy' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/oauth-proxy' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/oauth-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/oauth-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager-operator_kube-controller-manager-operator-69d5f845f8-6lgwk_163e109f-c588-4057-a961-86bcca55948f' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager-operator_kube-controller-manager-operator-69d5f845f8-6lgwk_163e109f-c588-4057-a961-86bcca55948f' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager-operator_kube-controller-manager-operator-69d5f845f8-6lgwk_163e109f-c588-4057-a961-86bcca55948f/kube-controller-manager-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager-operator_kube-controller-manager-operator-69d5f845f8-6lgwk_163e109f-c588-4057-a961-86bcca55948f/kube-controller-manager-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-controller-manager-operator_kube-controller-manager-operator-69d5f845f8-6lgwk_163e109f-c588-4057-a961-86bcca55948f/kube-controller-manager-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-controller-manager-operator_kube-controller-manager-operator-69d5f845f8-6lgwk_163e109f-c588-4057-a961-86bcca55948f/kube-controller-manager-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00' -> 
'/tmp/crc-logs-artifacts/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00' '/ostree/deploy/rhcos/var/log/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00/node-ca' -> '/tmp/crc-logs-artifacts/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00/node-ca' '/ostree/deploy/rhcos/var/log/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00/node-ca/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00/node-ca/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-syncer' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-syncer' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-syncer/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-syncer/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-regeneration-controller' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-regeneration-controller' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-regeneration-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-regeneration-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-insecure-readyz' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-insecure-readyz' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-insecure-readyz/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-insecure-readyz/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-check-endpoints' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-check-endpoints' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-check-endpoints/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-check-endpoints/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/setup' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/setup' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/setup/0.log' -> 
'/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/setup/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elasticsearch' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elasticsearch' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elasticsearch/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elasticsearch/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-init-filesystem' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-init-filesystem' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-init-filesystem/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-init-filesystem/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-suspend' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-suspend' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-suspend/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-suspend/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d/operator' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d/operator' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d/operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d/operator/0.log' 
'/ostree/deploy/rhcos/var/log/pods/openshift-authentication_oauth-openshift-57ffdf54dd-5dg99_0c242c34-d446-4428-b8d7-0b8dbf4137c9' -> '/tmp/crc-logs-artifacts/pods/openshift-authentication_oauth-openshift-57ffdf54dd-5dg99_0c242c34-d446-4428-b8d7-0b8dbf4137c9' '/ostree/deploy/rhcos/var/log/pods/openshift-authentication_oauth-openshift-57ffdf54dd-5dg99_0c242c34-d446-4428-b8d7-0b8dbf4137c9/oauth-openshift' -> '/tmp/crc-logs-artifacts/pods/openshift-authentication_oauth-openshift-57ffdf54dd-5dg99_0c242c34-d446-4428-b8d7-0b8dbf4137c9/oauth-openshift' '/ostree/deploy/rhcos/var/log/pods/openshift-authentication_oauth-openshift-57ffdf54dd-5dg99_0c242c34-d446-4428-b8d7-0b8dbf4137c9/oauth-openshift/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-authentication_oauth-openshift-57ffdf54dd-5dg99_0c242c34-d446-4428-b8d7-0b8dbf4137c9/oauth-openshift/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/machine-config-daemon' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/machine-config-daemon' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/machine-config-daemon/4.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/machine-config-daemon/4.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/machine-config-daemon/5.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/machine-config-daemon/5.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/graceful-termination' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/graceful-termination' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/graceful-termination/0.log' -> 
'/tmp/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/graceful-termination/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/migrator' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/migrator' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/migrator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/migrator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2' -> '/tmp/crc-logs-artifacts/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2' '/ostree/deploy/rhcos/var/log/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2/route-controller-manager' -> '/tmp/crc-logs-artifacts/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2/route-controller-manager' '/ostree/deploy/rhcos/var/log/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2/route-controller-manager/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2/route-controller-manager/0.log' '/ostree/deploy/rhcos/var/log/pods/cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9_4356ed35-799c-4e39-a660-872291edf6cc' -> '/tmp/crc-logs-artifacts/pods/cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9_4356ed35-799c-4e39-a660-872291edf6cc' '/ostree/deploy/rhcos/var/log/pods/cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9_4356ed35-799c-4e39-a660-872291edf6cc/cert-manager-operator' -> '/tmp/crc-logs-artifacts/pods/cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9_4356ed35-799c-4e39-a660-872291edf6cc/cert-manager-operator' '/ostree/deploy/rhcos/var/log/pods/cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9_4356ed35-799c-4e39-a660-872291edf6cc/cert-manager-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9_4356ed35-799c-4e39-a660-872291edf6cc/cert-manager-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c' -> '/tmp/crc-logs-artifacts/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c' '/ostree/deploy/rhcos/var/log/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/fix-audit-permissions' -> '/tmp/crc-logs-artifacts/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/fix-audit-permissions' '/ostree/deploy/rhcos/var/log/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/fix-audit-permissions/0.log' -> 
'/tmp/crc-logs-artifacts/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/fix-audit-permissions/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/oauth-apiserver' -> '/tmp/crc-logs-artifacts/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/oauth-apiserver' '/ostree/deploy/rhcos/var/log/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/oauth-apiserver/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/oauth-apiserver/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011' '/ostree/deploy/rhcos/var/log/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011/router' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011/router' '/ostree/deploy/rhcos/var/log/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011/router/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011/router/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896/control-plane-machine-set-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896/control-plane-machine-set-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896/control-plane-machine-set-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896/control-plane-machine-set-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60/interconnect-operator' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60/interconnect-operator' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60/interconnect-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60/interconnect-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427' -> 
'/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427/catalog-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427/catalog-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427/catalog-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427/catalog-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83/perses-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83/perses-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83/perses-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83/perses-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f' -> '/tmp/crc-logs-artifacts/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f' '/ostree/deploy/rhcos/var/log/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f/cert-manager-cainjector' -> '/tmp/crc-logs-artifacts/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f/cert-manager-cainjector' '/ostree/deploy/rhcos/var/log/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f/cert-manager-cainjector/0.log' -> '/tmp/crc-logs-artifacts/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f/cert-manager-cainjector/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301/prometheus-webhook-snmp' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301/prometheus-webhook-snmp' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301/prometheus-webhook-snmp/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301/prometheus-webhook-snmp/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3' -> 
'/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/kube-multus-additional-cni-plugins' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/kube-multus-additional-cni-plugins' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/kube-multus-additional-cni-plugins/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/kube-multus-additional-cni-plugins/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/routeoverride-cni' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/routeoverride-cni' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/routeoverride-cni/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/routeoverride-cni/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/egress-router-binary-copy' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/egress-router-binary-copy' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/egress-router-binary-copy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/egress-router-binary-copy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni-bincopy' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni-bincopy' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni-bincopy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni-bincopy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/cni-plugins' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/cni-plugins' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/cni-plugins/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/cni-plugins/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni' 
'/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/bond-cni-plugin' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/bond-cni-plugin' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/bond-cni-plugin/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/bond-cni-plugin/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b' -> '/tmp/crc-logs-artifacts/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b' '/ostree/deploy/rhcos/var/log/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b/registry' -> '/tmp/crc-logs-artifacts/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b/registry' '/ostree/deploy/rhcos/var/log/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b/registry/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b/registry/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator-watch' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator-watch' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator-watch/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator-watch/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator/0.log' 
'/ostree/deploy/rhcos/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/05562bed0a58785cbffd80e5e63ed8943b1bccf2f61dbd7cf94aec4efa9e38cf.log' -> '/tmp/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/05562bed0a58785cbffd80e5e63ed8943b1bccf2f61dbd7cf94aec4efa9e38cf.log' '/ostree/deploy/rhcos/var/log/pods/openshift-service-ca_service-ca-74545575db-d69qv_ada44265-dcab-408c-843e-e5c5a45aa138' -> '/tmp/crc-logs-artifacts/pods/openshift-service-ca_service-ca-74545575db-d69qv_ada44265-dcab-408c-843e-e5c5a45aa138' '/ostree/deploy/rhcos/var/log/pods/openshift-service-ca_service-ca-74545575db-d69qv_ada44265-dcab-408c-843e-e5c5a45aa138/service-ca-controller' -> '/tmp/crc-logs-artifacts/pods/openshift-service-ca_service-ca-74545575db-d69qv_ada44265-dcab-408c-843e-e5c5a45aa138/service-ca-controller' '/ostree/deploy/rhcos/var/log/pods/openshift-service-ca_service-ca-74545575db-d69qv_ada44265-dcab-408c-843e-e5c5a45aa138/service-ca-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-service-ca_service-ca-74545575db-d69qv_ada44265-dcab-408c-843e-e5c5a45aa138/service-ca-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/machine-api-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/machine-api-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/machine-api-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/machine-api-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/network-metrics-daemon' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/network-metrics-daemon' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/network-metrics-daemon/0.log' -> 
'/tmp/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/network-metrics-daemon/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f/extract-content' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f/extract-content' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f/registry-server' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f/registry-server' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f/extract-utilities' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f/extract-utilities' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/extract' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/extract' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/extract/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/extract/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/pull' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/pull' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/pull/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/pull/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/util' -> 
'/tmp/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/util' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/util/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/util/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c/packageserver' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c/packageserver' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c/packageserver/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c/packageserver/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b/collect-profiles' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b/collect-profiles' '/ostree/deploy/rhcos/var/log/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b/collect-profiles/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b/collect-profiles/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300/kube-apiserver-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300/kube-apiserver-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300/kube-apiserver-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300/kube-apiserver-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36' 
'/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/kube-multus' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/kube-multus' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/kube-multus/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/kube-multus/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/kube-multus/1.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/kube-multus/1.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00/pruner' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00/pruner' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00/pruner/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00/pruner/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/oauth-proxy' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/oauth-proxy' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/oauth-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/oauth-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/config-reloader' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/config-reloader' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/config-reloader/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/config-reloader/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/init-config-reloader' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/init-config-reloader' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/init-config-reloader/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/init-config-reloader/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/alertmanager' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/alertmanager' 
'/ostree/deploy/rhcos/var/log/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/alertmanager/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/alertmanager/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1' -> '/tmp/crc-logs-artifacts/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1' '/ostree/deploy/rhcos/var/log/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1/service-ca-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1/service-ca-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1/service-ca-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1/service-ca-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c/installer' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c/installer' '/ostree/deploy/rhcos/var/log/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c/installer/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c/installer/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4/operator' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4/operator' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4/operator/0.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4/operator/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/sg-core' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/sg-core' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/sg-core/0.log' -> 
'/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/sg-core/0.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/bridge' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/bridge' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/bridge/2.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/bridge/2.log' '/ostree/deploy/rhcos/var/log/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/bridge/1.log' -> '/tmp/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/bridge/1.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/kube-rbac-proxy/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/machine-config-operator' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/machine-config-operator' '/ostree/deploy/rhcos/var/log/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/machine-config-operator/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/machine-config-operator/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/kube-rbac-proxy' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/kube-rbac-proxy' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/kube-rbac-proxy/0.log' -> 
'/tmp/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/kube-rbac-proxy/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/multus-admission-controller' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/multus-admission-controller' '/ostree/deploy/rhcos/var/log/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/multus-admission-controller/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/multus-admission-controller/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/util' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/util' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/util/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/util/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/pull' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/pull' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/pull/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/pull/0.log' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/extract' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/extract' '/ostree/deploy/rhcos/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/extract/0.log' -> '/tmp/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/extract/0.log' + sudo chown -R core:core /tmp/crc-logs-artifacts home/zuul/zuul-output/logs/ci-framework-data/logs/ci_script_000_copy_logs_from_crc.log0000644000175000017500000002756415115611514030336 0ustar zuulzuulExecuting: program /usr/bin/ssh host api.crc.testing, user core, command sftp OpenSSH_9.9p1, OpenSSL 3.5.1 1 Jul 2025 debug1: Reading configuration data /etc/ssh/ssh_config debug1: Reading configuration data 
/etc/ssh/ssh_config.d/50-redhat.conf debug1: Reading configuration data /etc/crypto-policies/back-ends/openssh.config debug1: configuration requests final Match pass debug1: re-parsing configuration debug1: Reading configuration data /etc/ssh/ssh_config debug1: Reading configuration data /etc/ssh/ssh_config.d/50-redhat.conf debug1: Reading configuration data /etc/crypto-policies/back-ends/openssh.config debug1: Connecting to api.crc.testing [38.102.83.243] port 22. debug1: Connection established. debug1: identity file /home/zuul/.ssh/id_cifw type 2 debug1: identity file /home/zuul/.ssh/id_cifw-cert type -1 debug1: Local version string SSH-2.0-OpenSSH_9.9 debug1: Remote protocol version 2.0, remote software version OpenSSH_8.7 debug1: compat_banner: match: OpenSSH_8.7 pat OpenSSH* compat 0x04000000 debug1: Authenticating to api.crc.testing:22 as 'core' debug1: load_hostkeys: fopen /home/zuul/.ssh/known_hosts2: No such file or directory debug1: load_hostkeys: fopen /etc/ssh/ssh_known_hosts: No such file or directory debug1: load_hostkeys: fopen /etc/ssh/ssh_known_hosts2: No such file or directory debug1: SSH2_MSG_KEXINIT sent debug1: SSH2_MSG_KEXINIT received debug1: kex: algorithm: curve25519-sha256 debug1: kex: host key algorithm: ssh-ed25519 debug1: kex: server->client cipher: aes256-gcm@openssh.com MAC: compression: none debug1: kex: client->server cipher: aes256-gcm@openssh.com MAC: compression: none debug1: kex: curve25519-sha256 need=32 dh_need=32 debug1: kex: curve25519-sha256 need=32 dh_need=32 debug1: expecting SSH2_MSG_KEX_ECDH_REPLY debug1: SSH2_MSG_KEX_ECDH_REPLY received debug1: Server host key: ssh-ed25519 SHA256:tM+Ir+xE/tVVjjvm5tBG65YdZ6CUySl2mZlaxmIIJ1o debug1: load_hostkeys: fopen /home/zuul/.ssh/known_hosts2: No such file or directory debug1: load_hostkeys: fopen /etc/ssh/ssh_known_hosts: No such file or directory debug1: load_hostkeys: fopen /etc/ssh/ssh_known_hosts2: No such file or directory debug1: Host 'api.crc.testing' is known and matches the ED25519 host key. debug1: Found key in /home/zuul/.ssh/known_hosts:20 debug1: ssh_packet_send2_wrapped: resetting send seqnr 3 debug1: rekey out after 4294967296 blocks debug1: SSH2_MSG_NEWKEYS sent debug1: expecting SSH2_MSG_NEWKEYS debug1: ssh_packet_read_poll2: resetting read seqnr 3 debug1: SSH2_MSG_NEWKEYS received debug1: rekey in after 4294967296 blocks debug1: SSH2_MSG_EXT_INFO received debug1: kex_ext_info_client_parse: server-sig-algs= debug1: SSH2_MSG_SERVICE_ACCEPT received debug1: Authentications that can continue: publickey,gssapi-keyex,gssapi-with-mic debug1: Next authentication method: gssapi-with-mic debug1: No credentials were supplied, or the credentials were unavailable or inaccessible No Kerberos credentials available (default cache: KCM:) debug1: No credentials were supplied, or the credentials were unavailable or inaccessible No Kerberos credentials available (default cache: KCM:) debug1: Next authentication method: publickey debug1: Will attempt key: /home/zuul/.ssh/id_cifw ECDSA SHA256:iuyGeko3ENDf9AJE3wRuRITk9jyKoUv2N82gHVnBpQ8 explicit debug1: Offering public key: /home/zuul/.ssh/id_cifw ECDSA SHA256:iuyGeko3ENDf9AJE3wRuRITk9jyKoUv2N82gHVnBpQ8 explicit debug1: Server accepts key: /home/zuul/.ssh/id_cifw ECDSA SHA256:iuyGeko3ENDf9AJE3wRuRITk9jyKoUv2N82gHVnBpQ8 explicit Authenticated to api.crc.testing ([38.102.83.243]:22) using "publickey". 
debug1: pkcs11_del_provider: called, provider_id = (null) debug1: channel 0: new session [client-session] (inactive timeout: 0) debug1: Requesting no-more-sessions@openssh.com debug1: Entering interactive session. debug1: pledge: filesystem debug1: client_input_global_request: rtype hostkeys-00@openssh.com want_reply 0 debug1: client_input_hostkeys: searching /home/zuul/.ssh/known_hosts for api.crc.testing / (none) debug1: client_input_hostkeys: searching /home/zuul/.ssh/known_hosts2 for api.crc.testing / (none) debug1: client_input_hostkeys: hostkeys file /home/zuul/.ssh/known_hosts2 does not exist debug1: client_input_hostkeys: no new or deprecated keys from server debug1: Remote: /var/home/core/.ssh/authorized_keys:28: key options: agent-forwarding port-forwarding pty user-rc x11-forwarding debug1: Remote: /var/home/core/.ssh/authorized_keys:28: key options: agent-forwarding port-forwarding pty user-rc x11-forwarding debug1: Sending subsystem: sftp debug1: pledge: fork scp: debug1: Fetching /tmp/crc-logs-artifacts/ to /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts scp: debug1: truncating at 2197 scp: debug1: truncating at 1956 scp: debug1: truncating at 71 scp: debug1: truncating at 1276 scp: debug1: truncating at 1040 scp: debug1: truncating at 554655 scp: debug1: truncating at 1212 scp: debug1: truncating at 596 scp: debug1: truncating at 1046 scp: debug1: truncating at 682 scp: debug1: truncating at 634697 scp: debug1: truncating at 62917 scp: debug1: truncating at 48877 scp: debug1: truncating at 1709 scp: debug1: truncating at 704 scp: debug1: truncating at 2246 scp: debug1: truncating at 1487 scp: debug1: truncating at 1902 scp: debug1: truncating at 168003 scp: debug1: truncating at 239353 scp: debug1: truncating at 274421 scp: debug1: truncating at 736 scp: debug1: truncating at 62849 scp: debug1: truncating at 71 scp: debug1: truncating at 1955 scp: debug1: truncating at 609 scp: debug1: truncating at 1040 scp: debug1: truncating at 41229 scp: debug1: truncating at 8366 scp: debug1: truncating at 18279 scp: debug1: truncating at 45770 scp: debug1: truncating at 0 scp: debug1: truncating at 82256 scp: debug1: truncating at 664 scp: debug1: truncating at 59214 scp: debug1: truncating at 392 scp: debug1: truncating at 80 scp: debug1: truncating at 404 scp: debug1: truncating at 408 scp: debug1: truncating at 414 scp: debug1: truncating at 411 scp: debug1: truncating at 0 scp: debug1: truncating at 3898 scp: debug1: truncating at 14776 scp: debug1: truncating at 3422 scp: debug1: truncating at 914136 scp: debug1: truncating at 10693 scp: debug1: truncating at 18428 scp: debug1: truncating at 31648 scp: debug1: truncating at 56628 scp: debug1: truncating at 0 scp: debug1: truncating at 65200 scp: debug1: truncating at 18301 scp: debug1: truncating at 1875 scp: debug1: truncating at 72 scp: debug1: truncating at 1212 scp: debug1: truncating at 67695 scp: debug1: truncating at 61357 scp: debug1: truncating at 19072 scp: debug1: truncating at 22905 scp: debug1: truncating at 0 scp: debug1: truncating at 10531 scp: debug1: truncating at 53711 scp: debug1: truncating at 603517 scp: debug1: truncating at 265 scp: debug1: truncating at 22437 scp: debug1: truncating at 116 scp: debug1: truncating at 19128 scp: debug1: truncating at 7058 scp: debug1: truncating at 12768 scp: debug1: truncating at 177802 scp: debug1: truncating at 1481 scp: debug1: truncating at 1344 scp: debug1: truncating at 1042 scp: debug1: truncating at 596 scp: debug1: truncating at 0 scp: debug1: truncating 
at 2575524 scp: debug1: truncating at 0 scp: debug1: truncating at 94462 scp: debug1: truncating at 596 scp: debug1: truncating at 1042 scp: debug1: truncating at 682 scp: debug1: truncating at 71 scp: debug1: truncating at 1956 scp: debug1: truncating at 614 scp: debug1: truncating at 1959 scp: debug1: truncating at 16483 scp: debug1: truncating at 3267152 scp: debug1: truncating at 0 scp: debug1: truncating at 0 scp: debug1: truncating at 675 scp: debug1: truncating at 0 scp: debug1: truncating at 0 scp: debug1: truncating at 4996 scp: debug1: truncating at 1061 scp: debug1: truncating at 1061 scp: debug1: truncating at 736 scp: debug1: truncating at 1040 scp: debug1: truncating at 14141 scp: debug1: truncating at 273 scp: debug1: truncating at 1481 scp: debug1: truncating at 595 scp: debug1: truncating at 1043 scp: debug1: truncating at 1344 scp: debug1: truncating at 1376 scp: debug1: truncating at 101 scp: debug1: truncating at 779 scp: debug1: truncating at 1040 scp: debug1: truncating at 4293693 scp: debug1: truncating at 0 scp: debug1: truncating at 0 scp: debug1: truncating at 61 scp: debug1: truncating at 45845 scp: debug1: truncating at 10988 scp: debug1: truncating at 5845 scp: debug1: truncating at 2852934 scp: debug1: truncating at 1183 scp: debug1: truncating at 37548 scp: debug1: truncating at 34179 scp: debug1: truncating at 0 scp: debug1: truncating at 42743 scp: debug1: truncating at 1212 scp: debug1: truncating at 1730 scp: debug1: truncating at 35710 scp: debug1: truncating at 65419 scp: debug1: truncating at 8366 scp: debug1: truncating at 446922 scp: debug1: truncating at 2383 scp: debug1: truncating at 6397 scp: debug1: truncating at 703 scp: debug1: truncating at 1483 scp: debug1: truncating at 1040 scp: debug1: truncating at 48978 scp: debug1: truncating at 396 scp: debug1: truncating at 1504 scp: debug1: truncating at 54174 scp: debug1: truncating at 122396 scp: debug1: truncating at 4017 scp: debug1: truncating at 111677 scp: debug1: truncating at 0 scp: debug1: truncating at 29935 scp: debug1: truncating at 5285 scp: debug1: truncating at 2578 scp: debug1: truncating at 71 scp: debug1: truncating at 39150 scp: debug1: truncating at 27685 scp: debug1: truncating at 799531 scp: debug1: truncating at 166956 scp: debug1: truncating at 10048 scp: debug1: truncating at 85 scp: debug1: truncating at 8732 scp: debug1: truncating at 71478 scp: debug1: truncating at 71 scp: debug1: truncating at 2146 scp: debug1: truncating at 630 scp: debug1: truncating at 71 scp: debug1: truncating at 2243 scp: debug1: truncating at 1481 scp: debug1: truncating at 596 scp: debug1: truncating at 1077 scp: debug1: truncating at 1340 scp: debug1: truncating at 647 scp: debug1: truncating at 2901 scp: debug1: truncating at 46202 scp: debug1: truncating at 1054 scp: debug1: truncating at 7997 scp: debug1: truncating at 25844 scp: debug1: truncating at 4519 scp: debug1: truncating at 4640 scp: debug1: truncating at 4680 scp: debug1: truncating at 20301 scp: debug1: truncating at 2135874 scp: debug1: truncating at 48930 scp: debug1: truncating at 2357 scp: debug1: truncating at 0 scp: debug1: truncating at 2425 scp: debug1: truncating at 7397476 scp: debug1: truncating at 3698 scp: debug1: truncating at 12165 scp: debug1: truncating at 210115 scp: debug1: truncating at 145342 scp: debug1: truncating at 1900 scp: debug1: truncating at 34645 scp: debug1: truncating at 120 scp: debug1: truncating at 250745 scp: debug1: truncating at 18781 scp: debug1: truncating at 870765 scp: debug1: truncating 
at 78176 scp: debug1: truncating at 12439 scp: debug1: truncating at 97019 scp: debug1: truncating at 152670 scp: debug1: truncating at 789449 scp: debug1: truncating at 96 scp: debug1: truncating at 66795 scp: debug1: truncating at 124861 scp: debug1: truncating at 240 scp: debug1: truncating at 0 scp: debug1: truncating at 0 scp: debug1: truncating at 17964 scp: debug1: truncating at 156 scp: debug1: truncating at 124 scp: debug1: truncating at 0 scp: debug1: truncating at 28478 scp: debug1: truncating at 0 scp: debug1: truncating at 33468 scp: debug1: truncating at 12680 scp: debug1: truncating at 34141 scp: debug1: truncating at 1040 debug1: client_input_channel_req: channel 0 rtype exit-status reply 0 debug1: channel 0: free: client-session, nchannels 1 Transferred: sent 168108, received 32931976 bytes, in 1.2 seconds Bytes per second: sent 137467.6, received 26929590.6 debug1: Exit status 0 home/zuul/zuul-output/logs/ci-framework-data/logs/ansible.log0000644000175000017500000046102415115611520023453 0ustar zuulzuul2025-12-08 17:51:42,655 p=31279 u=zuul n=ansible | Starting galaxy collection install process 2025-12-08 17:51:42,656 p=31279 u=zuul n=ansible | Process install dependency map 2025-12-08 17:51:58,145 p=31279 u=zuul n=ansible | Starting collection install process 2025-12-08 17:51:58,145 p=31279 u=zuul n=ansible | Installing 'cifmw.general:1.0.0+33d5122f' to '/home/zuul/.ansible/collections/ansible_collections/cifmw/general' 2025-12-08 17:51:58,665 p=31279 u=zuul n=ansible | Created collection for cifmw.general:1.0.0+33d5122f at /home/zuul/.ansible/collections/ansible_collections/cifmw/general 2025-12-08 17:51:58,666 p=31279 u=zuul n=ansible | cifmw.general:1.0.0+33d5122f was installed successfully 2025-12-08 17:51:58,666 p=31279 u=zuul n=ansible | Installing 'containers.podman:1.16.2' to '/home/zuul/.ansible/collections/ansible_collections/containers/podman' 2025-12-08 17:51:58,720 p=31279 u=zuul n=ansible | Created collection for containers.podman:1.16.2 at /home/zuul/.ansible/collections/ansible_collections/containers/podman 2025-12-08 17:51:58,720 p=31279 u=zuul n=ansible | containers.podman:1.16.2 was installed successfully 2025-12-08 17:51:58,720 p=31279 u=zuul n=ansible | Installing 'community.general:10.0.1' to '/home/zuul/.ansible/collections/ansible_collections/community/general' 2025-12-08 17:51:59,462 p=31279 u=zuul n=ansible | Created collection for community.general:10.0.1 at /home/zuul/.ansible/collections/ansible_collections/community/general 2025-12-08 17:51:59,462 p=31279 u=zuul n=ansible | community.general:10.0.1 was installed successfully 2025-12-08 17:51:59,462 p=31279 u=zuul n=ansible | Installing 'ansible.posix:1.6.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/posix' 2025-12-08 17:51:59,512 p=31279 u=zuul n=ansible | Created collection for ansible.posix:1.6.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/posix 2025-12-08 17:51:59,513 p=31279 u=zuul n=ansible | ansible.posix:1.6.2 was installed successfully 2025-12-08 17:51:59,513 p=31279 u=zuul n=ansible | Installing 'ansible.utils:5.1.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/utils' 2025-12-08 17:51:59,608 p=31279 u=zuul n=ansible | Created collection for ansible.utils:5.1.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/utils 2025-12-08 17:51:59,608 p=31279 u=zuul n=ansible | ansible.utils:5.1.2 was installed successfully 2025-12-08 17:51:59,608 p=31279 u=zuul n=ansible | Installing 'community.libvirt:1.3.0' to 
'/home/zuul/.ansible/collections/ansible_collections/community/libvirt' 2025-12-08 17:51:59,631 p=31279 u=zuul n=ansible | Created collection for community.libvirt:1.3.0 at /home/zuul/.ansible/collections/ansible_collections/community/libvirt 2025-12-08 17:51:59,631 p=31279 u=zuul n=ansible | community.libvirt:1.3.0 was installed successfully 2025-12-08 17:51:59,631 p=31279 u=zuul n=ansible | Installing 'community.crypto:2.22.3' to '/home/zuul/.ansible/collections/ansible_collections/community/crypto' 2025-12-08 17:51:59,768 p=31279 u=zuul n=ansible | Created collection for community.crypto:2.22.3 at /home/zuul/.ansible/collections/ansible_collections/community/crypto 2025-12-08 17:51:59,768 p=31279 u=zuul n=ansible | community.crypto:2.22.3 was installed successfully 2025-12-08 17:51:59,768 p=31279 u=zuul n=ansible | Installing 'kubernetes.core:5.0.0' to '/home/zuul/.ansible/collections/ansible_collections/kubernetes/core' 2025-12-08 17:51:59,885 p=31279 u=zuul n=ansible | Created collection for kubernetes.core:5.0.0 at /home/zuul/.ansible/collections/ansible_collections/kubernetes/core 2025-12-08 17:51:59,885 p=31279 u=zuul n=ansible | kubernetes.core:5.0.0 was installed successfully 2025-12-08 17:51:59,885 p=31279 u=zuul n=ansible | Installing 'ansible.netcommon:7.1.0' to '/home/zuul/.ansible/collections/ansible_collections/ansible/netcommon' 2025-12-08 17:51:59,954 p=31279 u=zuul n=ansible | Created collection for ansible.netcommon:7.1.0 at /home/zuul/.ansible/collections/ansible_collections/ansible/netcommon 2025-12-08 17:51:59,954 p=31279 u=zuul n=ansible | ansible.netcommon:7.1.0 was installed successfully 2025-12-08 17:51:59,954 p=31279 u=zuul n=ansible | Installing 'openstack.config_template:2.1.1' to '/home/zuul/.ansible/collections/ansible_collections/openstack/config_template' 2025-12-08 17:51:59,971 p=31279 u=zuul n=ansible | Created collection for openstack.config_template:2.1.1 at /home/zuul/.ansible/collections/ansible_collections/openstack/config_template 2025-12-08 17:51:59,971 p=31279 u=zuul n=ansible | openstack.config_template:2.1.1 was installed successfully 2025-12-08 17:51:59,971 p=31279 u=zuul n=ansible | Installing 'junipernetworks.junos:9.1.0' to '/home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos' 2025-12-08 17:52:00,216 p=31279 u=zuul n=ansible | Created collection for junipernetworks.junos:9.1.0 at /home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos 2025-12-08 17:52:00,216 p=31279 u=zuul n=ansible | junipernetworks.junos:9.1.0 was installed successfully 2025-12-08 17:52:00,216 p=31279 u=zuul n=ansible | Installing 'cisco.ios:9.0.3' to '/home/zuul/.ansible/collections/ansible_collections/cisco/ios' 2025-12-08 17:52:00,466 p=31279 u=zuul n=ansible | Created collection for cisco.ios:9.0.3 at /home/zuul/.ansible/collections/ansible_collections/cisco/ios 2025-12-08 17:52:00,466 p=31279 u=zuul n=ansible | cisco.ios:9.0.3 was installed successfully 2025-12-08 17:52:00,466 p=31279 u=zuul n=ansible | Installing 'mellanox.onyx:1.0.0' to '/home/zuul/.ansible/collections/ansible_collections/mellanox/onyx' 2025-12-08 17:52:00,499 p=31279 u=zuul n=ansible | Created collection for mellanox.onyx:1.0.0 at /home/zuul/.ansible/collections/ansible_collections/mellanox/onyx 2025-12-08 17:52:00,499 p=31279 u=zuul n=ansible | mellanox.onyx:1.0.0 was installed successfully 2025-12-08 17:52:00,499 p=31279 u=zuul n=ansible | Installing 'community.okd:4.0.0' to '/home/zuul/.ansible/collections/ansible_collections/community/okd' 2025-12-08 
17:52:00,527 p=31279 u=zuul n=ansible | Created collection for community.okd:4.0.0 at /home/zuul/.ansible/collections/ansible_collections/community/okd 2025-12-08 17:52:00,527 p=31279 u=zuul n=ansible | community.okd:4.0.0 was installed successfully 2025-12-08 17:52:00,527 p=31279 u=zuul n=ansible | Installing '@NAMESPACE@.@NAME@:3.1.4' to '/home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@' 2025-12-08 17:52:00,619 p=31279 u=zuul n=ansible | Created collection for @NAMESPACE@.@NAME@:3.1.4 at /home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@ 2025-12-08 17:52:00,619 p=31279 u=zuul n=ansible | @NAMESPACE@.@NAME@:3.1.4 was installed successfully 2025-12-08 17:52:08,760 p=31902 u=zuul n=ansible | PLAY [Bootstrap playbook] ****************************************************** 2025-12-08 17:52:08,776 p=31902 u=zuul n=ansible | TASK [Gathering Facts ] ******************************************************** 2025-12-08 17:52:08,776 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:08 +0000 (0:00:00.032) 0:00:00.032 ******* 2025-12-08 17:52:08,776 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:08 +0000 (0:00:00.031) 0:00:00.031 ******* 2025-12-08 17:52:10,015 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,052 p=31902 u=zuul n=ansible | TASK [Set custom cifmw PATH reusable fact cifmw_path={{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}, cacheable=True] *** 2025-12-08 17:52:10,053 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:01.276) 0:00:01.308 ******* 2025-12-08 17:52:10,053 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:01.276) 0:00:01.307 ******* 2025-12-08 17:52:10,080 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,086 p=31902 u=zuul n=ansible | TASK [Get customized parameters ci_framework_params={{ hostvars[inventory_hostname] | dict2items | selectattr("key", "match", "^(cifmw|pre|post)_(?!install_yamls|openshift_token|openshift_login|openshift_kubeconfig).*") | list | items2dict }}] *** 2025-12-08 17:52:10,086 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.033) 0:00:01.342 ******* 2025-12-08 17:52:10,086 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.033) 0:00:01.341 ******* 2025-12-08 17:52:10,129 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,135 p=31902 u=zuul n=ansible | TASK [install_ca : Ensure target directory exists path={{ cifmw_install_ca_trust_dir }}, state=directory, mode=0755] *** 2025-12-08 17:52:10,135 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.048) 0:00:01.391 ******* 2025-12-08 17:52:10,135 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.048) 0:00:01.390 ******* 2025-12-08 17:52:10,496 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:10,506 p=31902 u=zuul n=ansible | TASK [install_ca : Install internal CA from url url={{ cifmw_install_ca_url }}, dest={{ cifmw_install_ca_trust_dir }}, validate_certs={{ cifmw_install_ca_url_validate_certs | default(omit) }}, mode=0644] *** 2025-12-08 17:52:10,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.370) 0:00:01.762 ******* 2025-12-08 17:52:10,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.370) 0:00:01.760 ******* 2025-12-08 17:52:10,533 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:10,543 p=31902 u=zuul 
n=ansible | TASK [install_ca : Install custom CA bundle from inline dest={{ cifmw_install_ca_trust_dir }}/cifmw_inline_ca_bundle.crt, content={{ cifmw_install_ca_bundle_inline }}, mode=0644] *** 2025-12-08 17:52:10,544 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.037) 0:00:01.799 ******* 2025-12-08 17:52:10,544 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.037) 0:00:01.798 ******* 2025-12-08 17:52:10,574 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:10,584 p=31902 u=zuul n=ansible | TASK [install_ca : Install custom CA bundle from file dest={{ cifmw_install_ca_trust_dir }}/{{ cifmw_install_ca_bundle_src | basename }}, src={{ cifmw_install_ca_bundle_src }}, mode=0644] *** 2025-12-08 17:52:10,584 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.040) 0:00:01.839 ******* 2025-12-08 17:52:10,584 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.040) 0:00:01.838 ******* 2025-12-08 17:52:10,619 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:10,633 p=31902 u=zuul n=ansible | TASK [install_ca : Update ca bundle _raw_params=update-ca-trust] *************** 2025-12-08 17:52:10,634 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.049) 0:00:01.889 ******* 2025-12-08 17:52:10,634 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:10 +0000 (0:00:00.049) 0:00:01.888 ******* 2025-12-08 17:52:12,273 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:12,285 p=31902 u=zuul n=ansible | TASK [repo_setup : Ensure directories are present path={{ cifmw_repo_setup_basedir }}/{{ item }}, state=directory, mode=0755] *** 2025-12-08 17:52:12,285 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:01.651) 0:00:03.540 ******* 2025-12-08 17:52:12,285 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:01.651) 0:00:03.539 ******* 2025-12-08 17:52:12,519 p=31902 u=zuul n=ansible | changed: [localhost] => (item=tmp) 2025-12-08 17:52:12,700 p=31902 u=zuul n=ansible | changed: [localhost] => (item=artifacts/repositories) 2025-12-08 17:52:12,886 p=31902 u=zuul n=ansible | changed: [localhost] => (item=venv/repo_setup) 2025-12-08 17:52:12,901 p=31902 u=zuul n=ansible | TASK [repo_setup : Make sure git-core package is installed name=git-core, state=present] *** 2025-12-08 17:52:12,901 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:00.616) 0:00:04.157 ******* 2025-12-08 17:52:12,901 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:12 +0000 (0:00:00.616) 0:00:04.156 ******* 2025-12-08 17:52:13,882 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:13,891 p=31902 u=zuul n=ansible | TASK [repo_setup : Get repo-setup repository accept_hostkey=True, dest={{ cifmw_repo_setup_basedir }}/tmp/repo-setup, repo={{ cifmw_repo_setup_src }}] *** 2025-12-08 17:52:13,891 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:13 +0000 (0:00:00.990) 0:00:05.147 ******* 2025-12-08 17:52:13,892 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:13 +0000 (0:00:00.990) 0:00:05.146 ******* 2025-12-08 17:52:16,138 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:16,145 p=31902 u=zuul n=ansible | TASK [repo_setup : Initialize python venv and install requirements virtualenv={{ cifmw_repo_setup_venv }}, requirements={{ cifmw_repo_setup_basedir }}/tmp/repo-setup/requirements.txt, virtualenv_command=python3 -m venv --system-site-packages --upgrade-deps] *** 
2025-12-08 17:52:16,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:16 +0000 (0:00:02.253) 0:00:07.400 ******* 2025-12-08 17:52:16,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:16 +0000 (0:00:02.253) 0:00:07.399 ******* 2025-12-08 17:52:24,622 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:24,630 p=31902 u=zuul n=ansible | TASK [repo_setup : Install repo-setup package chdir={{ cifmw_repo_setup_basedir }}/tmp/repo-setup, creates={{ cifmw_repo_setup_venv }}/bin/repo-setup, _raw_params={{ cifmw_repo_setup_venv }}/bin/python setup.py install] *** 2025-12-08 17:52:24,630 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:24 +0000 (0:00:08.485) 0:00:15.886 ******* 2025-12-08 17:52:24,630 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:24 +0000 (0:00:08.485) 0:00:15.884 ******* 2025-12-08 17:52:25,496 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:25,506 p=31902 u=zuul n=ansible | TASK [repo_setup : Set cifmw_repo_setup_dlrn_hash_tag from content provider cifmw_repo_setup_dlrn_hash_tag={{ content_provider_dlrn_md5_hash }}] *** 2025-12-08 17:52:25,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.875) 0:00:16.761 ******* 2025-12-08 17:52:25,506 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.875) 0:00:16.760 ******* 2025-12-08 17:52:25,538 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:25,547 p=31902 u=zuul n=ansible | TASK [repo_setup : Run repo-setup _raw_params={{ cifmw_repo_setup_venv }}/bin/repo-setup {{ cifmw_repo_setup_promotion }} {{ cifmw_repo_setup_additional_repos }} -d {{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }} -b {{ cifmw_repo_setup_branch }} --rdo-mirror {{ cifmw_repo_setup_rdo_mirror }} {% if cifmw_repo_setup_dlrn_hash_tag | length > 0 %} --dlrn-hash-tag {{ cifmw_repo_setup_dlrn_hash_tag }} {% endif %} -o {{ cifmw_repo_setup_output }}] *** 2025-12-08 17:52:25,547 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.041) 0:00:16.803 ******* 2025-12-08 17:52:25,547 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:25 +0000 (0:00:00.041) 0:00:16.801 ******* 2025-12-08 17:52:26,188 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:26,195 p=31902 u=zuul n=ansible | TASK [repo_setup : Get component repo url={{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/component/{{ cifmw_repo_setup_component_name }}/{{ cifmw_repo_setup_component_promotion_tag }}/delorean.repo, dest={{ cifmw_repo_setup_output }}/{{ cifmw_repo_setup_component_name }}_{{ cifmw_repo_setup_component_promotion_tag }}_delorean.repo, mode=0644] *** 2025-12-08 17:52:26,195 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.647) 0:00:17.451 ******* 2025-12-08 17:52:26,195 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.647) 0:00:17.449 ******* 2025-12-08 17:52:26,242 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:26,255 p=31902 u=zuul n=ansible | TASK [repo_setup : Rename component repo path={{ cifmw_repo_setup_output }}/{{ cifmw_repo_setup_component_name }}_{{ cifmw_repo_setup_component_promotion_tag }}_delorean.repo, regexp=delorean-component-{{ cifmw_repo_setup_component_name }}, replace={{ cifmw_repo_setup_component_name }}-{{ cifmw_repo_setup_component_promotion_tag }}] *** 2025-12-08 17:52:26,256 p=31902 u=zuul n=ansible | Monday 08 December 
2025 17:52:26 +0000 (0:00:00.060) 0:00:17.511 ******* 2025-12-08 17:52:26,256 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.510 ******* 2025-12-08 17:52:26,306 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:26,323 p=31902 u=zuul n=ansible | TASK [repo_setup : Disable component repo in current-podified dlrn repo path={{ cifmw_repo_setup_output }}/delorean.repo, section=delorean-component-{{ cifmw_repo_setup_component_name }}, option=enabled, value=0, mode=0644] *** 2025-12-08 17:52:26,323 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.067) 0:00:17.579 ******* 2025-12-08 17:52:26,323 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.067) 0:00:17.578 ******* 2025-12-08 17:52:26,371 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:26,384 p=31902 u=zuul n=ansible | TASK [repo_setup : Run repo-setup-get-hash _raw_params={{ cifmw_repo_setup_venv }}/bin/repo-setup-get-hash --dlrn-url {{ cifmw_repo_setup_dlrn_uri[:-1] }} --os-version {{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }} --release {{ cifmw_repo_setup_branch }} {% if cifmw_repo_setup_component_name | length > 0 -%} --component {{ cifmw_repo_setup_component_name }} --tag {{ cifmw_repo_setup_component_promotion_tag }} {% else -%} --tag {{cifmw_repo_setup_promotion }} {% endif -%} {% if (cifmw_repo_setup_dlrn_hash_tag | length > 0) and (cifmw_repo_setup_component_name | length <= 0) -%} --dlrn-hash-tag {{ cifmw_repo_setup_dlrn_hash_tag }} {% endif -%} --json] *** 2025-12-08 17:52:26,384 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.640 ******* 2025-12-08 17:52:26,384 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.060) 0:00:17.638 ******* 2025-12-08 17:52:26,908 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:26,917 p=31902 u=zuul n=ansible | TASK [repo_setup : Dump full hash in delorean.repo.md5 file content={{ _repo_setup_json['full_hash'] }} , dest={{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5, mode=0644] *** 2025-12-08 17:52:26,917 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.533) 0:00:18.173 ******* 2025-12-08 17:52:26,917 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:26 +0000 (0:00:00.533) 0:00:18.172 ******* 2025-12-08 17:52:27,629 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:27,635 p=31902 u=zuul n=ansible | TASK [repo_setup : Dump current-podified hash url={{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/current-podified/delorean.repo.md5, dest={{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5, mode=0644] *** 2025-12-08 17:52:27,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.717) 0:00:18.891 ******* 2025-12-08 17:52:27,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.717) 0:00:18.889 ******* 2025-12-08 17:52:27,662 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,668 p=31902 u=zuul n=ansible | TASK [repo_setup : Slurp current podified hash src={{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5] *** 2025-12-08 17:52:27,668 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.033) 0:00:18.924 ******* 2025-12-08 17:52:27,668 p=31902 u=zuul n=ansible | Monday 08 December 2025 
17:52:27 +0000 (0:00:00.033) 0:00:18.923 ******* 2025-12-08 17:52:27,694 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,701 p=31902 u=zuul n=ansible | TASK [repo_setup : Update the value of full_hash _repo_setup_json={{ _repo_setup_json | combine({'full_hash': _hash}, recursive=true) }}] *** 2025-12-08 17:52:27,701 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.032) 0:00:18.957 ******* 2025-12-08 17:52:27,701 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.032) 0:00:18.955 ******* 2025-12-08 17:52:27,728 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,737 p=31902 u=zuul n=ansible | TASK [repo_setup : Export hashes facts for further use cifmw_repo_setup_full_hash={{ _repo_setup_json['full_hash'] }}, cifmw_repo_setup_commit_hash={{ _repo_setup_json['commit_hash'] }}, cifmw_repo_setup_distro_hash={{ _repo_setup_json['distro_hash'] }}, cifmw_repo_setup_extended_hash={{ _repo_setup_json['extended_hash'] }}, cifmw_repo_setup_dlrn_api_url={{ _repo_setup_json['dlrn_api_url'] }}, cifmw_repo_setup_dlrn_url={{ _repo_setup_json['dlrn_url'] }}, cifmw_repo_setup_release={{ _repo_setup_json['release'] }}, cacheable=True] *** 2025-12-08 17:52:27,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.035) 0:00:18.993 ******* 2025-12-08 17:52:27,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.035) 0:00:18.991 ******* 2025-12-08 17:52:27,784 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:27,792 p=31902 u=zuul n=ansible | TASK [repo_setup : Create download directory path={{ cifmw_repo_setup_rhos_release_path }}, state=directory, mode=0755] *** 2025-12-08 17:52:27,792 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.055) 0:00:19.048 ******* 2025-12-08 17:52:27,793 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.055) 0:00:19.047 ******* 2025-12-08 17:52:27,814 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,821 p=31902 u=zuul n=ansible | TASK [repo_setup : Print the URL to request msg={{ cifmw_repo_setup_rhos_release_rpm }}] *** 2025-12-08 17:52:27,822 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.029) 0:00:19.077 ******* 2025-12-08 17:52:27,822 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.029) 0:00:19.076 ******* 2025-12-08 17:52:27,847 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,856 p=31902 u=zuul n=ansible | TASK [Download the RPM name=krb_request] *************************************** 2025-12-08 17:52:27,856 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.034) 0:00:19.112 ******* 2025-12-08 17:52:27,856 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.034) 0:00:19.111 ******* 2025-12-08 17:52:27,873 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,881 p=31902 u=zuul n=ansible | TASK [repo_setup : Install RHOS Release tool name={{ cifmw_repo_setup_rhos_release_rpm if cifmw_repo_setup_rhos_release_rpm is not url else cifmw_krb_request_out.path }}, state=present, disable_gpg_check={{ cifmw_repo_setup_rhos_release_gpg_check | bool }}] *** 2025-12-08 17:52:27,881 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.024) 0:00:19.137 ******* 2025-12-08 17:52:27,881 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.024) 0:00:19.135 ******* 2025-12-08 17:52:27,898 p=31902 u=zuul 
n=ansible | skipping: [localhost] 2025-12-08 17:52:27,907 p=31902 u=zuul n=ansible | TASK [repo_setup : Get rhos-release tool version _raw_params=rhos-release --version] *** 2025-12-08 17:52:27,907 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.026) 0:00:19.163 ******* 2025-12-08 17:52:27,907 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.026) 0:00:19.161 ******* 2025-12-08 17:52:27,924 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,932 p=31902 u=zuul n=ansible | TASK [repo_setup : Print rhos-release tool version msg={{ rr_version.stdout }}] *** 2025-12-08 17:52:27,933 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.025) 0:00:19.188 ******* 2025-12-08 17:52:27,933 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.025) 0:00:19.187 ******* 2025-12-08 17:52:27,954 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,963 p=31902 u=zuul n=ansible | TASK [repo_setup : Generate repos using rhos-release {{ cifmw_repo_setup_rhos_release_args }} _raw_params=rhos-release {{ cifmw_repo_setup_rhos_release_args }} \ -t {{ cifmw_repo_setup_output }}] *** 2025-12-08 17:52:27,963 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.030) 0:00:19.219 ******* 2025-12-08 17:52:27,964 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.030) 0:00:19.218 ******* 2025-12-08 17:52:27,976 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:27,985 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for /etc/ci/mirror_info.sh path=/etc/ci/mirror_info.sh] *** 2025-12-08 17:52:27,985 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.021) 0:00:19.240 ******* 2025-12-08 17:52:27,985 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:27 +0000 (0:00:00.021) 0:00:19.239 ******* 2025-12-08 17:52:28,210 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:28,218 p=31902 u=zuul n=ansible | TASK [repo_setup : Use RDO proxy mirrors chdir={{ cifmw_repo_setup_output }}, _raw_params=set -o pipefail source /etc/ci/mirror_info.sh sed -i -e "s|https://trunk.rdoproject.org|$NODEPOOL_RDO_PROXY|g" *.repo ] *** 2025-12-08 17:52:28,218 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.233) 0:00:19.474 ******* 2025-12-08 17:52:28,218 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.233) 0:00:19.472 ******* 2025-12-08 17:52:28,449 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:28,466 p=31902 u=zuul n=ansible | TASK [repo_setup : Use RDO CentOS mirrors (remove CentOS 10 conditional when Nodepool mirrors exist) chdir={{ cifmw_repo_setup_output }}, _raw_params=set -o pipefail source /etc/ci/mirror_info.sh sed -i -e "s|http://mirror.stream.centos.org|$NODEPOOL_CENTOS_MIRROR|g" *.repo ] *** 2025-12-08 17:52:28,466 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.248) 0:00:19.722 ******* 2025-12-08 17:52:28,467 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.248) 0:00:19.721 ******* 2025-12-08 17:52:28,706 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:28,721 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for gating.repo file on content provider url=http://{{ content_provider_registry_ip }}:8766/gating.repo] *** 2025-12-08 17:52:28,722 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.255) 0:00:19.977 ******* 2025-12-08 17:52:28,722 p=31902 
u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.255) 0:00:19.976 ******* 2025-12-08 17:52:28,749 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,755 p=31902 u=zuul n=ansible | TASK [repo_setup : Populate gating repo from content provider ip content=[gating-repo] baseurl=http://{{ content_provider_registry_ip }}:8766/ enabled=1 gpgcheck=0 priority=1 , dest={{ cifmw_repo_setup_output }}/gating.repo, mode=0644] *** 2025-12-08 17:52:28,755 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.011 ******* 2025-12-08 17:52:28,755 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.010 ******* 2025-12-08 17:52:28,784 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,790 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for DLRN repo at the destination path={{ cifmw_repo_setup_output }}/delorean.repo] *** 2025-12-08 17:52:28,790 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.034) 0:00:20.046 ******* 2025-12-08 17:52:28,790 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.034) 0:00:20.045 ******* 2025-12-08 17:52:28,817 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,824 p=31902 u=zuul n=ansible | TASK [repo_setup : Lower the priority of DLRN repos to allow installation from gating repo path={{ cifmw_repo_setup_output }}/delorean.repo, regexp=priority=1, replace=priority=20] *** 2025-12-08 17:52:28,824 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.079 ******* 2025-12-08 17:52:28,824 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.033) 0:00:20.078 ******* 2025-12-08 17:52:28,861 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,867 p=31902 u=zuul n=ansible | TASK [repo_setup : Check for DLRN component repo path={{ cifmw_repo_setup_output }}/{{ _comp_repo }}] *** 2025-12-08 17:52:28,867 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.043) 0:00:20.123 ******* 2025-12-08 17:52:28,867 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.043) 0:00:20.122 ******* 2025-12-08 17:52:28,894 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,900 p=31902 u=zuul n=ansible | TASK [repo_setup : Lower the priority of componennt repos to allow installation from gating repo path={{ cifmw_repo_setup_output }}//{{ _comp_repo }}, regexp=priority=1, replace=priority=2] *** 2025-12-08 17:52:28,900 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.032) 0:00:20.155 ******* 2025-12-08 17:52:28,900 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.032) 0:00:20.154 ******* 2025-12-08 17:52:28,939 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:52:28,945 p=31902 u=zuul n=ansible | TASK [repo_setup : Find existing repos from /etc/yum.repos.d directory paths=/etc/yum.repos.d/, patterns=*.repo, recurse=False] *** 2025-12-08 17:52:28,945 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.045) 0:00:20.201 ******* 2025-12-08 17:52:28,945 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:28 +0000 (0:00:00.045) 0:00:20.199 ******* 2025-12-08 17:52:29,252 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:29,267 p=31902 u=zuul n=ansible | TASK [repo_setup : Remove existing repos from /etc/yum.repos.d directory path={{ item }}, state=absent] *** 2025-12-08 17:52:29,267 p=31902 
u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.321) 0:00:20.523 ******* 2025-12-08 17:52:29,267 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.322) 0:00:20.521 ******* 2025-12-08 17:52:29,525 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/etc/yum.repos.d/centos-addons.repo) 2025-12-08 17:52:29,723 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/etc/yum.repos.d/centos.repo) 2025-12-08 17:52:29,733 p=31902 u=zuul n=ansible | TASK [repo_setup : Cleanup existing metadata _raw_params=dnf clean metadata] *** 2025-12-08 17:52:29,733 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.465) 0:00:20.988 ******* 2025-12-08 17:52:29,733 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:29 +0000 (0:00:00.465) 0:00:20.987 ******* 2025-12-08 17:52:30,222 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:30,236 p=31902 u=zuul n=ansible | TASK [repo_setup : Copy generated repos to /etc/yum.repos.d directory mode=0755, remote_src=True, src={{ cifmw_repo_setup_output }}/, dest=/etc/yum.repos.d] *** 2025-12-08 17:52:30,237 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.503) 0:00:21.492 ******* 2025-12-08 17:52:30,237 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.503) 0:00:21.491 ******* 2025-12-08 17:52:30,666 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:30,695 p=31902 u=zuul n=ansible | TASK [ci_setup : Gather variables for each operating system _raw_params={{ item }}] *** 2025-12-08 17:52:30,695 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.458) 0:00:21.950 ******* 2025-12-08 17:52:30,695 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.458) 0:00:21.949 ******* 2025-12-08 17:52:30,749 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/ci_setup/vars/redhat.yml) 2025-12-08 17:52:30,760 p=31902 u=zuul n=ansible | TASK [ci_setup : List packages to install var=cifmw_ci_setup_packages] ********* 2025-12-08 17:52:30,760 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.064) 0:00:22.015 ******* 2025-12-08 17:52:30,760 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.064) 0:00:22.014 ******* 2025-12-08 17:52:30,787 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_ci_setup_packages: - bash-completion - ca-certificates - git-core - make - tar - tmux - python3-pip 2025-12-08 17:52:30,796 p=31902 u=zuul n=ansible | TASK [ci_setup : Install needed packages name={{ cifmw_ci_setup_packages }}, state=latest] *** 2025-12-08 17:52:30,796 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.036) 0:00:22.052 ******* 2025-12-08 17:52:30,796 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:30 +0000 (0:00:00.036) 0:00:22.051 ******* 2025-12-08 17:52:58,113 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:58,120 p=31902 u=zuul n=ansible | TASK [ci_setup : Gather version of openshift client _raw_params=oc version --client -o yaml] *** 2025-12-08 17:52:58,120 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:27.323) 0:00:49.376 ******* 2025-12-08 17:52:58,120 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:27.323) 0:00:49.374 ******* 2025-12-08 17:52:58,331 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:52:58,340 p=31902 u=zuul n=ansible | TASK [ci_setup : Ensure 
openshift client install path is present path={{ cifmw_ci_setup_oc_install_path }}, state=directory, mode=0755] *** 2025-12-08 17:52:58,340 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.220) 0:00:49.596 ******* 2025-12-08 17:52:58,341 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.220) 0:00:49.595 ******* 2025-12-08 17:52:58,535 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:52:58,542 p=31902 u=zuul n=ansible | TASK [ci_setup : Install openshift client src={{ cifmw_ci_setup_openshift_client_download_uri }}/{{ cifmw_ci_setup_openshift_client_version }}/openshift-client-linux.tar.gz, dest={{ cifmw_ci_setup_oc_install_path }}, remote_src=True, mode=0755, creates={{ cifmw_ci_setup_oc_install_path }}/oc] *** 2025-12-08 17:52:58,542 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.201) 0:00:49.798 ******* 2025-12-08 17:52:58,542 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:52:58 +0000 (0:00:00.201) 0:00:49.797 ******* 2025-12-08 17:53:03,845 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:03,857 p=31902 u=zuul n=ansible | TASK [ci_setup : Add the OC path to cifmw_path if needed cifmw_path={{ cifmw_ci_setup_oc_install_path }}:{{ ansible_env.PATH }}, cacheable=True] *** 2025-12-08 17:53:03,857 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:05.315) 0:00:55.113 ******* 2025-12-08 17:53:03,857 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:05.315) 0:00:55.112 ******* 2025-12-08 17:53:03,880 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:03,890 p=31902 u=zuul n=ansible | TASK [ci_setup : Create completion file] *************************************** 2025-12-08 17:53:03,890 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:00.033) 0:00:55.146 ******* 2025-12-08 17:53:03,890 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:03 +0000 (0:00:00.033) 0:00:55.145 ******* 2025-12-08 17:53:04,265 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:04,274 p=31902 u=zuul n=ansible | TASK [ci_setup : Source completion from within .bashrc create=True, mode=0644, path={{ ansible_user_dir }}/.bashrc, block=if [ -f ~/.oc_completion ]; then source ~/.oc_completion fi] *** 2025-12-08 17:53:04,274 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.383) 0:00:55.529 ******* 2025-12-08 17:53:04,274 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.383) 0:00:55.528 ******* 2025-12-08 17:53:04,625 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:04,635 p=31902 u=zuul n=ansible | TASK [ci_setup : Check rhsm status _raw_params=subscription-manager status] **** 2025-12-08 17:53:04,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.361) 0:00:55.891 ******* 2025-12-08 17:53:04,635 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.361) 0:00:55.889 ******* 2025-12-08 17:53:04,655 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,665 p=31902 u=zuul n=ansible | TASK [ci_setup : Gather the repos to be enabled _repos={{ cifmw_ci_setup_rhel_rhsm_default_repos + (cifmw_ci_setup_rhel_rhsm_extra_repos | default([])) }}] *** 2025-12-08 17:53:04,665 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.030) 0:00:55.921 ******* 2025-12-08 17:53:04,665 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.030) 0:00:55.919 ******* 
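The ci_setup tasks logged above create the client install directory, unpack openshift-client-linux.tar.gz into it, and wire up bash completion through an ~/.oc_completion file sourced from .bashrc. A minimal task-list sketch of that pattern, assuming the logged parameters map onto the stock ansible.builtin.file, ansible.builtin.unarchive, ansible.builtin.shell and ansible.builtin.blockinfile modules; the completion command and variable values are illustrative assumptions inferred from the log, not the role's actual source:

- name: Ensure openshift client install path is present
  ansible.builtin.file:
    path: "{{ cifmw_ci_setup_oc_install_path }}"
    state: directory
    mode: "0755"

- name: Install openshift client
  ansible.builtin.unarchive:
    src: "{{ cifmw_ci_setup_openshift_client_download_uri }}/{{ cifmw_ci_setup_openshift_client_version }}/openshift-client-linux.tar.gz"
    dest: "{{ cifmw_ci_setup_oc_install_path }}"
    remote_src: true
    mode: "0755"
    creates: "{{ cifmw_ci_setup_oc_install_path }}/oc"

# Assumption: the "Create completion file" task generates ~/.oc_completion with `oc completion bash`.
- name: Create completion file
  ansible.builtin.shell: "{{ cifmw_ci_setup_oc_install_path }}/oc completion bash > {{ ansible_user_dir }}/.oc_completion"
  args:
    creates: "{{ ansible_user_dir }}/.oc_completion"

- name: Source completion from within .bashrc
  ansible.builtin.blockinfile:
    path: "{{ ansible_user_dir }}/.bashrc"
    create: true
    mode: "0644"
    block: |
      if [ -f ~/.oc_completion ]; then
        source ~/.oc_completion
      fi

The creates: guards keep the unpack and completion-generation steps idempotent; on the fresh CI node above they report changed, while a re-run would leave them unchanged.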
2025-12-08 17:53:04,689 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,699 p=31902 u=zuul n=ansible | TASK [ci_setup : Enabling the required repositories. name={{ item }}, state={{ rhsm_repo_state | default('enabled') }}] *** 2025-12-08 17:53:04,699 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.033) 0:00:55.954 ******* 2025-12-08 17:53:04,699 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.033) 0:00:55.953 ******* 2025-12-08 17:53:04,727 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,737 p=31902 u=zuul n=ansible | TASK [ci_setup : Get current /etc/redhat-release _raw_params=cat /etc/redhat-release] *** 2025-12-08 17:53:04,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.038) 0:00:55.993 ******* 2025-12-08 17:53:04,737 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.038) 0:00:55.992 ******* 2025-12-08 17:53:04,762 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,769 p=31902 u=zuul n=ansible | TASK [ci_setup : Print current /etc/redhat-release msg={{ _current_rh_release.stdout }}] *** 2025-12-08 17:53:04,769 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.031) 0:00:56.024 ******* 2025-12-08 17:53:04,769 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.031) 0:00:56.023 ******* 2025-12-08 17:53:04,787 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,794 p=31902 u=zuul n=ansible | TASK [ci_setup : Ensure the repos are enabled in the system using yum name={{ item.name }}, baseurl={{ item.baseurl }}, description={{ item.description | default(item.name) }}, gpgcheck={{ item.gpgcheck | default(false) }}, enabled=True, state={{ yum_repo_state | default('present') }}] *** 2025-12-08 17:53:04,794 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.025) 0:00:56.050 ******* 2025-12-08 17:53:04,794 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.025) 0:00:56.049 ******* 2025-12-08 17:53:04,830 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:04,839 p=31902 u=zuul n=ansible | TASK [ci_setup : Manage directories path={{ item }}, state={{ directory_state }}, mode=0755, owner={{ ansible_user_id }}, group={{ ansible_user_id }}] *** 2025-12-08 17:53:04,839 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.045) 0:00:56.095 ******* 2025-12-08 17:53:04,840 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:04 +0000 (0:00:00.045) 0:00:56.094 ******* 2025-12-08 17:53:05,148 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/manifests/openstack/cr) 2025-12-08 17:53:05,359 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/logs) 2025-12-08 17:53:05,572 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/tmp) 2025-12-08 17:53:05,819 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/volumes) 2025-12-08 17:53:06,024 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/parameters) 2025-12-08 17:53:06,051 p=31902 u=zuul n=ansible | TASK [Prepare install_yamls make targets name=install_yamls, apply={'tags': ['bootstrap']}] *** 2025-12-08 17:53:06,052 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:01.212) 0:00:57.307 ******* 2025-12-08 17:53:06,052 p=31902 u=zuul n=ansible | 
Monday 08 December 2025 17:53:06 +0000 (0:00:01.212) 0:00:57.306 ******* 2025-12-08 17:53:06,175 p=31902 u=zuul n=ansible | TASK [install_yamls : Ensure directories exist path={{ item }}, state=directory, mode=0755] *** 2025-12-08 17:53:06,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.124) 0:00:57.431 ******* 2025-12-08 17:53:06,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.124) 0:00:57.430 ******* 2025-12-08 17:53:06,432 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/artifacts) 2025-12-08 17:53:06,603 p=31902 u=zuul n=ansible | changed: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/roles/install_yamls_makes/tasks) 2025-12-08 17:53:06,835 p=31902 u=zuul n=ansible | ok: [localhost] => (item=/home/zuul/ci-framework-data/artifacts/parameters) 2025-12-08 17:53:06,846 p=31902 u=zuul n=ansible | TASK [Create variables with local repos based on Zuul items name=install_yamls, tasks_from=zuul_set_operators_repo.yml] *** 2025-12-08 17:53:06,846 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.670) 0:00:58.102 ******* 2025-12-08 17:53:06,846 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.670) 0:00:58.100 ******* 2025-12-08 17:53:06,896 p=31902 u=zuul n=ansible | TASK [install_yamls : Set fact with local repos based on Zuul items cifmw_install_yamls_operators_repo={{ cifmw_install_yamls_operators_repo | default({}) | combine(_repo_operator_info | items2dict) }}] *** 2025-12-08 17:53:06,896 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.049) 0:00:58.152 ******* 2025-12-08 17:53:06,896 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.049) 0:00:58.150 ******* 2025-12-08 17:53:06,931 p=31902 u=zuul n=ansible | skipping: [localhost] => (item={'branch': 'master', 'change': '694', 'change_url': 'https://github.com/infrawatch/service-telemetry-operator/pull/694', 'commit_id': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'patchset': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'project': {'canonical_hostname': 'github.com', 'canonical_name': 'github.com/infrawatch/service-telemetry-operator', 'name': 'infrawatch/service-telemetry-operator', 'short_name': 'service-telemetry-operator', 'src_dir': 'src/github.com/infrawatch/service-telemetry-operator'}, 'topic': None}) 2025-12-08 17:53:06,933 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:06,943 p=31902 u=zuul n=ansible | TASK [install_yamls : Print helpful data for debugging msg=_repo_operator_name: {{ _repo_operator_name }} _repo_operator_info: {{ _repo_operator_info }} cifmw_install_yamls_operators_repo: {{ cifmw_install_yamls_operators_repo }} ] *** 2025-12-08 17:53:06,944 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.047) 0:00:58.199 ******* 2025-12-08 17:53:06,944 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.047) 0:00:58.198 ******* 2025-12-08 17:53:06,979 p=31902 u=zuul n=ansible | skipping: [localhost] => (item={'branch': 'master', 'change': '694', 'change_url': 'https://github.com/infrawatch/service-telemetry-operator/pull/694', 'commit_id': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'patchset': 'dee1e9b260d30a0e04e6122a214cac385c42d9bb', 'project': {'canonical_hostname': 'github.com', 'canonical_name': 'github.com/infrawatch/service-telemetry-operator', 'name': 'infrawatch/service-telemetry-operator', 'short_name': 'service-telemetry-operator', 'src_dir': 
'src/github.com/infrawatch/service-telemetry-operator'}, 'topic': None}) 2025-12-08 17:53:06,981 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:06,999 p=31902 u=zuul n=ansible | TASK [Customize install_yamls devsetup vars if needed name=install_yamls, tasks_from=customize_devsetup_vars.yml] *** 2025-12-08 17:53:06,999 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.055) 0:00:58.254 ******* 2025-12-08 17:53:06,999 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:06 +0000 (0:00:00.055) 0:00:58.253 ******* 2025-12-08 17:53:07,050 p=31902 u=zuul n=ansible | TASK [install_yamls : Update opm_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^opm_version:, line=opm_version: {{ cifmw_install_yamls_opm_version }}, state=present] *** 2025-12-08 17:53:07,050 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.051) 0:00:58.306 ******* 2025-12-08 17:53:07,050 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.051) 0:00:58.304 ******* 2025-12-08 17:53:07,070 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,076 p=31902 u=zuul n=ansible | TASK [install_yamls : Update sdk_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^sdk_version:, line=sdk_version: {{ cifmw_install_yamls_sdk_version }}, state=present] *** 2025-12-08 17:53:07,077 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.026) 0:00:58.332 ******* 2025-12-08 17:53:07,077 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.026) 0:00:58.331 ******* 2025-12-08 17:53:07,111 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,117 p=31902 u=zuul n=ansible | TASK [install_yamls : Update go_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^go_version:, line=go_version: {{ cifmw_install_yamls_go_version }}, state=present] *** 2025-12-08 17:53:07,117 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.373 ******* 2025-12-08 17:53:07,117 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.372 ******* 2025-12-08 17:53:07,139 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,145 p=31902 u=zuul n=ansible | TASK [install_yamls : Update kustomize_version in install_yamls devsetup/vars/default.yaml path={{ cifmw_install_yamls_repo }}/devsetup/vars/default.yaml, regexp=^kustomize_version:, line=kustomize_version: {{ cifmw_install_yamls_kustomize_version }}, state=present] *** 2025-12-08 17:53:07,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.027) 0:00:58.401 ******* 2025-12-08 17:53:07,145 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.027) 0:00:58.400 ******* 2025-12-08 17:53:07,165 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,176 p=31902 u=zuul n=ansible | TASK [install_yamls : Compute the cifmw_install_yamls_vars final value _install_yamls_override_vars={{ _install_yamls_override_vars | default({}) | combine(item, recursive=True) }}] *** 2025-12-08 17:53:07,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.030) 0:00:58.432 ******* 2025-12-08 17:53:07,176 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.030) 0:00:58.431 ******* 2025-12-08 
17:53:07,259 p=31902 u=zuul n=ansible | ok: [localhost] => (item={}) 2025-12-08 17:53:07,267 p=31902 u=zuul n=ansible | TASK [install_yamls : Set environment override cifmw_install_yamls_environment fact cifmw_install_yamls_environment={{ _install_yamls_override_vars.keys() | map('upper') | zip(_install_yamls_override_vars.values()) | items2dict(key_name=0, value_name=1) | combine({ 'OUT': cifmw_install_yamls_manifests_dir, 'OUTPUT_DIR': cifmw_install_yamls_edpm_dir, 'CHECKOUT_FROM_OPENSTACK_REF': cifmw_install_yamls_checkout_openstack_ref, 'OPENSTACK_K8S_BRANCH': (zuul is defined and not zuul.branch |regex_search('master|antelope|rhos')) | ternary(zuul.branch, 'main') }) | combine(install_yamls_operators_repos) }}, cacheable=True] *** 2025-12-08 17:53:07,267 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.091) 0:00:58.523 ******* 2025-12-08 17:53:07,268 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.091) 0:00:58.522 ******* 2025-12-08 17:53:07,299 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:07,308 p=31902 u=zuul n=ansible | TASK [install_yamls : Get environment structure base_path={{ cifmw_install_yamls_repo }}] *** 2025-12-08 17:53:07,308 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.564 ******* 2025-12-08 17:53:07,308 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.040) 0:00:58.563 ******* 2025-12-08 17:53:07,903 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:07,919 p=31902 u=zuul n=ansible | TASK [install_yamls : Ensure Output directory exists path={{ cifmw_install_yamls_out_dir }}, state=directory, mode=0755] *** 2025-12-08 17:53:07,919 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.611) 0:00:59.175 ******* 2025-12-08 17:53:07,919 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.611) 0:00:59.174 ******* 2025-12-08 17:53:07,956 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:07,970 p=31902 u=zuul n=ansible | TASK [install_yamls : Ensure user cifmw_install_yamls_vars contains existing Makefile variables that=_cifmw_install_yamls_unmatched_vars | length == 0, msg=cifmw_install_yamls_vars contains a variable that is not defined in install_yamls Makefile nor cifmw_install_yamls_whitelisted_vars: {{ _cifmw_install_yamls_unmatched_vars | join(', ')}}, quiet=True] *** 2025-12-08 17:53:07,970 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.050) 0:00:59.226 ******* 2025-12-08 17:53:07,970 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:07 +0000 (0:00:00.050) 0:00:59.224 ******* 2025-12-08 17:53:08,004 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:08,027 p=31902 u=zuul n=ansible | TASK [install_yamls : Generate /home/zuul/ci-framework-data/artifacts/install_yamls.sh dest={{ cifmw_install_yamls_out_dir }}/{{ cifmw_install_yamls_envfile }}, content={% for k,v in cifmw_install_yamls_environment.items() %} export {{ k }}={{ v }} {% endfor %}, mode=0644] *** 2025-12-08 17:53:08,027 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.057) 0:00:59.283 ******* 2025-12-08 17:53:08,027 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.057) 0:00:59.281 ******* 2025-12-08 17:53:08,060 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:08,074 p=31902 u=zuul n=ansible | TASK [install_yamls : Set install_yamls default values cifmw_install_yamls_defaults={{ 
get_makefiles_env_output.makefiles_values | combine(cifmw_install_yamls_environment) }}, cacheable=True] *** 2025-12-08 17:53:08,074 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.047) 0:00:59.330 ******* 2025-12-08 17:53:08,074 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.047) 0:00:59.329 ******* 2025-12-08 17:53:08,117 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:08,134 p=31902 u=zuul n=ansible | TASK [install_yamls : Show the env structure var=cifmw_install_yamls_environment] *** 2025-12-08 17:53:08,134 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.059) 0:00:59.390 ******* 2025-12-08 17:53:08,134 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.059) 0:00:59.388 ******* 2025-12-08 17:53:08,167 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_install_yamls_environment: CHECKOUT_FROM_OPENSTACK_REF: 'true' OPENSTACK_K8S_BRANCH: main OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm 2025-12-08 17:53:08,177 p=31902 u=zuul n=ansible | TASK [install_yamls : Show the env structure defaults var=cifmw_install_yamls_defaults] *** 2025-12-08 17:53:08,177 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.042) 0:00:59.432 ******* 2025-12-08 17:53:08,177 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.042) 0:00:59.431 ******* 2025-12-08 17:53:08,219 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_install_yamls_defaults: ADOPTED_EXTERNAL_NETWORK: 172.21.1.0/24 ADOPTED_INTERNALAPI_NETWORK: 172.17.1.0/24 ADOPTED_STORAGEMGMT_NETWORK: 172.20.1.0/24 ADOPTED_STORAGE_NETWORK: 172.18.1.0/24 ADOPTED_TENANT_NETWORK: 172.9.1.0/24 ANSIBLEEE: config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_BRANCH: main ANSIBLEEE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-operator-index:latest ANSIBLEEE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/kuttl-test.yaml ANSIBLEEE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/test/kuttl/tests ANSIBLEEE_KUTTL_NAMESPACE: ansibleee-kuttl-tests ANSIBLEEE_REPO: https://github.com/openstack-k8s-operators/openstack-ansibleee-operator ANSIBLEE_COMMIT_HASH: '' BARBICAN: config/samples/barbican_v1beta1_barbican.yaml BARBICAN_BRANCH: main BARBICAN_COMMIT_HASH: '' BARBICAN_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/config/samples/barbican_v1beta1_barbican.yaml BARBICAN_DEPL_IMG: unused BARBICAN_IMG: quay.io/openstack-k8s-operators/barbican-operator-index:latest BARBICAN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/kuttl-test.yaml BARBICAN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/test/kuttl/tests BARBICAN_KUTTL_NAMESPACE: barbican-kuttl-tests BARBICAN_REPO: https://github.com/openstack-k8s-operators/barbican-operator.git BARBICAN_SERVICE_ENABLED: 'true' BARBICAN_SIMPLE_CRYPTO_ENCRYPTION_KEY: sE**********U= BAREMETAL_BRANCH: main BAREMETAL_COMMIT_HASH: '' BAREMETAL_IMG: quay.io/openstack-k8s-operators/openstack-baremetal-operator-index:latest BAREMETAL_OS_CONTAINER_IMG: '' BAREMETAL_OS_IMG: '' BAREMETAL_REPO: https://github.com/openstack-k8s-operators/openstack-baremetal-operator.git 
BAREMETAL_TIMEOUT: 20m BASH_IMG: quay.io/openstack-k8s-operators/bash:latest BGP_ASN: '64999' BGP_LEAF_1: 100.65.4.1 BGP_LEAF_2: 100.64.4.1 BGP_OVN_ROUTING: 'false' BGP_PEER_ASN: '64999' BGP_SOURCE_IP: 172.30.4.2 BGP_SOURCE_IP6: f00d:f00d:f00d:f00d:f00d:f00d:f00d:42 BMAAS_BRIDGE_IPV4_PREFIX: 172.20.1.2/24 BMAAS_BRIDGE_IPV6_PREFIX: fd00:bbbb::2/64 BMAAS_INSTANCE_DISK_SIZE: '20' BMAAS_INSTANCE_MEMORY: '4096' BMAAS_INSTANCE_NAME_PREFIX: crc-bmaas BMAAS_INSTANCE_NET_MODEL: virtio BMAAS_INSTANCE_OS_VARIANT: centos-stream9 BMAAS_INSTANCE_VCPUS: '2' BMAAS_INSTANCE_VIRT_TYPE: kvm BMAAS_IPV4: 'true' BMAAS_IPV6: 'false' BMAAS_LIBVIRT_USER: sushyemu BMAAS_METALLB_ADDRESS_POOL: 172.20.1.64/26 BMAAS_METALLB_POOL_NAME: baremetal BMAAS_NETWORK_IPV4_PREFIX: 172.20.1.1/24 BMAAS_NETWORK_IPV6_PREFIX: fd00:bbbb::1/64 BMAAS_NETWORK_NAME: crc-bmaas BMAAS_NODE_COUNT: '1' BMAAS_OCP_INSTANCE_NAME: crc BMAAS_REDFISH_PASSWORD: password BMAAS_REDFISH_USERNAME: admin BMAAS_ROUTE_LIBVIRT_NETWORKS: crc-bmaas,crc,default BMAAS_SUSHY_EMULATOR_DRIVER: libvirt BMAAS_SUSHY_EMULATOR_IMAGE: quay.io/metal3-io/sushy-tools:latest BMAAS_SUSHY_EMULATOR_NAMESPACE: sushy-emulator BMAAS_SUSHY_EMULATOR_OS_CLIENT_CONFIG_FILE: /etc/openstack/clouds.yaml BMAAS_SUSHY_EMULATOR_OS_CLOUD: openstack BMH_NAMESPACE: openstack BMO_BRANCH: release-0.9 BMO_CLEANUP: 'true' BMO_COMMIT_HASH: '' BMO_IPA_BRANCH: stable/2024.1 BMO_IRONIC_HOST: 192.168.122.10 BMO_PROVISIONING_INTERFACE: '' BMO_REPO: https://github.com/metal3-io/baremetal-operator BMO_SETUP: '' BMO_SETUP_ROUTE_REPLACE: 'true' BM_CTLPLANE_INTERFACE: enp1s0 BM_INSTANCE_MEMORY: '8192' BM_INSTANCE_NAME_PREFIX: edpm-compute-baremetal BM_INSTANCE_NAME_SUFFIX: '0' BM_NETWORK_NAME: default BM_NODE_COUNT: '1' BM_ROOT_PASSWORD: '' BM_ROOT_PASSWORD_SECRET: '' CEILOMETER_CENTRAL_DEPL_IMG: unused CEILOMETER_NOTIFICATION_DEPL_IMG: unused CEPH_BRANCH: release-1.15 CEPH_CLIENT: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/toolbox.yaml CEPH_COMMON: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/common.yaml CEPH_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/cluster-test.yaml CEPH_CRDS: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/crds.yaml CEPH_IMG: quay.io/ceph/demo:latest-squid CEPH_OP: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/operator-openshift.yaml CEPH_REPO: https://github.com/rook/rook.git CERTMANAGER_TIMEOUT: 300s CHECKOUT_FROM_OPENSTACK_REF: 'true' CINDER: config/samples/cinder_v1beta1_cinder.yaml CINDERAPI_DEPL_IMG: unused CINDERBKP_DEPL_IMG: unused CINDERSCH_DEPL_IMG: unused CINDERVOL_DEPL_IMG: unused CINDER_BRANCH: main CINDER_COMMIT_HASH: '' CINDER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/config/samples/cinder_v1beta1_cinder.yaml CINDER_IMG: quay.io/openstack-k8s-operators/cinder-operator-index:latest CINDER_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/kuttl-test.yaml CINDER_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/test/kuttl/tests CINDER_KUTTL_NAMESPACE: cinder-kuttl-tests CINDER_REPO: https://github.com/openstack-k8s-operators/cinder-operator.git CLEANUP_DIR_CMD: rm -Rf CRC_BGP_NIC_1_MAC: '52:54:00:11:11:11' CRC_BGP_NIC_2_MAC: '52:54:00:11:11:12' CRC_HTTPS_PROXY: '' CRC_HTTP_PROXY: '' CRC_STORAGE_NAMESPACE: crc-storage CRC_STORAGE_RETRIES: '3' CRC_URL: 
'''https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/crc/latest/crc-linux-amd64.tar.xz''' CRC_VERSION: latest DATAPLANE_ANSIBLE_SECRET: dataplane-ansible-ssh-private-key-secret DATAPLANE_ANSIBLE_USER: '' DATAPLANE_COMPUTE_IP: 192.168.122.100 DATAPLANE_CONTAINER_PREFIX: openstack DATAPLANE_CONTAINER_TAG: current-podified DATAPLANE_CUSTOM_SERVICE_RUNNER_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest DATAPLANE_DEFAULT_GW: 192.168.122.1 DATAPLANE_EXTRA_NOVA_CONFIG_FILE: /dev/null DATAPLANE_GROWVOLS_ARGS: /=8GB /tmp=1GB /home=1GB /var=100% DATAPLANE_KUSTOMIZE_SCENARIO: preprovisioned DATAPLANE_NETWORKER_IP: 192.168.122.200 DATAPLANE_NETWORK_INTERFACE_NAME: eth0 DATAPLANE_NOVA_NFS_PATH: '' DATAPLANE_NTP_SERVER: pool.ntp.org DATAPLANE_PLAYBOOK: osp.edpm.download_cache DATAPLANE_REGISTRY_URL: quay.io/podified-antelope-centos9 DATAPLANE_RUNNER_IMG: '' DATAPLANE_SERVER_ROLE: compute DATAPLANE_SSHD_ALLOWED_RANGES: '[''192.168.122.0/24'']' DATAPLANE_TIMEOUT: 30m DATAPLANE_TLS_ENABLED: 'true' DATAPLANE_TOTAL_NETWORKER_NODES: '1' DATAPLANE_TOTAL_NODES: '1' DBSERVICE: galera DESIGNATE: config/samples/designate_v1beta1_designate.yaml DESIGNATE_BRANCH: main DESIGNATE_COMMIT_HASH: '' DESIGNATE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/config/samples/designate_v1beta1_designate.yaml DESIGNATE_IMG: quay.io/openstack-k8s-operators/designate-operator-index:latest DESIGNATE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/kuttl-test.yaml DESIGNATE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/test/kuttl/tests DESIGNATE_KUTTL_NAMESPACE: designate-kuttl-tests DESIGNATE_REPO: https://github.com/openstack-k8s-operators/designate-operator.git DNSDATA: config/samples/network_v1beta1_dnsdata.yaml DNSDATA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsdata.yaml DNSMASQ: config/samples/network_v1beta1_dnsmasq.yaml DNSMASQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsmasq.yaml DNS_DEPL_IMG: unused DNS_DOMAIN: localdomain DOWNLOAD_TOOLS_SELECTION: all EDPM_ATTACH_EXTNET: 'true' EDPM_COMPUTE_ADDITIONAL_HOST_ROUTES: '''[]''' EDPM_COMPUTE_ADDITIONAL_NETWORKS: '''[]''' EDPM_COMPUTE_CELLS: '1' EDPM_COMPUTE_CEPH_ENABLED: 'true' EDPM_COMPUTE_CEPH_NOVA: 'true' EDPM_COMPUTE_DHCP_AGENT_ENABLED: 'true' EDPM_COMPUTE_SRIOV_ENABLED: 'true' EDPM_COMPUTE_SUFFIX: '0' EDPM_CONFIGURE_DEFAULT_ROUTE: 'true' EDPM_CONFIGURE_HUGEPAGES: 'false' EDPM_CONFIGURE_NETWORKING: 'true' EDPM_FIRSTBOOT_EXTRA: /tmp/edpm-firstboot-extra EDPM_NETWORKER_SUFFIX: '0' EDPM_TOTAL_NETWORKERS: '1' EDPM_TOTAL_NODES: '1' GALERA_REPLICAS: '' GENERATE_SSH_KEYS: 'true' GIT_CLONE_OPTS: '' GLANCE: config/samples/glance_v1beta1_glance.yaml GLANCEAPI_DEPL_IMG: unused GLANCE_BRANCH: main GLANCE_COMMIT_HASH: '' GLANCE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/config/samples/glance_v1beta1_glance.yaml GLANCE_IMG: quay.io/openstack-k8s-operators/glance-operator-index:latest GLANCE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/kuttl-test.yaml GLANCE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/test/kuttl/tests GLANCE_KUTTL_NAMESPACE: glance-kuttl-tests GLANCE_REPO: https://github.com/openstack-k8s-operators/glance-operator.git HEAT: 
config/samples/heat_v1beta1_heat.yaml HEATAPI_DEPL_IMG: unused HEATCFNAPI_DEPL_IMG: unused HEATENGINE_DEPL_IMG: unused HEAT_AUTH_ENCRYPTION_KEY: 76**********f0 HEAT_BRANCH: main HEAT_COMMIT_HASH: '' HEAT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/config/samples/heat_v1beta1_heat.yaml HEAT_IMG: quay.io/openstack-k8s-operators/heat-operator-index:latest HEAT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/kuttl-test.yaml HEAT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/test/kuttl/tests HEAT_KUTTL_NAMESPACE: heat-kuttl-tests HEAT_REPO: https://github.com/openstack-k8s-operators/heat-operator.git HEAT_SERVICE_ENABLED: 'true' HORIZON: config/samples/horizon_v1beta1_horizon.yaml HORIZON_BRANCH: main HORIZON_COMMIT_HASH: '' HORIZON_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/config/samples/horizon_v1beta1_horizon.yaml HORIZON_DEPL_IMG: unused HORIZON_IMG: quay.io/openstack-k8s-operators/horizon-operator-index:latest HORIZON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/kuttl-test.yaml HORIZON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/test/kuttl/tests HORIZON_KUTTL_NAMESPACE: horizon-kuttl-tests HORIZON_REPO: https://github.com/openstack-k8s-operators/horizon-operator.git INFRA_BRANCH: main INFRA_COMMIT_HASH: '' INFRA_IMG: quay.io/openstack-k8s-operators/infra-operator-index:latest INFRA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/kuttl-test.yaml INFRA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/test/kuttl/tests INFRA_KUTTL_NAMESPACE: infra-kuttl-tests INFRA_REPO: https://github.com/openstack-k8s-operators/infra-operator.git INSTALL_CERT_MANAGER: 'true' INSTALL_NMSTATE: true || false INSTALL_NNCP: true || false INTERNALAPI_HOST_ROUTES: '' IPV6_LAB_IPV4_NETWORK_IPADDRESS: 172.30.0.1/24 IPV6_LAB_IPV6_NETWORK_IPADDRESS: fd00:abcd:abcd:fc00::1/64 IPV6_LAB_LIBVIRT_STORAGE_POOL: default IPV6_LAB_MANAGE_FIREWALLD: 'true' IPV6_LAB_NAT64_HOST_IPV4: 172.30.0.2/24 IPV6_LAB_NAT64_HOST_IPV6: fd00:abcd:abcd:fc00::2/64 IPV6_LAB_NAT64_INSTANCE_NAME: nat64-router IPV6_LAB_NAT64_IPV6_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_NAT64_TAYGA_DYNAMIC_POOL: 192.168.255.0/24 IPV6_LAB_NAT64_TAYGA_IPV4: 192.168.255.1 IPV6_LAB_NAT64_TAYGA_IPV6: fd00:abcd:abcd:fc00::3 IPV6_LAB_NAT64_TAYGA_IPV6_PREFIX: fd00:abcd:abcd:fcff::/96 IPV6_LAB_NAT64_UPDATE_PACKAGES: 'false' IPV6_LAB_NETWORK_NAME: nat64 IPV6_LAB_SNO_CLUSTER_NETWORK: fd00:abcd:0::/48 IPV6_LAB_SNO_HOST_IP: fd00:abcd:abcd:fc00::11 IPV6_LAB_SNO_HOST_PREFIX: '64' IPV6_LAB_SNO_INSTANCE_NAME: sno IPV6_LAB_SNO_MACHINE_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_SNO_OCP_MIRROR_URL: https://mirror.openshift.com/pub/openshift-v4/clients/ocp IPV6_LAB_SNO_OCP_VERSION: latest-4.14 IPV6_LAB_SNO_SERVICE_NETWORK: fd00:abcd:abcd:fc03::/112 IPV6_LAB_SSH_PUB_KEY: /home/zuul/.ssh/id_rsa.pub IPV6_LAB_WORK_DIR: /home/zuul/.ipv6lab IRONIC: config/samples/ironic_v1beta1_ironic.yaml IRONICAPI_DEPL_IMG: unused IRONICCON_DEPL_IMG: unused IRONICINS_DEPL_IMG: unused IRONICNAG_DEPL_IMG: unused IRONICPXE_DEPL_IMG: unused IRONIC_BRANCH: main IRONIC_COMMIT_HASH: '' IRONIC_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/config/samples/ironic_v1beta1_ironic.yaml IRONIC_IMAGE: quay.io/metal3-io/ironic IRONIC_IMAGE_TAG: release-24.1 IRONIC_IMG: 
quay.io/openstack-k8s-operators/ironic-operator-index:latest IRONIC_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/kuttl-test.yaml IRONIC_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/test/kuttl/tests IRONIC_KUTTL_NAMESPACE: ironic-kuttl-tests IRONIC_REPO: https://github.com/openstack-k8s-operators/ironic-operator.git KEYSTONEAPI: config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_DEPL_IMG: unused KEYSTONE_BRANCH: main KEYSTONE_COMMIT_HASH: '' KEYSTONE_FEDERATION_CLIENT_SECRET: CO**********6f KEYSTONE_FEDERATION_CRYPTO_PASSPHRASE: openstack KEYSTONE_IMG: quay.io/openstack-k8s-operators/keystone-operator-index:latest KEYSTONE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/kuttl-test.yaml KEYSTONE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/test/kuttl/tests KEYSTONE_KUTTL_NAMESPACE: keystone-kuttl-tests KEYSTONE_REPO: https://github.com/openstack-k8s-operators/keystone-operator.git KUBEADMIN_PWD: '12345678' LIBVIRT_SECRET: libvirt-secret LOKI_DEPLOY_MODE: openshift-network LOKI_DEPLOY_NAMESPACE: netobserv LOKI_DEPLOY_SIZE: 1x.demo LOKI_NAMESPACE: openshift-operators-redhat LOKI_OPERATOR_GROUP: openshift-operators-redhat-loki LOKI_SUBSCRIPTION: loki-operator LVMS_CR: '1' MANILA: config/samples/manila_v1beta1_manila.yaml MANILAAPI_DEPL_IMG: unused MANILASCH_DEPL_IMG: unused MANILASHARE_DEPL_IMG: unused MANILA_BRANCH: main MANILA_COMMIT_HASH: '' MANILA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/config/samples/manila_v1beta1_manila.yaml MANILA_IMG: quay.io/openstack-k8s-operators/manila-operator-index:latest MANILA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/kuttl-test.yaml MANILA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/test/kuttl/tests MANILA_KUTTL_NAMESPACE: manila-kuttl-tests MANILA_REPO: https://github.com/openstack-k8s-operators/manila-operator.git MANILA_SERVICE_ENABLED: 'true' MARIADB: config/samples/mariadb_v1beta1_galera.yaml MARIADB_BRANCH: main MARIADB_CHAINSAW_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/config.yaml MARIADB_CHAINSAW_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/tests MARIADB_CHAINSAW_NAMESPACE: mariadb-chainsaw-tests MARIADB_COMMIT_HASH: '' MARIADB_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/config/samples/mariadb_v1beta1_galera.yaml MARIADB_DEPL_IMG: unused MARIADB_IMG: quay.io/openstack-k8s-operators/mariadb-operator-index:latest MARIADB_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/kuttl-test.yaml MARIADB_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/kuttl/tests MARIADB_KUTTL_NAMESPACE: mariadb-kuttl-tests MARIADB_REPO: https://github.com/openstack-k8s-operators/mariadb-operator.git MEMCACHED: config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_DEPL_IMG: unused METADATA_SHARED_SECRET: '12**********42' METALLB_IPV6_POOL: fd00:aaaa::80-fd00:aaaa::90 METALLB_POOL: 
192.168.122.80-192.168.122.90 MICROSHIFT: '0' NAMESPACE: openstack NETCONFIG: config/samples/network_v1beta1_netconfig.yaml NETCONFIG_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_netconfig.yaml NETCONFIG_DEPL_IMG: unused NETOBSERV_DEPLOY_NAMESPACE: netobserv NETOBSERV_NAMESPACE: openshift-netobserv-operator NETOBSERV_OPERATOR_GROUP: openshift-netobserv-operator-net NETOBSERV_SUBSCRIPTION: netobserv-operator NETWORK_BGP: 'false' NETWORK_DESIGNATE_ADDRESS_PREFIX: 172.28.0 NETWORK_DESIGNATE_EXT_ADDRESS_PREFIX: 172.50.0 NETWORK_INTERNALAPI_ADDRESS_PREFIX: 172.17.0 NETWORK_ISOLATION: 'true' NETWORK_ISOLATION_INSTANCE_NAME: crc NETWORK_ISOLATION_IPV4: 'true' NETWORK_ISOLATION_IPV4_ADDRESS: 172.16.1.1/24 NETWORK_ISOLATION_IPV4_NAT: 'true' NETWORK_ISOLATION_IPV6: 'false' NETWORK_ISOLATION_IPV6_ADDRESS: fd00:aaaa::1/64 NETWORK_ISOLATION_IP_ADDRESS: 192.168.122.10 NETWORK_ISOLATION_MAC: '52:54:00:11:11:10' NETWORK_ISOLATION_NETWORK_NAME: net-iso NETWORK_ISOLATION_NET_NAME: default NETWORK_ISOLATION_USE_DEFAULT_NETWORK: 'true' NETWORK_MTU: '1500' NETWORK_STORAGEMGMT_ADDRESS_PREFIX: 172.20.0 NETWORK_STORAGE_ADDRESS_PREFIX: 172.18.0 NETWORK_STORAGE_MACVLAN: '' NETWORK_TENANT_ADDRESS_PREFIX: 172.19.0 NETWORK_VLAN_START: '20' NETWORK_VLAN_STEP: '1' NEUTRONAPI: config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_DEPL_IMG: unused NEUTRON_BRANCH: main NEUTRON_COMMIT_HASH: '' NEUTRON_IMG: quay.io/openstack-k8s-operators/neutron-operator-index:latest NEUTRON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/kuttl-test.yaml NEUTRON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/test/kuttl/tests NEUTRON_KUTTL_NAMESPACE: neutron-kuttl-tests NEUTRON_REPO: https://github.com/openstack-k8s-operators/neutron-operator.git NFS_HOME: /home/nfs NMSTATE_NAMESPACE: openshift-nmstate NMSTATE_OPERATOR_GROUP: openshift-nmstate-tn6k8 NMSTATE_SUBSCRIPTION: kubernetes-nmstate-operator NNCP_ADDITIONAL_HOST_ROUTES: '' NNCP_BGP_1_INTERFACE: enp7s0 NNCP_BGP_1_IP_ADDRESS: 100.65.4.2 NNCP_BGP_2_INTERFACE: enp8s0 NNCP_BGP_2_IP_ADDRESS: 100.64.4.2 NNCP_BRIDGE: ospbr NNCP_CLEANUP_TIMEOUT: 120s NNCP_CTLPLANE_IPV6_ADDRESS_PREFIX: 'fd00:aaaa::' NNCP_CTLPLANE_IPV6_ADDRESS_SUFFIX: '10' NNCP_CTLPLANE_IP_ADDRESS_PREFIX: 192.168.122 NNCP_CTLPLANE_IP_ADDRESS_SUFFIX: '10' NNCP_DNS_SERVER: 192.168.122.1 NNCP_DNS_SERVER_IPV6: fd00:aaaa::1 NNCP_GATEWAY: 192.168.122.1 NNCP_GATEWAY_IPV6: fd00:aaaa::1 NNCP_INTERFACE: enp6s0 NNCP_NODES: '' NNCP_TIMEOUT: 240s NOVA: config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_BRANCH: main NOVA_COMMIT_HASH: '' NOVA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/nova-operator/config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_IMG: quay.io/openstack-k8s-operators/nova-operator-index:latest NOVA_REPO: https://github.com/openstack-k8s-operators/nova-operator.git NUMBER_OF_INSTANCES: '1' OCP_NETWORK_NAME: crc OCTAVIA: config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_BRANCH: main OCTAVIA_COMMIT_HASH: '' OCTAVIA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_IMG: quay.io/openstack-k8s-operators/octavia-operator-index:latest OCTAVIA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/kuttl-test.yaml 
OCTAVIA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/test/kuttl/tests OCTAVIA_KUTTL_NAMESPACE: octavia-kuttl-tests OCTAVIA_REPO: https://github.com/openstack-k8s-operators/octavia-operator.git OKD: 'false' OPENSTACK_BRANCH: main OPENSTACK_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-bundle:latest OPENSTACK_COMMIT_HASH: '' OPENSTACK_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_CRDS_DIR: openstack_crds OPENSTACK_CTLPLANE: config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_IMG: quay.io/openstack-k8s-operators/openstack-operator-index:latest OPENSTACK_K8S_BRANCH: main OPENSTACK_K8S_TAG: latest OPENSTACK_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/kuttl-test.yaml OPENSTACK_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/test/kuttl/tests OPENSTACK_KUTTL_NAMESPACE: openstack-kuttl-tests OPENSTACK_NEUTRON_CUSTOM_CONF: '' OPENSTACK_REPO: https://github.com/openstack-k8s-operators/openstack-operator.git OPENSTACK_STORAGE_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-storage-bundle:latest OPERATOR_BASE_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator OPERATOR_CHANNEL: '' OPERATOR_NAMESPACE: openstack-operators OPERATOR_SOURCE: '' OPERATOR_SOURCE_NAMESPACE: '' OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm OVNCONTROLLER: config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_NMAP: 'true' OVNDBS: config/samples/ovn_v1beta1_ovndbcluster.yaml OVNDBS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovndbcluster.yaml OVNNORTHD: config/samples/ovn_v1beta1_ovnnorthd.yaml OVNNORTHD_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovnnorthd.yaml OVN_BRANCH: main OVN_COMMIT_HASH: '' OVN_IMG: quay.io/openstack-k8s-operators/ovn-operator-index:latest OVN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/kuttl-test.yaml OVN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/test/kuttl/tests OVN_KUTTL_NAMESPACE: ovn-kuttl-tests OVN_REPO: https://github.com/openstack-k8s-operators/ovn-operator.git PASSWORD: '12**********78' PLACEMENTAPI: config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_DEPL_IMG: unused PLACEMENT_BRANCH: main PLACEMENT_COMMIT_HASH: '' PLACEMENT_IMG: quay.io/openstack-k8s-operators/placement-operator-index:latest PLACEMENT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/kuttl-test.yaml PLACEMENT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/test/kuttl/tests PLACEMENT_KUTTL_NAMESPACE: placement-kuttl-tests PLACEMENT_REPO: https://github.com/openstack-k8s-operators/placement-operator.git PULL_SECRET: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/pull-secret.txt RABBITMQ: docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_BRANCH: 
patches RABBITMQ_COMMIT_HASH: '' RABBITMQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rabbitmq-operator/docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_DEPL_IMG: unused RABBITMQ_IMG: quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest RABBITMQ_REPO: https://github.com/openstack-k8s-operators/rabbitmq-cluster-operator.git REDHAT_OPERATORS: 'false' REDIS: config/samples/redis_v1beta1_redis.yaml REDIS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator-redis/config/samples/redis_v1beta1_redis.yaml REDIS_DEPL_IMG: unused RH_REGISTRY_PWD: '' RH_REGISTRY_USER: '' SECRET: os**********et SG_CORE_DEPL_IMG: unused STANDALONE_COMPUTE_DRIVER: libvirt STANDALONE_EXTERNAL_NET_PREFFIX: 172.21.0 STANDALONE_INTERNALAPI_NET_PREFIX: 172.17.0 STANDALONE_STORAGEMGMT_NET_PREFIX: 172.20.0 STANDALONE_STORAGE_NET_PREFIX: 172.18.0 STANDALONE_TENANT_NET_PREFIX: 172.19.0 STORAGEMGMT_HOST_ROUTES: '' STORAGE_CLASS: local-storage STORAGE_HOST_ROUTES: '' SWIFT: config/samples/swift_v1beta1_swift.yaml SWIFT_BRANCH: main SWIFT_COMMIT_HASH: '' SWIFT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/config/samples/swift_v1beta1_swift.yaml SWIFT_IMG: quay.io/openstack-k8s-operators/swift-operator-index:latest SWIFT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/kuttl-test.yaml SWIFT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests SWIFT_KUTTL_NAMESPACE: swift-kuttl-tests SWIFT_REPO: https://github.com/openstack-k8s-operators/swift-operator.git TELEMETRY: config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_BRANCH: main TELEMETRY_COMMIT_HASH: '' TELEMETRY_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_IMG: quay.io/openstack-k8s-operators/telemetry-operator-index:latest TELEMETRY_KUTTL_BASEDIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator TELEMETRY_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/kuttl-test.yaml TELEMETRY_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/test/kuttl/suites TELEMETRY_KUTTL_NAMESPACE: telemetry-kuttl-tests TELEMETRY_KUTTL_RELPATH: test/kuttl/suites TELEMETRY_REPO: https://github.com/openstack-k8s-operators/telemetry-operator.git TENANT_HOST_ROUTES: '' TIMEOUT: 300s TLS_ENABLED: 'false' tripleo_deploy: 'export REGISTRY_PWD:' 2025-12-08 17:53:08,230 p=31902 u=zuul n=ansible | TASK [install_yamls : Generate make targets install_yamls_path={{ cifmw_install_yamls_repo }}, output_directory={{ cifmw_install_yamls_tasks_out }}] *** 2025-12-08 17:53:08,230 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.053) 0:00:59.485 ******* 2025-12-08 17:53:08,230 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.053) 0:00:59.484 ******* 2025-12-08 17:53:08,544 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:08,552 p=31902 u=zuul n=ansible | TASK [install_yamls : Debug generate_make module var=cifmw_generate_makes] ***** 2025-12-08 17:53:08,552 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.322) 0:00:59.808 ******* 2025-12-08 17:53:08,552 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.322) 0:00:59.806 ******* 2025-12-08 17:53:08,582 p=31902 u=zuul n=ansible | ok: [localhost] => cifmw_generate_makes: changed: 
false debug: /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/Makefile: - all - help - cleanup - deploy_cleanup - wait - crc_storage - crc_storage_cleanup - crc_storage_release - crc_storage_with_retries - crc_storage_cleanup_with_retries - operator_namespace - namespace - namespace_cleanup - input - input_cleanup - crc_bmo_setup - crc_bmo_cleanup - openstack_prep - openstack - openstack_wait - openstack_init - openstack_cleanup - openstack_repo - openstack_deploy_prep - openstack_deploy - openstack_wait_deploy - openstack_deploy_cleanup - openstack_update_run - update_services - update_system - openstack_patch_version - edpm_deploy_generate_keys - edpm_patch_ansible_runner_image - edpm_deploy_prep - edpm_deploy_cleanup - edpm_deploy - edpm_deploy_baremetal_prep - edpm_deploy_baremetal - edpm_wait_deploy_baremetal - edpm_wait_deploy - edpm_register_dns - edpm_nova_discover_hosts - openstack_crds - openstack_crds_cleanup - edpm_deploy_networker_prep - edpm_deploy_networker_cleanup - edpm_deploy_networker - infra_prep - infra - infra_cleanup - dns_deploy_prep - dns_deploy - dns_deploy_cleanup - netconfig_deploy_prep - netconfig_deploy - netconfig_deploy_cleanup - memcached_deploy_prep - memcached_deploy - memcached_deploy_cleanup - keystone_prep - keystone - keystone_cleanup - keystone_deploy_prep - keystone_deploy - keystone_deploy_cleanup - barbican_prep - barbican - barbican_cleanup - barbican_deploy_prep - barbican_deploy - barbican_deploy_validate - barbican_deploy_cleanup - mariadb - mariadb_cleanup - mariadb_deploy_prep - mariadb_deploy - mariadb_deploy_cleanup - placement_prep - placement - placement_cleanup - placement_deploy_prep - placement_deploy - placement_deploy_cleanup - glance_prep - glance - glance_cleanup - glance_deploy_prep - glance_deploy - glance_deploy_cleanup - ovn_prep - ovn - ovn_cleanup - ovn_deploy_prep - ovn_deploy - ovn_deploy_cleanup - neutron_prep - neutron - neutron_cleanup - neutron_deploy_prep - neutron_deploy - neutron_deploy_cleanup - cinder_prep - cinder - cinder_cleanup - cinder_deploy_prep - cinder_deploy - cinder_deploy_cleanup - rabbitmq_prep - rabbitmq - rabbitmq_cleanup - rabbitmq_deploy_prep - rabbitmq_deploy - rabbitmq_deploy_cleanup - ironic_prep - ironic - ironic_cleanup - ironic_deploy_prep - ironic_deploy - ironic_deploy_cleanup - octavia_prep - octavia - octavia_cleanup - octavia_deploy_prep - octavia_deploy - octavia_deploy_cleanup - designate_prep - designate - designate_cleanup - designate_deploy_prep - designate_deploy - designate_deploy_cleanup - nova_prep - nova - nova_cleanup - nova_deploy_prep - nova_deploy - nova_deploy_cleanup - mariadb_kuttl_run - mariadb_kuttl - kuttl_db_prep - kuttl_db_cleanup - kuttl_common_prep - kuttl_common_cleanup - keystone_kuttl_run - keystone_kuttl - barbican_kuttl_run - barbican_kuttl - placement_kuttl_run - placement_kuttl - cinder_kuttl_run - cinder_kuttl - neutron_kuttl_run - neutron_kuttl - octavia_kuttl_run - octavia_kuttl - designate_kuttl - designate_kuttl_run - ovn_kuttl_run - ovn_kuttl - infra_kuttl_run - infra_kuttl - ironic_kuttl_run - ironic_kuttl - ironic_kuttl_crc - heat_kuttl_run - heat_kuttl - heat_kuttl_crc - ansibleee_kuttl_run - ansibleee_kuttl_cleanup - ansibleee_kuttl_prep - ansibleee_kuttl - glance_kuttl_run - glance_kuttl - manila_kuttl_run - manila_kuttl - swift_kuttl_run - swift_kuttl - horizon_kuttl_run - horizon_kuttl - openstack_kuttl_run - openstack_kuttl - mariadb_chainsaw_run - mariadb_chainsaw - horizon_prep - horizon - horizon_cleanup - 
horizon_deploy_prep - horizon_deploy - horizon_deploy_cleanup - heat_prep - heat - heat_cleanup - heat_deploy_prep - heat_deploy - heat_deploy_cleanup - ansibleee_prep - ansibleee - ansibleee_cleanup - baremetal_prep - baremetal - baremetal_cleanup - ceph_help - ceph - ceph_cleanup - rook_prep - rook - rook_deploy_prep - rook_deploy - rook_crc_disk - rook_cleanup - lvms - nmstate - nncp - nncp_cleanup - netattach - netattach_cleanup - metallb - metallb_config - metallb_config_cleanup - metallb_cleanup - loki - loki_cleanup - loki_deploy - loki_deploy_cleanup - netobserv - netobserv_cleanup - netobserv_deploy - netobserv_deploy_cleanup - manila_prep - manila - manila_cleanup - manila_deploy_prep - manila_deploy - manila_deploy_cleanup - telemetry_prep - telemetry - telemetry_cleanup - telemetry_deploy_prep - telemetry_deploy - telemetry_deploy_cleanup - telemetry_kuttl_run - telemetry_kuttl - swift_prep - swift - swift_cleanup - swift_deploy_prep - swift_deploy - swift_deploy_cleanup - certmanager - certmanager_cleanup - validate_marketplace - redis_deploy_prep - redis_deploy - redis_deploy_cleanup - set_slower_etcd_profile /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup/Makefile: - help - download_tools - nfs - nfs_cleanup - crc - crc_cleanup - crc_scrub - crc_attach_default_interface - crc_attach_default_interface_cleanup - ipv6_lab_network - ipv6_lab_network_cleanup - ipv6_lab_nat64_router - ipv6_lab_nat64_router_cleanup - ipv6_lab_sno - ipv6_lab_sno_cleanup - ipv6_lab - ipv6_lab_cleanup - attach_default_interface - attach_default_interface_cleanup - network_isolation_bridge - network_isolation_bridge_cleanup - edpm_baremetal_compute - edpm_compute - edpm_compute_bootc - edpm_ansible_runner - edpm_computes_bgp - edpm_compute_repos - edpm_compute_cleanup - edpm_networker - edpm_networker_cleanup - edpm_deploy_instance - tripleo_deploy - standalone_deploy - standalone_sync - standalone - standalone_cleanup - standalone_snapshot - standalone_revert - cifmw_prepare - cifmw_cleanup - bmaas_network - bmaas_network_cleanup - bmaas_route_crc_and_crc_bmaas_networks - bmaas_route_crc_and_crc_bmaas_networks_cleanup - bmaas_crc_attach_network - bmaas_crc_attach_network_cleanup - bmaas_crc_baremetal_bridge - bmaas_crc_baremetal_bridge_cleanup - bmaas_baremetal_net_nad - bmaas_baremetal_net_nad_cleanup - bmaas_metallb - bmaas_metallb_cleanup - bmaas_virtual_bms - bmaas_virtual_bms_cleanup - bmaas_sushy_emulator - bmaas_sushy_emulator_cleanup - bmaas_sushy_emulator_wait - bmaas_generate_nodes_yaml - bmaas - bmaas_cleanup failed: false success: true 2025-12-08 17:53:08,589 p=31902 u=zuul n=ansible | TASK [install_yamls : Create the install_yamls parameters file dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml, content={{ { 'cifmw_install_yamls_environment': cifmw_install_yamls_environment, 'cifmw_install_yamls_defaults': cifmw_install_yamls_defaults } | to_nice_yaml }}, mode=0644] *** 2025-12-08 17:53:08,589 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.037) 0:00:59.845 ******* 2025-12-08 17:53:08,589 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:08 +0000 (0:00:00.037) 0:00:59.843 ******* 2025-12-08 17:53:09,071 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:09,085 p=31902 u=zuul n=ansible | TASK [install_yamls : Create empty cifmw_install_yamls_environment if needed cifmw_install_yamls_environment={}] *** 2025-12-08 17:53:09,085 p=31902 u=zuul 
n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.496) 0:01:00.341 ******* 2025-12-08 17:53:09,086 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.496) 0:01:00.340 ******* 2025-12-08 17:53:09,119 p=31902 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:09,150 p=31902 u=zuul n=ansible | TASK [discover_latest_image : Get latest image url={{ cifmw_discover_latest_image_base_url }}, image_prefix={{ cifmw_discover_latest_image_qcow_prefix }}, images_file={{ cifmw_discover_latest_image_images_file }}] *** 2025-12-08 17:53:09,150 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.064) 0:01:00.406 ******* 2025-12-08 17:53:09,150 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.064) 0:01:00.405 ******* 2025-12-08 17:53:09,699 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:09,710 p=31902 u=zuul n=ansible | TASK [discover_latest_image : Export facts accordingly cifmw_discovered_image_name={{ discovered_image['data']['image_name'] }}, cifmw_discovered_image_url={{ discovered_image['data']['image_url'] }}, cifmw_discovered_hash={{ discovered_image['data']['hash'] }}, cifmw_discovered_hash_algorithm={{ discovered_image['data']['hash_algorithm'] }}, cacheable=True] *** 2025-12-08 17:53:09,710 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.559) 0:01:00.966 ******* 2025-12-08 17:53:09,710 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.559) 0:01:00.964 ******* 2025-12-08 17:53:09,737 p=31902 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:09,750 p=31902 u=zuul n=ansible | TASK [Create artifacts with custom params mode=0644, dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/custom-params.yml, content={{ ci_framework_params | to_nice_yaml }}] *** 2025-12-08 17:53:09,750 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.040) 0:01:01.006 ******* 2025-12-08 17:53:09,750 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:09 +0000 (0:00:00.040) 0:01:01.005 ******* 2025-12-08 17:53:10,211 p=31902 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | PLAY RECAP ********************************************************************* 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | localhost : ok=43 changed=23 unreachable=0 failed=0 skipped=40 rescued=0 ignored=0 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:10 +0000 (0:00:00.483) 0:01:01.489 ******* 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | ci_setup : Install needed packages ------------------------------------- 27.32s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Initialize python venv and install requirements ------------ 8.49s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | ci_setup : Install openshift client ------------------------------------- 5.32s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Get repo-setup repository ---------------------------------- 2.25s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_ca : Update ca bundle ------------------------------------------- 1.65s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | Gathering Facts --------------------------------------------------------- 1.28s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | 
ci_setup : Manage directories ------------------------------------------- 1.21s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Make sure git-core package is installed -------------------- 0.99s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Install repo-setup package --------------------------------- 0.88s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Dump full hash in delorean.repo.md5 file ------------------- 0.72s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_yamls : Ensure directories exist -------------------------------- 0.67s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Run repo-setup --------------------------------------------- 0.65s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Ensure directories are present ----------------------------- 0.62s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_yamls : Get environment structure ------------------------------- 0.61s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | discover_latest_image : Get latest image -------------------------------- 0.56s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Run repo-setup-get-hash ------------------------------------ 0.53s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | repo_setup : Cleanup existing metadata ---------------------------------- 0.50s 2025-12-08 17:53:10,234 p=31902 u=zuul n=ansible | install_yamls : Create the install_yamls parameters file ---------------- 0.50s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | Create artifacts with custom params ------------------------------------- 0.48s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | repo_setup : Remove existing repos from /etc/yum.repos.d directory ------ 0.47s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | Monday 08 December 2025 17:53:10 +0000 (0:00:00.484) 0:01:01.489 ******* 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ci_setup --------------------------------------------------------------- 35.36s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | repo_setup ------------------------------------------------------------- 18.41s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | install_yamls ----------------------------------------------------------- 2.97s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | install_ca -------------------------------------------------------------- 2.15s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | gather_facts ------------------------------------------------------------ 1.28s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | discover_latest_image --------------------------------------------------- 0.60s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ansible.builtin.copy ---------------------------------------------------- 0.48s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ansible.builtin.include_role -------------------------------------------- 0.12s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ansible.builtin.set_fact ------------------------------------------------ 0.08s 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2025-12-08 17:53:10,235 p=31902 u=zuul n=ansible | total ------------------------------------------------------------------ 61.46s 2025-12-08 17:53:11,534 p=32717 u=zuul n=ansible | PLAY [Run pre_infra 
hooks] ***************************************************** 2025-12-08 17:53:11,565 p=32717 u=zuul n=ansible | TASK [run_hook : Assert parameters are valid quiet=True, that=['_list_hooks is not string', '_list_hooks is not mapping', '_list_hooks is iterable', '(hooks | default([])) is not string', '(hooks | default([])) is not mapping', '(hooks | default([])) is iterable']] *** 2025-12-08 17:53:11,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.046) 0:00:00.046 ******* 2025-12-08 17:53:11,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.046) 0:00:00.046 ******* 2025-12-08 17:53:11,640 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:11,651 p=32717 u=zuul n=ansible | TASK [run_hook : Assert single hooks are all mappings quiet=True, that=['_not_mapping_hooks | length == 0'], msg=All single hooks must be a list of mappings or a mapping.] *** 2025-12-08 17:53:11,651 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.085) 0:00:00.131 ******* 2025-12-08 17:53:11,651 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.085) 0:00:00.131 ******* 2025-12-08 17:53:11,739 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:11,749 p=32717 u=zuul n=ansible | TASK [run_hook : Loop on hooks for pre_infra _raw_params={{ hook.type }}.yml] *** 2025-12-08 17:53:11,749 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.098) 0:00:00.230 ******* 2025-12-08 17:53:11,749 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.098) 0:00:00.229 ******* 2025-12-08 17:53:11,825 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:11,867 p=32717 u=zuul n=ansible | PLAY [Prepare host virtualization] ********************************************* 2025-12-08 17:53:11,894 p=32717 u=zuul n=ansible | TASK [Load parameters files dir={{ cifmw_basedir }}/artifacts/parameters] ****** 2025-12-08 17:53:11,894 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.145) 0:00:00.375 ******* 2025-12-08 17:53:11,894 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:11 +0000 (0:00:00.145) 0:00:00.374 ******* 2025-12-08 17:53:12,006 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:12,028 p=32717 u=zuul n=ansible | TASK [Ensure libvirt is present/configured name=libvirt_manager] *************** 2025-12-08 17:53:12,028 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.134) 0:00:00.509 ******* 2025-12-08 17:53:12,029 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.134) 0:00:00.509 ******* 2025-12-08 17:53:12,055 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,065 p=32717 u=zuul n=ansible | TASK [Perpare OpenShift provisioner node name=openshift_provisioner_node] ****** 2025-12-08 17:53:12,065 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.036) 0:00:00.546 ******* 2025-12-08 17:53:12,065 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.036) 0:00:00.545 ******* 2025-12-08 17:53:12,095 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,136 p=32717 u=zuul n=ansible | PLAY [Prepare the platform] **************************************************** 2025-12-08 17:53:12,170 p=32717 u=zuul n=ansible | TASK [Load parameters files dir={{ cifmw_basedir }}/artifacts/parameters] ****** 2025-12-08 17:53:12,170 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.104) 
0:00:00.651 ******* 2025-12-08 17:53:12,170 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.104) 0:00:00.650 ******* 2025-12-08 17:53:12,219 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:12,230 p=32717 u=zuul n=ansible | TASK [networking_mapper : Check for Networking Environment Definition file existence path={{ cifmw_networking_mapper_networking_env_def_path }}] *** 2025-12-08 17:53:12,230 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.059) 0:00:00.711 ******* 2025-12-08 17:53:12,230 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.059) 0:00:00.710 ******* 2025-12-08 17:53:12,505 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:12,515 p=32717 u=zuul n=ansible | TASK [networking_mapper : Check for Networking Definition file existance that=['_net_env_def_stat.stat.exists'], msg=Ensure that the Networking Environment Definition file exists in {{ cifmw_networking_mapper_networking_env_def_path }}, quiet=True] *** 2025-12-08 17:53:12,515 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.285) 0:00:00.996 ******* 2025-12-08 17:53:12,515 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.285) 0:00:00.995 ******* 2025-12-08 17:53:12,535 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,545 p=32717 u=zuul n=ansible | TASK [networking_mapper : Load the Networking Definition from file path={{ cifmw_networking_mapper_networking_env_def_path }}] *** 2025-12-08 17:53:12,545 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.030) 0:00:01.026 ******* 2025-12-08 17:53:12,545 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.030) 0:00:01.025 ******* 2025-12-08 17:53:12,565 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,579 p=32717 u=zuul n=ansible | TASK [networking_mapper : Set cifmw_networking_env_definition is present cifmw_networking_env_definition={{ _net_env_def_slurp['content'] | b64decode | from_yaml }}, cacheable=True] *** 2025-12-08 17:53:12,579 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.033) 0:00:01.060 ******* 2025-12-08 17:53:12,579 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.033) 0:00:01.059 ******* 2025-12-08 17:53:12,599 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,625 p=32717 u=zuul n=ansible | TASK [Deploy OCP using Hive name=hive] ***************************************** 2025-12-08 17:53:12,625 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.046) 0:00:01.106 ******* 2025-12-08 17:53:12,626 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.046) 0:00:01.106 ******* 2025-12-08 17:53:12,644 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,654 p=32717 u=zuul n=ansible | TASK [Prepare CRC name=rhol_crc] *********************************************** 2025-12-08 17:53:12,654 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.028) 0:00:01.135 ******* 2025-12-08 17:53:12,654 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.028) 0:00:01.134 ******* 2025-12-08 17:53:12,676 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,691 p=32717 u=zuul n=ansible | TASK [Deploy OpenShift cluster using dev-scripts name=devscripts] ************** 2025-12-08 17:53:12,691 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.037) 
0:00:01.172 ******* 2025-12-08 17:53:12,691 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.037) 0:00:01.171 ******* 2025-12-08 17:53:12,715 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:12,725 p=32717 u=zuul n=ansible | TASK [openshift_login : Ensure output directory exists path={{ cifmw_openshift_login_basedir }}/artifacts, state=directory, mode=0755] *** 2025-12-08 17:53:12,725 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.034) 0:00:01.206 ******* 2025-12-08 17:53:12,725 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:12 +0000 (0:00:00.034) 0:00:01.205 ******* 2025-12-08 17:53:13,060 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,074 p=32717 u=zuul n=ansible | TASK [openshift_login : OpenShift login _raw_params=login.yml] ***************** 2025-12-08 17:53:13,074 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.349) 0:00:01.555 ******* 2025-12-08 17:53:13,075 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.349) 0:00:01.555 ******* 2025-12-08 17:53:13,104 p=32717 u=zuul n=ansible | included: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/openshift_login/tasks/login.yml for localhost 2025-12-08 17:53:13,115 p=32717 u=zuul n=ansible | TASK [openshift_login : Check if the password file is present path={{ cifmw_openshift_login_password_file | default(cifmw_openshift_password_file) }}] *** 2025-12-08 17:53:13,115 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.040) 0:00:01.596 ******* 2025-12-08 17:53:13,115 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.040) 0:00:01.595 ******* 2025-12-08 17:53:13,136 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,151 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch user password content src={{ cifmw_openshift_login_password_file | default(cifmw_openshift_password_file) }}] *** 2025-12-08 17:53:13,151 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.035) 0:00:01.632 ******* 2025-12-08 17:53:13,151 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.035) 0:00:01.631 ******* 2025-12-08 17:53:13,176 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,187 p=32717 u=zuul n=ansible | TASK [openshift_login : Set user password as a fact cifmw_openshift_login_password={{ cifmw_openshift_login_password_file_slurp.content | b64decode }}, cacheable=True] *** 2025-12-08 17:53:13,187 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:01.668 ******* 2025-12-08 17:53:13,187 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:01.667 ******* 2025-12-08 17:53:13,208 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,218 p=32717 u=zuul n=ansible | TASK [openshift_login : Set role variables cifmw_openshift_login_kubeconfig={{ cifmw_openshift_login_kubeconfig | default(cifmw_openshift_kubeconfig) | default( ansible_env.KUBECONFIG if 'KUBECONFIG' in ansible_env else cifmw_openshift_login_kubeconfig_default_path ) | trim }}, cifmw_openshift_login_user={{ cifmw_openshift_login_user | default(cifmw_openshift_user) | default(omit) }}, cifmw_openshift_login_password={{********** cifmw_openshift_login_password | default(cifmw_openshift_password) | default(omit) }}, cifmw_openshift_login_api={{ cifmw_openshift_login_api | default(cifmw_openshift_api) | default(omit) }}, 
cifmw_openshift_login_cert_login={{ cifmw_openshift_login_cert_login | default(false)}}, cifmw_openshift_login_provided_token={{ cifmw_openshift_provided_token | default(omit) }}, cacheable=True] *** 2025-12-08 17:53:13,218 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.030) 0:00:01.698 ******* 2025-12-08 17:53:13,218 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.030) 0:00:01.698 ******* 2025-12-08 17:53:13,254 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,268 p=32717 u=zuul n=ansible | TASK [openshift_login : Check if kubeconfig exists path={{ cifmw_openshift_login_kubeconfig }}] *** 2025-12-08 17:53:13,268 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:01.749 ******* 2025-12-08 17:53:13,268 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:01.748 ******* 2025-12-08 17:53:13,479 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,500 p=32717 u=zuul n=ansible | TASK [openshift_login : Assert that enough data is provided to log in to OpenShift that=cifmw_openshift_login_kubeconfig_stat.stat.exists or (cifmw_openshift_login_provided_token is defined and cifmw_openshift_login_provided_token != '') or ( (cifmw_openshift_login_user is defined) and (cifmw_openshift_login_password is defined) and (cifmw_openshift_login_api is defined) ), msg=If an existing kubeconfig is not provided user/pwd or provided/initial token and API URL must be given] *** 2025-12-08 17:53:13,500 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.232) 0:00:01.981 ******* 2025-12-08 17:53:13,500 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.232) 0:00:01.980 ******* 2025-12-08 17:53:13,547 p=32717 u=zuul n=ansible | ok: [localhost] => changed: false msg: All assertions passed 2025-12-08 17:53:13,573 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch kubeconfig content src={{ cifmw_openshift_login_kubeconfig }}] *** 2025-12-08 17:53:13,573 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.072) 0:00:02.054 ******* 2025-12-08 17:53:13,573 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.073) 0:00:02.053 ******* 2025-12-08 17:53:13,613 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,634 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch x509 key based users cifmw_openshift_login_key_based_users={{ ( cifmw_openshift_login_kubeconfig_content_b64.content | b64decode | from_yaml ). 
users | default([]) | selectattr('user.client-certificate-data', 'defined') | map(attribute="name") | map("split", "/") | map("first") }}, cacheable=True] *** 2025-12-08 17:53:13,635 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.061) 0:00:02.115 ******* 2025-12-08 17:53:13,635 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.061) 0:00:02.115 ******* 2025-12-08 17:53:13,668 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,684 p=32717 u=zuul n=ansible | TASK [openshift_login : Assign key based user if not provided and available cifmw_openshift_login_user={{ (cifmw_openshift_login_assume_cert_system_user | ternary('system:', '')) + (cifmw_openshift_login_key_based_users | map('replace', 'system:', '') | unique | first) }}, cifmw_openshift_login_cert_login=True, cacheable=True] *** 2025-12-08 17:53:13,685 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.165 ******* 2025-12-08 17:53:13,685 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.165 ******* 2025-12-08 17:53:13,718 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,729 p=32717 u=zuul n=ansible | TASK [openshift_login : Set the retry count cifmw_openshift_login_retries_cnt={{ 0 if cifmw_openshift_login_retries_cnt is undefined else cifmw_openshift_login_retries_cnt|int + 1 }}] *** 2025-12-08 17:53:13,729 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.044) 0:00:02.210 ******* 2025-12-08 17:53:13,730 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.044) 0:00:02.210 ******* 2025-12-08 17:53:13,770 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:13,780 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch token _raw_params=try_login.yml] ***************** 2025-12-08 17:53:13,780 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.261 ******* 2025-12-08 17:53:13,780 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.050) 0:00:02.260 ******* 2025-12-08 17:53:13,804 p=32717 u=zuul n=ansible | included: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/openshift_login/tasks/try_login.yml for localhost 2025-12-08 17:53:13,816 p=32717 u=zuul n=ansible | TASK [openshift_login : Try get OpenShift access token _raw_params=oc whoami -t] *** 2025-12-08 17:53:13,816 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:02.297 ******* 2025-12-08 17:53:13,816 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.036) 0:00:02.296 ******* 2025-12-08 17:53:13,841 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:13,853 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift token output_dir={{ cifmw_openshift_login_basedir }}/artifacts, script=oc login {%- if cifmw_openshift_login_provided_token is not defined %} {%- if cifmw_openshift_login_user is defined %} -u {{ cifmw_openshift_login_user }} {%- endif %} {%- if cifmw_openshift_login_password is defined %} -p {{ cifmw_openshift_login_password }} {%- endif %} {% else %} --token={{ cifmw_openshift_login_provided_token }} {%- endif %} {%- if cifmw_openshift_login_skip_tls_verify|bool %} --insecure-skip-tls-verify=true {%- endif %} {%- if cifmw_openshift_login_api is defined %} {{ cifmw_openshift_login_api }} {%- endif %}] *** 2025-12-08 17:53:13,854 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 
(0:00:00.037) 0:00:02.334 ******* 2025-12-08 17:53:13,854 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:13 +0000 (0:00:00.037) 0:00:02.334 ******* 2025-12-08 17:53:13,924 p=32717 u=zuul n=ansible | Follow script's output here: /home/zuul/ci-framework-data/logs/ci_script_000_fetch_openshift.log 2025-12-08 17:53:14,323 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:14,332 p=32717 u=zuul n=ansible | TASK [openshift_login : Ensure kubeconfig is provided that=cifmw_openshift_login_kubeconfig != ""] *** 2025-12-08 17:53:14,333 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.479) 0:00:02.813 ******* 2025-12-08 17:53:14,333 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.479) 0:00:02.813 ******* 2025-12-08 17:53:14,355 p=32717 u=zuul n=ansible | ok: [localhost] => changed: false msg: All assertions passed 2025-12-08 17:53:14,375 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch new OpenShift access token _raw_params=oc whoami -t] *** 2025-12-08 17:53:14,375 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.042) 0:00:02.856 ******* 2025-12-08 17:53:14,376 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.042) 0:00:02.856 ******* 2025-12-08 17:53:14,888 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:14,913 p=32717 u=zuul n=ansible | TASK [openshift_login : Set new OpenShift token cifmw_openshift_login_token={{ (not cifmw_openshift_login_new_token_out.skipped | default(false)) | ternary(cifmw_openshift_login_new_token_out.stdout, cifmw_openshift_login_whoami_out.stdout) }}, cacheable=True] *** 2025-12-08 17:53:14,914 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.538) 0:00:03.394 ******* 2025-12-08 17:53:14,914 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.538) 0:00:03.394 ******* 2025-12-08 17:53:14,963 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:14,982 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift API URL _raw_params=oc whoami --show-server=true] *** 2025-12-08 17:53:14,982 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.068) 0:00:03.463 ******* 2025-12-08 17:53:14,983 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:14 +0000 (0:00:00.068) 0:00:03.463 ******* 2025-12-08 17:53:15,364 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:15,373 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift kubeconfig context _raw_params=oc whoami -c] *** 2025-12-08 17:53:15,373 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.390) 0:00:03.853 ******* 2025-12-08 17:53:15,373 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.390) 0:00:03.853 ******* 2025-12-08 17:53:15,651 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:15,662 p=32717 u=zuul n=ansible | TASK [openshift_login : Fetch OpenShift current user _raw_params=oc whoami] **** 2025-12-08 17:53:15,662 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.289) 0:00:04.142 ******* 2025-12-08 17:53:15,662 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:15 +0000 (0:00:00.289) 0:00:04.142 ******* 2025-12-08 17:53:16,045 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:16,058 p=32717 u=zuul n=ansible | TASK [openshift_login : Set OpenShift user, context and API facts cifmw_openshift_login_api={{ cifmw_openshift_login_api_out.stdout }}, 
cifmw_openshift_login_context={{ cifmw_openshift_login_context_out.stdout }}, cifmw_openshift_login_user={{ _oauth_user }}, cifmw_openshift_kubeconfig={{ cifmw_openshift_login_kubeconfig }}, cifmw_openshift_api={{ cifmw_openshift_login_api_out.stdout }}, cifmw_openshift_context={{ cifmw_openshift_login_context_out.stdout }}, cifmw_openshift_user={{ _oauth_user }}, cifmw_openshift_token={{ cifmw_openshift_login_token | default(omit) }}, cifmw_install_yamls_environment={{ ( cifmw_install_yamls_environment | combine({'KUBECONFIG': cifmw_openshift_login_kubeconfig}) ) if cifmw_install_yamls_environment is defined else omit }}, cacheable=True] *** 2025-12-08 17:53:16,058 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.396) 0:00:04.539 ******* 2025-12-08 17:53:16,058 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.396) 0:00:04.539 ******* 2025-12-08 17:53:16,091 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:16,104 p=32717 u=zuul n=ansible | TASK [openshift_login : Create the openshift_login parameters file dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/openshift-login-params.yml, content={{ cifmw_openshift_login_params_content | from_yaml | to_nice_yaml }}, mode=0600] *** 2025-12-08 17:53:16,104 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.045) 0:00:04.585 ******* 2025-12-08 17:53:16,104 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.045) 0:00:04.584 ******* 2025-12-08 17:53:16,684 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:16,708 p=32717 u=zuul n=ansible | TASK [openshift_login : Read the install yamls parameters file path={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml] *** 2025-12-08 17:53:16,708 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.603) 0:00:05.189 ******* 2025-12-08 17:53:16,708 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:16 +0000 (0:00:00.603) 0:00:05.188 ******* 2025-12-08 17:53:17,038 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:17,053 p=32717 u=zuul n=ansible | TASK [openshift_login : Append the KUBECONFIG to the install yamls parameters content={{ cifmw_openshift_login_install_yamls_artifacts_slurp['content'] | b64decode | from_yaml | combine( { 'cifmw_install_yamls_environment': { 'KUBECONFIG': cifmw_openshift_login_kubeconfig } }, recursive=true) | to_nice_yaml }}, dest={{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml, mode=0600] *** 2025-12-08 17:53:17,053 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.345) 0:00:05.534 ******* 2025-12-08 17:53:17,053 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.345) 0:00:05.533 ******* 2025-12-08 17:53:17,537 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:17,561 p=32717 u=zuul n=ansible | TASK [openshift_setup : Ensure output directory exists path={{ cifmw_openshift_setup_basedir }}/artifacts, state=directory, mode=0755] *** 2025-12-08 17:53:17,561 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.507) 0:00:06.042 ******* 2025-12-08 17:53:17,561 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.507) 0:00:06.041 ******* 2025-12-08 17:53:17,757 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:17,778 p=32717 u=zuul n=ansible | 
TASK [openshift_setup : Fetch namespaces to create cifmw_openshift_setup_namespaces={{ (( ([cifmw_install_yamls_defaults['NAMESPACE']] + ([cifmw_install_yamls_defaults['OPERATOR_NAMESPACE']] if 'OPERATOR_NAMESPACE' is in cifmw_install_yamls_defaults else []) ) if cifmw_install_yamls_defaults is defined else [] ) + cifmw_openshift_setup_create_namespaces) | unique }}] *** 2025-12-08 17:53:17,778 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.217) 0:00:06.259 ******* 2025-12-08 17:53:17,778 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.217) 0:00:06.258 ******* 2025-12-08 17:53:17,803 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:17,828 p=32717 u=zuul n=ansible | TASK [openshift_setup : Create required namespaces kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit) }}, name={{ item }}, kind=Namespace, state=present] *** 2025-12-08 17:53:17,828 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.050) 0:00:06.309 ******* 2025-12-08 17:53:17,828 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:17 +0000 (0:00:00.050) 0:00:06.308 ******* 2025-12-08 17:53:18,765 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack) 2025-12-08 17:53:19,446 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack-operators) 2025-12-08 17:53:19,474 p=32717 u=zuul n=ansible | TASK [openshift_setup : Get internal OpenShift registry route kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, kind=Route, name=default-route, namespace=openshift-image-registry] *** 2025-12-08 17:53:19,475 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:19 +0000 (0:00:01.646) 0:00:07.955 ******* 2025-12-08 17:53:19,475 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:19 +0000 (0:00:01.646) 0:00:07.955 ******* 2025-12-08 17:53:20,623 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:20,644 p=32717 u=zuul n=ansible | TASK [openshift_setup : Allow anonymous image-pulls in CRC registry for targeted namespaces state=present, kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, definition={'kind': 'RoleBinding', 'apiVersion': 'rbac.authorization.k8s.io/v1', 'metadata': {'name': 'system:image-puller', 'namespace': '{{ item }}'}, 'subjects': [{'kind': 'User', 'name': 'system:anonymous'}, {'kind': 'User', 'name': 'system:unauthenticated'}], 'roleRef': {'kind': 'ClusterRole', 'name': 'system:image-puller'}}] *** 2025-12-08 17:53:20,644 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:20 +0000 (0:00:01.169) 0:00:09.125 ******* 2025-12-08 17:53:20,645 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:20 +0000 (0:00:01.169) 0:00:09.125 ******* 2025-12-08 17:53:21,400 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack) 2025-12-08 17:53:22,068 p=32717 u=zuul n=ansible | changed: [localhost] => (item=openstack-operators) 2025-12-08 17:53:22,096 p=32717 u=zuul n=ansible | TASK [openshift_setup : Wait for the image registry to be ready kind=Deployment, name=image-registry, namespace=openshift-image-registry, kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, wait=True, wait_sleep=10, 
wait_timeout=600, wait_condition={'type': 'Available', 'status': 'True'}] *** 2025-12-08 17:53:22,096 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:22 +0000 (0:00:01.451) 0:00:10.577 ******* 2025-12-08 17:53:22,096 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:22 +0000 (0:00:01.451) 0:00:10.576 ******* 2025-12-08 17:53:23,037 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:23,055 p=32717 u=zuul n=ansible | TASK [openshift_setup : Login into OpenShift internal registry output_dir={{ cifmw_openshift_setup_basedir }}/artifacts, script=podman login -u {{ cifmw_openshift_user }} -p {{ cifmw_openshift_token }} {%- if cifmw_openshift_setup_skip_internal_registry_tls_verify|bool %} --tls-verify=false {%- endif %} {{ cifmw_openshift_setup_registry_default_route.resources[0].spec.host }}] *** 2025-12-08 17:53:23,055 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.959) 0:00:11.536 ******* 2025-12-08 17:53:23,055 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.959) 0:00:11.535 ******* 2025-12-08 17:53:23,112 p=32717 u=zuul n=ansible | Follow script's output here: /home/zuul/ci-framework-data/logs/ci_script_001_login_into_openshift_internal.log 2025-12-08 17:53:23,338 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:23,348 p=32717 u=zuul n=ansible | TASK [Ensure we have custom CA installed on host role=install_ca] ************** 2025-12-08 17:53:23,348 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.293) 0:00:11.829 ******* 2025-12-08 17:53:23,348 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.293) 0:00:11.828 ******* 2025-12-08 17:53:23,368 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,382 p=32717 u=zuul n=ansible | TASK [openshift_setup : Update ca bundle _raw_params=update-ca-trust extract] *** 2025-12-08 17:53:23,382 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.034) 0:00:11.863 ******* 2025-12-08 17:53:23,382 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.034) 0:00:11.862 ******* 2025-12-08 17:53:23,401 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,415 p=32717 u=zuul n=ansible | TASK [openshift_setup : Slurp CAs file src={{ cifmw_openshift_setup_ca_bundle_path }}] *** 2025-12-08 17:53:23,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:11.896 ******* 2025-12-08 17:53:23,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:11.896 ******* 2025-12-08 17:53:23,452 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,491 p=32717 u=zuul n=ansible | TASK [openshift_setup : Create config map with registry CAs kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, definition={'apiVersion': 'v1', 'kind': 'ConfigMap', 'metadata': {'namespace': 'openshift-config', 'name': 'registry-cas'}, 'data': '{{ _config_map_data | items2dict }}'}] *** 2025-12-08 17:53:23,491 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.075) 0:00:11.972 ******* 2025-12-08 17:53:23,491 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.075) 0:00:11.971 ******* 2025-12-08 17:53:23,515 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,524 p=32717 u=zuul n=ansible | TASK [openshift_setup : Install Red Hat CA for pulling images 
from internal registry kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, merge_type=merge, definition={'apiVersion': 'config.openshift.io/v1', 'kind': 'Image', 'metadata': {'name': 'cluster'}, 'spec': {'additionalTrustedCA': {'name': 'registry-cas'}}}] *** 2025-12-08 17:53:23,525 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:12.005 ******* 2025-12-08 17:53:23,525 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.033) 0:00:12.005 ******* 2025-12-08 17:53:23,556 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,566 p=32717 u=zuul n=ansible | TASK [openshift_setup : Add insecure registry kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, merge_type=merge, definition={'apiVersion': 'config.openshift.io/v1', 'kind': 'Image', 'metadata': {'name': 'cluster'}, 'spec': {'registrySources': {'insecureRegistries': ['{{ cifmw_update_containers_registry }}'], 'allowedRegistries': '{{ all_registries }}'}}}] *** 2025-12-08 17:53:23,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.041) 0:00:12.047 ******* 2025-12-08 17:53:23,566 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.041) 0:00:12.046 ******* 2025-12-08 17:53:23,587 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,596 p=32717 u=zuul n=ansible | TASK [openshift_setup : Create a ICSP with repository digest mirrors kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, definition={'apiVersion': 'operator.openshift.io/v1alpha1', 'kind': 'ImageContentSourcePolicy', 'metadata': {'name': 'registry-digest-mirrors'}, 'spec': {'repositoryDigestMirrors': '{{ cifmw_openshift_setup_digest_mirrors }}'}}] *** 2025-12-08 17:53:23,596 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.029) 0:00:12.077 ******* 2025-12-08 17:53:23,596 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.029) 0:00:12.076 ******* 2025-12-08 17:53:23,633 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:23,647 p=32717 u=zuul n=ansible | TASK [openshift_setup : Gather network.operator info kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, api_version=operator.openshift.io/v1, kind=Network, name=cluster] *** 2025-12-08 17:53:23,647 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.051) 0:00:12.128 ******* 2025-12-08 17:53:23,647 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:23 +0000 (0:00:00.051) 0:00:12.127 ******* 2025-12-08 17:53:24,386 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:24,416 p=32717 u=zuul n=ansible | TASK [openshift_setup : Patch network operator api_version=operator.openshift.io/v1, kubeconfig={{ cifmw_openshift_kubeconfig }}, kind=Network, name=cluster, persist_config=True, patch=[{'path': '/spec/defaultNetwork/ovnKubernetesConfig/gatewayConfig/routingViaHost', 'value': True, 'op': 'replace'}, {'path': '/spec/defaultNetwork/ovnKubernetesConfig/gatewayConfig/ipForwarding', 'value': 'Global', 'op': 'replace'}]] *** 2025-12-08 17:53:24,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:24 +0000 
(0:00:00.768) 0:00:12.897 ******* 2025-12-08 17:53:24,416 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:24 +0000 (0:00:00.768) 0:00:12.896 ******* 2025-12-08 17:53:25,337 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:25,360 p=32717 u=zuul n=ansible | TASK [openshift_setup : Patch samples registry configuration kubeconfig={{ cifmw_openshift_kubeconfig }}, api_key={{ cifmw_openshift_token | default(omit)}}, context={{ cifmw_openshift_context | default(omit)}}, api_version=samples.operator.openshift.io/v1, kind=Config, name=cluster, patch=[{'op': 'replace', 'path': '/spec/samplesRegistry', 'value': 'registry.redhat.io'}]] *** 2025-12-08 17:53:25,360 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:25 +0000 (0:00:00.944) 0:00:13.841 ******* 2025-12-08 17:53:25,360 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:25 +0000 (0:00:00.944) 0:00:13.840 ******* 2025-12-08 17:53:26,101 p=32717 u=zuul n=ansible | changed: [localhost] 2025-12-08 17:53:26,116 p=32717 u=zuul n=ansible | TASK [openshift_setup : Delete the pods from openshift-marketplace namespace kind=Pod, state=absent, delete_all=True, kubeconfig={{ cifmw_openshift_kubeconfig }}, namespace=openshift-marketplace] *** 2025-12-08 17:53:26,116 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.755) 0:00:14.596 ******* 2025-12-08 17:53:26,116 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.755) 0:00:14.596 ******* 2025-12-08 17:53:26,134 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,147 p=32717 u=zuul n=ansible | TASK [openshift_setup : Wait for openshift-marketplace pods to be running _raw_params=oc wait pod --all --for=condition=Ready -n openshift-marketplace --timeout=1m] *** 2025-12-08 17:53:26,148 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.628 ******* 2025-12-08 17:53:26,148 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.628 ******* 2025-12-08 17:53:26,165 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,185 p=32717 u=zuul n=ansible | TASK [Deploy Observability operator. 
name=openshift_obs] *********************** 2025-12-08 17:53:26,185 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.037) 0:00:14.666 ******* 2025-12-08 17:53:26,185 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.036) 0:00:14.665 ******* 2025-12-08 17:53:26,210 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,271 p=32717 u=zuul n=ansible | TASK [Deploy Metal3 BMHs name=deploy_bmh] ************************************** 2025-12-08 17:53:26,271 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.086) 0:00:14.752 ******* 2025-12-08 17:53:26,271 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.086) 0:00:14.751 ******* 2025-12-08 17:53:26,293 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,304 p=32717 u=zuul n=ansible | TASK [Install certmanager operator role name=cert_manager] ********************* 2025-12-08 17:53:26,304 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.785 ******* 2025-12-08 17:53:26,304 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.032) 0:00:14.784 ******* 2025-12-08 17:53:26,323 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,332 p=32717 u=zuul n=ansible | TASK [Configure hosts networking using nmstate name=ci_nmstate] **************** 2025-12-08 17:53:26,332 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.813 ******* 2025-12-08 17:53:26,333 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.813 ******* 2025-12-08 17:53:26,350 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,362 p=32717 u=zuul n=ansible | TASK [Configure multus networks name=ci_multus] ******************************** 2025-12-08 17:53:26,362 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.029) 0:00:14.843 ******* 2025-12-08 17:53:26,362 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.029) 0:00:14.842 ******* 2025-12-08 17:53:26,379 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,389 p=32717 u=zuul n=ansible | TASK [Deploy Sushy Emulator service pod name=sushy_emulator] ******************* 2025-12-08 17:53:26,389 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.026) 0:00:14.870 ******* 2025-12-08 17:53:26,389 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.026) 0:00:14.869 ******* 2025-12-08 17:53:26,408 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,418 p=32717 u=zuul n=ansible | TASK [Setup Libvirt on controller name=libvirt_manager] ************************ 2025-12-08 17:53:26,418 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.898 ******* 2025-12-08 17:53:26,418 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.898 ******* 2025-12-08 17:53:26,434 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,446 p=32717 u=zuul n=ansible | TASK [Prepare container package builder name=pkg_build] ************************ 2025-12-08 17:53:26,446 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.927 ******* 2025-12-08 17:53:26,446 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.028) 0:00:14.926 ******* 2025-12-08 17:53:26,472 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 
17:53:26,481 p=32717 u=zuul n=ansible | TASK [run_hook : Assert parameters are valid quiet=True, that=['_list_hooks is not string', '_list_hooks is not mapping', '_list_hooks is iterable', '(hooks | default([])) is not string', '(hooks | default([])) is not mapping', '(hooks | default([])) is iterable']] *** 2025-12-08 17:53:26,482 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.035) 0:00:14.962 ******* 2025-12-08 17:53:26,482 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.035) 0:00:14.962 ******* 2025-12-08 17:53:26,566 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:26,576 p=32717 u=zuul n=ansible | TASK [run_hook : Assert single hooks are all mappings quiet=True, that=['_not_mapping_hooks | length == 0'], msg=All single hooks must be a list of mappings or a mapping.] *** 2025-12-08 17:53:26,576 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.094) 0:00:15.057 ******* 2025-12-08 17:53:26,576 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.094) 0:00:15.056 ******* 2025-12-08 17:53:26,646 p=32717 u=zuul n=ansible | ok: [localhost] 2025-12-08 17:53:26,664 p=32717 u=zuul n=ansible | TASK [run_hook : Loop on hooks for post_infra _raw_params={{ hook.type }}.yml] *** 2025-12-08 17:53:26,664 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.088) 0:00:15.145 ******* 2025-12-08 17:53:26,664 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.088) 0:00:15.144 ******* 2025-12-08 17:53:26,747 p=32717 u=zuul n=ansible | skipping: [localhost] 2025-12-08 17:53:26,805 p=32717 u=zuul n=ansible | PLAY RECAP ********************************************************************* 2025-12-08 17:53:26,805 p=32717 u=zuul n=ansible | localhost : ok=35 changed=12 unreachable=0 failed=0 skipped=34 rescued=0 ignored=0 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.141) 0:00:15.286 ******* 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Create required namespaces ---------------------------- 1.65s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Allow anonymous image-pulls in CRC registry for targeted namespaces --- 1.45s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Get internal OpenShift registry route ----------------- 1.17s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Wait for the image registry to be ready --------------- 0.96s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Patch network operator -------------------------------- 0.94s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Gather network.operator info -------------------------- 0.77s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Patch samples registry configuration ------------------ 0.76s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Create the openshift_login parameters file ------------ 0.60s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch new OpenShift access token ---------------------- 0.54s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Append the KUBECONFIG to the install yamls parameters --- 0.51s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch 
OpenShift token --------------------------------- 0.48s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch OpenShift current user -------------------------- 0.40s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch OpenShift API URL ------------------------------- 0.39s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Ensure output directory exists ------------------------ 0.35s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Read the install yamls parameters file ---------------- 0.35s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Login into OpenShift internal registry ---------------- 0.29s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Fetch OpenShift kubeconfig context -------------------- 0.29s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | networking_mapper : Check for Networking Environment Definition file existence --- 0.29s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_login : Check if kubeconfig exists ---------------------------- 0.23s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | openshift_setup : Ensure output directory exists ------------------------ 0.22s 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | Monday 08 December 2025 17:53:26 +0000 (0:00:00.142) 0:00:15.286 ******* 2025-12-08 17:53:26,806 p=32717 u=zuul n=ansible | =============================================================================== 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | openshift_setup --------------------------------------------------------- 8.62s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | openshift_login --------------------------------------------------------- 4.84s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | run_hook ---------------------------------------------------------------- 0.65s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | ansible.builtin.include_role -------------------------------------------- 0.54s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | networking_mapper ------------------------------------------------------- 0.40s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | ansible.builtin.include_vars -------------------------------------------- 0.19s 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2025-12-08 17:53:26,807 p=32717 u=zuul n=ansible | total ------------------------------------------------------------------ 15.24s 2025-12-08 17:53:44,400 p=33314 u=zuul n=ansible | Starting galaxy collection install process 2025-12-08 17:53:44,421 p=33314 u=zuul n=ansible | Process install dependency map 2025-12-08 17:53:59,851 p=33314 u=zuul n=ansible | Starting collection install process 2025-12-08 17:53:59,851 p=33314 u=zuul n=ansible | Installing 'cifmw.general:1.0.0+33d5122f' to '/home/zuul/.ansible/collections/ansible_collections/cifmw/general' 2025-12-08 17:54:00,501 p=33314 u=zuul n=ansible | Created collection for cifmw.general:1.0.0+33d5122f at /home/zuul/.ansible/collections/ansible_collections/cifmw/general 2025-12-08 17:54:00,501 p=33314 u=zuul n=ansible | cifmw.general:1.0.0+33d5122f was installed successfully 2025-12-08 17:54:00,501 p=33314 u=zuul n=ansible | Installing 'containers.podman:1.16.2' to '/home/zuul/.ansible/collections/ansible_collections/containers/podman' 2025-12-08 17:54:00,568 p=33314 u=zuul n=ansible | Created collection for containers.podman:1.16.2 at 
/home/zuul/.ansible/collections/ansible_collections/containers/podman 2025-12-08 17:54:00,568 p=33314 u=zuul n=ansible | containers.podman:1.16.2 was installed successfully 2025-12-08 17:54:00,568 p=33314 u=zuul n=ansible | Installing 'community.general:10.0.1' to '/home/zuul/.ansible/collections/ansible_collections/community/general' 2025-12-08 17:54:01,553 p=33314 u=zuul n=ansible | Created collection for community.general:10.0.1 at /home/zuul/.ansible/collections/ansible_collections/community/general 2025-12-08 17:54:01,553 p=33314 u=zuul n=ansible | community.general:10.0.1 was installed successfully 2025-12-08 17:54:01,553 p=33314 u=zuul n=ansible | Installing 'ansible.posix:1.6.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/posix' 2025-12-08 17:54:01,614 p=33314 u=zuul n=ansible | Created collection for ansible.posix:1.6.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/posix 2025-12-08 17:54:01,614 p=33314 u=zuul n=ansible | ansible.posix:1.6.2 was installed successfully 2025-12-08 17:54:01,614 p=33314 u=zuul n=ansible | Installing 'ansible.utils:5.1.2' to '/home/zuul/.ansible/collections/ansible_collections/ansible/utils' 2025-12-08 17:54:01,736 p=33314 u=zuul n=ansible | Created collection for ansible.utils:5.1.2 at /home/zuul/.ansible/collections/ansible_collections/ansible/utils 2025-12-08 17:54:01,736 p=33314 u=zuul n=ansible | ansible.utils:5.1.2 was installed successfully 2025-12-08 17:54:01,736 p=33314 u=zuul n=ansible | Installing 'community.libvirt:1.3.0' to '/home/zuul/.ansible/collections/ansible_collections/community/libvirt' 2025-12-08 17:54:01,797 p=33314 u=zuul n=ansible | Created collection for community.libvirt:1.3.0 at /home/zuul/.ansible/collections/ansible_collections/community/libvirt 2025-12-08 17:54:01,797 p=33314 u=zuul n=ansible | community.libvirt:1.3.0 was installed successfully 2025-12-08 17:54:01,798 p=33314 u=zuul n=ansible | Installing 'community.crypto:2.22.3' to '/home/zuul/.ansible/collections/ansible_collections/community/crypto' 2025-12-08 17:54:01,972 p=33314 u=zuul n=ansible | Created collection for community.crypto:2.22.3 at /home/zuul/.ansible/collections/ansible_collections/community/crypto 2025-12-08 17:54:01,972 p=33314 u=zuul n=ansible | community.crypto:2.22.3 was installed successfully 2025-12-08 17:54:01,972 p=33314 u=zuul n=ansible | Installing 'kubernetes.core:5.0.0' to '/home/zuul/.ansible/collections/ansible_collections/kubernetes/core' 2025-12-08 17:54:02,156 p=33314 u=zuul n=ansible | Created collection for kubernetes.core:5.0.0 at /home/zuul/.ansible/collections/ansible_collections/kubernetes/core 2025-12-08 17:54:02,156 p=33314 u=zuul n=ansible | kubernetes.core:5.0.0 was installed successfully 2025-12-08 17:54:02,156 p=33314 u=zuul n=ansible | Installing 'ansible.netcommon:7.1.0' to '/home/zuul/.ansible/collections/ansible_collections/ansible/netcommon' 2025-12-08 17:54:02,244 p=33314 u=zuul n=ansible | Created collection for ansible.netcommon:7.1.0 at /home/zuul/.ansible/collections/ansible_collections/ansible/netcommon 2025-12-08 17:54:02,245 p=33314 u=zuul n=ansible | ansible.netcommon:7.1.0 was installed successfully 2025-12-08 17:54:02,245 p=33314 u=zuul n=ansible | Installing 'openstack.config_template:2.1.1' to '/home/zuul/.ansible/collections/ansible_collections/openstack/config_template' 2025-12-08 17:54:02,278 p=33314 u=zuul n=ansible | Created collection for openstack.config_template:2.1.1 at /home/zuul/.ansible/collections/ansible_collections/openstack/config_template 2025-12-08 
17:54:02,278 p=33314 u=zuul n=ansible | openstack.config_template:2.1.1 was installed successfully 2025-12-08 17:54:02,278 p=33314 u=zuul n=ansible | Installing 'junipernetworks.junos:9.1.0' to '/home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos' 2025-12-08 17:54:02,619 p=33314 u=zuul n=ansible | Created collection for junipernetworks.junos:9.1.0 at /home/zuul/.ansible/collections/ansible_collections/junipernetworks/junos 2025-12-08 17:54:02,619 p=33314 u=zuul n=ansible | junipernetworks.junos:9.1.0 was installed successfully 2025-12-08 17:54:02,619 p=33314 u=zuul n=ansible | Installing 'cisco.ios:9.0.3' to '/home/zuul/.ansible/collections/ansible_collections/cisco/ios' 2025-12-08 17:54:02,980 p=33314 u=zuul n=ansible | Created collection for cisco.ios:9.0.3 at /home/zuul/.ansible/collections/ansible_collections/cisco/ios 2025-12-08 17:54:02,980 p=33314 u=zuul n=ansible | cisco.ios:9.0.3 was installed successfully 2025-12-08 17:54:02,980 p=33314 u=zuul n=ansible | Installing 'mellanox.onyx:1.0.0' to '/home/zuul/.ansible/collections/ansible_collections/mellanox/onyx' 2025-12-08 17:54:03,021 p=33314 u=zuul n=ansible | Created collection for mellanox.onyx:1.0.0 at /home/zuul/.ansible/collections/ansible_collections/mellanox/onyx 2025-12-08 17:54:03,021 p=33314 u=zuul n=ansible | mellanox.onyx:1.0.0 was installed successfully 2025-12-08 17:54:03,021 p=33314 u=zuul n=ansible | Installing 'community.okd:4.0.0' to '/home/zuul/.ansible/collections/ansible_collections/community/okd' 2025-12-08 17:54:03,054 p=33314 u=zuul n=ansible | Created collection for community.okd:4.0.0 at /home/zuul/.ansible/collections/ansible_collections/community/okd 2025-12-08 17:54:03,054 p=33314 u=zuul n=ansible | community.okd:4.0.0 was installed successfully 2025-12-08 17:54:03,054 p=33314 u=zuul n=ansible | Installing '@NAMESPACE@.@NAME@:3.1.4' to '/home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@' 2025-12-08 17:54:03,162 p=33314 u=zuul n=ansible | Created collection for @NAMESPACE@.@NAME@:3.1.4 at /home/zuul/.ansible/collections/ansible_collections/@NAMESPACE@/@NAME@ 2025-12-08 17:54:03,162 p=33314 u=zuul n=ansible | @NAMESPACE@.@NAME@:3.1.4 was installed successfully home/zuul/zuul-output/logs/ci-framework-data/artifacts/0000755000175000017500000000000015115611531022342 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/resolv.conf0000644000175000017500000000015215115611271024522 0ustar zuulzuul# Generated by NetworkManager nameserver 192.168.122.10 nameserver 199.204.44.24 nameserver 199.204.47.54 home/zuul/zuul-output/logs/ci-framework-data/artifacts/hosts0000644000175000017500000000023715115611271023430 0ustar zuulzuul127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 home/zuul/zuul-output/logs/ci-framework-data/artifacts/ip-network.txt0000644000175000017500000000315715115611271025211 0ustar zuulzuuldefault via 38.102.83.1 dev eth0 proto dhcp src 38.102.83.251 metric 100 38.102.83.0/24 dev eth0 proto kernel scope link src 38.102.83.251 metric 100 169.254.169.254 via 38.102.83.126 dev eth0 proto dhcp src 38.102.83.251 metric 100 192.168.122.0/24 dev eth1 proto kernel scope link src 192.168.122.11 metric 101 0: from all lookup local 32766: from all lookup main 32767: from all lookup default [ { "ifindex": 1, "ifname": "lo", "flags": [ "LOOPBACK","UP","LOWER_UP" ], "mtu": 65536, "qdisc": "noqueue", "operstate": "UNKNOWN", "linkmode": "DEFAULT", "group": 
"default", "txqlen": 1000, "link_type": "loopback", "address": "00:00:00:00:00:00", "broadcast": "00:00:00:00:00:00" },{ "ifindex": 2, "ifname": "eth0", "flags": [ "BROADCAST","MULTICAST","UP","LOWER_UP" ], "mtu": 1500, "qdisc": "fq_codel", "operstate": "UP", "linkmode": "DEFAULT", "group": "default", "txqlen": 1000, "link_type": "ether", "address": "fa:16:3e:97:c9:c3", "broadcast": "ff:ff:ff:ff:ff:ff", "altnames": [ "enp0s3","ens3" ] },{ "ifindex": 3, "ifname": "eth1", "flags": [ "BROADCAST","MULTICAST","UP","LOWER_UP" ], "mtu": 1500, "qdisc": "fq_codel", "operstate": "UP", "linkmode": "DEFAULT", "group": "default", "txqlen": 1000, "link_type": "ether", "address": "fa:16:3e:6a:de:3b", "broadcast": "ff:ff:ff:ff:ff:ff", "altnames": [ "enp0s7","ens7" ] } ] home/zuul/zuul-output/logs/ci-framework-data/artifacts/ci_script_000_check_for_oc.sh0000644000175000017500000000020715115611276027725 0ustar zuulzuul#!/bin/bash set -euo pipefail exec > >(tee -i /home/zuul/ci-framework-data/logs/ci_script_000_check_for_oc.log) 2>&1 command -v oc home/zuul/zuul-output/logs/ci-framework-data/artifacts/ci_script_000_run_openstack_must_gather.sh0000644000175000017500000000132215115611277032576 0ustar zuulzuul#!/bin/bash set -euo pipefail exec > >(tee -i /home/zuul/ci-framework-data/logs/ci_script_000_run_openstack_must_gather.log) 2>&1 timeout 2700.0 oc adm must-gather --image quay.io/openstack-k8s-operators/openstack-must-gather:latest --timeout 30m --host-network=False --dest-dir /home/zuul/ci-framework-data/logs/openstack-must-gather -- ADDITIONAL_NAMESPACES=kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko OPENSTACK_DATABASES=$OPENSTACK_DATABASES SOS_EDPM=$SOS_EDPM SOS_DECOMPRESS=$SOS_DECOMPRESS gather 2>&1 || { rc=$? if [ $rc -eq 124 ]; then echo "The must gather command did not finish on time!" echo "2700.0 seconds was not enough to finish the task." 
fi } home/zuul/zuul-output/logs/ci-framework-data/artifacts/ci_script_000_prepare_root_ssh.sh0000644000175000017500000000122315115611505030671 0ustar zuulzuul#!/bin/bash set -euo pipefail exec > >(tee -i /home/zuul/ci-framework-data/logs/ci_script_000_prepare_root_ssh.log) 2>&1 ssh -i ~/.ssh/id_cifw core@api.crc.testing < >(tee -i /home/zuul/ci-framework-data/logs/ci_script_000_copy_logs_from_crc.log) 2>&1 scp -v -r -i ~/.ssh/id_cifw core@api.crc.testing:/tmp/crc-logs-artifacts /home/zuul/ci-framework-data/logs/crc/ home/zuul/zuul-output/logs/ci-framework-data/artifacts/zuul_inventory.yml0000644000175000017500000007311615115611523026212 0ustar zuulzuulall: children: zuul_unreachable: hosts: {} hosts: controller: ansible_connection: ssh ansible_host: 38.102.83.251 ansible_port: 22 ansible_python_interpreter: auto ansible_user: zuul cifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_openshift_api: api.crc.testing:6443 cifmw_openshift_kubeconfig: '{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig' cifmw_openshift_password: '12**********89' cifmw_openshift_skip_tls_verify: true cifmw_openshift_user: kubeadmin cifmw_run_tests: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller crc_ci_bootstrap_cloud_name: '{{ nodepool.cloud | replace(''-nodepool-tripleo'','''') }}' crc_ci_bootstrap_networking: instances: controller: networks: default: ip: 192.168.122.11 crc: networks: default: ip: 192.168.122.10 internal-api: ip: 172.17.0.5 storage: ip: 172.18.0.5 tenant: ip: 172.19.0.5 networks: default: mtu: 1500 range: 192.168.122.0/24 internal-api: range: 172.17.0.0/24 vlan: 20 storage: range: 172.18.0.0/24 vlan: 21 tenant: range: 172.19.0.0/24 vlan: 22 enable_ramdisk: true nodepool: az: nova cloud: vexxhost-nodepool-tripleo external_id: 2651912f-4167-4227-a778-d37fa1159493 host_id: b012578aee5370fae73eb6c92c4679617335173cccca05390470f411 interface_ip: 38.102.83.251 label: cloud-centos-9-stream-tripleo-vexxhost private_ipv4: 38.102.83.251 private_ipv6: null provider: vexxhost-nodepool-tripleo public_ipv4: 38.102.83.251 public_ipv6: '' region: RegionOne slot: null podified_validation: true push_registry: quay.rdoproject.org quay_login_secret_name: quay_nextgen_zuulgithubci registry_login_enabled: true scenario: catalog_deploy zuul_log_collection: true crc: ansible_connection: ssh ansible_host: 38.102.83.243 ansible_port: 22 ansible_python_interpreter: auto ansible_user: core cifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_openshift_api: api.crc.testing:6443 cifmw_openshift_kubeconfig: '{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig' cifmw_openshift_password: '12**********89' cifmw_openshift_skip_tls_verify: true cifmw_openshift_user: kubeadmin cifmw_run_tests: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller crc_ci_bootstrap_cloud_name: '{{ nodepool.cloud | replace(''-nodepool-tripleo'','''') }}' crc_ci_bootstrap_networking: instances: controller: networks: default: ip: 192.168.122.11 crc: networks: default: ip: 192.168.122.10 internal-api: ip: 172.17.0.5 storage: ip: 172.18.0.5 tenant: ip: 172.19.0.5 networks: default: mtu: 1500 range: 192.168.122.0/24 internal-api: range: 172.17.0.0/24 vlan: 20 storage: range: 172.18.0.0/24 vlan: 21 tenant: range: 172.19.0.0/24 vlan: 22 
enable_ramdisk: true nodepool: az: nova cloud: vexxhost-nodepool-tripleo external_id: 32c1a977-c4dc-4b4f-b307-ff2a2f4e57f1 host_id: b012578aee5370fae73eb6c92c4679617335173cccca05390470f411 interface_ip: 38.102.83.243 label: crc-cloud-ocp-4-20-1-3xl private_ipv4: 38.102.83.243 private_ipv6: null provider: vexxhost-nodepool-tripleo public_ipv4: 38.102.83.243 public_ipv6: '' region: RegionOne slot: null podified_validation: true push_registry: quay.rdoproject.org quay_login_secret_name: quay_nextgen_zuulgithubci registry_login_enabled: true scenario: catalog_deploy zuul_log_collection: true localhost: ansible_connection: local vars: cifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_openshift_api: api.crc.testing:6443 cifmw_openshift_kubeconfig: '{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig' cifmw_openshift_password: '12**********89' cifmw_openshift_skip_tls_verify: true cifmw_openshift_user: kubeadmin cifmw_run_tests: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller crc_ci_bootstrap_cloud_name: '{{ nodepool.cloud | replace(''-nodepool-tripleo'','''') }}' crc_ci_bootstrap_networking: instances: controller: networks: default: ip: 192.168.122.11 crc: networks: default: ip: 192.168.122.10 internal-api: ip: 172.17.0.5 storage: ip: 172.18.0.5 tenant: ip: 172.19.0.5 networks: default: mtu: 1500 range: 192.168.122.0/24 internal-api: range: 172.17.0.0/24 vlan: 20 storage: range: 172.18.0.0/24 vlan: 21 tenant: range: 172.19.0.0/24 vlan: 22 enable_ramdisk: true podified_validation: true push_registry: quay.rdoproject.org quay_login_secret_name: quay_nextgen_zuulgithubci registry_login_enabled: true scenario: catalog_deploy zuul: _inheritance_path: - '' - '' - '' - '' - '' - '' - '' - '' - '' ansible_version: '8' attempts: 1 branch: master build: fdae556768574d6f9092d7162dc9ae0f build_refs: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null buildset: c405b24f52df4ff1a39b37dcfc476a60 buildset_refs: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 child_jobs: [] commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb event_id: 
0add7250-d45c-11f0-86cc-0eee4913030a executor: hostname: ze03.softwarefactory-project.io inventory_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/inventory.yaml log_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/logs result_data_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/results.json src_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/src work_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work items: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null job: stf-crc-ocp_420-catalog_deploy jobtags: [] max_attempts: 1 message: QWRkIE9DUCA0LjIwIGpvYnMKCkFkZCBqb2IgZGVmaW5pdGlvbnMgdXNpbmcgY3JjLWNsb3VkLW9jcC00LTIwLTEtM3hsIGFzIGJhc2UgaW1hZ2UNCg0KQ2xvc2VzOiBPU1BSSC0yMTg4MQ== patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb pipeline: github-check playbook_context: playbook_projects: trusted/project_0/review.rdoproject.org/config: canonical_name: review.rdoproject.org/config checkout: master commit: 40052f923df77143f1c9739304c4b4221346825f trusted/project_1/opendev.org/zuul/zuul-jobs: canonical_name: opendev.org/zuul/zuul-jobs checkout: master commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 trusted/project_2/review.rdoproject.org/rdo-jobs: canonical_name: review.rdoproject.org/rdo-jobs checkout: master commit: 9df4e7d5b028e976203d64479f9b7a76c1c95a24 trusted/project_3/github.com/openstack-k8s-operators/ci-framework: canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 untrusted/project_0/github.com/openstack-k8s-operators/ci-framework: canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 untrusted/project_1/review.rdoproject.org/config: canonical_name: review.rdoproject.org/config checkout: master commit: 40052f923df77143f1c9739304c4b4221346825f untrusted/project_2/opendev.org/zuul/zuul-jobs: canonical_name: opendev.org/zuul/zuul-jobs checkout: master commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 untrusted/project_3/review.rdoproject.org/rdo-jobs: canonical_name: review.rdoproject.org/rdo-jobs checkout: master commit: 9df4e7d5b028e976203d64479f9b7a76c1c95a24 untrusted/project_4/github.com/infrawatch/service-telemetry-operator: canonical_name: github.com/infrawatch/service-telemetry-operator checkout: master commit: dee1e9b260d30a0e04e6122a214cac385c42d9bb playbooks: - path: untrusted/project_4/github.com/infrawatch/service-telemetry-operator/ci/deploy_stf.yml roles: - checkout: master checkout_description: playbook branch link_name: ansible/playbook_0/role_0/service-telemetry-operator link_target: untrusted/project_4/github.com/infrawatch/service-telemetry-operator role_path: ansible/playbook_0/role_0/service-telemetry-operator/roles - checkout: main checkout_description: project override ref link_name: ansible/playbook_0/role_1/ci-framework link_target: 
untrusted/project_0/github.com/openstack-k8s-operators/ci-framework role_path: ansible/playbook_0/role_1/ci-framework/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_2/config link_target: untrusted/project_1/review.rdoproject.org/config role_path: ansible/playbook_0/role_2/config/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_3/zuul-jobs link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs role_path: ansible/playbook_0/role_3/zuul-jobs/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_4/rdo-jobs link_target: untrusted/project_3/review.rdoproject.org/rdo-jobs role_path: ansible/playbook_0/role_4/rdo-jobs/roles - path: untrusted/project_4/github.com/infrawatch/service-telemetry-operator/ci/test_stf.yml roles: - checkout: master checkout_description: playbook branch link_name: ansible/playbook_1/role_0/service-telemetry-operator link_target: untrusted/project_4/github.com/infrawatch/service-telemetry-operator role_path: ansible/playbook_1/role_0/service-telemetry-operator/roles - checkout: main checkout_description: project override ref link_name: ansible/playbook_1/role_1/ci-framework link_target: untrusted/project_0/github.com/openstack-k8s-operators/ci-framework role_path: ansible/playbook_1/role_1/ci-framework/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_2/config link_target: untrusted/project_1/review.rdoproject.org/config role_path: ansible/playbook_1/role_2/config/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_3/zuul-jobs link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs role_path: ansible/playbook_1/role_3/zuul-jobs/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_4/rdo-jobs link_target: untrusted/project_3/review.rdoproject.org/rdo-jobs role_path: ansible/playbook_1/role_4/rdo-jobs/roles post_review: false project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator projects: github.com/crc-org/crc-cloud: canonical_hostname: github.com canonical_name: github.com/crc-org/crc-cloud checkout: main checkout_description: project override ref commit: 1c985328b5b8cdf9dc083e0c7b3abae12c7c8c53 name: crc-org/crc-cloud required: true short_name: crc-cloud src_dir: src/github.com/crc-org/crc-cloud github.com/infrawatch/prometheus-webhook-snmp: canonical_hostname: github.com canonical_name: github.com/infrawatch/prometheus-webhook-snmp checkout: master checkout_description: zuul branch commit: 3959c53b2613d03d066cb1b2fe5bdae8633ae895 name: infrawatch/prometheus-webhook-snmp required: true short_name: prometheus-webhook-snmp src_dir: src/github.com/infrawatch/prometheus-webhook-snmp github.com/infrawatch/service-telemetry-operator: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator checkout: master checkout_description: zuul branch commit: dee1e9b260d30a0e04e6122a214cac385c42d9bb name: infrawatch/service-telemetry-operator required: true short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator github.com/infrawatch/sg-bridge: canonical_hostname: github.com canonical_name: github.com/infrawatch/sg-bridge checkout: master 
checkout_description: zuul branch commit: bab11fba86ad0c21cb35e12b56bf086a3332f1d2 name: infrawatch/sg-bridge required: true short_name: sg-bridge src_dir: src/github.com/infrawatch/sg-bridge github.com/infrawatch/sg-core: canonical_hostname: github.com canonical_name: github.com/infrawatch/sg-core checkout: master checkout_description: zuul branch commit: 5a4aece11fea9f71ce7515d11e1e7f0eae97eea6 name: infrawatch/sg-core required: true short_name: sg-core src_dir: src/github.com/infrawatch/sg-core github.com/infrawatch/smart-gateway-operator: canonical_hostname: github.com canonical_name: github.com/infrawatch/smart-gateway-operator checkout: master checkout_description: zuul branch commit: 2ff5b96b6254418d20a509188eea72ab2c77839c name: infrawatch/smart-gateway-operator required: true short_name: smart-gateway-operator src_dir: src/github.com/infrawatch/smart-gateway-operator github.com/openstack-k8s-operators/ci-framework: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main checkout_description: project override ref commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 name: openstack-k8s-operators/ci-framework required: true short_name: ci-framework src_dir: src/github.com/openstack-k8s-operators/ci-framework github.com/openstack-k8s-operators/dataplane-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/dataplane-operator checkout: main checkout_description: project override ref commit: c98b51bcd7fe14b85ed4cf3f5f76552b3455c5f2 name: openstack-k8s-operators/dataplane-operator required: true short_name: dataplane-operator src_dir: src/github.com/openstack-k8s-operators/dataplane-operator github.com/openstack-k8s-operators/edpm-ansible: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/edpm-ansible checkout: main checkout_description: project default branch commit: def07d8eb172b38b1a39695442f28465a1dfac35 name: openstack-k8s-operators/edpm-ansible required: true short_name: edpm-ansible src_dir: src/github.com/openstack-k8s-operators/edpm-ansible github.com/openstack-k8s-operators/infra-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/infra-operator checkout: main checkout_description: project override ref commit: 786269345f996bd262360738a1e3c6b09171f370 name: openstack-k8s-operators/infra-operator required: true short_name: infra-operator src_dir: src/github.com/openstack-k8s-operators/infra-operator github.com/openstack-k8s-operators/install_yamls: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/install_yamls checkout: main checkout_description: project default branch commit: 2f838b62fe50aacff3d514af4b502264e0a276a5 name: openstack-k8s-operators/install_yamls required: true short_name: install_yamls src_dir: src/github.com/openstack-k8s-operators/install_yamls github.com/openstack-k8s-operators/openstack-baremetal-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-baremetal-operator checkout: master checkout_description: zuul branch commit: a333e57066b1d48e41f93af68be81188290a96b3 name: openstack-k8s-operators/openstack-baremetal-operator required: true short_name: openstack-baremetal-operator src_dir: src/github.com/openstack-k8s-operators/openstack-baremetal-operator github.com/openstack-k8s-operators/openstack-must-gather: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-must-gather checkout: 
main checkout_description: project override ref commit: 2da49819dd6af6036aede5e4e9a080ff2c6457de name: openstack-k8s-operators/openstack-must-gather required: true short_name: openstack-must-gather src_dir: src/github.com/openstack-k8s-operators/openstack-must-gather github.com/openstack-k8s-operators/openstack-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-operator checkout: main checkout_description: project override ref commit: 0ad3a7b7bb522e34f164849424319945b381d95c name: openstack-k8s-operators/openstack-operator required: true short_name: openstack-operator src_dir: src/github.com/openstack-k8s-operators/openstack-operator github.com/openstack-k8s-operators/repo-setup: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/repo-setup checkout: main checkout_description: project default branch commit: 37b10946c6a10f9fa26c13305f06bfd6867e723f name: openstack-k8s-operators/repo-setup required: true short_name: repo-setup src_dir: src/github.com/openstack-k8s-operators/repo-setup opendev.org/zuul/zuul-jobs: canonical_hostname: opendev.org canonical_name: opendev.org/zuul/zuul-jobs checkout: master checkout_description: zuul branch commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 name: zuul/zuul-jobs required: true short_name: zuul-jobs src_dir: src/opendev.org/zuul/zuul-jobs review.rdoproject.org/config: canonical_hostname: review.rdoproject.org canonical_name: review.rdoproject.org/config checkout: master checkout_description: zuul branch commit: 40052f923df77143f1c9739304c4b4221346825f name: config required: true short_name: config src_dir: src/review.rdoproject.org/config ref: refs/pull/694/head resources: {} tenant: rdoproject.org timeout: 3600 topic: null voting: true zuul_log_collection: true home/zuul/zuul-output/logs/ci-framework-data/artifacts/ansible-vars.yml0000644000175000017500000125434115115611523025466 0ustar zuulzuul_included_dir: changed: false failed: false stat: atime: 1765216391.9143927 attr_flags: '' attributes: [] block_size: 4096 blocks: 0 charset: binary ctime: 1765216397.4945426 dev: 64513 device_type: 0 executable: true exists: true gid: 1000 gr_name: zuul inode: 16835667 isblk: false ischr: false isdir: true isfifo: false isgid: false islnk: false isreg: false issock: false isuid: false mimetype: inode/directory mode: '0755' mtime: 1765216397.4945426 nlink: 2 path: /home/zuul/ci-framework-data/artifacts/parameters pw_name: zuul readable: true rgrp: true roth: true rusr: true size: 120 uid: 1000 version: '2849268541' wgrp: false woth: false writeable: true wusr: true xgrp: true xoth: true xusr: true _included_file: changed: false failed: false stat: atime: 1765216396.6405199 attr_flags: '' attributes: [] block_size: 4096 blocks: 8 charset: us-ascii checksum: 4199ad8a98c2c9b8188e0c13884cc6a979bb72ab ctime: 1765216396.6435199 dev: 64513 device_type: 0 executable: false exists: true gid: 1000 gr_name: zuul inode: 54575205 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mimetype: text/plain mode: '0600' mtime: 1765216396.3655124 nlink: 1 path: /home/zuul/ci-framework-data/artifacts/parameters/openshift-login-params.yml pw_name: zuul readable: true rgrp: false roth: false rusr: true size: 288 uid: 1000 version: '2535368820' wgrp: false woth: false writeable: true wusr: true xgrp: false xoth: false xusr: false _parsed_vars: changed: false content: 
Y2lmbXdfb3BlbnNoaWZ0X2FwaTogaHR0cHM6Ly9hcGkuY3JjLnRlc3Rpbmc6NjQ0MwpjaWZtd19vcGVuc2hpZnRfY29udGV4dDogZGVmYXVsdC9hcGktY3JjLXRlc3Rpbmc6NjQ0My9rdWJlYWRtaW4KY2lmbXdfb3BlbnNoaWZ0X2t1YmVjb25maWc6IC9ob21lL3p1dWwvLmNyYy9tYWNoaW5lcy9jcmMva3ViZWNvbmZpZwpjaWZtd19vcGVuc2hpZnRfdG9rZW46IHNoYTI1Nn5wT1Vnay05ODUwaHh4RVpLNkcwVU1LVUEzSHU4dlZwdWV6S24taGFKYl9VCmNpZm13X29wZW5zaGlmdF91c2VyOiBrdWJlYWRtaW4K encoding: base64 failed: false source: /home/zuul/ci-framework-data/artifacts/parameters/openshift-login-params.yml _tmp_dir: changed: true failed: false gid: 10001 group: zuul mode: '0700' owner: zuul path: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/tmp/ansible.016z1ouz size: 40 state: directory uid: 10001 _yaml_files: changed: false examined: 4 failed: false files: - atime: 1765216328.549688 ctime: 1765216326.5106332 dev: 64513 gid: 1000 gr_name: zuul inode: 33601311 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mode: '0644' mtime: 1765216326.2326257 nlink: 1 path: /home/zuul/ci-framework-data/artifacts/parameters/zuul-params.yml pw_name: zuul rgrp: true roth: true rusr: true size: 20213 uid: 1000 wgrp: false woth: false wusr: true xgrp: false xoth: false xusr: false - atime: 1765216397.4945426 ctime: 1765216397.4985428 dev: 64513 gid: 1000 gr_name: zuul inode: 71338228 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mode: '0600' mtime: 1765216397.3425386 nlink: 1 path: /home/zuul/ci-framework-data/artifacts/parameters/install-yamls-params.yml pw_name: zuul rgrp: false roth: false rusr: true size: 28064 uid: 1000 wgrp: false woth: false wusr: true xgrp: false xoth: false xusr: false - atime: 1765216391.9143927 ctime: 1765216390.175346 dev: 64513 gid: 1000 gr_name: zuul inode: 146828183 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mode: '0644' mtime: 1765216389.984341 nlink: 1 path: /home/zuul/ci-framework-data/artifacts/parameters/custom-params.yml pw_name: zuul rgrp: true roth: true rusr: true size: 1126 uid: 1000 wgrp: false woth: false wusr: true xgrp: false xoth: false xusr: false - atime: 1765216396.6405199 ctime: 1765216396.6435199 dev: 64513 gid: 1000 gr_name: zuul inode: 54575205 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mode: '0600' mtime: 1765216396.3655124 nlink: 1 path: /home/zuul/ci-framework-data/artifacts/parameters/openshift-login-params.yml pw_name: zuul rgrp: false roth: false rusr: true size: 288 uid: 1000 wgrp: false woth: false wusr: true xgrp: false xoth: false xusr: false matched: 4 msg: All paths examined skipped_paths: {} ansible_all_ipv4_addresses: - 38.102.83.251 ansible_all_ipv6_addresses: - fe80::f816:3eff:fe97:c9c3 ansible_apparmor: status: disabled ansible_architecture: x86_64 ansible_bios_date: 04/01/2014 ansible_bios_vendor: SeaBIOS ansible_bios_version: 1.15.0-1 ansible_board_asset_tag: NA ansible_board_name: NA ansible_board_serial: NA ansible_board_vendor: NA ansible_board_version: NA ansible_chassis_asset_tag: NA ansible_chassis_serial: NA ansible_chassis_vendor: QEMU ansible_chassis_version: pc-i440fx-6.2 ansible_check_mode: false ansible_cmdline: BOOT_IMAGE: (hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 console: ttyS0,115200n8 crashkernel: 1G-2G:192M,2G-64G:256M,64G-:512M net.ifnames: '0' no_timer_check: true ro: true root: UUID=fcf6b761-831a-48a7-9f5f-068b5063763f 
ansible_collection_name: null ansible_config_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1/ansible.cfg ansible_connection: ssh ansible_date_time: date: '2025-12-08' day: 08 epoch: '1765216950' epoch_int: '1765216950' hour: '18' iso8601: '2025-12-08T18:02:30Z' iso8601_basic: 20251208T180230661772 iso8601_basic_short: 20251208T180230 iso8601_micro: '2025-12-08T18:02:30.661772Z' minute: '02' month: '12' second: '30' time: '18:02:30' tz: UTC tz_dst: UTC tz_offset: '+0000' weekday: Monday weekday_number: '1' weeknumber: '49' year: '2025' ansible_default_ipv4: address: 38.102.83.251 alias: eth0 broadcast: 38.102.83.255 gateway: 38.102.83.1 interface: eth0 macaddress: fa:16:3e:97:c9:c3 mtu: 1500 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' type: ether ansible_default_ipv6: {} ansible_dependent_role_names: [] ansible_device_links: ids: sr0: - ata-QEMU_DVD-ROM_QM00001 labels: sr0: - config-2 masters: {} uuids: sr0: - 2025-12-08-17-34-40-00 vda1: - fcf6b761-831a-48a7-9f5f-068b5063763f ansible_devices: sr0: holders: [] host: '' links: ids: - ata-QEMU_DVD-ROM_QM00001 labels: - config-2 masters: [] uuids: - 2025-12-08-17-34-40-00 model: QEMU DVD-ROM partitions: {} removable: '1' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '964' sectorsize: '2048' size: 482.00 KB support_discard: '2048' vendor: QEMU virtual: 1 vda: holders: [] host: '' links: ids: [] labels: [] masters: [] uuids: [] model: null partitions: vda1: holders: [] links: ids: [] labels: [] masters: [] uuids: - fcf6b761-831a-48a7-9f5f-068b5063763f sectors: '167770079' sectorsize: 512 size: 80.00 GB start: '2048' uuid: fcf6b761-831a-48a7-9f5f-068b5063763f removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '167772160' sectorsize: '512' size: 80.00 GB support_discard: '512' vendor: '0x1af4' virtual: 1 ansible_diff_mode: false ansible_distribution: CentOS ansible_distribution_file_parsed: true ansible_distribution_file_path: /etc/centos-release ansible_distribution_file_variety: CentOS ansible_distribution_major_version: '9' ansible_distribution_release: Stream ansible_distribution_version: '9' ansible_dns: nameservers: - 192.168.122.10 - 199.204.44.24 - 199.204.47.54 ansible_domain: '' ansible_effective_group_id: 1000 ansible_effective_user_id: 1000 ansible_env: ANSIBLE_LOG_PATH: /home/zuul/ci-framework-data/logs/e2e-collect-logs-must-gather.log BASH_FUNC_which%%: "() { ( alias;\n eval ${which_declare} ) | /usr/bin/which --tty-only --read-alias --read-functions --show-tilde --show-dot $@\n}" DBUS_SESSION_BUS_ADDRESS: unix:path=/run/user/1000/bus DEBUGINFOD_IMA_CERT_PATH: '/etc/keys/ima:' DEBUGINFOD_URLS: 'https://debuginfod.centos.org/ ' HOME: /home/zuul KUBECONFIG: /home/zuul/.crc/machines/crc/kubeconfig LANG: en_US.UTF-8 LESSOPEN: '||/usr/bin/lesspipe.sh %s' LOGNAME: zuul MOTD_SHOWN: pam PATH: /home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin PWD: /home/zuul SELINUX_LEVEL_REQUESTED: '' SELINUX_ROLE_REQUESTED: '' SELINUX_USE_CURRENT_RANGE: '' SHELL: /bin/bash SHLVL: '1' SSH_CLIENT: 38.102.83.114 38794 22 SSH_CONNECTION: 38.102.83.114 38794 38.102.83.251 22 USER: zuul XDG_RUNTIME_DIR: /run/user/1000 XDG_SESSION_CLASS: user XDG_SESSION_ID: '17' XDG_SESSION_TYPE: tty _: /usr/bin/python3 which_declare: declare -f ansible_eth0: active: true device: eth0 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' 
generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: 'on' rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: on [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: off [fixed] tx_gso_partial: off [fixed] tx_gso_robust: on [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: off [fixed] tx_sctp_segmentation: off [fixed] tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'off' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: off [fixed] tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: off [fixed] hw_timestamp_filters: [] ipv4: address: 38.102.83.251 broadcast: 38.102.83.255 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' ipv6: - address: fe80::f816:3eff:fe97:c9c3 prefix: '64' scope: link macaddress: fa:16:3e:97:c9:c3 module: virtio_net mtu: 1500 pciid: virtio1 promisc: false speed: -1 timestamping: [] type: ether ansible_facts: _ansible_facts_gathered: true all_ipv4_addresses: - 38.102.83.251 all_ipv6_addresses: - fe80::f816:3eff:fe97:c9c3 ansible_local: {} apparmor: status: disabled architecture: x86_64 bios_date: 04/01/2014 bios_vendor: SeaBIOS bios_version: 1.15.0-1 board_asset_tag: NA board_name: NA board_serial: NA board_vendor: NA board_version: NA chassis_asset_tag: NA chassis_serial: NA chassis_vendor: QEMU chassis_version: pc-i440fx-6.2 cmdline: BOOT_IMAGE: (hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 console: ttyS0,115200n8 crashkernel: 1G-2G:192M,2G-64G:256M,64G-:512M net.ifnames: '0' no_timer_check: true ro: true root: UUID=fcf6b761-831a-48a7-9f5f-068b5063763f crc_ci_bootstrap_instance_default_net_config: mtu: 1500 range: 192.168.122.0/24 crc_ci_bootstrap_instance_nm_vlan_networks: - key: internal-api value: ip: 172.17.0.5 - key: storage value: ip: 172.18.0.5 - key: tenant value: ip: 172.19.0.5 crc_ci_bootstrap_instance_parent_port_create_yaml: admin_state_up: true allowed_address_pairs: [] binding_host_id: null binding_profile: {} binding_vif_details: {} binding_vif_type: null binding_vnic_type: normal created_at: '2025-12-08T17:39:31Z' data_plane_status: null description: '' device_id: '' device_owner: '' device_profile: null dns_assignment: - fqdn: host-192-168-122-10.openstacklocal. 
hostname: host-192-168-122-10 ip_address: 192.168.122.10 dns_domain: '' dns_name: '' extra_dhcp_opts: [] fixed_ips: - ip_address: 192.168.122.10 subnet_id: 1ec71021-8196-48c3-b107-9041e6f5f679 hardware_offload_type: null hints: '' id: d37cddfa-716b-4541-992b-5180463c6809 ip_allocation: immediate mac_address: fa:16:3e:e6:79:2f name: crc-32c1a977-c4dc-4b4f-b307-ff2a2f4e57f1 network_id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb numa_affinity_policy: null port_security_enabled: false project_id: 4b633c451ac74233be3721a3635275e5 propagate_uplink_status: null qos_network_policy_id: null qos_policy_id: null resource_request: null revision_number: 1 security_group_ids: [] status: DOWN tags: [] trunk_details: null trusted: null updated_at: '2025-12-08T17:39:31Z' crc_ci_bootstrap_network_name: zuul-ci-net-fdae5567 crc_ci_bootstrap_networks_out: controller: default: connection: ci-private-network gw: 192.168.122.1 iface: eth1 ip: 192.168.122.11/24 mac: fa:16:3e:6a:de:3b mtu: 1500 crc: default: connection: ci-private-network gw: 192.168.122.1 iface: ens7 ip: 192.168.122.10/24 mac: fa:16:3e:e6:79:2f mtu: 1500 internal-api: connection: ci-private-network-20 iface: ens7.20 ip: 172.17.0.5/24 mac: 52:54:00:84:35:42 mtu: '1496' parent_iface: ens7 vlan: 20 storage: connection: ci-private-network-21 iface: ens7.21 ip: 172.18.0.5/24 mac: 52:54:00:9c:32:12 mtu: '1496' parent_iface: ens7 vlan: 21 tenant: connection: ci-private-network-22 iface: ens7.22 ip: 172.19.0.5/24 mac: 52:54:00:ec:da:21 mtu: '1496' parent_iface: ens7 vlan: 22 crc_ci_bootstrap_private_net_create_yaml: admin_state_up: true availability_zone_hints: - nova availability_zones: [] created_at: '2025-12-08T17:38:55Z' description: '' dns_domain: '' id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb ipv4_address_scope: null ipv6_address_scope: null is_default: false is_vlan_qinq: null is_vlan_transparent: false l2_adjacency: true mtu: 1500 name: zuul-ci-net-fdae5567 port_security_enabled: false project_id: 4b633c451ac74233be3721a3635275e5 provider:network_type: null provider:physical_network: null provider:segmentation_id: null qos_policy_id: null revision_number: 1 router:external: false segments: null shared: false status: ACTIVE subnets: [] tags: [] updated_at: '2025-12-08T17:38:55Z' crc_ci_bootstrap_private_router_create_yaml: admin_state_up: true availability_zone_hints: - nova availability_zones: [] created_at: '2025-12-08T17:39:01Z' description: '' enable_ndp_proxy: null external_gateway_info: enable_snat: true external_fixed_ips: - ip_address: 38.102.83.145 subnet_id: 3169b11b-94b1-4bc9-9727-4fdbbe15e56e network_id: 7abff1a9-a103-46d0-979a-1f1e599f4f41 flavor_id: null id: edc0cd0e-14dd-4588-9e19-d36444fdd18f name: zuul-ci-subnet-router-fdae5567 project_id: 4b633c451ac74233be3721a3635275e5 revision_number: 3 routes: [] status: ACTIVE tags: [] tenant_id: 4b633c451ac74233be3721a3635275e5 updated_at: '2025-12-08T17:39:02Z' crc_ci_bootstrap_private_subnet_create_yaml: allocation_pools: - end: 192.168.122.254 start: 192.168.122.2 cidr: 192.168.122.0/24 created_at: '2025-12-08T17:38:58Z' description: '' dns_nameservers: [] dns_publish_fixed_ip: null enable_dhcp: false gateway_ip: 192.168.122.1 host_routes: [] id: 1ec71021-8196-48c3-b107-9041e6f5f679 ip_version: 4 ipv6_address_mode: null ipv6_ra_mode: null name: zuul-ci-subnet-fdae5567 network_id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb project_id: 4b633c451ac74233be3721a3635275e5 revision_number: 0 segment_id: null service_types: [] subnetpool_id: null tags: [] updated_at: '2025-12-08T17:38:58Z' 
crc_ci_bootstrap_provider_dns: - 199.204.44.24 - 199.204.47.54 crc_ci_bootstrap_router_name: zuul-ci-subnet-router-fdae5567 crc_ci_bootstrap_subnet_name: zuul-ci-subnet-fdae5567 date_time: date: '2025-12-08' day: 08 epoch: '1765216950' epoch_int: '1765216950' hour: '18' iso8601: '2025-12-08T18:02:30Z' iso8601_basic: 20251208T180230661772 iso8601_basic_short: 20251208T180230 iso8601_micro: '2025-12-08T18:02:30.661772Z' minute: '02' month: '12' second: '30' time: '18:02:30' tz: UTC tz_dst: UTC tz_offset: '+0000' weekday: Monday weekday_number: '1' weeknumber: '49' year: '2025' default_ipv4: address: 38.102.83.251 alias: eth0 broadcast: 38.102.83.255 gateway: 38.102.83.1 interface: eth0 macaddress: fa:16:3e:97:c9:c3 mtu: 1500 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' type: ether default_ipv6: {} device_links: ids: sr0: - ata-QEMU_DVD-ROM_QM00001 labels: sr0: - config-2 masters: {} uuids: sr0: - 2025-12-08-17-34-40-00 vda1: - fcf6b761-831a-48a7-9f5f-068b5063763f devices: sr0: holders: [] host: '' links: ids: - ata-QEMU_DVD-ROM_QM00001 labels: - config-2 masters: [] uuids: - 2025-12-08-17-34-40-00 model: QEMU DVD-ROM partitions: {} removable: '1' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '964' sectorsize: '2048' size: 482.00 KB support_discard: '2048' vendor: QEMU virtual: 1 vda: holders: [] host: '' links: ids: [] labels: [] masters: [] uuids: [] model: null partitions: vda1: holders: [] links: ids: [] labels: [] masters: [] uuids: - fcf6b761-831a-48a7-9f5f-068b5063763f sectors: '167770079' sectorsize: 512 size: 80.00 GB start: '2048' uuid: fcf6b761-831a-48a7-9f5f-068b5063763f removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '167772160' sectorsize: '512' size: 80.00 GB support_discard: '512' vendor: '0x1af4' virtual: 1 discovered_interpreter_python: /usr/bin/python3 distribution: CentOS distribution_file_parsed: true distribution_file_path: /etc/centos-release distribution_file_variety: CentOS distribution_major_version: '9' distribution_release: Stream distribution_version: '9' dns: nameservers: - 192.168.122.10 - 199.204.44.24 - 199.204.47.54 domain: '' effective_group_id: 1000 effective_user_id: 1000 env: ANSIBLE_LOG_PATH: /home/zuul/ci-framework-data/logs/e2e-collect-logs-must-gather.log BASH_FUNC_which%%: "() { ( alias;\n eval ${which_declare} ) | /usr/bin/which --tty-only --read-alias --read-functions --show-tilde --show-dot $@\n}" DBUS_SESSION_BUS_ADDRESS: unix:path=/run/user/1000/bus DEBUGINFOD_IMA_CERT_PATH: '/etc/keys/ima:' DEBUGINFOD_URLS: 'https://debuginfod.centos.org/ ' HOME: /home/zuul KUBECONFIG: /home/zuul/.crc/machines/crc/kubeconfig LANG: en_US.UTF-8 LESSOPEN: '||/usr/bin/lesspipe.sh %s' LOGNAME: zuul MOTD_SHOWN: pam PATH: /home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin PWD: /home/zuul SELINUX_LEVEL_REQUESTED: '' SELINUX_ROLE_REQUESTED: '' SELINUX_USE_CURRENT_RANGE: '' SHELL: /bin/bash SHLVL: '1' SSH_CLIENT: 38.102.83.114 38794 22 SSH_CONNECTION: 38.102.83.114 38794 38.102.83.251 22 USER: zuul XDG_RUNTIME_DIR: /run/user/1000 XDG_SESSION_CLASS: user XDG_SESSION_ID: '17' XDG_SESSION_TYPE: tty _: /usr/bin/python3 which_declare: declare -f eth0: active: true device: eth0 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] 
hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: 'on' rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: on [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: off [fixed] tx_gso_partial: off [fixed] tx_gso_robust: on [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: off [fixed] tx_sctp_segmentation: off [fixed] tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'off' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: off [fixed] tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: off [fixed] hw_timestamp_filters: [] ipv4: address: 38.102.83.251 broadcast: 38.102.83.255 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' ipv6: - address: fe80::f816:3eff:fe97:c9c3 prefix: '64' scope: link macaddress: fa:16:3e:97:c9:c3 module: virtio_net mtu: 1500 pciid: virtio1 promisc: false speed: -1 timestamping: [] type: ether fibre_channel_wwn: [] fips: false form_factor: Other fqdn: controller gather_subset: - min hostname: controller hostnqn: nqn.2014-08.org.nvmexpress:uuid:bf3e0a14-a5f8-4123-aa26-e7cad37b879a interfaces: - lo - eth0 is_chroot: false iscsi_iqn: '' kernel: 5.14.0-645.el9.x86_64 kernel_version: '#1 SMP PREEMPT_DYNAMIC Fri Nov 28 14:01:17 UTC 2025' lo: active: true device: lo features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: on [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: on [fixed] tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: on [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] 
tx_gre_segmentation: off [fixed] tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: off [fixed] tx_scatter_gather: on [fixed] tx_scatter_gather_fraglist: on [fixed] tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: on [fixed] hw_timestamp_filters: [] ipv4: address: 127.0.0.1 broadcast: '' netmask: 255.0.0.0 network: 127.0.0.0 prefix: '8' ipv6: - address: ::1 prefix: '128' scope: host mtu: 65536 promisc: false timestamping: [] type: loopback loadavg: 15m: 0.3 1m: 0.55 5m: 0.68 locally_reachable_ips: ipv4: - 38.102.83.251 - 127.0.0.0/8 - 127.0.0.1 ipv6: - ::1 - fe80::f816:3eff:fe97:c9c3 lsb: {} lvm: N/A machine: x86_64 machine_id: 4d4ef2323cc3337bbfd9081b2a323b4e memfree_mb: 7161 memory_mb: nocache: free: 7372 used: 308 real: free: 7161 total: 7680 used: 519 swap: cached: 0 free: 0 total: 0 used: 0 memtotal_mb: 7680 module_setup: true mounts: - block_available: 20337100 block_size: 4096 block_total: 20954875 block_used: 617775 device: /dev/vda1 fstype: xfs inode_available: 41888405 inode_total: 41942512 inode_used: 54107 mount: / options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota size_available: 83300761600 size_total: 85831168000 uuid: fcf6b761-831a-48a7-9f5f-068b5063763f nodename: controller os_family: RedHat pkg_mgr: dnf proc_cmdline: BOOT_IMAGE: (hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 console: ttyS0,115200n8 crashkernel: 1G-2G:192M,2G-64G:256M,64G-:512M net.ifnames: '0' no_timer_check: true ro: true root: UUID=fcf6b761-831a-48a7-9f5f-068b5063763f processor: - '0' - AuthenticAMD - AMD EPYC-Rome Processor - '1' - AuthenticAMD - AMD EPYC-Rome Processor - '2' - AuthenticAMD - AMD EPYC-Rome Processor - '3' - AuthenticAMD - AMD EPYC-Rome Processor - '4' - AuthenticAMD - AMD EPYC-Rome Processor - '5' - AuthenticAMD - AMD EPYC-Rome Processor - '6' - AuthenticAMD - AMD EPYC-Rome Processor - '7' - AuthenticAMD - AMD EPYC-Rome Processor processor_cores: 1 processor_count: 8 processor_nproc: 8 processor_threads_per_core: 1 processor_vcpus: 8 product_name: OpenStack Nova product_serial: NA product_uuid: NA product_version: 26.3.1 python: executable: /usr/bin/python3 has_sslcontext: true type: cpython version: major: 3 micro: 25 minor: 9 releaselevel: final serial: 0 version_info: - 3 - 9 - 25 - final - 0 python_version: 3.9.25 real_group_id: 1000 real_user_id: 1000 selinux: config_mode: enforcing mode: enforcing policyvers: 33 status: enabled type: targeted selinux_python_present: true service_mgr: systemd ssh_host_key_ecdsa_public: AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOvQreKGmvEG1vi8GvwFBqECdihQVE6tUBzDanz/Lcee9GvGa+tH+Ub+xqX7rB/yRnjc8CJIJovHO3uwatRboZQ= ssh_host_key_ecdsa_public_keytype: ecdsa-sha2-nistp256 ssh_host_key_ed25519_public: AAAAC3NzaC1lZDI1NTE5AAAAIPeGQ/QINrFqQK52g8hKIwxs8VQj2W/JGaf9zdH9cBm2 ssh_host_key_ed25519_public_keytype: ssh-ed25519 ssh_host_key_rsa_public: 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCrMWhS0sfa8MFM6z46N9s5KdkDDfqBTBpmkqh+k0riEuOWyruqZ/IooEFKeQXaWr/u2j++Erw7byk1nZ2/1inxp5GHPD3tjMz1FjfMlsMM17kkMF8J45E52gQj2JzJS93rFYtLMkLQt6ydCYf8csUaQJz4YGv66NoK1WXUFkxSW12stZQyIjr7FHdmQ9o1VG6PeVlvovTjZdIDOrs2uyx3QLKn/3ZvZBR0nNCGXPAtVoyf4oV/JWSKdX0XOcgkV4QyD4B3CiLstDl04Q6XY8pkzc850JzuMo4L6IQoiI//65VAvU9EWiduDcC6Bb2UqYy5iwuJFLa6Qei0hCq5tk00PSx9JjT+rVhoTJveLD0GlQk2blm+bCOKdHDM87Eh/CiVxhUJhsbkp7ASUwcd1In/Ayr37VyWSHlbW7SDd9G5aQvRd7mOx6JYU5j+j8dmvku5+mmMisaik3SYrgImXY/Agd7BOsZD1BfRvPcqACsgYymCPzDxVVOGYD3Tt5poSUs= ssh_host_key_rsa_public_keytype: ssh-rsa swapfree_mb: 0 swaptotal_mb: 0 system: Linux system_capabilities: - '' system_capabilities_enforced: 'True' system_vendor: OpenStack Foundation uptime_seconds: 192 user_dir: /home/zuul user_gecos: '' user_gid: 1000 user_id: zuul user_shell: /bin/bash user_uid: 1000 userspace_architecture: x86_64 userspace_bits: '64' virtualization_role: guest virtualization_tech_guest: - openstack virtualization_tech_host: - kvm virtualization_type: openstack zuul_change_list: - service-telemetry-operator ansible_fibre_channel_wwn: [] ansible_fips: false ansible_forks: 5 ansible_form_factor: Other ansible_fqdn: controller ansible_host: 38.102.83.251 ansible_hostname: controller ansible_hostnqn: nqn.2014-08.org.nvmexpress:uuid:bf3e0a14-a5f8-4123-aa26-e7cad37b879a ansible_interfaces: - lo - eth0 ansible_inventory_sources: - /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1/inventory.yaml ansible_is_chroot: false ansible_iscsi_iqn: '' ansible_kernel: 5.14.0-645.el9.x86_64 ansible_kernel_version: '#1 SMP PREEMPT_DYNAMIC Fri Nov 28 14:01:17 UTC 2025' ansible_lo: active: true device: lo features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: on [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: on [fixed] tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: on [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: off [fixed] tx_scatter_gather: on [fixed] tx_scatter_gather_fraglist: on [fixed] tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] 
vlan_challenged: on [fixed] hw_timestamp_filters: [] ipv4: address: 127.0.0.1 broadcast: '' netmask: 255.0.0.0 network: 127.0.0.0 prefix: '8' ipv6: - address: ::1 prefix: '128' scope: host mtu: 65536 promisc: false timestamping: [] type: loopback ansible_loadavg: 15m: 0.3 1m: 0.55 5m: 0.68 ansible_local: {} ansible_locally_reachable_ips: ipv4: - 38.102.83.251 - 127.0.0.0/8 - 127.0.0.1 ipv6: - ::1 - fe80::f816:3eff:fe97:c9c3 ansible_lsb: {} ansible_lvm: N/A ansible_machine: x86_64 ansible_machine_id: 4d4ef2323cc3337bbfd9081b2a323b4e ansible_memfree_mb: 7161 ansible_memory_mb: nocache: free: 7372 used: 308 real: free: 7161 total: 7680 used: 519 swap: cached: 0 free: 0 total: 0 used: 0 ansible_memtotal_mb: 7680 ansible_mounts: - block_available: 20337100 block_size: 4096 block_total: 20954875 block_used: 617775 device: /dev/vda1 fstype: xfs inode_available: 41888405 inode_total: 41942512 inode_used: 54107 mount: / options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota size_available: 83300761600 size_total: 85831168000 uuid: fcf6b761-831a-48a7-9f5f-068b5063763f ansible_nodename: controller ansible_os_family: RedHat ansible_parent_role_names: - cifmw_setup ansible_parent_role_paths: - /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/untrusted/project_0/github.com/openstack-k8s-operators/ci-framework/roles/cifmw_setup ansible_pkg_mgr: dnf ansible_play_batch: &id002 - controller ansible_play_hosts: - controller ansible_play_hosts_all: - controller - crc ansible_play_name: Run ci/playbooks/e2e-collect-logs.yml ansible_play_role_names: &id003 - run_hook - os_must_gather - artifacts - env_op_images - run_hook - cifmw_setup ansible_playbook_python: /usr/lib/zuul/ansible/8/bin/python ansible_port: 22 ansible_proc_cmdline: BOOT_IMAGE: (hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 console: ttyS0,115200n8 crashkernel: 1G-2G:192M,2G-64G:256M,64G-:512M net.ifnames: '0' no_timer_check: true ro: true root: UUID=fcf6b761-831a-48a7-9f5f-068b5063763f ansible_processor: - '0' - AuthenticAMD - AMD EPYC-Rome Processor - '1' - AuthenticAMD - AMD EPYC-Rome Processor - '2' - AuthenticAMD - AMD EPYC-Rome Processor - '3' - AuthenticAMD - AMD EPYC-Rome Processor - '4' - AuthenticAMD - AMD EPYC-Rome Processor - '5' - AuthenticAMD - AMD EPYC-Rome Processor - '6' - AuthenticAMD - AMD EPYC-Rome Processor - '7' - AuthenticAMD - AMD EPYC-Rome Processor ansible_processor_cores: 1 ansible_processor_count: 8 ansible_processor_nproc: 8 ansible_processor_threads_per_core: 1 ansible_processor_vcpus: 8 ansible_product_name: OpenStack Nova ansible_product_serial: NA ansible_product_uuid: NA ansible_product_version: 26.3.1 ansible_python: executable: /usr/bin/python3 has_sslcontext: true type: cpython version: major: 3 micro: 25 minor: 9 releaselevel: final serial: 0 version_info: - 3 - 9 - 25 - final - 0 ansible_python_interpreter: auto ansible_python_version: 3.9.25 ansible_real_group_id: 1000 ansible_real_user_id: 1000 ansible_role_name: artifacts ansible_role_names: - cifmw_setup - env_op_images - artifacts - run_hook - os_must_gather ansible_run_tags: - all ansible_scp_extra_args: -o PermitLocalCommand=no ansible_selinux: config_mode: enforcing mode: enforcing policyvers: 33 status: enabled type: targeted ansible_selinux_python_present: true ansible_service_mgr: systemd ansible_sftp_extra_args: -o PermitLocalCommand=no ansible_skip_tags: [] ansible_ssh_common_args: -o PermitLocalCommand=no ansible_ssh_executable: ssh ansible_ssh_extra_args: -o PermitLocalCommand=no ansible_ssh_host_key_ecdsa_public: 
AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOvQreKGmvEG1vi8GvwFBqECdihQVE6tUBzDanz/Lcee9GvGa+tH+Ub+xqX7rB/yRnjc8CJIJovHO3uwatRboZQ= ansible_ssh_host_key_ecdsa_public_keytype: ecdsa-sha2-nistp256 ansible_ssh_host_key_ed25519_public: AAAAC3NzaC1lZDI1NTE5AAAAIPeGQ/QINrFqQK52g8hKIwxs8VQj2W/JGaf9zdH9cBm2 ansible_ssh_host_key_ed25519_public_keytype: ssh-ed25519 ansible_ssh_host_key_rsa_public: AAAAB3NzaC1yc2EAAAADAQABAAABgQCrMWhS0sfa8MFM6z46N9s5KdkDDfqBTBpmkqh+k0riEuOWyruqZ/IooEFKeQXaWr/u2j++Erw7byk1nZ2/1inxp5GHPD3tjMz1FjfMlsMM17kkMF8J45E52gQj2JzJS93rFYtLMkLQt6ydCYf8csUaQJz4YGv66NoK1WXUFkxSW12stZQyIjr7FHdmQ9o1VG6PeVlvovTjZdIDOrs2uyx3QLKn/3ZvZBR0nNCGXPAtVoyf4oV/JWSKdX0XOcgkV4QyD4B3CiLstDl04Q6XY8pkzc850JzuMo4L6IQoiI//65VAvU9EWiduDcC6Bb2UqYy5iwuJFLa6Qei0hCq5tk00PSx9JjT+rVhoTJveLD0GlQk2blm+bCOKdHDM87Eh/CiVxhUJhsbkp7ASUwcd1In/Ayr37VyWSHlbW7SDd9G5aQvRd7mOx6JYU5j+j8dmvku5+mmMisaik3SYrgImXY/Agd7BOsZD1BfRvPcqACsgYymCPzDxVVOGYD3Tt5poSUs= ansible_ssh_host_key_rsa_public_keytype: ssh-rsa ansible_swapfree_mb: 0 ansible_swaptotal_mb: 0 ansible_system: Linux ansible_system_capabilities: - '' ansible_system_capabilities_enforced: 'True' ansible_system_vendor: OpenStack Foundation ansible_uptime_seconds: 192 ansible_user: zuul ansible_user_dir: /home/zuul ansible_user_gecos: '' ansible_user_gid: 1000 ansible_user_id: zuul ansible_user_shell: /bin/bash ansible_user_uid: 1000 ansible_userspace_architecture: x86_64 ansible_userspace_bits: '64' ansible_verbosity: 1 ansible_version: full: 2.15.12 major: 2 minor: 15 revision: 12 string: 2.15.12 ansible_virtualization_role: guest ansible_virtualization_tech_guest: - openstack ansible_virtualization_tech_host: - kvm ansible_virtualization_type: openstack cifmw_architecture_repo: /home/zuul/src/github.com/openstack-k8s-operators/architecture cifmw_architecture_repo_relative: src/github.com/openstack-k8s-operators/architecture cifmw_artifacts_basedir: '{{ cifmw_basedir | default(ansible_user_dir ~ ''/ci-framework-data'') }}' cifmw_artifacts_crc_host: api.crc.testing cifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw cifmw_artifacts_crc_sshkey_ed25519: ~/.crc/machines/crc/id_ed25519 cifmw_artifacts_crc_user: core cifmw_artifacts_gather_logs: true cifmw_artifacts_mask_logs: true cifmw_basedir: /home/zuul/ci-framework-data cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_env_op_images_dir: '{{ cifmw_basedir | default(ansible_user_dir ~ ''/ci-framework-data'') }}' cifmw_env_op_images_dryrun: false cifmw_env_op_images_file: operator_images.yaml cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_install_yamls_defaults: ADOPTED_EXTERNAL_NETWORK: 172.21.1.0/24 ADOPTED_INTERNALAPI_NETWORK: 172.17.1.0/24 ADOPTED_STORAGEMGMT_NETWORK: 172.20.1.0/24 ADOPTED_STORAGE_NETWORK: 172.18.1.0/24 ADOPTED_TENANT_NETWORK: 172.9.1.0/24 ANSIBLEEE: config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_BRANCH: main ANSIBLEEE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-operator-index:latest ANSIBLEEE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/kuttl-test.yaml ANSIBLEEE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/test/kuttl/tests ANSIBLEEE_KUTTL_NAMESPACE: ansibleee-kuttl-tests ANSIBLEEE_REPO: https://github.com/openstack-k8s-operators/openstack-ansibleee-operator ANSIBLEE_COMMIT_HASH: '' 
BARBICAN: config/samples/barbican_v1beta1_barbican.yaml BARBICAN_BRANCH: main BARBICAN_COMMIT_HASH: '' BARBICAN_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/config/samples/barbican_v1beta1_barbican.yaml BARBICAN_DEPL_IMG: unused BARBICAN_IMG: quay.io/openstack-k8s-operators/barbican-operator-index:latest BARBICAN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/kuttl-test.yaml BARBICAN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/test/kuttl/tests BARBICAN_KUTTL_NAMESPACE: barbican-kuttl-tests BARBICAN_REPO: https://github.com/openstack-k8s-operators/barbican-operator.git BARBICAN_SERVICE_ENABLED: 'true' BARBICAN_SIMPLE_CRYPTO_ENCRYPTION_KEY: sE**********U= BAREMETAL_BRANCH: main BAREMETAL_COMMIT_HASH: '' BAREMETAL_IMG: quay.io/openstack-k8s-operators/openstack-baremetal-operator-index:latest BAREMETAL_OS_CONTAINER_IMG: '' BAREMETAL_OS_IMG: '' BAREMETAL_REPO: https://github.com/openstack-k8s-operators/openstack-baremetal-operator.git BAREMETAL_TIMEOUT: 20m BASH_IMG: quay.io/openstack-k8s-operators/bash:latest BGP_ASN: '64999' BGP_LEAF_1: 100.65.4.1 BGP_LEAF_2: 100.64.4.1 BGP_OVN_ROUTING: 'false' BGP_PEER_ASN: '64999' BGP_SOURCE_IP: 172.30.4.2 BGP_SOURCE_IP6: f00d:f00d:f00d:f00d:f00d:f00d:f00d:42 BMAAS_BRIDGE_IPV4_PREFIX: 172.20.1.2/24 BMAAS_BRIDGE_IPV6_PREFIX: fd00:bbbb::2/64 BMAAS_INSTANCE_DISK_SIZE: '20' BMAAS_INSTANCE_MEMORY: '4096' BMAAS_INSTANCE_NAME_PREFIX: crc-bmaas BMAAS_INSTANCE_NET_MODEL: virtio BMAAS_INSTANCE_OS_VARIANT: centos-stream9 BMAAS_INSTANCE_VCPUS: '2' BMAAS_INSTANCE_VIRT_TYPE: kvm BMAAS_IPV4: 'true' BMAAS_IPV6: 'false' BMAAS_LIBVIRT_USER: sushyemu BMAAS_METALLB_ADDRESS_POOL: 172.20.1.64/26 BMAAS_METALLB_POOL_NAME: baremetal BMAAS_NETWORK_IPV4_PREFIX: 172.20.1.1/24 BMAAS_NETWORK_IPV6_PREFIX: fd00:bbbb::1/64 BMAAS_NETWORK_NAME: crc-bmaas BMAAS_NODE_COUNT: '1' BMAAS_OCP_INSTANCE_NAME: crc BMAAS_REDFISH_PASSWORD: password BMAAS_REDFISH_USERNAME: admin BMAAS_ROUTE_LIBVIRT_NETWORKS: crc-bmaas,crc,default BMAAS_SUSHY_EMULATOR_DRIVER: libvirt BMAAS_SUSHY_EMULATOR_IMAGE: quay.io/metal3-io/sushy-tools:latest BMAAS_SUSHY_EMULATOR_NAMESPACE: sushy-emulator BMAAS_SUSHY_EMULATOR_OS_CLIENT_CONFIG_FILE: /etc/openstack/clouds.yaml BMAAS_SUSHY_EMULATOR_OS_CLOUD: openstack BMH_NAMESPACE: openstack BMO_BRANCH: release-0.9 BMO_CLEANUP: 'true' BMO_COMMIT_HASH: '' BMO_IPA_BRANCH: stable/2024.1 BMO_IRONIC_HOST: 192.168.122.10 BMO_PROVISIONING_INTERFACE: '' BMO_REPO: https://github.com/metal3-io/baremetal-operator BMO_SETUP: '' BMO_SETUP_ROUTE_REPLACE: 'true' BM_CTLPLANE_INTERFACE: enp1s0 BM_INSTANCE_MEMORY: '8192' BM_INSTANCE_NAME_PREFIX: edpm-compute-baremetal BM_INSTANCE_NAME_SUFFIX: '0' BM_NETWORK_NAME: default BM_NODE_COUNT: '1' BM_ROOT_PASSWORD: '' BM_ROOT_PASSWORD_SECRET: '' CEILOMETER_CENTRAL_DEPL_IMG: unused CEILOMETER_NOTIFICATION_DEPL_IMG: unused CEPH_BRANCH: release-1.15 CEPH_CLIENT: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/toolbox.yaml CEPH_COMMON: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/common.yaml CEPH_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/cluster-test.yaml CEPH_CRDS: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/crds.yaml CEPH_IMG: quay.io/ceph/demo:latest-squid CEPH_OP: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/operator-openshift.yaml CEPH_REPO: 
https://github.com/rook/rook.git CERTMANAGER_TIMEOUT: 300s CHECKOUT_FROM_OPENSTACK_REF: 'true' CINDER: config/samples/cinder_v1beta1_cinder.yaml CINDERAPI_DEPL_IMG: unused CINDERBKP_DEPL_IMG: unused CINDERSCH_DEPL_IMG: unused CINDERVOL_DEPL_IMG: unused CINDER_BRANCH: main CINDER_COMMIT_HASH: '' CINDER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/config/samples/cinder_v1beta1_cinder.yaml CINDER_IMG: quay.io/openstack-k8s-operators/cinder-operator-index:latest CINDER_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/kuttl-test.yaml CINDER_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/test/kuttl/tests CINDER_KUTTL_NAMESPACE: cinder-kuttl-tests CINDER_REPO: https://github.com/openstack-k8s-operators/cinder-operator.git CLEANUP_DIR_CMD: rm -Rf CRC_BGP_NIC_1_MAC: '52:54:00:11:11:11' CRC_BGP_NIC_2_MAC: '52:54:00:11:11:12' CRC_HTTPS_PROXY: '' CRC_HTTP_PROXY: '' CRC_STORAGE_NAMESPACE: crc-storage CRC_STORAGE_RETRIES: '3' CRC_URL: '''https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/crc/latest/crc-linux-amd64.tar.xz''' CRC_VERSION: latest DATAPLANE_ANSIBLE_SECRET: dataplane-ansible-ssh-private-key-secret DATAPLANE_ANSIBLE_USER: '' DATAPLANE_COMPUTE_IP: 192.168.122.100 DATAPLANE_CONTAINER_PREFIX: openstack DATAPLANE_CONTAINER_TAG: current-podified DATAPLANE_CUSTOM_SERVICE_RUNNER_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest DATAPLANE_DEFAULT_GW: 192.168.122.1 DATAPLANE_EXTRA_NOVA_CONFIG_FILE: /dev/null DATAPLANE_GROWVOLS_ARGS: /=8GB /tmp=1GB /home=1GB /var=100% DATAPLANE_KUSTOMIZE_SCENARIO: preprovisioned DATAPLANE_NETWORKER_IP: 192.168.122.200 DATAPLANE_NETWORK_INTERFACE_NAME: eth0 DATAPLANE_NOVA_NFS_PATH: '' DATAPLANE_NTP_SERVER: pool.ntp.org DATAPLANE_PLAYBOOK: osp.edpm.download_cache DATAPLANE_REGISTRY_URL: quay.io/podified-antelope-centos9 DATAPLANE_RUNNER_IMG: '' DATAPLANE_SERVER_ROLE: compute DATAPLANE_SSHD_ALLOWED_RANGES: '[''192.168.122.0/24'']' DATAPLANE_TIMEOUT: 30m DATAPLANE_TLS_ENABLED: 'true' DATAPLANE_TOTAL_NETWORKER_NODES: '1' DATAPLANE_TOTAL_NODES: '1' DBSERVICE: galera DESIGNATE: config/samples/designate_v1beta1_designate.yaml DESIGNATE_BRANCH: main DESIGNATE_COMMIT_HASH: '' DESIGNATE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/config/samples/designate_v1beta1_designate.yaml DESIGNATE_IMG: quay.io/openstack-k8s-operators/designate-operator-index:latest DESIGNATE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/kuttl-test.yaml DESIGNATE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/test/kuttl/tests DESIGNATE_KUTTL_NAMESPACE: designate-kuttl-tests DESIGNATE_REPO: https://github.com/openstack-k8s-operators/designate-operator.git DNSDATA: config/samples/network_v1beta1_dnsdata.yaml DNSDATA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsdata.yaml DNSMASQ: config/samples/network_v1beta1_dnsmasq.yaml DNSMASQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsmasq.yaml DNS_DEPL_IMG: unused DNS_DOMAIN: localdomain DOWNLOAD_TOOLS_SELECTION: all EDPM_ATTACH_EXTNET: 'true' EDPM_COMPUTE_ADDITIONAL_HOST_ROUTES: '''[]''' EDPM_COMPUTE_ADDITIONAL_NETWORKS: '''[]''' EDPM_COMPUTE_CELLS: '1' EDPM_COMPUTE_CEPH_ENABLED: 'true' EDPM_COMPUTE_CEPH_NOVA: 'true' EDPM_COMPUTE_DHCP_AGENT_ENABLED: 'true' 
EDPM_COMPUTE_SRIOV_ENABLED: 'true' EDPM_COMPUTE_SUFFIX: '0' EDPM_CONFIGURE_DEFAULT_ROUTE: 'true' EDPM_CONFIGURE_HUGEPAGES: 'false' EDPM_CONFIGURE_NETWORKING: 'true' EDPM_FIRSTBOOT_EXTRA: /tmp/edpm-firstboot-extra EDPM_NETWORKER_SUFFIX: '0' EDPM_TOTAL_NETWORKERS: '1' EDPM_TOTAL_NODES: '1' GALERA_REPLICAS: '' GENERATE_SSH_KEYS: 'true' GIT_CLONE_OPTS: '' GLANCE: config/samples/glance_v1beta1_glance.yaml GLANCEAPI_DEPL_IMG: unused GLANCE_BRANCH: main GLANCE_COMMIT_HASH: '' GLANCE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/config/samples/glance_v1beta1_glance.yaml GLANCE_IMG: quay.io/openstack-k8s-operators/glance-operator-index:latest GLANCE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/kuttl-test.yaml GLANCE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/test/kuttl/tests GLANCE_KUTTL_NAMESPACE: glance-kuttl-tests GLANCE_REPO: https://github.com/openstack-k8s-operators/glance-operator.git HEAT: config/samples/heat_v1beta1_heat.yaml HEATAPI_DEPL_IMG: unused HEATCFNAPI_DEPL_IMG: unused HEATENGINE_DEPL_IMG: unused HEAT_AUTH_ENCRYPTION_KEY: 76**********f0 HEAT_BRANCH: main HEAT_COMMIT_HASH: '' HEAT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/config/samples/heat_v1beta1_heat.yaml HEAT_IMG: quay.io/openstack-k8s-operators/heat-operator-index:latest HEAT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/kuttl-test.yaml HEAT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/test/kuttl/tests HEAT_KUTTL_NAMESPACE: heat-kuttl-tests HEAT_REPO: https://github.com/openstack-k8s-operators/heat-operator.git HEAT_SERVICE_ENABLED: 'true' HORIZON: config/samples/horizon_v1beta1_horizon.yaml HORIZON_BRANCH: main HORIZON_COMMIT_HASH: '' HORIZON_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/config/samples/horizon_v1beta1_horizon.yaml HORIZON_DEPL_IMG: unused HORIZON_IMG: quay.io/openstack-k8s-operators/horizon-operator-index:latest HORIZON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/kuttl-test.yaml HORIZON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/test/kuttl/tests HORIZON_KUTTL_NAMESPACE: horizon-kuttl-tests HORIZON_REPO: https://github.com/openstack-k8s-operators/horizon-operator.git INFRA_BRANCH: main INFRA_COMMIT_HASH: '' INFRA_IMG: quay.io/openstack-k8s-operators/infra-operator-index:latest INFRA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/kuttl-test.yaml INFRA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/test/kuttl/tests INFRA_KUTTL_NAMESPACE: infra-kuttl-tests INFRA_REPO: https://github.com/openstack-k8s-operators/infra-operator.git INSTALL_CERT_MANAGER: 'true' INSTALL_NMSTATE: true || false INSTALL_NNCP: true || false INTERNALAPI_HOST_ROUTES: '' IPV6_LAB_IPV4_NETWORK_IPADDRESS: 172.30.0.1/24 IPV6_LAB_IPV6_NETWORK_IPADDRESS: fd00:abcd:abcd:fc00::1/64 IPV6_LAB_LIBVIRT_STORAGE_POOL: default IPV6_LAB_MANAGE_FIREWALLD: 'true' IPV6_LAB_NAT64_HOST_IPV4: 172.30.0.2/24 IPV6_LAB_NAT64_HOST_IPV6: fd00:abcd:abcd:fc00::2/64 IPV6_LAB_NAT64_INSTANCE_NAME: nat64-router IPV6_LAB_NAT64_IPV6_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_NAT64_TAYGA_DYNAMIC_POOL: 192.168.255.0/24 IPV6_LAB_NAT64_TAYGA_IPV4: 192.168.255.1 IPV6_LAB_NAT64_TAYGA_IPV6: fd00:abcd:abcd:fc00::3 IPV6_LAB_NAT64_TAYGA_IPV6_PREFIX: fd00:abcd:abcd:fcff::/96 
IPV6_LAB_NAT64_UPDATE_PACKAGES: 'false' IPV6_LAB_NETWORK_NAME: nat64 IPV6_LAB_SNO_CLUSTER_NETWORK: fd00:abcd:0::/48 IPV6_LAB_SNO_HOST_IP: fd00:abcd:abcd:fc00::11 IPV6_LAB_SNO_HOST_PREFIX: '64' IPV6_LAB_SNO_INSTANCE_NAME: sno IPV6_LAB_SNO_MACHINE_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_SNO_OCP_MIRROR_URL: https://mirror.openshift.com/pub/openshift-v4/clients/ocp IPV6_LAB_SNO_OCP_VERSION: latest-4.14 IPV6_LAB_SNO_SERVICE_NETWORK: fd00:abcd:abcd:fc03::/112 IPV6_LAB_SSH_PUB_KEY: /home/zuul/.ssh/id_rsa.pub IPV6_LAB_WORK_DIR: /home/zuul/.ipv6lab IRONIC: config/samples/ironic_v1beta1_ironic.yaml IRONICAPI_DEPL_IMG: unused IRONICCON_DEPL_IMG: unused IRONICINS_DEPL_IMG: unused IRONICNAG_DEPL_IMG: unused IRONICPXE_DEPL_IMG: unused IRONIC_BRANCH: main IRONIC_COMMIT_HASH: '' IRONIC_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/config/samples/ironic_v1beta1_ironic.yaml IRONIC_IMAGE: quay.io/metal3-io/ironic IRONIC_IMAGE_TAG: release-24.1 IRONIC_IMG: quay.io/openstack-k8s-operators/ironic-operator-index:latest IRONIC_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/kuttl-test.yaml IRONIC_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/test/kuttl/tests IRONIC_KUTTL_NAMESPACE: ironic-kuttl-tests IRONIC_REPO: https://github.com/openstack-k8s-operators/ironic-operator.git KEYSTONEAPI: config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_DEPL_IMG: unused KEYSTONE_BRANCH: main KEYSTONE_COMMIT_HASH: '' KEYSTONE_FEDERATION_CLIENT_SECRET: CO**********6f KEYSTONE_FEDERATION_CRYPTO_PASSPHRASE: openstack KEYSTONE_IMG: quay.io/openstack-k8s-operators/keystone-operator-index:latest KEYSTONE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/kuttl-test.yaml KEYSTONE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/test/kuttl/tests KEYSTONE_KUTTL_NAMESPACE: keystone-kuttl-tests KEYSTONE_REPO: https://github.com/openstack-k8s-operators/keystone-operator.git KUBEADMIN_PWD: '12345678' LIBVIRT_SECRET: libvirt-secret LOKI_DEPLOY_MODE: openshift-network LOKI_DEPLOY_NAMESPACE: netobserv LOKI_DEPLOY_SIZE: 1x.demo LOKI_NAMESPACE: openshift-operators-redhat LOKI_OPERATOR_GROUP: openshift-operators-redhat-loki LOKI_SUBSCRIPTION: loki-operator LVMS_CR: '1' MANILA: config/samples/manila_v1beta1_manila.yaml MANILAAPI_DEPL_IMG: unused MANILASCH_DEPL_IMG: unused MANILASHARE_DEPL_IMG: unused MANILA_BRANCH: main MANILA_COMMIT_HASH: '' MANILA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/config/samples/manila_v1beta1_manila.yaml MANILA_IMG: quay.io/openstack-k8s-operators/manila-operator-index:latest MANILA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/kuttl-test.yaml MANILA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/test/kuttl/tests MANILA_KUTTL_NAMESPACE: manila-kuttl-tests MANILA_REPO: https://github.com/openstack-k8s-operators/manila-operator.git MANILA_SERVICE_ENABLED: 'true' MARIADB: config/samples/mariadb_v1beta1_galera.yaml MARIADB_BRANCH: main MARIADB_CHAINSAW_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/config.yaml MARIADB_CHAINSAW_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/tests 
MARIADB_CHAINSAW_NAMESPACE: mariadb-chainsaw-tests MARIADB_COMMIT_HASH: '' MARIADB_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/config/samples/mariadb_v1beta1_galera.yaml MARIADB_DEPL_IMG: unused MARIADB_IMG: quay.io/openstack-k8s-operators/mariadb-operator-index:latest MARIADB_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/kuttl-test.yaml MARIADB_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/kuttl/tests MARIADB_KUTTL_NAMESPACE: mariadb-kuttl-tests MARIADB_REPO: https://github.com/openstack-k8s-operators/mariadb-operator.git MEMCACHED: config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_DEPL_IMG: unused METADATA_SHARED_SECRET: '12**********42' METALLB_IPV6_POOL: fd00:aaaa::80-fd00:aaaa::90 METALLB_POOL: 192.168.122.80-192.168.122.90 MICROSHIFT: '0' NAMESPACE: openstack NETCONFIG: config/samples/network_v1beta1_netconfig.yaml NETCONFIG_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_netconfig.yaml NETCONFIG_DEPL_IMG: unused NETOBSERV_DEPLOY_NAMESPACE: netobserv NETOBSERV_NAMESPACE: openshift-netobserv-operator NETOBSERV_OPERATOR_GROUP: openshift-netobserv-operator-net NETOBSERV_SUBSCRIPTION: netobserv-operator NETWORK_BGP: 'false' NETWORK_DESIGNATE_ADDRESS_PREFIX: 172.28.0 NETWORK_DESIGNATE_EXT_ADDRESS_PREFIX: 172.50.0 NETWORK_INTERNALAPI_ADDRESS_PREFIX: 172.17.0 NETWORK_ISOLATION: 'true' NETWORK_ISOLATION_INSTANCE_NAME: crc NETWORK_ISOLATION_IPV4: 'true' NETWORK_ISOLATION_IPV4_ADDRESS: 172.16.1.1/24 NETWORK_ISOLATION_IPV4_NAT: 'true' NETWORK_ISOLATION_IPV6: 'false' NETWORK_ISOLATION_IPV6_ADDRESS: fd00:aaaa::1/64 NETWORK_ISOLATION_IP_ADDRESS: 192.168.122.10 NETWORK_ISOLATION_MAC: '52:54:00:11:11:10' NETWORK_ISOLATION_NETWORK_NAME: net-iso NETWORK_ISOLATION_NET_NAME: default NETWORK_ISOLATION_USE_DEFAULT_NETWORK: 'true' NETWORK_MTU: '1500' NETWORK_STORAGEMGMT_ADDRESS_PREFIX: 172.20.0 NETWORK_STORAGE_ADDRESS_PREFIX: 172.18.0 NETWORK_STORAGE_MACVLAN: '' NETWORK_TENANT_ADDRESS_PREFIX: 172.19.0 NETWORK_VLAN_START: '20' NETWORK_VLAN_STEP: '1' NEUTRONAPI: config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_DEPL_IMG: unused NEUTRON_BRANCH: main NEUTRON_COMMIT_HASH: '' NEUTRON_IMG: quay.io/openstack-k8s-operators/neutron-operator-index:latest NEUTRON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/kuttl-test.yaml NEUTRON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/test/kuttl/tests NEUTRON_KUTTL_NAMESPACE: neutron-kuttl-tests NEUTRON_REPO: https://github.com/openstack-k8s-operators/neutron-operator.git NFS_HOME: /home/nfs NMSTATE_NAMESPACE: openshift-nmstate NMSTATE_OPERATOR_GROUP: openshift-nmstate-tn6k8 NMSTATE_SUBSCRIPTION: kubernetes-nmstate-operator NNCP_ADDITIONAL_HOST_ROUTES: '' NNCP_BGP_1_INTERFACE: enp7s0 NNCP_BGP_1_IP_ADDRESS: 100.65.4.2 NNCP_BGP_2_INTERFACE: enp8s0 NNCP_BGP_2_IP_ADDRESS: 100.64.4.2 NNCP_BRIDGE: ospbr NNCP_CLEANUP_TIMEOUT: 120s NNCP_CTLPLANE_IPV6_ADDRESS_PREFIX: 'fd00:aaaa::' NNCP_CTLPLANE_IPV6_ADDRESS_SUFFIX: '10' NNCP_CTLPLANE_IP_ADDRESS_PREFIX: 192.168.122 NNCP_CTLPLANE_IP_ADDRESS_SUFFIX: '10' NNCP_DNS_SERVER: 192.168.122.1 
NNCP_DNS_SERVER_IPV6: fd00:aaaa::1 NNCP_GATEWAY: 192.168.122.1 NNCP_GATEWAY_IPV6: fd00:aaaa::1 NNCP_INTERFACE: enp6s0 NNCP_NODES: '' NNCP_TIMEOUT: 240s NOVA: config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_BRANCH: main NOVA_COMMIT_HASH: '' NOVA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/nova-operator/config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_IMG: quay.io/openstack-k8s-operators/nova-operator-index:latest NOVA_REPO: https://github.com/openstack-k8s-operators/nova-operator.git NUMBER_OF_INSTANCES: '1' OCP_NETWORK_NAME: crc OCTAVIA: config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_BRANCH: main OCTAVIA_COMMIT_HASH: '' OCTAVIA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_IMG: quay.io/openstack-k8s-operators/octavia-operator-index:latest OCTAVIA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/kuttl-test.yaml OCTAVIA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/test/kuttl/tests OCTAVIA_KUTTL_NAMESPACE: octavia-kuttl-tests OCTAVIA_REPO: https://github.com/openstack-k8s-operators/octavia-operator.git OKD: 'false' OPENSTACK_BRANCH: main OPENSTACK_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-bundle:latest OPENSTACK_COMMIT_HASH: '' OPENSTACK_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_CRDS_DIR: openstack_crds OPENSTACK_CTLPLANE: config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_IMG: quay.io/openstack-k8s-operators/openstack-operator-index:latest OPENSTACK_K8S_BRANCH: main OPENSTACK_K8S_TAG: latest OPENSTACK_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/kuttl-test.yaml OPENSTACK_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/test/kuttl/tests OPENSTACK_KUTTL_NAMESPACE: openstack-kuttl-tests OPENSTACK_NEUTRON_CUSTOM_CONF: '' OPENSTACK_REPO: https://github.com/openstack-k8s-operators/openstack-operator.git OPENSTACK_STORAGE_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-storage-bundle:latest OPERATOR_BASE_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator OPERATOR_CHANNEL: '' OPERATOR_NAMESPACE: openstack-operators OPERATOR_SOURCE: '' OPERATOR_SOURCE_NAMESPACE: '' OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm OVNCONTROLLER: config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_NMAP: 'true' OVNDBS: config/samples/ovn_v1beta1_ovndbcluster.yaml OVNDBS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovndbcluster.yaml OVNNORTHD: config/samples/ovn_v1beta1_ovnnorthd.yaml OVNNORTHD_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovnnorthd.yaml OVN_BRANCH: main OVN_COMMIT_HASH: '' OVN_IMG: quay.io/openstack-k8s-operators/ovn-operator-index:latest OVN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/kuttl-test.yaml OVN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/test/kuttl/tests OVN_KUTTL_NAMESPACE: ovn-kuttl-tests OVN_REPO: 
https://github.com/openstack-k8s-operators/ovn-operator.git PASSWORD: '12**********78' PLACEMENTAPI: config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_DEPL_IMG: unused PLACEMENT_BRANCH: main PLACEMENT_COMMIT_HASH: '' PLACEMENT_IMG: quay.io/openstack-k8s-operators/placement-operator-index:latest PLACEMENT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/kuttl-test.yaml PLACEMENT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/test/kuttl/tests PLACEMENT_KUTTL_NAMESPACE: placement-kuttl-tests PLACEMENT_REPO: https://github.com/openstack-k8s-operators/placement-operator.git PULL_SECRET: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/pull-secret.txt RABBITMQ: docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_BRANCH: patches RABBITMQ_COMMIT_HASH: '' RABBITMQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rabbitmq-operator/docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_DEPL_IMG: unused RABBITMQ_IMG: quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest RABBITMQ_REPO: https://github.com/openstack-k8s-operators/rabbitmq-cluster-operator.git REDHAT_OPERATORS: 'false' REDIS: config/samples/redis_v1beta1_redis.yaml REDIS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator-redis/config/samples/redis_v1beta1_redis.yaml REDIS_DEPL_IMG: unused RH_REGISTRY_PWD: '' RH_REGISTRY_USER: '' SECRET: os**********et SG_CORE_DEPL_IMG: unused STANDALONE_COMPUTE_DRIVER: libvirt STANDALONE_EXTERNAL_NET_PREFFIX: 172.21.0 STANDALONE_INTERNALAPI_NET_PREFIX: 172.17.0 STANDALONE_STORAGEMGMT_NET_PREFIX: 172.20.0 STANDALONE_STORAGE_NET_PREFIX: 172.18.0 STANDALONE_TENANT_NET_PREFIX: 172.19.0 STORAGEMGMT_HOST_ROUTES: '' STORAGE_CLASS: local-storage STORAGE_HOST_ROUTES: '' SWIFT: config/samples/swift_v1beta1_swift.yaml SWIFT_BRANCH: main SWIFT_COMMIT_HASH: '' SWIFT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/config/samples/swift_v1beta1_swift.yaml SWIFT_IMG: quay.io/openstack-k8s-operators/swift-operator-index:latest SWIFT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/kuttl-test.yaml SWIFT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests SWIFT_KUTTL_NAMESPACE: swift-kuttl-tests SWIFT_REPO: https://github.com/openstack-k8s-operators/swift-operator.git TELEMETRY: config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_BRANCH: main TELEMETRY_COMMIT_HASH: '' TELEMETRY_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_IMG: quay.io/openstack-k8s-operators/telemetry-operator-index:latest TELEMETRY_KUTTL_BASEDIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator TELEMETRY_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/kuttl-test.yaml TELEMETRY_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/test/kuttl/suites TELEMETRY_KUTTL_NAMESPACE: telemetry-kuttl-tests TELEMETRY_KUTTL_RELPATH: test/kuttl/suites TELEMETRY_REPO: https://github.com/openstack-k8s-operators/telemetry-operator.git TENANT_HOST_ROUTES: '' TIMEOUT: 300s TLS_ENABLED: 'false' tripleo_deploy: 'export REGISTRY_PWD:' 
cifmw_install_yamls_environment: CHECKOUT_FROM_OPENSTACK_REF: 'true' KUBECONFIG: /home/zuul/.crc/machines/crc/kubeconfig OPENSTACK_K8S_BRANCH: main OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm cifmw_installyamls_repos: /home/zuul/src/github.com/openstack-k8s-operators/install_yamls cifmw_installyamls_repos_relative: src/github.com/openstack-k8s-operators/install_yamls cifmw_openshift_api: https://api.crc.testing:6443 cifmw_openshift_context: default/api-crc-testing:6443/kubeadmin cifmw_openshift_kubeconfig: /home/zuul/.crc/machines/crc/kubeconfig cifmw_openshift_password: '12**********89' cifmw_openshift_skip_tls_verify: true cifmw_openshift_token: sha256~pOUgk-9850hxxEZK6G0UMKUA3Hu8vVpuezKn-haJb_U cifmw_openshift_user: kubeadmin cifmw_os_must_gather_additional_namespaces: kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko cifmw_os_must_gather_dump_db: ALL cifmw_os_must_gather_host_network: false cifmw_os_must_gather_image: quay.io/openstack-k8s-operators/openstack-must-gather:latest cifmw_os_must_gather_image_push: true cifmw_os_must_gather_image_registry: quay.rdoproject.org/openstack-k8s-operators cifmw_os_must_gather_kubeconfig: '{{ ansible_user_dir }}/.kube/config' cifmw_os_must_gather_namespaces: - openstack-operators - openstack - baremetal-operator-system - openshift-machine-api - cert-manager - openshift-nmstate - openshift-marketplace - metallb-system - crc-storage cifmw_os_must_gather_output_dir: '{{ cifmw_basedir | default(ansible_user_dir ~ ''/ci-framework-data'') }}' cifmw_os_must_gather_output_log_dir: '{{ cifmw_os_must_gather_output_dir }}/logs/openstack-must-gather' cifmw_os_must_gather_repo_path: '{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/openstack-must-gather' cifmw_os_must_gather_timeout: 30m cifmw_path: /home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:~/.crc/bin:~/.crc/bin/oc:~/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin cifmw_project_dir: src/github.com/openstack-k8s-operators/ci-framework cifmw_project_dir_absolute: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework cifmw_run_hook_debug: '{{ (ansible_verbosity | int) >= 2 | bool }}' cifmw_run_tests: false cifmw_status: changed: false failed: false stat: atime: 1765216439.9756842 attr_flags: '' attributes: [] block_size: 4096 blocks: 8 charset: binary ctime: 1765216443.7497857 dev: 64513 device_type: 0 executable: true exists: true gid: 1000 gr_name: zuul inode: 41956871 isblk: false ischr: false isdir: true isfifo: false isgid: false islnk: false isreg: false issock: false isuid: false mimetype: inode/directory mode: '0755' mtime: 1765216443.7497857 nlink: 21 path: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework pw_name: zuul readable: true rgrp: true roth: true rusr: true size: 4096 uid: 1000 version: '1325166596' wgrp: false woth: false writeable: true wusr: true xgrp: true xoth: true xusr: true cifmw_success_flag: changed: false failed: false stat: exists: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller crc_ci_bootstrap_cloud_name: '{{ nodepool.cloud | replace(''-nodepool-tripleo'','''') }}' crc_ci_bootstrap_instance_default_net_config: mtu: 1500 range: 192.168.122.0/24 crc_ci_bootstrap_instance_nm_vlan_networks: - key: internal-api value: ip: 172.17.0.5 - key: storage value: ip: 172.18.0.5 - key: tenant value: ip: 172.19.0.5 crc_ci_bootstrap_instance_parent_port_create_yaml: admin_state_up: 
true allowed_address_pairs: [] binding_host_id: null binding_profile: {} binding_vif_details: {} binding_vif_type: null binding_vnic_type: normal created_at: '2025-12-08T17:39:31Z' data_plane_status: null description: '' device_id: '' device_owner: '' device_profile: null dns_assignment: - fqdn: host-192-168-122-10.openstacklocal. hostname: host-192-168-122-10 ip_address: 192.168.122.10 dns_domain: '' dns_name: '' extra_dhcp_opts: [] fixed_ips: - ip_address: 192.168.122.10 subnet_id: 1ec71021-8196-48c3-b107-9041e6f5f679 hardware_offload_type: null hints: '' id: d37cddfa-716b-4541-992b-5180463c6809 ip_allocation: immediate mac_address: fa:16:3e:e6:79:2f name: crc-32c1a977-c4dc-4b4f-b307-ff2a2f4e57f1 network_id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb numa_affinity_policy: null port_security_enabled: false project_id: 4b633c451ac74233be3721a3635275e5 propagate_uplink_status: null qos_network_policy_id: null qos_policy_id: null resource_request: null revision_number: 1 security_group_ids: [] status: DOWN tags: [] trunk_details: null trusted: null updated_at: '2025-12-08T17:39:31Z' crc_ci_bootstrap_network_name: zuul-ci-net-fdae5567 crc_ci_bootstrap_networking: instances: controller: networks: default: ip: 192.168.122.11 crc: networks: default: ip: 192.168.122.10 internal-api: ip: 172.17.0.5 storage: ip: 172.18.0.5 tenant: ip: 172.19.0.5 networks: default: mtu: 1500 range: 192.168.122.0/24 internal-api: range: 172.17.0.0/24 vlan: 20 storage: range: 172.18.0.0/24 vlan: 21 tenant: range: 172.19.0.0/24 vlan: 22 crc_ci_bootstrap_networks_out: controller: default: connection: ci-private-network gw: 192.168.122.1 iface: eth1 ip: 192.168.122.11/24 mac: fa:16:3e:6a:de:3b mtu: 1500 crc: default: connection: ci-private-network gw: 192.168.122.1 iface: ens7 ip: 192.168.122.10/24 mac: fa:16:3e:e6:79:2f mtu: 1500 internal-api: connection: ci-private-network-20 iface: ens7.20 ip: 172.17.0.5/24 mac: 52:54:00:84:35:42 mtu: '1496' parent_iface: ens7 vlan: 20 storage: connection: ci-private-network-21 iface: ens7.21 ip: 172.18.0.5/24 mac: 52:54:00:9c:32:12 mtu: '1496' parent_iface: ens7 vlan: 21 tenant: connection: ci-private-network-22 iface: ens7.22 ip: 172.19.0.5/24 mac: 52:54:00:ec:da:21 mtu: '1496' parent_iface: ens7 vlan: 22 crc_ci_bootstrap_private_net_create_yaml: admin_state_up: true availability_zone_hints: - nova availability_zones: [] created_at: '2025-12-08T17:38:55Z' description: '' dns_domain: '' id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb ipv4_address_scope: null ipv6_address_scope: null is_default: false is_vlan_qinq: null is_vlan_transparent: false l2_adjacency: true mtu: 1500 name: zuul-ci-net-fdae5567 port_security_enabled: false project_id: 4b633c451ac74233be3721a3635275e5 provider:network_type: null provider:physical_network: null provider:segmentation_id: null qos_policy_id: null revision_number: 1 router:external: false segments: null shared: false status: ACTIVE subnets: [] tags: [] updated_at: '2025-12-08T17:38:55Z' crc_ci_bootstrap_private_router_create_yaml: admin_state_up: true availability_zone_hints: - nova availability_zones: [] created_at: '2025-12-08T17:39:01Z' description: '' enable_ndp_proxy: null external_gateway_info: enable_snat: true external_fixed_ips: - ip_address: 38.102.83.145 subnet_id: 3169b11b-94b1-4bc9-9727-4fdbbe15e56e network_id: 7abff1a9-a103-46d0-979a-1f1e599f4f41 flavor_id: null id: edc0cd0e-14dd-4588-9e19-d36444fdd18f name: zuul-ci-subnet-router-fdae5567 project_id: 4b633c451ac74233be3721a3635275e5 revision_number: 3 routes: [] status: ACTIVE tags: [] tenant_id: 
4b633c451ac74233be3721a3635275e5 updated_at: '2025-12-08T17:39:02Z' crc_ci_bootstrap_private_subnet_create_yaml: allocation_pools: - end: 192.168.122.254 start: 192.168.122.2 cidr: 192.168.122.0/24 created_at: '2025-12-08T17:38:58Z' description: '' dns_nameservers: [] dns_publish_fixed_ip: null enable_dhcp: false gateway_ip: 192.168.122.1 host_routes: [] id: 1ec71021-8196-48c3-b107-9041e6f5f679 ip_version: 4 ipv6_address_mode: null ipv6_ra_mode: null name: zuul-ci-subnet-fdae5567 network_id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb project_id: 4b633c451ac74233be3721a3635275e5 revision_number: 0 segment_id: null service_types: [] subnetpool_id: null tags: [] updated_at: '2025-12-08T17:38:58Z' crc_ci_bootstrap_provider_dns: - 199.204.44.24 - 199.204.47.54 crc_ci_bootstrap_router_name: zuul-ci-subnet-router-fdae5567 crc_ci_bootstrap_subnet_name: zuul-ci-subnet-fdae5567 discovered_interpreter_python: /usr/bin/python3 enable_ramdisk: true environment: - ANSIBLE_LOG_PATH: '{{ ansible_user_dir }}/ci-framework-data/logs/e2e-collect-logs-must-gather.log' gather_subset: - min group_names: - ungrouped groups: all: - controller - crc ungrouped: &id001 - controller - crc zuul_unreachable: [] hostvars: controller: _included_dir: changed: false failed: false stat: atime: 1765216391.9143927 attr_flags: '' attributes: [] block_size: 4096 blocks: 0 charset: binary ctime: 1765216397.4945426 dev: 64513 device_type: 0 executable: true exists: true gid: 1000 gr_name: zuul inode: 16835667 isblk: false ischr: false isdir: true isfifo: false isgid: false islnk: false isreg: false issock: false isuid: false mimetype: inode/directory mode: '0755' mtime: 1765216397.4945426 nlink: 2 path: /home/zuul/ci-framework-data/artifacts/parameters pw_name: zuul readable: true rgrp: true roth: true rusr: true size: 120 uid: 1000 version: '2849268541' wgrp: false woth: false writeable: true wusr: true xgrp: true xoth: true xusr: true _included_file: changed: false failed: false stat: atime: 1765216396.6405199 attr_flags: '' attributes: [] block_size: 4096 blocks: 8 charset: us-ascii checksum: 4199ad8a98c2c9b8188e0c13884cc6a979bb72ab ctime: 1765216396.6435199 dev: 64513 device_type: 0 executable: false exists: true gid: 1000 gr_name: zuul inode: 54575205 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mimetype: text/plain mode: '0600' mtime: 1765216396.3655124 nlink: 1 path: /home/zuul/ci-framework-data/artifacts/parameters/openshift-login-params.yml pw_name: zuul readable: true rgrp: false roth: false rusr: true size: 288 uid: 1000 version: '2535368820' wgrp: false woth: false writeable: true wusr: true xgrp: false xoth: false xusr: false _parsed_vars: changed: false content: Y2lmbXdfb3BlbnNoaWZ0X2FwaTogaHR0cHM6Ly9hcGkuY3JjLnRlc3Rpbmc6NjQ0MwpjaWZtd19vcGVuc2hpZnRfY29udGV4dDogZGVmYXVsdC9hcGktY3JjLXRlc3Rpbmc6NjQ0My9rdWJlYWRtaW4KY2lmbXdfb3BlbnNoaWZ0X2t1YmVjb25maWc6IC9ob21lL3p1dWwvLmNyYy9tYWNoaW5lcy9jcmMva3ViZWNvbmZpZwpjaWZtd19vcGVuc2hpZnRfdG9rZW46IHNoYTI1Nn5wT1Vnay05ODUwaHh4RVpLNkcwVU1LVUEzSHU4dlZwdWV6S24taGFKYl9VCmNpZm13X29wZW5zaGlmdF91c2VyOiBrdWJlYWRtaW4K encoding: base64 failed: false source: /home/zuul/ci-framework-data/artifacts/parameters/openshift-login-params.yml _tmp_dir: changed: true failed: false gid: 10001 group: zuul mode: '0700' owner: zuul path: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/tmp/ansible.016z1ouz size: 40 state: directory uid: 10001 _yaml_files: changed: false examined: 4 failed: false files: - atime: 1765216328.549688 ctime: 
1765216326.5106332 dev: 64513 gid: 1000 gr_name: zuul inode: 33601311 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mode: '0644' mtime: 1765216326.2326257 nlink: 1 path: /home/zuul/ci-framework-data/artifacts/parameters/zuul-params.yml pw_name: zuul rgrp: true roth: true rusr: true size: 20213 uid: 1000 wgrp: false woth: false wusr: true xgrp: false xoth: false xusr: false - atime: 1765216397.4945426 ctime: 1765216397.4985428 dev: 64513 gid: 1000 gr_name: zuul inode: 71338228 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mode: '0600' mtime: 1765216397.3425386 nlink: 1 path: /home/zuul/ci-framework-data/artifacts/parameters/install-yamls-params.yml pw_name: zuul rgrp: false roth: false rusr: true size: 28064 uid: 1000 wgrp: false woth: false wusr: true xgrp: false xoth: false xusr: false - atime: 1765216391.9143927 ctime: 1765216390.175346 dev: 64513 gid: 1000 gr_name: zuul inode: 146828183 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mode: '0644' mtime: 1765216389.984341 nlink: 1 path: /home/zuul/ci-framework-data/artifacts/parameters/custom-params.yml pw_name: zuul rgrp: true roth: true rusr: true size: 1126 uid: 1000 wgrp: false woth: false wusr: true xgrp: false xoth: false xusr: false - atime: 1765216396.6405199 ctime: 1765216396.6435199 dev: 64513 gid: 1000 gr_name: zuul inode: 54575205 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mode: '0600' mtime: 1765216396.3655124 nlink: 1 path: /home/zuul/ci-framework-data/artifacts/parameters/openshift-login-params.yml pw_name: zuul rgrp: false roth: false rusr: true size: 288 uid: 1000 wgrp: false woth: false wusr: true xgrp: false xoth: false xusr: false matched: 4 msg: All paths examined skipped_paths: {} ansible_all_ipv4_addresses: - 38.102.83.251 ansible_all_ipv6_addresses: - fe80::f816:3eff:fe97:c9c3 ansible_apparmor: status: disabled ansible_architecture: x86_64 ansible_bios_date: 04/01/2014 ansible_bios_vendor: SeaBIOS ansible_bios_version: 1.15.0-1 ansible_board_asset_tag: NA ansible_board_name: NA ansible_board_serial: NA ansible_board_vendor: NA ansible_board_version: NA ansible_chassis_asset_tag: NA ansible_chassis_serial: NA ansible_chassis_vendor: QEMU ansible_chassis_version: pc-i440fx-6.2 ansible_check_mode: false ansible_cmdline: BOOT_IMAGE: (hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 console: ttyS0,115200n8 crashkernel: 1G-2G:192M,2G-64G:256M,64G-:512M net.ifnames: '0' no_timer_check: true ro: true root: UUID=fcf6b761-831a-48a7-9f5f-068b5063763f ansible_config_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1/ansible.cfg ansible_connection: ssh ansible_date_time: date: '2025-12-08' day: 08 epoch: '1765216950' epoch_int: '1765216950' hour: '18' iso8601: '2025-12-08T18:02:30Z' iso8601_basic: 20251208T180230661772 iso8601_basic_short: 20251208T180230 iso8601_micro: '2025-12-08T18:02:30.661772Z' minute: '02' month: '12' second: '30' time: '18:02:30' tz: UTC tz_dst: UTC tz_offset: '+0000' weekday: Monday weekday_number: '1' weeknumber: '49' year: '2025' ansible_default_ipv4: address: 38.102.83.251 alias: eth0 broadcast: 38.102.83.255 gateway: 38.102.83.1 interface: eth0 macaddress: fa:16:3e:97:c9:c3 mtu: 1500 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' type: ether ansible_default_ipv6: {} 
ansible_device_links: ids: sr0: - ata-QEMU_DVD-ROM_QM00001 labels: sr0: - config-2 masters: {} uuids: sr0: - 2025-12-08-17-34-40-00 vda1: - fcf6b761-831a-48a7-9f5f-068b5063763f ansible_devices: sr0: holders: [] host: '' links: ids: - ata-QEMU_DVD-ROM_QM00001 labels: - config-2 masters: [] uuids: - 2025-12-08-17-34-40-00 model: QEMU DVD-ROM partitions: {} removable: '1' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '964' sectorsize: '2048' size: 482.00 KB support_discard: '2048' vendor: QEMU virtual: 1 vda: holders: [] host: '' links: ids: [] labels: [] masters: [] uuids: [] model: null partitions: vda1: holders: [] links: ids: [] labels: [] masters: [] uuids: - fcf6b761-831a-48a7-9f5f-068b5063763f sectors: '167770079' sectorsize: 512 size: 80.00 GB start: '2048' uuid: fcf6b761-831a-48a7-9f5f-068b5063763f removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '167772160' sectorsize: '512' size: 80.00 GB support_discard: '512' vendor: '0x1af4' virtual: 1 ansible_diff_mode: false ansible_distribution: CentOS ansible_distribution_file_parsed: true ansible_distribution_file_path: /etc/centos-release ansible_distribution_file_variety: CentOS ansible_distribution_major_version: '9' ansible_distribution_release: Stream ansible_distribution_version: '9' ansible_dns: nameservers: - 192.168.122.10 - 199.204.44.24 - 199.204.47.54 ansible_domain: '' ansible_effective_group_id: 1000 ansible_effective_user_id: 1000 ansible_env: ANSIBLE_LOG_PATH: /home/zuul/ci-framework-data/logs/e2e-collect-logs-must-gather.log BASH_FUNC_which%%: "() { ( alias;\n eval ${which_declare} ) | /usr/bin/which --tty-only --read-alias --read-functions --show-tilde --show-dot $@\n}" DBUS_SESSION_BUS_ADDRESS: unix:path=/run/user/1000/bus DEBUGINFOD_IMA_CERT_PATH: '/etc/keys/ima:' DEBUGINFOD_URLS: 'https://debuginfod.centos.org/ ' HOME: /home/zuul KUBECONFIG: /home/zuul/.crc/machines/crc/kubeconfig LANG: en_US.UTF-8 LESSOPEN: '||/usr/bin/lesspipe.sh %s' LOGNAME: zuul MOTD_SHOWN: pam PATH: /home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin PWD: /home/zuul SELINUX_LEVEL_REQUESTED: '' SELINUX_ROLE_REQUESTED: '' SELINUX_USE_CURRENT_RANGE: '' SHELL: /bin/bash SHLVL: '1' SSH_CLIENT: 38.102.83.114 38794 22 SSH_CONNECTION: 38.102.83.114 38794 38.102.83.251 22 USER: zuul XDG_RUNTIME_DIR: /run/user/1000 XDG_SESSION_CLASS: user XDG_SESSION_ID: '17' XDG_SESSION_TYPE: tty _: /usr/bin/python3 which_declare: declare -f ansible_eth0: active: true device: eth0 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: 'on' rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: on [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 
'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: off [fixed] tx_gso_partial: off [fixed] tx_gso_robust: on [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: off [fixed] tx_sctp_segmentation: off [fixed] tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'off' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: off [fixed] tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: off [fixed] hw_timestamp_filters: [] ipv4: address: 38.102.83.251 broadcast: 38.102.83.255 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' ipv6: - address: fe80::f816:3eff:fe97:c9c3 prefix: '64' scope: link macaddress: fa:16:3e:97:c9:c3 module: virtio_net mtu: 1500 pciid: virtio1 promisc: false speed: -1 timestamping: [] type: ether ansible_facts: _ansible_facts_gathered: true all_ipv4_addresses: - 38.102.83.251 all_ipv6_addresses: - fe80::f816:3eff:fe97:c9c3 ansible_local: {} apparmor: status: disabled architecture: x86_64 bios_date: 04/01/2014 bios_vendor: SeaBIOS bios_version: 1.15.0-1 board_asset_tag: NA board_name: NA board_serial: NA board_vendor: NA board_version: NA chassis_asset_tag: NA chassis_serial: NA chassis_vendor: QEMU chassis_version: pc-i440fx-6.2 cmdline: BOOT_IMAGE: (hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 console: ttyS0,115200n8 crashkernel: 1G-2G:192M,2G-64G:256M,64G-:512M net.ifnames: '0' no_timer_check: true ro: true root: UUID=fcf6b761-831a-48a7-9f5f-068b5063763f crc_ci_bootstrap_instance_default_net_config: mtu: 1500 range: 192.168.122.0/24 crc_ci_bootstrap_instance_nm_vlan_networks: - key: internal-api value: ip: 172.17.0.5 - key: storage value: ip: 172.18.0.5 - key: tenant value: ip: 172.19.0.5 crc_ci_bootstrap_instance_parent_port_create_yaml: admin_state_up: true allowed_address_pairs: [] binding_host_id: null binding_profile: {} binding_vif_details: {} binding_vif_type: null binding_vnic_type: normal created_at: '2025-12-08T17:39:31Z' data_plane_status: null description: '' device_id: '' device_owner: '' device_profile: null dns_assignment: - fqdn: host-192-168-122-10.openstacklocal. 
hostname: host-192-168-122-10 ip_address: 192.168.122.10 dns_domain: '' dns_name: '' extra_dhcp_opts: [] fixed_ips: - ip_address: 192.168.122.10 subnet_id: 1ec71021-8196-48c3-b107-9041e6f5f679 hardware_offload_type: null hints: '' id: d37cddfa-716b-4541-992b-5180463c6809 ip_allocation: immediate mac_address: fa:16:3e:e6:79:2f name: crc-32c1a977-c4dc-4b4f-b307-ff2a2f4e57f1 network_id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb numa_affinity_policy: null port_security_enabled: false project_id: 4b633c451ac74233be3721a3635275e5 propagate_uplink_status: null qos_network_policy_id: null qos_policy_id: null resource_request: null revision_number: 1 security_group_ids: [] status: DOWN tags: [] trunk_details: null trusted: null updated_at: '2025-12-08T17:39:31Z' crc_ci_bootstrap_network_name: zuul-ci-net-fdae5567 crc_ci_bootstrap_networks_out: controller: default: connection: ci-private-network gw: 192.168.122.1 iface: eth1 ip: 192.168.122.11/24 mac: fa:16:3e:6a:de:3b mtu: 1500 crc: default: connection: ci-private-network gw: 192.168.122.1 iface: ens7 ip: 192.168.122.10/24 mac: fa:16:3e:e6:79:2f mtu: 1500 internal-api: connection: ci-private-network-20 iface: ens7.20 ip: 172.17.0.5/24 mac: 52:54:00:84:35:42 mtu: '1496' parent_iface: ens7 vlan: 20 storage: connection: ci-private-network-21 iface: ens7.21 ip: 172.18.0.5/24 mac: 52:54:00:9c:32:12 mtu: '1496' parent_iface: ens7 vlan: 21 tenant: connection: ci-private-network-22 iface: ens7.22 ip: 172.19.0.5/24 mac: 52:54:00:ec:da:21 mtu: '1496' parent_iface: ens7 vlan: 22 crc_ci_bootstrap_private_net_create_yaml: admin_state_up: true availability_zone_hints: - nova availability_zones: [] created_at: '2025-12-08T17:38:55Z' description: '' dns_domain: '' id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb ipv4_address_scope: null ipv6_address_scope: null is_default: false is_vlan_qinq: null is_vlan_transparent: false l2_adjacency: true mtu: 1500 name: zuul-ci-net-fdae5567 port_security_enabled: false project_id: 4b633c451ac74233be3721a3635275e5 provider:network_type: null provider:physical_network: null provider:segmentation_id: null qos_policy_id: null revision_number: 1 router:external: false segments: null shared: false status: ACTIVE subnets: [] tags: [] updated_at: '2025-12-08T17:38:55Z' crc_ci_bootstrap_private_router_create_yaml: admin_state_up: true availability_zone_hints: - nova availability_zones: [] created_at: '2025-12-08T17:39:01Z' description: '' enable_ndp_proxy: null external_gateway_info: enable_snat: true external_fixed_ips: - ip_address: 38.102.83.145 subnet_id: 3169b11b-94b1-4bc9-9727-4fdbbe15e56e network_id: 7abff1a9-a103-46d0-979a-1f1e599f4f41 flavor_id: null id: edc0cd0e-14dd-4588-9e19-d36444fdd18f name: zuul-ci-subnet-router-fdae5567 project_id: 4b633c451ac74233be3721a3635275e5 revision_number: 3 routes: [] status: ACTIVE tags: [] tenant_id: 4b633c451ac74233be3721a3635275e5 updated_at: '2025-12-08T17:39:02Z' crc_ci_bootstrap_private_subnet_create_yaml: allocation_pools: - end: 192.168.122.254 start: 192.168.122.2 cidr: 192.168.122.0/24 created_at: '2025-12-08T17:38:58Z' description: '' dns_nameservers: [] dns_publish_fixed_ip: null enable_dhcp: false gateway_ip: 192.168.122.1 host_routes: [] id: 1ec71021-8196-48c3-b107-9041e6f5f679 ip_version: 4 ipv6_address_mode: null ipv6_ra_mode: null name: zuul-ci-subnet-fdae5567 network_id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb project_id: 4b633c451ac74233be3721a3635275e5 revision_number: 0 segment_id: null service_types: [] subnetpool_id: null tags: [] updated_at: '2025-12-08T17:38:58Z' 
crc_ci_bootstrap_provider_dns: - 199.204.44.24 - 199.204.47.54 crc_ci_bootstrap_router_name: zuul-ci-subnet-router-fdae5567 crc_ci_bootstrap_subnet_name: zuul-ci-subnet-fdae5567 date_time: date: '2025-12-08' day: 08 epoch: '1765216950' epoch_int: '1765216950' hour: '18' iso8601: '2025-12-08T18:02:30Z' iso8601_basic: 20251208T180230661772 iso8601_basic_short: 20251208T180230 iso8601_micro: '2025-12-08T18:02:30.661772Z' minute: '02' month: '12' second: '30' time: '18:02:30' tz: UTC tz_dst: UTC tz_offset: '+0000' weekday: Monday weekday_number: '1' weeknumber: '49' year: '2025' default_ipv4: address: 38.102.83.251 alias: eth0 broadcast: 38.102.83.255 gateway: 38.102.83.1 interface: eth0 macaddress: fa:16:3e:97:c9:c3 mtu: 1500 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' type: ether default_ipv6: {} device_links: ids: sr0: - ata-QEMU_DVD-ROM_QM00001 labels: sr0: - config-2 masters: {} uuids: sr0: - 2025-12-08-17-34-40-00 vda1: - fcf6b761-831a-48a7-9f5f-068b5063763f devices: sr0: holders: [] host: '' links: ids: - ata-QEMU_DVD-ROM_QM00001 labels: - config-2 masters: [] uuids: - 2025-12-08-17-34-40-00 model: QEMU DVD-ROM partitions: {} removable: '1' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '964' sectorsize: '2048' size: 482.00 KB support_discard: '2048' vendor: QEMU virtual: 1 vda: holders: [] host: '' links: ids: [] labels: [] masters: [] uuids: [] model: null partitions: vda1: holders: [] links: ids: [] labels: [] masters: [] uuids: - fcf6b761-831a-48a7-9f5f-068b5063763f sectors: '167770079' sectorsize: 512 size: 80.00 GB start: '2048' uuid: fcf6b761-831a-48a7-9f5f-068b5063763f removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '167772160' sectorsize: '512' size: 80.00 GB support_discard: '512' vendor: '0x1af4' virtual: 1 discovered_interpreter_python: /usr/bin/python3 distribution: CentOS distribution_file_parsed: true distribution_file_path: /etc/centos-release distribution_file_variety: CentOS distribution_major_version: '9' distribution_release: Stream distribution_version: '9' dns: nameservers: - 192.168.122.10 - 199.204.44.24 - 199.204.47.54 domain: '' effective_group_id: 1000 effective_user_id: 1000 env: ANSIBLE_LOG_PATH: /home/zuul/ci-framework-data/logs/e2e-collect-logs-must-gather.log BASH_FUNC_which%%: "() { ( alias;\n eval ${which_declare} ) | /usr/bin/which --tty-only --read-alias --read-functions --show-tilde --show-dot $@\n}" DBUS_SESSION_BUS_ADDRESS: unix:path=/run/user/1000/bus DEBUGINFOD_IMA_CERT_PATH: '/etc/keys/ima:' DEBUGINFOD_URLS: 'https://debuginfod.centos.org/ ' HOME: /home/zuul KUBECONFIG: /home/zuul/.crc/machines/crc/kubeconfig LANG: en_US.UTF-8 LESSOPEN: '||/usr/bin/lesspipe.sh %s' LOGNAME: zuul MOTD_SHOWN: pam PATH: /home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin PWD: /home/zuul SELINUX_LEVEL_REQUESTED: '' SELINUX_ROLE_REQUESTED: '' SELINUX_USE_CURRENT_RANGE: '' SHELL: /bin/bash SHLVL: '1' SSH_CLIENT: 38.102.83.114 38794 22 SSH_CONNECTION: 38.102.83.114 38794 38.102.83.251 22 USER: zuul XDG_RUNTIME_DIR: /run/user/1000 XDG_SESSION_CLASS: user XDG_SESSION_ID: '17' XDG_SESSION_TYPE: tty _: /usr/bin/python3 which_declare: declare -f eth0: active: true device: eth0 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] 
hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: 'on' rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: on [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: off [fixed] tx_gso_partial: off [fixed] tx_gso_robust: on [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: off [fixed] tx_sctp_segmentation: off [fixed] tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'off' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: off [fixed] tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: off [fixed] hw_timestamp_filters: [] ipv4: address: 38.102.83.251 broadcast: 38.102.83.255 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' ipv6: - address: fe80::f816:3eff:fe97:c9c3 prefix: '64' scope: link macaddress: fa:16:3e:97:c9:c3 module: virtio_net mtu: 1500 pciid: virtio1 promisc: false speed: -1 timestamping: [] type: ether fibre_channel_wwn: [] fips: false form_factor: Other fqdn: controller gather_subset: - min hostname: controller hostnqn: nqn.2014-08.org.nvmexpress:uuid:bf3e0a14-a5f8-4123-aa26-e7cad37b879a interfaces: - lo - eth0 is_chroot: false iscsi_iqn: '' kernel: 5.14.0-645.el9.x86_64 kernel_version: '#1 SMP PREEMPT_DYNAMIC Fri Nov 28 14:01:17 UTC 2025' lo: active: true device: lo features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: on [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: on [fixed] tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: on [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] 
tx_gre_segmentation: off [fixed] tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: off [fixed] tx_scatter_gather: on [fixed] tx_scatter_gather_fraglist: on [fixed] tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: on [fixed] hw_timestamp_filters: [] ipv4: address: 127.0.0.1 broadcast: '' netmask: 255.0.0.0 network: 127.0.0.0 prefix: '8' ipv6: - address: ::1 prefix: '128' scope: host mtu: 65536 promisc: false timestamping: [] type: loopback loadavg: 15m: 0.3 1m: 0.55 5m: 0.68 locally_reachable_ips: ipv4: - 38.102.83.251 - 127.0.0.0/8 - 127.0.0.1 ipv6: - ::1 - fe80::f816:3eff:fe97:c9c3 lsb: {} lvm: N/A machine: x86_64 machine_id: 4d4ef2323cc3337bbfd9081b2a323b4e memfree_mb: 7161 memory_mb: nocache: free: 7372 used: 308 real: free: 7161 total: 7680 used: 519 swap: cached: 0 free: 0 total: 0 used: 0 memtotal_mb: 7680 module_setup: true mounts: - block_available: 20337100 block_size: 4096 block_total: 20954875 block_used: 617775 device: /dev/vda1 fstype: xfs inode_available: 41888405 inode_total: 41942512 inode_used: 54107 mount: / options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota size_available: 83300761600 size_total: 85831168000 uuid: fcf6b761-831a-48a7-9f5f-068b5063763f nodename: controller os_family: RedHat pkg_mgr: dnf proc_cmdline: BOOT_IMAGE: (hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 console: ttyS0,115200n8 crashkernel: 1G-2G:192M,2G-64G:256M,64G-:512M net.ifnames: '0' no_timer_check: true ro: true root: UUID=fcf6b761-831a-48a7-9f5f-068b5063763f processor: - '0' - AuthenticAMD - AMD EPYC-Rome Processor - '1' - AuthenticAMD - AMD EPYC-Rome Processor - '2' - AuthenticAMD - AMD EPYC-Rome Processor - '3' - AuthenticAMD - AMD EPYC-Rome Processor - '4' - AuthenticAMD - AMD EPYC-Rome Processor - '5' - AuthenticAMD - AMD EPYC-Rome Processor - '6' - AuthenticAMD - AMD EPYC-Rome Processor - '7' - AuthenticAMD - AMD EPYC-Rome Processor processor_cores: 1 processor_count: 8 processor_nproc: 8 processor_threads_per_core: 1 processor_vcpus: 8 product_name: OpenStack Nova product_serial: NA product_uuid: NA product_version: 26.3.1 python: executable: /usr/bin/python3 has_sslcontext: true type: cpython version: major: 3 micro: 25 minor: 9 releaselevel: final serial: 0 version_info: - 3 - 9 - 25 - final - 0 python_version: 3.9.25 real_group_id: 1000 real_user_id: 1000 selinux: config_mode: enforcing mode: enforcing policyvers: 33 status: enabled type: targeted selinux_python_present: true service_mgr: systemd ssh_host_key_ecdsa_public: AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOvQreKGmvEG1vi8GvwFBqECdihQVE6tUBzDanz/Lcee9GvGa+tH+Ub+xqX7rB/yRnjc8CJIJovHO3uwatRboZQ= ssh_host_key_ecdsa_public_keytype: ecdsa-sha2-nistp256 ssh_host_key_ed25519_public: AAAAC3NzaC1lZDI1NTE5AAAAIPeGQ/QINrFqQK52g8hKIwxs8VQj2W/JGaf9zdH9cBm2 ssh_host_key_ed25519_public_keytype: ssh-ed25519 ssh_host_key_rsa_public: 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCrMWhS0sfa8MFM6z46N9s5KdkDDfqBTBpmkqh+k0riEuOWyruqZ/IooEFKeQXaWr/u2j++Erw7byk1nZ2/1inxp5GHPD3tjMz1FjfMlsMM17kkMF8J45E52gQj2JzJS93rFYtLMkLQt6ydCYf8csUaQJz4YGv66NoK1WXUFkxSW12stZQyIjr7FHdmQ9o1VG6PeVlvovTjZdIDOrs2uyx3QLKn/3ZvZBR0nNCGXPAtVoyf4oV/JWSKdX0XOcgkV4QyD4B3CiLstDl04Q6XY8pkzc850JzuMo4L6IQoiI//65VAvU9EWiduDcC6Bb2UqYy5iwuJFLa6Qei0hCq5tk00PSx9JjT+rVhoTJveLD0GlQk2blm+bCOKdHDM87Eh/CiVxhUJhsbkp7ASUwcd1In/Ayr37VyWSHlbW7SDd9G5aQvRd7mOx6JYU5j+j8dmvku5+mmMisaik3SYrgImXY/Agd7BOsZD1BfRvPcqACsgYymCPzDxVVOGYD3Tt5poSUs= ssh_host_key_rsa_public_keytype: ssh-rsa swapfree_mb: 0 swaptotal_mb: 0 system: Linux system_capabilities: - '' system_capabilities_enforced: 'True' system_vendor: OpenStack Foundation uptime_seconds: 192 user_dir: /home/zuul user_gecos: '' user_gid: 1000 user_id: zuul user_shell: /bin/bash user_uid: 1000 userspace_architecture: x86_64 userspace_bits: '64' virtualization_role: guest virtualization_tech_guest: - openstack virtualization_tech_host: - kvm virtualization_type: openstack zuul_change_list: - service-telemetry-operator ansible_fibre_channel_wwn: [] ansible_fips: false ansible_forks: 5 ansible_form_factor: Other ansible_fqdn: controller ansible_host: 38.102.83.251 ansible_hostname: controller ansible_hostnqn: nqn.2014-08.org.nvmexpress:uuid:bf3e0a14-a5f8-4123-aa26-e7cad37b879a ansible_interfaces: - lo - eth0 ansible_inventory_sources: - /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1/inventory.yaml ansible_is_chroot: false ansible_iscsi_iqn: '' ansible_kernel: 5.14.0-645.el9.x86_64 ansible_kernel_version: '#1 SMP PREEMPT_DYNAMIC Fri Nov 28 14:01:17 UTC 2025' ansible_lo: active: true device: lo features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: on [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: on [fixed] tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: on [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: off [fixed] tx_scatter_gather: on [fixed] tx_scatter_gather_fraglist: on [fixed] tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] 
vlan_challenged: on [fixed] hw_timestamp_filters: [] ipv4: address: 127.0.0.1 broadcast: '' netmask: 255.0.0.0 network: 127.0.0.0 prefix: '8' ipv6: - address: ::1 prefix: '128' scope: host mtu: 65536 promisc: false timestamping: [] type: loopback ansible_loadavg: 15m: 0.3 1m: 0.55 5m: 0.68 ansible_local: {} ansible_locally_reachable_ips: ipv4: - 38.102.83.251 - 127.0.0.0/8 - 127.0.0.1 ipv6: - ::1 - fe80::f816:3eff:fe97:c9c3 ansible_lsb: {} ansible_lvm: N/A ansible_machine: x86_64 ansible_machine_id: 4d4ef2323cc3337bbfd9081b2a323b4e ansible_memfree_mb: 7161 ansible_memory_mb: nocache: free: 7372 used: 308 real: free: 7161 total: 7680 used: 519 swap: cached: 0 free: 0 total: 0 used: 0 ansible_memtotal_mb: 7680 ansible_mounts: - block_available: 20337100 block_size: 4096 block_total: 20954875 block_used: 617775 device: /dev/vda1 fstype: xfs inode_available: 41888405 inode_total: 41942512 inode_used: 54107 mount: / options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota size_available: 83300761600 size_total: 85831168000 uuid: fcf6b761-831a-48a7-9f5f-068b5063763f ansible_nodename: controller ansible_os_family: RedHat ansible_pkg_mgr: dnf ansible_playbook_python: /usr/lib/zuul/ansible/8/bin/python ansible_port: 22 ansible_proc_cmdline: BOOT_IMAGE: (hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 console: ttyS0,115200n8 crashkernel: 1G-2G:192M,2G-64G:256M,64G-:512M net.ifnames: '0' no_timer_check: true ro: true root: UUID=fcf6b761-831a-48a7-9f5f-068b5063763f ansible_processor: - '0' - AuthenticAMD - AMD EPYC-Rome Processor - '1' - AuthenticAMD - AMD EPYC-Rome Processor - '2' - AuthenticAMD - AMD EPYC-Rome Processor - '3' - AuthenticAMD - AMD EPYC-Rome Processor - '4' - AuthenticAMD - AMD EPYC-Rome Processor - '5' - AuthenticAMD - AMD EPYC-Rome Processor - '6' - AuthenticAMD - AMD EPYC-Rome Processor - '7' - AuthenticAMD - AMD EPYC-Rome Processor ansible_processor_cores: 1 ansible_processor_count: 8 ansible_processor_nproc: 8 ansible_processor_threads_per_core: 1 ansible_processor_vcpus: 8 ansible_product_name: OpenStack Nova ansible_product_serial: NA ansible_product_uuid: NA ansible_product_version: 26.3.1 ansible_python: executable: /usr/bin/python3 has_sslcontext: true type: cpython version: major: 3 micro: 25 minor: 9 releaselevel: final serial: 0 version_info: - 3 - 9 - 25 - final - 0 ansible_python_interpreter: auto ansible_python_version: 3.9.25 ansible_real_group_id: 1000 ansible_real_user_id: 1000 ansible_run_tags: - all ansible_scp_extra_args: -o PermitLocalCommand=no ansible_selinux: config_mode: enforcing mode: enforcing policyvers: 33 status: enabled type: targeted ansible_selinux_python_present: true ansible_service_mgr: systemd ansible_sftp_extra_args: -o PermitLocalCommand=no ansible_skip_tags: [] ansible_ssh_common_args: -o PermitLocalCommand=no ansible_ssh_executable: ssh ansible_ssh_extra_args: -o PermitLocalCommand=no ansible_ssh_host_key_ecdsa_public: AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOvQreKGmvEG1vi8GvwFBqECdihQVE6tUBzDanz/Lcee9GvGa+tH+Ub+xqX7rB/yRnjc8CJIJovHO3uwatRboZQ= ansible_ssh_host_key_ecdsa_public_keytype: ecdsa-sha2-nistp256 ansible_ssh_host_key_ed25519_public: AAAAC3NzaC1lZDI1NTE5AAAAIPeGQ/QINrFqQK52g8hKIwxs8VQj2W/JGaf9zdH9cBm2 ansible_ssh_host_key_ed25519_public_keytype: ssh-ed25519 ansible_ssh_host_key_rsa_public: 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCrMWhS0sfa8MFM6z46N9s5KdkDDfqBTBpmkqh+k0riEuOWyruqZ/IooEFKeQXaWr/u2j++Erw7byk1nZ2/1inxp5GHPD3tjMz1FjfMlsMM17kkMF8J45E52gQj2JzJS93rFYtLMkLQt6ydCYf8csUaQJz4YGv66NoK1WXUFkxSW12stZQyIjr7FHdmQ9o1VG6PeVlvovTjZdIDOrs2uyx3QLKn/3ZvZBR0nNCGXPAtVoyf4oV/JWSKdX0XOcgkV4QyD4B3CiLstDl04Q6XY8pkzc850JzuMo4L6IQoiI//65VAvU9EWiduDcC6Bb2UqYy5iwuJFLa6Qei0hCq5tk00PSx9JjT+rVhoTJveLD0GlQk2blm+bCOKdHDM87Eh/CiVxhUJhsbkp7ASUwcd1In/Ayr37VyWSHlbW7SDd9G5aQvRd7mOx6JYU5j+j8dmvku5+mmMisaik3SYrgImXY/Agd7BOsZD1BfRvPcqACsgYymCPzDxVVOGYD3Tt5poSUs= ansible_ssh_host_key_rsa_public_keytype: ssh-rsa ansible_swapfree_mb: 0 ansible_swaptotal_mb: 0 ansible_system: Linux ansible_system_capabilities: - '' ansible_system_capabilities_enforced: 'True' ansible_system_vendor: OpenStack Foundation ansible_uptime_seconds: 192 ansible_user: zuul ansible_user_dir: /home/zuul ansible_user_gecos: '' ansible_user_gid: 1000 ansible_user_id: zuul ansible_user_shell: /bin/bash ansible_user_uid: 1000 ansible_userspace_architecture: x86_64 ansible_userspace_bits: '64' ansible_verbosity: 1 ansible_version: full: 2.15.12 major: 2 minor: 15 revision: 12 string: 2.15.12 ansible_virtualization_role: guest ansible_virtualization_tech_guest: - openstack ansible_virtualization_tech_host: - kvm ansible_virtualization_type: openstack cifmw_architecture_repo: /home/zuul/src/github.com/openstack-k8s-operators/architecture cifmw_architecture_repo_relative: src/github.com/openstack-k8s-operators/architecture cifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw cifmw_basedir: /home/zuul/ci-framework-data cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_install_yamls_defaults: ADOPTED_EXTERNAL_NETWORK: 172.21.1.0/24 ADOPTED_INTERNALAPI_NETWORK: 172.17.1.0/24 ADOPTED_STORAGEMGMT_NETWORK: 172.20.1.0/24 ADOPTED_STORAGE_NETWORK: 172.18.1.0/24 ADOPTED_TENANT_NETWORK: 172.9.1.0/24 ANSIBLEEE: config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_BRANCH: main ANSIBLEEE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-operator-index:latest ANSIBLEEE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/kuttl-test.yaml ANSIBLEEE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/test/kuttl/tests ANSIBLEEE_KUTTL_NAMESPACE: ansibleee-kuttl-tests ANSIBLEEE_REPO: https://github.com/openstack-k8s-operators/openstack-ansibleee-operator ANSIBLEE_COMMIT_HASH: '' BARBICAN: config/samples/barbican_v1beta1_barbican.yaml BARBICAN_BRANCH: main BARBICAN_COMMIT_HASH: '' BARBICAN_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/config/samples/barbican_v1beta1_barbican.yaml BARBICAN_DEPL_IMG: unused BARBICAN_IMG: quay.io/openstack-k8s-operators/barbican-operator-index:latest BARBICAN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/kuttl-test.yaml BARBICAN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/test/kuttl/tests BARBICAN_KUTTL_NAMESPACE: barbican-kuttl-tests BARBICAN_REPO: https://github.com/openstack-k8s-operators/barbican-operator.git BARBICAN_SERVICE_ENABLED: 'true' BARBICAN_SIMPLE_CRYPTO_ENCRYPTION_KEY: sE**********U= BAREMETAL_BRANCH: main BAREMETAL_COMMIT_HASH: '' BAREMETAL_IMG: 
quay.io/openstack-k8s-operators/openstack-baremetal-operator-index:latest BAREMETAL_OS_CONTAINER_IMG: '' BAREMETAL_OS_IMG: '' BAREMETAL_REPO: https://github.com/openstack-k8s-operators/openstack-baremetal-operator.git BAREMETAL_TIMEOUT: 20m BASH_IMG: quay.io/openstack-k8s-operators/bash:latest BGP_ASN: '64999' BGP_LEAF_1: 100.65.4.1 BGP_LEAF_2: 100.64.4.1 BGP_OVN_ROUTING: 'false' BGP_PEER_ASN: '64999' BGP_SOURCE_IP: 172.30.4.2 BGP_SOURCE_IP6: f00d:f00d:f00d:f00d:f00d:f00d:f00d:42 BMAAS_BRIDGE_IPV4_PREFIX: 172.20.1.2/24 BMAAS_BRIDGE_IPV6_PREFIX: fd00:bbbb::2/64 BMAAS_INSTANCE_DISK_SIZE: '20' BMAAS_INSTANCE_MEMORY: '4096' BMAAS_INSTANCE_NAME_PREFIX: crc-bmaas BMAAS_INSTANCE_NET_MODEL: virtio BMAAS_INSTANCE_OS_VARIANT: centos-stream9 BMAAS_INSTANCE_VCPUS: '2' BMAAS_INSTANCE_VIRT_TYPE: kvm BMAAS_IPV4: 'true' BMAAS_IPV6: 'false' BMAAS_LIBVIRT_USER: sushyemu BMAAS_METALLB_ADDRESS_POOL: 172.20.1.64/26 BMAAS_METALLB_POOL_NAME: baremetal BMAAS_NETWORK_IPV4_PREFIX: 172.20.1.1/24 BMAAS_NETWORK_IPV6_PREFIX: fd00:bbbb::1/64 BMAAS_NETWORK_NAME: crc-bmaas BMAAS_NODE_COUNT: '1' BMAAS_OCP_INSTANCE_NAME: crc BMAAS_REDFISH_PASSWORD: password BMAAS_REDFISH_USERNAME: admin BMAAS_ROUTE_LIBVIRT_NETWORKS: crc-bmaas,crc,default BMAAS_SUSHY_EMULATOR_DRIVER: libvirt BMAAS_SUSHY_EMULATOR_IMAGE: quay.io/metal3-io/sushy-tools:latest BMAAS_SUSHY_EMULATOR_NAMESPACE: sushy-emulator BMAAS_SUSHY_EMULATOR_OS_CLIENT_CONFIG_FILE: /etc/openstack/clouds.yaml BMAAS_SUSHY_EMULATOR_OS_CLOUD: openstack BMH_NAMESPACE: openstack BMO_BRANCH: release-0.9 BMO_CLEANUP: 'true' BMO_COMMIT_HASH: '' BMO_IPA_BRANCH: stable/2024.1 BMO_IRONIC_HOST: 192.168.122.10 BMO_PROVISIONING_INTERFACE: '' BMO_REPO: https://github.com/metal3-io/baremetal-operator BMO_SETUP: '' BMO_SETUP_ROUTE_REPLACE: 'true' BM_CTLPLANE_INTERFACE: enp1s0 BM_INSTANCE_MEMORY: '8192' BM_INSTANCE_NAME_PREFIX: edpm-compute-baremetal BM_INSTANCE_NAME_SUFFIX: '0' BM_NETWORK_NAME: default BM_NODE_COUNT: '1' BM_ROOT_PASSWORD: '' BM_ROOT_PASSWORD_SECRET: '' CEILOMETER_CENTRAL_DEPL_IMG: unused CEILOMETER_NOTIFICATION_DEPL_IMG: unused CEPH_BRANCH: release-1.15 CEPH_CLIENT: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/toolbox.yaml CEPH_COMMON: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/common.yaml CEPH_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/cluster-test.yaml CEPH_CRDS: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/crds.yaml CEPH_IMG: quay.io/ceph/demo:latest-squid CEPH_OP: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/operator-openshift.yaml CEPH_REPO: https://github.com/rook/rook.git CERTMANAGER_TIMEOUT: 300s CHECKOUT_FROM_OPENSTACK_REF: 'true' CINDER: config/samples/cinder_v1beta1_cinder.yaml CINDERAPI_DEPL_IMG: unused CINDERBKP_DEPL_IMG: unused CINDERSCH_DEPL_IMG: unused CINDERVOL_DEPL_IMG: unused CINDER_BRANCH: main CINDER_COMMIT_HASH: '' CINDER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/config/samples/cinder_v1beta1_cinder.yaml CINDER_IMG: quay.io/openstack-k8s-operators/cinder-operator-index:latest CINDER_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/kuttl-test.yaml CINDER_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/test/kuttl/tests CINDER_KUTTL_NAMESPACE: cinder-kuttl-tests CINDER_REPO: https://github.com/openstack-k8s-operators/cinder-operator.git CLEANUP_DIR_CMD: rm -Rf CRC_BGP_NIC_1_MAC: 
'52:54:00:11:11:11' CRC_BGP_NIC_2_MAC: '52:54:00:11:11:12' CRC_HTTPS_PROXY: '' CRC_HTTP_PROXY: '' CRC_STORAGE_NAMESPACE: crc-storage CRC_STORAGE_RETRIES: '3' CRC_URL: '''https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/crc/latest/crc-linux-amd64.tar.xz''' CRC_VERSION: latest DATAPLANE_ANSIBLE_SECRET: dataplane-ansible-ssh-private-key-secret DATAPLANE_ANSIBLE_USER: '' DATAPLANE_COMPUTE_IP: 192.168.122.100 DATAPLANE_CONTAINER_PREFIX: openstack DATAPLANE_CONTAINER_TAG: current-podified DATAPLANE_CUSTOM_SERVICE_RUNNER_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest DATAPLANE_DEFAULT_GW: 192.168.122.1 DATAPLANE_EXTRA_NOVA_CONFIG_FILE: /dev/null DATAPLANE_GROWVOLS_ARGS: /=8GB /tmp=1GB /home=1GB /var=100% DATAPLANE_KUSTOMIZE_SCENARIO: preprovisioned DATAPLANE_NETWORKER_IP: 192.168.122.200 DATAPLANE_NETWORK_INTERFACE_NAME: eth0 DATAPLANE_NOVA_NFS_PATH: '' DATAPLANE_NTP_SERVER: pool.ntp.org DATAPLANE_PLAYBOOK: osp.edpm.download_cache DATAPLANE_REGISTRY_URL: quay.io/podified-antelope-centos9 DATAPLANE_RUNNER_IMG: '' DATAPLANE_SERVER_ROLE: compute DATAPLANE_SSHD_ALLOWED_RANGES: '[''192.168.122.0/24'']' DATAPLANE_TIMEOUT: 30m DATAPLANE_TLS_ENABLED: 'true' DATAPLANE_TOTAL_NETWORKER_NODES: '1' DATAPLANE_TOTAL_NODES: '1' DBSERVICE: galera DESIGNATE: config/samples/designate_v1beta1_designate.yaml DESIGNATE_BRANCH: main DESIGNATE_COMMIT_HASH: '' DESIGNATE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/config/samples/designate_v1beta1_designate.yaml DESIGNATE_IMG: quay.io/openstack-k8s-operators/designate-operator-index:latest DESIGNATE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/kuttl-test.yaml DESIGNATE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/test/kuttl/tests DESIGNATE_KUTTL_NAMESPACE: designate-kuttl-tests DESIGNATE_REPO: https://github.com/openstack-k8s-operators/designate-operator.git DNSDATA: config/samples/network_v1beta1_dnsdata.yaml DNSDATA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsdata.yaml DNSMASQ: config/samples/network_v1beta1_dnsmasq.yaml DNSMASQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsmasq.yaml DNS_DEPL_IMG: unused DNS_DOMAIN: localdomain DOWNLOAD_TOOLS_SELECTION: all EDPM_ATTACH_EXTNET: 'true' EDPM_COMPUTE_ADDITIONAL_HOST_ROUTES: '''[]''' EDPM_COMPUTE_ADDITIONAL_NETWORKS: '''[]''' EDPM_COMPUTE_CELLS: '1' EDPM_COMPUTE_CEPH_ENABLED: 'true' EDPM_COMPUTE_CEPH_NOVA: 'true' EDPM_COMPUTE_DHCP_AGENT_ENABLED: 'true' EDPM_COMPUTE_SRIOV_ENABLED: 'true' EDPM_COMPUTE_SUFFIX: '0' EDPM_CONFIGURE_DEFAULT_ROUTE: 'true' EDPM_CONFIGURE_HUGEPAGES: 'false' EDPM_CONFIGURE_NETWORKING: 'true' EDPM_FIRSTBOOT_EXTRA: /tmp/edpm-firstboot-extra EDPM_NETWORKER_SUFFIX: '0' EDPM_TOTAL_NETWORKERS: '1' EDPM_TOTAL_NODES: '1' GALERA_REPLICAS: '' GENERATE_SSH_KEYS: 'true' GIT_CLONE_OPTS: '' GLANCE: config/samples/glance_v1beta1_glance.yaml GLANCEAPI_DEPL_IMG: unused GLANCE_BRANCH: main GLANCE_COMMIT_HASH: '' GLANCE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/config/samples/glance_v1beta1_glance.yaml GLANCE_IMG: quay.io/openstack-k8s-operators/glance-operator-index:latest GLANCE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/kuttl-test.yaml GLANCE_KUTTL_DIR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/test/kuttl/tests GLANCE_KUTTL_NAMESPACE: glance-kuttl-tests GLANCE_REPO: https://github.com/openstack-k8s-operators/glance-operator.git HEAT: config/samples/heat_v1beta1_heat.yaml HEATAPI_DEPL_IMG: unused HEATCFNAPI_DEPL_IMG: unused HEATENGINE_DEPL_IMG: unused HEAT_AUTH_ENCRYPTION_KEY: 76**********f0 HEAT_BRANCH: main HEAT_COMMIT_HASH: '' HEAT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/config/samples/heat_v1beta1_heat.yaml HEAT_IMG: quay.io/openstack-k8s-operators/heat-operator-index:latest HEAT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/kuttl-test.yaml HEAT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/test/kuttl/tests HEAT_KUTTL_NAMESPACE: heat-kuttl-tests HEAT_REPO: https://github.com/openstack-k8s-operators/heat-operator.git HEAT_SERVICE_ENABLED: 'true' HORIZON: config/samples/horizon_v1beta1_horizon.yaml HORIZON_BRANCH: main HORIZON_COMMIT_HASH: '' HORIZON_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/config/samples/horizon_v1beta1_horizon.yaml HORIZON_DEPL_IMG: unused HORIZON_IMG: quay.io/openstack-k8s-operators/horizon-operator-index:latest HORIZON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/kuttl-test.yaml HORIZON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/test/kuttl/tests HORIZON_KUTTL_NAMESPACE: horizon-kuttl-tests HORIZON_REPO: https://github.com/openstack-k8s-operators/horizon-operator.git INFRA_BRANCH: main INFRA_COMMIT_HASH: '' INFRA_IMG: quay.io/openstack-k8s-operators/infra-operator-index:latest INFRA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/kuttl-test.yaml INFRA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/test/kuttl/tests INFRA_KUTTL_NAMESPACE: infra-kuttl-tests INFRA_REPO: https://github.com/openstack-k8s-operators/infra-operator.git INSTALL_CERT_MANAGER: 'true' INSTALL_NMSTATE: true || false INSTALL_NNCP: true || false INTERNALAPI_HOST_ROUTES: '' IPV6_LAB_IPV4_NETWORK_IPADDRESS: 172.30.0.1/24 IPV6_LAB_IPV6_NETWORK_IPADDRESS: fd00:abcd:abcd:fc00::1/64 IPV6_LAB_LIBVIRT_STORAGE_POOL: default IPV6_LAB_MANAGE_FIREWALLD: 'true' IPV6_LAB_NAT64_HOST_IPV4: 172.30.0.2/24 IPV6_LAB_NAT64_HOST_IPV6: fd00:abcd:abcd:fc00::2/64 IPV6_LAB_NAT64_INSTANCE_NAME: nat64-router IPV6_LAB_NAT64_IPV6_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_NAT64_TAYGA_DYNAMIC_POOL: 192.168.255.0/24 IPV6_LAB_NAT64_TAYGA_IPV4: 192.168.255.1 IPV6_LAB_NAT64_TAYGA_IPV6: fd00:abcd:abcd:fc00::3 IPV6_LAB_NAT64_TAYGA_IPV6_PREFIX: fd00:abcd:abcd:fcff::/96 IPV6_LAB_NAT64_UPDATE_PACKAGES: 'false' IPV6_LAB_NETWORK_NAME: nat64 IPV6_LAB_SNO_CLUSTER_NETWORK: fd00:abcd:0::/48 IPV6_LAB_SNO_HOST_IP: fd00:abcd:abcd:fc00::11 IPV6_LAB_SNO_HOST_PREFIX: '64' IPV6_LAB_SNO_INSTANCE_NAME: sno IPV6_LAB_SNO_MACHINE_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_SNO_OCP_MIRROR_URL: https://mirror.openshift.com/pub/openshift-v4/clients/ocp IPV6_LAB_SNO_OCP_VERSION: latest-4.14 IPV6_LAB_SNO_SERVICE_NETWORK: fd00:abcd:abcd:fc03::/112 IPV6_LAB_SSH_PUB_KEY: /home/zuul/.ssh/id_rsa.pub IPV6_LAB_WORK_DIR: /home/zuul/.ipv6lab IRONIC: config/samples/ironic_v1beta1_ironic.yaml IRONICAPI_DEPL_IMG: unused IRONICCON_DEPL_IMG: unused IRONICINS_DEPL_IMG: unused IRONICNAG_DEPL_IMG: unused IRONICPXE_DEPL_IMG: unused IRONIC_BRANCH: main IRONIC_COMMIT_HASH: '' IRONIC_CR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/config/samples/ironic_v1beta1_ironic.yaml IRONIC_IMAGE: quay.io/metal3-io/ironic IRONIC_IMAGE_TAG: release-24.1 IRONIC_IMG: quay.io/openstack-k8s-operators/ironic-operator-index:latest IRONIC_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/kuttl-test.yaml IRONIC_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/test/kuttl/tests IRONIC_KUTTL_NAMESPACE: ironic-kuttl-tests IRONIC_REPO: https://github.com/openstack-k8s-operators/ironic-operator.git KEYSTONEAPI: config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_DEPL_IMG: unused KEYSTONE_BRANCH: main KEYSTONE_COMMIT_HASH: '' KEYSTONE_FEDERATION_CLIENT_SECRET: CO**********6f KEYSTONE_FEDERATION_CRYPTO_PASSPHRASE: openstack KEYSTONE_IMG: quay.io/openstack-k8s-operators/keystone-operator-index:latest KEYSTONE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/kuttl-test.yaml KEYSTONE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/test/kuttl/tests KEYSTONE_KUTTL_NAMESPACE: keystone-kuttl-tests KEYSTONE_REPO: https://github.com/openstack-k8s-operators/keystone-operator.git KUBEADMIN_PWD: '12345678' LIBVIRT_SECRET: libvirt-secret LOKI_DEPLOY_MODE: openshift-network LOKI_DEPLOY_NAMESPACE: netobserv LOKI_DEPLOY_SIZE: 1x.demo LOKI_NAMESPACE: openshift-operators-redhat LOKI_OPERATOR_GROUP: openshift-operators-redhat-loki LOKI_SUBSCRIPTION: loki-operator LVMS_CR: '1' MANILA: config/samples/manila_v1beta1_manila.yaml MANILAAPI_DEPL_IMG: unused MANILASCH_DEPL_IMG: unused MANILASHARE_DEPL_IMG: unused MANILA_BRANCH: main MANILA_COMMIT_HASH: '' MANILA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/config/samples/manila_v1beta1_manila.yaml MANILA_IMG: quay.io/openstack-k8s-operators/manila-operator-index:latest MANILA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/kuttl-test.yaml MANILA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/test/kuttl/tests MANILA_KUTTL_NAMESPACE: manila-kuttl-tests MANILA_REPO: https://github.com/openstack-k8s-operators/manila-operator.git MANILA_SERVICE_ENABLED: 'true' MARIADB: config/samples/mariadb_v1beta1_galera.yaml MARIADB_BRANCH: main MARIADB_CHAINSAW_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/config.yaml MARIADB_CHAINSAW_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/tests MARIADB_CHAINSAW_NAMESPACE: mariadb-chainsaw-tests MARIADB_COMMIT_HASH: '' MARIADB_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/config/samples/mariadb_v1beta1_galera.yaml MARIADB_DEPL_IMG: unused MARIADB_IMG: quay.io/openstack-k8s-operators/mariadb-operator-index:latest MARIADB_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/kuttl-test.yaml MARIADB_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/kuttl/tests MARIADB_KUTTL_NAMESPACE: mariadb-kuttl-tests MARIADB_REPO: https://github.com/openstack-k8s-operators/mariadb-operator.git MEMCACHED: config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_CR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_DEPL_IMG: unused METADATA_SHARED_SECRET: '12**********42' METALLB_IPV6_POOL: fd00:aaaa::80-fd00:aaaa::90 METALLB_POOL: 192.168.122.80-192.168.122.90 MICROSHIFT: '0' NAMESPACE: openstack NETCONFIG: config/samples/network_v1beta1_netconfig.yaml NETCONFIG_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_netconfig.yaml NETCONFIG_DEPL_IMG: unused NETOBSERV_DEPLOY_NAMESPACE: netobserv NETOBSERV_NAMESPACE: openshift-netobserv-operator NETOBSERV_OPERATOR_GROUP: openshift-netobserv-operator-net NETOBSERV_SUBSCRIPTION: netobserv-operator NETWORK_BGP: 'false' NETWORK_DESIGNATE_ADDRESS_PREFIX: 172.28.0 NETWORK_DESIGNATE_EXT_ADDRESS_PREFIX: 172.50.0 NETWORK_INTERNALAPI_ADDRESS_PREFIX: 172.17.0 NETWORK_ISOLATION: 'true' NETWORK_ISOLATION_INSTANCE_NAME: crc NETWORK_ISOLATION_IPV4: 'true' NETWORK_ISOLATION_IPV4_ADDRESS: 172.16.1.1/24 NETWORK_ISOLATION_IPV4_NAT: 'true' NETWORK_ISOLATION_IPV6: 'false' NETWORK_ISOLATION_IPV6_ADDRESS: fd00:aaaa::1/64 NETWORK_ISOLATION_IP_ADDRESS: 192.168.122.10 NETWORK_ISOLATION_MAC: '52:54:00:11:11:10' NETWORK_ISOLATION_NETWORK_NAME: net-iso NETWORK_ISOLATION_NET_NAME: default NETWORK_ISOLATION_USE_DEFAULT_NETWORK: 'true' NETWORK_MTU: '1500' NETWORK_STORAGEMGMT_ADDRESS_PREFIX: 172.20.0 NETWORK_STORAGE_ADDRESS_PREFIX: 172.18.0 NETWORK_STORAGE_MACVLAN: '' NETWORK_TENANT_ADDRESS_PREFIX: 172.19.0 NETWORK_VLAN_START: '20' NETWORK_VLAN_STEP: '1' NEUTRONAPI: config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_DEPL_IMG: unused NEUTRON_BRANCH: main NEUTRON_COMMIT_HASH: '' NEUTRON_IMG: quay.io/openstack-k8s-operators/neutron-operator-index:latest NEUTRON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/kuttl-test.yaml NEUTRON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/test/kuttl/tests NEUTRON_KUTTL_NAMESPACE: neutron-kuttl-tests NEUTRON_REPO: https://github.com/openstack-k8s-operators/neutron-operator.git NFS_HOME: /home/nfs NMSTATE_NAMESPACE: openshift-nmstate NMSTATE_OPERATOR_GROUP: openshift-nmstate-tn6k8 NMSTATE_SUBSCRIPTION: kubernetes-nmstate-operator NNCP_ADDITIONAL_HOST_ROUTES: '' NNCP_BGP_1_INTERFACE: enp7s0 NNCP_BGP_1_IP_ADDRESS: 100.65.4.2 NNCP_BGP_2_INTERFACE: enp8s0 NNCP_BGP_2_IP_ADDRESS: 100.64.4.2 NNCP_BRIDGE: ospbr NNCP_CLEANUP_TIMEOUT: 120s NNCP_CTLPLANE_IPV6_ADDRESS_PREFIX: 'fd00:aaaa::' NNCP_CTLPLANE_IPV6_ADDRESS_SUFFIX: '10' NNCP_CTLPLANE_IP_ADDRESS_PREFIX: 192.168.122 NNCP_CTLPLANE_IP_ADDRESS_SUFFIX: '10' NNCP_DNS_SERVER: 192.168.122.1 NNCP_DNS_SERVER_IPV6: fd00:aaaa::1 NNCP_GATEWAY: 192.168.122.1 NNCP_GATEWAY_IPV6: fd00:aaaa::1 NNCP_INTERFACE: enp6s0 NNCP_NODES: '' NNCP_TIMEOUT: 240s NOVA: config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_BRANCH: main NOVA_COMMIT_HASH: '' NOVA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/nova-operator/config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_IMG: quay.io/openstack-k8s-operators/nova-operator-index:latest NOVA_REPO: https://github.com/openstack-k8s-operators/nova-operator.git NUMBER_OF_INSTANCES: '1' OCP_NETWORK_NAME: crc OCTAVIA: config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_BRANCH: main OCTAVIA_COMMIT_HASH: '' OCTAVIA_CR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_IMG: quay.io/openstack-k8s-operators/octavia-operator-index:latest OCTAVIA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/kuttl-test.yaml OCTAVIA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/test/kuttl/tests OCTAVIA_KUTTL_NAMESPACE: octavia-kuttl-tests OCTAVIA_REPO: https://github.com/openstack-k8s-operators/octavia-operator.git OKD: 'false' OPENSTACK_BRANCH: main OPENSTACK_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-bundle:latest OPENSTACK_COMMIT_HASH: '' OPENSTACK_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_CRDS_DIR: openstack_crds OPENSTACK_CTLPLANE: config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_IMG: quay.io/openstack-k8s-operators/openstack-operator-index:latest OPENSTACK_K8S_BRANCH: main OPENSTACK_K8S_TAG: latest OPENSTACK_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/kuttl-test.yaml OPENSTACK_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/test/kuttl/tests OPENSTACK_KUTTL_NAMESPACE: openstack-kuttl-tests OPENSTACK_NEUTRON_CUSTOM_CONF: '' OPENSTACK_REPO: https://github.com/openstack-k8s-operators/openstack-operator.git OPENSTACK_STORAGE_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-storage-bundle:latest OPERATOR_BASE_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator OPERATOR_CHANNEL: '' OPERATOR_NAMESPACE: openstack-operators OPERATOR_SOURCE: '' OPERATOR_SOURCE_NAMESPACE: '' OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm OVNCONTROLLER: config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_NMAP: 'true' OVNDBS: config/samples/ovn_v1beta1_ovndbcluster.yaml OVNDBS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovndbcluster.yaml OVNNORTHD: config/samples/ovn_v1beta1_ovnnorthd.yaml OVNNORTHD_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovnnorthd.yaml OVN_BRANCH: main OVN_COMMIT_HASH: '' OVN_IMG: quay.io/openstack-k8s-operators/ovn-operator-index:latest OVN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/kuttl-test.yaml OVN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/test/kuttl/tests OVN_KUTTL_NAMESPACE: ovn-kuttl-tests OVN_REPO: https://github.com/openstack-k8s-operators/ovn-operator.git PASSWORD: '12**********78' PLACEMENTAPI: config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_DEPL_IMG: unused PLACEMENT_BRANCH: main PLACEMENT_COMMIT_HASH: '' PLACEMENT_IMG: quay.io/openstack-k8s-operators/placement-operator-index:latest PLACEMENT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/kuttl-test.yaml PLACEMENT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/test/kuttl/tests 
PLACEMENT_KUTTL_NAMESPACE: placement-kuttl-tests PLACEMENT_REPO: https://github.com/openstack-k8s-operators/placement-operator.git PULL_SECRET: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/pull-secret.txt RABBITMQ: docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_BRANCH: patches RABBITMQ_COMMIT_HASH: '' RABBITMQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rabbitmq-operator/docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_DEPL_IMG: unused RABBITMQ_IMG: quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest RABBITMQ_REPO: https://github.com/openstack-k8s-operators/rabbitmq-cluster-operator.git REDHAT_OPERATORS: 'false' REDIS: config/samples/redis_v1beta1_redis.yaml REDIS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator-redis/config/samples/redis_v1beta1_redis.yaml REDIS_DEPL_IMG: unused RH_REGISTRY_PWD: '' RH_REGISTRY_USER: '' SECRET: os**********et SG_CORE_DEPL_IMG: unused STANDALONE_COMPUTE_DRIVER: libvirt STANDALONE_EXTERNAL_NET_PREFFIX: 172.21.0 STANDALONE_INTERNALAPI_NET_PREFIX: 172.17.0 STANDALONE_STORAGEMGMT_NET_PREFIX: 172.20.0 STANDALONE_STORAGE_NET_PREFIX: 172.18.0 STANDALONE_TENANT_NET_PREFIX: 172.19.0 STORAGEMGMT_HOST_ROUTES: '' STORAGE_CLASS: local-storage STORAGE_HOST_ROUTES: '' SWIFT: config/samples/swift_v1beta1_swift.yaml SWIFT_BRANCH: main SWIFT_COMMIT_HASH: '' SWIFT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/config/samples/swift_v1beta1_swift.yaml SWIFT_IMG: quay.io/openstack-k8s-operators/swift-operator-index:latest SWIFT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/kuttl-test.yaml SWIFT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests SWIFT_KUTTL_NAMESPACE: swift-kuttl-tests SWIFT_REPO: https://github.com/openstack-k8s-operators/swift-operator.git TELEMETRY: config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_BRANCH: main TELEMETRY_COMMIT_HASH: '' TELEMETRY_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_IMG: quay.io/openstack-k8s-operators/telemetry-operator-index:latest TELEMETRY_KUTTL_BASEDIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator TELEMETRY_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/kuttl-test.yaml TELEMETRY_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/test/kuttl/suites TELEMETRY_KUTTL_NAMESPACE: telemetry-kuttl-tests TELEMETRY_KUTTL_RELPATH: test/kuttl/suites TELEMETRY_REPO: https://github.com/openstack-k8s-operators/telemetry-operator.git TENANT_HOST_ROUTES: '' TIMEOUT: 300s TLS_ENABLED: 'false' tripleo_deploy: 'export REGISTRY_PWD:' cifmw_install_yamls_environment: CHECKOUT_FROM_OPENSTACK_REF: 'true' KUBECONFIG: /home/zuul/.crc/machines/crc/kubeconfig OPENSTACK_K8S_BRANCH: main OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm cifmw_installyamls_repos: /home/zuul/src/github.com/openstack-k8s-operators/install_yamls cifmw_installyamls_repos_relative: src/github.com/openstack-k8s-operators/install_yamls cifmw_openshift_api: https://api.crc.testing:6443 cifmw_openshift_context: default/api-crc-testing:6443/kubeadmin cifmw_openshift_kubeconfig: /home/zuul/.crc/machines/crc/kubeconfig cifmw_openshift_password: '12**********89' 
cifmw_openshift_skip_tls_verify: true cifmw_openshift_token: sha256~pOUgk-9850hxxEZK6G0UMKUA3Hu8vVpuezKn-haJb_U cifmw_openshift_user: kubeadmin cifmw_path: /home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:~/.crc/bin:~/.crc/bin/oc:~/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin cifmw_project_dir: src/github.com/openstack-k8s-operators/ci-framework cifmw_project_dir_absolute: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework cifmw_run_tests: false cifmw_status: changed: false failed: false stat: atime: 1765216439.9756842 attr_flags: '' attributes: [] block_size: 4096 blocks: 8 charset: binary ctime: 1765216443.7497857 dev: 64513 device_type: 0 executable: true exists: true gid: 1000 gr_name: zuul inode: 41956871 isblk: false ischr: false isdir: true isfifo: false isgid: false islnk: false isreg: false issock: false isuid: false mimetype: inode/directory mode: '0755' mtime: 1765216443.7497857 nlink: 21 path: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework pw_name: zuul readable: true rgrp: true roth: true rusr: true size: 4096 uid: 1000 version: '1325166596' wgrp: false woth: false writeable: true wusr: true xgrp: true xoth: true xusr: true cifmw_success_flag: changed: false failed: false stat: exists: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller crc_ci_bootstrap_cloud_name: vexxhost crc_ci_bootstrap_instance_default_net_config: mtu: 1500 range: 192.168.122.0/24 crc_ci_bootstrap_instance_nm_vlan_networks: - key: internal-api value: ip: 172.17.0.5 - key: storage value: ip: 172.18.0.5 - key: tenant value: ip: 172.19.0.5 crc_ci_bootstrap_instance_parent_port_create_yaml: admin_state_up: true allowed_address_pairs: [] binding_host_id: null binding_profile: {} binding_vif_details: {} binding_vif_type: null binding_vnic_type: normal created_at: '2025-12-08T17:39:31Z' data_plane_status: null description: '' device_id: '' device_owner: '' device_profile: null dns_assignment: - fqdn: host-192-168-122-10.openstacklocal. 
hostname: host-192-168-122-10 ip_address: 192.168.122.10 dns_domain: '' dns_name: '' extra_dhcp_opts: [] fixed_ips: - ip_address: 192.168.122.10 subnet_id: 1ec71021-8196-48c3-b107-9041e6f5f679 hardware_offload_type: null hints: '' id: d37cddfa-716b-4541-992b-5180463c6809 ip_allocation: immediate mac_address: fa:16:3e:e6:79:2f name: crc-32c1a977-c4dc-4b4f-b307-ff2a2f4e57f1 network_id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb numa_affinity_policy: null port_security_enabled: false project_id: 4b633c451ac74233be3721a3635275e5 propagate_uplink_status: null qos_network_policy_id: null qos_policy_id: null resource_request: null revision_number: 1 security_group_ids: [] status: DOWN tags: [] trunk_details: null trusted: null updated_at: '2025-12-08T17:39:31Z' crc_ci_bootstrap_network_name: zuul-ci-net-fdae5567 crc_ci_bootstrap_networking: instances: controller: networks: default: ip: 192.168.122.11 crc: networks: default: ip: 192.168.122.10 internal-api: ip: 172.17.0.5 storage: ip: 172.18.0.5 tenant: ip: 172.19.0.5 networks: default: mtu: 1500 range: 192.168.122.0/24 internal-api: range: 172.17.0.0/24 vlan: 20 storage: range: 172.18.0.0/24 vlan: 21 tenant: range: 172.19.0.0/24 vlan: 22 crc_ci_bootstrap_networks_out: controller: default: connection: ci-private-network gw: 192.168.122.1 iface: eth1 ip: 192.168.122.11/24 mac: fa:16:3e:6a:de:3b mtu: 1500 crc: default: connection: ci-private-network gw: 192.168.122.1 iface: ens7 ip: 192.168.122.10/24 mac: fa:16:3e:e6:79:2f mtu: 1500 internal-api: connection: ci-private-network-20 iface: ens7.20 ip: 172.17.0.5/24 mac: 52:54:00:84:35:42 mtu: '1496' parent_iface: ens7 vlan: 20 storage: connection: ci-private-network-21 iface: ens7.21 ip: 172.18.0.5/24 mac: 52:54:00:9c:32:12 mtu: '1496' parent_iface: ens7 vlan: 21 tenant: connection: ci-private-network-22 iface: ens7.22 ip: 172.19.0.5/24 mac: 52:54:00:ec:da:21 mtu: '1496' parent_iface: ens7 vlan: 22 crc_ci_bootstrap_private_net_create_yaml: admin_state_up: true availability_zone_hints: - nova availability_zones: [] created_at: '2025-12-08T17:38:55Z' description: '' dns_domain: '' id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb ipv4_address_scope: null ipv6_address_scope: null is_default: false is_vlan_qinq: null is_vlan_transparent: false l2_adjacency: true mtu: 1500 name: zuul-ci-net-fdae5567 port_security_enabled: false project_id: 4b633c451ac74233be3721a3635275e5 provider:network_type: null provider:physical_network: null provider:segmentation_id: null qos_policy_id: null revision_number: 1 router:external: false segments: null shared: false status: ACTIVE subnets: [] tags: [] updated_at: '2025-12-08T17:38:55Z' crc_ci_bootstrap_private_router_create_yaml: admin_state_up: true availability_zone_hints: - nova availability_zones: [] created_at: '2025-12-08T17:39:01Z' description: '' enable_ndp_proxy: null external_gateway_info: enable_snat: true external_fixed_ips: - ip_address: 38.102.83.145 subnet_id: 3169b11b-94b1-4bc9-9727-4fdbbe15e56e network_id: 7abff1a9-a103-46d0-979a-1f1e599f4f41 flavor_id: null id: edc0cd0e-14dd-4588-9e19-d36444fdd18f name: zuul-ci-subnet-router-fdae5567 project_id: 4b633c451ac74233be3721a3635275e5 revision_number: 3 routes: [] status: ACTIVE tags: [] tenant_id: 4b633c451ac74233be3721a3635275e5 updated_at: '2025-12-08T17:39:02Z' crc_ci_bootstrap_private_subnet_create_yaml: allocation_pools: - end: 192.168.122.254 start: 192.168.122.2 cidr: 192.168.122.0/24 created_at: '2025-12-08T17:38:58Z' description: '' dns_nameservers: [] dns_publish_fixed_ip: null enable_dhcp: false gateway_ip: 
192.168.122.1 host_routes: [] id: 1ec71021-8196-48c3-b107-9041e6f5f679 ip_version: 4 ipv6_address_mode: null ipv6_ra_mode: null name: zuul-ci-subnet-fdae5567 network_id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb project_id: 4b633c451ac74233be3721a3635275e5 revision_number: 0 segment_id: null service_types: [] subnetpool_id: null tags: [] updated_at: '2025-12-08T17:38:58Z' crc_ci_bootstrap_provider_dns: - 199.204.44.24 - 199.204.47.54 crc_ci_bootstrap_router_name: zuul-ci-subnet-router-fdae5567 crc_ci_bootstrap_subnet_name: zuul-ci-subnet-fdae5567 discovered_interpreter_python: /usr/bin/python3 enable_ramdisk: true gather_subset: - min group_names: - ungrouped groups: all: - controller - crc ungrouped: *id001 zuul_unreachable: [] inventory_dir: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1 inventory_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1/inventory.yaml inventory_hostname: controller inventory_hostname_short: controller logfiles_dest_dir: /home/zuul/ci-framework-data/logs/2025-12-08_18-02 module_setup: true nodepool: az: nova cloud: vexxhost-nodepool-tripleo external_id: 2651912f-4167-4227-a778-d37fa1159493 host_id: b012578aee5370fae73eb6c92c4679617335173cccca05390470f411 interface_ip: 38.102.83.251 label: cloud-centos-9-stream-tripleo-vexxhost private_ipv4: 38.102.83.251 private_ipv6: null provider: vexxhost-nodepool-tripleo public_ipv4: 38.102.83.251 public_ipv6: '' region: RegionOne slot: null omit: __omit_place_holder__f3b19479a7eb45f4c51a6b72a0b1b3bc3dca197f playbook_dir: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/untrusted/project_0/github.com/openstack-k8s-operators/ci-framework/ci/playbooks podified_validation: true push_registry: quay.rdoproject.org quay_login_secret_name: quay_nextgen_zuulgithubci registry_login_enabled: true scenario: catalog_deploy unsafe_vars: ansible_connection: ssh ansible_host: 38.102.83.251 ansible_port: 22 ansible_python_interpreter: auto ansible_user: zuul cifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_openshift_api: api.crc.testing:6443 cifmw_openshift_kubeconfig: '{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig' cifmw_openshift_password: '12**********89' cifmw_openshift_skip_tls_verify: true cifmw_openshift_user: kubeadmin cifmw_run_tests: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller crc_ci_bootstrap_cloud_name: '{{ nodepool.cloud | replace(''-nodepool-tripleo'','''') }}' crc_ci_bootstrap_networking: instances: controller: networks: default: ip: 192.168.122.11 crc: networks: default: ip: 192.168.122.10 internal-api: ip: 172.17.0.5 storage: ip: 172.18.0.5 tenant: ip: 172.19.0.5 networks: default: mtu: 1500 range: 192.168.122.0/24 internal-api: range: 172.17.0.0/24 vlan: 20 storage: range: 172.18.0.0/24 vlan: 21 tenant: range: 172.19.0.0/24 vlan: 22 enable_ramdisk: true nodepool: az: nova cloud: vexxhost-nodepool-tripleo external_id: 2651912f-4167-4227-a778-d37fa1159493 host_id: b012578aee5370fae73eb6c92c4679617335173cccca05390470f411 interface_ip: 38.102.83.251 label: cloud-centos-9-stream-tripleo-vexxhost private_ipv4: 38.102.83.251 private_ipv6: null provider: vexxhost-nodepool-tripleo public_ipv4: 38.102.83.251 public_ipv6: '' region: RegionOne slot: null podified_validation: true push_registry: quay.rdoproject.org quay_login_secret_name: quay_nextgen_zuulgithubci registry_login_enabled: true 
scenario: catalog_deploy zuul_log_collection: true zuul: _inheritance_path: - '' - '' - '' - '' - '' - '' - '' - '' - '' ansible_version: '8' attempts: 1 branch: master build: fdae556768574d6f9092d7162dc9ae0f build_refs: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null buildset: c405b24f52df4ff1a39b37dcfc476a60 buildset_refs: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 child_jobs: [] commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb event_id: 0add7250-d45c-11f0-86cc-0eee4913030a executor: hostname: ze03.softwarefactory-project.io inventory_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/inventory.yaml log_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/logs result_data_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/results.json src_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/src work_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work items: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null job: stf-crc-ocp_420-catalog_deploy jobtags: [] max_attempts: 1 message: QWRkIE9DUCA0LjIwIGpvYnMKCkFkZCBqb2IgZGVmaW5pdGlvbnMgdXNpbmcgY3JjLWNsb3VkLW9jcC00LTIwLTEtM3hsIGFzIGJhc2UgaW1hZ2UNCg0KQ2xvc2VzOiBPU1BSSC0yMTg4MQ== patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb pipeline: github-check playbook_context: playbook_projects: trusted/project_0/review.rdoproject.org/config: canonical_name: review.rdoproject.org/config checkout: master commit: 40052f923df77143f1c9739304c4b4221346825f trusted/project_1/opendev.org/zuul/zuul-jobs: canonical_name: opendev.org/zuul/zuul-jobs checkout: master commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 
trusted/project_2/review.rdoproject.org/rdo-jobs: canonical_name: review.rdoproject.org/rdo-jobs checkout: master commit: 9df4e7d5b028e976203d64479f9b7a76c1c95a24 trusted/project_3/github.com/openstack-k8s-operators/ci-framework: canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 untrusted/project_0/github.com/openstack-k8s-operators/ci-framework: canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 untrusted/project_1/review.rdoproject.org/config: canonical_name: review.rdoproject.org/config checkout: master commit: 40052f923df77143f1c9739304c4b4221346825f untrusted/project_2/opendev.org/zuul/zuul-jobs: canonical_name: opendev.org/zuul/zuul-jobs checkout: master commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 untrusted/project_3/review.rdoproject.org/rdo-jobs: canonical_name: review.rdoproject.org/rdo-jobs checkout: master commit: 9df4e7d5b028e976203d64479f9b7a76c1c95a24 untrusted/project_4/github.com/infrawatch/service-telemetry-operator: canonical_name: github.com/infrawatch/service-telemetry-operator checkout: master commit: dee1e9b260d30a0e04e6122a214cac385c42d9bb playbooks: - path: untrusted/project_4/github.com/infrawatch/service-telemetry-operator/ci/deploy_stf.yml roles: - checkout: master checkout_description: playbook branch link_name: ansible/playbook_0/role_0/service-telemetry-operator link_target: untrusted/project_4/github.com/infrawatch/service-telemetry-operator role_path: ansible/playbook_0/role_0/service-telemetry-operator/roles - checkout: main checkout_description: project override ref link_name: ansible/playbook_0/role_1/ci-framework link_target: untrusted/project_0/github.com/openstack-k8s-operators/ci-framework role_path: ansible/playbook_0/role_1/ci-framework/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_2/config link_target: untrusted/project_1/review.rdoproject.org/config role_path: ansible/playbook_0/role_2/config/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_3/zuul-jobs link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs role_path: ansible/playbook_0/role_3/zuul-jobs/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_4/rdo-jobs link_target: untrusted/project_3/review.rdoproject.org/rdo-jobs role_path: ansible/playbook_0/role_4/rdo-jobs/roles - path: untrusted/project_4/github.com/infrawatch/service-telemetry-operator/ci/test_stf.yml roles: - checkout: master checkout_description: playbook branch link_name: ansible/playbook_1/role_0/service-telemetry-operator link_target: untrusted/project_4/github.com/infrawatch/service-telemetry-operator role_path: ansible/playbook_1/role_0/service-telemetry-operator/roles - checkout: main checkout_description: project override ref link_name: ansible/playbook_1/role_1/ci-framework link_target: untrusted/project_0/github.com/openstack-k8s-operators/ci-framework role_path: ansible/playbook_1/role_1/ci-framework/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_2/config link_target: untrusted/project_1/review.rdoproject.org/config role_path: ansible/playbook_1/role_2/config/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_3/zuul-jobs link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs role_path: ansible/playbook_1/role_3/zuul-jobs/roles - 
checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_4/rdo-jobs link_target: untrusted/project_3/review.rdoproject.org/rdo-jobs role_path: ansible/playbook_1/role_4/rdo-jobs/roles post_review: false project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator projects: github.com/crc-org/crc-cloud: canonical_hostname: github.com canonical_name: github.com/crc-org/crc-cloud checkout: main checkout_description: project override ref commit: 1c985328b5b8cdf9dc083e0c7b3abae12c7c8c53 name: crc-org/crc-cloud required: true short_name: crc-cloud src_dir: src/github.com/crc-org/crc-cloud github.com/infrawatch/prometheus-webhook-snmp: canonical_hostname: github.com canonical_name: github.com/infrawatch/prometheus-webhook-snmp checkout: master checkout_description: zuul branch commit: 3959c53b2613d03d066cb1b2fe5bdae8633ae895 name: infrawatch/prometheus-webhook-snmp required: true short_name: prometheus-webhook-snmp src_dir: src/github.com/infrawatch/prometheus-webhook-snmp github.com/infrawatch/service-telemetry-operator: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator checkout: master checkout_description: zuul branch commit: dee1e9b260d30a0e04e6122a214cac385c42d9bb name: infrawatch/service-telemetry-operator required: true short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator github.com/infrawatch/sg-bridge: canonical_hostname: github.com canonical_name: github.com/infrawatch/sg-bridge checkout: master checkout_description: zuul branch commit: bab11fba86ad0c21cb35e12b56bf086a3332f1d2 name: infrawatch/sg-bridge required: true short_name: sg-bridge src_dir: src/github.com/infrawatch/sg-bridge github.com/infrawatch/sg-core: canonical_hostname: github.com canonical_name: github.com/infrawatch/sg-core checkout: master checkout_description: zuul branch commit: 5a4aece11fea9f71ce7515d11e1e7f0eae97eea6 name: infrawatch/sg-core required: true short_name: sg-core src_dir: src/github.com/infrawatch/sg-core github.com/infrawatch/smart-gateway-operator: canonical_hostname: github.com canonical_name: github.com/infrawatch/smart-gateway-operator checkout: master checkout_description: zuul branch commit: 2ff5b96b6254418d20a509188eea72ab2c77839c name: infrawatch/smart-gateway-operator required: true short_name: smart-gateway-operator src_dir: src/github.com/infrawatch/smart-gateway-operator github.com/openstack-k8s-operators/ci-framework: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main checkout_description: project override ref commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 name: openstack-k8s-operators/ci-framework required: true short_name: ci-framework src_dir: src/github.com/openstack-k8s-operators/ci-framework github.com/openstack-k8s-operators/dataplane-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/dataplane-operator checkout: main checkout_description: project override ref commit: c98b51bcd7fe14b85ed4cf3f5f76552b3455c5f2 name: openstack-k8s-operators/dataplane-operator required: true short_name: dataplane-operator src_dir: src/github.com/openstack-k8s-operators/dataplane-operator github.com/openstack-k8s-operators/edpm-ansible: canonical_hostname: github.com canonical_name: 
github.com/openstack-k8s-operators/edpm-ansible checkout: main checkout_description: project default branch commit: def07d8eb172b38b1a39695442f28465a1dfac35 name: openstack-k8s-operators/edpm-ansible required: true short_name: edpm-ansible src_dir: src/github.com/openstack-k8s-operators/edpm-ansible github.com/openstack-k8s-operators/infra-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/infra-operator checkout: main checkout_description: project override ref commit: 786269345f996bd262360738a1e3c6b09171f370 name: openstack-k8s-operators/infra-operator required: true short_name: infra-operator src_dir: src/github.com/openstack-k8s-operators/infra-operator github.com/openstack-k8s-operators/install_yamls: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/install_yamls checkout: main checkout_description: project default branch commit: 2f838b62fe50aacff3d514af4b502264e0a276a5 name: openstack-k8s-operators/install_yamls required: true short_name: install_yamls src_dir: src/github.com/openstack-k8s-operators/install_yamls github.com/openstack-k8s-operators/openstack-baremetal-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-baremetal-operator checkout: master checkout_description: zuul branch commit: a333e57066b1d48e41f93af68be81188290a96b3 name: openstack-k8s-operators/openstack-baremetal-operator required: true short_name: openstack-baremetal-operator src_dir: src/github.com/openstack-k8s-operators/openstack-baremetal-operator github.com/openstack-k8s-operators/openstack-must-gather: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-must-gather checkout: main checkout_description: project override ref commit: 2da49819dd6af6036aede5e4e9a080ff2c6457de name: openstack-k8s-operators/openstack-must-gather required: true short_name: openstack-must-gather src_dir: src/github.com/openstack-k8s-operators/openstack-must-gather github.com/openstack-k8s-operators/openstack-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-operator checkout: main checkout_description: project override ref commit: 0ad3a7b7bb522e34f164849424319945b381d95c name: openstack-k8s-operators/openstack-operator required: true short_name: openstack-operator src_dir: src/github.com/openstack-k8s-operators/openstack-operator github.com/openstack-k8s-operators/repo-setup: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/repo-setup checkout: main checkout_description: project default branch commit: 37b10946c6a10f9fa26c13305f06bfd6867e723f name: openstack-k8s-operators/repo-setup required: true short_name: repo-setup src_dir: src/github.com/openstack-k8s-operators/repo-setup opendev.org/zuul/zuul-jobs: canonical_hostname: opendev.org canonical_name: opendev.org/zuul/zuul-jobs checkout: master checkout_description: zuul branch commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 name: zuul/zuul-jobs required: true short_name: zuul-jobs src_dir: src/opendev.org/zuul/zuul-jobs review.rdoproject.org/config: canonical_hostname: review.rdoproject.org canonical_name: review.rdoproject.org/config checkout: master checkout_description: zuul branch commit: 40052f923df77143f1c9739304c4b4221346825f name: config required: true short_name: config src_dir: src/review.rdoproject.org/config ref: refs/pull/694/head resources: {} tenant: rdoproject.org timeout: 3600 topic: null voting: true zuul_change_list: - 
service-telemetry-operator zuul_execution_branch: main zuul_execution_canonical_name_and_path: github.com/openstack-k8s-operators/ci-framework/ci/playbooks/e2e-collect-logs.yml zuul_execution_phase: post zuul_execution_phase_index: '1' zuul_execution_trusted: 'False' zuul_log_collection: true zuul_success: 'False' zuul_will_retry: 'False' crc: ansible_all_ipv4_addresses: - 192.168.126.11 - 38.102.83.243 ansible_all_ipv6_addresses: - fe80::de92:c335:2852:62e1 ansible_apparmor: status: disabled ansible_architecture: x86_64 ansible_bios_date: 04/01/2014 ansible_bios_vendor: SeaBIOS ansible_bios_version: 1.15.0-1 ansible_board_asset_tag: NA ansible_board_name: NA ansible_board_serial: NA ansible_board_vendor: NA ansible_board_version: NA ansible_br_int: active: false device: br-int features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: 'on' hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: off [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: 'on' tx_gre_segmentation: 'on' tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: 'on' tx_ipxip6_segmentation: 'on' tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: 'on' tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: 'on' tx_udp_tnl_segmentation: 'on' tx_vlan_offload: 'on' tx_vlan_stag_hw_insert: 'on' vlan_challenged: off [fixed] hw_timestamp_filters: [] macaddress: b2:a9:9f:57:07:84 mtu: 1400 promisc: true timestamping: [] type: ether ansible_chassis_asset_tag: NA ansible_chassis_serial: NA ansible_chassis_vendor: QEMU ansible_chassis_version: pc-i440fx-6.2 ansible_check_mode: false ansible_cmdline: BOOT_IMAGE: (hd0,gpt3)/boot/ostree/rhcos-12a61ee52bd2826a8183af75be3fde40ba3ac3c6861f00f5f1ec8b26ded7ec8a/vmlinuz-5.14.0-570.57.1.el9_6.x86_64 boot: UUID=19e76f87-96b8-4794-9744-0b33dca22d5b cgroup_no_v1: all console: ttyS0 ignition.platform.id: metal ostree: /ostree/boot.1/rhcos/12a61ee52bd2826a8183af75be3fde40ba3ac3c6861f00f5f1ec8b26ded7ec8a/0 psi: '0' root: UUID=5eb7c122-420e-4494-80ec-41664070d7b6 rootflags: prjquota rw: true systemd.unified_cgroup_hierarchy: '1' ansible_config_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1/ansible.cfg ansible_connection: ssh ansible_date_time: date: '2025-12-08' day: 08 epoch: '1765215484' epoch_int: '1765215484' 
hour: '17' iso8601: '2025-12-08T17:38:04Z' iso8601_basic: 20251208T173804268372 iso8601_basic_short: 20251208T173804 iso8601_micro: '2025-12-08T17:38:04.268372Z' minute: '38' month: '12' second: '04' time: '17:38:04' tz: UTC tz_dst: UTC tz_offset: '+0000' weekday: Monday weekday_number: '1' weeknumber: '49' year: '2025' ansible_default_ipv4: address: 38.102.83.243 alias: ens3 broadcast: 38.102.83.255 gateway: 38.102.83.1 interface: ens3 macaddress: fa:16:3e:d6:4e:cd mtu: 1500 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' type: ether ansible_default_ipv6: {} ansible_device_links: ids: sr0: - ata-QEMU_DVD-ROM_QM00001 labels: sr0: - config-2 vda2: - EFI-SYSTEM vda3: - boot vda4: - root masters: {} uuids: sr0: - 2025-12-08-17-36-19-00 vda2: - 7B77-95E7 vda3: - 19e76f87-96b8-4794-9744-0b33dca22d5b vda4: - 5eb7c122-420e-4494-80ec-41664070d7b6 ansible_devices: loop0: holders: [] host: '' links: ids: [] labels: [] masters: [] uuids: [] model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '12784' sectorsize: '4096' size: 6.24 MB support_discard: '4096' vendor: null virtual: 1 sr0: holders: [] host: 'IDE interface: Intel Corporation 82371SB PIIX3 IDE [Natoma/Triton II]' links: ids: - ata-QEMU_DVD-ROM_QM00001 labels: - config-2 masters: [] uuids: - 2025-12-08-17-36-19-00 model: QEMU DVD-ROM partitions: {} removable: '1' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '964' sectorsize: '2048' size: 482.00 KB support_discard: '2048' vendor: QEMU virtual: 1 vda: holders: [] host: 'SCSI storage controller: Red Hat, Inc. Virtio block device' links: ids: [] labels: [] masters: [] uuids: [] model: null partitions: vda1: holders: [] links: ids: [] labels: [] masters: [] uuids: [] sectors: '2048' sectorsize: 512 size: 1.00 MB start: '2048' uuid: null vda2: holders: [] links: ids: [] labels: - EFI-SYSTEM masters: [] uuids: - 7B77-95E7 sectors: '260096' sectorsize: 512 size: 127.00 MB start: '4096' uuid: 7B77-95E7 vda3: holders: [] links: ids: [] labels: - boot masters: [] uuids: - 19e76f87-96b8-4794-9744-0b33dca22d5b sectors: '786432' sectorsize: 512 size: 384.00 MB start: '264192' uuid: 19e76f87-96b8-4794-9744-0b33dca22d5b vda4: holders: [] links: ids: [] labels: - root masters: [] uuids: - 5eb7c122-420e-4494-80ec-41664070d7b6 sectors: '418379743' sectorsize: 512 size: 199.50 GB start: '1050624' uuid: 5eb7c122-420e-4494-80ec-41664070d7b6 removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '419430400' sectorsize: '512' size: 200.00 GB support_discard: '512' vendor: '0x1af4' virtual: 1 ansible_diff_mode: false ansible_distribution: RedHat ansible_distribution_file_parsed: true ansible_distribution_file_path: /etc/redhat-release ansible_distribution_file_search_string: Red Hat ansible_distribution_file_variety: RedHat ansible_distribution_major_version: '9' ansible_distribution_release: Plow ansible_distribution_version: '9.6' ansible_dns: nameservers: - 199.204.44.24 - 199.204.47.54 ansible_domain: '' ansible_effective_group_id: 1000 ansible_effective_user_id: 1000 ansible_ens3: active: true device: ens3 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off 
[fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: 'on' rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: on [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: off [fixed] tx_gso_partial: off [fixed] tx_gso_robust: on [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: off [fixed] tx_sctp_segmentation: off [fixed] tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'off' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: off [fixed] tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: off [fixed] hw_timestamp_filters: [] ipv4: address: 38.102.83.243 broadcast: 38.102.83.255 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' ipv6: - address: fe80::de92:c335:2852:62e1 prefix: '64' scope: link macaddress: fa:16:3e:d6:4e:cd module: virtio_net mtu: 1500 pciid: virtio1 promisc: false speed: -1 timestamping: [] type: ether ansible_env: BASH_FUNC_which%%: "() { ( alias;\n eval ${which_declare} ) | /usr/bin/which --tty-only --read-alias --read-functions --show-tilde --show-dot $@\n}" DBUS_SESSION_BUS_ADDRESS: unix:path=/run/user/1000/bus DEBUGINFOD_IMA_CERT_PATH: '/etc/keys/ima:' HOME: /var/home/core LANG: C.UTF-8 LESSOPEN: '||/usr/bin/lesspipe.sh %s' LOGNAME: core MOTD_SHOWN: pam PATH: /var/home/core/.local/bin:/var/home/core/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin PWD: /var/home/core SELINUX_LEVEL_REQUESTED: '' SELINUX_ROLE_REQUESTED: '' SELINUX_USE_CURRENT_RANGE: '' SHELL: /bin/bash SHLVL: '1' SSH_CLIENT: 38.102.83.114 38870 22 SSH_CONNECTION: 38.102.83.114 38870 38.102.83.243 22 USER: core XDG_RUNTIME_DIR: /run/user/1000 XDG_SESSION_CLASS: user XDG_SESSION_ID: '2' XDG_SESSION_TYPE: tty _: /usr/bin/python3 which_declare: declare -f ansible_eth10: active: true device: eth10 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: 'on' hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: off [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 
'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: 'on' tx_gre_segmentation: 'on' tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: 'on' tx_ipxip6_segmentation: 'on' tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: 'on' tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: 'on' tx_udp_tnl_segmentation: 'on' tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: off [fixed] hw_timestamp_filters: [] ipv4: address: 192.168.126.11 broadcast: 192.168.126.255 netmask: 255.255.255.0 network: 192.168.126.0 prefix: '24' macaddress: 52:0b:68:d5:3d:d0 mtu: 1500 promisc: false timestamping: [] type: ether ansible_facts: _ansible_facts_gathered: true all_ipv4_addresses: - 192.168.126.11 - 38.102.83.243 all_ipv6_addresses: - fe80::de92:c335:2852:62e1 ansible_local: {} apparmor: status: disabled architecture: x86_64 bios_date: 04/01/2014 bios_vendor: SeaBIOS bios_version: 1.15.0-1 board_asset_tag: NA board_name: NA board_serial: NA board_vendor: NA board_version: NA br_int: active: false device: br-int features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: 'on' hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: off [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: 'on' tx_gre_segmentation: 'on' tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: 'on' tx_ipxip6_segmentation: 'on' tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: 'on' tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: 'on' tx_udp_tnl_segmentation: 'on' tx_vlan_offload: 'on' tx_vlan_stag_hw_insert: 'on' vlan_challenged: off [fixed] hw_timestamp_filters: [] macaddress: b2:a9:9f:57:07:84 mtu: 1400 promisc: true timestamping: [] type: ether chassis_asset_tag: 
NA chassis_serial: NA chassis_vendor: QEMU chassis_version: pc-i440fx-6.2 cmdline: BOOT_IMAGE: (hd0,gpt3)/boot/ostree/rhcos-12a61ee52bd2826a8183af75be3fde40ba3ac3c6861f00f5f1ec8b26ded7ec8a/vmlinuz-5.14.0-570.57.1.el9_6.x86_64 boot: UUID=19e76f87-96b8-4794-9744-0b33dca22d5b cgroup_no_v1: all console: ttyS0 ignition.platform.id: metal ostree: /ostree/boot.1/rhcos/12a61ee52bd2826a8183af75be3fde40ba3ac3c6861f00f5f1ec8b26ded7ec8a/0 psi: '0' root: UUID=5eb7c122-420e-4494-80ec-41664070d7b6 rootflags: prjquota rw: true systemd.unified_cgroup_hierarchy: '1' date_time: date: '2025-12-08' day: 08 epoch: '1765215484' epoch_int: '1765215484' hour: '17' iso8601: '2025-12-08T17:38:04Z' iso8601_basic: 20251208T173804268372 iso8601_basic_short: 20251208T173804 iso8601_micro: '2025-12-08T17:38:04.268372Z' minute: '38' month: '12' second: '04' time: '17:38:04' tz: UTC tz_dst: UTC tz_offset: '+0000' weekday: Monday weekday_number: '1' weeknumber: '49' year: '2025' default_ipv4: address: 38.102.83.243 alias: ens3 broadcast: 38.102.83.255 gateway: 38.102.83.1 interface: ens3 macaddress: fa:16:3e:d6:4e:cd mtu: 1500 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' type: ether default_ipv6: {} device_links: ids: sr0: - ata-QEMU_DVD-ROM_QM00001 labels: sr0: - config-2 vda2: - EFI-SYSTEM vda3: - boot vda4: - root masters: {} uuids: sr0: - 2025-12-08-17-36-19-00 vda2: - 7B77-95E7 vda3: - 19e76f87-96b8-4794-9744-0b33dca22d5b vda4: - 5eb7c122-420e-4494-80ec-41664070d7b6 devices: loop0: holders: [] host: '' links: ids: [] labels: [] masters: [] uuids: [] model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '12784' sectorsize: '4096' size: 6.24 MB support_discard: '4096' vendor: null virtual: 1 sr0: holders: [] host: 'IDE interface: Intel Corporation 82371SB PIIX3 IDE [Natoma/Triton II]' links: ids: - ata-QEMU_DVD-ROM_QM00001 labels: - config-2 masters: [] uuids: - 2025-12-08-17-36-19-00 model: QEMU DVD-ROM partitions: {} removable: '1' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '964' sectorsize: '2048' size: 482.00 KB support_discard: '2048' vendor: QEMU virtual: 1 vda: holders: [] host: 'SCSI storage controller: Red Hat, Inc. 
Virtio block device' links: ids: [] labels: [] masters: [] uuids: [] model: null partitions: vda1: holders: [] links: ids: [] labels: [] masters: [] uuids: [] sectors: '2048' sectorsize: 512 size: 1.00 MB start: '2048' uuid: null vda2: holders: [] links: ids: [] labels: - EFI-SYSTEM masters: [] uuids: - 7B77-95E7 sectors: '260096' sectorsize: 512 size: 127.00 MB start: '4096' uuid: 7B77-95E7 vda3: holders: [] links: ids: [] labels: - boot masters: [] uuids: - 19e76f87-96b8-4794-9744-0b33dca22d5b sectors: '786432' sectorsize: 512 size: 384.00 MB start: '264192' uuid: 19e76f87-96b8-4794-9744-0b33dca22d5b vda4: holders: [] links: ids: [] labels: - root masters: [] uuids: - 5eb7c122-420e-4494-80ec-41664070d7b6 sectors: '418379743' sectorsize: 512 size: 199.50 GB start: '1050624' uuid: 5eb7c122-420e-4494-80ec-41664070d7b6 removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '419430400' sectorsize: '512' size: 200.00 GB support_discard: '512' vendor: '0x1af4' virtual: 1 discovered_interpreter_python: /usr/bin/python3 distribution: RedHat distribution_file_parsed: true distribution_file_path: /etc/redhat-release distribution_file_search_string: Red Hat distribution_file_variety: RedHat distribution_major_version: '9' distribution_release: Plow distribution_version: '9.6' dns: nameservers: - 199.204.44.24 - 199.204.47.54 domain: '' effective_group_id: 1000 effective_user_id: 1000 ens3: active: true device: ens3 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: 'on' rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: on [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: off [fixed] tx_gso_partial: off [fixed] tx_gso_robust: on [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: off [fixed] tx_sctp_segmentation: off [fixed] tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'off' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: off [fixed] tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: off [fixed] hw_timestamp_filters: [] ipv4: address: 38.102.83.243 broadcast: 38.102.83.255 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' ipv6: - address: fe80::de92:c335:2852:62e1 prefix: '64' scope: link 
macaddress: fa:16:3e:d6:4e:cd module: virtio_net mtu: 1500 pciid: virtio1 promisc: false speed: -1 timestamping: [] type: ether env: BASH_FUNC_which%%: "() { ( alias;\n eval ${which_declare} ) | /usr/bin/which --tty-only --read-alias --read-functions --show-tilde --show-dot $@\n}" DBUS_SESSION_BUS_ADDRESS: unix:path=/run/user/1000/bus DEBUGINFOD_IMA_CERT_PATH: '/etc/keys/ima:' HOME: /var/home/core LANG: C.UTF-8 LESSOPEN: '||/usr/bin/lesspipe.sh %s' LOGNAME: core MOTD_SHOWN: pam PATH: /var/home/core/.local/bin:/var/home/core/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin PWD: /var/home/core SELINUX_LEVEL_REQUESTED: '' SELINUX_ROLE_REQUESTED: '' SELINUX_USE_CURRENT_RANGE: '' SHELL: /bin/bash SHLVL: '1' SSH_CLIENT: 38.102.83.114 38870 22 SSH_CONNECTION: 38.102.83.114 38870 38.102.83.243 22 USER: core XDG_RUNTIME_DIR: /run/user/1000 XDG_SESSION_CLASS: user XDG_SESSION_ID: '2' XDG_SESSION_TYPE: tty _: /usr/bin/python3 which_declare: declare -f eth10: active: true device: eth10 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: 'on' hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: off [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: 'on' tx_gre_segmentation: 'on' tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: 'on' tx_ipxip6_segmentation: 'on' tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: 'on' tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: 'on' tx_udp_tnl_segmentation: 'on' tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: off [fixed] hw_timestamp_filters: [] ipv4: address: 192.168.126.11 broadcast: 192.168.126.255 netmask: 255.255.255.0 network: 192.168.126.0 prefix: '24' macaddress: 52:0b:68:d5:3d:d0 mtu: 1500 promisc: false timestamping: [] type: ether fibre_channel_wwn: [] fips: false form_factor: Other fqdn: crc gather_subset: - all hostname: crc hostnqn: nqn.2014-08.org.nvmexpress:uuid:61c3a10d-83f9-474d-8347-456fea156b65 interfaces: - ovn-k8s-mp0 - br-int - tap0 - ovs-system - eth10 - ens3 - lo is_chroot: true iscsi_iqn: '' kernel: 5.14.0-570.57.1.el9_6.x86_64 kernel_version: '#1 SMP PREEMPT_DYNAMIC Sun Oct 19 22:05:48 EDT 2025' lo: active: true device: lo features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' 
generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: on [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: on [fixed] tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: on [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: off [fixed] tx_scatter_gather: on [fixed] tx_scatter_gather_fraglist: on [fixed] tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: on [fixed] hw_timestamp_filters: [] ipv4: address: 127.0.0.1 broadcast: '' netmask: 255.0.0.0 network: 127.0.0.0 prefix: '8' ipv6: - address: ::1 prefix: '128' scope: host mtu: 65536 promisc: false timestamping: [] type: loopback loadavg: 15m: 0.04 1m: 0.34 5m: 0.11 locally_reachable_ips: ipv4: - 38.102.83.243 - 127.0.0.0/8 - 127.0.0.1 - 192.168.126.11 ipv6: - ::1 - fe80::de92:c335:2852:62e1 lsb: {} lvm: N/A machine: x86_64 machine_id: 80bc4fba336e4ca1bc9d28a8be52a356 memfree_mb: 31405 memory_mb: nocache: free: 31585 used: 506 real: free: 31405 total: 32091 used: 686 swap: cached: 0 free: 0 total: 0 used: 0 memtotal_mb: 32091 module_setup: true mounts: - block_available: 14988988 block_size: 4096 block_total: 20823472 block_used: 5834484 device: /dev/vda4 fstype: xfs inode_available: 41549689 inode_total: 41679680 inode_used: 129991 mount: /sysroot options: ro,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,prjquota size_available: 61394894848 size_total: 85292941312 uuid: 5eb7c122-420e-4494-80ec-41664070d7b6 - block_available: 14988988 block_size: 4096 block_total: 20823472 block_used: 5834484 device: /dev/vda4 fstype: xfs inode_available: 41549689 inode_total: 41679680 inode_used: 129991 mount: /etc options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,prjquota,bind size_available: 61394894848 size_total: 85292941312 uuid: 5eb7c122-420e-4494-80ec-41664070d7b6 - block_available: 14988988 block_size: 4096 block_total: 20823472 block_used: 5834484 device: /dev/vda4 fstype: xfs inode_available: 41549689 inode_total: 41679680 inode_used: 129991 mount: /sysroot/ostree/deploy/rhcos/var options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,prjquota,bind size_available: 61394894848 size_total: 85292941312 uuid: 
5eb7c122-420e-4494-80ec-41664070d7b6 - block_available: 14988988 block_size: 4096 block_total: 20823472 block_used: 5834484 device: /dev/vda4 fstype: xfs inode_available: 41549689 inode_total: 41679680 inode_used: 129991 mount: /var options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,prjquota,bind size_available: 61394894848 size_total: 85292941312 uuid: 5eb7c122-420e-4494-80ec-41664070d7b6 - block_available: 205820 block_size: 1024 block_total: 358271 block_used: 152451 device: /dev/vda3 fstype: ext4 inode_available: 97936 inode_total: 98304 inode_used: 368 mount: /boot options: ro,seclabel,nosuid,nodev,relatime size_available: 210759680 size_total: 366869504 uuid: 19e76f87-96b8-4794-9744-0b33dca22d5b - block_available: 0 block_size: 2048 block_total: 241 block_used: 241 device: /dev/sr0 fstype: iso9660 inode_available: 0 inode_total: 0 inode_used: 0 mount: /tmp/openstack-config-drive options: ro,relatime,nojoliet,check=s,map=n,blocksize=2048 size_available: 0 size_total: 493568 uuid: 2025-12-08-17-36-19-00 nodename: crc os_family: RedHat ovn_k8s_mp0: active: false device: ovn-k8s-mp0 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: 'on' hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: off [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: 'on' tx_gre_segmentation: 'on' tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: 'on' tx_ipxip6_segmentation: 'on' tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: 'on' tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: 'on' tx_udp_tnl_segmentation: 'on' tx_vlan_offload: 'on' tx_vlan_stag_hw_insert: 'on' vlan_challenged: off [fixed] hw_timestamp_filters: [] macaddress: 0a:58:0a:d9:00:02 mtu: 1400 promisc: true timestamping: [] type: ether ovs_system: active: false device: ovs-system features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: 'on' hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off 
[fixed] rx_checksumming: off [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: 'on' tx_gre_segmentation: 'on' tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: 'on' tx_ipxip6_segmentation: 'on' tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: 'on' tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: 'on' tx_udp_tnl_segmentation: 'on' tx_vlan_offload: 'on' tx_vlan_stag_hw_insert: 'on' vlan_challenged: off [fixed] hw_timestamp_filters: [] macaddress: fe:76:31:e6:9b:16 mtu: 1500 promisc: true timestamping: [] type: ether pkg_mgr: atomic_container proc_cmdline: BOOT_IMAGE: (hd0,gpt3)/boot/ostree/rhcos-12a61ee52bd2826a8183af75be3fde40ba3ac3c6861f00f5f1ec8b26ded7ec8a/vmlinuz-5.14.0-570.57.1.el9_6.x86_64 boot: UUID=19e76f87-96b8-4794-9744-0b33dca22d5b cgroup_no_v1: all console: - hvc0 - ttyS0 ignition.platform.id: metal ostree: /ostree/boot.1/rhcos/12a61ee52bd2826a8183af75be3fde40ba3ac3c6861f00f5f1ec8b26ded7ec8a/0 psi: '0' root: UUID=5eb7c122-420e-4494-80ec-41664070d7b6 rootflags: prjquota rw: true systemd.unified_cgroup_hierarchy: '1' processor: - '0' - AuthenticAMD - AMD EPYC-Rome Processor - '1' - AuthenticAMD - AMD EPYC-Rome Processor - '2' - AuthenticAMD - AMD EPYC-Rome Processor - '3' - AuthenticAMD - AMD EPYC-Rome Processor - '4' - AuthenticAMD - AMD EPYC-Rome Processor - '5' - AuthenticAMD - AMD EPYC-Rome Processor - '6' - AuthenticAMD - AMD EPYC-Rome Processor - '7' - AuthenticAMD - AMD EPYC-Rome Processor - '8' - AuthenticAMD - AMD EPYC-Rome Processor - '9' - AuthenticAMD - AMD EPYC-Rome Processor - '10' - AuthenticAMD - AMD EPYC-Rome Processor - '11' - AuthenticAMD - AMD EPYC-Rome Processor processor_cores: 1 processor_count: 12 processor_nproc: 12 processor_threads_per_core: 1 processor_vcpus: 12 product_name: OpenStack Nova product_serial: NA product_uuid: NA product_version: 26.3.1 python: executable: /usr/bin/python3 has_sslcontext: true type: cpython version: major: 3 micro: 21 minor: 9 releaselevel: final serial: 0 version_info: - 3 - 9 - 21 - final - 0 python_version: 3.9.21 real_group_id: 1000 real_user_id: 1000 selinux: config_mode: enforcing mode: enforcing policyvers: 33 status: enabled type: targeted selinux_python_present: true service_mgr: systemd ssh_host_key_ecdsa_public: AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJ/czvKLYyI55EO2PydZ/7ZWo7I2dcnH2DDs36IotOlFLrvEvVx89ywwVup0/qZDeps5i8mLke2kUtcBGpkw3rU= ssh_host_key_ecdsa_public_keytype: ecdsa-sha2-nistp256 ssh_host_key_ed25519_public: AAAAC3NzaC1lZDI1NTE5AAAAIBvVXew9HFq22cMP6dmKJtvR++xU+OPGvlqLy/djwoEa ssh_host_key_ed25519_public_keytype: ssh-ed25519 ssh_host_key_rsa_public: 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCpE2RWa982geQpBCa/5UEGAeASvigjPzX7XVQdvnwMtclCmUXpkxrUWvoW+s8ViXHC6lEMbgaJtnQMTWLzRaiarBQeiaEsXpbAynDHA2YExHltVnNwN3VMW3zdi5d3OPu8eK51LyO+AafHgQUOENBrbH7Dur4HFx9/fYG1NKHH3kV2JzTygg10uQGIQnODw3ITERtlVrpUMd+gruS7U3EKmXybnDoDm/7S+GQVCvWqGKy26wEfw/XfO24D/eug4OP4PJx3L6UtliKgtpgW3wysPb7h1QGGe0fnn2GggTUobW02OlOt/WN+sOZTz2+Xw/J2iO1sOA53nf4tD5lKBjxMaEL5a84sQzbekg6cbzc944eCyVi0foJB4E3t6r0EXvEoMYVR2s2gfN6Cjxs/iDYx/bWwjVt2RUqa4ImV0A+//89m24vFos5K0Dpd3bVJANujvP6pUuJaTR4fSRCiyHx661dJIVacOiXr7JxZICroTPZ0xsj+WKFY8Lield2noIk= ssh_host_key_rsa_public_keytype: ssh-rsa swapfree_mb: 0 swaptotal_mb: 0 system: Linux system_capabilities: - '' system_capabilities_enforced: 'True' system_vendor: OpenStack Foundation tap0: active: false device: tap0 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: off [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: off [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'off' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: off [requested on] tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'off' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: off [fixed] tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: 'on' tx_sctp_segmentation: off [fixed] tx_tcp6_segmentation: off [requested on] tx_tcp_ecn_segmentation: off [requested on] tx_tcp_mangleid_segmentation: 'off' tx_tcp_segmentation: off [requested on] tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: off [fixed] tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: 'on' tx_vlan_stag_hw_insert: 'on' vlan_challenged: off [fixed] hw_timestamp_filters: [] macaddress: 5a:94:ef:e4:0c:ee mtu: 1500 promisc: false speed: 10 timestamping: [] type: ether uptime_seconds: 92 user_dir: /var/home/core user_gecos: CoreOS Admin user_gid: 1000 user_id: core user_shell: /bin/bash user_uid: 1000 userspace_architecture: x86_64 userspace_bits: '64' virtualization_role: guest virtualization_tech_guest: - openstack virtualization_tech_host: - kvm virtualization_type: openstack ansible_fibre_channel_wwn: [] ansible_fips: false ansible_forks: 5 ansible_form_factor: Other ansible_fqdn: crc ansible_host: 38.102.83.243 ansible_hostname: crc ansible_hostnqn: nqn.2014-08.org.nvmexpress:uuid:61c3a10d-83f9-474d-8347-456fea156b65 ansible_interfaces: - ovn-k8s-mp0 - br-int - tap0 - ovs-system - eth10 - ens3 - lo ansible_inventory_sources: - 
/var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1/inventory.yaml ansible_is_chroot: true ansible_iscsi_iqn: '' ansible_kernel: 5.14.0-570.57.1.el9_6.x86_64 ansible_kernel_version: '#1 SMP PREEMPT_DYNAMIC Sun Oct 19 22:05:48 EDT 2025' ansible_lo: active: true device: lo features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: on [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: on [fixed] tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: on [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: off [fixed] tx_scatter_gather: on [fixed] tx_scatter_gather_fraglist: on [fixed] tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: on [fixed] hw_timestamp_filters: [] ipv4: address: 127.0.0.1 broadcast: '' netmask: 255.0.0.0 network: 127.0.0.0 prefix: '8' ipv6: - address: ::1 prefix: '128' scope: host mtu: 65536 promisc: false timestamping: [] type: loopback ansible_loadavg: 15m: 0.04 1m: 0.34 5m: 0.11 ansible_local: {} ansible_locally_reachable_ips: ipv4: - 38.102.83.243 - 127.0.0.0/8 - 127.0.0.1 - 192.168.126.11 ipv6: - ::1 - fe80::de92:c335:2852:62e1 ansible_lsb: {} ansible_lvm: N/A ansible_machine: x86_64 ansible_machine_id: 80bc4fba336e4ca1bc9d28a8be52a356 ansible_memfree_mb: 31405 ansible_memory_mb: nocache: free: 31585 used: 506 real: free: 31405 total: 32091 used: 686 swap: cached: 0 free: 0 total: 0 used: 0 ansible_memtotal_mb: 32091 ansible_mounts: - block_available: 14988988 block_size: 4096 block_total: 20823472 block_used: 5834484 device: /dev/vda4 fstype: xfs inode_available: 41549689 inode_total: 41679680 inode_used: 129991 mount: /sysroot options: ro,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,prjquota size_available: 61394894848 size_total: 85292941312 uuid: 5eb7c122-420e-4494-80ec-41664070d7b6 - block_available: 14988988 block_size: 4096 block_total: 20823472 block_used: 5834484 device: /dev/vda4 fstype: xfs inode_available: 41549689 inode_total: 41679680 inode_used: 129991 mount: /etc options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,prjquota,bind 
size_available: 61394894848 size_total: 85292941312 uuid: 5eb7c122-420e-4494-80ec-41664070d7b6 - block_available: 14988988 block_size: 4096 block_total: 20823472 block_used: 5834484 device: /dev/vda4 fstype: xfs inode_available: 41549689 inode_total: 41679680 inode_used: 129991 mount: /sysroot/ostree/deploy/rhcos/var options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,prjquota,bind size_available: 61394894848 size_total: 85292941312 uuid: 5eb7c122-420e-4494-80ec-41664070d7b6 - block_available: 14988988 block_size: 4096 block_total: 20823472 block_used: 5834484 device: /dev/vda4 fstype: xfs inode_available: 41549689 inode_total: 41679680 inode_used: 129991 mount: /var options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,prjquota,bind size_available: 61394894848 size_total: 85292941312 uuid: 5eb7c122-420e-4494-80ec-41664070d7b6 - block_available: 205820 block_size: 1024 block_total: 358271 block_used: 152451 device: /dev/vda3 fstype: ext4 inode_available: 97936 inode_total: 98304 inode_used: 368 mount: /boot options: ro,seclabel,nosuid,nodev,relatime size_available: 210759680 size_total: 366869504 uuid: 19e76f87-96b8-4794-9744-0b33dca22d5b - block_available: 0 block_size: 2048 block_total: 241 block_used: 241 device: /dev/sr0 fstype: iso9660 inode_available: 0 inode_total: 0 inode_used: 0 mount: /tmp/openstack-config-drive options: ro,relatime,nojoliet,check=s,map=n,blocksize=2048 size_available: 0 size_total: 493568 uuid: 2025-12-08-17-36-19-00 ansible_nodename: crc ansible_os_family: RedHat ansible_ovn_k8s_mp0: active: false device: ovn-k8s-mp0 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: 'on' hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: off [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: 'on' tx_gre_segmentation: 'on' tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: 'on' tx_ipxip6_segmentation: 'on' tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: 'on' tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: 'on' tx_udp_tnl_segmentation: 'on' tx_vlan_offload: 'on' tx_vlan_stag_hw_insert: 'on' vlan_challenged: off [fixed] hw_timestamp_filters: [] macaddress: 0a:58:0a:d9:00:02 mtu: 1400 promisc: true timestamping: [] type: ether ansible_ovs_system: active: false device: ovs-system 
features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: 'on' hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: off [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: 'on' tx_gre_segmentation: 'on' tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: 'on' tx_ipxip6_segmentation: 'on' tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: 'on' tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: 'on' tx_udp_tnl_segmentation: 'on' tx_vlan_offload: 'on' tx_vlan_stag_hw_insert: 'on' vlan_challenged: off [fixed] hw_timestamp_filters: [] macaddress: fe:76:31:e6:9b:16 mtu: 1500 promisc: true timestamping: [] type: ether ansible_pkg_mgr: atomic_container ansible_playbook_python: /usr/lib/zuul/ansible/8/bin/python ansible_port: 22 ansible_proc_cmdline: BOOT_IMAGE: (hd0,gpt3)/boot/ostree/rhcos-12a61ee52bd2826a8183af75be3fde40ba3ac3c6861f00f5f1ec8b26ded7ec8a/vmlinuz-5.14.0-570.57.1.el9_6.x86_64 boot: UUID=19e76f87-96b8-4794-9744-0b33dca22d5b cgroup_no_v1: all console: - hvc0 - ttyS0 ignition.platform.id: metal ostree: /ostree/boot.1/rhcos/12a61ee52bd2826a8183af75be3fde40ba3ac3c6861f00f5f1ec8b26ded7ec8a/0 psi: '0' root: UUID=5eb7c122-420e-4494-80ec-41664070d7b6 rootflags: prjquota rw: true systemd.unified_cgroup_hierarchy: '1' ansible_processor: - '0' - AuthenticAMD - AMD EPYC-Rome Processor - '1' - AuthenticAMD - AMD EPYC-Rome Processor - '2' - AuthenticAMD - AMD EPYC-Rome Processor - '3' - AuthenticAMD - AMD EPYC-Rome Processor - '4' - AuthenticAMD - AMD EPYC-Rome Processor - '5' - AuthenticAMD - AMD EPYC-Rome Processor - '6' - AuthenticAMD - AMD EPYC-Rome Processor - '7' - AuthenticAMD - AMD EPYC-Rome Processor - '8' - AuthenticAMD - AMD EPYC-Rome Processor - '9' - AuthenticAMD - AMD EPYC-Rome Processor - '10' - AuthenticAMD - AMD EPYC-Rome Processor - '11' - AuthenticAMD - AMD EPYC-Rome Processor ansible_processor_cores: 1 ansible_processor_count: 12 ansible_processor_nproc: 12 ansible_processor_threads_per_core: 1 ansible_processor_vcpus: 12 ansible_product_name: OpenStack Nova ansible_product_serial: NA ansible_product_uuid: NA ansible_product_version: 26.3.1 ansible_python: executable: /usr/bin/python3 has_sslcontext: true type: cpython version: major: 3 micro: 21 minor: 9 releaselevel: final serial: 0 version_info: - 3 - 9 - 21 - final - 0 
ansible_python_interpreter: auto ansible_python_version: 3.9.21 ansible_real_group_id: 1000 ansible_real_user_id: 1000 ansible_run_tags: - all ansible_scp_extra_args: -o PermitLocalCommand=no ansible_selinux: config_mode: enforcing mode: enforcing policyvers: 33 status: enabled type: targeted ansible_selinux_python_present: true ansible_service_mgr: systemd ansible_sftp_extra_args: -o PermitLocalCommand=no ansible_skip_tags: [] ansible_ssh_common_args: -o PermitLocalCommand=no ansible_ssh_executable: ssh ansible_ssh_extra_args: -o PermitLocalCommand=no ansible_ssh_host_key_ecdsa_public: AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJ/czvKLYyI55EO2PydZ/7ZWo7I2dcnH2DDs36IotOlFLrvEvVx89ywwVup0/qZDeps5i8mLke2kUtcBGpkw3rU= ansible_ssh_host_key_ecdsa_public_keytype: ecdsa-sha2-nistp256 ansible_ssh_host_key_ed25519_public: AAAAC3NzaC1lZDI1NTE5AAAAIBvVXew9HFq22cMP6dmKJtvR++xU+OPGvlqLy/djwoEa ansible_ssh_host_key_ed25519_public_keytype: ssh-ed25519 ansible_ssh_host_key_rsa_public: AAAAB3NzaC1yc2EAAAADAQABAAABgQCpE2RWa982geQpBCa/5UEGAeASvigjPzX7XVQdvnwMtclCmUXpkxrUWvoW+s8ViXHC6lEMbgaJtnQMTWLzRaiarBQeiaEsXpbAynDHA2YExHltVnNwN3VMW3zdi5d3OPu8eK51LyO+AafHgQUOENBrbH7Dur4HFx9/fYG1NKHH3kV2JzTygg10uQGIQnODw3ITERtlVrpUMd+gruS7U3EKmXybnDoDm/7S+GQVCvWqGKy26wEfw/XfO24D/eug4OP4PJx3L6UtliKgtpgW3wysPb7h1QGGe0fnn2GggTUobW02OlOt/WN+sOZTz2+Xw/J2iO1sOA53nf4tD5lKBjxMaEL5a84sQzbekg6cbzc944eCyVi0foJB4E3t6r0EXvEoMYVR2s2gfN6Cjxs/iDYx/bWwjVt2RUqa4ImV0A+//89m24vFos5K0Dpd3bVJANujvP6pUuJaTR4fSRCiyHx661dJIVacOiXr7JxZICroTPZ0xsj+WKFY8Lield2noIk= ansible_ssh_host_key_rsa_public_keytype: ssh-rsa ansible_swapfree_mb: 0 ansible_swaptotal_mb: 0 ansible_system: Linux ansible_system_capabilities: - '' ansible_system_capabilities_enforced: 'True' ansible_system_vendor: OpenStack Foundation ansible_tap0: active: false device: tap0 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: off [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: off [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'off' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: off [requested on] tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'off' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: off [fixed] tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: 'on' tx_sctp_segmentation: off [fixed] tx_tcp6_segmentation: off [requested on] tx_tcp_ecn_segmentation: off [requested on] tx_tcp_mangleid_segmentation: 'off' tx_tcp_segmentation: off [requested on] tx_tunnel_remcsum_segmentation: off [fixed] 
tx_udp_segmentation: off [fixed] tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: 'on' tx_vlan_stag_hw_insert: 'on' vlan_challenged: off [fixed] hw_timestamp_filters: [] macaddress: 5a:94:ef:e4:0c:ee mtu: 1500 promisc: false speed: 10 timestamping: [] type: ether ansible_uptime_seconds: 92 ansible_user: core ansible_user_dir: /var/home/core ansible_user_gecos: CoreOS Admin ansible_user_gid: 1000 ansible_user_id: core ansible_user_shell: /bin/bash ansible_user_uid: 1000 ansible_userspace_architecture: x86_64 ansible_userspace_bits: '64' ansible_verbosity: 1 ansible_version: full: 2.15.12 major: 2 minor: 15 revision: 12 string: 2.15.12 ansible_virtualization_role: guest ansible_virtualization_tech_guest: - openstack ansible_virtualization_tech_host: - kvm ansible_virtualization_type: openstack cifmw_architecture_repo: /var/home/core/src/github.com/openstack-k8s-operators/architecture cifmw_architecture_repo_relative: src/github.com/openstack-k8s-operators/architecture cifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_installyamls_repos: /var/home/core/src/github.com/openstack-k8s-operators/install_yamls cifmw_installyamls_repos_relative: src/github.com/openstack-k8s-operators/install_yamls cifmw_openshift_api: api.crc.testing:6443 cifmw_openshift_kubeconfig: /var/home/core/.crc/machines/crc/kubeconfig cifmw_openshift_password: '12**********89' cifmw_openshift_skip_tls_verify: true cifmw_openshift_user: kubeadmin cifmw_project_dir: src/github.com/openstack-k8s-operators/ci-framework cifmw_project_dir_absolute: /var/home/core/src/github.com/openstack-k8s-operators/ci-framework cifmw_run_tests: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller crc_ci_bootstrap_cloud_name: vexxhost crc_ci_bootstrap_networking: instances: controller: networks: default: ip: 192.168.122.11 crc: networks: default: ip: 192.168.122.10 internal-api: ip: 172.17.0.5 storage: ip: 172.18.0.5 tenant: ip: 172.19.0.5 networks: default: mtu: 1500 range: 192.168.122.0/24 internal-api: range: 172.17.0.0/24 vlan: 20 storage: range: 172.18.0.0/24 vlan: 21 tenant: range: 172.19.0.0/24 vlan: 22 discovered_interpreter_python: /usr/bin/python3 enable_ramdisk: true gather_subset: - all group_names: - ungrouped groups: all: - controller - crc ungrouped: *id001 zuul_unreachable: [] inventory_dir: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1 inventory_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1/inventory.yaml inventory_hostname: crc inventory_hostname_short: crc module_setup: true nodepool: az: nova cloud: vexxhost-nodepool-tripleo external_id: 32c1a977-c4dc-4b4f-b307-ff2a2f4e57f1 host_id: b012578aee5370fae73eb6c92c4679617335173cccca05390470f411 interface_ip: 38.102.83.243 label: crc-cloud-ocp-4-20-1-3xl private_ipv4: 38.102.83.243 private_ipv6: null provider: vexxhost-nodepool-tripleo public_ipv4: 38.102.83.243 public_ipv6: '' region: RegionOne slot: null omit: __omit_place_holder__f3b19479a7eb45f4c51a6b72a0b1b3bc3dca197f playbook_dir: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/untrusted/project_0/github.com/openstack-k8s-operators/ci-framework/ci/playbooks podified_validation: true push_registry: quay.rdoproject.org quay_login_secret_name: quay_nextgen_zuulgithubci registry_login_enabled: true scenario: catalog_deploy unsafe_vars: 
ansible_connection: ssh ansible_host: 38.102.83.243 ansible_port: 22 ansible_python_interpreter: auto ansible_user: core cifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_openshift_api: api.crc.testing:6443 cifmw_openshift_kubeconfig: '{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig' cifmw_openshift_password: '12**********89' cifmw_openshift_skip_tls_verify: true cifmw_openshift_user: kubeadmin cifmw_run_tests: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller crc_ci_bootstrap_cloud_name: '{{ nodepool.cloud | replace(''-nodepool-tripleo'','''') }}' crc_ci_bootstrap_networking: instances: controller: networks: default: ip: 192.168.122.11 crc: networks: default: ip: 192.168.122.10 internal-api: ip: 172.17.0.5 storage: ip: 172.18.0.5 tenant: ip: 172.19.0.5 networks: default: mtu: 1500 range: 192.168.122.0/24 internal-api: range: 172.17.0.0/24 vlan: 20 storage: range: 172.18.0.0/24 vlan: 21 tenant: range: 172.19.0.0/24 vlan: 22 enable_ramdisk: true nodepool: az: nova cloud: vexxhost-nodepool-tripleo external_id: 32c1a977-c4dc-4b4f-b307-ff2a2f4e57f1 host_id: b012578aee5370fae73eb6c92c4679617335173cccca05390470f411 interface_ip: 38.102.83.243 label: crc-cloud-ocp-4-20-1-3xl private_ipv4: 38.102.83.243 private_ipv6: null provider: vexxhost-nodepool-tripleo public_ipv4: 38.102.83.243 public_ipv6: '' region: RegionOne slot: null podified_validation: true push_registry: quay.rdoproject.org quay_login_secret_name: quay_nextgen_zuulgithubci registry_login_enabled: true scenario: catalog_deploy zuul_log_collection: true zuul: _inheritance_path: - '' - '' - '' - '' - '' - '' - '' - '' - '' ansible_version: '8' attempts: 1 branch: master build: fdae556768574d6f9092d7162dc9ae0f build_refs: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null buildset: c405b24f52df4ff1a39b37dcfc476a60 buildset_refs: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 child_jobs: [] commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb event_id: 0add7250-d45c-11f0-86cc-0eee4913030a executor: hostname: ze03.softwarefactory-project.io inventory_file: 
/var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/inventory.yaml log_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/logs result_data_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/results.json src_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/src work_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work items: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null job: stf-crc-ocp_420-catalog_deploy jobtags: [] max_attempts: 1 message: QWRkIE9DUCA0LjIwIGpvYnMKCkFkZCBqb2IgZGVmaW5pdGlvbnMgdXNpbmcgY3JjLWNsb3VkLW9jcC00LTIwLTEtM3hsIGFzIGJhc2UgaW1hZ2UNCg0KQ2xvc2VzOiBPU1BSSC0yMTg4MQ== patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb pipeline: github-check playbook_context: playbook_projects: trusted/project_0/review.rdoproject.org/config: canonical_name: review.rdoproject.org/config checkout: master commit: 40052f923df77143f1c9739304c4b4221346825f trusted/project_1/opendev.org/zuul/zuul-jobs: canonical_name: opendev.org/zuul/zuul-jobs checkout: master commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 trusted/project_2/review.rdoproject.org/rdo-jobs: canonical_name: review.rdoproject.org/rdo-jobs checkout: master commit: 9df4e7d5b028e976203d64479f9b7a76c1c95a24 trusted/project_3/github.com/openstack-k8s-operators/ci-framework: canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 untrusted/project_0/github.com/openstack-k8s-operators/ci-framework: canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 untrusted/project_1/review.rdoproject.org/config: canonical_name: review.rdoproject.org/config checkout: master commit: 40052f923df77143f1c9739304c4b4221346825f untrusted/project_2/opendev.org/zuul/zuul-jobs: canonical_name: opendev.org/zuul/zuul-jobs checkout: master commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 untrusted/project_3/review.rdoproject.org/rdo-jobs: canonical_name: review.rdoproject.org/rdo-jobs checkout: master commit: 9df4e7d5b028e976203d64479f9b7a76c1c95a24 untrusted/project_4/github.com/infrawatch/service-telemetry-operator: canonical_name: github.com/infrawatch/service-telemetry-operator checkout: master commit: dee1e9b260d30a0e04e6122a214cac385c42d9bb playbooks: - path: untrusted/project_4/github.com/infrawatch/service-telemetry-operator/ci/deploy_stf.yml roles: - checkout: master checkout_description: playbook branch link_name: ansible/playbook_0/role_0/service-telemetry-operator link_target: untrusted/project_4/github.com/infrawatch/service-telemetry-operator role_path: ansible/playbook_0/role_0/service-telemetry-operator/roles - checkout: main checkout_description: project override ref link_name: ansible/playbook_0/role_1/ci-framework link_target: untrusted/project_0/github.com/openstack-k8s-operators/ci-framework role_path: ansible/playbook_0/role_1/ci-framework/roles - checkout: master checkout_description: 
zuul branch link_name: ansible/playbook_0/role_2/config link_target: untrusted/project_1/review.rdoproject.org/config role_path: ansible/playbook_0/role_2/config/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_3/zuul-jobs link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs role_path: ansible/playbook_0/role_3/zuul-jobs/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_4/rdo-jobs link_target: untrusted/project_3/review.rdoproject.org/rdo-jobs role_path: ansible/playbook_0/role_4/rdo-jobs/roles - path: untrusted/project_4/github.com/infrawatch/service-telemetry-operator/ci/test_stf.yml roles: - checkout: master checkout_description: playbook branch link_name: ansible/playbook_1/role_0/service-telemetry-operator link_target: untrusted/project_4/github.com/infrawatch/service-telemetry-operator role_path: ansible/playbook_1/role_0/service-telemetry-operator/roles - checkout: main checkout_description: project override ref link_name: ansible/playbook_1/role_1/ci-framework link_target: untrusted/project_0/github.com/openstack-k8s-operators/ci-framework role_path: ansible/playbook_1/role_1/ci-framework/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_2/config link_target: untrusted/project_1/review.rdoproject.org/config role_path: ansible/playbook_1/role_2/config/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_3/zuul-jobs link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs role_path: ansible/playbook_1/role_3/zuul-jobs/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_4/rdo-jobs link_target: untrusted/project_3/review.rdoproject.org/rdo-jobs role_path: ansible/playbook_1/role_4/rdo-jobs/roles post_review: false project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator projects: github.com/crc-org/crc-cloud: canonical_hostname: github.com canonical_name: github.com/crc-org/crc-cloud checkout: main checkout_description: project override ref commit: 1c985328b5b8cdf9dc083e0c7b3abae12c7c8c53 name: crc-org/crc-cloud required: true short_name: crc-cloud src_dir: src/github.com/crc-org/crc-cloud github.com/infrawatch/prometheus-webhook-snmp: canonical_hostname: github.com canonical_name: github.com/infrawatch/prometheus-webhook-snmp checkout: master checkout_description: zuul branch commit: 3959c53b2613d03d066cb1b2fe5bdae8633ae895 name: infrawatch/prometheus-webhook-snmp required: true short_name: prometheus-webhook-snmp src_dir: src/github.com/infrawatch/prometheus-webhook-snmp github.com/infrawatch/service-telemetry-operator: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator checkout: master checkout_description: zuul branch commit: dee1e9b260d30a0e04e6122a214cac385c42d9bb name: infrawatch/service-telemetry-operator required: true short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator github.com/infrawatch/sg-bridge: canonical_hostname: github.com canonical_name: github.com/infrawatch/sg-bridge checkout: master checkout_description: zuul branch commit: bab11fba86ad0c21cb35e12b56bf086a3332f1d2 name: infrawatch/sg-bridge required: true short_name: sg-bridge src_dir: 
src/github.com/infrawatch/sg-bridge github.com/infrawatch/sg-core: canonical_hostname: github.com canonical_name: github.com/infrawatch/sg-core checkout: master checkout_description: zuul branch commit: 5a4aece11fea9f71ce7515d11e1e7f0eae97eea6 name: infrawatch/sg-core required: true short_name: sg-core src_dir: src/github.com/infrawatch/sg-core github.com/infrawatch/smart-gateway-operator: canonical_hostname: github.com canonical_name: github.com/infrawatch/smart-gateway-operator checkout: master checkout_description: zuul branch commit: 2ff5b96b6254418d20a509188eea72ab2c77839c name: infrawatch/smart-gateway-operator required: true short_name: smart-gateway-operator src_dir: src/github.com/infrawatch/smart-gateway-operator github.com/openstack-k8s-operators/ci-framework: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main checkout_description: project override ref commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 name: openstack-k8s-operators/ci-framework required: true short_name: ci-framework src_dir: src/github.com/openstack-k8s-operators/ci-framework github.com/openstack-k8s-operators/dataplane-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/dataplane-operator checkout: main checkout_description: project override ref commit: c98b51bcd7fe14b85ed4cf3f5f76552b3455c5f2 name: openstack-k8s-operators/dataplane-operator required: true short_name: dataplane-operator src_dir: src/github.com/openstack-k8s-operators/dataplane-operator github.com/openstack-k8s-operators/edpm-ansible: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/edpm-ansible checkout: main checkout_description: project default branch commit: def07d8eb172b38b1a39695442f28465a1dfac35 name: openstack-k8s-operators/edpm-ansible required: true short_name: edpm-ansible src_dir: src/github.com/openstack-k8s-operators/edpm-ansible github.com/openstack-k8s-operators/infra-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/infra-operator checkout: main checkout_description: project override ref commit: 786269345f996bd262360738a1e3c6b09171f370 name: openstack-k8s-operators/infra-operator required: true short_name: infra-operator src_dir: src/github.com/openstack-k8s-operators/infra-operator github.com/openstack-k8s-operators/install_yamls: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/install_yamls checkout: main checkout_description: project default branch commit: 2f838b62fe50aacff3d514af4b502264e0a276a5 name: openstack-k8s-operators/install_yamls required: true short_name: install_yamls src_dir: src/github.com/openstack-k8s-operators/install_yamls github.com/openstack-k8s-operators/openstack-baremetal-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-baremetal-operator checkout: master checkout_description: zuul branch commit: a333e57066b1d48e41f93af68be81188290a96b3 name: openstack-k8s-operators/openstack-baremetal-operator required: true short_name: openstack-baremetal-operator src_dir: src/github.com/openstack-k8s-operators/openstack-baremetal-operator github.com/openstack-k8s-operators/openstack-must-gather: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-must-gather checkout: main checkout_description: project override ref commit: 2da49819dd6af6036aede5e4e9a080ff2c6457de name: openstack-k8s-operators/openstack-must-gather 
required: true short_name: openstack-must-gather src_dir: src/github.com/openstack-k8s-operators/openstack-must-gather github.com/openstack-k8s-operators/openstack-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-operator checkout: main checkout_description: project override ref commit: 0ad3a7b7bb522e34f164849424319945b381d95c name: openstack-k8s-operators/openstack-operator required: true short_name: openstack-operator src_dir: src/github.com/openstack-k8s-operators/openstack-operator github.com/openstack-k8s-operators/repo-setup: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/repo-setup checkout: main checkout_description: project default branch commit: 37b10946c6a10f9fa26c13305f06bfd6867e723f name: openstack-k8s-operators/repo-setup required: true short_name: repo-setup src_dir: src/github.com/openstack-k8s-operators/repo-setup opendev.org/zuul/zuul-jobs: canonical_hostname: opendev.org canonical_name: opendev.org/zuul/zuul-jobs checkout: master checkout_description: zuul branch commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 name: zuul/zuul-jobs required: true short_name: zuul-jobs src_dir: src/opendev.org/zuul/zuul-jobs review.rdoproject.org/config: canonical_hostname: review.rdoproject.org canonical_name: review.rdoproject.org/config checkout: master checkout_description: zuul branch commit: 40052f923df77143f1c9739304c4b4221346825f name: config required: true short_name: config src_dir: src/review.rdoproject.org/config ref: refs/pull/694/head resources: {} tenant: rdoproject.org timeout: 3600 topic: null voting: true zuul_execution_branch: main zuul_execution_canonical_name_and_path: github.com/openstack-k8s-operators/ci-framework/ci/playbooks/e2e-collect-logs.yml zuul_execution_phase: post zuul_execution_phase_index: '1' zuul_execution_trusted: 'False' zuul_log_collection: true zuul_success: 'False' zuul_will_retry: 'False' inventory_dir: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1 inventory_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/post_playbook_1/inventory.yaml inventory_hostname: controller inventory_hostname_short: controller logfiles_dest_dir: /home/zuul/ci-framework-data/logs/2025-12-08_18-02 module_setup: true nodepool: az: nova cloud: vexxhost-nodepool-tripleo external_id: 2651912f-4167-4227-a778-d37fa1159493 host_id: b012578aee5370fae73eb6c92c4679617335173cccca05390470f411 interface_ip: 38.102.83.251 label: cloud-centos-9-stream-tripleo-vexxhost private_ipv4: 38.102.83.251 private_ipv6: null provider: vexxhost-nodepool-tripleo public_ipv4: 38.102.83.251 public_ipv6: '' region: RegionOne slot: null omit: __omit_place_holder__f3b19479a7eb45f4c51a6b72a0b1b3bc3dca197f openstack_namespace: openstack play_hosts: *id002 playbook_dir: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/untrusted/project_0/github.com/openstack-k8s-operators/ci-framework/ci/playbooks podified_validation: true push_registry: quay.rdoproject.org quay_login_secret_name: quay_nextgen_zuulgithubci registry_login_enabled: true role_name: artifacts role_names: *id003 role_path: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/untrusted/project_0/github.com/openstack-k8s-operators/ci-framework/roles/artifacts role_uuid: fa163ef9-e89a-f6c1-53fd-00000000002e scenario: catalog_deploy unsafe_vars: ansible_connection: ssh ansible_host: 38.102.83.251 ansible_port: 22 ansible_python_interpreter: auto ansible_user: zuul cifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw 
cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_openshift_api: api.crc.testing:6443 cifmw_openshift_kubeconfig: '{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig' cifmw_openshift_password: '12**********89' cifmw_openshift_skip_tls_verify: true cifmw_openshift_user: kubeadmin cifmw_run_tests: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller crc_ci_bootstrap_cloud_name: '{{ nodepool.cloud | replace(''-nodepool-tripleo'','''') }}' crc_ci_bootstrap_networking: instances: controller: networks: default: ip: 192.168.122.11 crc: networks: default: ip: 192.168.122.10 internal-api: ip: 172.17.0.5 storage: ip: 172.18.0.5 tenant: ip: 172.19.0.5 networks: default: mtu: 1500 range: 192.168.122.0/24 internal-api: range: 172.17.0.0/24 vlan: 20 storage: range: 172.18.0.0/24 vlan: 21 tenant: range: 172.19.0.0/24 vlan: 22 enable_ramdisk: true nodepool: az: nova cloud: vexxhost-nodepool-tripleo external_id: 2651912f-4167-4227-a778-d37fa1159493 host_id: b012578aee5370fae73eb6c92c4679617335173cccca05390470f411 interface_ip: 38.102.83.251 label: cloud-centos-9-stream-tripleo-vexxhost private_ipv4: 38.102.83.251 private_ipv6: null provider: vexxhost-nodepool-tripleo public_ipv4: 38.102.83.251 public_ipv6: '' region: RegionOne slot: null podified_validation: true push_registry: quay.rdoproject.org quay_login_secret_name: quay_nextgen_zuulgithubci registry_login_enabled: true scenario: catalog_deploy zuul_log_collection: true zuul: _inheritance_path: - '' - '' - '' - '' - '' - '' - '' - '' - '' ansible_version: '8' attempts: 1 branch: master build: fdae556768574d6f9092d7162dc9ae0f build_refs: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null buildset: c405b24f52df4ff1a39b37dcfc476a60 buildset_refs: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 child_jobs: [] commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb event_id: 0add7250-d45c-11f0-86cc-0eee4913030a executor: hostname: ze03.softwarefactory-project.io inventory_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/inventory.yaml log_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/logs 
result_data_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/results.json src_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/src work_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work items: - branch: master change: '694' change_message: "Add OCP 4.20 jobs\n\nAdd job definitions using crc-cloud-ocp-4-20-1-3xl as base image\r\n\r\nCloses: OSPRH-21881" change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null job: stf-crc-ocp_420-catalog_deploy jobtags: [] max_attempts: 1 message: QWRkIE9DUCA0LjIwIGpvYnMKCkFkZCBqb2IgZGVmaW5pdGlvbnMgdXNpbmcgY3JjLWNsb3VkLW9jcC00LTIwLTEtM3hsIGFzIGJhc2UgaW1hZ2UNCg0KQ2xvc2VzOiBPU1BSSC0yMTg4MQ== patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb pipeline: github-check playbook_context: playbook_projects: trusted/project_0/review.rdoproject.org/config: canonical_name: review.rdoproject.org/config checkout: master commit: 40052f923df77143f1c9739304c4b4221346825f trusted/project_1/opendev.org/zuul/zuul-jobs: canonical_name: opendev.org/zuul/zuul-jobs checkout: master commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 trusted/project_2/review.rdoproject.org/rdo-jobs: canonical_name: review.rdoproject.org/rdo-jobs checkout: master commit: 9df4e7d5b028e976203d64479f9b7a76c1c95a24 trusted/project_3/github.com/openstack-k8s-operators/ci-framework: canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 untrusted/project_0/github.com/openstack-k8s-operators/ci-framework: canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 untrusted/project_1/review.rdoproject.org/config: canonical_name: review.rdoproject.org/config checkout: master commit: 40052f923df77143f1c9739304c4b4221346825f untrusted/project_2/opendev.org/zuul/zuul-jobs: canonical_name: opendev.org/zuul/zuul-jobs checkout: master commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 untrusted/project_3/review.rdoproject.org/rdo-jobs: canonical_name: review.rdoproject.org/rdo-jobs checkout: master commit: 9df4e7d5b028e976203d64479f9b7a76c1c95a24 untrusted/project_4/github.com/infrawatch/service-telemetry-operator: canonical_name: github.com/infrawatch/service-telemetry-operator checkout: master commit: dee1e9b260d30a0e04e6122a214cac385c42d9bb playbooks: - path: untrusted/project_4/github.com/infrawatch/service-telemetry-operator/ci/deploy_stf.yml roles: - checkout: master checkout_description: playbook branch link_name: ansible/playbook_0/role_0/service-telemetry-operator link_target: untrusted/project_4/github.com/infrawatch/service-telemetry-operator role_path: ansible/playbook_0/role_0/service-telemetry-operator/roles - checkout: main checkout_description: project override ref link_name: ansible/playbook_0/role_1/ci-framework link_target: untrusted/project_0/github.com/openstack-k8s-operators/ci-framework role_path: ansible/playbook_0/role_1/ci-framework/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_2/config link_target: untrusted/project_1/review.rdoproject.org/config role_path: 
ansible/playbook_0/role_2/config/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_3/zuul-jobs link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs role_path: ansible/playbook_0/role_3/zuul-jobs/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_4/rdo-jobs link_target: untrusted/project_3/review.rdoproject.org/rdo-jobs role_path: ansible/playbook_0/role_4/rdo-jobs/roles - path: untrusted/project_4/github.com/infrawatch/service-telemetry-operator/ci/test_stf.yml roles: - checkout: master checkout_description: playbook branch link_name: ansible/playbook_1/role_0/service-telemetry-operator link_target: untrusted/project_4/github.com/infrawatch/service-telemetry-operator role_path: ansible/playbook_1/role_0/service-telemetry-operator/roles - checkout: main checkout_description: project override ref link_name: ansible/playbook_1/role_1/ci-framework link_target: untrusted/project_0/github.com/openstack-k8s-operators/ci-framework role_path: ansible/playbook_1/role_1/ci-framework/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_2/config link_target: untrusted/project_1/review.rdoproject.org/config role_path: ansible/playbook_1/role_2/config/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_3/zuul-jobs link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs role_path: ansible/playbook_1/role_3/zuul-jobs/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_4/rdo-jobs link_target: untrusted/project_3/review.rdoproject.org/rdo-jobs role_path: ansible/playbook_1/role_4/rdo-jobs/roles post_review: false project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator projects: github.com/crc-org/crc-cloud: canonical_hostname: github.com canonical_name: github.com/crc-org/crc-cloud checkout: main checkout_description: project override ref commit: 1c985328b5b8cdf9dc083e0c7b3abae12c7c8c53 name: crc-org/crc-cloud required: true short_name: crc-cloud src_dir: src/github.com/crc-org/crc-cloud github.com/infrawatch/prometheus-webhook-snmp: canonical_hostname: github.com canonical_name: github.com/infrawatch/prometheus-webhook-snmp checkout: master checkout_description: zuul branch commit: 3959c53b2613d03d066cb1b2fe5bdae8633ae895 name: infrawatch/prometheus-webhook-snmp required: true short_name: prometheus-webhook-snmp src_dir: src/github.com/infrawatch/prometheus-webhook-snmp github.com/infrawatch/service-telemetry-operator: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator checkout: master checkout_description: zuul branch commit: dee1e9b260d30a0e04e6122a214cac385c42d9bb name: infrawatch/service-telemetry-operator required: true short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator github.com/infrawatch/sg-bridge: canonical_hostname: github.com canonical_name: github.com/infrawatch/sg-bridge checkout: master checkout_description: zuul branch commit: bab11fba86ad0c21cb35e12b56bf086a3332f1d2 name: infrawatch/sg-bridge required: true short_name: sg-bridge src_dir: src/github.com/infrawatch/sg-bridge github.com/infrawatch/sg-core: canonical_hostname: github.com canonical_name: github.com/infrawatch/sg-core checkout: 
master checkout_description: zuul branch commit: 5a4aece11fea9f71ce7515d11e1e7f0eae97eea6 name: infrawatch/sg-core required: true short_name: sg-core src_dir: src/github.com/infrawatch/sg-core github.com/infrawatch/smart-gateway-operator: canonical_hostname: github.com canonical_name: github.com/infrawatch/smart-gateway-operator checkout: master checkout_description: zuul branch commit: 2ff5b96b6254418d20a509188eea72ab2c77839c name: infrawatch/smart-gateway-operator required: true short_name: smart-gateway-operator src_dir: src/github.com/infrawatch/smart-gateway-operator github.com/openstack-k8s-operators/ci-framework: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main checkout_description: project override ref commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 name: openstack-k8s-operators/ci-framework required: true short_name: ci-framework src_dir: src/github.com/openstack-k8s-operators/ci-framework github.com/openstack-k8s-operators/dataplane-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/dataplane-operator checkout: main checkout_description: project override ref commit: c98b51bcd7fe14b85ed4cf3f5f76552b3455c5f2 name: openstack-k8s-operators/dataplane-operator required: true short_name: dataplane-operator src_dir: src/github.com/openstack-k8s-operators/dataplane-operator github.com/openstack-k8s-operators/edpm-ansible: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/edpm-ansible checkout: main checkout_description: project default branch commit: def07d8eb172b38b1a39695442f28465a1dfac35 name: openstack-k8s-operators/edpm-ansible required: true short_name: edpm-ansible src_dir: src/github.com/openstack-k8s-operators/edpm-ansible github.com/openstack-k8s-operators/infra-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/infra-operator checkout: main checkout_description: project override ref commit: 786269345f996bd262360738a1e3c6b09171f370 name: openstack-k8s-operators/infra-operator required: true short_name: infra-operator src_dir: src/github.com/openstack-k8s-operators/infra-operator github.com/openstack-k8s-operators/install_yamls: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/install_yamls checkout: main checkout_description: project default branch commit: 2f838b62fe50aacff3d514af4b502264e0a276a5 name: openstack-k8s-operators/install_yamls required: true short_name: install_yamls src_dir: src/github.com/openstack-k8s-operators/install_yamls github.com/openstack-k8s-operators/openstack-baremetal-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-baremetal-operator checkout: master checkout_description: zuul branch commit: a333e57066b1d48e41f93af68be81188290a96b3 name: openstack-k8s-operators/openstack-baremetal-operator required: true short_name: openstack-baremetal-operator src_dir: src/github.com/openstack-k8s-operators/openstack-baremetal-operator github.com/openstack-k8s-operators/openstack-must-gather: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-must-gather checkout: main checkout_description: project override ref commit: 2da49819dd6af6036aede5e4e9a080ff2c6457de name: openstack-k8s-operators/openstack-must-gather required: true short_name: openstack-must-gather src_dir: src/github.com/openstack-k8s-operators/openstack-must-gather 
github.com/openstack-k8s-operators/openstack-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-operator checkout: main checkout_description: project override ref commit: 0ad3a7b7bb522e34f164849424319945b381d95c name: openstack-k8s-operators/openstack-operator required: true short_name: openstack-operator src_dir: src/github.com/openstack-k8s-operators/openstack-operator github.com/openstack-k8s-operators/repo-setup: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/repo-setup checkout: main checkout_description: project default branch commit: 37b10946c6a10f9fa26c13305f06bfd6867e723f name: openstack-k8s-operators/repo-setup required: true short_name: repo-setup src_dir: src/github.com/openstack-k8s-operators/repo-setup opendev.org/zuul/zuul-jobs: canonical_hostname: opendev.org canonical_name: opendev.org/zuul/zuul-jobs checkout: master checkout_description: zuul branch commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 name: zuul/zuul-jobs required: true short_name: zuul-jobs src_dir: src/opendev.org/zuul/zuul-jobs review.rdoproject.org/config: canonical_hostname: review.rdoproject.org canonical_name: review.rdoproject.org/config checkout: master checkout_description: zuul branch commit: 40052f923df77143f1c9739304c4b4221346825f name: config required: true short_name: config src_dir: src/review.rdoproject.org/config ref: refs/pull/694/head resources: {} tenant: rdoproject.org timeout: 3600 topic: null voting: true zuul_change_list: - service-telemetry-operator zuul_execution_branch: main zuul_execution_canonical_name_and_path: github.com/openstack-k8s-operators/ci-framework/ci/playbooks/e2e-collect-logs.yml zuul_execution_phase: post zuul_execution_phase_index: '1' zuul_execution_trusted: 'False' zuul_log_collection: true zuul_success: 'False' zuul_will_retry: 'False' home/zuul/zuul-output/logs/ci-framework-data/artifacts/ci_script_000_fetch_openshift.sh0000644000175000017500000000032515115610211030456 0ustar zuulzuul#!/bin/bash set -euo pipefail exec > >(tee -i /home/zuul/ci-framework-data/logs/ci_script_000_fetch_openshift.log) 2>&1 oc login -u kubeadmin -p 123456789 --insecure-skip-tls-verify=true api.crc.testing:6443 home/zuul/zuul-output/logs/ci-framework-data/artifacts/ci_script_001_login_into_openshift_internal.sh0000644000175000017500000000044515115610223033261 0ustar zuulzuul#!/bin/bash set -euo pipefail exec > >(tee -i /home/zuul/ci-framework-data/logs/ci_script_001_login_into_openshift_internal.log) 2>&1 podman login -u kubeadmin -p sha256~pOUgk-9850hxxEZK6G0UMKUA3Hu8vVpuezKn-haJb_U --tls-verify=false default-route-openshift-image-registry.apps-crc.testing home/zuul/zuul-output/logs/ci-framework-data/artifacts/parameters/0000755000175000017500000000000015115611524024507 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/parameters/install-yamls-params.yml0000644000175000017500000006655415115611523031313 0ustar zuulzuulcifmw_install_yamls_defaults: ADOPTED_EXTERNAL_NETWORK: 172.21.1.0/24 ADOPTED_INTERNALAPI_NETWORK: 172.17.1.0/24 ADOPTED_STORAGEMGMT_NETWORK: 172.20.1.0/24 ADOPTED_STORAGE_NETWORK: 172.18.1.0/24 ADOPTED_TENANT_NETWORK: 172.9.1.0/24 ANSIBLEEE: config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_BRANCH: main ANSIBLEEE_CR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/config/samples/_v1beta1_ansibleee.yaml ANSIBLEEE_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-operator-index:latest ANSIBLEEE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/kuttl-test.yaml ANSIBLEEE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/test/kuttl/tests ANSIBLEEE_KUTTL_NAMESPACE: ansibleee-kuttl-tests ANSIBLEEE_REPO: https://github.com/openstack-k8s-operators/openstack-ansibleee-operator ANSIBLEE_COMMIT_HASH: '' BARBICAN: config/samples/barbican_v1beta1_barbican.yaml BARBICAN_BRANCH: main BARBICAN_COMMIT_HASH: '' BARBICAN_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/config/samples/barbican_v1beta1_barbican.yaml BARBICAN_DEPL_IMG: unused BARBICAN_IMG: quay.io/openstack-k8s-operators/barbican-operator-index:latest BARBICAN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/kuttl-test.yaml BARBICAN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/test/kuttl/tests BARBICAN_KUTTL_NAMESPACE: barbican-kuttl-tests BARBICAN_REPO: https://github.com/openstack-k8s-operators/barbican-operator.git BARBICAN_SERVICE_ENABLED: 'true' BARBICAN_SIMPLE_CRYPTO_ENCRYPTION_KEY: sE**********U= BAREMETAL_BRANCH: main BAREMETAL_COMMIT_HASH: '' BAREMETAL_IMG: quay.io/openstack-k8s-operators/openstack-baremetal-operator-index:latest BAREMETAL_OS_CONTAINER_IMG: '' BAREMETAL_OS_IMG: '' BAREMETAL_REPO: https://github.com/openstack-k8s-operators/openstack-baremetal-operator.git BAREMETAL_TIMEOUT: 20m BASH_IMG: quay.io/openstack-k8s-operators/bash:latest BGP_ASN: '64999' BGP_LEAF_1: 100.65.4.1 BGP_LEAF_2: 100.64.4.1 BGP_OVN_ROUTING: 'false' BGP_PEER_ASN: '64999' BGP_SOURCE_IP: 172.30.4.2 BGP_SOURCE_IP6: f00d:f00d:f00d:f00d:f00d:f00d:f00d:42 BMAAS_BRIDGE_IPV4_PREFIX: 172.20.1.2/24 BMAAS_BRIDGE_IPV6_PREFIX: fd00:bbbb::2/64 BMAAS_INSTANCE_DISK_SIZE: '20' BMAAS_INSTANCE_MEMORY: '4096' BMAAS_INSTANCE_NAME_PREFIX: crc-bmaas BMAAS_INSTANCE_NET_MODEL: virtio BMAAS_INSTANCE_OS_VARIANT: centos-stream9 BMAAS_INSTANCE_VCPUS: '2' BMAAS_INSTANCE_VIRT_TYPE: kvm BMAAS_IPV4: 'true' BMAAS_IPV6: 'false' BMAAS_LIBVIRT_USER: sushyemu BMAAS_METALLB_ADDRESS_POOL: 172.20.1.64/26 BMAAS_METALLB_POOL_NAME: baremetal BMAAS_NETWORK_IPV4_PREFIX: 172.20.1.1/24 BMAAS_NETWORK_IPV6_PREFIX: fd00:bbbb::1/64 BMAAS_NETWORK_NAME: crc-bmaas BMAAS_NODE_COUNT: '1' BMAAS_OCP_INSTANCE_NAME: crc BMAAS_REDFISH_PASSWORD: password BMAAS_REDFISH_USERNAME: admin BMAAS_ROUTE_LIBVIRT_NETWORKS: crc-bmaas,crc,default BMAAS_SUSHY_EMULATOR_DRIVER: libvirt BMAAS_SUSHY_EMULATOR_IMAGE: quay.io/metal3-io/sushy-tools:latest BMAAS_SUSHY_EMULATOR_NAMESPACE: sushy-emulator BMAAS_SUSHY_EMULATOR_OS_CLIENT_CONFIG_FILE: /etc/openstack/clouds.yaml BMAAS_SUSHY_EMULATOR_OS_CLOUD: openstack BMH_NAMESPACE: openstack BMO_BRANCH: release-0.9 BMO_CLEANUP: 'true' BMO_COMMIT_HASH: '' BMO_IPA_BRANCH: stable/2024.1 BMO_IRONIC_HOST: 192.168.122.10 BMO_PROVISIONING_INTERFACE: '' BMO_REPO: https://github.com/metal3-io/baremetal-operator BMO_SETUP: '' BMO_SETUP_ROUTE_REPLACE: 'true' BM_CTLPLANE_INTERFACE: enp1s0 BM_INSTANCE_MEMORY: '8192' BM_INSTANCE_NAME_PREFIX: edpm-compute-baremetal BM_INSTANCE_NAME_SUFFIX: '0' BM_NETWORK_NAME: default BM_NODE_COUNT: '1' BM_ROOT_PASSWORD: '' BM_ROOT_PASSWORD_SECRET: '' CEILOMETER_CENTRAL_DEPL_IMG: unused CEILOMETER_NOTIFICATION_DEPL_IMG: unused 
CEPH_BRANCH: release-1.15 CEPH_CLIENT: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/toolbox.yaml CEPH_COMMON: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/common.yaml CEPH_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/cluster-test.yaml CEPH_CRDS: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/crds.yaml CEPH_IMG: quay.io/ceph/demo:latest-squid CEPH_OP: /home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/operator-openshift.yaml CEPH_REPO: https://github.com/rook/rook.git CERTMANAGER_TIMEOUT: 300s CHECKOUT_FROM_OPENSTACK_REF: 'true' CINDER: config/samples/cinder_v1beta1_cinder.yaml CINDERAPI_DEPL_IMG: unused CINDERBKP_DEPL_IMG: unused CINDERSCH_DEPL_IMG: unused CINDERVOL_DEPL_IMG: unused CINDER_BRANCH: main CINDER_COMMIT_HASH: '' CINDER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/config/samples/cinder_v1beta1_cinder.yaml CINDER_IMG: quay.io/openstack-k8s-operators/cinder-operator-index:latest CINDER_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/kuttl-test.yaml CINDER_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/test/kuttl/tests CINDER_KUTTL_NAMESPACE: cinder-kuttl-tests CINDER_REPO: https://github.com/openstack-k8s-operators/cinder-operator.git CLEANUP_DIR_CMD: rm -Rf CRC_BGP_NIC_1_MAC: '52:54:00:11:11:11' CRC_BGP_NIC_2_MAC: '52:54:00:11:11:12' CRC_HTTPS_PROXY: '' CRC_HTTP_PROXY: '' CRC_STORAGE_NAMESPACE: crc-storage CRC_STORAGE_RETRIES: '3' CRC_URL: '''https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/crc/latest/crc-linux-amd64.tar.xz''' CRC_VERSION: latest DATAPLANE_ANSIBLE_SECRET: dataplane-ansible-ssh-private-key-secret DATAPLANE_ANSIBLE_USER: '' DATAPLANE_COMPUTE_IP: 192.168.122.100 DATAPLANE_CONTAINER_PREFIX: openstack DATAPLANE_CONTAINER_TAG: current-podified DATAPLANE_CUSTOM_SERVICE_RUNNER_IMG: quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest DATAPLANE_DEFAULT_GW: 192.168.122.1 DATAPLANE_EXTRA_NOVA_CONFIG_FILE: /dev/null DATAPLANE_GROWVOLS_ARGS: /=8GB /tmp=1GB /home=1GB /var=100% DATAPLANE_KUSTOMIZE_SCENARIO: preprovisioned DATAPLANE_NETWORKER_IP: 192.168.122.200 DATAPLANE_NETWORK_INTERFACE_NAME: eth0 DATAPLANE_NOVA_NFS_PATH: '' DATAPLANE_NTP_SERVER: pool.ntp.org DATAPLANE_PLAYBOOK: osp.edpm.download_cache DATAPLANE_REGISTRY_URL: quay.io/podified-antelope-centos9 DATAPLANE_RUNNER_IMG: '' DATAPLANE_SERVER_ROLE: compute DATAPLANE_SSHD_ALLOWED_RANGES: '[''192.168.122.0/24'']' DATAPLANE_TIMEOUT: 30m DATAPLANE_TLS_ENABLED: 'true' DATAPLANE_TOTAL_NETWORKER_NODES: '1' DATAPLANE_TOTAL_NODES: '1' DBSERVICE: galera DESIGNATE: config/samples/designate_v1beta1_designate.yaml DESIGNATE_BRANCH: main DESIGNATE_COMMIT_HASH: '' DESIGNATE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/config/samples/designate_v1beta1_designate.yaml DESIGNATE_IMG: quay.io/openstack-k8s-operators/designate-operator-index:latest DESIGNATE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/kuttl-test.yaml DESIGNATE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/test/kuttl/tests DESIGNATE_KUTTL_NAMESPACE: designate-kuttl-tests DESIGNATE_REPO: https://github.com/openstack-k8s-operators/designate-operator.git DNSDATA: config/samples/network_v1beta1_dnsdata.yaml DNSDATA_CR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsdata.yaml DNSMASQ: config/samples/network_v1beta1_dnsmasq.yaml DNSMASQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsmasq.yaml DNS_DEPL_IMG: unused DNS_DOMAIN: localdomain DOWNLOAD_TOOLS_SELECTION: all EDPM_ATTACH_EXTNET: 'true' EDPM_COMPUTE_ADDITIONAL_HOST_ROUTES: '''[]''' EDPM_COMPUTE_ADDITIONAL_NETWORKS: '''[]''' EDPM_COMPUTE_CELLS: '1' EDPM_COMPUTE_CEPH_ENABLED: 'true' EDPM_COMPUTE_CEPH_NOVA: 'true' EDPM_COMPUTE_DHCP_AGENT_ENABLED: 'true' EDPM_COMPUTE_SRIOV_ENABLED: 'true' EDPM_COMPUTE_SUFFIX: '0' EDPM_CONFIGURE_DEFAULT_ROUTE: 'true' EDPM_CONFIGURE_HUGEPAGES: 'false' EDPM_CONFIGURE_NETWORKING: 'true' EDPM_FIRSTBOOT_EXTRA: /tmp/edpm-firstboot-extra EDPM_NETWORKER_SUFFIX: '0' EDPM_TOTAL_NETWORKERS: '1' EDPM_TOTAL_NODES: '1' GALERA_REPLICAS: '' GENERATE_SSH_KEYS: 'true' GIT_CLONE_OPTS: '' GLANCE: config/samples/glance_v1beta1_glance.yaml GLANCEAPI_DEPL_IMG: unused GLANCE_BRANCH: main GLANCE_COMMIT_HASH: '' GLANCE_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/config/samples/glance_v1beta1_glance.yaml GLANCE_IMG: quay.io/openstack-k8s-operators/glance-operator-index:latest GLANCE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/kuttl-test.yaml GLANCE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/test/kuttl/tests GLANCE_KUTTL_NAMESPACE: glance-kuttl-tests GLANCE_REPO: https://github.com/openstack-k8s-operators/glance-operator.git HEAT: config/samples/heat_v1beta1_heat.yaml HEATAPI_DEPL_IMG: unused HEATCFNAPI_DEPL_IMG: unused HEATENGINE_DEPL_IMG: unused HEAT_AUTH_ENCRYPTION_KEY: 76**********f0 HEAT_BRANCH: main HEAT_COMMIT_HASH: '' HEAT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/config/samples/heat_v1beta1_heat.yaml HEAT_IMG: quay.io/openstack-k8s-operators/heat-operator-index:latest HEAT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/kuttl-test.yaml HEAT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/test/kuttl/tests HEAT_KUTTL_NAMESPACE: heat-kuttl-tests HEAT_REPO: https://github.com/openstack-k8s-operators/heat-operator.git HEAT_SERVICE_ENABLED: 'true' HORIZON: config/samples/horizon_v1beta1_horizon.yaml HORIZON_BRANCH: main HORIZON_COMMIT_HASH: '' HORIZON_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/config/samples/horizon_v1beta1_horizon.yaml HORIZON_DEPL_IMG: unused HORIZON_IMG: quay.io/openstack-k8s-operators/horizon-operator-index:latest HORIZON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/kuttl-test.yaml HORIZON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/test/kuttl/tests HORIZON_KUTTL_NAMESPACE: horizon-kuttl-tests HORIZON_REPO: https://github.com/openstack-k8s-operators/horizon-operator.git INFRA_BRANCH: main INFRA_COMMIT_HASH: '' INFRA_IMG: quay.io/openstack-k8s-operators/infra-operator-index:latest INFRA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/kuttl-test.yaml INFRA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/test/kuttl/tests INFRA_KUTTL_NAMESPACE: infra-kuttl-tests INFRA_REPO: https://github.com/openstack-k8s-operators/infra-operator.git INSTALL_CERT_MANAGER: 'true' INSTALL_NMSTATE: true || false 
INSTALL_NNCP: true || false INTERNALAPI_HOST_ROUTES: '' IPV6_LAB_IPV4_NETWORK_IPADDRESS: 172.30.0.1/24 IPV6_LAB_IPV6_NETWORK_IPADDRESS: fd00:abcd:abcd:fc00::1/64 IPV6_LAB_LIBVIRT_STORAGE_POOL: default IPV6_LAB_MANAGE_FIREWALLD: 'true' IPV6_LAB_NAT64_HOST_IPV4: 172.30.0.2/24 IPV6_LAB_NAT64_HOST_IPV6: fd00:abcd:abcd:fc00::2/64 IPV6_LAB_NAT64_INSTANCE_NAME: nat64-router IPV6_LAB_NAT64_IPV6_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_NAT64_TAYGA_DYNAMIC_POOL: 192.168.255.0/24 IPV6_LAB_NAT64_TAYGA_IPV4: 192.168.255.1 IPV6_LAB_NAT64_TAYGA_IPV6: fd00:abcd:abcd:fc00::3 IPV6_LAB_NAT64_TAYGA_IPV6_PREFIX: fd00:abcd:abcd:fcff::/96 IPV6_LAB_NAT64_UPDATE_PACKAGES: 'false' IPV6_LAB_NETWORK_NAME: nat64 IPV6_LAB_SNO_CLUSTER_NETWORK: fd00:abcd:0::/48 IPV6_LAB_SNO_HOST_IP: fd00:abcd:abcd:fc00::11 IPV6_LAB_SNO_HOST_PREFIX: '64' IPV6_LAB_SNO_INSTANCE_NAME: sno IPV6_LAB_SNO_MACHINE_NETWORK: fd00:abcd:abcd:fc00::/64 IPV6_LAB_SNO_OCP_MIRROR_URL: https://mirror.openshift.com/pub/openshift-v4/clients/ocp IPV6_LAB_SNO_OCP_VERSION: latest-4.14 IPV6_LAB_SNO_SERVICE_NETWORK: fd00:abcd:abcd:fc03::/112 IPV6_LAB_SSH_PUB_KEY: /home/zuul/.ssh/id_rsa.pub IPV6_LAB_WORK_DIR: /home/zuul/.ipv6lab IRONIC: config/samples/ironic_v1beta1_ironic.yaml IRONICAPI_DEPL_IMG: unused IRONICCON_DEPL_IMG: unused IRONICINS_DEPL_IMG: unused IRONICNAG_DEPL_IMG: unused IRONICPXE_DEPL_IMG: unused IRONIC_BRANCH: main IRONIC_COMMIT_HASH: '' IRONIC_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/config/samples/ironic_v1beta1_ironic.yaml IRONIC_IMAGE: quay.io/metal3-io/ironic IRONIC_IMAGE_TAG: release-24.1 IRONIC_IMG: quay.io/openstack-k8s-operators/ironic-operator-index:latest IRONIC_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/kuttl-test.yaml IRONIC_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/test/kuttl/tests IRONIC_KUTTL_NAMESPACE: ironic-kuttl-tests IRONIC_REPO: https://github.com/openstack-k8s-operators/ironic-operator.git KEYSTONEAPI: config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/config/samples/keystone_v1beta1_keystoneapi.yaml KEYSTONEAPI_DEPL_IMG: unused KEYSTONE_BRANCH: main KEYSTONE_COMMIT_HASH: '' KEYSTONE_FEDERATION_CLIENT_SECRET: CO**********6f KEYSTONE_FEDERATION_CRYPTO_PASSPHRASE: openstack KEYSTONE_IMG: quay.io/openstack-k8s-operators/keystone-operator-index:latest KEYSTONE_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/kuttl-test.yaml KEYSTONE_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/test/kuttl/tests KEYSTONE_KUTTL_NAMESPACE: keystone-kuttl-tests KEYSTONE_REPO: https://github.com/openstack-k8s-operators/keystone-operator.git KUBEADMIN_PWD: '12345678' LIBVIRT_SECRET: libvirt-secret LOKI_DEPLOY_MODE: openshift-network LOKI_DEPLOY_NAMESPACE: netobserv LOKI_DEPLOY_SIZE: 1x.demo LOKI_NAMESPACE: openshift-operators-redhat LOKI_OPERATOR_GROUP: openshift-operators-redhat-loki LOKI_SUBSCRIPTION: loki-operator LVMS_CR: '1' MANILA: config/samples/manila_v1beta1_manila.yaml MANILAAPI_DEPL_IMG: unused MANILASCH_DEPL_IMG: unused MANILASHARE_DEPL_IMG: unused MANILA_BRANCH: main MANILA_COMMIT_HASH: '' MANILA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/config/samples/manila_v1beta1_manila.yaml MANILA_IMG: quay.io/openstack-k8s-operators/manila-operator-index:latest MANILA_KUTTL_CONF: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/kuttl-test.yaml MANILA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/test/kuttl/tests MANILA_KUTTL_NAMESPACE: manila-kuttl-tests MANILA_REPO: https://github.com/openstack-k8s-operators/manila-operator.git MANILA_SERVICE_ENABLED: 'true' MARIADB: config/samples/mariadb_v1beta1_galera.yaml MARIADB_BRANCH: main MARIADB_CHAINSAW_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/config.yaml MARIADB_CHAINSAW_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/tests MARIADB_CHAINSAW_NAMESPACE: mariadb-chainsaw-tests MARIADB_COMMIT_HASH: '' MARIADB_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/config/samples/mariadb_v1beta1_galera.yaml MARIADB_DEPL_IMG: unused MARIADB_IMG: quay.io/openstack-k8s-operators/mariadb-operator-index:latest MARIADB_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/kuttl-test.yaml MARIADB_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/kuttl/tests MARIADB_KUTTL_NAMESPACE: mariadb-kuttl-tests MARIADB_REPO: https://github.com/openstack-k8s-operators/mariadb-operator.git MEMCACHED: config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/memcached_v1beta1_memcached.yaml MEMCACHED_DEPL_IMG: unused METADATA_SHARED_SECRET: '12**********42' METALLB_IPV6_POOL: fd00:aaaa::80-fd00:aaaa::90 METALLB_POOL: 192.168.122.80-192.168.122.90 MICROSHIFT: '0' NAMESPACE: openstack NETCONFIG: config/samples/network_v1beta1_netconfig.yaml NETCONFIG_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_netconfig.yaml NETCONFIG_DEPL_IMG: unused NETOBSERV_DEPLOY_NAMESPACE: netobserv NETOBSERV_NAMESPACE: openshift-netobserv-operator NETOBSERV_OPERATOR_GROUP: openshift-netobserv-operator-net NETOBSERV_SUBSCRIPTION: netobserv-operator NETWORK_BGP: 'false' NETWORK_DESIGNATE_ADDRESS_PREFIX: 172.28.0 NETWORK_DESIGNATE_EXT_ADDRESS_PREFIX: 172.50.0 NETWORK_INTERNALAPI_ADDRESS_PREFIX: 172.17.0 NETWORK_ISOLATION: 'true' NETWORK_ISOLATION_INSTANCE_NAME: crc NETWORK_ISOLATION_IPV4: 'true' NETWORK_ISOLATION_IPV4_ADDRESS: 172.16.1.1/24 NETWORK_ISOLATION_IPV4_NAT: 'true' NETWORK_ISOLATION_IPV6: 'false' NETWORK_ISOLATION_IPV6_ADDRESS: fd00:aaaa::1/64 NETWORK_ISOLATION_IP_ADDRESS: 192.168.122.10 NETWORK_ISOLATION_MAC: '52:54:00:11:11:10' NETWORK_ISOLATION_NETWORK_NAME: net-iso NETWORK_ISOLATION_NET_NAME: default NETWORK_ISOLATION_USE_DEFAULT_NETWORK: 'true' NETWORK_MTU: '1500' NETWORK_STORAGEMGMT_ADDRESS_PREFIX: 172.20.0 NETWORK_STORAGE_ADDRESS_PREFIX: 172.18.0 NETWORK_STORAGE_MACVLAN: '' NETWORK_TENANT_ADDRESS_PREFIX: 172.19.0 NETWORK_VLAN_START: '20' NETWORK_VLAN_STEP: '1' NEUTRONAPI: config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/config/samples/neutron_v1beta1_neutronapi.yaml NEUTRONAPI_DEPL_IMG: unused NEUTRON_BRANCH: main NEUTRON_COMMIT_HASH: '' NEUTRON_IMG: quay.io/openstack-k8s-operators/neutron-operator-index:latest NEUTRON_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/kuttl-test.yaml NEUTRON_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/test/kuttl/tests NEUTRON_KUTTL_NAMESPACE: 
neutron-kuttl-tests NEUTRON_REPO: https://github.com/openstack-k8s-operators/neutron-operator.git NFS_HOME: /home/nfs NMSTATE_NAMESPACE: openshift-nmstate NMSTATE_OPERATOR_GROUP: openshift-nmstate-tn6k8 NMSTATE_SUBSCRIPTION: kubernetes-nmstate-operator NNCP_ADDITIONAL_HOST_ROUTES: '' NNCP_BGP_1_INTERFACE: enp7s0 NNCP_BGP_1_IP_ADDRESS: 100.65.4.2 NNCP_BGP_2_INTERFACE: enp8s0 NNCP_BGP_2_IP_ADDRESS: 100.64.4.2 NNCP_BRIDGE: ospbr NNCP_CLEANUP_TIMEOUT: 120s NNCP_CTLPLANE_IPV6_ADDRESS_PREFIX: 'fd00:aaaa::' NNCP_CTLPLANE_IPV6_ADDRESS_SUFFIX: '10' NNCP_CTLPLANE_IP_ADDRESS_PREFIX: 192.168.122 NNCP_CTLPLANE_IP_ADDRESS_SUFFIX: '10' NNCP_DNS_SERVER: 192.168.122.1 NNCP_DNS_SERVER_IPV6: fd00:aaaa::1 NNCP_GATEWAY: 192.168.122.1 NNCP_GATEWAY_IPV6: fd00:aaaa::1 NNCP_INTERFACE: enp6s0 NNCP_NODES: '' NNCP_TIMEOUT: 240s NOVA: config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_BRANCH: main NOVA_COMMIT_HASH: '' NOVA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/nova-operator/config/samples/nova_v1beta1_nova_collapsed_cell.yaml NOVA_IMG: quay.io/openstack-k8s-operators/nova-operator-index:latest NOVA_REPO: https://github.com/openstack-k8s-operators/nova-operator.git NUMBER_OF_INSTANCES: '1' OCP_NETWORK_NAME: crc OCTAVIA: config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_BRANCH: main OCTAVIA_COMMIT_HASH: '' OCTAVIA_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/config/samples/octavia_v1beta1_octavia.yaml OCTAVIA_IMG: quay.io/openstack-k8s-operators/octavia-operator-index:latest OCTAVIA_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/kuttl-test.yaml OCTAVIA_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/test/kuttl/tests OCTAVIA_KUTTL_NAMESPACE: octavia-kuttl-tests OCTAVIA_REPO: https://github.com/openstack-k8s-operators/octavia-operator.git OKD: 'false' OPENSTACK_BRANCH: main OPENSTACK_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-bundle:latest OPENSTACK_COMMIT_HASH: '' OPENSTACK_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_CRDS_DIR: openstack_crds OPENSTACK_CTLPLANE: config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml OPENSTACK_IMG: quay.io/openstack-k8s-operators/openstack-operator-index:latest OPENSTACK_K8S_BRANCH: main OPENSTACK_K8S_TAG: latest OPENSTACK_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/kuttl-test.yaml OPENSTACK_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/test/kuttl/tests OPENSTACK_KUTTL_NAMESPACE: openstack-kuttl-tests OPENSTACK_NEUTRON_CUSTOM_CONF: '' OPENSTACK_REPO: https://github.com/openstack-k8s-operators/openstack-operator.git OPENSTACK_STORAGE_BUNDLE_IMG: quay.io/openstack-k8s-operators/openstack-operator-storage-bundle:latest OPERATOR_BASE_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator OPERATOR_CHANNEL: '' OPERATOR_NAMESPACE: openstack-operators OPERATOR_SOURCE: '' OPERATOR_SOURCE_NAMESPACE: '' OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm OVNCONTROLLER: config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovncontroller.yaml OVNCONTROLLER_NMAP: 'true' OVNDBS: config/samples/ovn_v1beta1_ovndbcluster.yaml OVNDBS_CR: 
/home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovndbcluster.yaml OVNNORTHD: config/samples/ovn_v1beta1_ovnnorthd.yaml OVNNORTHD_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovnnorthd.yaml OVN_BRANCH: main OVN_COMMIT_HASH: '' OVN_IMG: quay.io/openstack-k8s-operators/ovn-operator-index:latest OVN_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/kuttl-test.yaml OVN_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/test/kuttl/tests OVN_KUTTL_NAMESPACE: ovn-kuttl-tests OVN_REPO: https://github.com/openstack-k8s-operators/ovn-operator.git PASSWORD: '12**********78' PLACEMENTAPI: config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/config/samples/placement_v1beta1_placementapi.yaml PLACEMENTAPI_DEPL_IMG: unused PLACEMENT_BRANCH: main PLACEMENT_COMMIT_HASH: '' PLACEMENT_IMG: quay.io/openstack-k8s-operators/placement-operator-index:latest PLACEMENT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/kuttl-test.yaml PLACEMENT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/test/kuttl/tests PLACEMENT_KUTTL_NAMESPACE: placement-kuttl-tests PLACEMENT_REPO: https://github.com/openstack-k8s-operators/placement-operator.git PULL_SECRET: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/pull-secret.txt RABBITMQ: docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_BRANCH: patches RABBITMQ_COMMIT_HASH: '' RABBITMQ_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/rabbitmq-operator/docs/examples/default-security-context/rabbitmq.yaml RABBITMQ_DEPL_IMG: unused RABBITMQ_IMG: quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest RABBITMQ_REPO: https://github.com/openstack-k8s-operators/rabbitmq-cluster-operator.git REDHAT_OPERATORS: 'false' REDIS: config/samples/redis_v1beta1_redis.yaml REDIS_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator-redis/config/samples/redis_v1beta1_redis.yaml REDIS_DEPL_IMG: unused RH_REGISTRY_PWD: '' RH_REGISTRY_USER: '' SECRET: os**********et SG_CORE_DEPL_IMG: unused STANDALONE_COMPUTE_DRIVER: libvirt STANDALONE_EXTERNAL_NET_PREFFIX: 172.21.0 STANDALONE_INTERNALAPI_NET_PREFIX: 172.17.0 STANDALONE_STORAGEMGMT_NET_PREFIX: 172.20.0 STANDALONE_STORAGE_NET_PREFIX: 172.18.0 STANDALONE_TENANT_NET_PREFIX: 172.19.0 STORAGEMGMT_HOST_ROUTES: '' STORAGE_CLASS: local-storage STORAGE_HOST_ROUTES: '' SWIFT: config/samples/swift_v1beta1_swift.yaml SWIFT_BRANCH: main SWIFT_COMMIT_HASH: '' SWIFT_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/config/samples/swift_v1beta1_swift.yaml SWIFT_IMG: quay.io/openstack-k8s-operators/swift-operator-index:latest SWIFT_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/kuttl-test.yaml SWIFT_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests SWIFT_KUTTL_NAMESPACE: swift-kuttl-tests SWIFT_REPO: https://github.com/openstack-k8s-operators/swift-operator.git TELEMETRY: config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_BRANCH: main TELEMETRY_COMMIT_HASH: '' TELEMETRY_CR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/config/samples/telemetry_v1beta1_telemetry.yaml TELEMETRY_IMG: 
quay.io/openstack-k8s-operators/telemetry-operator-index:latest TELEMETRY_KUTTL_BASEDIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator TELEMETRY_KUTTL_CONF: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/kuttl-test.yaml TELEMETRY_KUTTL_DIR: /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/test/kuttl/suites TELEMETRY_KUTTL_NAMESPACE: telemetry-kuttl-tests TELEMETRY_KUTTL_RELPATH: test/kuttl/suites TELEMETRY_REPO: https://github.com/openstack-k8s-operators/telemetry-operator.git TENANT_HOST_ROUTES: '' TIMEOUT: 300s TLS_ENABLED: 'false' tripleo_deploy: 'export REGISTRY_PWD:' cifmw_install_yamls_environment: CHECKOUT_FROM_OPENSTACK_REF: 'true' KUBECONFIG: /home/zuul/.crc/machines/crc/kubeconfig OPENSTACK_K8S_BRANCH: main OUT: /home/zuul/ci-framework-data/artifacts/manifests OUTPUT_DIR: /home/zuul/ci-framework-data/artifacts/edpm home/zuul/zuul-output/logs/ci-framework-data/artifacts/parameters/custom-params.yml0000644000175000017500000000215315115611523030025 0ustar zuulzuulcifmw_architecture_repo: /home/zuul/src/github.com/openstack-k8s-operators/architecture cifmw_architecture_repo_relative: src/github.com/openstack-k8s-operators/architecture cifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_installyamls_repos: /home/zuul/src/github.com/openstack-k8s-operators/install_yamls cifmw_installyamls_repos_relative: src/github.com/openstack-k8s-operators/install_yamls cifmw_openshift_api: api.crc.testing:6443 cifmw_openshift_password: '12**********89' cifmw_openshift_skip_tls_verify: true cifmw_openshift_user: kubeadmin cifmw_path: /home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:~/.crc/bin:~/.crc/bin/oc:~/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin cifmw_project_dir: src/github.com/openstack-k8s-operators/ci-framework cifmw_project_dir_absolute: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework cifmw_run_tests: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller home/zuul/zuul-output/logs/ci-framework-data/artifacts/parameters/zuul-params.yml0000644000175000017500000004737215115611524027527 0ustar zuulzuulcifmw_artifacts_crc_sshkey: ~/.ssh/id_cifw cifmw_deploy_edpm: false cifmw_dlrn_report_result: false cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' cifmw_openshift_api: api.crc.testing:6443 cifmw_openshift_kubeconfig: '{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig' cifmw_openshift_password: '12**********89' cifmw_openshift_skip_tls_verify: true cifmw_openshift_user: kubeadmin cifmw_run_tests: false cifmw_use_libvirt: false cifmw_zuul_target_host: controller crc_ci_bootstrap_cloud_name: '{{ nodepool.cloud | replace(''-nodepool-tripleo'','''') }}' crc_ci_bootstrap_networking: instances: controller: networks: default: ip: 192.168.122.11 crc: networks: default: ip: 192.168.122.10 internal-api: ip: 172.17.0.5 storage: ip: 172.18.0.5 tenant: ip: 172.19.0.5 networks: default: mtu: 1500 range: 192.168.122.0/24 internal-api: range: 172.17.0.0/24 vlan: 20 storage: range: 172.18.0.0/24 vlan: 21 tenant: range: 172.19.0.0/24 vlan: 22 enable_ramdisk: true podified_validation: true push_registry: quay.rdoproject.org quay_login_secret_name: quay_nextgen_zuulgithubci registry_login_enabled: true scenario: catalog_deploy zuul: _inheritance_path: - '' - 
'' - '' - '' - '' - '' - '' - '' - '' ansible_version: '8' attempts: 1 branch: master build: fdae556768574d6f9092d7162dc9ae0f build_refs: - branch: master change: '694' change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null buildset: c405b24f52df4ff1a39b37dcfc476a60 buildset_refs: - branch: master change: '694' change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null change: '694' change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 child_jobs: [] commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb event_id: 0add7250-d45c-11f0-86cc-0eee4913030a executor: hostname: ze03.softwarefactory-project.io inventory_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/ansible/inventory.yaml log_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/logs result_data_file: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/results.json src_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work/src work_root: /var/lib/zuul/builds/fdae556768574d6f9092d7162dc9ae0f/work items: - branch: master change: '694' change_url: https://github.com/infrawatch/service-telemetry-operator/pull/694 commit_id: dee1e9b260d30a0e04e6122a214cac385c42d9bb patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator topic: null job: stf-crc-ocp_420-catalog_deploy jobtags: [] max_attempts: 1 message: QWRkIE9DUCA0LjIwIGpvYnMKCkFkZCBqb2IgZGVmaW5pdGlvbnMgdXNpbmcgY3JjLWNsb3VkLW9jcC00LTIwLTEtM3hsIGFzIGJhc2UgaW1hZ2UNCg0KQ2xvc2VzOiBPU1BSSC0yMTg4MQ== patchset: dee1e9b260d30a0e04e6122a214cac385c42d9bb pipeline: github-check playbook_context: playbook_projects: trusted/project_0/review.rdoproject.org/config: canonical_name: review.rdoproject.org/config checkout: master commit: 40052f923df77143f1c9739304c4b4221346825f trusted/project_1/opendev.org/zuul/zuul-jobs: canonical_name: opendev.org/zuul/zuul-jobs checkout: master commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 trusted/project_2/review.rdoproject.org/rdo-jobs: canonical_name: review.rdoproject.org/rdo-jobs checkout: master commit: 9df4e7d5b028e976203d64479f9b7a76c1c95a24 trusted/project_3/github.com/openstack-k8s-operators/ci-framework: canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 untrusted/project_0/github.com/openstack-k8s-operators/ci-framework: canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 untrusted/project_1/review.rdoproject.org/config: canonical_name: 
review.rdoproject.org/config checkout: master commit: 40052f923df77143f1c9739304c4b4221346825f untrusted/project_2/opendev.org/zuul/zuul-jobs: canonical_name: opendev.org/zuul/zuul-jobs checkout: master commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 untrusted/project_3/review.rdoproject.org/rdo-jobs: canonical_name: review.rdoproject.org/rdo-jobs checkout: master commit: 9df4e7d5b028e976203d64479f9b7a76c1c95a24 untrusted/project_4/github.com/infrawatch/service-telemetry-operator: canonical_name: github.com/infrawatch/service-telemetry-operator checkout: master commit: dee1e9b260d30a0e04e6122a214cac385c42d9bb playbooks: - path: untrusted/project_4/github.com/infrawatch/service-telemetry-operator/ci/deploy_stf.yml roles: - checkout: master checkout_description: playbook branch link_name: ansible/playbook_0/role_0/service-telemetry-operator link_target: untrusted/project_4/github.com/infrawatch/service-telemetry-operator role_path: ansible/playbook_0/role_0/service-telemetry-operator/roles - checkout: main checkout_description: project override ref link_name: ansible/playbook_0/role_1/ci-framework link_target: untrusted/project_0/github.com/openstack-k8s-operators/ci-framework role_path: ansible/playbook_0/role_1/ci-framework/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_2/config link_target: untrusted/project_1/review.rdoproject.org/config role_path: ansible/playbook_0/role_2/config/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_3/zuul-jobs link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs role_path: ansible/playbook_0/role_3/zuul-jobs/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_0/role_4/rdo-jobs link_target: untrusted/project_3/review.rdoproject.org/rdo-jobs role_path: ansible/playbook_0/role_4/rdo-jobs/roles - path: untrusted/project_4/github.com/infrawatch/service-telemetry-operator/ci/test_stf.yml roles: - checkout: master checkout_description: playbook branch link_name: ansible/playbook_1/role_0/service-telemetry-operator link_target: untrusted/project_4/github.com/infrawatch/service-telemetry-operator role_path: ansible/playbook_1/role_0/service-telemetry-operator/roles - checkout: main checkout_description: project override ref link_name: ansible/playbook_1/role_1/ci-framework link_target: untrusted/project_0/github.com/openstack-k8s-operators/ci-framework role_path: ansible/playbook_1/role_1/ci-framework/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_2/config link_target: untrusted/project_1/review.rdoproject.org/config role_path: ansible/playbook_1/role_2/config/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_3/zuul-jobs link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs role_path: ansible/playbook_1/role_3/zuul-jobs/roles - checkout: master checkout_description: zuul branch link_name: ansible/playbook_1/role_4/rdo-jobs link_target: untrusted/project_3/review.rdoproject.org/rdo-jobs role_path: ansible/playbook_1/role_4/rdo-jobs/roles post_review: false project: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator name: infrawatch/service-telemetry-operator short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator projects: github.com/crc-org/crc-cloud: canonical_hostname: github.com canonical_name: github.com/crc-org/crc-cloud checkout: main 
checkout_description: project override ref commit: 1c985328b5b8cdf9dc083e0c7b3abae12c7c8c53 name: crc-org/crc-cloud required: true short_name: crc-cloud src_dir: src/github.com/crc-org/crc-cloud github.com/infrawatch/prometheus-webhook-snmp: canonical_hostname: github.com canonical_name: github.com/infrawatch/prometheus-webhook-snmp checkout: master checkout_description: zuul branch commit: 3959c53b2613d03d066cb1b2fe5bdae8633ae895 name: infrawatch/prometheus-webhook-snmp required: true short_name: prometheus-webhook-snmp src_dir: src/github.com/infrawatch/prometheus-webhook-snmp github.com/infrawatch/service-telemetry-operator: canonical_hostname: github.com canonical_name: github.com/infrawatch/service-telemetry-operator checkout: master checkout_description: zuul branch commit: dee1e9b260d30a0e04e6122a214cac385c42d9bb name: infrawatch/service-telemetry-operator required: true short_name: service-telemetry-operator src_dir: src/github.com/infrawatch/service-telemetry-operator github.com/infrawatch/sg-bridge: canonical_hostname: github.com canonical_name: github.com/infrawatch/sg-bridge checkout: master checkout_description: zuul branch commit: bab11fba86ad0c21cb35e12b56bf086a3332f1d2 name: infrawatch/sg-bridge required: true short_name: sg-bridge src_dir: src/github.com/infrawatch/sg-bridge github.com/infrawatch/sg-core: canonical_hostname: github.com canonical_name: github.com/infrawatch/sg-core checkout: master checkout_description: zuul branch commit: 5a4aece11fea9f71ce7515d11e1e7f0eae97eea6 name: infrawatch/sg-core required: true short_name: sg-core src_dir: src/github.com/infrawatch/sg-core github.com/infrawatch/smart-gateway-operator: canonical_hostname: github.com canonical_name: github.com/infrawatch/smart-gateway-operator checkout: master checkout_description: zuul branch commit: 2ff5b96b6254418d20a509188eea72ab2c77839c name: infrawatch/smart-gateway-operator required: true short_name: smart-gateway-operator src_dir: src/github.com/infrawatch/smart-gateway-operator github.com/openstack-k8s-operators/ci-framework: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/ci-framework checkout: main checkout_description: project override ref commit: 33d5122f3f7842d64b00cd565ea3ca62d8afe3c4 name: openstack-k8s-operators/ci-framework required: true short_name: ci-framework src_dir: src/github.com/openstack-k8s-operators/ci-framework github.com/openstack-k8s-operators/dataplane-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/dataplane-operator checkout: main checkout_description: project override ref commit: c98b51bcd7fe14b85ed4cf3f5f76552b3455c5f2 name: openstack-k8s-operators/dataplane-operator required: true short_name: dataplane-operator src_dir: src/github.com/openstack-k8s-operators/dataplane-operator github.com/openstack-k8s-operators/edpm-ansible: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/edpm-ansible checkout: main checkout_description: project default branch commit: def07d8eb172b38b1a39695442f28465a1dfac35 name: openstack-k8s-operators/edpm-ansible required: true short_name: edpm-ansible src_dir: src/github.com/openstack-k8s-operators/edpm-ansible github.com/openstack-k8s-operators/infra-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/infra-operator checkout: main checkout_description: project override ref commit: 786269345f996bd262360738a1e3c6b09171f370 name: openstack-k8s-operators/infra-operator required: true short_name: 
infra-operator src_dir: src/github.com/openstack-k8s-operators/infra-operator github.com/openstack-k8s-operators/install_yamls: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/install_yamls checkout: main checkout_description: project default branch commit: 2f838b62fe50aacff3d514af4b502264e0a276a5 name: openstack-k8s-operators/install_yamls required: true short_name: install_yamls src_dir: src/github.com/openstack-k8s-operators/install_yamls github.com/openstack-k8s-operators/openstack-baremetal-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-baremetal-operator checkout: master checkout_description: zuul branch commit: a333e57066b1d48e41f93af68be81188290a96b3 name: openstack-k8s-operators/openstack-baremetal-operator required: true short_name: openstack-baremetal-operator src_dir: src/github.com/openstack-k8s-operators/openstack-baremetal-operator github.com/openstack-k8s-operators/openstack-must-gather: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-must-gather checkout: main checkout_description: project override ref commit: 2da49819dd6af6036aede5e4e9a080ff2c6457de name: openstack-k8s-operators/openstack-must-gather required: true short_name: openstack-must-gather src_dir: src/github.com/openstack-k8s-operators/openstack-must-gather github.com/openstack-k8s-operators/openstack-operator: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/openstack-operator checkout: main checkout_description: project override ref commit: 0ad3a7b7bb522e34f164849424319945b381d95c name: openstack-k8s-operators/openstack-operator required: true short_name: openstack-operator src_dir: src/github.com/openstack-k8s-operators/openstack-operator github.com/openstack-k8s-operators/repo-setup: canonical_hostname: github.com canonical_name: github.com/openstack-k8s-operators/repo-setup checkout: main checkout_description: project default branch commit: 37b10946c6a10f9fa26c13305f06bfd6867e723f name: openstack-k8s-operators/repo-setup required: true short_name: repo-setup src_dir: src/github.com/openstack-k8s-operators/repo-setup opendev.org/zuul/zuul-jobs: canonical_hostname: opendev.org canonical_name: opendev.org/zuul/zuul-jobs checkout: master checkout_description: zuul branch commit: 935cfd422c2237f4863cdcdf5fb201bce8c32a67 name: zuul/zuul-jobs required: true short_name: zuul-jobs src_dir: src/opendev.org/zuul/zuul-jobs review.rdoproject.org/config: canonical_hostname: review.rdoproject.org canonical_name: review.rdoproject.org/config checkout: master checkout_description: zuul branch commit: 40052f923df77143f1c9739304c4b4221346825f name: config required: true short_name: config src_dir: src/review.rdoproject.org/config ref: refs/pull/694/head resources: {} tenant: rdoproject.org timeout: 3600 topic: null voting: true zuul_log_collection: true home/zuul/zuul-output/logs/ci-framework-data/artifacts/parameters/openshift-login-params.yml0000644000175000017500000000044015115610214031611 0ustar zuulzuulcifmw_openshift_api: https://api.crc.testing:6443 cifmw_openshift_context: default/api-crc-testing:6443/kubeadmin cifmw_openshift_kubeconfig: /home/zuul/.crc/machines/crc/kubeconfig cifmw_openshift_token: sha256~pOUgk-9850hxxEZK6G0UMKUA3Hu8vVpuezKn-haJb_U cifmw_openshift_user: kubeadmin home/zuul/zuul-output/logs/ci-framework-data/artifacts/manifests/0000755000175000017500000000000015115610201024324 5ustar 
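The openshift-login-params.yml artifact above records how the ci-framework authenticated against the CRC cluster. As a rough sketch only (assuming the oc CLI is available on the controller; the API URL, kubeconfig path, and token are simply the values captured in that file), the same login could be reproduced by hand:

    # Sketch: reproduce the recorded ci-framework login manually (assumes oc is installed;
    # all values below are the ones written to openshift-login-params.yml).
    export KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig
    oc login https://api.crc.testing:6443 \
        --token='sha256~pOUgk-9850hxxEZK6G0UMKUA3Hu8vVpuezKn-haJb_U'
    oc whoami   # confirm the token is accepted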
zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/manifests/openstack/0000755000175000017500000000000015115610201026313 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/manifests/openstack/cr/0000755000175000017500000000000015115610201026717 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/ansible_facts.2025-12-08_18-05/0000777000175000017500000000000015115611531026731 5ustar zuulzuul././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/ansible_facts.2025-12-08_18-05/ansible_facts_cache/home/zuul/zuul-output/logs/ci-framework-data/artifacts/ansible_facts.2025-12-08_18-05/ansible_facts_0000755000175000017500000000000015115611531031602 5ustar zuulzuul././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/ansible_facts.2025-12-08_18-05/ansible_facts_cache/localhosthome/zuul/zuul-output/logs/ci-framework-data/artifacts/ansible_facts.2025-12-08_18-05/ansible_facts_0000644000175000017500000016060715115611531031616 0ustar zuulzuul{ "_ansible_facts_gathered": true, "ansible_all_ipv4_addresses": [ "192.168.122.11", "38.102.83.251" ], "ansible_all_ipv6_addresses": [ "fe80::f816:3eff:fe97:c9c3" ], "ansible_apparmor": { "status": "disabled" }, "ansible_architecture": "x86_64", "ansible_bios_date": "04/01/2014", "ansible_bios_vendor": "SeaBIOS", "ansible_bios_version": "1.15.0-1", "ansible_board_asset_tag": "NA", "ansible_board_name": "NA", "ansible_board_serial": "NA", "ansible_board_vendor": "NA", "ansible_board_version": "NA", "ansible_chassis_asset_tag": "NA", "ansible_chassis_serial": "NA", "ansible_chassis_vendor": "QEMU", "ansible_chassis_version": "pc-i440fx-6.2", "ansible_cmdline": { "BOOT_IMAGE": "(hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64", "console": "ttyS0,115200n8", "crashkernel": "1G-2G:192M,2G-64G:256M,64G-:512M", "net.ifnames": "0", "no_timer_check": true, "ro": true, "root": "UUID=fcf6b761-831a-48a7-9f5f-068b5063763f" }, "ansible_date_time": { "date": "2025-12-08", "day": "08", "epoch": "1765216329", "epoch_int": "1765216329", "hour": "17", "iso8601": "2025-12-08T17:52:09Z", "iso8601_basic": "20251208T175209942132", "iso8601_basic_short": "20251208T175209", "iso8601_micro": "2025-12-08T17:52:09.942132Z", "minute": "52", "month": "12", "second": "09", "time": "17:52:09", "tz": "UTC", "tz_dst": "UTC", "tz_offset": "+0000", "weekday": "Monday", "weekday_number": "1", "weeknumber": "49", "year": "2025" }, "ansible_default_ipv4": { "address": "38.102.83.251", "alias": "eth0", "broadcast": "38.102.83.255", "gateway": "38.102.83.1", "interface": "eth0", "macaddress": "fa:16:3e:97:c9:c3", "mtu": 1500, "netmask": "255.255.255.0", "network": "38.102.83.0", "prefix": "24", "type": "ether" }, "ansible_default_ipv6": {}, "ansible_device_links": { "ids": { "sr0": [ "ata-QEMU_DVD-ROM_QM00001" ] }, "labels": { "sr0": [ "config-2" ] }, "masters": {}, "uuids": { "sr0": [ "2025-12-08-17-34-40-00" ], "vda1": [ "fcf6b761-831a-48a7-9f5f-068b5063763f" ] } }, "ansible_devices": { "sr0": { "holders": [], "host": "", "links": { "ids": [ "ata-QEMU_DVD-ROM_QM00001" ], "labels": [ "config-2" ], "masters": [], "uuids": [ "2025-12-08-17-34-40-00" ] }, "model": "QEMU DVD-ROM", "partitions": {}, "removable": "1", "rotational": "1", "sas_address": null, "sas_device_handle": null, "scheduler_mode": "mq-deadline", "sectors": "964", "sectorsize": "2048", "size": "482.00 KB", "support_discard": "2048", 
"vendor": "QEMU", "virtual": 1 }, "vda": { "holders": [], "host": "", "links": { "ids": [], "labels": [], "masters": [], "uuids": [] }, "model": null, "partitions": { "vda1": { "holders": [], "links": { "ids": [], "labels": [], "masters": [], "uuids": [ "fcf6b761-831a-48a7-9f5f-068b5063763f" ] }, "sectors": "167770079", "sectorsize": 512, "size": "80.00 GB", "start": "2048", "uuid": "fcf6b761-831a-48a7-9f5f-068b5063763f" } }, "removable": "0", "rotational": "1", "sas_address": null, "sas_device_handle": null, "scheduler_mode": "none", "sectors": "167772160", "sectorsize": "512", "size": "80.00 GB", "support_discard": "512", "vendor": "0x1af4", "virtual": 1 } }, "ansible_distribution": "CentOS", "ansible_distribution_file_parsed": true, "ansible_distribution_file_path": "/etc/centos-release", "ansible_distribution_file_variety": "CentOS", "ansible_distribution_major_version": "9", "ansible_distribution_release": "Stream", "ansible_distribution_version": "9", "ansible_dns": { "nameservers": [ "192.168.122.10", "199.204.44.24", "199.204.47.54" ] }, "ansible_domain": "", "ansible_effective_group_id": 1000, "ansible_effective_user_id": 1000, "ansible_env": { "BASH_FUNC_which%%": "() { ( alias;\n eval ${which_declare} ) | /usr/bin/which --tty-only --read-alias --read-functions --show-tilde --show-dot $@\n}", "DBUS_SESSION_BUS_ADDRESS": "unix:path=/run/user/1000/bus", "DEBUGINFOD_IMA_CERT_PATH": "/etc/keys/ima:", "DEBUGINFOD_URLS": "https://debuginfod.centos.org/ ", "HOME": "/home/zuul", "LANG": "en_US.UTF-8", "LESSOPEN": "||/usr/bin/lesspipe.sh %s", "LOGNAME": "zuul", "MOTD_SHOWN": "pam", "PATH": "~/.crc/bin:~/.crc/bin/oc:~/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin", "PWD": "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks", "SELINUX_LEVEL_REQUESTED": "", "SELINUX_ROLE_REQUESTED": "", "SELINUX_USE_CURRENT_RANGE": "", "SHELL": "/bin/bash", "SHLVL": "2", "SSH_CLIENT": "38.102.83.114 55104 22", "SSH_CONNECTION": "38.102.83.114 55104 38.102.83.251 22", "USER": "zuul", "XDG_RUNTIME_DIR": "/run/user/1000", "XDG_SESSION_CLASS": "user", "XDG_SESSION_ID": "9", "XDG_SESSION_TYPE": "tty", "_": "/usr/bin/python3", "which_declare": "declare -f" }, "ansible_eth0": { "active": true, "device": "eth0", "features": { "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "generic_receive_offload": "on", "generic_segmentation_offload": "on", "highdma": "on [fixed]", "hsr_dup_offload": "off [fixed]", "hsr_fwd_offload": "off [fixed]", "hsr_tag_ins_offload": "off [fixed]", "hsr_tag_rm_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "l2_fwd_offload": "off [fixed]", "large_receive_offload": "off [fixed]", "loopback": "off [fixed]", "macsec_hw_offload": "off [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "rx_all": "off [fixed]", "rx_checksumming": "on [fixed]", "rx_fcs": "off [fixed]", "rx_gro_hw": "on", "rx_gro_list": "off", "rx_udp_gro_forwarding": "off", "rx_udp_tunnel_port_offload": "off [fixed]", "rx_vlan_filter": "on [fixed]", "rx_vlan_offload": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "scatter_gather": "on", "tcp_segmentation_offload": "on", "tls_hw_record": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_ip_generic": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_sctp": "off [fixed]", 
"tx_checksumming": "on", "tx_esp_segmentation": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_gre_segmentation": "off [fixed]", "tx_gso_list": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_gso_robust": "on [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_nocache_copy": "off", "tx_scatter_gather": "on", "tx_scatter_gather_fraglist": "off [fixed]", "tx_sctp_segmentation": "off [fixed]", "tx_tcp6_segmentation": "on", "tx_tcp_ecn_segmentation": "on", "tx_tcp_mangleid_segmentation": "off", "tx_tcp_segmentation": "on", "tx_tunnel_remcsum_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_vlan_offload": "off [fixed]", "tx_vlan_stag_hw_insert": "off [fixed]", "vlan_challenged": "off [fixed]" }, "hw_timestamp_filters": [], "ipv4": { "address": "38.102.83.251", "broadcast": "38.102.83.255", "netmask": "255.255.255.0", "network": "38.102.83.0", "prefix": "24" }, "ipv6": [ { "address": "fe80::f816:3eff:fe97:c9c3", "prefix": "64", "scope": "link" } ], "macaddress": "fa:16:3e:97:c9:c3", "module": "virtio_net", "mtu": 1500, "pciid": "virtio1", "promisc": false, "speed": -1, "timestamping": [], "type": "ether" }, "ansible_eth1": { "active": true, "device": "eth1", "features": { "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "generic_receive_offload": "on", "generic_segmentation_offload": "on", "highdma": "on [fixed]", "hsr_dup_offload": "off [fixed]", "hsr_fwd_offload": "off [fixed]", "hsr_tag_ins_offload": "off [fixed]", "hsr_tag_rm_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "l2_fwd_offload": "off [fixed]", "large_receive_offload": "off [fixed]", "loopback": "off [fixed]", "macsec_hw_offload": "off [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "rx_all": "off [fixed]", "rx_checksumming": "on [fixed]", "rx_fcs": "off [fixed]", "rx_gro_hw": "on", "rx_gro_list": "off", "rx_udp_gro_forwarding": "off", "rx_udp_tunnel_port_offload": "off [fixed]", "rx_vlan_filter": "on [fixed]", "rx_vlan_offload": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "scatter_gather": "on", "tcp_segmentation_offload": "on", "tls_hw_record": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_ip_generic": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_sctp": "off [fixed]", "tx_checksumming": "on", "tx_esp_segmentation": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_gre_segmentation": "off [fixed]", "tx_gso_list": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_gso_robust": "on [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_nocache_copy": "off", "tx_scatter_gather": "on", "tx_scatter_gather_fraglist": "off [fixed]", "tx_sctp_segmentation": "off [fixed]", "tx_tcp6_segmentation": "on", "tx_tcp_ecn_segmentation": "on", "tx_tcp_mangleid_segmentation": "off", "tx_tcp_segmentation": "on", "tx_tunnel_remcsum_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_vlan_offload": "off [fixed]", "tx_vlan_stag_hw_insert": "off [fixed]", "vlan_challenged": "off [fixed]" }, 
"hw_timestamp_filters": [], "ipv4": { "address": "192.168.122.11", "broadcast": "192.168.122.255", "netmask": "255.255.255.0", "network": "192.168.122.0", "prefix": "24" }, "macaddress": "fa:16:3e:6a:de:3b", "module": "virtio_net", "mtu": 1500, "pciid": "virtio5", "promisc": false, "speed": -1, "timestamping": [], "type": "ether" }, "ansible_fibre_channel_wwn": [], "ansible_fips": false, "ansible_form_factor": "Other", "ansible_fqdn": "controller", "ansible_hostname": "controller", "ansible_hostnqn": "nqn.2014-08.org.nvmexpress:uuid:bf3e0a14-a5f8-4123-aa26-e7cad37b879a", "ansible_interfaces": [ "lo", "eth1", "eth0" ], "ansible_is_chroot": false, "ansible_iscsi_iqn": "", "ansible_kernel": "5.14.0-645.el9.x86_64", "ansible_kernel_version": "#1 SMP PREEMPT_DYNAMIC Fri Nov 28 14:01:17 UTC 2025", "ansible_lo": { "active": true, "device": "lo", "features": { "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "generic_receive_offload": "on", "generic_segmentation_offload": "on", "highdma": "on [fixed]", "hsr_dup_offload": "off [fixed]", "hsr_fwd_offload": "off [fixed]", "hsr_tag_ins_offload": "off [fixed]", "hsr_tag_rm_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "l2_fwd_offload": "off [fixed]", "large_receive_offload": "off [fixed]", "loopback": "on [fixed]", "macsec_hw_offload": "off [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "rx_all": "off [fixed]", "rx_checksumming": "on [fixed]", "rx_fcs": "off [fixed]", "rx_gro_hw": "off [fixed]", "rx_gro_list": "off", "rx_udp_gro_forwarding": "off", "rx_udp_tunnel_port_offload": "off [fixed]", "rx_vlan_filter": "off [fixed]", "rx_vlan_offload": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "scatter_gather": "on", "tcp_segmentation_offload": "on", "tls_hw_record": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_ip_generic": "on [fixed]", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_sctp": "on [fixed]", "tx_checksumming": "on", "tx_esp_segmentation": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_gre_segmentation": "off [fixed]", "tx_gso_list": "on", "tx_gso_partial": "off [fixed]", "tx_gso_robust": "off [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_nocache_copy": "off [fixed]", "tx_scatter_gather": "on [fixed]", "tx_scatter_gather_fraglist": "on [fixed]", "tx_sctp_segmentation": "on", "tx_tcp6_segmentation": "on", "tx_tcp_ecn_segmentation": "on", "tx_tcp_mangleid_segmentation": "on", "tx_tcp_segmentation": "on", "tx_tunnel_remcsum_segmentation": "off [fixed]", "tx_udp_segmentation": "on", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_vlan_offload": "off [fixed]", "tx_vlan_stag_hw_insert": "off [fixed]", "vlan_challenged": "on [fixed]" }, "hw_timestamp_filters": [], "ipv4": { "address": "127.0.0.1", "broadcast": "", "netmask": "255.0.0.0", "network": "127.0.0.0", "prefix": "8" }, "ipv6": [ { "address": "::1", "prefix": "128", "scope": "host" } ], "mtu": 65536, "promisc": false, "timestamping": [], "type": "loopback" }, "ansible_loadavg": { "15m": 0.53, "1m": 1.45, "5m": 0.89 }, "ansible_local": {}, "ansible_locally_reachable_ips": { "ipv4": [ "38.102.83.251", "127.0.0.0/8", "127.0.0.1", "192.168.122.11" ], "ipv6": [ "::1", "fe80::f816:3eff:fe97:c9c3" ] }, "ansible_lsb": 
{}, "ansible_lvm": "N/A", "ansible_machine": "x86_64", "ansible_machine_id": "4d4ef2323cc3337bbfd9081b2a323b4e", "ansible_memfree_mb": 5351, "ansible_memory_mb": { "nocache": { "free": 6879, "used": 801 }, "real": { "free": 5351, "total": 7680, "used": 2329 }, "swap": { "cached": 0, "free": 0, "total": 0, "used": 0 } }, "ansible_memtotal_mb": 7680, "ansible_mounts": [ { "block_available": 19931716, "block_size": 4096, "block_total": 20954875, "block_used": 1023159, "device": "/dev/vda1", "fstype": "xfs", "inode_available": 41790146, "inode_total": 41942512, "inode_used": 152366, "mount": "/", "options": "rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota", "size_available": 81640308736, "size_total": 85831168000, "uuid": "fcf6b761-831a-48a7-9f5f-068b5063763f" } ], "ansible_nodename": "controller", "ansible_os_family": "RedHat", "ansible_pkg_mgr": "dnf", "ansible_proc_cmdline": { "BOOT_IMAGE": "(hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64", "console": "ttyS0,115200n8", "crashkernel": "1G-2G:192M,2G-64G:256M,64G-:512M", "net.ifnames": "0", "no_timer_check": true, "ro": true, "root": "UUID=fcf6b761-831a-48a7-9f5f-068b5063763f" }, "ansible_processor": [ "0", "AuthenticAMD", "AMD EPYC-Rome Processor", "1", "AuthenticAMD", "AMD EPYC-Rome Processor", "2", "AuthenticAMD", "AMD EPYC-Rome Processor", "3", "AuthenticAMD", "AMD EPYC-Rome Processor", "4", "AuthenticAMD", "AMD EPYC-Rome Processor", "5", "AuthenticAMD", "AMD EPYC-Rome Processor", "6", "AuthenticAMD", "AMD EPYC-Rome Processor", "7", "AuthenticAMD", "AMD EPYC-Rome Processor" ], "ansible_processor_cores": 1, "ansible_processor_count": 8, "ansible_processor_nproc": 8, "ansible_processor_threads_per_core": 1, "ansible_processor_vcpus": 8, "ansible_product_name": "OpenStack Nova", "ansible_product_serial": "NA", "ansible_product_uuid": "NA", "ansible_product_version": "26.3.1", "ansible_python": { "executable": "/usr/bin/python3", "has_sslcontext": true, "type": "cpython", "version": { "major": 3, "micro": 25, "minor": 9, "releaselevel": "final", "serial": 0 }, "version_info": [ 3, 9, 25, "final", 0 ] }, "ansible_python_version": "3.9.25", "ansible_real_group_id": 1000, "ansible_real_user_id": 1000, "ansible_selinux": { "config_mode": "enforcing", "mode": "enforcing", "policyvers": 33, "status": "enabled", "type": "targeted" }, "ansible_selinux_python_present": true, "ansible_service_mgr": "systemd", "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOvQreKGmvEG1vi8GvwFBqECdihQVE6tUBzDanz/Lcee9GvGa+tH+Ub+xqX7rB/yRnjc8CJIJovHO3uwatRboZQ=", "ansible_ssh_host_key_ecdsa_public_keytype": "ecdsa-sha2-nistp256", "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIPeGQ/QINrFqQK52g8hKIwxs8VQj2W/JGaf9zdH9cBm2", "ansible_ssh_host_key_ed25519_public_keytype": "ssh-ed25519", "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABgQCrMWhS0sfa8MFM6z46N9s5KdkDDfqBTBpmkqh+k0riEuOWyruqZ/IooEFKeQXaWr/u2j++Erw7byk1nZ2/1inxp5GHPD3tjMz1FjfMlsMM17kkMF8J45E52gQj2JzJS93rFYtLMkLQt6ydCYf8csUaQJz4YGv66NoK1WXUFkxSW12stZQyIjr7FHdmQ9o1VG6PeVlvovTjZdIDOrs2uyx3QLKn/3ZvZBR0nNCGXPAtVoyf4oV/JWSKdX0XOcgkV4QyD4B3CiLstDl04Q6XY8pkzc850JzuMo4L6IQoiI//65VAvU9EWiduDcC6Bb2UqYy5iwuJFLa6Qei0hCq5tk00PSx9JjT+rVhoTJveLD0GlQk2blm+bCOKdHDM87Eh/CiVxhUJhsbkp7ASUwcd1In/Ayr37VyWSHlbW7SDd9G5aQvRd7mOx6JYU5j+j8dmvku5+mmMisaik3SYrgImXY/Agd7BOsZD1BfRvPcqACsgYymCPzDxVVOGYD3Tt5poSUs=", "ansible_ssh_host_key_rsa_public_keytype": "ssh-rsa", "ansible_swapfree_mb": 0, "ansible_swaptotal_mb": 0, "ansible_system": "Linux", 
"ansible_system_capabilities": [ "" ], "ansible_system_capabilities_enforced": "True", "ansible_system_vendor": "OpenStack Foundation", "ansible_uptime_seconds": 1038, "ansible_user_dir": "/home/zuul", "ansible_user_gecos": "", "ansible_user_gid": 1000, "ansible_user_id": "zuul", "ansible_user_shell": "/bin/bash", "ansible_user_uid": 1000, "ansible_userspace_architecture": "x86_64", "ansible_userspace_bits": "64", "ansible_virtualization_role": "guest", "ansible_virtualization_tech_guest": [ "openstack" ], "ansible_virtualization_tech_host": [ "kvm" ], "ansible_virtualization_type": "openstack", "cifmw_discovered_hash": "75a77d833a2ec47d10d7dfd7dba41dfe3c1be7e7e14ae5ab42563f9fb0e5e2d8", "cifmw_discovered_hash_algorithm": "sha256", "cifmw_discovered_image_name": "CentOS-Stream-GenericCloud-x86_64-9-latest.x86_64.qcow2", "cifmw_discovered_image_url": "https://cloud.centos.org/centos/9-stream/x86_64/images//CentOS-Stream-GenericCloud-x86_64-9-latest.x86_64.qcow2", "cifmw_install_yamls_defaults": { "ADOPTED_EXTERNAL_NETWORK": "172.21.1.0/24", "ADOPTED_INTERNALAPI_NETWORK": "172.17.1.0/24", "ADOPTED_STORAGEMGMT_NETWORK": "172.20.1.0/24", "ADOPTED_STORAGE_NETWORK": "172.18.1.0/24", "ADOPTED_TENANT_NETWORK": "172.9.1.0/24", "ANSIBLEEE": "config/samples/_v1beta1_ansibleee.yaml", "ANSIBLEEE_BRANCH": "main", "ANSIBLEEE_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/config/samples/_v1beta1_ansibleee.yaml", "ANSIBLEEE_IMG": "quay.io/openstack-k8s-operators/openstack-ansibleee-operator-index:latest", "ANSIBLEEE_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/kuttl-test.yaml", "ANSIBLEEE_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-ansibleee-operator/test/kuttl/tests", "ANSIBLEEE_KUTTL_NAMESPACE": "ansibleee-kuttl-tests", "ANSIBLEEE_REPO": "https://github.com/openstack-k8s-operators/openstack-ansibleee-operator", "ANSIBLEE_COMMIT_HASH": "", "BARBICAN": "config/samples/barbican_v1beta1_barbican.yaml", "BARBICAN_BRANCH": "main", "BARBICAN_COMMIT_HASH": "", "BARBICAN_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/config/samples/barbican_v1beta1_barbican.yaml", "BARBICAN_DEPL_IMG": "unused", "BARBICAN_IMG": "quay.io/openstack-k8s-operators/barbican-operator-index:latest", "BARBICAN_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/kuttl-test.yaml", "BARBICAN_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/test/kuttl/tests", "BARBICAN_KUTTL_NAMESPACE": "barbican-kuttl-tests", "BARBICAN_REPO": "https://github.com/openstack-k8s-operators/barbican-operator.git", "BARBICAN_SERVICE_ENABLED": "true", "BARBICAN_SIMPLE_CRYPTO_ENCRYPTION_KEY": "sEFmdFjDUqRM2VemYslV5yGNWjokioJXsg8Nrlc3drU=", "BAREMETAL_BRANCH": "main", "BAREMETAL_COMMIT_HASH": "", "BAREMETAL_IMG": "quay.io/openstack-k8s-operators/openstack-baremetal-operator-index:latest", "BAREMETAL_OS_CONTAINER_IMG": "", "BAREMETAL_OS_IMG": "", "BAREMETAL_REPO": "https://github.com/openstack-k8s-operators/openstack-baremetal-operator.git", "BAREMETAL_TIMEOUT": "20m", "BASH_IMG": "quay.io/openstack-k8s-operators/bash:latest", "BGP_ASN": "64999", "BGP_LEAF_1": "100.65.4.1", "BGP_LEAF_2": "100.64.4.1", "BGP_OVN_ROUTING": "false", "BGP_PEER_ASN": "64999", "BGP_SOURCE_IP": "172.30.4.2", "BGP_SOURCE_IP6": "f00d:f00d:f00d:f00d:f00d:f00d:f00d:42", "BMAAS_BRIDGE_IPV4_PREFIX": "172.20.1.2/24", "BMAAS_BRIDGE_IPV6_PREFIX": 
"fd00:bbbb::2/64", "BMAAS_INSTANCE_DISK_SIZE": "20", "BMAAS_INSTANCE_MEMORY": "4096", "BMAAS_INSTANCE_NAME_PREFIX": "crc-bmaas", "BMAAS_INSTANCE_NET_MODEL": "virtio", "BMAAS_INSTANCE_OS_VARIANT": "centos-stream9", "BMAAS_INSTANCE_VCPUS": "2", "BMAAS_INSTANCE_VIRT_TYPE": "kvm", "BMAAS_IPV4": "true", "BMAAS_IPV6": "false", "BMAAS_LIBVIRT_USER": "sushyemu", "BMAAS_METALLB_ADDRESS_POOL": "172.20.1.64/26", "BMAAS_METALLB_POOL_NAME": "baremetal", "BMAAS_NETWORK_IPV4_PREFIX": "172.20.1.1/24", "BMAAS_NETWORK_IPV6_PREFIX": "fd00:bbbb::1/64", "BMAAS_NETWORK_NAME": "crc-bmaas", "BMAAS_NODE_COUNT": "1", "BMAAS_OCP_INSTANCE_NAME": "crc", "BMAAS_REDFISH_PASSWORD": "password", "BMAAS_REDFISH_USERNAME": "admin", "BMAAS_ROUTE_LIBVIRT_NETWORKS": "crc-bmaas,crc,default", "BMAAS_SUSHY_EMULATOR_DRIVER": "libvirt", "BMAAS_SUSHY_EMULATOR_IMAGE": "quay.io/metal3-io/sushy-tools:latest", "BMAAS_SUSHY_EMULATOR_NAMESPACE": "sushy-emulator", "BMAAS_SUSHY_EMULATOR_OS_CLIENT_CONFIG_FILE": "/etc/openstack/clouds.yaml", "BMAAS_SUSHY_EMULATOR_OS_CLOUD": "openstack", "BMH_NAMESPACE": "openstack", "BMO_BRANCH": "release-0.9", "BMO_CLEANUP": "true", "BMO_COMMIT_HASH": "", "BMO_IPA_BRANCH": "stable/2024.1", "BMO_IRONIC_HOST": "192.168.122.10", "BMO_PROVISIONING_INTERFACE": "", "BMO_REPO": "https://github.com/metal3-io/baremetal-operator", "BMO_SETUP": "", "BMO_SETUP_ROUTE_REPLACE": "true", "BM_CTLPLANE_INTERFACE": "enp1s0", "BM_INSTANCE_MEMORY": "8192", "BM_INSTANCE_NAME_PREFIX": "edpm-compute-baremetal", "BM_INSTANCE_NAME_SUFFIX": "0", "BM_NETWORK_NAME": "default", "BM_NODE_COUNT": "1", "BM_ROOT_PASSWORD": "", "BM_ROOT_PASSWORD_SECRET": "", "CEILOMETER_CENTRAL_DEPL_IMG": "unused", "CEILOMETER_NOTIFICATION_DEPL_IMG": "unused", "CEPH_BRANCH": "release-1.15", "CEPH_CLIENT": "/home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/toolbox.yaml", "CEPH_COMMON": "/home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/common.yaml", "CEPH_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/cluster-test.yaml", "CEPH_CRDS": "/home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/crds.yaml", "CEPH_IMG": "quay.io/ceph/demo:latest-squid", "CEPH_OP": "/home/zuul/ci-framework-data/artifacts/manifests/operator/rook/deploy/examples/operator-openshift.yaml", "CEPH_REPO": "https://github.com/rook/rook.git", "CERTMANAGER_TIMEOUT": "300s", "CHECKOUT_FROM_OPENSTACK_REF": "true", "CINDER": "config/samples/cinder_v1beta1_cinder.yaml", "CINDERAPI_DEPL_IMG": "unused", "CINDERBKP_DEPL_IMG": "unused", "CINDERSCH_DEPL_IMG": "unused", "CINDERVOL_DEPL_IMG": "unused", "CINDER_BRANCH": "main", "CINDER_COMMIT_HASH": "", "CINDER_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/config/samples/cinder_v1beta1_cinder.yaml", "CINDER_IMG": "quay.io/openstack-k8s-operators/cinder-operator-index:latest", "CINDER_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/kuttl-test.yaml", "CINDER_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator/test/kuttl/tests", "CINDER_KUTTL_NAMESPACE": "cinder-kuttl-tests", "CINDER_REPO": "https://github.com/openstack-k8s-operators/cinder-operator.git", "CLEANUP_DIR_CMD": "rm -Rf", "CRC_BGP_NIC_1_MAC": "52:54:00:11:11:11", "CRC_BGP_NIC_2_MAC": "52:54:00:11:11:12", "CRC_HTTPS_PROXY": "", "CRC_HTTP_PROXY": "", "CRC_STORAGE_NAMESPACE": "crc-storage", "CRC_STORAGE_RETRIES": "3", "CRC_URL": 
"'https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/crc/latest/crc-linux-amd64.tar.xz'", "CRC_VERSION": "latest", "DATAPLANE_ANSIBLE_SECRET": "dataplane-ansible-ssh-private-key-secret", "DATAPLANE_ANSIBLE_USER": "", "DATAPLANE_COMPUTE_IP": "192.168.122.100", "DATAPLANE_CONTAINER_PREFIX": "openstack", "DATAPLANE_CONTAINER_TAG": "current-podified", "DATAPLANE_CUSTOM_SERVICE_RUNNER_IMG": "quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest", "DATAPLANE_DEFAULT_GW": "192.168.122.1", "DATAPLANE_EXTRA_NOVA_CONFIG_FILE": "/dev/null", "DATAPLANE_GROWVOLS_ARGS": "/=8GB /tmp=1GB /home=1GB /var=100%", "DATAPLANE_KUSTOMIZE_SCENARIO": "preprovisioned", "DATAPLANE_NETWORKER_IP": "192.168.122.200", "DATAPLANE_NETWORK_INTERFACE_NAME": "eth0", "DATAPLANE_NOVA_NFS_PATH": "", "DATAPLANE_NTP_SERVER": "pool.ntp.org", "DATAPLANE_PLAYBOOK": "osp.edpm.download_cache", "DATAPLANE_REGISTRY_URL": "quay.io/podified-antelope-centos9", "DATAPLANE_RUNNER_IMG": "", "DATAPLANE_SERVER_ROLE": "compute", "DATAPLANE_SSHD_ALLOWED_RANGES": "['192.168.122.0/24']", "DATAPLANE_TIMEOUT": "30m", "DATAPLANE_TLS_ENABLED": "true", "DATAPLANE_TOTAL_NETWORKER_NODES": "1", "DATAPLANE_TOTAL_NODES": "1", "DBSERVICE": "galera", "DESIGNATE": "config/samples/designate_v1beta1_designate.yaml", "DESIGNATE_BRANCH": "main", "DESIGNATE_COMMIT_HASH": "", "DESIGNATE_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/config/samples/designate_v1beta1_designate.yaml", "DESIGNATE_IMG": "quay.io/openstack-k8s-operators/designate-operator-index:latest", "DESIGNATE_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/kuttl-test.yaml", "DESIGNATE_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator/test/kuttl/tests", "DESIGNATE_KUTTL_NAMESPACE": "designate-kuttl-tests", "DESIGNATE_REPO": "https://github.com/openstack-k8s-operators/designate-operator.git", "DNSDATA": "config/samples/network_v1beta1_dnsdata.yaml", "DNSDATA_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsdata.yaml", "DNSMASQ": "config/samples/network_v1beta1_dnsmasq.yaml", "DNSMASQ_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_dnsmasq.yaml", "DNS_DEPL_IMG": "unused", "DNS_DOMAIN": "localdomain", "DOWNLOAD_TOOLS_SELECTION": "all", "EDPM_ATTACH_EXTNET": "true", "EDPM_COMPUTE_ADDITIONAL_HOST_ROUTES": "'[]'", "EDPM_COMPUTE_ADDITIONAL_NETWORKS": "'[]'", "EDPM_COMPUTE_CELLS": "1", "EDPM_COMPUTE_CEPH_ENABLED": "true", "EDPM_COMPUTE_CEPH_NOVA": "true", "EDPM_COMPUTE_DHCP_AGENT_ENABLED": "true", "EDPM_COMPUTE_SRIOV_ENABLED": "true", "EDPM_COMPUTE_SUFFIX": "0", "EDPM_CONFIGURE_DEFAULT_ROUTE": "true", "EDPM_CONFIGURE_HUGEPAGES": "false", "EDPM_CONFIGURE_NETWORKING": "true", "EDPM_FIRSTBOOT_EXTRA": "/tmp/edpm-firstboot-extra", "EDPM_NETWORKER_SUFFIX": "0", "EDPM_TOTAL_NETWORKERS": "1", "EDPM_TOTAL_NODES": "1", "GALERA_REPLICAS": "", "GENERATE_SSH_KEYS": "true", "GIT_CLONE_OPTS": "", "GLANCE": "config/samples/glance_v1beta1_glance.yaml", "GLANCEAPI_DEPL_IMG": "unused", "GLANCE_BRANCH": "main", "GLANCE_COMMIT_HASH": "", "GLANCE_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/config/samples/glance_v1beta1_glance.yaml", "GLANCE_IMG": "quay.io/openstack-k8s-operators/glance-operator-index:latest", "GLANCE_KUTTL_CONF": 
"/home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/kuttl-test.yaml", "GLANCE_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator/test/kuttl/tests", "GLANCE_KUTTL_NAMESPACE": "glance-kuttl-tests", "GLANCE_REPO": "https://github.com/openstack-k8s-operators/glance-operator.git", "HEAT": "config/samples/heat_v1beta1_heat.yaml", "HEATAPI_DEPL_IMG": "unused", "HEATCFNAPI_DEPL_IMG": "unused", "HEATENGINE_DEPL_IMG": "unused", "HEAT_AUTH_ENCRYPTION_KEY": "767c3ed056cbaa3b9dfedb8c6f825bf0", "HEAT_BRANCH": "main", "HEAT_COMMIT_HASH": "", "HEAT_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/config/samples/heat_v1beta1_heat.yaml", "HEAT_IMG": "quay.io/openstack-k8s-operators/heat-operator-index:latest", "HEAT_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/kuttl-test.yaml", "HEAT_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator/test/kuttl/tests", "HEAT_KUTTL_NAMESPACE": "heat-kuttl-tests", "HEAT_REPO": "https://github.com/openstack-k8s-operators/heat-operator.git", "HEAT_SERVICE_ENABLED": "true", "HORIZON": "config/samples/horizon_v1beta1_horizon.yaml", "HORIZON_BRANCH": "main", "HORIZON_COMMIT_HASH": "", "HORIZON_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/config/samples/horizon_v1beta1_horizon.yaml", "HORIZON_DEPL_IMG": "unused", "HORIZON_IMG": "quay.io/openstack-k8s-operators/horizon-operator-index:latest", "HORIZON_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/kuttl-test.yaml", "HORIZON_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator/test/kuttl/tests", "HORIZON_KUTTL_NAMESPACE": "horizon-kuttl-tests", "HORIZON_REPO": "https://github.com/openstack-k8s-operators/horizon-operator.git", "INFRA_BRANCH": "main", "INFRA_COMMIT_HASH": "", "INFRA_IMG": "quay.io/openstack-k8s-operators/infra-operator-index:latest", "INFRA_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/kuttl-test.yaml", "INFRA_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/test/kuttl/tests", "INFRA_KUTTL_NAMESPACE": "infra-kuttl-tests", "INFRA_REPO": "https://github.com/openstack-k8s-operators/infra-operator.git", "INSTALL_CERT_MANAGER": "true", "INSTALL_NMSTATE": "true || false", "INSTALL_NNCP": "true || false", "INTERNALAPI_HOST_ROUTES": "", "IPV6_LAB_IPV4_NETWORK_IPADDRESS": "172.30.0.1/24", "IPV6_LAB_IPV6_NETWORK_IPADDRESS": "fd00:abcd:abcd:fc00::1/64", "IPV6_LAB_LIBVIRT_STORAGE_POOL": "default", "IPV6_LAB_MANAGE_FIREWALLD": "true", "IPV6_LAB_NAT64_HOST_IPV4": "172.30.0.2/24", "IPV6_LAB_NAT64_HOST_IPV6": "fd00:abcd:abcd:fc00::2/64", "IPV6_LAB_NAT64_INSTANCE_NAME": "nat64-router", "IPV6_LAB_NAT64_IPV6_NETWORK": "fd00:abcd:abcd:fc00::/64", "IPV6_LAB_NAT64_TAYGA_DYNAMIC_POOL": "192.168.255.0/24", "IPV6_LAB_NAT64_TAYGA_IPV4": "192.168.255.1", "IPV6_LAB_NAT64_TAYGA_IPV6": "fd00:abcd:abcd:fc00::3", "IPV6_LAB_NAT64_TAYGA_IPV6_PREFIX": "fd00:abcd:abcd:fcff::/96", "IPV6_LAB_NAT64_UPDATE_PACKAGES": "false", "IPV6_LAB_NETWORK_NAME": "nat64", "IPV6_LAB_SNO_CLUSTER_NETWORK": "fd00:abcd:0::/48", "IPV6_LAB_SNO_HOST_IP": "fd00:abcd:abcd:fc00::11", "IPV6_LAB_SNO_HOST_PREFIX": "64", "IPV6_LAB_SNO_INSTANCE_NAME": "sno", "IPV6_LAB_SNO_MACHINE_NETWORK": "fd00:abcd:abcd:fc00::/64", "IPV6_LAB_SNO_OCP_MIRROR_URL": "https://mirror.openshift.com/pub/openshift-v4/clients/ocp", "IPV6_LAB_SNO_OCP_VERSION": 
"latest-4.14", "IPV6_LAB_SNO_SERVICE_NETWORK": "fd00:abcd:abcd:fc03::/112", "IPV6_LAB_SSH_PUB_KEY": "/home/zuul/.ssh/id_rsa.pub", "IPV6_LAB_WORK_DIR": "/home/zuul/.ipv6lab", "IRONIC": "config/samples/ironic_v1beta1_ironic.yaml", "IRONICAPI_DEPL_IMG": "unused", "IRONICCON_DEPL_IMG": "unused", "IRONICINS_DEPL_IMG": "unused", "IRONICNAG_DEPL_IMG": "unused", "IRONICPXE_DEPL_IMG": "unused", "IRONIC_BRANCH": "main", "IRONIC_COMMIT_HASH": "", "IRONIC_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/config/samples/ironic_v1beta1_ironic.yaml", "IRONIC_IMAGE": "quay.io/metal3-io/ironic", "IRONIC_IMAGE_TAG": "release-24.1", "IRONIC_IMG": "quay.io/openstack-k8s-operators/ironic-operator-index:latest", "IRONIC_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/kuttl-test.yaml", "IRONIC_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator/test/kuttl/tests", "IRONIC_KUTTL_NAMESPACE": "ironic-kuttl-tests", "IRONIC_REPO": "https://github.com/openstack-k8s-operators/ironic-operator.git", "KEYSTONEAPI": "config/samples/keystone_v1beta1_keystoneapi.yaml", "KEYSTONEAPI_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/config/samples/keystone_v1beta1_keystoneapi.yaml", "KEYSTONEAPI_DEPL_IMG": "unused", "KEYSTONE_BRANCH": "main", "KEYSTONE_COMMIT_HASH": "", "KEYSTONE_FEDERATION_CLIENT_SECRET": "COX8bmlKAWn56XCGMrKQJj7dgHNAOl6f", "KEYSTONE_FEDERATION_CRYPTO_PASSPHRASE": "openstack", "KEYSTONE_IMG": "quay.io/openstack-k8s-operators/keystone-operator-index:latest", "KEYSTONE_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/kuttl-test.yaml", "KEYSTONE_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/test/kuttl/tests", "KEYSTONE_KUTTL_NAMESPACE": "keystone-kuttl-tests", "KEYSTONE_REPO": "https://github.com/openstack-k8s-operators/keystone-operator.git", "KUBEADMIN_PWD": "12345678", "LIBVIRT_SECRET": "libvirt-secret", "LOKI_DEPLOY_MODE": "openshift-network", "LOKI_DEPLOY_NAMESPACE": "netobserv", "LOKI_DEPLOY_SIZE": "1x.demo", "LOKI_NAMESPACE": "openshift-operators-redhat", "LOKI_OPERATOR_GROUP": "openshift-operators-redhat-loki", "LOKI_SUBSCRIPTION": "loki-operator", "LVMS_CR": "1", "MANILA": "config/samples/manila_v1beta1_manila.yaml", "MANILAAPI_DEPL_IMG": "unused", "MANILASCH_DEPL_IMG": "unused", "MANILASHARE_DEPL_IMG": "unused", "MANILA_BRANCH": "main", "MANILA_COMMIT_HASH": "", "MANILA_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/config/samples/manila_v1beta1_manila.yaml", "MANILA_IMG": "quay.io/openstack-k8s-operators/manila-operator-index:latest", "MANILA_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/kuttl-test.yaml", "MANILA_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator/test/kuttl/tests", "MANILA_KUTTL_NAMESPACE": "manila-kuttl-tests", "MANILA_REPO": "https://github.com/openstack-k8s-operators/manila-operator.git", "MANILA_SERVICE_ENABLED": "true", "MARIADB": "config/samples/mariadb_v1beta1_galera.yaml", "MARIADB_BRANCH": "main", "MARIADB_CHAINSAW_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/config.yaml", "MARIADB_CHAINSAW_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/chainsaw/tests", "MARIADB_CHAINSAW_NAMESPACE": "mariadb-chainsaw-tests", "MARIADB_COMMIT_HASH": "", "MARIADB_CR": 
"/home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/config/samples/mariadb_v1beta1_galera.yaml", "MARIADB_DEPL_IMG": "unused", "MARIADB_IMG": "quay.io/openstack-k8s-operators/mariadb-operator-index:latest", "MARIADB_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/kuttl-test.yaml", "MARIADB_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/test/kuttl/tests", "MARIADB_KUTTL_NAMESPACE": "mariadb-kuttl-tests", "MARIADB_REPO": "https://github.com/openstack-k8s-operators/mariadb-operator.git", "MEMCACHED": "config/samples/memcached_v1beta1_memcached.yaml", "MEMCACHED_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/memcached_v1beta1_memcached.yaml", "MEMCACHED_DEPL_IMG": "unused", "METADATA_SHARED_SECRET": "1234567842", "METALLB_IPV6_POOL": "fd00:aaaa::80-fd00:aaaa::90", "METALLB_POOL": "192.168.122.80-192.168.122.90", "MICROSHIFT": "0", "NAMESPACE": "openstack", "NETCONFIG": "config/samples/network_v1beta1_netconfig.yaml", "NETCONFIG_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/network_v1beta1_netconfig.yaml", "NETCONFIG_DEPL_IMG": "unused", "NETOBSERV_DEPLOY_NAMESPACE": "netobserv", "NETOBSERV_NAMESPACE": "openshift-netobserv-operator", "NETOBSERV_OPERATOR_GROUP": "openshift-netobserv-operator-net", "NETOBSERV_SUBSCRIPTION": "netobserv-operator", "NETWORK_BGP": "false", "NETWORK_DESIGNATE_ADDRESS_PREFIX": "172.28.0", "NETWORK_DESIGNATE_EXT_ADDRESS_PREFIX": "172.50.0", "NETWORK_INTERNALAPI_ADDRESS_PREFIX": "172.17.0", "NETWORK_ISOLATION": "true", "NETWORK_ISOLATION_INSTANCE_NAME": "crc", "NETWORK_ISOLATION_IPV4": "true", "NETWORK_ISOLATION_IPV4_ADDRESS": "172.16.1.1/24", "NETWORK_ISOLATION_IPV4_NAT": "true", "NETWORK_ISOLATION_IPV6": "false", "NETWORK_ISOLATION_IPV6_ADDRESS": "fd00:aaaa::1/64", "NETWORK_ISOLATION_IP_ADDRESS": "192.168.122.10", "NETWORK_ISOLATION_MAC": "52:54:00:11:11:10", "NETWORK_ISOLATION_NETWORK_NAME": "net-iso", "NETWORK_ISOLATION_NET_NAME": "default", "NETWORK_ISOLATION_USE_DEFAULT_NETWORK": "true", "NETWORK_MTU": "1500", "NETWORK_STORAGEMGMT_ADDRESS_PREFIX": "172.20.0", "NETWORK_STORAGE_ADDRESS_PREFIX": "172.18.0", "NETWORK_STORAGE_MACVLAN": "", "NETWORK_TENANT_ADDRESS_PREFIX": "172.19.0", "NETWORK_VLAN_START": "20", "NETWORK_VLAN_STEP": "1", "NEUTRONAPI": "config/samples/neutron_v1beta1_neutronapi.yaml", "NEUTRONAPI_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/config/samples/neutron_v1beta1_neutronapi.yaml", "NEUTRONAPI_DEPL_IMG": "unused", "NEUTRON_BRANCH": "main", "NEUTRON_COMMIT_HASH": "", "NEUTRON_IMG": "quay.io/openstack-k8s-operators/neutron-operator-index:latest", "NEUTRON_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/kuttl-test.yaml", "NEUTRON_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator/test/kuttl/tests", "NEUTRON_KUTTL_NAMESPACE": "neutron-kuttl-tests", "NEUTRON_REPO": "https://github.com/openstack-k8s-operators/neutron-operator.git", "NFS_HOME": "/home/nfs", "NMSTATE_NAMESPACE": "openshift-nmstate", "NMSTATE_OPERATOR_GROUP": "openshift-nmstate-tn6k8", "NMSTATE_SUBSCRIPTION": "kubernetes-nmstate-operator", "NNCP_ADDITIONAL_HOST_ROUTES": "", "NNCP_BGP_1_INTERFACE": "enp7s0", "NNCP_BGP_1_IP_ADDRESS": "100.65.4.2", "NNCP_BGP_2_INTERFACE": "enp8s0", "NNCP_BGP_2_IP_ADDRESS": "100.64.4.2", "NNCP_BRIDGE": "ospbr", "NNCP_CLEANUP_TIMEOUT": "120s", 
"NNCP_CTLPLANE_IPV6_ADDRESS_PREFIX": "fd00:aaaa::", "NNCP_CTLPLANE_IPV6_ADDRESS_SUFFIX": "10", "NNCP_CTLPLANE_IP_ADDRESS_PREFIX": "192.168.122", "NNCP_CTLPLANE_IP_ADDRESS_SUFFIX": "10", "NNCP_DNS_SERVER": "192.168.122.1", "NNCP_DNS_SERVER_IPV6": "fd00:aaaa::1", "NNCP_GATEWAY": "192.168.122.1", "NNCP_GATEWAY_IPV6": "fd00:aaaa::1", "NNCP_INTERFACE": "enp6s0", "NNCP_NODES": "", "NNCP_TIMEOUT": "240s", "NOVA": "config/samples/nova_v1beta1_nova_collapsed_cell.yaml", "NOVA_BRANCH": "main", "NOVA_COMMIT_HASH": "", "NOVA_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/nova-operator/config/samples/nova_v1beta1_nova_collapsed_cell.yaml", "NOVA_IMG": "quay.io/openstack-k8s-operators/nova-operator-index:latest", "NOVA_REPO": "https://github.com/openstack-k8s-operators/nova-operator.git", "NUMBER_OF_INSTANCES": "1", "OCP_NETWORK_NAME": "crc", "OCTAVIA": "config/samples/octavia_v1beta1_octavia.yaml", "OCTAVIA_BRANCH": "main", "OCTAVIA_COMMIT_HASH": "", "OCTAVIA_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/config/samples/octavia_v1beta1_octavia.yaml", "OCTAVIA_IMG": "quay.io/openstack-k8s-operators/octavia-operator-index:latest", "OCTAVIA_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/kuttl-test.yaml", "OCTAVIA_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator/test/kuttl/tests", "OCTAVIA_KUTTL_NAMESPACE": "octavia-kuttl-tests", "OCTAVIA_REPO": "https://github.com/openstack-k8s-operators/octavia-operator.git", "OKD": "false", "OPENSTACK_BRANCH": "main", "OPENSTACK_BUNDLE_IMG": "quay.io/openstack-k8s-operators/openstack-operator-bundle:latest", "OPENSTACK_COMMIT_HASH": "", "OPENSTACK_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml", "OPENSTACK_CRDS_DIR": "openstack_crds", "OPENSTACK_CTLPLANE": "config/samples/core_v1beta1_openstackcontrolplane_galera_network_isolation.yaml", "OPENSTACK_IMG": "quay.io/openstack-k8s-operators/openstack-operator-index:latest", "OPENSTACK_K8S_BRANCH": "main", "OPENSTACK_K8S_TAG": "latest", "OPENSTACK_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/kuttl-test.yaml", "OPENSTACK_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/openstack-operator/test/kuttl/tests", "OPENSTACK_KUTTL_NAMESPACE": "openstack-kuttl-tests", "OPENSTACK_NEUTRON_CUSTOM_CONF": "", "OPENSTACK_REPO": "https://github.com/openstack-k8s-operators/openstack-operator.git", "OPENSTACK_STORAGE_BUNDLE_IMG": "quay.io/openstack-k8s-operators/openstack-operator-storage-bundle:latest", "OPERATOR_BASE_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator", "OPERATOR_CHANNEL": "", "OPERATOR_NAMESPACE": "openstack-operators", "OPERATOR_SOURCE": "", "OPERATOR_SOURCE_NAMESPACE": "", "OUT": "/home/zuul/ci-framework-data/artifacts/manifests", "OUTPUT_DIR": "/home/zuul/ci-framework-data/artifacts/edpm", "OVNCONTROLLER": "config/samples/ovn_v1beta1_ovncontroller.yaml", "OVNCONTROLLER_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovncontroller.yaml", "OVNCONTROLLER_NMAP": "true", "OVNDBS": "config/samples/ovn_v1beta1_ovndbcluster.yaml", "OVNDBS_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovndbcluster.yaml", "OVNNORTHD": "config/samples/ovn_v1beta1_ovnnorthd.yaml", "OVNNORTHD_CR": 
"/home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/config/samples/ovn_v1beta1_ovnnorthd.yaml", "OVN_BRANCH": "main", "OVN_COMMIT_HASH": "", "OVN_IMG": "quay.io/openstack-k8s-operators/ovn-operator-index:latest", "OVN_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/kuttl-test.yaml", "OVN_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator/test/kuttl/tests", "OVN_KUTTL_NAMESPACE": "ovn-kuttl-tests", "OVN_REPO": "https://github.com/openstack-k8s-operators/ovn-operator.git", "PASSWORD": "12345678", "PLACEMENTAPI": "config/samples/placement_v1beta1_placementapi.yaml", "PLACEMENTAPI_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/config/samples/placement_v1beta1_placementapi.yaml", "PLACEMENTAPI_DEPL_IMG": "unused", "PLACEMENT_BRANCH": "main", "PLACEMENT_COMMIT_HASH": "", "PLACEMENT_IMG": "quay.io/openstack-k8s-operators/placement-operator-index:latest", "PLACEMENT_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/kuttl-test.yaml", "PLACEMENT_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator/test/kuttl/tests", "PLACEMENT_KUTTL_NAMESPACE": "placement-kuttl-tests", "PLACEMENT_REPO": "https://github.com/openstack-k8s-operators/placement-operator.git", "PULL_SECRET": "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/pull-secret.txt", "RABBITMQ": "docs/examples/default-security-context/rabbitmq.yaml", "RABBITMQ_BRANCH": "patches", "RABBITMQ_COMMIT_HASH": "", "RABBITMQ_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/rabbitmq-operator/docs/examples/default-security-context/rabbitmq.yaml", "RABBITMQ_DEPL_IMG": "unused", "RABBITMQ_IMG": "quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest", "RABBITMQ_REPO": "https://github.com/openstack-k8s-operators/rabbitmq-cluster-operator.git", "REDHAT_OPERATORS": "false", "REDIS": "config/samples/redis_v1beta1_redis.yaml", "REDIS_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator-redis/config/samples/redis_v1beta1_redis.yaml", "REDIS_DEPL_IMG": "unused", "RH_REGISTRY_PWD": "", "RH_REGISTRY_USER": "", "SECRET": "osp-secret", "SG_CORE_DEPL_IMG": "unused", "STANDALONE_COMPUTE_DRIVER": "libvirt", "STANDALONE_EXTERNAL_NET_PREFFIX": "172.21.0", "STANDALONE_INTERNALAPI_NET_PREFIX": "172.17.0", "STANDALONE_STORAGEMGMT_NET_PREFIX": "172.20.0", "STANDALONE_STORAGE_NET_PREFIX": "172.18.0", "STANDALONE_TENANT_NET_PREFIX": "172.19.0", "STORAGEMGMT_HOST_ROUTES": "", "STORAGE_CLASS": "local-storage", "STORAGE_HOST_ROUTES": "", "SWIFT": "config/samples/swift_v1beta1_swift.yaml", "SWIFT_BRANCH": "main", "SWIFT_COMMIT_HASH": "", "SWIFT_CR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/config/samples/swift_v1beta1_swift.yaml", "SWIFT_IMG": "quay.io/openstack-k8s-operators/swift-operator-index:latest", "SWIFT_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/kuttl-test.yaml", "SWIFT_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests", "SWIFT_KUTTL_NAMESPACE": "swift-kuttl-tests", "SWIFT_REPO": "https://github.com/openstack-k8s-operators/swift-operator.git", "TELEMETRY": "config/samples/telemetry_v1beta1_telemetry.yaml", "TELEMETRY_BRANCH": "main", "TELEMETRY_COMMIT_HASH": "", "TELEMETRY_CR": 
"/home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/config/samples/telemetry_v1beta1_telemetry.yaml", "TELEMETRY_IMG": "quay.io/openstack-k8s-operators/telemetry-operator-index:latest", "TELEMETRY_KUTTL_BASEDIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator", "TELEMETRY_KUTTL_CONF": "/home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/kuttl-test.yaml", "TELEMETRY_KUTTL_DIR": "/home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator/test/kuttl/suites", "TELEMETRY_KUTTL_NAMESPACE": "telemetry-kuttl-tests", "TELEMETRY_KUTTL_RELPATH": "test/kuttl/suites", "TELEMETRY_REPO": "https://github.com/openstack-k8s-operators/telemetry-operator.git", "TENANT_HOST_ROUTES": "", "TIMEOUT": "300s", "TLS_ENABLED": "false", "tripleo_deploy": "export REGISTRY_PWD:" }, "cifmw_install_yamls_environment": { "CHECKOUT_FROM_OPENSTACK_REF": "true", "KUBECONFIG": "/home/zuul/.crc/machines/crc/kubeconfig", "OPENSTACK_K8S_BRANCH": "main", "OUT": "/home/zuul/ci-framework-data/artifacts/manifests", "OUTPUT_DIR": "/home/zuul/ci-framework-data/artifacts/edpm" }, "cifmw_openshift_api": "https://api.crc.testing:6443", "cifmw_openshift_context": "default/api-crc-testing:6443/kubeadmin", "cifmw_openshift_kubeconfig": "/home/zuul/.crc/machines/crc/kubeconfig", "cifmw_openshift_login_api": "https://api.crc.testing:6443", "cifmw_openshift_login_cert_login": false, "cifmw_openshift_login_context": "default/api-crc-testing:6443/kubeadmin", "cifmw_openshift_login_kubeconfig": "/home/zuul/.crc/machines/crc/kubeconfig", "cifmw_openshift_login_password": 123456789, "cifmw_openshift_login_token": "sha256~pOUgk-9850hxxEZK6G0UMKUA3Hu8vVpuezKn-haJb_U", "cifmw_openshift_login_user": "kubeadmin", "cifmw_openshift_token": "sha256~pOUgk-9850hxxEZK6G0UMKUA3Hu8vVpuezKn-haJb_U", "cifmw_openshift_user": "kubeadmin", "cifmw_path": "/home/zuul/.crc/bin:/home/zuul/.crc/bin/oc:/home/zuul/bin:~/.crc/bin:~/.crc/bin/oc:~/bin:/home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin", "cifmw_repo_setup_commit_hash": null, "cifmw_repo_setup_distro_hash": null, "cifmw_repo_setup_dlrn_api_url": "https://trunk.rdoproject.org/api-centos9-antelope", "cifmw_repo_setup_dlrn_url": "https://trunk.rdoproject.org/centos9-antelope/current-podified/delorean.repo.md5", "cifmw_repo_setup_extended_hash": null, "cifmw_repo_setup_full_hash": "c3923531bcda0b0811b2d5053f189beb", "cifmw_repo_setup_release": "antelope", "discovered_interpreter_python": "/usr/bin/python3", "gather_subset": [ "all" ], "module_setup": true }home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/0000755000175000017500000000000015115610202023460 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/0000755000175000017500000000000015115610202027513 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/0000755000175000017500000000000015115611523030647 5ustar zuulzuul././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican0000644000175000017500000000173115115610204033325 0ustar zuulzuul--- - name: Debug make_barbican_cleanup_env when: make_barbican_cleanup_env is defined ansible.builtin.debug: var: make_barbican_cleanup_env - name: Debug 
make_barbican_cleanup_params
  when: make_barbican_cleanup_params is defined
  ansible.builtin.debug:
    var: make_barbican_cleanup_params
- name: Run barbican_cleanup
  retries: "{{ make_barbican_cleanup_retries | default(omit) }}"
  delay: "{{ make_barbican_cleanup_delay | default(omit) }}"
  until: "{{ make_barbican_cleanup_until | default(true) }}"
  register: "make_barbican_cleanup_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make barbican_cleanup"
    dry_run: "{{ make_barbican_cleanup_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_barbican_cleanup_env|default({})), **(make_barbican_cleanup_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican_deploy_prep.yml
---
- name: Debug make_barbican_deploy_prep_env
  when: make_barbican_deploy_prep_env is defined
  ansible.builtin.debug:
    var: make_barbican_deploy_prep_env
- name: Debug make_barbican_deploy_prep_params
  when: make_barbican_deploy_prep_params is defined
  ansible.builtin.debug:
    var: make_barbican_deploy_prep_params
- name: Run barbican_deploy_prep
  retries: "{{ make_barbican_deploy_prep_retries | default(omit) }}"
  delay: "{{ make_barbican_deploy_prep_delay | default(omit) }}"
  until: "{{ make_barbican_deploy_prep_until | default(true) }}"
  register: "make_barbican_deploy_prep_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make barbican_deploy_prep"
    dry_run: "{{ make_barbican_deploy_prep_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_barbican_deploy_prep_env|default({})), **(make_barbican_deploy_prep_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican_deploy.yml
---
- name: Debug make_barbican_deploy_env
  when: make_barbican_deploy_env is defined
  ansible.builtin.debug:
    var: make_barbican_deploy_env
- name: Debug make_barbican_deploy_params
  when: make_barbican_deploy_params is defined
  ansible.builtin.debug:
    var: make_barbican_deploy_params
- name: Run barbican_deploy
  retries: "{{ make_barbican_deploy_retries | default(omit) }}"
  delay: "{{ make_barbican_deploy_delay | default(omit) }}"
  until: "{{ make_barbican_deploy_until | default(true) }}"
  register: "make_barbican_deploy_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make barbican_deploy"
    dry_run: "{{ make_barbican_deploy_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_barbican_deploy_env|default({})), **(make_barbican_deploy_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican_deploy_validate.yml
---
- name: Debug make_barbican_deploy_validate_env
  when: make_barbican_deploy_validate_env is defined
  ansible.builtin.debug:
    var: make_barbican_deploy_validate_env
- name: Debug make_barbican_deploy_validate_params
  when: make_barbican_deploy_validate_params is defined
  ansible.builtin.debug:
    var: make_barbican_deploy_validate_params
- name: Run barbican_deploy_validate
  retries: "{{ make_barbican_deploy_validate_retries | default(omit) }}"
  delay: "{{ make_barbican_deploy_validate_delay | default(omit) }}"
  until: "{{ make_barbican_deploy_validate_until | default(true) }}"
  register: "make_barbican_deploy_validate_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make barbican_deploy_validate"
    dry_run: "{{ make_barbican_deploy_validate_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_barbican_deploy_validate_env|default({})), **(make_barbican_deploy_validate_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican_deploy_cleanup.yml
---
- name: Debug make_barbican_deploy_cleanup_env
  when: make_barbican_deploy_cleanup_env is defined
  ansible.builtin.debug:
    var: make_barbican_deploy_cleanup_env
- name: Debug make_barbican_deploy_cleanup_params
  when: make_barbican_deploy_cleanup_params is defined
  ansible.builtin.debug:
    var: make_barbican_deploy_cleanup_params
- name: Run barbican_deploy_cleanup
  retries: "{{ make_barbican_deploy_cleanup_retries | default(omit) }}"
  delay: "{{ make_barbican_deploy_cleanup_delay | default(omit) }}"
  until: "{{ make_barbican_deploy_cleanup_until | default(true) }}"
  register: "make_barbican_deploy_cleanup_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make barbican_deploy_cleanup"
    dry_run: "{{ make_barbican_deploy_cleanup_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_barbican_deploy_cleanup_env|default({})), **(make_barbican_deploy_cleanup_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb.yml
---
- name: Debug make_mariadb_env
  when: make_mariadb_env is defined
  ansible.builtin.debug:
    var: make_mariadb_env
- name: Debug make_mariadb_params
  when: make_mariadb_params is defined
  ansible.builtin.debug:
    var: make_mariadb_params
- name: Run mariadb
  retries: "{{ make_mariadb_retries | default(omit) }}"
  delay: "{{ make_mariadb_delay | default(omit) }}"
  until: "{{ make_mariadb_until | default(true) }}"
  register: "make_mariadb_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make mariadb"
    dry_run: "{{ make_mariadb_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_mariadb_env|default({})), **(make_mariadb_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_cleanup.yml
---
- name: Debug make_mariadb_cleanup_env
  when: make_mariadb_cleanup_env is defined
  ansible.builtin.debug:
    var: make_mariadb_cleanup_env
- name: Debug make_mariadb_cleanup_params
  when: make_mariadb_cleanup_params is defined
  ansible.builtin.debug:
    var: make_mariadb_cleanup_params
- name: Run mariadb_cleanup
  retries: "{{ make_mariadb_cleanup_retries | default(omit) }}"
  delay: "{{ make_mariadb_cleanup_delay | default(omit) }}"
  until: "{{ make_mariadb_cleanup_until | default(true) }}"
  register: "make_mariadb_cleanup_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make mariadb_cleanup"
    dry_run: "{{ make_mariadb_cleanup_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_mariadb_cleanup_env|default({})), **(make_mariadb_cleanup_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_deploy_prep.yml
---
- name: Debug make_mariadb_deploy_prep_env
  when: make_mariadb_deploy_prep_env is defined
  ansible.builtin.debug:
    var: make_mariadb_deploy_prep_env
- name: Debug make_mariadb_deploy_prep_params
  when: make_mariadb_deploy_prep_params is defined
  ansible.builtin.debug:
    var: make_mariadb_deploy_prep_params
- name: Run mariadb_deploy_prep
  retries: "{{ make_mariadb_deploy_prep_retries | default(omit) }}"
  delay: "{{ make_mariadb_deploy_prep_delay | default(omit) }}"
  until: "{{ make_mariadb_deploy_prep_until | default(true) }}"
  register: "make_mariadb_deploy_prep_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make mariadb_deploy_prep"
    dry_run: "{{ make_mariadb_deploy_prep_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_mariadb_deploy_prep_env|default({})), **(make_mariadb_deploy_prep_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_deploy.yml
---
- name: Debug make_mariadb_deploy_env
  when: make_mariadb_deploy_env is defined
  ansible.builtin.debug:
    var: make_mariadb_deploy_env
- name: Debug make_mariadb_deploy_params
  when: make_mariadb_deploy_params is
defined ansible.builtin.debug: var: make_mariadb_deploy_params - name: Run mariadb_deploy retries: "{{ make_mariadb_deploy_retries | default(omit) }}" delay: "{{ make_mariadb_deploy_delay | default(omit) }}" until: "{{ make_mariadb_deploy_until | default(true) }}" register: "make_mariadb_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make mariadb_deploy" dry_run: "{{ make_mariadb_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_mariadb_deploy_env|default({})), **(make_mariadb_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_0000644000175000017500000000206315115610204033321 0ustar zuulzuul--- - name: Debug make_mariadb_deploy_cleanup_env when: make_mariadb_deploy_cleanup_env is defined ansible.builtin.debug: var: make_mariadb_deploy_cleanup_env - name: Debug make_mariadb_deploy_cleanup_params when: make_mariadb_deploy_cleanup_params is defined ansible.builtin.debug: var: make_mariadb_deploy_cleanup_params - name: Run mariadb_deploy_cleanup retries: "{{ make_mariadb_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_mariadb_deploy_cleanup_delay | default(omit) }}" until: "{{ make_mariadb_deploy_cleanup_until | default(true) }}" register: "make_mariadb_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make mariadb_deploy_cleanup" dry_run: "{{ make_mariadb_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_mariadb_deploy_cleanup_env|default({})), **(make_mariadb_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placement_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placemen0000644000175000017500000000167315115610204033355 0ustar zuulzuul--- - name: Debug make_placement_prep_env when: make_placement_prep_env is defined ansible.builtin.debug: var: make_placement_prep_env - name: Debug make_placement_prep_params when: make_placement_prep_params is defined ansible.builtin.debug: var: make_placement_prep_params - name: Run placement_prep retries: "{{ make_placement_prep_retries | default(omit) }}" delay: "{{ make_placement_prep_delay | default(omit) }}" until: "{{ make_placement_prep_until | default(true) }}" register: "make_placement_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make placement_prep" dry_run: "{{ make_placement_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_placement_prep_env|default({})), **(make_placement_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015200000000000011601 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placement.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placemen0000644000175000017500000000156015115610204033350 0ustar zuulzuul--- - name: Debug make_placement_env when: make_placement_env is defined ansible.builtin.debug: var: make_placement_env - name: Debug make_placement_params when: make_placement_params is defined ansible.builtin.debug: var: make_placement_params - name: Run placement retries: "{{ make_placement_retries | default(omit) }}" delay: "{{ make_placement_delay | default(omit) }}" until: "{{ make_placement_until | default(true) }}" register: "make_placement_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make placement" dry_run: "{{ make_placement_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_placement_env|default({})), **(make_placement_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placement_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placemen0000644000175000017500000000175015115610204033351 0ustar zuulzuul--- - name: Debug make_placement_cleanup_env when: make_placement_cleanup_env is defined ansible.builtin.debug: var: make_placement_cleanup_env - name: Debug make_placement_cleanup_params when: make_placement_cleanup_params is defined ansible.builtin.debug: var: make_placement_cleanup_params - name: Run placement_cleanup retries: "{{ make_placement_cleanup_retries | default(omit) }}" delay: "{{ make_placement_cleanup_delay | default(omit) }}" until: "{{ make_placement_cleanup_until | default(true) }}" register: "make_placement_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make placement_cleanup" dry_run: "{{ make_placement_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_placement_cleanup_env|default({})), **(make_placement_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placement_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placemen0000644000175000017500000000204415115610204033346 0ustar zuulzuul--- - name: Debug make_placement_deploy_prep_env when: make_placement_deploy_prep_env is defined ansible.builtin.debug: var: make_placement_deploy_prep_env - name: Debug make_placement_deploy_prep_params when: make_placement_deploy_prep_params is defined ansible.builtin.debug: var: make_placement_deploy_prep_params - name: Run placement_deploy_prep retries: "{{ make_placement_deploy_prep_retries | default(omit) }}" delay: "{{ make_placement_deploy_prep_delay | default(omit) }}" until: "{{ make_placement_deploy_prep_until | default(true) }}" register: "make_placement_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: 
"/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make placement_deploy_prep" dry_run: "{{ make_placement_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_placement_deploy_prep_env|default({})), **(make_placement_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_update_services.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_update_s0000644000175000017500000000171215115610204033367 0ustar zuulzuul--- - name: Debug make_update_services_env when: make_update_services_env is defined ansible.builtin.debug: var: make_update_services_env - name: Debug make_update_services_params when: make_update_services_params is defined ansible.builtin.debug: var: make_update_services_params - name: Run update_services retries: "{{ make_update_services_retries | default(omit) }}" delay: "{{ make_update_services_delay | default(omit) }}" until: "{{ make_update_services_until | default(true) }}" register: "make_update_services_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make update_services" dry_run: "{{ make_update_services_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_update_services_env|default({})), **(make_update_services_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_update_system.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_update_s0000644000175000017500000000165415115610204033374 0ustar zuulzuul--- - name: Debug make_update_system_env when: make_update_system_env is defined ansible.builtin.debug: var: make_update_system_env - name: Debug make_update_system_params when: make_update_system_params is defined ansible.builtin.debug: var: make_update_system_params - name: Run update_system retries: "{{ make_update_system_retries | default(omit) }}" delay: "{{ make_update_system_delay | default(omit) }}" until: "{{ make_update_system_until | default(true) }}" register: "make_update_system_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make update_system" dry_run: "{{ make_update_system_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_update_system_env|default({})), **(make_update_system_params|default({}))) }}" ././@LongLink0000644000000000000000000000017000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_patch_version.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000210215115610204033371 0ustar zuulzuul--- - name: Debug make_openstack_patch_version_env when: make_openstack_patch_version_env is defined ansible.builtin.debug: var: make_openstack_patch_version_env - name: Debug make_openstack_patch_version_params when: make_openstack_patch_version_params is defined ansible.builtin.debug: var: make_openstack_patch_version_params - name: Run openstack_patch_version 
retries: "{{ make_openstack_patch_version_retries | default(omit) }}" delay: "{{ make_openstack_patch_version_delay | default(omit) }}" until: "{{ make_openstack_patch_version_until | default(true) }}" register: "make_openstack_patch_version_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_patch_version" dry_run: "{{ make_openstack_patch_version_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_patch_version_env|default({})), **(make_openstack_patch_version_params|default({}))) }}" ././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_deploy_generate_keys.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_dep0000644000175000017500000000214015115610204033334 0ustar zuulzuul--- - name: Debug make_edpm_deploy_generate_keys_env when: make_edpm_deploy_generate_keys_env is defined ansible.builtin.debug: var: make_edpm_deploy_generate_keys_env - name: Debug make_edpm_deploy_generate_keys_params when: make_edpm_deploy_generate_keys_params is defined ansible.builtin.debug: var: make_edpm_deploy_generate_keys_params - name: Run edpm_deploy_generate_keys retries: "{{ make_edpm_deploy_generate_keys_retries | default(omit) }}" delay: "{{ make_edpm_deploy_generate_keys_delay | default(omit) }}" until: "{{ make_edpm_deploy_generate_keys_until | default(true) }}" register: "make_edpm_deploy_generate_keys_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_deploy_generate_keys" dry_run: "{{ make_edpm_deploy_generate_keys_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_deploy_generate_keys_env|default({})), **(make_edpm_deploy_generate_keys_params|default({}))) }}" ././@LongLink0000644000000000000000000000020000000000000011573 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_patch_ansible_runner_image.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_pat0000644000175000017500000000227215115610204033356 0ustar zuulzuul--- - name: Debug make_edpm_patch_ansible_runner_image_env when: make_edpm_patch_ansible_runner_image_env is defined ansible.builtin.debug: var: make_edpm_patch_ansible_runner_image_env - name: Debug make_edpm_patch_ansible_runner_image_params when: make_edpm_patch_ansible_runner_image_params is defined ansible.builtin.debug: var: make_edpm_patch_ansible_runner_image_params - name: Run edpm_patch_ansible_runner_image retries: "{{ make_edpm_patch_ansible_runner_image_retries | default(omit) }}" delay: "{{ make_edpm_patch_ansible_runner_image_delay | default(omit) }}" until: "{{ make_edpm_patch_ansible_runner_image_until | default(true) }}" register: "make_edpm_patch_ansible_runner_image_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_patch_ansible_runner_image" dry_run: "{{ make_edpm_patch_ansible_runner_image_dryrun|default(false)|bool }}" extra_args: "{{ 
dict((make_edpm_patch_ansible_runner_image_env|default({})), **(make_edpm_patch_ansible_runner_image_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_dep0000644000175000017500000000173115115610204033341 0ustar zuulzuul--- - name: Debug make_edpm_deploy_prep_env when: make_edpm_deploy_prep_env is defined ansible.builtin.debug: var: make_edpm_deploy_prep_env - name: Debug make_edpm_deploy_prep_params when: make_edpm_deploy_prep_params is defined ansible.builtin.debug: var: make_edpm_deploy_prep_params - name: Run edpm_deploy_prep retries: "{{ make_edpm_deploy_prep_retries | default(omit) }}" delay: "{{ make_edpm_deploy_prep_delay | default(omit) }}" until: "{{ make_edpm_deploy_prep_until | default(true) }}" register: "make_edpm_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_deploy_prep" dry_run: "{{ make_edpm_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_deploy_prep_env|default({})), **(make_edpm_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_dep0000644000175000017500000000200615115610204033335 0ustar zuulzuul--- - name: Debug make_edpm_deploy_cleanup_env when: make_edpm_deploy_cleanup_env is defined ansible.builtin.debug: var: make_edpm_deploy_cleanup_env - name: Debug make_edpm_deploy_cleanup_params when: make_edpm_deploy_cleanup_params is defined ansible.builtin.debug: var: make_edpm_deploy_cleanup_params - name: Run edpm_deploy_cleanup retries: "{{ make_edpm_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_edpm_deploy_cleanup_delay | default(omit) }}" until: "{{ make_edpm_deploy_cleanup_until | default(true) }}" register: "make_edpm_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_deploy_cleanup" dry_run: "{{ make_edpm_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_deploy_cleanup_env|default({})), **(make_edpm_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_dep0000644000175000017500000000161615115610204033343 0ustar zuulzuul--- - name: Debug make_edpm_deploy_env when: make_edpm_deploy_env is defined ansible.builtin.debug: var: make_edpm_deploy_env - name: Debug make_edpm_deploy_params when: make_edpm_deploy_params is defined ansible.builtin.debug: var: make_edpm_deploy_params - name: Run edpm_deploy retries: "{{ make_edpm_deploy_retries | default(omit) }}" delay: "{{ make_edpm_deploy_delay | default(omit) }}" until: "{{ make_edpm_deploy_until | default(true) 
}}" register: "make_edpm_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_deploy" dry_run: "{{ make_edpm_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_deploy_env|default({})), **(make_edpm_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000017300000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_deploy_baremetal_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_dep0000644000175000017500000000215715115610204033344 0ustar zuulzuul--- - name: Debug make_edpm_deploy_baremetal_prep_env when: make_edpm_deploy_baremetal_prep_env is defined ansible.builtin.debug: var: make_edpm_deploy_baremetal_prep_env - name: Debug make_edpm_deploy_baremetal_prep_params when: make_edpm_deploy_baremetal_prep_params is defined ansible.builtin.debug: var: make_edpm_deploy_baremetal_prep_params - name: Run edpm_deploy_baremetal_prep retries: "{{ make_edpm_deploy_baremetal_prep_retries | default(omit) }}" delay: "{{ make_edpm_deploy_baremetal_prep_delay | default(omit) }}" until: "{{ make_edpm_deploy_baremetal_prep_until | default(true) }}" register: "make_edpm_deploy_baremetal_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_deploy_baremetal_prep" dry_run: "{{ make_edpm_deploy_baremetal_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_deploy_baremetal_prep_env|default({})), **(make_edpm_deploy_baremetal_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_deploy_baremetal.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_dep0000644000175000017500000000204415115610204033337 0ustar zuulzuul--- - name: Debug make_edpm_deploy_baremetal_env when: make_edpm_deploy_baremetal_env is defined ansible.builtin.debug: var: make_edpm_deploy_baremetal_env - name: Debug make_edpm_deploy_baremetal_params when: make_edpm_deploy_baremetal_params is defined ansible.builtin.debug: var: make_edpm_deploy_baremetal_params - name: Run edpm_deploy_baremetal retries: "{{ make_edpm_deploy_baremetal_retries | default(omit) }}" delay: "{{ make_edpm_deploy_baremetal_delay | default(omit) }}" until: "{{ make_edpm_deploy_baremetal_until | default(true) }}" register: "make_edpm_deploy_baremetal_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_deploy_baremetal" dry_run: "{{ make_edpm_deploy_baremetal_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_deploy_baremetal_env|default({})), **(make_edpm_deploy_baremetal_params|default({}))) }}" ././@LongLink0000644000000000000000000000017300000000000011604 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_wait_deploy_baremetal.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_wai0000644000175000017500000000215715115610204033354 0ustar zuulzuul--- - name: Debug make_edpm_wait_deploy_baremetal_env when: make_edpm_wait_deploy_baremetal_env is defined ansible.builtin.debug: var: make_edpm_wait_deploy_baremetal_env - name: Debug make_edpm_wait_deploy_baremetal_params when: make_edpm_wait_deploy_baremetal_params is defined ansible.builtin.debug: var: make_edpm_wait_deploy_baremetal_params - name: Run edpm_wait_deploy_baremetal retries: "{{ make_edpm_wait_deploy_baremetal_retries | default(omit) }}" delay: "{{ make_edpm_wait_deploy_baremetal_delay | default(omit) }}" until: "{{ make_edpm_wait_deploy_baremetal_until | default(true) }}" register: "make_edpm_wait_deploy_baremetal_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_wait_deploy_baremetal" dry_run: "{{ make_edpm_wait_deploy_baremetal_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_wait_deploy_baremetal_env|default({})), **(make_edpm_wait_deploy_baremetal_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_wait_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_wai0000644000175000017500000000173115115610204033351 0ustar zuulzuul--- - name: Debug make_edpm_wait_deploy_env when: make_edpm_wait_deploy_env is defined ansible.builtin.debug: var: make_edpm_wait_deploy_env - name: Debug make_edpm_wait_deploy_params when: make_edpm_wait_deploy_params is defined ansible.builtin.debug: var: make_edpm_wait_deploy_params - name: Run edpm_wait_deploy retries: "{{ make_edpm_wait_deploy_retries | default(omit) }}" delay: "{{ make_edpm_wait_deploy_delay | default(omit) }}" until: "{{ make_edpm_wait_deploy_until | default(true) }}" register: "make_edpm_wait_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_wait_deploy" dry_run: "{{ make_edpm_wait_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_wait_deploy_env|default({})), **(make_edpm_wait_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_register_dns.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_reg0000644000175000017500000000175015115610204033347 0ustar zuulzuul--- - name: Debug make_edpm_register_dns_env when: make_edpm_register_dns_env is defined ansible.builtin.debug: var: make_edpm_register_dns_env - name: Debug make_edpm_register_dns_params when: make_edpm_register_dns_params is defined ansible.builtin.debug: var: make_edpm_register_dns_params - name: Run edpm_register_dns retries: "{{ make_edpm_register_dns_retries | default(omit) }}" delay: "{{ make_edpm_register_dns_delay | default(omit) }}" until: "{{ make_edpm_register_dns_until | default(true) }}" register: 
"make_edpm_register_dns_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_register_dns" dry_run: "{{ make_edpm_register_dns_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_register_dns_env|default({})), **(make_edpm_register_dns_params|default({}))) }}" ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_nova_discover_hosts.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_nov0000644000175000017500000000212115115610204033365 0ustar zuulzuul--- - name: Debug make_edpm_nova_discover_hosts_env when: make_edpm_nova_discover_hosts_env is defined ansible.builtin.debug: var: make_edpm_nova_discover_hosts_env - name: Debug make_edpm_nova_discover_hosts_params when: make_edpm_nova_discover_hosts_params is defined ansible.builtin.debug: var: make_edpm_nova_discover_hosts_params - name: Run edpm_nova_discover_hosts retries: "{{ make_edpm_nova_discover_hosts_retries | default(omit) }}" delay: "{{ make_edpm_nova_discover_hosts_delay | default(omit) }}" until: "{{ make_edpm_nova_discover_hosts_until | default(true) }}" register: "make_edpm_nova_discover_hosts_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_nova_discover_hosts" dry_run: "{{ make_edpm_nova_discover_hosts_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_nova_discover_hosts_env|default({})), **(make_edpm_nova_discover_hosts_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_crds.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000167315115610204033405 0ustar zuulzuul--- - name: Debug make_openstack_crds_env when: make_openstack_crds_env is defined ansible.builtin.debug: var: make_openstack_crds_env - name: Debug make_openstack_crds_params when: make_openstack_crds_params is defined ansible.builtin.debug: var: make_openstack_crds_params - name: Run openstack_crds retries: "{{ make_openstack_crds_retries | default(omit) }}" delay: "{{ make_openstack_crds_delay | default(omit) }}" until: "{{ make_openstack_crds_until | default(true) }}" register: "make_openstack_crds_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_crds" dry_run: "{{ make_openstack_crds_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_crds_env|default({})), **(make_openstack_crds_params|default({}))) }}" ././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_crds_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000206315115610204033377 0ustar zuulzuul--- - name: Debug make_openstack_crds_cleanup_env when: 
make_openstack_crds_cleanup_env is defined ansible.builtin.debug: var: make_openstack_crds_cleanup_env - name: Debug make_openstack_crds_cleanup_params when: make_openstack_crds_cleanup_params is defined ansible.builtin.debug: var: make_openstack_crds_cleanup_params - name: Run openstack_crds_cleanup retries: "{{ make_openstack_crds_cleanup_retries | default(omit) }}" delay: "{{ make_openstack_crds_cleanup_delay | default(omit) }}" until: "{{ make_openstack_crds_cleanup_until | default(true) }}" register: "make_openstack_crds_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_crds_cleanup" dry_run: "{{ make_openstack_crds_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_crds_cleanup_env|default({})), **(make_openstack_crds_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000017300000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_deploy_networker_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_dep0000644000175000017500000000215715115610204033344 0ustar zuulzuul--- - name: Debug make_edpm_deploy_networker_prep_env when: make_edpm_deploy_networker_prep_env is defined ansible.builtin.debug: var: make_edpm_deploy_networker_prep_env - name: Debug make_edpm_deploy_networker_prep_params when: make_edpm_deploy_networker_prep_params is defined ansible.builtin.debug: var: make_edpm_deploy_networker_prep_params - name: Run edpm_deploy_networker_prep retries: "{{ make_edpm_deploy_networker_prep_retries | default(omit) }}" delay: "{{ make_edpm_deploy_networker_prep_delay | default(omit) }}" until: "{{ make_edpm_deploy_networker_prep_until | default(true) }}" register: "make_edpm_deploy_networker_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_deploy_networker_prep" dry_run: "{{ make_edpm_deploy_networker_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_deploy_networker_prep_env|default({})), **(make_edpm_deploy_networker_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000017600000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_deploy_networker_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_dep0000644000175000017500000000223415115610204033340 0ustar zuulzuul--- - name: Debug make_edpm_deploy_networker_cleanup_env when: make_edpm_deploy_networker_cleanup_env is defined ansible.builtin.debug: var: make_edpm_deploy_networker_cleanup_env - name: Debug make_edpm_deploy_networker_cleanup_params when: make_edpm_deploy_networker_cleanup_params is defined ansible.builtin.debug: var: make_edpm_deploy_networker_cleanup_params - name: Run edpm_deploy_networker_cleanup retries: "{{ make_edpm_deploy_networker_cleanup_retries | default(omit) }}" delay: "{{ make_edpm_deploy_networker_cleanup_delay | default(omit) }}" until: "{{ make_edpm_deploy_networker_cleanup_until | default(true) }}" register: "make_edpm_deploy_networker_cleanup_status" cifmw.general.ci_script: output_dir: "{{ 
cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_deploy_networker_cleanup" dry_run: "{{ make_edpm_deploy_networker_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_deploy_networker_cleanup_env|default({})), **(make_edpm_deploy_networker_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_deploy_networker.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_edpm_dep0000644000175000017500000000204415115610204033337 0ustar zuulzuul--- - name: Debug make_edpm_deploy_networker_env when: make_edpm_deploy_networker_env is defined ansible.builtin.debug: var: make_edpm_deploy_networker_env - name: Debug make_edpm_deploy_networker_params when: make_edpm_deploy_networker_params is defined ansible.builtin.debug: var: make_edpm_deploy_networker_params - name: Run edpm_deploy_networker retries: "{{ make_edpm_deploy_networker_retries | default(omit) }}" delay: "{{ make_edpm_deploy_networker_delay | default(omit) }}" until: "{{ make_edpm_deploy_networker_until | default(true) }}" register: "make_edpm_deploy_networker_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make edpm_deploy_networker" dry_run: "{{ make_edpm_deploy_networker_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_edpm_deploy_networker_env|default({})), **(make_edpm_deploy_networker_params|default({}))) }}" ././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_infra_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_infra_pr0000644000175000017500000000157715115610204033374 0ustar zuulzuul--- - name: Debug make_infra_prep_env when: make_infra_prep_env is defined ansible.builtin.debug: var: make_infra_prep_env - name: Debug make_infra_prep_params when: make_infra_prep_params is defined ansible.builtin.debug: var: make_infra_prep_params - name: Run infra_prep retries: "{{ make_infra_prep_retries | default(omit) }}" delay: "{{ make_infra_prep_delay | default(omit) }}" until: "{{ make_infra_prep_until | default(true) }}" register: "make_infra_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make infra_prep" dry_run: "{{ make_infra_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_infra_prep_env|default({})), **(make_infra_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_infra.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_infra.ym0000644000175000017500000000146415115610204033312 0ustar zuulzuul--- - name: Debug make_infra_env when: make_infra_env is defined ansible.builtin.debug: var: make_infra_env - name: Debug make_infra_params when: make_infra_params is defined ansible.builtin.debug: var: make_infra_params - 
name: Run infra retries: "{{ make_infra_retries | default(omit) }}" delay: "{{ make_infra_delay | default(omit) }}" until: "{{ make_infra_until | default(true) }}" register: "make_infra_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make infra" dry_run: "{{ make_infra_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_infra_env|default({})), **(make_infra_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_infra_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_infra_cl0000644000175000017500000000165415115610204033345 0ustar zuulzuul--- - name: Debug make_infra_cleanup_env when: make_infra_cleanup_env is defined ansible.builtin.debug: var: make_infra_cleanup_env - name: Debug make_infra_cleanup_params when: make_infra_cleanup_params is defined ansible.builtin.debug: var: make_infra_cleanup_params - name: Run infra_cleanup retries: "{{ make_infra_cleanup_retries | default(omit) }}" delay: "{{ make_infra_cleanup_delay | default(omit) }}" until: "{{ make_infra_cleanup_until | default(true) }}" register: "make_infra_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make infra_cleanup" dry_run: "{{ make_infra_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_infra_cleanup_env|default({})), **(make_infra_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_dns_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_dns_depl0000644000175000017500000000171215115610204033353 0ustar zuulzuul--- - name: Debug make_dns_deploy_prep_env when: make_dns_deploy_prep_env is defined ansible.builtin.debug: var: make_dns_deploy_prep_env - name: Debug make_dns_deploy_prep_params when: make_dns_deploy_prep_params is defined ansible.builtin.debug: var: make_dns_deploy_prep_params - name: Run dns_deploy_prep retries: "{{ make_dns_deploy_prep_retries | default(omit) }}" delay: "{{ make_dns_deploy_prep_delay | default(omit) }}" until: "{{ make_dns_deploy_prep_until | default(true) }}" register: "make_dns_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make dns_deploy_prep" dry_run: "{{ make_dns_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_dns_deploy_prep_env|default({})), **(make_dns_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_dns_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_dns_depl0000644000175000017500000000157715115610204033364 0ustar zuulzuul--- - name: Debug make_dns_deploy_env when: make_dns_deploy_env is defined ansible.builtin.debug: var: make_dns_deploy_env - name: 
Debug make_dns_deploy_params when: make_dns_deploy_params is defined ansible.builtin.debug: var: make_dns_deploy_params - name: Run dns_deploy retries: "{{ make_dns_deploy_retries | default(omit) }}" delay: "{{ make_dns_deploy_delay | default(omit) }}" until: "{{ make_dns_deploy_until | default(true) }}" register: "make_dns_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make dns_deploy" dry_run: "{{ make_dns_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_dns_deploy_env|default({})), **(make_dns_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_dns_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_dns_depl0000644000175000017500000000176715115610204033365 0ustar zuulzuul--- - name: Debug make_dns_deploy_cleanup_env when: make_dns_deploy_cleanup_env is defined ansible.builtin.debug: var: make_dns_deploy_cleanup_env - name: Debug make_dns_deploy_cleanup_params when: make_dns_deploy_cleanup_params is defined ansible.builtin.debug: var: make_dns_deploy_cleanup_params - name: Run dns_deploy_cleanup retries: "{{ make_dns_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_dns_deploy_cleanup_delay | default(omit) }}" until: "{{ make_dns_deploy_cleanup_until | default(true) }}" register: "make_dns_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make dns_deploy_cleanup" dry_run: "{{ make_dns_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_dns_deploy_cleanup_env|default({})), **(make_dns_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_netconfig_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_netconfi0000644000175000017500000000204415115610204033367 0ustar zuulzuul--- - name: Debug make_netconfig_deploy_prep_env when: make_netconfig_deploy_prep_env is defined ansible.builtin.debug: var: make_netconfig_deploy_prep_env - name: Debug make_netconfig_deploy_prep_params when: make_netconfig_deploy_prep_params is defined ansible.builtin.debug: var: make_netconfig_deploy_prep_params - name: Run netconfig_deploy_prep retries: "{{ make_netconfig_deploy_prep_retries | default(omit) }}" delay: "{{ make_netconfig_deploy_prep_delay | default(omit) }}" until: "{{ make_netconfig_deploy_prep_until | default(true) }}" register: "make_netconfig_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make netconfig_deploy_prep" dry_run: "{{ make_netconfig_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_netconfig_deploy_prep_env|default({})), **(make_netconfig_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_netconfig_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_netconfi0000644000175000017500000000173115115610204033371 0ustar zuulzuul--- - name: Debug make_netconfig_deploy_env when: make_netconfig_deploy_env is defined ansible.builtin.debug: var: make_netconfig_deploy_env - name: Debug make_netconfig_deploy_params when: make_netconfig_deploy_params is defined ansible.builtin.debug: var: make_netconfig_deploy_params - name: Run netconfig_deploy retries: "{{ make_netconfig_deploy_retries | default(omit) }}" delay: "{{ make_netconfig_deploy_delay | default(omit) }}" until: "{{ make_netconfig_deploy_until | default(true) }}" register: "make_netconfig_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make netconfig_deploy" dry_run: "{{ make_netconfig_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_netconfig_deploy_env|default({})), **(make_netconfig_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_netconfig_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_netconfi0000644000175000017500000000212115115610204033363 0ustar zuulzuul--- - name: Debug make_netconfig_deploy_cleanup_env when: make_netconfig_deploy_cleanup_env is defined ansible.builtin.debug: var: make_netconfig_deploy_cleanup_env - name: Debug make_netconfig_deploy_cleanup_params when: make_netconfig_deploy_cleanup_params is defined ansible.builtin.debug: var: make_netconfig_deploy_cleanup_params - name: Run netconfig_deploy_cleanup retries: "{{ make_netconfig_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_netconfig_deploy_cleanup_delay | default(omit) }}" until: "{{ make_netconfig_deploy_cleanup_until | default(true) }}" register: "make_netconfig_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make netconfig_deploy_cleanup" dry_run: "{{ make_netconfig_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_netconfig_deploy_cleanup_env|default({})), **(make_netconfig_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_memcached_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_memcache0000644000175000017500000000204415115610204033324 0ustar zuulzuul--- - name: Debug make_memcached_deploy_prep_env when: make_memcached_deploy_prep_env is defined ansible.builtin.debug: var: make_memcached_deploy_prep_env - name: Debug make_memcached_deploy_prep_params when: make_memcached_deploy_prep_params is defined ansible.builtin.debug: var: make_memcached_deploy_prep_params - name: Run memcached_deploy_prep retries: "{{ make_memcached_deploy_prep_retries | default(omit) }}" delay: "{{ make_memcached_deploy_prep_delay | default(omit) }}" until: "{{ make_memcached_deploy_prep_until | default(true) }}" 
register: "make_memcached_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make memcached_deploy_prep" dry_run: "{{ make_memcached_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_memcached_deploy_prep_env|default({})), **(make_memcached_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_memcached_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_memcache0000644000175000017500000000173115115610204033326 0ustar zuulzuul--- - name: Debug make_memcached_deploy_env when: make_memcached_deploy_env is defined ansible.builtin.debug: var: make_memcached_deploy_env - name: Debug make_memcached_deploy_params when: make_memcached_deploy_params is defined ansible.builtin.debug: var: make_memcached_deploy_params - name: Run memcached_deploy retries: "{{ make_memcached_deploy_retries | default(omit) }}" delay: "{{ make_memcached_deploy_delay | default(omit) }}" until: "{{ make_memcached_deploy_until | default(true) }}" register: "make_memcached_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make memcached_deploy" dry_run: "{{ make_memcached_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_memcached_deploy_env|default({})), **(make_memcached_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_memcached_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_memcache0000644000175000017500000000212115115610204033320 0ustar zuulzuul--- - name: Debug make_memcached_deploy_cleanup_env when: make_memcached_deploy_cleanup_env is defined ansible.builtin.debug: var: make_memcached_deploy_cleanup_env - name: Debug make_memcached_deploy_cleanup_params when: make_memcached_deploy_cleanup_params is defined ansible.builtin.debug: var: make_memcached_deploy_cleanup_params - name: Run memcached_deploy_cleanup retries: "{{ make_memcached_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_memcached_deploy_cleanup_delay | default(omit) }}" until: "{{ make_memcached_deploy_cleanup_until | default(true) }}" register: "make_memcached_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make memcached_deploy_cleanup" dry_run: "{{ make_memcached_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_memcached_deploy_cleanup_env|default({})), **(make_memcached_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone0000644000175000017500000000165415115610204033431 0ustar zuulzuul--- - name: Debug 
make_keystone_prep_env when: make_keystone_prep_env is defined ansible.builtin.debug: var: make_keystone_prep_env - name: Debug make_keystone_prep_params when: make_keystone_prep_params is defined ansible.builtin.debug: var: make_keystone_prep_params - name: Run keystone_prep retries: "{{ make_keystone_prep_retries | default(omit) }}" delay: "{{ make_keystone_prep_delay | default(omit) }}" until: "{{ make_keystone_prep_until | default(true) }}" register: "make_keystone_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make keystone_prep" dry_run: "{{ make_keystone_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_keystone_prep_env|default({})), **(make_keystone_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone0000644000175000017500000000154115115610204033424 0ustar zuulzuul--- - name: Debug make_keystone_env when: make_keystone_env is defined ansible.builtin.debug: var: make_keystone_env - name: Debug make_keystone_params when: make_keystone_params is defined ansible.builtin.debug: var: make_keystone_params - name: Run keystone retries: "{{ make_keystone_retries | default(omit) }}" delay: "{{ make_keystone_delay | default(omit) }}" until: "{{ make_keystone_until | default(true) }}" register: "make_keystone_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make keystone" dry_run: "{{ make_keystone_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_keystone_env|default({})), **(make_keystone_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone0000644000175000017500000000173115115610204033425 0ustar zuulzuul--- - name: Debug make_keystone_cleanup_env when: make_keystone_cleanup_env is defined ansible.builtin.debug: var: make_keystone_cleanup_env - name: Debug make_keystone_cleanup_params when: make_keystone_cleanup_params is defined ansible.builtin.debug: var: make_keystone_cleanup_params - name: Run keystone_cleanup retries: "{{ make_keystone_cleanup_retries | default(omit) }}" delay: "{{ make_keystone_cleanup_delay | default(omit) }}" until: "{{ make_keystone_cleanup_until | default(true) }}" register: "make_keystone_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make keystone_cleanup" dry_run: "{{ make_keystone_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_keystone_cleanup_env|default({})), **(make_keystone_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016500000000000011605 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone0000644000175000017500000000202515115610204033422 0ustar zuulzuul--- - name: Debug make_keystone_deploy_prep_env when: make_keystone_deploy_prep_env is defined ansible.builtin.debug: var: make_keystone_deploy_prep_env - name: Debug make_keystone_deploy_prep_params when: make_keystone_deploy_prep_params is defined ansible.builtin.debug: var: make_keystone_deploy_prep_params - name: Run keystone_deploy_prep retries: "{{ make_keystone_deploy_prep_retries | default(omit) }}" delay: "{{ make_keystone_deploy_prep_delay | default(omit) }}" until: "{{ make_keystone_deploy_prep_until | default(true) }}" register: "make_keystone_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make keystone_deploy_prep" dry_run: "{{ make_keystone_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_keystone_deploy_prep_env|default({})), **(make_keystone_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone0000644000175000017500000000171215115610204033424 0ustar zuulzuul--- - name: Debug make_keystone_deploy_env when: make_keystone_deploy_env is defined ansible.builtin.debug: var: make_keystone_deploy_env - name: Debug make_keystone_deploy_params when: make_keystone_deploy_params is defined ansible.builtin.debug: var: make_keystone_deploy_params - name: Run keystone_deploy retries: "{{ make_keystone_deploy_retries | default(omit) }}" delay: "{{ make_keystone_deploy_delay | default(omit) }}" until: "{{ make_keystone_deploy_until | default(true) }}" register: "make_keystone_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make keystone_deploy" dry_run: "{{ make_keystone_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_keystone_deploy_env|default({})), **(make_keystone_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000017000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone0000644000175000017500000000210215115610204033416 0ustar zuulzuul--- - name: Debug make_keystone_deploy_cleanup_env when: make_keystone_deploy_cleanup_env is defined ansible.builtin.debug: var: make_keystone_deploy_cleanup_env - name: Debug make_keystone_deploy_cleanup_params when: make_keystone_deploy_cleanup_params is defined ansible.builtin.debug: var: make_keystone_deploy_cleanup_params - name: Run keystone_deploy_cleanup retries: "{{ make_keystone_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_keystone_deploy_cleanup_delay | default(omit) }}" until: "{{ make_keystone_deploy_cleanup_until | default(true) }}" register: "make_keystone_deploy_cleanup_status" 
cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make keystone_deploy_cleanup" dry_run: "{{ make_keystone_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_keystone_deploy_cleanup_env|default({})), **(make_keystone_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican0000644000175000017500000000165415115610204033331 0ustar zuulzuul--- - name: Debug make_barbican_prep_env when: make_barbican_prep_env is defined ansible.builtin.debug: var: make_barbican_prep_env - name: Debug make_barbican_prep_params when: make_barbican_prep_params is defined ansible.builtin.debug: var: make_barbican_prep_params - name: Run barbican_prep retries: "{{ make_barbican_prep_retries | default(omit) }}" delay: "{{ make_barbican_prep_delay | default(omit) }}" until: "{{ make_barbican_prep_until | default(true) }}" register: "make_barbican_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make barbican_prep" dry_run: "{{ make_barbican_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_barbican_prep_env|default({})), **(make_barbican_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican0000644000175000017500000000154115115610204033324 0ustar zuulzuul--- - name: Debug make_barbican_env when: make_barbican_env is defined ansible.builtin.debug: var: make_barbican_env - name: Debug make_barbican_params when: make_barbican_params is defined ansible.builtin.debug: var: make_barbican_params - name: Run barbican retries: "{{ make_barbican_retries | default(omit) }}" delay: "{{ make_barbican_delay | default(omit) }}" until: "{{ make_barbican_until | default(true) }}" register: "make_barbican_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make barbican" dry_run: "{{ make_barbican_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_barbican_env|default({})), **(make_barbican_params|default({}))) }}" home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_all.yml0000644000175000017500000000142615115610204033135 0ustar zuulzuul--- - name: Debug make_all_env when: make_all_env is defined ansible.builtin.debug: var: make_all_env - name: Debug make_all_params when: make_all_params is defined ansible.builtin.debug: var: make_all_params - name: Run all retries: "{{ make_all_retries | default(omit) }}" delay: "{{ make_all_delay | default(omit) }}" until: "{{ make_all_until | default(true) }}" register: "make_all_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: 
"/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make all" dry_run: "{{ make_all_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_all_env|default({})), **(make_all_params|default({}))) }}" home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_help.yml0000644000175000017500000000145615115610204033320 0ustar zuulzuul--- - name: Debug make_help_env when: make_help_env is defined ansible.builtin.debug: var: make_help_env - name: Debug make_help_params when: make_help_params is defined ansible.builtin.debug: var: make_help_params - name: Run help retries: "{{ make_help_retries | default(omit) }}" delay: "{{ make_help_delay | default(omit) }}" until: "{{ make_help_until | default(true) }}" register: "make_help_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make help" dry_run: "{{ make_help_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_help_env|default({})), **(make_help_params|default({}))) }}" ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cleanup.0000644000175000017500000000152215115610204033267 0ustar zuulzuul--- - name: Debug make_cleanup_env when: make_cleanup_env is defined ansible.builtin.debug: var: make_cleanup_env - name: Debug make_cleanup_params when: make_cleanup_params is defined ansible.builtin.debug: var: make_cleanup_params - name: Run cleanup retries: "{{ make_cleanup_retries | default(omit) }}" delay: "{{ make_cleanup_delay | default(omit) }}" until: "{{ make_cleanup_until | default(true) }}" register: "make_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make cleanup" dry_run: "{{ make_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_cleanup_env|default({})), **(make_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_deploy_c0000644000175000017500000000167315115610204033367 0ustar zuulzuul--- - name: Debug make_deploy_cleanup_env when: make_deploy_cleanup_env is defined ansible.builtin.debug: var: make_deploy_cleanup_env - name: Debug make_deploy_cleanup_params when: make_deploy_cleanup_params is defined ansible.builtin.debug: var: make_deploy_cleanup_params - name: Run deploy_cleanup retries: "{{ make_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_deploy_cleanup_delay | default(omit) }}" until: "{{ make_deploy_cleanup_until | default(true) }}" register: "make_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make deploy_cleanup" dry_run: "{{ make_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_deploy_cleanup_env|default({})), 
**(make_deploy_cleanup_params|default({}))) }}" home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_wait.yml0000644000175000017500000000144515115610204033332 0ustar zuulzuul--- - name: Debug make_wait_env when: make_wait_env is defined ansible.builtin.debug: var: make_wait_env - name: Debug make_wait_params when: make_wait_params is defined ansible.builtin.debug: var: make_wait_params - name: Run wait retries: "{{ make_wait_retries | default(omit) }}" delay: "{{ make_wait_delay | default(omit) }}" until: "{{ make_wait_until | default(true) }}" register: "make_wait_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make wait" dry_run: "{{ make_wait_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_wait_env|default({})), **(make_wait_params|default({}))) }}" ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_storage.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_stor0000644000175000017500000000161615115610204033404 0ustar zuulzuul--- - name: Debug make_crc_storage_env when: make_crc_storage_env is defined ansible.builtin.debug: var: make_crc_storage_env - name: Debug make_crc_storage_params when: make_crc_storage_params is defined ansible.builtin.debug: var: make_crc_storage_params - name: Run crc_storage retries: "{{ make_crc_storage_retries | default(omit) }}" delay: "{{ make_crc_storage_delay | default(omit) }}" until: "{{ make_crc_storage_until | default(true) }}" register: "make_crc_storage_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make crc_storage" dry_run: "{{ make_crc_storage_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_crc_storage_env|default({})), **(make_crc_storage_params|default({}))) }}" ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_storage_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_stor0000644000175000017500000000200615115610204033376 0ustar zuulzuul--- - name: Debug make_crc_storage_cleanup_env when: make_crc_storage_cleanup_env is defined ansible.builtin.debug: var: make_crc_storage_cleanup_env - name: Debug make_crc_storage_cleanup_params when: make_crc_storage_cleanup_params is defined ansible.builtin.debug: var: make_crc_storage_cleanup_params - name: Run crc_storage_cleanup retries: "{{ make_crc_storage_cleanup_retries | default(omit) }}" delay: "{{ make_crc_storage_cleanup_delay | default(omit) }}" until: "{{ make_crc_storage_cleanup_until | default(true) }}" register: "make_crc_storage_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make crc_storage_cleanup" dry_run: "{{ make_crc_storage_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_crc_storage_cleanup_env|default({})), **(make_crc_storage_cleanup_params|default({}))) }}" 
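Each task file archived in this install_yamls_makes role follows the same generated template: two optional debug tasks for the per-target *_env and *_params variables, then a cifmw.general.ci_script task that runs the matching make target from the install_yamls checkout, with retries, delay and until wired to per-target variables. Re-indented for readability, the make_crc_storage_cleanup.yml entry directly above looks like this (same content, standard YAML layout):

---
- name: Debug make_crc_storage_cleanup_env
  when: make_crc_storage_cleanup_env is defined
  ansible.builtin.debug:
    var: make_crc_storage_cleanup_env

- name: Debug make_crc_storage_cleanup_params
  when: make_crc_storage_cleanup_params is defined
  ansible.builtin.debug:
    var: make_crc_storage_cleanup_params

- name: Run crc_storage_cleanup
  retries: "{{ make_crc_storage_cleanup_retries | default(omit) }}"
  delay: "{{ make_crc_storage_cleanup_delay | default(omit) }}"
  until: "{{ make_crc_storage_cleanup_until | default(true) }}"
  register: "make_crc_storage_cleanup_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make crc_storage_cleanup"
    dry_run: "{{ make_crc_storage_cleanup_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_crc_storage_cleanup_env|default({})), **(make_crc_storage_cleanup_params|default({}))) }}"

Only the make target and the variable prefix change from file to file (make_help.yml additionally points chdir at the devsetup subdirectory of install_yamls).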
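The per-target variables are the only knobs these generated tasks expose: *_env and *_params are merged into extra_args via dict(env, **params), so a key set in *_params overrides the same key in *_env; *_dryrun is forwarded to cifmw.general.ci_script as dry_run; and *_retries, *_delay and *_until drive Ansible's retry loop, with until defaulting to true so no retries happen unless it is overridden. A minimal caller might look like the sketch below; the include mechanism, the NAMESPACE values and the play layout are illustrative assumptions, not taken from this archive:

- name: Illustrative use of the generated crc_storage_cleanup tasks
  hosts: localhost
  gather_facts: true   # ansible_user_dir is used in the default output_dir
  tasks:
    - name: Run make crc_storage_cleanup through install_yamls_makes
      ansible.builtin.include_role:
        name: install_yamls_makes
        tasks_from: make_crc_storage_cleanup.yml
      vars:
        # Hypothetical values for illustration only.
        make_crc_storage_cleanup_env:
          NAMESPACE: openstack
        make_crc_storage_cleanup_params:
          NAMESPACE: openstack-storage   # wins over the _env entry for the same key
        make_crc_storage_cleanup_dryrun: true    # passed through as dry_run
        make_crc_storage_cleanup_retries: 3
        make_crc_storage_cleanup_delay: 10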
././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_storage_release.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_stor0000644000175000017500000000200615115610204033376 0ustar zuulzuul--- - name: Debug make_crc_storage_release_env when: make_crc_storage_release_env is defined ansible.builtin.debug: var: make_crc_storage_release_env - name: Debug make_crc_storage_release_params when: make_crc_storage_release_params is defined ansible.builtin.debug: var: make_crc_storage_release_params - name: Run crc_storage_release retries: "{{ make_crc_storage_release_retries | default(omit) }}" delay: "{{ make_crc_storage_release_delay | default(omit) }}" until: "{{ make_crc_storage_release_until | default(true) }}" register: "make_crc_storage_release_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make crc_storage_release" dry_run: "{{ make_crc_storage_release_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_crc_storage_release_env|default({})), **(make_crc_storage_release_params|default({}))) }}" ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_storage_with_retries.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_stor0000644000175000017500000000212115115610204033374 0ustar zuulzuul--- - name: Debug make_crc_storage_with_retries_env when: make_crc_storage_with_retries_env is defined ansible.builtin.debug: var: make_crc_storage_with_retries_env - name: Debug make_crc_storage_with_retries_params when: make_crc_storage_with_retries_params is defined ansible.builtin.debug: var: make_crc_storage_with_retries_params - name: Run crc_storage_with_retries retries: "{{ make_crc_storage_with_retries_retries | default(omit) }}" delay: "{{ make_crc_storage_with_retries_delay | default(omit) }}" until: "{{ make_crc_storage_with_retries_until | default(true) }}" register: "make_crc_storage_with_retries_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make crc_storage_with_retries" dry_run: "{{ make_crc_storage_with_retries_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_crc_storage_with_retries_env|default({})), **(make_crc_storage_with_retries_params|default({}))) }}" ././@LongLink0000644000000000000000000000020100000000000011574 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_storage_cleanup_with_retries.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_stor0000644000175000017500000000231115115610204033375 0ustar zuulzuul--- - name: Debug make_crc_storage_cleanup_with_retries_env when: make_crc_storage_cleanup_with_retries_env is defined ansible.builtin.debug: var: make_crc_storage_cleanup_with_retries_env - name: Debug make_crc_storage_cleanup_with_retries_params when: make_crc_storage_cleanup_with_retries_params is defined ansible.builtin.debug: var: make_crc_storage_cleanup_with_retries_params - name: Run 
crc_storage_cleanup_with_retries retries: "{{ make_crc_storage_cleanup_with_retries_retries | default(omit) }}" delay: "{{ make_crc_storage_cleanup_with_retries_delay | default(omit) }}" until: "{{ make_crc_storage_cleanup_with_retries_until | default(true) }}" register: "make_crc_storage_cleanup_with_retries_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make crc_storage_cleanup_with_retries" dry_run: "{{ make_crc_storage_cleanup_with_retries_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_crc_storage_cleanup_with_retries_env|default({})), **(make_crc_storage_cleanup_with_retries_params|default({}))) }}" ././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_operator_namespace.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_operator0000644000175000017500000000176715115610204033430 0ustar zuulzuul--- - name: Debug make_operator_namespace_env when: make_operator_namespace_env is defined ansible.builtin.debug: var: make_operator_namespace_env - name: Debug make_operator_namespace_params when: make_operator_namespace_params is defined ansible.builtin.debug: var: make_operator_namespace_params - name: Run operator_namespace retries: "{{ make_operator_namespace_retries | default(omit) }}" delay: "{{ make_operator_namespace_delay | default(omit) }}" until: "{{ make_operator_namespace_until | default(true) }}" register: "make_operator_namespace_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make operator_namespace" dry_run: "{{ make_operator_namespace_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_operator_namespace_env|default({})), **(make_operator_namespace_params|default({}))) }}" ././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_namespace.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_namespac0000644000175000017500000000156015115610204033353 0ustar zuulzuul--- - name: Debug make_namespace_env when: make_namespace_env is defined ansible.builtin.debug: var: make_namespace_env - name: Debug make_namespace_params when: make_namespace_params is defined ansible.builtin.debug: var: make_namespace_params - name: Run namespace retries: "{{ make_namespace_retries | default(omit) }}" delay: "{{ make_namespace_delay | default(omit) }}" until: "{{ make_namespace_until | default(true) }}" register: "make_namespace_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make namespace" dry_run: "{{ make_namespace_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_namespace_env|default({})), **(make_namespace_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_namespace_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_namespac0000644000175000017500000000175015115610204033354 0ustar zuulzuul--- - name: Debug make_namespace_cleanup_env when: make_namespace_cleanup_env is defined ansible.builtin.debug: var: make_namespace_cleanup_env - name: Debug make_namespace_cleanup_params when: make_namespace_cleanup_params is defined ansible.builtin.debug: var: make_namespace_cleanup_params - name: Run namespace_cleanup retries: "{{ make_namespace_cleanup_retries | default(omit) }}" delay: "{{ make_namespace_cleanup_delay | default(omit) }}" until: "{{ make_namespace_cleanup_until | default(true) }}" register: "make_namespace_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make namespace_cleanup" dry_run: "{{ make_namespace_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_namespace_cleanup_env|default({})), **(make_namespace_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_input.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_input.ym0000644000175000017500000000146415115610204033352 0ustar zuulzuul--- - name: Debug make_input_env when: make_input_env is defined ansible.builtin.debug: var: make_input_env - name: Debug make_input_params when: make_input_params is defined ansible.builtin.debug: var: make_input_params - name: Run input retries: "{{ make_input_retries | default(omit) }}" delay: "{{ make_input_delay | default(omit) }}" until: "{{ make_input_until | default(true) }}" register: "make_input_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make input" dry_run: "{{ make_input_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_input_env|default({})), **(make_input_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_input_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_input_cl0000644000175000017500000000165415115610204033405 0ustar zuulzuul--- - name: Debug make_input_cleanup_env when: make_input_cleanup_env is defined ansible.builtin.debug: var: make_input_cleanup_env - name: Debug make_input_cleanup_params when: make_input_cleanup_params is defined ansible.builtin.debug: var: make_input_cleanup_params - name: Run input_cleanup retries: "{{ make_input_cleanup_retries | default(omit) }}" delay: "{{ make_input_cleanup_delay | default(omit) }}" until: "{{ make_input_cleanup_until | default(true) }}" register: "make_input_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make input_cleanup" dry_run: "{{ make_input_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ 
dict((make_input_cleanup_env|default({})), **(make_input_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_bmo_setup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_bmo_0000644000175000017500000000165415115610204033333 0ustar zuulzuul--- - name: Debug make_crc_bmo_setup_env when: make_crc_bmo_setup_env is defined ansible.builtin.debug: var: make_crc_bmo_setup_env - name: Debug make_crc_bmo_setup_params when: make_crc_bmo_setup_params is defined ansible.builtin.debug: var: make_crc_bmo_setup_params - name: Run crc_bmo_setup retries: "{{ make_crc_bmo_setup_retries | default(omit) }}" delay: "{{ make_crc_bmo_setup_delay | default(omit) }}" until: "{{ make_crc_bmo_setup_until | default(true) }}" register: "make_crc_bmo_setup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make crc_bmo_setup" dry_run: "{{ make_crc_bmo_setup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_crc_bmo_setup_env|default({})), **(make_crc_bmo_setup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_bmo_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_crc_bmo_0000644000175000017500000000171215115610204033326 0ustar zuulzuul--- - name: Debug make_crc_bmo_cleanup_env when: make_crc_bmo_cleanup_env is defined ansible.builtin.debug: var: make_crc_bmo_cleanup_env - name: Debug make_crc_bmo_cleanup_params when: make_crc_bmo_cleanup_params is defined ansible.builtin.debug: var: make_crc_bmo_cleanup_params - name: Run crc_bmo_cleanup retries: "{{ make_crc_bmo_cleanup_retries | default(omit) }}" delay: "{{ make_crc_bmo_cleanup_delay | default(omit) }}" until: "{{ make_crc_bmo_cleanup_until | default(true) }}" register: "make_crc_bmo_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make crc_bmo_cleanup" dry_run: "{{ make_crc_bmo_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_crc_bmo_cleanup_env|default({})), **(make_crc_bmo_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000167315115610204033405 0ustar zuulzuul--- - name: Debug make_openstack_prep_env when: make_openstack_prep_env is defined ansible.builtin.debug: var: make_openstack_prep_env - name: Debug make_openstack_prep_params when: make_openstack_prep_params is defined ansible.builtin.debug: var: make_openstack_prep_params - name: Run openstack_prep retries: "{{ make_openstack_prep_retries | default(omit) }}" delay: "{{ make_openstack_prep_delay | default(omit) }}" until: "{{ make_openstack_prep_until | default(true) }}" register: "make_openstack_prep_status" cifmw.general.ci_script: output_dir: "{{ 
cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_prep" dry_run: "{{ make_openstack_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_prep_env|default({})), **(make_openstack_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000156015115610204033400 0ustar zuulzuul--- - name: Debug make_openstack_env when: make_openstack_env is defined ansible.builtin.debug: var: make_openstack_env - name: Debug make_openstack_params when: make_openstack_params is defined ansible.builtin.debug: var: make_openstack_params - name: Run openstack retries: "{{ make_openstack_retries | default(omit) }}" delay: "{{ make_openstack_delay | default(omit) }}" until: "{{ make_openstack_until | default(true) }}" register: "make_openstack_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack" dry_run: "{{ make_openstack_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_env|default({})), **(make_openstack_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_wait.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000167315115610204033405 0ustar zuulzuul--- - name: Debug make_openstack_wait_env when: make_openstack_wait_env is defined ansible.builtin.debug: var: make_openstack_wait_env - name: Debug make_openstack_wait_params when: make_openstack_wait_params is defined ansible.builtin.debug: var: make_openstack_wait_params - name: Run openstack_wait retries: "{{ make_openstack_wait_retries | default(omit) }}" delay: "{{ make_openstack_wait_delay | default(omit) }}" until: "{{ make_openstack_wait_until | default(true) }}" register: "make_openstack_wait_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_wait" dry_run: "{{ make_openstack_wait_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_wait_env|default({})), **(make_openstack_wait_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_init.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000167315115610204033405 0ustar zuulzuul--- - name: Debug make_openstack_init_env when: make_openstack_init_env is defined ansible.builtin.debug: var: make_openstack_init_env - name: Debug make_openstack_init_params when: make_openstack_init_params is defined ansible.builtin.debug: var: make_openstack_init_params - name: Run openstack_init retries: "{{ make_openstack_init_retries | default(omit) }}" delay: "{{ make_openstack_init_delay | 
default(omit) }}" until: "{{ make_openstack_init_until | default(true) }}" register: "make_openstack_init_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_init" dry_run: "{{ make_openstack_init_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_init_env|default({})), **(make_openstack_init_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000175015115610204033401 0ustar zuulzuul--- - name: Debug make_openstack_cleanup_env when: make_openstack_cleanup_env is defined ansible.builtin.debug: var: make_openstack_cleanup_env - name: Debug make_openstack_cleanup_params when: make_openstack_cleanup_params is defined ansible.builtin.debug: var: make_openstack_cleanup_params - name: Run openstack_cleanup retries: "{{ make_openstack_cleanup_retries | default(omit) }}" delay: "{{ make_openstack_cleanup_delay | default(omit) }}" until: "{{ make_openstack_cleanup_until | default(true) }}" register: "make_openstack_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_cleanup" dry_run: "{{ make_openstack_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_cleanup_env|default({})), **(make_openstack_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_repo.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000167315115610204033405 0ustar zuulzuul--- - name: Debug make_openstack_repo_env when: make_openstack_repo_env is defined ansible.builtin.debug: var: make_openstack_repo_env - name: Debug make_openstack_repo_params when: make_openstack_repo_params is defined ansible.builtin.debug: var: make_openstack_repo_params - name: Run openstack_repo retries: "{{ make_openstack_repo_retries | default(omit) }}" delay: "{{ make_openstack_repo_delay | default(omit) }}" until: "{{ make_openstack_repo_until | default(true) }}" register: "make_openstack_repo_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_repo" dry_run: "{{ make_openstack_repo_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_repo_env|default({})), **(make_openstack_repo_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000204415115610204033376 0ustar zuulzuul--- - name: Debug make_openstack_deploy_prep_env when: make_openstack_deploy_prep_env is defined 
ansible.builtin.debug: var: make_openstack_deploy_prep_env - name: Debug make_openstack_deploy_prep_params when: make_openstack_deploy_prep_params is defined ansible.builtin.debug: var: make_openstack_deploy_prep_params - name: Run openstack_deploy_prep retries: "{{ make_openstack_deploy_prep_retries | default(omit) }}" delay: "{{ make_openstack_deploy_prep_delay | default(omit) }}" until: "{{ make_openstack_deploy_prep_until | default(true) }}" register: "make_openstack_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_deploy_prep" dry_run: "{{ make_openstack_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_deploy_prep_env|default({})), **(make_openstack_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000173115115610204033400 0ustar zuulzuul--- - name: Debug make_openstack_deploy_env when: make_openstack_deploy_env is defined ansible.builtin.debug: var: make_openstack_deploy_env - name: Debug make_openstack_deploy_params when: make_openstack_deploy_params is defined ansible.builtin.debug: var: make_openstack_deploy_params - name: Run openstack_deploy retries: "{{ make_openstack_deploy_retries | default(omit) }}" delay: "{{ make_openstack_deploy_delay | default(omit) }}" until: "{{ make_openstack_deploy_until | default(true) }}" register: "make_openstack_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_deploy" dry_run: "{{ make_openstack_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_deploy_env|default({})), **(make_openstack_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_wait_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000204415115610204033376 0ustar zuulzuul--- - name: Debug make_openstack_wait_deploy_env when: make_openstack_wait_deploy_env is defined ansible.builtin.debug: var: make_openstack_wait_deploy_env - name: Debug make_openstack_wait_deploy_params when: make_openstack_wait_deploy_params is defined ansible.builtin.debug: var: make_openstack_wait_deploy_params - name: Run openstack_wait_deploy retries: "{{ make_openstack_wait_deploy_retries | default(omit) }}" delay: "{{ make_openstack_wait_deploy_delay | default(omit) }}" until: "{{ make_openstack_wait_deploy_until | default(true) }}" register: "make_openstack_wait_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_wait_deploy" dry_run: "{{ make_openstack_wait_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_wait_deploy_env|default({})), 
**(make_openstack_wait_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000212115115610204033372 0ustar zuulzuul--- - name: Debug make_openstack_deploy_cleanup_env when: make_openstack_deploy_cleanup_env is defined ansible.builtin.debug: var: make_openstack_deploy_cleanup_env - name: Debug make_openstack_deploy_cleanup_params when: make_openstack_deploy_cleanup_params is defined ansible.builtin.debug: var: make_openstack_deploy_cleanup_params - name: Run openstack_deploy_cleanup retries: "{{ make_openstack_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_openstack_deploy_cleanup_delay | default(omit) }}" until: "{{ make_openstack_deploy_cleanup_until | default(true) }}" register: "make_openstack_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_deploy_cleanup" dry_run: "{{ make_openstack_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_deploy_cleanup_env|default({})), **(make_openstack_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_update_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000202515115610204033375 0ustar zuulzuul--- - name: Debug make_openstack_update_run_env when: make_openstack_update_run_env is defined ansible.builtin.debug: var: make_openstack_update_run_env - name: Debug make_openstack_update_run_params when: make_openstack_update_run_params is defined ansible.builtin.debug: var: make_openstack_update_run_params - name: Run openstack_update_run retries: "{{ make_openstack_update_run_retries | default(omit) }}" delay: "{{ make_openstack_update_run_delay | default(omit) }}" until: "{{ make_openstack_update_run_until | default(true) }}" register: "make_openstack_update_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_update_run" dry_run: "{{ make_openstack_update_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_update_run_env|default({})), **(make_openstack_update_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placement_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placemen0000644000175000017500000000173115115610204033350 0ustar zuulzuul--- - name: Debug make_placement_deploy_env when: make_placement_deploy_env is defined ansible.builtin.debug: var: make_placement_deploy_env - name: Debug make_placement_deploy_params when: make_placement_deploy_params is defined ansible.builtin.debug: var: make_placement_deploy_params - name: Run placement_deploy retries: "{{ make_placement_deploy_retries | 
default(omit) }}" delay: "{{ make_placement_deploy_delay | default(omit) }}" until: "{{ make_placement_deploy_until | default(true) }}" register: "make_placement_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make placement_deploy" dry_run: "{{ make_placement_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_placement_deploy_env|default({})), **(make_placement_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placement_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placemen0000644000175000017500000000212115115610204033342 0ustar zuulzuul--- - name: Debug make_placement_deploy_cleanup_env when: make_placement_deploy_cleanup_env is defined ansible.builtin.debug: var: make_placement_deploy_cleanup_env - name: Debug make_placement_deploy_cleanup_params when: make_placement_deploy_cleanup_params is defined ansible.builtin.debug: var: make_placement_deploy_cleanup_params - name: Run placement_deploy_cleanup retries: "{{ make_placement_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_placement_deploy_cleanup_delay | default(omit) }}" until: "{{ make_placement_deploy_cleanup_until | default(true) }}" register: "make_placement_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make placement_deploy_cleanup" dry_run: "{{ make_placement_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_placement_deploy_cleanup_env|default({})), **(make_placement_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_p0000644000175000017500000000161615115610204033336 0ustar zuulzuul--- - name: Debug make_glance_prep_env when: make_glance_prep_env is defined ansible.builtin.debug: var: make_glance_prep_env - name: Debug make_glance_prep_params when: make_glance_prep_params is defined ansible.builtin.debug: var: make_glance_prep_params - name: Run glance_prep retries: "{{ make_glance_prep_retries | default(omit) }}" delay: "{{ make_glance_prep_delay | default(omit) }}" until: "{{ make_glance_prep_until | default(true) }}" register: "make_glance_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make glance_prep" dry_run: "{{ make_glance_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_glance_prep_env|default({})), **(make_glance_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance.y0000644000175000017500000000150315115610204033261 0ustar 
zuulzuul--- - name: Debug make_glance_env when: make_glance_env is defined ansible.builtin.debug: var: make_glance_env - name: Debug make_glance_params when: make_glance_params is defined ansible.builtin.debug: var: make_glance_params - name: Run glance retries: "{{ make_glance_retries | default(omit) }}" delay: "{{ make_glance_delay | default(omit) }}" until: "{{ make_glance_until | default(true) }}" register: "make_glance_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make glance" dry_run: "{{ make_glance_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_glance_env|default({})), **(make_glance_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_c0000644000175000017500000000167315115610204033324 0ustar zuulzuul--- - name: Debug make_glance_cleanup_env when: make_glance_cleanup_env is defined ansible.builtin.debug: var: make_glance_cleanup_env - name: Debug make_glance_cleanup_params when: make_glance_cleanup_params is defined ansible.builtin.debug: var: make_glance_cleanup_params - name: Run glance_cleanup retries: "{{ make_glance_cleanup_retries | default(omit) }}" delay: "{{ make_glance_cleanup_delay | default(omit) }}" until: "{{ make_glance_cleanup_until | default(true) }}" register: "make_glance_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make glance_cleanup" dry_run: "{{ make_glance_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_glance_cleanup_env|default({})), **(make_glance_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_d0000644000175000017500000000176715115610204033331 0ustar zuulzuul--- - name: Debug make_glance_deploy_prep_env when: make_glance_deploy_prep_env is defined ansible.builtin.debug: var: make_glance_deploy_prep_env - name: Debug make_glance_deploy_prep_params when: make_glance_deploy_prep_params is defined ansible.builtin.debug: var: make_glance_deploy_prep_params - name: Run glance_deploy_prep retries: "{{ make_glance_deploy_prep_retries | default(omit) }}" delay: "{{ make_glance_deploy_prep_delay | default(omit) }}" until: "{{ make_glance_deploy_prep_until | default(true) }}" register: "make_glance_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make glance_deploy_prep" dry_run: "{{ make_glance_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_glance_deploy_prep_env|default({})), **(make_glance_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_d0000644000175000017500000000165415115610204033324 0ustar zuulzuul--- - name: Debug make_glance_deploy_env when: make_glance_deploy_env is defined ansible.builtin.debug: var: make_glance_deploy_env - name: Debug make_glance_deploy_params when: make_glance_deploy_params is defined ansible.builtin.debug: var: make_glance_deploy_params - name: Run glance_deploy retries: "{{ make_glance_deploy_retries | default(omit) }}" delay: "{{ make_glance_deploy_delay | default(omit) }}" until: "{{ make_glance_deploy_until | default(true) }}" register: "make_glance_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make glance_deploy" dry_run: "{{ make_glance_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_glance_deploy_env|default({})), **(make_glance_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_d0000644000175000017500000000204415115610204033316 0ustar zuulzuul--- - name: Debug make_glance_deploy_cleanup_env when: make_glance_deploy_cleanup_env is defined ansible.builtin.debug: var: make_glance_deploy_cleanup_env - name: Debug make_glance_deploy_cleanup_params when: make_glance_deploy_cleanup_params is defined ansible.builtin.debug: var: make_glance_deploy_cleanup_params - name: Run glance_deploy_cleanup retries: "{{ make_glance_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_glance_deploy_cleanup_delay | default(omit) }}" until: "{{ make_glance_deploy_cleanup_until | default(true) }}" register: "make_glance_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make glance_deploy_cleanup" dry_run: "{{ make_glance_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_glance_deploy_cleanup_env|default({})), **(make_glance_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_prep0000644000175000017500000000154115115610204033413 0ustar zuulzuul--- - name: Debug make_ovn_prep_env when: make_ovn_prep_env is defined ansible.builtin.debug: var: make_ovn_prep_env - name: Debug make_ovn_prep_params when: make_ovn_prep_params is defined ansible.builtin.debug: var: make_ovn_prep_params - name: Run ovn_prep retries: "{{ make_ovn_prep_retries | default(omit) }}" delay: "{{ make_ovn_prep_delay | default(omit) }}" until: "{{ make_ovn_prep_until | default(true) }}" register: "make_ovn_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make 
ovn_prep" dry_run: "{{ make_ovn_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ovn_prep_env|default({})), **(make_ovn_prep_params|default({}))) }}" home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn.yml0000644000175000017500000000142615115610204033167 0ustar zuulzuul--- - name: Debug make_ovn_env when: make_ovn_env is defined ansible.builtin.debug: var: make_ovn_env - name: Debug make_ovn_params when: make_ovn_params is defined ansible.builtin.debug: var: make_ovn_params - name: Run ovn retries: "{{ make_ovn_retries | default(omit) }}" delay: "{{ make_ovn_delay | default(omit) }}" until: "{{ make_ovn_until | default(true) }}" register: "make_ovn_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ovn" dry_run: "{{ make_ovn_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ovn_env|default({})), **(make_ovn_params|default({}))) }}" ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_clea0000644000175000017500000000161615115610204033354 0ustar zuulzuul--- - name: Debug make_ovn_cleanup_env when: make_ovn_cleanup_env is defined ansible.builtin.debug: var: make_ovn_cleanup_env - name: Debug make_ovn_cleanup_params when: make_ovn_cleanup_params is defined ansible.builtin.debug: var: make_ovn_cleanup_params - name: Run ovn_cleanup retries: "{{ make_ovn_cleanup_retries | default(omit) }}" delay: "{{ make_ovn_cleanup_delay | default(omit) }}" until: "{{ make_ovn_cleanup_until | default(true) }}" register: "make_ovn_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ovn_cleanup" dry_run: "{{ make_ovn_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ovn_cleanup_env|default({})), **(make_ovn_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_depl0000644000175000017500000000171215115610204033371 0ustar zuulzuul--- - name: Debug make_ovn_deploy_prep_env when: make_ovn_deploy_prep_env is defined ansible.builtin.debug: var: make_ovn_deploy_prep_env - name: Debug make_ovn_deploy_prep_params when: make_ovn_deploy_prep_params is defined ansible.builtin.debug: var: make_ovn_deploy_prep_params - name: Run ovn_deploy_prep retries: "{{ make_ovn_deploy_prep_retries | default(omit) }}" delay: "{{ make_ovn_deploy_prep_delay | default(omit) }}" until: "{{ make_ovn_deploy_prep_until | default(true) }}" register: "make_ovn_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ovn_deploy_prep" dry_run: "{{ make_ovn_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ovn_deploy_prep_env|default({})), 
**(make_ovn_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_depl0000644000175000017500000000157715115610204033402 0ustar zuulzuul--- - name: Debug make_ovn_deploy_env when: make_ovn_deploy_env is defined ansible.builtin.debug: var: make_ovn_deploy_env - name: Debug make_ovn_deploy_params when: make_ovn_deploy_params is defined ansible.builtin.debug: var: make_ovn_deploy_params - name: Run ovn_deploy retries: "{{ make_ovn_deploy_retries | default(omit) }}" delay: "{{ make_ovn_deploy_delay | default(omit) }}" until: "{{ make_ovn_deploy_until | default(true) }}" register: "make_ovn_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ovn_deploy" dry_run: "{{ make_ovn_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ovn_deploy_env|default({})), **(make_ovn_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_depl0000644000175000017500000000176715115610204033403 0ustar zuulzuul--- - name: Debug make_ovn_deploy_cleanup_env when: make_ovn_deploy_cleanup_env is defined ansible.builtin.debug: var: make_ovn_deploy_cleanup_env - name: Debug make_ovn_deploy_cleanup_params when: make_ovn_deploy_cleanup_params is defined ansible.builtin.debug: var: make_ovn_deploy_cleanup_params - name: Run ovn_deploy_cleanup retries: "{{ make_ovn_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_ovn_deploy_cleanup_delay | default(omit) }}" until: "{{ make_ovn_deploy_cleanup_until | default(true) }}" register: "make_ovn_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ovn_deploy_cleanup" dry_run: "{{ make_ovn_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ovn_deploy_cleanup_env|default({})), **(make_ovn_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_0000644000175000017500000000163515115610204033420 0ustar zuulzuul--- - name: Debug make_neutron_prep_env when: make_neutron_prep_env is defined ansible.builtin.debug: var: make_neutron_prep_env - name: Debug make_neutron_prep_params when: make_neutron_prep_params is defined ansible.builtin.debug: var: make_neutron_prep_params - name: Run neutron_prep retries: "{{ make_neutron_prep_retries | default(omit) }}" delay: "{{ make_neutron_prep_delay | default(omit) }}" until: "{{ make_neutron_prep_until | default(true) }}" register: "make_neutron_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: 
"/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make neutron_prep" dry_run: "{{ make_neutron_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_neutron_prep_env|default({})), **(make_neutron_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron.0000644000175000017500000000152215115610204033332 0ustar zuulzuul--- - name: Debug make_neutron_env when: make_neutron_env is defined ansible.builtin.debug: var: make_neutron_env - name: Debug make_neutron_params when: make_neutron_params is defined ansible.builtin.debug: var: make_neutron_params - name: Run neutron retries: "{{ make_neutron_retries | default(omit) }}" delay: "{{ make_neutron_delay | default(omit) }}" until: "{{ make_neutron_until | default(true) }}" register: "make_neutron_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make neutron" dry_run: "{{ make_neutron_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_neutron_env|default({})), **(make_neutron_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_0000644000175000017500000000171215115610204033414 0ustar zuulzuul--- - name: Debug make_neutron_cleanup_env when: make_neutron_cleanup_env is defined ansible.builtin.debug: var: make_neutron_cleanup_env - name: Debug make_neutron_cleanup_params when: make_neutron_cleanup_params is defined ansible.builtin.debug: var: make_neutron_cleanup_params - name: Run neutron_cleanup retries: "{{ make_neutron_cleanup_retries | default(omit) }}" delay: "{{ make_neutron_cleanup_delay | default(omit) }}" until: "{{ make_neutron_cleanup_until | default(true) }}" register: "make_neutron_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make neutron_cleanup" dry_run: "{{ make_neutron_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_neutron_cleanup_env|default({})), **(make_neutron_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_0000644000175000017500000000200615115610204033411 0ustar zuulzuul--- - name: Debug make_neutron_deploy_prep_env when: make_neutron_deploy_prep_env is defined ansible.builtin.debug: var: make_neutron_deploy_prep_env - name: Debug make_neutron_deploy_prep_params when: make_neutron_deploy_prep_params is defined ansible.builtin.debug: var: make_neutron_deploy_prep_params - name: Run neutron_deploy_prep retries: "{{ make_neutron_deploy_prep_retries | default(omit) }}" delay: "{{ make_neutron_deploy_prep_delay | default(omit) }}" until: "{{ make_neutron_deploy_prep_until | 
default(true) }}" register: "make_neutron_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make neutron_deploy_prep" dry_run: "{{ make_neutron_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_neutron_deploy_prep_env|default({})), **(make_neutron_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_0000644000175000017500000000167315115610204033422 0ustar zuulzuul--- - name: Debug make_neutron_deploy_env when: make_neutron_deploy_env is defined ansible.builtin.debug: var: make_neutron_deploy_env - name: Debug make_neutron_deploy_params when: make_neutron_deploy_params is defined ansible.builtin.debug: var: make_neutron_deploy_params - name: Run neutron_deploy retries: "{{ make_neutron_deploy_retries | default(omit) }}" delay: "{{ make_neutron_deploy_delay | default(omit) }}" until: "{{ make_neutron_deploy_until | default(true) }}" register: "make_neutron_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make neutron_deploy" dry_run: "{{ make_neutron_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_neutron_deploy_env|default({})), **(make_neutron_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_0000644000175000017500000000206315115610204033414 0ustar zuulzuul--- - name: Debug make_neutron_deploy_cleanup_env when: make_neutron_deploy_cleanup_env is defined ansible.builtin.debug: var: make_neutron_deploy_cleanup_env - name: Debug make_neutron_deploy_cleanup_params when: make_neutron_deploy_cleanup_params is defined ansible.builtin.debug: var: make_neutron_deploy_cleanup_params - name: Run neutron_deploy_cleanup retries: "{{ make_neutron_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_neutron_deploy_cleanup_delay | default(omit) }}" until: "{{ make_neutron_deploy_cleanup_until | default(true) }}" register: "make_neutron_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make neutron_deploy_cleanup" dry_run: "{{ make_neutron_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_neutron_deploy_cleanup_env|default({})), **(make_neutron_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_p0000644000175000017500000000161615115610204033351 0ustar zuulzuul--- - name: Debug make_cinder_prep_env when: make_cinder_prep_env is defined 
ansible.builtin.debug: var: make_cinder_prep_env - name: Debug make_cinder_prep_params when: make_cinder_prep_params is defined ansible.builtin.debug: var: make_cinder_prep_params - name: Run cinder_prep retries: "{{ make_cinder_prep_retries | default(omit) }}" delay: "{{ make_cinder_prep_delay | default(omit) }}" until: "{{ make_cinder_prep_until | default(true) }}" register: "make_cinder_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make cinder_prep" dry_run: "{{ make_cinder_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_cinder_prep_env|default({})), **(make_cinder_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder.y0000644000175000017500000000150315115610204033274 0ustar zuulzuul--- - name: Debug make_cinder_env when: make_cinder_env is defined ansible.builtin.debug: var: make_cinder_env - name: Debug make_cinder_params when: make_cinder_params is defined ansible.builtin.debug: var: make_cinder_params - name: Run cinder retries: "{{ make_cinder_retries | default(omit) }}" delay: "{{ make_cinder_delay | default(omit) }}" until: "{{ make_cinder_until | default(true) }}" register: "make_cinder_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make cinder" dry_run: "{{ make_cinder_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_cinder_env|default({})), **(make_cinder_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_c0000644000175000017500000000167315115610204033337 0ustar zuulzuul--- - name: Debug make_cinder_cleanup_env when: make_cinder_cleanup_env is defined ansible.builtin.debug: var: make_cinder_cleanup_env - name: Debug make_cinder_cleanup_params when: make_cinder_cleanup_params is defined ansible.builtin.debug: var: make_cinder_cleanup_params - name: Run cinder_cleanup retries: "{{ make_cinder_cleanup_retries | default(omit) }}" delay: "{{ make_cinder_cleanup_delay | default(omit) }}" until: "{{ make_cinder_cleanup_until | default(true) }}" register: "make_cinder_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make cinder_cleanup" dry_run: "{{ make_cinder_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_cinder_cleanup_env|default({})), **(make_cinder_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_d0000644000175000017500000000176715115610204033344 0ustar zuulzuul--- - name: 
Debug make_cinder_deploy_prep_env when: make_cinder_deploy_prep_env is defined ansible.builtin.debug: var: make_cinder_deploy_prep_env - name: Debug make_cinder_deploy_prep_params when: make_cinder_deploy_prep_params is defined ansible.builtin.debug: var: make_cinder_deploy_prep_params - name: Run cinder_deploy_prep retries: "{{ make_cinder_deploy_prep_retries | default(omit) }}" delay: "{{ make_cinder_deploy_prep_delay | default(omit) }}" until: "{{ make_cinder_deploy_prep_until | default(true) }}" register: "make_cinder_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make cinder_deploy_prep" dry_run: "{{ make_cinder_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_cinder_deploy_prep_env|default({})), **(make_cinder_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_d0000644000175000017500000000165415115610204033337 0ustar zuulzuul--- - name: Debug make_cinder_deploy_env when: make_cinder_deploy_env is defined ansible.builtin.debug: var: make_cinder_deploy_env - name: Debug make_cinder_deploy_params when: make_cinder_deploy_params is defined ansible.builtin.debug: var: make_cinder_deploy_params - name: Run cinder_deploy retries: "{{ make_cinder_deploy_retries | default(omit) }}" delay: "{{ make_cinder_deploy_delay | default(omit) }}" until: "{{ make_cinder_deploy_until | default(true) }}" register: "make_cinder_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make cinder_deploy" dry_run: "{{ make_cinder_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_cinder_deploy_env|default({})), **(make_cinder_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_d0000644000175000017500000000204415115610204033331 0ustar zuulzuul--- - name: Debug make_cinder_deploy_cleanup_env when: make_cinder_deploy_cleanup_env is defined ansible.builtin.debug: var: make_cinder_deploy_cleanup_env - name: Debug make_cinder_deploy_cleanup_params when: make_cinder_deploy_cleanup_params is defined ansible.builtin.debug: var: make_cinder_deploy_cleanup_params - name: Run cinder_deploy_cleanup retries: "{{ make_cinder_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_cinder_deploy_cleanup_delay | default(omit) }}" until: "{{ make_cinder_deploy_cleanup_until | default(true) }}" register: "make_cinder_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make cinder_deploy_cleanup" dry_run: "{{ make_cinder_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_cinder_deploy_cleanup_env|default({})), 
**(make_cinder_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq0000644000175000017500000000165415115610204033371 0ustar zuulzuul--- - name: Debug make_rabbitmq_prep_env when: make_rabbitmq_prep_env is defined ansible.builtin.debug: var: make_rabbitmq_prep_env - name: Debug make_rabbitmq_prep_params when: make_rabbitmq_prep_params is defined ansible.builtin.debug: var: make_rabbitmq_prep_params - name: Run rabbitmq_prep retries: "{{ make_rabbitmq_prep_retries | default(omit) }}" delay: "{{ make_rabbitmq_prep_delay | default(omit) }}" until: "{{ make_rabbitmq_prep_until | default(true) }}" register: "make_rabbitmq_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make rabbitmq_prep" dry_run: "{{ make_rabbitmq_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_rabbitmq_prep_env|default({})), **(make_rabbitmq_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq0000644000175000017500000000154115115610204033364 0ustar zuulzuul--- - name: Debug make_rabbitmq_env when: make_rabbitmq_env is defined ansible.builtin.debug: var: make_rabbitmq_env - name: Debug make_rabbitmq_params when: make_rabbitmq_params is defined ansible.builtin.debug: var: make_rabbitmq_params - name: Run rabbitmq retries: "{{ make_rabbitmq_retries | default(omit) }}" delay: "{{ make_rabbitmq_delay | default(omit) }}" until: "{{ make_rabbitmq_until | default(true) }}" register: "make_rabbitmq_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make rabbitmq" dry_run: "{{ make_rabbitmq_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_rabbitmq_env|default({})), **(make_rabbitmq_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq0000644000175000017500000000173115115610204033365 0ustar zuulzuul--- - name: Debug make_rabbitmq_cleanup_env when: make_rabbitmq_cleanup_env is defined ansible.builtin.debug: var: make_rabbitmq_cleanup_env - name: Debug make_rabbitmq_cleanup_params when: make_rabbitmq_cleanup_params is defined ansible.builtin.debug: var: make_rabbitmq_cleanup_params - name: Run rabbitmq_cleanup retries: "{{ make_rabbitmq_cleanup_retries | default(omit) }}" delay: "{{ make_rabbitmq_cleanup_delay | default(omit) }}" until: "{{ make_rabbitmq_cleanup_until | default(true) }}" register: "make_rabbitmq_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" 
script: "make rabbitmq_cleanup" dry_run: "{{ make_rabbitmq_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_rabbitmq_cleanup_env|default({})), **(make_rabbitmq_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq0000644000175000017500000000202515115610204033362 0ustar zuulzuul--- - name: Debug make_rabbitmq_deploy_prep_env when: make_rabbitmq_deploy_prep_env is defined ansible.builtin.debug: var: make_rabbitmq_deploy_prep_env - name: Debug make_rabbitmq_deploy_prep_params when: make_rabbitmq_deploy_prep_params is defined ansible.builtin.debug: var: make_rabbitmq_deploy_prep_params - name: Run rabbitmq_deploy_prep retries: "{{ make_rabbitmq_deploy_prep_retries | default(omit) }}" delay: "{{ make_rabbitmq_deploy_prep_delay | default(omit) }}" until: "{{ make_rabbitmq_deploy_prep_until | default(true) }}" register: "make_rabbitmq_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make rabbitmq_deploy_prep" dry_run: "{{ make_rabbitmq_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_rabbitmq_deploy_prep_env|default({})), **(make_rabbitmq_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq0000644000175000017500000000171215115610204033364 0ustar zuulzuul--- - name: Debug make_rabbitmq_deploy_env when: make_rabbitmq_deploy_env is defined ansible.builtin.debug: var: make_rabbitmq_deploy_env - name: Debug make_rabbitmq_deploy_params when: make_rabbitmq_deploy_params is defined ansible.builtin.debug: var: make_rabbitmq_deploy_params - name: Run rabbitmq_deploy retries: "{{ make_rabbitmq_deploy_retries | default(omit) }}" delay: "{{ make_rabbitmq_deploy_delay | default(omit) }}" until: "{{ make_rabbitmq_deploy_until | default(true) }}" register: "make_rabbitmq_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make rabbitmq_deploy" dry_run: "{{ make_rabbitmq_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_rabbitmq_deploy_env|default({})), **(make_rabbitmq_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000017000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_rabbitmq0000644000175000017500000000210215115610204033356 0ustar zuulzuul--- - name: Debug make_rabbitmq_deploy_cleanup_env when: make_rabbitmq_deploy_cleanup_env is defined ansible.builtin.debug: var: make_rabbitmq_deploy_cleanup_env - name: Debug make_rabbitmq_deploy_cleanup_params when: make_rabbitmq_deploy_cleanup_params is defined ansible.builtin.debug: var: make_rabbitmq_deploy_cleanup_params - name: Run 
rabbitmq_deploy_cleanup retries: "{{ make_rabbitmq_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_rabbitmq_deploy_cleanup_delay | default(omit) }}" until: "{{ make_rabbitmq_deploy_cleanup_until | default(true) }}" register: "make_rabbitmq_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make rabbitmq_deploy_cleanup" dry_run: "{{ make_rabbitmq_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_rabbitmq_deploy_cleanup_env|default({})), **(make_rabbitmq_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_p0000644000175000017500000000161615115610204033370 0ustar zuulzuul--- - name: Debug make_ironic_prep_env when: make_ironic_prep_env is defined ansible.builtin.debug: var: make_ironic_prep_env - name: Debug make_ironic_prep_params when: make_ironic_prep_params is defined ansible.builtin.debug: var: make_ironic_prep_params - name: Run ironic_prep retries: "{{ make_ironic_prep_retries | default(omit) }}" delay: "{{ make_ironic_prep_delay | default(omit) }}" until: "{{ make_ironic_prep_until | default(true) }}" register: "make_ironic_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ironic_prep" dry_run: "{{ make_ironic_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ironic_prep_env|default({})), **(make_ironic_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic.y0000644000175000017500000000150315115610204033313 0ustar zuulzuul--- - name: Debug make_ironic_env when: make_ironic_env is defined ansible.builtin.debug: var: make_ironic_env - name: Debug make_ironic_params when: make_ironic_params is defined ansible.builtin.debug: var: make_ironic_params - name: Run ironic retries: "{{ make_ironic_retries | default(omit) }}" delay: "{{ make_ironic_delay | default(omit) }}" until: "{{ make_ironic_until | default(true) }}" register: "make_ironic_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ironic" dry_run: "{{ make_ironic_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ironic_env|default({})), **(make_ironic_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_c0000644000175000017500000000167315115610204033356 0ustar zuulzuul--- - name: Debug make_ironic_cleanup_env when: make_ironic_cleanup_env is defined ansible.builtin.debug: var: make_ironic_cleanup_env - name: Debug 
make_ironic_cleanup_params when: make_ironic_cleanup_params is defined ansible.builtin.debug: var: make_ironic_cleanup_params - name: Run ironic_cleanup retries: "{{ make_ironic_cleanup_retries | default(omit) }}" delay: "{{ make_ironic_cleanup_delay | default(omit) }}" until: "{{ make_ironic_cleanup_until | default(true) }}" register: "make_ironic_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ironic_cleanup" dry_run: "{{ make_ironic_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ironic_cleanup_env|default({})), **(make_ironic_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_d0000644000175000017500000000176715115610204033363 0ustar zuulzuul--- - name: Debug make_ironic_deploy_prep_env when: make_ironic_deploy_prep_env is defined ansible.builtin.debug: var: make_ironic_deploy_prep_env - name: Debug make_ironic_deploy_prep_params when: make_ironic_deploy_prep_params is defined ansible.builtin.debug: var: make_ironic_deploy_prep_params - name: Run ironic_deploy_prep retries: "{{ make_ironic_deploy_prep_retries | default(omit) }}" delay: "{{ make_ironic_deploy_prep_delay | default(omit) }}" until: "{{ make_ironic_deploy_prep_until | default(true) }}" register: "make_ironic_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ironic_deploy_prep" dry_run: "{{ make_ironic_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ironic_deploy_prep_env|default({})), **(make_ironic_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_d0000644000175000017500000000165415115610204033356 0ustar zuulzuul--- - name: Debug make_ironic_deploy_env when: make_ironic_deploy_env is defined ansible.builtin.debug: var: make_ironic_deploy_env - name: Debug make_ironic_deploy_params when: make_ironic_deploy_params is defined ansible.builtin.debug: var: make_ironic_deploy_params - name: Run ironic_deploy retries: "{{ make_ironic_deploy_retries | default(omit) }}" delay: "{{ make_ironic_deploy_delay | default(omit) }}" until: "{{ make_ironic_deploy_until | default(true) }}" register: "make_ironic_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ironic_deploy" dry_run: "{{ make_ironic_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ironic_deploy_env|default({})), **(make_ironic_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_d0000644000175000017500000000204415115610204033350 0ustar zuulzuul--- - name: Debug make_ironic_deploy_cleanup_env when: make_ironic_deploy_cleanup_env is defined ansible.builtin.debug: var: make_ironic_deploy_cleanup_env - name: Debug make_ironic_deploy_cleanup_params when: make_ironic_deploy_cleanup_params is defined ansible.builtin.debug: var: make_ironic_deploy_cleanup_params - name: Run ironic_deploy_cleanup retries: "{{ make_ironic_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_ironic_deploy_cleanup_delay | default(omit) }}" until: "{{ make_ironic_deploy_cleanup_until | default(true) }}" register: "make_ironic_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ironic_deploy_cleanup" dry_run: "{{ make_ironic_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ironic_deploy_cleanup_env|default({})), **(make_ironic_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_0000644000175000017500000000163515115610204033354 0ustar zuulzuul--- - name: Debug make_octavia_prep_env when: make_octavia_prep_env is defined ansible.builtin.debug: var: make_octavia_prep_env - name: Debug make_octavia_prep_params when: make_octavia_prep_params is defined ansible.builtin.debug: var: make_octavia_prep_params - name: Run octavia_prep retries: "{{ make_octavia_prep_retries | default(omit) }}" delay: "{{ make_octavia_prep_delay | default(omit) }}" until: "{{ make_octavia_prep_until | default(true) }}" register: "make_octavia_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make octavia_prep" dry_run: "{{ make_octavia_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_octavia_prep_env|default({})), **(make_octavia_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia.0000644000175000017500000000152215115610204033266 0ustar zuulzuul--- - name: Debug make_octavia_env when: make_octavia_env is defined ansible.builtin.debug: var: make_octavia_env - name: Debug make_octavia_params when: make_octavia_params is defined ansible.builtin.debug: var: make_octavia_params - name: Run octavia retries: "{{ make_octavia_retries | default(omit) }}" delay: "{{ make_octavia_delay | default(omit) }}" until: "{{ make_octavia_until | default(true) }}" register: "make_octavia_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make octavia" dry_run: "{{ 
make_octavia_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_octavia_env|default({})), **(make_octavia_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_0000644000175000017500000000171215115610204033350 0ustar zuulzuul--- - name: Debug make_octavia_cleanup_env when: make_octavia_cleanup_env is defined ansible.builtin.debug: var: make_octavia_cleanup_env - name: Debug make_octavia_cleanup_params when: make_octavia_cleanup_params is defined ansible.builtin.debug: var: make_octavia_cleanup_params - name: Run octavia_cleanup retries: "{{ make_octavia_cleanup_retries | default(omit) }}" delay: "{{ make_octavia_cleanup_delay | default(omit) }}" until: "{{ make_octavia_cleanup_until | default(true) }}" register: "make_octavia_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make octavia_cleanup" dry_run: "{{ make_octavia_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_octavia_cleanup_env|default({})), **(make_octavia_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_0000644000175000017500000000200615115610204033345 0ustar zuulzuul--- - name: Debug make_octavia_deploy_prep_env when: make_octavia_deploy_prep_env is defined ansible.builtin.debug: var: make_octavia_deploy_prep_env - name: Debug make_octavia_deploy_prep_params when: make_octavia_deploy_prep_params is defined ansible.builtin.debug: var: make_octavia_deploy_prep_params - name: Run octavia_deploy_prep retries: "{{ make_octavia_deploy_prep_retries | default(omit) }}" delay: "{{ make_octavia_deploy_prep_delay | default(omit) }}" until: "{{ make_octavia_deploy_prep_until | default(true) }}" register: "make_octavia_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make octavia_deploy_prep" dry_run: "{{ make_octavia_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_octavia_deploy_prep_env|default({})), **(make_octavia_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_0000644000175000017500000000167315115610204033356 0ustar zuulzuul--- - name: Debug make_octavia_deploy_env when: make_octavia_deploy_env is defined ansible.builtin.debug: var: make_octavia_deploy_env - name: Debug make_octavia_deploy_params when: make_octavia_deploy_params is defined ansible.builtin.debug: var: make_octavia_deploy_params - name: Run octavia_deploy retries: "{{ make_octavia_deploy_retries | default(omit) }}" delay: "{{ make_octavia_deploy_delay | default(omit) }}" until: "{{ 
make_octavia_deploy_until | default(true) }}" register: "make_octavia_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make octavia_deploy" dry_run: "{{ make_octavia_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_octavia_deploy_env|default({})), **(make_octavia_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_0000644000175000017500000000206315115610204033350 0ustar zuulzuul--- - name: Debug make_octavia_deploy_cleanup_env when: make_octavia_deploy_cleanup_env is defined ansible.builtin.debug: var: make_octavia_deploy_cleanup_env - name: Debug make_octavia_deploy_cleanup_params when: make_octavia_deploy_cleanup_params is defined ansible.builtin.debug: var: make_octavia_deploy_cleanup_params - name: Run octavia_deploy_cleanup retries: "{{ make_octavia_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_octavia_deploy_cleanup_delay | default(omit) }}" until: "{{ make_octavia_deploy_cleanup_until | default(true) }}" register: "make_octavia_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make octavia_deploy_cleanup" dry_run: "{{ make_octavia_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_octavia_deploy_cleanup_env|default({})), **(make_octavia_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designate_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designat0000644000175000017500000000167315115610204033367 0ustar zuulzuul--- - name: Debug make_designate_prep_env when: make_designate_prep_env is defined ansible.builtin.debug: var: make_designate_prep_env - name: Debug make_designate_prep_params when: make_designate_prep_params is defined ansible.builtin.debug: var: make_designate_prep_params - name: Run designate_prep retries: "{{ make_designate_prep_retries | default(omit) }}" delay: "{{ make_designate_prep_delay | default(omit) }}" until: "{{ make_designate_prep_until | default(true) }}" register: "make_designate_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make designate_prep" dry_run: "{{ make_designate_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_designate_prep_env|default({})), **(make_designate_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designate.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designat0000644000175000017500000000156015115610204033362 0ustar zuulzuul--- - name: Debug make_designate_env when: make_designate_env is defined 
ansible.builtin.debug: var: make_designate_env - name: Debug make_designate_params when: make_designate_params is defined ansible.builtin.debug: var: make_designate_params - name: Run designate retries: "{{ make_designate_retries | default(omit) }}" delay: "{{ make_designate_delay | default(omit) }}" until: "{{ make_designate_until | default(true) }}" register: "make_designate_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make designate" dry_run: "{{ make_designate_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_designate_env|default({})), **(make_designate_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designate_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designat0000644000175000017500000000175015115610204033363 0ustar zuulzuul--- - name: Debug make_designate_cleanup_env when: make_designate_cleanup_env is defined ansible.builtin.debug: var: make_designate_cleanup_env - name: Debug make_designate_cleanup_params when: make_designate_cleanup_params is defined ansible.builtin.debug: var: make_designate_cleanup_params - name: Run designate_cleanup retries: "{{ make_designate_cleanup_retries | default(omit) }}" delay: "{{ make_designate_cleanup_delay | default(omit) }}" until: "{{ make_designate_cleanup_until | default(true) }}" register: "make_designate_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make designate_cleanup" dry_run: "{{ make_designate_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_designate_cleanup_env|default({})), **(make_designate_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designate_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designat0000644000175000017500000000204415115610204033360 0ustar zuulzuul--- - name: Debug make_designate_deploy_prep_env when: make_designate_deploy_prep_env is defined ansible.builtin.debug: var: make_designate_deploy_prep_env - name: Debug make_designate_deploy_prep_params when: make_designate_deploy_prep_params is defined ansible.builtin.debug: var: make_designate_deploy_prep_params - name: Run designate_deploy_prep retries: "{{ make_designate_deploy_prep_retries | default(omit) }}" delay: "{{ make_designate_deploy_prep_delay | default(omit) }}" until: "{{ make_designate_deploy_prep_until | default(true) }}" register: "make_designate_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make designate_deploy_prep" dry_run: "{{ make_designate_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_designate_deploy_prep_env|default({})), **(make_designate_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designate_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designat0000644000175000017500000000173115115610204033362 0ustar zuulzuul--- - name: Debug make_designate_deploy_env when: make_designate_deploy_env is defined ansible.builtin.debug: var: make_designate_deploy_env - name: Debug make_designate_deploy_params when: make_designate_deploy_params is defined ansible.builtin.debug: var: make_designate_deploy_params - name: Run designate_deploy retries: "{{ make_designate_deploy_retries | default(omit) }}" delay: "{{ make_designate_deploy_delay | default(omit) }}" until: "{{ make_designate_deploy_until | default(true) }}" register: "make_designate_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make designate_deploy" dry_run: "{{ make_designate_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_designate_deploy_env|default({})), **(make_designate_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designate_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designat0000644000175000017500000000212115115610204033354 0ustar zuulzuul--- - name: Debug make_designate_deploy_cleanup_env when: make_designate_deploy_cleanup_env is defined ansible.builtin.debug: var: make_designate_deploy_cleanup_env - name: Debug make_designate_deploy_cleanup_params when: make_designate_deploy_cleanup_params is defined ansible.builtin.debug: var: make_designate_deploy_cleanup_params - name: Run designate_deploy_cleanup retries: "{{ make_designate_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_designate_deploy_cleanup_delay | default(omit) }}" until: "{{ make_designate_deploy_cleanup_until | default(true) }}" register: "make_designate_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make designate_deploy_cleanup" dry_run: "{{ make_designate_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_designate_deploy_cleanup_env|default({})), **(make_designate_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova_pre0000644000175000017500000000156015115610204033375 0ustar zuulzuul--- - name: Debug make_nova_prep_env when: make_nova_prep_env is defined ansible.builtin.debug: var: make_nova_prep_env - name: Debug make_nova_prep_params when: make_nova_prep_params is defined ansible.builtin.debug: var: make_nova_prep_params - name: Run nova_prep retries: "{{ make_nova_prep_retries | default(omit) }}" delay: "{{ make_nova_prep_delay | default(omit) }}" until: "{{ make_nova_prep_until | default(true) }}" register: "make_nova_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ 
'/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make nova_prep" dry_run: "{{ make_nova_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_nova_prep_env|default({})), **(make_nova_prep_params|default({}))) }}" home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova.yml0000644000175000017500000000144515115610204033331 0ustar zuulzuul--- - name: Debug make_nova_env when: make_nova_env is defined ansible.builtin.debug: var: make_nova_env - name: Debug make_nova_params when: make_nova_params is defined ansible.builtin.debug: var: make_nova_params - name: Run nova retries: "{{ make_nova_retries | default(omit) }}" delay: "{{ make_nova_delay | default(omit) }}" until: "{{ make_nova_until | default(true) }}" register: "make_nova_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make nova" dry_run: "{{ make_nova_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_nova_env|default({})), **(make_nova_params|default({}))) }}" ././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova_cle0000644000175000017500000000163515115610204033355 0ustar zuulzuul--- - name: Debug make_nova_cleanup_env when: make_nova_cleanup_env is defined ansible.builtin.debug: var: make_nova_cleanup_env - name: Debug make_nova_cleanup_params when: make_nova_cleanup_params is defined ansible.builtin.debug: var: make_nova_cleanup_params - name: Run nova_cleanup retries: "{{ make_nova_cleanup_retries | default(omit) }}" delay: "{{ make_nova_cleanup_delay | default(omit) }}" until: "{{ make_nova_cleanup_until | default(true) }}" register: "make_nova_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make nova_cleanup" dry_run: "{{ make_nova_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_nova_cleanup_env|default({})), **(make_nova_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova_dep0000644000175000017500000000173115115610204033357 0ustar zuulzuul--- - name: Debug make_nova_deploy_prep_env when: make_nova_deploy_prep_env is defined ansible.builtin.debug: var: make_nova_deploy_prep_env - name: Debug make_nova_deploy_prep_params when: make_nova_deploy_prep_params is defined ansible.builtin.debug: var: make_nova_deploy_prep_params - name: Run nova_deploy_prep retries: "{{ make_nova_deploy_prep_retries | default(omit) }}" delay: "{{ make_nova_deploy_prep_delay | default(omit) }}" until: "{{ make_nova_deploy_prep_until | default(true) }}" register: "make_nova_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make 
nova_deploy_prep" dry_run: "{{ make_nova_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_nova_deploy_prep_env|default({})), **(make_nova_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova_dep0000644000175000017500000000161615115610204033361 0ustar zuulzuul--- - name: Debug make_nova_deploy_env when: make_nova_deploy_env is defined ansible.builtin.debug: var: make_nova_deploy_env - name: Debug make_nova_deploy_params when: make_nova_deploy_params is defined ansible.builtin.debug: var: make_nova_deploy_params - name: Run nova_deploy retries: "{{ make_nova_deploy_retries | default(omit) }}" delay: "{{ make_nova_deploy_delay | default(omit) }}" until: "{{ make_nova_deploy_until | default(true) }}" register: "make_nova_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make nova_deploy" dry_run: "{{ make_nova_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_nova_deploy_env|default({})), **(make_nova_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_nova_dep0000644000175000017500000000200615115610204033353 0ustar zuulzuul--- - name: Debug make_nova_deploy_cleanup_env when: make_nova_deploy_cleanup_env is defined ansible.builtin.debug: var: make_nova_deploy_cleanup_env - name: Debug make_nova_deploy_cleanup_params when: make_nova_deploy_cleanup_params is defined ansible.builtin.debug: var: make_nova_deploy_cleanup_params - name: Run nova_deploy_cleanup retries: "{{ make_nova_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_nova_deploy_cleanup_delay | default(omit) }}" until: "{{ make_nova_deploy_cleanup_until | default(true) }}" register: "make_nova_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make nova_deploy_cleanup" dry_run: "{{ make_nova_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_nova_deploy_cleanup_env|default({})), **(make_nova_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_0000644000175000017500000000175015115610204033323 0ustar zuulzuul--- - name: Debug make_mariadb_kuttl_run_env when: make_mariadb_kuttl_run_env is defined ansible.builtin.debug: var: make_mariadb_kuttl_run_env - name: Debug make_mariadb_kuttl_run_params when: make_mariadb_kuttl_run_params is defined ansible.builtin.debug: var: make_mariadb_kuttl_run_params - name: Run mariadb_kuttl_run retries: "{{ make_mariadb_kuttl_run_retries | default(omit) }}" delay: "{{ make_mariadb_kuttl_run_delay | default(omit) }}" 
until: "{{ make_mariadb_kuttl_run_until | default(true) }}" register: "make_mariadb_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make mariadb_kuttl_run" dry_run: "{{ make_mariadb_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_mariadb_kuttl_run_env|default({})), **(make_mariadb_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_0000644000175000017500000000165415115610204033326 0ustar zuulzuul--- - name: Debug make_mariadb_kuttl_env when: make_mariadb_kuttl_env is defined ansible.builtin.debug: var: make_mariadb_kuttl_env - name: Debug make_mariadb_kuttl_params when: make_mariadb_kuttl_params is defined ansible.builtin.debug: var: make_mariadb_kuttl_params - name: Run mariadb_kuttl retries: "{{ make_mariadb_kuttl_retries | default(omit) }}" delay: "{{ make_mariadb_kuttl_delay | default(omit) }}" until: "{{ make_mariadb_kuttl_until | default(true) }}" register: "make_mariadb_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make mariadb_kuttl" dry_run: "{{ make_mariadb_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_mariadb_kuttl_env|default({})), **(make_mariadb_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_kuttl_db_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_kuttl_db0000644000175000017500000000165415115610204033400 0ustar zuulzuul--- - name: Debug make_kuttl_db_prep_env when: make_kuttl_db_prep_env is defined ansible.builtin.debug: var: make_kuttl_db_prep_env - name: Debug make_kuttl_db_prep_params when: make_kuttl_db_prep_params is defined ansible.builtin.debug: var: make_kuttl_db_prep_params - name: Run kuttl_db_prep retries: "{{ make_kuttl_db_prep_retries | default(omit) }}" delay: "{{ make_kuttl_db_prep_delay | default(omit) }}" until: "{{ make_kuttl_db_prep_until | default(true) }}" register: "make_kuttl_db_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make kuttl_db_prep" dry_run: "{{ make_kuttl_db_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_kuttl_db_prep_env|default({})), **(make_kuttl_db_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_kuttl_db_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_kuttl_db0000644000175000017500000000173115115610204033374 0ustar zuulzuul--- - name: Debug make_kuttl_db_cleanup_env when: make_kuttl_db_cleanup_env is defined ansible.builtin.debug: var: make_kuttl_db_cleanup_env - name: Debug make_kuttl_db_cleanup_params when: 
make_kuttl_db_cleanup_params is defined ansible.builtin.debug: var: make_kuttl_db_cleanup_params - name: Run kuttl_db_cleanup retries: "{{ make_kuttl_db_cleanup_retries | default(omit) }}" delay: "{{ make_kuttl_db_cleanup_delay | default(omit) }}" until: "{{ make_kuttl_db_cleanup_until | default(true) }}" register: "make_kuttl_db_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make kuttl_db_cleanup" dry_run: "{{ make_kuttl_db_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_kuttl_db_cleanup_env|default({})), **(make_kuttl_db_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_kuttl_common_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_kuttl_co0000644000175000017500000000175015115610204033411 0ustar zuulzuul--- - name: Debug make_kuttl_common_prep_env when: make_kuttl_common_prep_env is defined ansible.builtin.debug: var: make_kuttl_common_prep_env - name: Debug make_kuttl_common_prep_params when: make_kuttl_common_prep_params is defined ansible.builtin.debug: var: make_kuttl_common_prep_params - name: Run kuttl_common_prep retries: "{{ make_kuttl_common_prep_retries | default(omit) }}" delay: "{{ make_kuttl_common_prep_delay | default(omit) }}" until: "{{ make_kuttl_common_prep_until | default(true) }}" register: "make_kuttl_common_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make kuttl_common_prep" dry_run: "{{ make_kuttl_common_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_kuttl_common_prep_env|default({})), **(make_kuttl_common_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_kuttl_common_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_kuttl_co0000644000175000017500000000202515115610204033405 0ustar zuulzuul--- - name: Debug make_kuttl_common_cleanup_env when: make_kuttl_common_cleanup_env is defined ansible.builtin.debug: var: make_kuttl_common_cleanup_env - name: Debug make_kuttl_common_cleanup_params when: make_kuttl_common_cleanup_params is defined ansible.builtin.debug: var: make_kuttl_common_cleanup_params - name: Run kuttl_common_cleanup retries: "{{ make_kuttl_common_cleanup_retries | default(omit) }}" delay: "{{ make_kuttl_common_cleanup_delay | default(omit) }}" until: "{{ make_kuttl_common_cleanup_until | default(true) }}" register: "make_kuttl_common_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make kuttl_common_cleanup" dry_run: "{{ make_kuttl_common_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_kuttl_common_cleanup_env|default({})), **(make_kuttl_common_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016300000000000011603 Lustar 
home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone_kuttl_run.yml
---
- name: Debug make_keystone_kuttl_run_env
  when: make_keystone_kuttl_run_env is defined
  ansible.builtin.debug:
    var: make_keystone_kuttl_run_env
- name: Debug make_keystone_kuttl_run_params
  when: make_keystone_kuttl_run_params is defined
  ansible.builtin.debug:
    var: make_keystone_kuttl_run_params
- name: Run keystone_kuttl_run
  retries: "{{ make_keystone_kuttl_run_retries | default(omit) }}"
  delay: "{{ make_keystone_kuttl_run_delay | default(omit) }}"
  until: "{{ make_keystone_kuttl_run_until | default(true) }}"
  register: "make_keystone_kuttl_run_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make keystone_kuttl_run"
    dry_run: "{{ make_keystone_kuttl_run_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_keystone_kuttl_run_env|default({})), **(make_keystone_kuttl_run_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_keystone_kuttl.yml
---
- name: Debug make_keystone_kuttl_env
  when: make_keystone_kuttl_env is defined
  ansible.builtin.debug:
    var: make_keystone_kuttl_env
- name: Debug make_keystone_kuttl_params
  when: make_keystone_kuttl_params is defined
  ansible.builtin.debug:
    var: make_keystone_kuttl_params
- name: Run keystone_kuttl
  retries: "{{ make_keystone_kuttl_retries | default(omit) }}"
  delay: "{{ make_keystone_kuttl_delay | default(omit) }}"
  until: "{{ make_keystone_kuttl_until | default(true) }}"
  register: "make_keystone_kuttl_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make keystone_kuttl"
    dry_run: "{{ make_keystone_kuttl_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_keystone_kuttl_env|default({})), **(make_keystone_kuttl_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican_kuttl_run.yml
---
- name: Debug make_barbican_kuttl_run_env
  when: make_barbican_kuttl_run_env is defined
  ansible.builtin.debug:
    var: make_barbican_kuttl_run_env
- name: Debug make_barbican_kuttl_run_params
  when: make_barbican_kuttl_run_params is defined
  ansible.builtin.debug:
    var: make_barbican_kuttl_run_params
- name: Run barbican_kuttl_run
  retries: "{{ make_barbican_kuttl_run_retries | default(omit) }}"
  delay: "{{ make_barbican_kuttl_run_delay | default(omit) }}"
  until: "{{ make_barbican_kuttl_run_until | default(true) }}"
  register: "make_barbican_kuttl_run_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make barbican_kuttl_run"
    dry_run: "{{ make_barbican_kuttl_run_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_barbican_kuttl_run_env|default({})), **(make_barbican_kuttl_run_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_barbican_kuttl.yml
---
- name: Debug make_barbican_kuttl_env
  when: make_barbican_kuttl_env is defined
  ansible.builtin.debug:
    var: make_barbican_kuttl_env
- name: Debug make_barbican_kuttl_params
  when: make_barbican_kuttl_params is defined
  ansible.builtin.debug:
    var: make_barbican_kuttl_params
- name: Run barbican_kuttl
  retries: "{{ make_barbican_kuttl_retries | default(omit) }}"
  delay: "{{ make_barbican_kuttl_delay | default(omit) }}"
  until: "{{ make_barbican_kuttl_until | default(true) }}"
  register: "make_barbican_kuttl_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make barbican_kuttl"
    dry_run: "{{ make_barbican_kuttl_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_barbican_kuttl_env|default({})), **(make_barbican_kuttl_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placement_kuttl_run.yml
---
- name: Debug make_placement_kuttl_run_env
  when: make_placement_kuttl_run_env is defined
  ansible.builtin.debug:
    var: make_placement_kuttl_run_env
- name: Debug make_placement_kuttl_run_params
  when: make_placement_kuttl_run_params is defined
  ansible.builtin.debug:
    var: make_placement_kuttl_run_params
- name: Run placement_kuttl_run
  retries: "{{ make_placement_kuttl_run_retries | default(omit) }}"
  delay: "{{ make_placement_kuttl_run_delay | default(omit) }}"
  until: "{{ make_placement_kuttl_run_until | default(true) }}"
  register: "make_placement_kuttl_run_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make placement_kuttl_run"
    dry_run: "{{ make_placement_kuttl_run_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_placement_kuttl_run_env|default({})), **(make_placement_kuttl_run_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_placement_kuttl.yml
---
- name: Debug make_placement_kuttl_env
  when: make_placement_kuttl_env is defined
  ansible.builtin.debug:
    var: make_placement_kuttl_env
- name: Debug make_placement_kuttl_params
  when: make_placement_kuttl_params is defined
  ansible.builtin.debug:
    var: make_placement_kuttl_params
- name: Run placement_kuttl
  retries: "{{ make_placement_kuttl_retries | default(omit) }}"
  delay: "{{ make_placement_kuttl_delay | default(omit) }}"
  until: "{{ make_placement_kuttl_until | default(true) }}"
  register: "make_placement_kuttl_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make placement_kuttl"
    dry_run: "{{ make_placement_kuttl_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_placement_kuttl_env|default({})), **(make_placement_kuttl_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_kuttl_run.yml
---
- name: Debug make_cinder_kuttl_run_env
  when: make_cinder_kuttl_run_env is defined
  ansible.builtin.debug:
    var: make_cinder_kuttl_run_env
- name: Debug make_cinder_kuttl_run_params
  when: make_cinder_kuttl_run_params is defined
  ansible.builtin.debug:
    var: make_cinder_kuttl_run_params
- name: Run cinder_kuttl_run
  retries: "{{ make_cinder_kuttl_run_retries | default(omit) }}"
  delay: "{{ make_cinder_kuttl_run_delay | default(omit) }}"
  until: "{{ make_cinder_kuttl_run_until | default(true) }}"
  register: "make_cinder_kuttl_run_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make cinder_kuttl_run"
    dry_run: "{{ make_cinder_kuttl_run_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_cinder_kuttl_run_env|default({})), **(make_cinder_kuttl_run_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cinder_kuttl.yml
---
- name: Debug make_cinder_kuttl_env
  when: make_cinder_kuttl_env is defined
  ansible.builtin.debug:
    var: make_cinder_kuttl_env
- name: Debug make_cinder_kuttl_params
  when: make_cinder_kuttl_params is defined
  ansible.builtin.debug:
    var: make_cinder_kuttl_params
- name: Run cinder_kuttl
  retries: "{{ make_cinder_kuttl_retries | default(omit) }}"
  delay: "{{ make_cinder_kuttl_delay | default(omit) }}"
  until: "{{ make_cinder_kuttl_until | default(true) }}"
  register: "make_cinder_kuttl_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make cinder_kuttl"
    dry_run: "{{ make_cinder_kuttl_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_cinder_kuttl_env|default({})), **(make_cinder_kuttl_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_kuttl_run.yml
---
- name: Debug make_neutron_kuttl_run_env
  when: make_neutron_kuttl_run_env is defined
  ansible.builtin.debug:
    var: make_neutron_kuttl_run_env
- name: Debug make_neutron_kuttl_run_params
  when: make_neutron_kuttl_run_params is defined
  ansible.builtin.debug:
    var: make_neutron_kuttl_run_params
- name: Run neutron_kuttl_run
  retries: "{{ make_neutron_kuttl_run_retries | default(omit) }}"
  delay: "{{ make_neutron_kuttl_run_delay | default(omit) }}"
  until: "{{ make_neutron_kuttl_run_until | default(true) }}"
  register: "make_neutron_kuttl_run_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make neutron_kuttl_run"
    dry_run: "{{ make_neutron_kuttl_run_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_neutron_kuttl_run_env|default({})), **(make_neutron_kuttl_run_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_neutron_kuttl.yml
---
- name: Debug make_neutron_kuttl_env
  when: make_neutron_kuttl_env is defined
  ansible.builtin.debug:
    var: make_neutron_kuttl_env
- name: Debug make_neutron_kuttl_params
  when: make_neutron_kuttl_params is defined
  ansible.builtin.debug:
    var: make_neutron_kuttl_params
- name: Run neutron_kuttl
  retries: "{{ make_neutron_kuttl_retries | default(omit) }}"
  delay: "{{ make_neutron_kuttl_delay | default(omit) }}"
  until: "{{ make_neutron_kuttl_until | default(true) }}"
  register: "make_neutron_kuttl_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make neutron_kuttl"
    dry_run: "{{ make_neutron_kuttl_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_neutron_kuttl_env|default({})), **(make_neutron_kuttl_params|default({}))) }}"

home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_kuttl_run.yml
---
- name: Debug make_octavia_kuttl_run_env
  when: make_octavia_kuttl_run_env is defined
  ansible.builtin.debug:
    var: make_octavia_kuttl_run_env
- name: Debug make_octavia_kuttl_run_params
  when: make_octavia_kuttl_run_params is defined
  ansible.builtin.debug:
    var: make_octavia_kuttl_run_params
- name: Run octavia_kuttl_run
  retries: "{{ make_octavia_kuttl_run_retries | default(omit) }}"
  delay: "{{ make_octavia_kuttl_run_delay | default(omit) }}"
  until: "{{ make_octavia_kuttl_run_until | default(true) }}"
  register: "make_octavia_kuttl_run_status"
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"
    chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls"
    script: "make octavia_kuttl_run"
    dry_run: "{{ make_octavia_kuttl_run_dryrun|default(false)|bool }}"
    extra_args: "{{ dict((make_octavia_kuttl_run_env|default({})), **(make_octavia_kuttl_run_params|default({}))) }}"
././@LongLink0000644000000000000000000000015600000000000011605
Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_octavia_0000644000175000017500000000165415115610204033355 0ustar zuulzuul--- - name: Debug make_octavia_kuttl_env when: make_octavia_kuttl_env is defined ansible.builtin.debug: var: make_octavia_kuttl_env - name: Debug make_octavia_kuttl_params when: make_octavia_kuttl_params is defined ansible.builtin.debug: var: make_octavia_kuttl_params - name: Run octavia_kuttl retries: "{{ make_octavia_kuttl_retries | default(omit) }}" delay: "{{ make_octavia_kuttl_delay | default(omit) }}" until: "{{ make_octavia_kuttl_until | default(true) }}" register: "make_octavia_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make octavia_kuttl" dry_run: "{{ make_octavia_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_octavia_kuttl_env|default({})), **(make_octavia_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designate_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designat0000644000175000017500000000171215115610204033361 0ustar zuulzuul--- - name: Debug make_designate_kuttl_env when: make_designate_kuttl_env is defined ansible.builtin.debug: var: make_designate_kuttl_env - name: Debug make_designate_kuttl_params when: make_designate_kuttl_params is defined ansible.builtin.debug: var: make_designate_kuttl_params - name: Run designate_kuttl retries: "{{ make_designate_kuttl_retries | default(omit) }}" delay: "{{ make_designate_kuttl_delay | default(omit) }}" until: "{{ make_designate_kuttl_until | default(true) }}" register: "make_designate_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make designate_kuttl" dry_run: "{{ make_designate_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_designate_kuttl_env|default({})), **(make_designate_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designate_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_designat0000644000175000017500000000200615115610204033356 0ustar zuulzuul--- - name: Debug make_designate_kuttl_run_env when: make_designate_kuttl_run_env is defined ansible.builtin.debug: var: make_designate_kuttl_run_env - name: Debug make_designate_kuttl_run_params when: make_designate_kuttl_run_params is defined ansible.builtin.debug: var: make_designate_kuttl_run_params - name: Run designate_kuttl_run retries: "{{ make_designate_kuttl_run_retries | default(omit) }}" delay: "{{ make_designate_kuttl_run_delay | default(omit) }}" until: "{{ make_designate_kuttl_run_until | default(true) }}" register: "make_designate_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: 
"/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make designate_kuttl_run" dry_run: "{{ make_designate_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_designate_kuttl_run_env|default({})), **(make_designate_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_kutt0000644000175000017500000000165415115610204033441 0ustar zuulzuul--- - name: Debug make_ovn_kuttl_run_env when: make_ovn_kuttl_run_env is defined ansible.builtin.debug: var: make_ovn_kuttl_run_env - name: Debug make_ovn_kuttl_run_params when: make_ovn_kuttl_run_params is defined ansible.builtin.debug: var: make_ovn_kuttl_run_params - name: Run ovn_kuttl_run retries: "{{ make_ovn_kuttl_run_retries | default(omit) }}" delay: "{{ make_ovn_kuttl_run_delay | default(omit) }}" until: "{{ make_ovn_kuttl_run_until | default(true) }}" register: "make_ovn_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ovn_kuttl_run" dry_run: "{{ make_ovn_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ovn_kuttl_run_env|default({})), **(make_ovn_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ovn_kutt0000644000175000017500000000156015115610204033435 0ustar zuulzuul--- - name: Debug make_ovn_kuttl_env when: make_ovn_kuttl_env is defined ansible.builtin.debug: var: make_ovn_kuttl_env - name: Debug make_ovn_kuttl_params when: make_ovn_kuttl_params is defined ansible.builtin.debug: var: make_ovn_kuttl_params - name: Run ovn_kuttl retries: "{{ make_ovn_kuttl_retries | default(omit) }}" delay: "{{ make_ovn_kuttl_delay | default(omit) }}" until: "{{ make_ovn_kuttl_until | default(true) }}" register: "make_ovn_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ovn_kuttl" dry_run: "{{ make_ovn_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ovn_kuttl_env|default({})), **(make_ovn_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_infra_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_infra_ku0000644000175000017500000000171215115610204033361 0ustar zuulzuul--- - name: Debug make_infra_kuttl_run_env when: make_infra_kuttl_run_env is defined ansible.builtin.debug: var: make_infra_kuttl_run_env - name: Debug make_infra_kuttl_run_params when: make_infra_kuttl_run_params is defined ansible.builtin.debug: var: make_infra_kuttl_run_params - name: Run infra_kuttl_run retries: "{{ make_infra_kuttl_run_retries | default(omit) }}" delay: "{{ make_infra_kuttl_run_delay | default(omit) }}" until: "{{ make_infra_kuttl_run_until | default(true) 
}}" register: "make_infra_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make infra_kuttl_run" dry_run: "{{ make_infra_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_infra_kuttl_run_env|default({})), **(make_infra_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_infra_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_infra_ku0000644000175000017500000000161615115610204033364 0ustar zuulzuul--- - name: Debug make_infra_kuttl_env when: make_infra_kuttl_env is defined ansible.builtin.debug: var: make_infra_kuttl_env - name: Debug make_infra_kuttl_params when: make_infra_kuttl_params is defined ansible.builtin.debug: var: make_infra_kuttl_params - name: Run infra_kuttl retries: "{{ make_infra_kuttl_retries | default(omit) }}" delay: "{{ make_infra_kuttl_delay | default(omit) }}" until: "{{ make_infra_kuttl_until | default(true) }}" register: "make_infra_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make infra_kuttl" dry_run: "{{ make_infra_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_infra_kuttl_env|default({})), **(make_infra_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_k0000644000175000017500000000173115115610204033361 0ustar zuulzuul--- - name: Debug make_ironic_kuttl_run_env when: make_ironic_kuttl_run_env is defined ansible.builtin.debug: var: make_ironic_kuttl_run_env - name: Debug make_ironic_kuttl_run_params when: make_ironic_kuttl_run_params is defined ansible.builtin.debug: var: make_ironic_kuttl_run_params - name: Run ironic_kuttl_run retries: "{{ make_ironic_kuttl_run_retries | default(omit) }}" delay: "{{ make_ironic_kuttl_run_delay | default(omit) }}" until: "{{ make_ironic_kuttl_run_until | default(true) }}" register: "make_ironic_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ironic_kuttl_run" dry_run: "{{ make_ironic_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ironic_kuttl_run_env|default({})), **(make_ironic_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_k0000644000175000017500000000163515115610204033364 0ustar zuulzuul--- - name: Debug make_ironic_kuttl_env when: make_ironic_kuttl_env is defined ansible.builtin.debug: var: make_ironic_kuttl_env - name: Debug make_ironic_kuttl_params when: make_ironic_kuttl_params is defined ansible.builtin.debug: var: 
make_ironic_kuttl_params - name: Run ironic_kuttl retries: "{{ make_ironic_kuttl_retries | default(omit) }}" delay: "{{ make_ironic_kuttl_delay | default(omit) }}" until: "{{ make_ironic_kuttl_until | default(true) }}" register: "make_ironic_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ironic_kuttl" dry_run: "{{ make_ironic_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ironic_kuttl_env|default({})), **(make_ironic_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_kuttl_crc.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ironic_k0000644000175000017500000000173115115610204033361 0ustar zuulzuul--- - name: Debug make_ironic_kuttl_crc_env when: make_ironic_kuttl_crc_env is defined ansible.builtin.debug: var: make_ironic_kuttl_crc_env - name: Debug make_ironic_kuttl_crc_params when: make_ironic_kuttl_crc_params is defined ansible.builtin.debug: var: make_ironic_kuttl_crc_params - name: Run ironic_kuttl_crc retries: "{{ make_ironic_kuttl_crc_retries | default(omit) }}" delay: "{{ make_ironic_kuttl_crc_delay | default(omit) }}" until: "{{ make_ironic_kuttl_crc_until | default(true) }}" register: "make_ironic_kuttl_crc_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ironic_kuttl_crc" dry_run: "{{ make_ironic_kuttl_crc_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ironic_kuttl_crc_env|default({})), **(make_ironic_kuttl_crc_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_heat_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_heat_kut0000644000175000017500000000167315115610204033375 0ustar zuulzuul--- - name: Debug make_heat_kuttl_run_env when: make_heat_kuttl_run_env is defined ansible.builtin.debug: var: make_heat_kuttl_run_env - name: Debug make_heat_kuttl_run_params when: make_heat_kuttl_run_params is defined ansible.builtin.debug: var: make_heat_kuttl_run_params - name: Run heat_kuttl_run retries: "{{ make_heat_kuttl_run_retries | default(omit) }}" delay: "{{ make_heat_kuttl_run_delay | default(omit) }}" until: "{{ make_heat_kuttl_run_until | default(true) }}" register: "make_heat_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make heat_kuttl_run" dry_run: "{{ make_heat_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_heat_kuttl_run_env|default({})), **(make_heat_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_heat_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_heat_kut0000644000175000017500000000157715115610204033400 0ustar zuulzuul--- - 
name: Debug make_heat_kuttl_env when: make_heat_kuttl_env is defined ansible.builtin.debug: var: make_heat_kuttl_env - name: Debug make_heat_kuttl_params when: make_heat_kuttl_params is defined ansible.builtin.debug: var: make_heat_kuttl_params - name: Run heat_kuttl retries: "{{ make_heat_kuttl_retries | default(omit) }}" delay: "{{ make_heat_kuttl_delay | default(omit) }}" until: "{{ make_heat_kuttl_until | default(true) }}" register: "make_heat_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make heat_kuttl" dry_run: "{{ make_heat_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_heat_kuttl_env|default({})), **(make_heat_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_heat_kuttl_crc.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_heat_kut0000644000175000017500000000167315115610204033375 0ustar zuulzuul--- - name: Debug make_heat_kuttl_crc_env when: make_heat_kuttl_crc_env is defined ansible.builtin.debug: var: make_heat_kuttl_crc_env - name: Debug make_heat_kuttl_crc_params when: make_heat_kuttl_crc_params is defined ansible.builtin.debug: var: make_heat_kuttl_crc_params - name: Run heat_kuttl_crc retries: "{{ make_heat_kuttl_crc_retries | default(omit) }}" delay: "{{ make_heat_kuttl_crc_delay | default(omit) }}" until: "{{ make_heat_kuttl_crc_until | default(true) }}" register: "make_heat_kuttl_crc_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make heat_kuttl_crc" dry_run: "{{ make_heat_kuttl_crc_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_heat_kuttl_crc_env|default({})), **(make_heat_kuttl_crc_params|default({}))) }}" ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ansibleee_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ansiblee0000644000175000017500000000200615115610204033342 0ustar zuulzuul--- - name: Debug make_ansibleee_kuttl_run_env when: make_ansibleee_kuttl_run_env is defined ansible.builtin.debug: var: make_ansibleee_kuttl_run_env - name: Debug make_ansibleee_kuttl_run_params when: make_ansibleee_kuttl_run_params is defined ansible.builtin.debug: var: make_ansibleee_kuttl_run_params - name: Run ansibleee_kuttl_run retries: "{{ make_ansibleee_kuttl_run_retries | default(omit) }}" delay: "{{ make_ansibleee_kuttl_run_delay | default(omit) }}" until: "{{ make_ansibleee_kuttl_run_until | default(true) }}" register: "make_ansibleee_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ansibleee_kuttl_run" dry_run: "{{ make_ansibleee_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ansibleee_kuttl_run_env|default({})), **(make_ansibleee_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000017000000000000011601 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ansibleee_kuttl_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ansiblee0000644000175000017500000000210215115610204033337 0ustar zuulzuul--- - name: Debug make_ansibleee_kuttl_cleanup_env when: make_ansibleee_kuttl_cleanup_env is defined ansible.builtin.debug: var: make_ansibleee_kuttl_cleanup_env - name: Debug make_ansibleee_kuttl_cleanup_params when: make_ansibleee_kuttl_cleanup_params is defined ansible.builtin.debug: var: make_ansibleee_kuttl_cleanup_params - name: Run ansibleee_kuttl_cleanup retries: "{{ make_ansibleee_kuttl_cleanup_retries | default(omit) }}" delay: "{{ make_ansibleee_kuttl_cleanup_delay | default(omit) }}" until: "{{ make_ansibleee_kuttl_cleanup_until | default(true) }}" register: "make_ansibleee_kuttl_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ansibleee_kuttl_cleanup" dry_run: "{{ make_ansibleee_kuttl_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ansibleee_kuttl_cleanup_env|default({})), **(make_ansibleee_kuttl_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ansibleee_kuttl_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ansiblee0000644000175000017500000000202515115610204033343 0ustar zuulzuul--- - name: Debug make_ansibleee_kuttl_prep_env when: make_ansibleee_kuttl_prep_env is defined ansible.builtin.debug: var: make_ansibleee_kuttl_prep_env - name: Debug make_ansibleee_kuttl_prep_params when: make_ansibleee_kuttl_prep_params is defined ansible.builtin.debug: var: make_ansibleee_kuttl_prep_params - name: Run ansibleee_kuttl_prep retries: "{{ make_ansibleee_kuttl_prep_retries | default(omit) }}" delay: "{{ make_ansibleee_kuttl_prep_delay | default(omit) }}" until: "{{ make_ansibleee_kuttl_prep_until | default(true) }}" register: "make_ansibleee_kuttl_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ansibleee_kuttl_prep" dry_run: "{{ make_ansibleee_kuttl_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ansibleee_kuttl_prep_env|default({})), **(make_ansibleee_kuttl_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ansibleee_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_ansiblee0000644000175000017500000000171215115610204033345 0ustar zuulzuul--- - name: Debug make_ansibleee_kuttl_env when: make_ansibleee_kuttl_env is defined ansible.builtin.debug: var: make_ansibleee_kuttl_env - name: Debug make_ansibleee_kuttl_params when: make_ansibleee_kuttl_params is defined ansible.builtin.debug: var: make_ansibleee_kuttl_params - name: Run ansibleee_kuttl retries: "{{ make_ansibleee_kuttl_retries | default(omit) }}" delay: "{{ make_ansibleee_kuttl_delay | default(omit) }}" until: "{{ make_ansibleee_kuttl_until | default(true) }}" register: 
"make_ansibleee_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ansibleee_kuttl" dry_run: "{{ make_ansibleee_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ansibleee_kuttl_env|default({})), **(make_ansibleee_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_k0000644000175000017500000000173115115610204033327 0ustar zuulzuul--- - name: Debug make_glance_kuttl_run_env when: make_glance_kuttl_run_env is defined ansible.builtin.debug: var: make_glance_kuttl_run_env - name: Debug make_glance_kuttl_run_params when: make_glance_kuttl_run_params is defined ansible.builtin.debug: var: make_glance_kuttl_run_params - name: Run glance_kuttl_run retries: "{{ make_glance_kuttl_run_retries | default(omit) }}" delay: "{{ make_glance_kuttl_run_delay | default(omit) }}" until: "{{ make_glance_kuttl_run_until | default(true) }}" register: "make_glance_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make glance_kuttl_run" dry_run: "{{ make_glance_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_glance_kuttl_run_env|default({})), **(make_glance_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_glance_k0000644000175000017500000000163515115610204033332 0ustar zuulzuul--- - name: Debug make_glance_kuttl_env when: make_glance_kuttl_env is defined ansible.builtin.debug: var: make_glance_kuttl_env - name: Debug make_glance_kuttl_params when: make_glance_kuttl_params is defined ansible.builtin.debug: var: make_glance_kuttl_params - name: Run glance_kuttl retries: "{{ make_glance_kuttl_retries | default(omit) }}" delay: "{{ make_glance_kuttl_delay | default(omit) }}" until: "{{ make_glance_kuttl_until | default(true) }}" register: "make_glance_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make glance_kuttl" dry_run: "{{ make_glance_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_glance_kuttl_env|default({})), **(make_glance_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_manila_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_manila_k0000644000175000017500000000173115115610204033337 0ustar zuulzuul--- - name: Debug make_manila_kuttl_run_env when: make_manila_kuttl_run_env is defined ansible.builtin.debug: var: make_manila_kuttl_run_env - name: Debug make_manila_kuttl_run_params when: make_manila_kuttl_run_params is defined 
ansible.builtin.debug: var: make_manila_kuttl_run_params - name: Run manila_kuttl_run retries: "{{ make_manila_kuttl_run_retries | default(omit) }}" delay: "{{ make_manila_kuttl_run_delay | default(omit) }}" until: "{{ make_manila_kuttl_run_until | default(true) }}" register: "make_manila_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make manila_kuttl_run" dry_run: "{{ make_manila_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_manila_kuttl_run_env|default({})), **(make_manila_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_manila_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_manila_k0000644000175000017500000000163515115610204033342 0ustar zuulzuul--- - name: Debug make_manila_kuttl_env when: make_manila_kuttl_env is defined ansible.builtin.debug: var: make_manila_kuttl_env - name: Debug make_manila_kuttl_params when: make_manila_kuttl_params is defined ansible.builtin.debug: var: make_manila_kuttl_params - name: Run manila_kuttl retries: "{{ make_manila_kuttl_retries | default(omit) }}" delay: "{{ make_manila_kuttl_delay | default(omit) }}" until: "{{ make_manila_kuttl_until | default(true) }}" register: "make_manila_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make manila_kuttl" dry_run: "{{ make_manila_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_manila_kuttl_env|default({})), **(make_manila_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_swift_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_swift_ku0000644000175000017500000000171215115610204033416 0ustar zuulzuul--- - name: Debug make_swift_kuttl_run_env when: make_swift_kuttl_run_env is defined ansible.builtin.debug: var: make_swift_kuttl_run_env - name: Debug make_swift_kuttl_run_params when: make_swift_kuttl_run_params is defined ansible.builtin.debug: var: make_swift_kuttl_run_params - name: Run swift_kuttl_run retries: "{{ make_swift_kuttl_run_retries | default(omit) }}" delay: "{{ make_swift_kuttl_run_delay | default(omit) }}" until: "{{ make_swift_kuttl_run_until | default(true) }}" register: "make_swift_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make swift_kuttl_run" dry_run: "{{ make_swift_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_swift_kuttl_run_env|default({})), **(make_swift_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_swift_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_swift_ku0000644000175000017500000000161615115610204033421 
0ustar zuulzuul--- - name: Debug make_swift_kuttl_env when: make_swift_kuttl_env is defined ansible.builtin.debug: var: make_swift_kuttl_env - name: Debug make_swift_kuttl_params when: make_swift_kuttl_params is defined ansible.builtin.debug: var: make_swift_kuttl_params - name: Run swift_kuttl retries: "{{ make_swift_kuttl_retries | default(omit) }}" delay: "{{ make_swift_kuttl_delay | default(omit) }}" until: "{{ make_swift_kuttl_until | default(true) }}" register: "make_swift_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make swift_kuttl" dry_run: "{{ make_swift_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_swift_kuttl_env|default({})), **(make_swift_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_0000644000175000017500000000175015115610204033414 0ustar zuulzuul--- - name: Debug make_horizon_kuttl_run_env when: make_horizon_kuttl_run_env is defined ansible.builtin.debug: var: make_horizon_kuttl_run_env - name: Debug make_horizon_kuttl_run_params when: make_horizon_kuttl_run_params is defined ansible.builtin.debug: var: make_horizon_kuttl_run_params - name: Run horizon_kuttl_run retries: "{{ make_horizon_kuttl_run_retries | default(omit) }}" delay: "{{ make_horizon_kuttl_run_delay | default(omit) }}" until: "{{ make_horizon_kuttl_run_until | default(true) }}" register: "make_horizon_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make horizon_kuttl_run" dry_run: "{{ make_horizon_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_horizon_kuttl_run_env|default({})), **(make_horizon_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_0000644000175000017500000000165415115610204033417 0ustar zuulzuul--- - name: Debug make_horizon_kuttl_env when: make_horizon_kuttl_env is defined ansible.builtin.debug: var: make_horizon_kuttl_env - name: Debug make_horizon_kuttl_params when: make_horizon_kuttl_params is defined ansible.builtin.debug: var: make_horizon_kuttl_params - name: Run horizon_kuttl retries: "{{ make_horizon_kuttl_retries | default(omit) }}" delay: "{{ make_horizon_kuttl_delay | default(omit) }}" until: "{{ make_horizon_kuttl_until | default(true) }}" register: "make_horizon_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make horizon_kuttl" dry_run: "{{ make_horizon_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_horizon_kuttl_env|default({})), **(make_horizon_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016400000000000011604 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_kuttl_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000200615115610204033374 0ustar zuulzuul--- - name: Debug make_openstack_kuttl_run_env when: make_openstack_kuttl_run_env is defined ansible.builtin.debug: var: make_openstack_kuttl_run_env - name: Debug make_openstack_kuttl_run_params when: make_openstack_kuttl_run_params is defined ansible.builtin.debug: var: make_openstack_kuttl_run_params - name: Run openstack_kuttl_run retries: "{{ make_openstack_kuttl_run_retries | default(omit) }}" delay: "{{ make_openstack_kuttl_run_delay | default(omit) }}" until: "{{ make_openstack_kuttl_run_until | default(true) }}" register: "make_openstack_kuttl_run_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_kuttl_run" dry_run: "{{ make_openstack_kuttl_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_kuttl_run_env|default({})), **(make_openstack_kuttl_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstack_kuttl.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_openstac0000644000175000017500000000171215115610204033377 0ustar zuulzuul--- - name: Debug make_openstack_kuttl_env when: make_openstack_kuttl_env is defined ansible.builtin.debug: var: make_openstack_kuttl_env - name: Debug make_openstack_kuttl_params when: make_openstack_kuttl_params is defined ansible.builtin.debug: var: make_openstack_kuttl_params - name: Run openstack_kuttl retries: "{{ make_openstack_kuttl_retries | default(omit) }}" delay: "{{ make_openstack_kuttl_delay | default(omit) }}" until: "{{ make_openstack_kuttl_until | default(true) }}" register: "make_openstack_kuttl_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make openstack_kuttl" dry_run: "{{ make_openstack_kuttl_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_openstack_kuttl_env|default({})), **(make_openstack_kuttl_params|default({}))) }}" ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_chainsaw_run.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_0000644000175000017500000000202515115610204033317 0ustar zuulzuul--- - name: Debug make_mariadb_chainsaw_run_env when: make_mariadb_chainsaw_run_env is defined ansible.builtin.debug: var: make_mariadb_chainsaw_run_env - name: Debug make_mariadb_chainsaw_run_params when: make_mariadb_chainsaw_run_params is defined ansible.builtin.debug: var: make_mariadb_chainsaw_run_params - name: Run mariadb_chainsaw_run retries: "{{ make_mariadb_chainsaw_run_retries | default(omit) }}" delay: "{{ make_mariadb_chainsaw_run_delay | default(omit) }}" until: "{{ make_mariadb_chainsaw_run_until | default(true) }}" register: "make_mariadb_chainsaw_run_status" cifmw.general.ci_script: output_dir: "{{ 
cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make mariadb_chainsaw_run" dry_run: "{{ make_mariadb_chainsaw_run_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_mariadb_chainsaw_run_env|default({})), **(make_mariadb_chainsaw_run_params|default({}))) }}" ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_chainsaw.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_mariadb_0000644000175000017500000000173115115610204033322 0ustar zuulzuul--- - name: Debug make_mariadb_chainsaw_env when: make_mariadb_chainsaw_env is defined ansible.builtin.debug: var: make_mariadb_chainsaw_env - name: Debug make_mariadb_chainsaw_params when: make_mariadb_chainsaw_params is defined ansible.builtin.debug: var: make_mariadb_chainsaw_params - name: Run mariadb_chainsaw retries: "{{ make_mariadb_chainsaw_retries | default(omit) }}" delay: "{{ make_mariadb_chainsaw_delay | default(omit) }}" until: "{{ make_mariadb_chainsaw_until | default(true) }}" register: "make_mariadb_chainsaw_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make mariadb_chainsaw" dry_run: "{{ make_mariadb_chainsaw_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_mariadb_chainsaw_env|default({})), **(make_mariadb_chainsaw_params|default({}))) }}" ././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_0000644000175000017500000000163515115610204033416 0ustar zuulzuul--- - name: Debug make_horizon_prep_env when: make_horizon_prep_env is defined ansible.builtin.debug: var: make_horizon_prep_env - name: Debug make_horizon_prep_params when: make_horizon_prep_params is defined ansible.builtin.debug: var: make_horizon_prep_params - name: Run horizon_prep retries: "{{ make_horizon_prep_retries | default(omit) }}" delay: "{{ make_horizon_prep_delay | default(omit) }}" until: "{{ make_horizon_prep_until | default(true) }}" register: "make_horizon_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make horizon_prep" dry_run: "{{ make_horizon_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_horizon_prep_env|default({})), **(make_horizon_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon.0000644000175000017500000000152215115610204033330 0ustar zuulzuul--- - name: Debug make_horizon_env when: make_horizon_env is defined ansible.builtin.debug: var: make_horizon_env - name: Debug make_horizon_params when: make_horizon_params is defined ansible.builtin.debug: var: make_horizon_params - name: Run horizon retries: "{{ make_horizon_retries | default(omit) }}" 
delay: "{{ make_horizon_delay | default(omit) }}" until: "{{ make_horizon_until | default(true) }}" register: "make_horizon_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make horizon" dry_run: "{{ make_horizon_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_horizon_env|default({})), **(make_horizon_params|default({}))) }}" ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_0000644000175000017500000000171215115610204033412 0ustar zuulzuul--- - name: Debug make_horizon_cleanup_env when: make_horizon_cleanup_env is defined ansible.builtin.debug: var: make_horizon_cleanup_env - name: Debug make_horizon_cleanup_params when: make_horizon_cleanup_params is defined ansible.builtin.debug: var: make_horizon_cleanup_params - name: Run horizon_cleanup retries: "{{ make_horizon_cleanup_retries | default(omit) }}" delay: "{{ make_horizon_cleanup_delay | default(omit) }}" until: "{{ make_horizon_cleanup_until | default(true) }}" register: "make_horizon_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make horizon_cleanup" dry_run: "{{ make_horizon_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_horizon_cleanup_env|default({})), **(make_horizon_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_0000644000175000017500000000200615115610204033407 0ustar zuulzuul--- - name: Debug make_horizon_deploy_prep_env when: make_horizon_deploy_prep_env is defined ansible.builtin.debug: var: make_horizon_deploy_prep_env - name: Debug make_horizon_deploy_prep_params when: make_horizon_deploy_prep_params is defined ansible.builtin.debug: var: make_horizon_deploy_prep_params - name: Run horizon_deploy_prep retries: "{{ make_horizon_deploy_prep_retries | default(omit) }}" delay: "{{ make_horizon_deploy_prep_delay | default(omit) }}" until: "{{ make_horizon_deploy_prep_until | default(true) }}" register: "make_horizon_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make horizon_deploy_prep" dry_run: "{{ make_horizon_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_horizon_deploy_prep_env|default({})), **(make_horizon_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_0000644000175000017500000000167315115610204033420 0ustar zuulzuul--- - name: Debug make_horizon_deploy_env when: make_horizon_deploy_env is defined 
ansible.builtin.debug: var: make_horizon_deploy_env - name: Debug make_horizon_deploy_params when: make_horizon_deploy_params is defined ansible.builtin.debug: var: make_horizon_deploy_params - name: Run horizon_deploy retries: "{{ make_horizon_deploy_retries | default(omit) }}" delay: "{{ make_horizon_deploy_delay | default(omit) }}" until: "{{ make_horizon_deploy_until | default(true) }}" register: "make_horizon_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make horizon_deploy" dry_run: "{{ make_horizon_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_horizon_deploy_env|default({})), **(make_horizon_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_horizon_0000644000175000017500000000206315115610204033412 0ustar zuulzuul--- - name: Debug make_horizon_deploy_cleanup_env when: make_horizon_deploy_cleanup_env is defined ansible.builtin.debug: var: make_horizon_deploy_cleanup_env - name: Debug make_horizon_deploy_cleanup_params when: make_horizon_deploy_cleanup_params is defined ansible.builtin.debug: var: make_horizon_deploy_cleanup_params - name: Run horizon_deploy_cleanup retries: "{{ make_horizon_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_horizon_deploy_cleanup_delay | default(omit) }}" until: "{{ make_horizon_deploy_cleanup_until | default(true) }}" register: "make_horizon_deploy_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make horizon_deploy_cleanup" dry_run: "{{ make_horizon_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_horizon_deploy_cleanup_env|default({})), **(make_horizon_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_heat_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_heat_pre0000644000175000017500000000156015115610204033353 0ustar zuulzuul--- - name: Debug make_heat_prep_env when: make_heat_prep_env is defined ansible.builtin.debug: var: make_heat_prep_env - name: Debug make_heat_prep_params when: make_heat_prep_params is defined ansible.builtin.debug: var: make_heat_prep_params - name: Run heat_prep retries: "{{ make_heat_prep_retries | default(omit) }}" delay: "{{ make_heat_prep_delay | default(omit) }}" until: "{{ make_heat_prep_until | default(true) }}" register: "make_heat_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make heat_prep" dry_run: "{{ make_heat_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_heat_prep_env|default({})), **(make_heat_prep_params|default({}))) }}" home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_heat.yml0000644000175000017500000000144515115610204033307 0ustar 
# The remaining generated task files in this archive repeat the three-task pattern
# shown above for make_heat_prep.yml -- "Debug <prefix>_env", "Debug <prefix>_params",
# then "Run <target>" calling cifmw.general.ci_script with script "make <target>",
# the same retries/delay/until/register knobs, and the same extra_args merge --
# with only the target name substituted. Files in this part of the archive, all under
# home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/:
#
#   make_heat.yml
#   make_heat_cleanup.yml
#   make_heat_deploy_prep.yml
#   make_heat_deploy.yml
#   make_heat_deploy_cleanup.yml
#   make_ansibleee_prep.yml
#   make_ansibleee.yml
#   make_ansibleee_cleanup.yml
#   make_baremetal_prep.yml
#   make_baremetal.yml
#   make_baremetal_cleanup.yml
#   make_ceph_help.yml
#   make_ceph.yml
#   make_ceph_cleanup.yml
#   make_rook_prep.yml
#   make_rook.yml
#   make_rook_deploy_prep.yml
#   make_rook_deploy.yml
#   make_rook_crc_disk.yml
#   make_rook_cleanup.yml
#   make_lvms.yml
#   make_nmstate.yml
#   make_nncp.yml
#   make_nncp_cleanup.yml
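For reference, the extra_args expression used by every wrapper above merges the two optional dicts with Python dict() semantics, so keys set in *_params override the same keys in *_env. The standalone task below illustrates the merge with hypothetical values; it is not part of the archived role.

- name: Illustrate the extra_args merge (hypothetical values)
  vars:
    make_nncp_env:
      NNCP_TIMEOUT: "240s"       # hypothetical
      NNCP_INTERFACE: "enp6s0"   # hypothetical
    make_nncp_params:
      NNCP_TIMEOUT: "600s"       # hypothetical; *_params wins on duplicate keys
  ansible.builtin.debug:
    # Renders the merged mapping: {'NNCP_TIMEOUT': '600s', 'NNCP_INTERFACE': 'enp6s0'}
    msg: "{{ dict((make_nncp_env|default({})), **(make_nncp_params|default({}))) }}"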
# Same pattern, continued -- additional task files under .../install_yamls_makes/tasks/:
#
#   make_netattach.yml
#   make_netattach_cleanup.yml
#   make_metallb.yml
#   make_metallb_config.yml
#   make_metallb_config_cleanup.yml
#   make_metallb_cleanup.yml
#   make_loki.yml
#   make_loki_cleanup.yml
#   make_loki_deploy.yml
#   make_loki_deploy_cleanup.yml
#   make_netobserv.yml
#   make_netobserv_cleanup.yml
#   make_netobserv_deploy.yml
#   make_netobserv_deploy_cleanup.yml
#   make_manila_prep.yml
#   make_manila.yml
#   make_manila_cleanup.yml
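Besides the env/params dicts, each wrapper exposes the knobs visible in the tasks above: make_<target>_retries/_delay/_until for the task loop, make_<target>_dryrun for cifmw.general.ci_script's dry_run, and cifmw_basedir for the artifact output directory. A hedged sketch of toggling the two simplest ones when including a wrapper follows; the role invocation and the path are assumptions.

- name: Example | dry-run "make manila_prep" with a custom artifact basedir (sketch)
  vars:
    cifmw_basedir: "/tmp/cifmw-data"   # hypothetical; overrides ansible_user_dir ~ '/ci-framework-data'
    make_manila_prep_dryrun: true      # passed to cifmw.general.ci_script's dry_run
  ansible.builtin.include_role:
    name: install_yamls_makes
    tasks_from: make_manila_prep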
././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_manila_deploy_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_manila_d0000644000175000017500000000176715115610204033341 0ustar zuulzuul--- - name: Debug make_manila_deploy_prep_env when: make_manila_deploy_prep_env is defined ansible.builtin.debug: var: make_manila_deploy_prep_env - name: Debug make_manila_deploy_prep_params when: make_manila_deploy_prep_params is defined ansible.builtin.debug: var: make_manila_deploy_prep_params - name: Run manila_deploy_prep retries: "{{ make_manila_deploy_prep_retries | default(omit) }}" delay: "{{ make_manila_deploy_prep_delay | default(omit) }}" until: "{{ make_manila_deploy_prep_until | default(true) }}" register: "make_manila_deploy_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make manila_deploy_prep" dry_run: "{{ make_manila_deploy_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_manila_deploy_prep_env|default({})), **(make_manila_deploy_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_manila_deploy.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_manila_d0000644000175000017500000000165415115610204033334 0ustar zuulzuul--- - name: Debug make_manila_deploy_env when: make_manila_deploy_env is defined ansible.builtin.debug: var: make_manila_deploy_env - name: Debug make_manila_deploy_params when: make_manila_deploy_params is defined ansible.builtin.debug: var: make_manila_deploy_params - name: Run manila_deploy retries: "{{ make_manila_deploy_retries | default(omit) }}" delay: "{{ make_manila_deploy_delay | default(omit) }}" until: "{{ make_manila_deploy_until | default(true) }}" register: "make_manila_deploy_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make manila_deploy" dry_run: "{{ make_manila_deploy_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_manila_deploy_env|default({})), **(make_manila_deploy_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_manila_deploy_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_manila_d0000644000175000017500000000204415115610204033326 0ustar zuulzuul--- - name: Debug make_manila_deploy_cleanup_env when: make_manila_deploy_cleanup_env is defined ansible.builtin.debug: var: make_manila_deploy_cleanup_env - name: Debug make_manila_deploy_cleanup_params when: make_manila_deploy_cleanup_params is defined ansible.builtin.debug: var: make_manila_deploy_cleanup_params - name: Run manila_deploy_cleanup retries: "{{ make_manila_deploy_cleanup_retries | default(omit) }}" delay: "{{ make_manila_deploy_cleanup_delay | default(omit) }}" until: "{{ make_manila_deploy_cleanup_until | default(true) }}" register: "make_manila_deploy_cleanup_status" cifmw.general.ci_script: 
output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make manila_deploy_cleanup" dry_run: "{{ make_manila_deploy_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_manila_deploy_cleanup_env|default({})), **(make_manila_deploy_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_telemetry_prep.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_telemetr0000644000175000017500000000167315115610204033412 0ustar zuulzuul--- - name: Debug make_telemetry_prep_env when: make_telemetry_prep_env is defined ansible.builtin.debug: var: make_telemetry_prep_env - name: Debug make_telemetry_prep_params when: make_telemetry_prep_params is defined ansible.builtin.debug: var: make_telemetry_prep_params - name: Run telemetry_prep retries: "{{ make_telemetry_prep_retries | default(omit) }}" delay: "{{ make_telemetry_prep_delay | default(omit) }}" until: "{{ make_telemetry_prep_until | default(true) }}" register: "make_telemetry_prep_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make telemetry_prep" dry_run: "{{ make_telemetry_prep_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_telemetry_prep_env|default({})), **(make_telemetry_prep_params|default({}))) }}" ././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_telemetry.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_telemetr0000644000175000017500000000156015115610204033405 0ustar zuulzuul--- - name: Debug make_telemetry_env when: make_telemetry_env is defined ansible.builtin.debug: var: make_telemetry_env - name: Debug make_telemetry_params when: make_telemetry_params is defined ansible.builtin.debug: var: make_telemetry_params - name: Run telemetry retries: "{{ make_telemetry_retries | default(omit) }}" delay: "{{ make_telemetry_delay | default(omit) }}" until: "{{ make_telemetry_until | default(true) }}" register: "make_telemetry_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make telemetry" dry_run: "{{ make_telemetry_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_telemetry_env|default({})), **(make_telemetry_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_telemetry_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_telemetr0000644000175000017500000000175015115610204033406 0ustar zuulzuul--- - name: Debug make_telemetry_cleanup_env when: make_telemetry_cleanup_env is defined ansible.builtin.debug: var: make_telemetry_cleanup_env - name: Debug make_telemetry_cleanup_params when: make_telemetry_cleanup_params is defined ansible.builtin.debug: var: make_telemetry_cleanup_params - name: Run telemetry_cleanup retries: "{{ 
The remaining task files under home/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/ repeat the structure shown in make_telemetry_prep.yml above: two optional debug tasks for make_<target>_env and make_<target>_params, followed by a cifmw.general.ci_script task that runs "make <target>" with the matching make_<target>_retries, _delay, _until, _dryrun and _status wiring. They differ only in the make target and the working directory; a short illustration of the env/params merge used by extra_args follows this list. The wrappers below run from /home/zuul/src/github.com/openstack-k8s-operators/install_yamls:

- make_telemetry.yml: make telemetry
- make_telemetry_cleanup.yml: make telemetry_cleanup
- make_telemetry_deploy_prep.yml: make telemetry_deploy_prep
- make_telemetry_deploy.yml: make telemetry_deploy
- make_telemetry_deploy_cleanup.yml: make telemetry_deploy_cleanup
- make_telemetry_kuttl_run.yml: make telemetry_kuttl_run
- make_telemetry_kuttl.yml: make telemetry_kuttl
- make_swift_prep.yml: make swift_prep
- make_swift.yml: make swift
- make_swift_cleanup.yml: make swift_cleanup
- make_swift_deploy_prep.yml: make swift_deploy_prep
- make_swift_deploy.yml: make swift_deploy
- make_swift_deploy_cleanup.yml: make swift_deploy_cleanup
- make_certmanager.yml: make certmanager
- make_certmanager_cleanup.yml: make certmanager_cleanup
- make_validate_marketplace.yml: make validate_marketplace
- make_redis_deploy_prep.yml: make redis_deploy_prep
- make_redis_deploy.yml: make redis_deploy
- make_redis_deploy_cleanup.yml: make redis_deploy_cleanup
- make_set_slower_etcd_profile.yml: make set_slower_etcd_profile
The wrappers below follow the same structure but run from /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup; a dry_run sketch follows this list:

- make_download_tools.yml: make download_tools
- make_nfs.yml: make nfs
- make_nfs_cleanup.yml: make nfs_cleanup
- make_crc.yml: make crc
- make_crc_cleanup.yml: make crc_cleanup
- make_crc_scrub.yml: make crc_scrub
- make_crc_attach_default_interface.yml: make crc_attach_default_interface
- make_crc_attach_default_interface_cleanup.yml: make crc_attach_default_interface_cleanup
- make_ipv6_lab_network.yml: make ipv6_lab_network
- make_ipv6_lab_network_cleanup.yml: make ipv6_lab_network_cleanup
- make_ipv6_lab_nat64_router.yml: make ipv6_lab_nat64_router
- make_ipv6_lab_nat64_router_cleanup.yml: make ipv6_lab_nat64_router_cleanup
- make_ipv6_lab_sno.yml: make ipv6_lab_sno
- make_ipv6_lab_sno_cleanup.yml: make ipv6_lab_sno_cleanup
- make_ipv6_lab.yml: make ipv6_lab
- make_ipv6_lab_cleanup.yml: make ipv6_lab_cleanup
- make_attach_default_interface.yml: make attach_default_interface
- make_attach_default_interface_cleanup.yml: make attach_default_interface_cleanup
- make_network_isolation_bridge.yml: make network_isolation_bridge
- make_network_isolation_bridge_cleanup.yml: make network_isolation_bridge_cleanup
- make_edpm_baremetal_compute.yml: make edpm_baremetal_compute
- make_edpm_compute.yml: make edpm_compute
- make_edpm_compute_bootc.yml: make edpm_compute_bootc
- make_edpm_ansible_runner.yml: make edpm_ansible_runner
- make_edpm_computes_bgp.yml: make edpm_computes_bgp
- make_edpm_compute_repos.yml: make edpm_compute_repos
- make_edpm_compute_cleanup.yml: make edpm_compute_cleanup
- make_edpm_networker.yml: make edpm_networker
- make_edpm_networker_cleanup.yml: make edpm_networker_cleanup
- make_edpm_deploy_instance.yml: make edpm_deploy_instance
- make_tripleo_deploy.yml: make tripleo_deploy
- make_standalone_deploy.yml: make standalone_deploy
- make_standalone_sync.yml: make standalone_sync
- make_standalone.yml: make standalone
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_standalone_sync.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_standalo0000644000175000017500000000172315115610204033372 0ustar zuulzuul--- - name: Debug make_standalone_sync_env when: make_standalone_sync_env is defined ansible.builtin.debug: var: make_standalone_sync_env - name: Debug make_standalone_sync_params when: make_standalone_sync_params is defined ansible.builtin.debug: var: make_standalone_sync_params - name: Run standalone_sync retries: "{{ make_standalone_sync_retries | default(omit) }}" delay: "{{ make_standalone_sync_delay | default(omit) }}" until: "{{ make_standalone_sync_until | default(true) }}" register: "make_standalone_sync_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make standalone_sync" dry_run: "{{ make_standalone_sync_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_standalone_sync_env|default({})), **(make_standalone_sync_params|default({}))) }}" ././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_standalone.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_standalo0000644000175000017500000000161015115610204033365 0ustar zuulzuul--- - name: Debug make_standalone_env when: make_standalone_env is defined ansible.builtin.debug: var: make_standalone_env - name: Debug make_standalone_params when: make_standalone_params is defined ansible.builtin.debug: var: make_standalone_params - name: Run standalone retries: "{{ make_standalone_retries | default(omit) }}" delay: "{{ make_standalone_delay | default(omit) }}" until: "{{ make_standalone_until | default(true) }}" register: "make_standalone_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make standalone" dry_run: "{{ make_standalone_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_standalone_env|default({})), **(make_standalone_params|default({}))) }}" ././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_standalone_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_standalo0000644000175000017500000000200015115610204033357 0ustar zuulzuul--- - name: Debug make_standalone_cleanup_env when: make_standalone_cleanup_env is defined ansible.builtin.debug: var: make_standalone_cleanup_env - name: Debug make_standalone_cleanup_params when: make_standalone_cleanup_params is defined ansible.builtin.debug: var: make_standalone_cleanup_params - name: Run standalone_cleanup retries: "{{ make_standalone_cleanup_retries | default(omit) }}" delay: "{{ make_standalone_cleanup_delay | default(omit) }}" until: "{{ make_standalone_cleanup_until | default(true) }}" register: "make_standalone_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make 
standalone_cleanup" dry_run: "{{ make_standalone_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_standalone_cleanup_env|default({})), **(make_standalone_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_standalone_snapshot.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_standalo0000644000175000017500000000201715115610204033367 0ustar zuulzuul--- - name: Debug make_standalone_snapshot_env when: make_standalone_snapshot_env is defined ansible.builtin.debug: var: make_standalone_snapshot_env - name: Debug make_standalone_snapshot_params when: make_standalone_snapshot_params is defined ansible.builtin.debug: var: make_standalone_snapshot_params - name: Run standalone_snapshot retries: "{{ make_standalone_snapshot_retries | default(omit) }}" delay: "{{ make_standalone_snapshot_delay | default(omit) }}" until: "{{ make_standalone_snapshot_until | default(true) }}" register: "make_standalone_snapshot_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make standalone_snapshot" dry_run: "{{ make_standalone_snapshot_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_standalone_snapshot_env|default({})), **(make_standalone_snapshot_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_standalone_revert.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_standalo0000644000175000017500000000176115115610204033374 0ustar zuulzuul--- - name: Debug make_standalone_revert_env when: make_standalone_revert_env is defined ansible.builtin.debug: var: make_standalone_revert_env - name: Debug make_standalone_revert_params when: make_standalone_revert_params is defined ansible.builtin.debug: var: make_standalone_revert_params - name: Run standalone_revert retries: "{{ make_standalone_revert_retries | default(omit) }}" delay: "{{ make_standalone_revert_delay | default(omit) }}" until: "{{ make_standalone_revert_until | default(true) }}" register: "make_standalone_revert_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make standalone_revert" dry_run: "{{ make_standalone_revert_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_standalone_revert_env|default({})), **(make_standalone_revert_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cifmw_prepare.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cifmw_pr0000644000175000017500000000166515115610204033400 0ustar zuulzuul--- - name: Debug make_cifmw_prepare_env when: make_cifmw_prepare_env is defined ansible.builtin.debug: var: make_cifmw_prepare_env - name: Debug make_cifmw_prepare_params when: make_cifmw_prepare_params is defined ansible.builtin.debug: var: make_cifmw_prepare_params - name: Run cifmw_prepare retries: "{{ 
make_cifmw_prepare_retries | default(omit) }}" delay: "{{ make_cifmw_prepare_delay | default(omit) }}" until: "{{ make_cifmw_prepare_until | default(true) }}" register: "make_cifmw_prepare_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make cifmw_prepare" dry_run: "{{ make_cifmw_prepare_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_cifmw_prepare_env|default({})), **(make_cifmw_prepare_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cifmw_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_cifmw_cl0000644000175000017500000000166515115610204033355 0ustar zuulzuul--- - name: Debug make_cifmw_cleanup_env when: make_cifmw_cleanup_env is defined ansible.builtin.debug: var: make_cifmw_cleanup_env - name: Debug make_cifmw_cleanup_params when: make_cifmw_cleanup_params is defined ansible.builtin.debug: var: make_cifmw_cleanup_params - name: Run cifmw_cleanup retries: "{{ make_cifmw_cleanup_retries | default(omit) }}" delay: "{{ make_cifmw_cleanup_delay | default(omit) }}" until: "{{ make_cifmw_cleanup_until | default(true) }}" register: "make_cifmw_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make cifmw_cleanup" dry_run: "{{ make_cifmw_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_cifmw_cleanup_env|default({})), **(make_cifmw_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_network.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_ne0000644000175000017500000000166515115610204033337 0ustar zuulzuul--- - name: Debug make_bmaas_network_env when: make_bmaas_network_env is defined ansible.builtin.debug: var: make_bmaas_network_env - name: Debug make_bmaas_network_params when: make_bmaas_network_params is defined ansible.builtin.debug: var: make_bmaas_network_params - name: Run bmaas_network retries: "{{ make_bmaas_network_retries | default(omit) }}" delay: "{{ make_bmaas_network_delay | default(omit) }}" until: "{{ make_bmaas_network_until | default(true) }}" register: "make_bmaas_network_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_network" dry_run: "{{ make_bmaas_network_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_network_env|default({})), **(make_bmaas_network_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_network_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_ne0000644000175000017500000000205515115610204033331 0ustar zuulzuul--- - name: Debug make_bmaas_network_cleanup_env when: make_bmaas_network_cleanup_env is 
defined ansible.builtin.debug: var: make_bmaas_network_cleanup_env - name: Debug make_bmaas_network_cleanup_params when: make_bmaas_network_cleanup_params is defined ansible.builtin.debug: var: make_bmaas_network_cleanup_params - name: Run bmaas_network_cleanup retries: "{{ make_bmaas_network_cleanup_retries | default(omit) }}" delay: "{{ make_bmaas_network_cleanup_delay | default(omit) }}" until: "{{ make_bmaas_network_cleanup_until | default(true) }}" register: "make_bmaas_network_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_network_cleanup" dry_run: "{{ make_bmaas_network_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_network_cleanup_env|default({})), **(make_bmaas_network_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000020700000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_route_crc_and_crc_bmaas_networks.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_ro0000644000175000017500000000245415115610204033352 0ustar zuulzuul--- - name: Debug make_bmaas_route_crc_and_crc_bmaas_networks_env when: make_bmaas_route_crc_and_crc_bmaas_networks_env is defined ansible.builtin.debug: var: make_bmaas_route_crc_and_crc_bmaas_networks_env - name: Debug make_bmaas_route_crc_and_crc_bmaas_networks_params when: make_bmaas_route_crc_and_crc_bmaas_networks_params is defined ansible.builtin.debug: var: make_bmaas_route_crc_and_crc_bmaas_networks_params - name: Run bmaas_route_crc_and_crc_bmaas_networks retries: "{{ make_bmaas_route_crc_and_crc_bmaas_networks_retries | default(omit) }}" delay: "{{ make_bmaas_route_crc_and_crc_bmaas_networks_delay | default(omit) }}" until: "{{ make_bmaas_route_crc_and_crc_bmaas_networks_until | default(true) }}" register: "make_bmaas_route_crc_and_crc_bmaas_networks_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_route_crc_and_crc_bmaas_networks" dry_run: "{{ make_bmaas_route_crc_and_crc_bmaas_networks_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_route_crc_and_crc_bmaas_networks_env|default({})), **(make_bmaas_route_crc_and_crc_bmaas_networks_params|default({}))) }}" ././@LongLink0000644000000000000000000000021700000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_route_crc_and_crc_bmaas_networks_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_ro0000644000175000017500000000264415115610204033353 0ustar zuulzuul--- - name: Debug make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_env when: make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_env is defined ansible.builtin.debug: var: make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_env - name: Debug make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_params when: make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_params is defined ansible.builtin.debug: var: make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_params - name: Run bmaas_route_crc_and_crc_bmaas_networks_cleanup retries: "{{ 
make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_retries | default(omit) }}" delay: "{{ make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_delay | default(omit) }}" until: "{{ make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_until | default(true) }}" register: "make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_route_crc_and_crc_bmaas_networks_cleanup" dry_run: "{{ make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_env|default({})), **(make_bmaas_route_crc_and_crc_bmaas_networks_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_metallb.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_me0000644000175000017500000000166515115610204033336 0ustar zuulzuul--- - name: Debug make_bmaas_metallb_env when: make_bmaas_metallb_env is defined ansible.builtin.debug: var: make_bmaas_metallb_env - name: Debug make_bmaas_metallb_params when: make_bmaas_metallb_params is defined ansible.builtin.debug: var: make_bmaas_metallb_params - name: Run bmaas_metallb retries: "{{ make_bmaas_metallb_retries | default(omit) }}" delay: "{{ make_bmaas_metallb_delay | default(omit) }}" until: "{{ make_bmaas_metallb_until | default(true) }}" register: "make_bmaas_metallb_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_metallb" dry_run: "{{ make_bmaas_metallb_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_metallb_env|default({})), **(make_bmaas_metallb_params|default({}))) }}" ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_crc_attach_network.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_cr0000644000175000017500000000213215115610204033327 0ustar zuulzuul--- - name: Debug make_bmaas_crc_attach_network_env when: make_bmaas_crc_attach_network_env is defined ansible.builtin.debug: var: make_bmaas_crc_attach_network_env - name: Debug make_bmaas_crc_attach_network_params when: make_bmaas_crc_attach_network_params is defined ansible.builtin.debug: var: make_bmaas_crc_attach_network_params - name: Run bmaas_crc_attach_network retries: "{{ make_bmaas_crc_attach_network_retries | default(omit) }}" delay: "{{ make_bmaas_crc_attach_network_delay | default(omit) }}" until: "{{ make_bmaas_crc_attach_network_until | default(true) }}" register: "make_bmaas_crc_attach_network_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_crc_attach_network" dry_run: "{{ make_bmaas_crc_attach_network_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_crc_attach_network_env|default({})), **(make_bmaas_crc_attach_network_params|default({}))) }}" 
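[editorial example] The generated task files above all share one wrapper pattern: optional make_<target>_env and make_<target>_params dictionaries are merged into extra_args, while make_<target>_retries / _delay / _until / _dryrun tune the cifmw.general.ci_script call that runs the corresponding `make` target from install_yamls/devsetup. A minimal sketch of how such a wrapper might be driven from a playbook is given below; the variable names are taken from the files above, but the host name, the include path, and the CRC_NETWORK value are illustrative assumptions and are not part of the captured job output.

---
# Hypothetical usage sketch (not captured from this job):
# set the wrapper variables, then include the generated task file.
- hosts: controller                                  # assumed inventory host
  gather_facts: false
  vars:
    cifmw_basedir: /home/zuul/ci-framework-data
    make_bmaas_crc_attach_network_env:               # merged into extra_args
      CRC_NETWORK: crc-bmaas                         # hypothetical make variable
    make_bmaas_crc_attach_network_retries: 3         # retry the make target up to 3 times
    make_bmaas_crc_attach_network_delay: 10          # wait 10s between retries
    make_bmaas_crc_attach_network_dryrun: false      # actually run, do not only log
  tasks:
    - name: Attach the BMaaS network to the CRC VM
      ansible.builtin.include_tasks: roles/install_yamls_makes/tasks/make_bmaas_crc_attach_network.yml

The included file then registers its result in make_bmaas_crc_attach_network_status, so a follow-up task could inspect that variable if needed.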
././@LongLink0000644000000000000000000000020100000000000011574 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_crc_attach_network_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_cr0000644000175000017500000000232215115610204033330 0ustar zuulzuul--- - name: Debug make_bmaas_crc_attach_network_cleanup_env when: make_bmaas_crc_attach_network_cleanup_env is defined ansible.builtin.debug: var: make_bmaas_crc_attach_network_cleanup_env - name: Debug make_bmaas_crc_attach_network_cleanup_params when: make_bmaas_crc_attach_network_cleanup_params is defined ansible.builtin.debug: var: make_bmaas_crc_attach_network_cleanup_params - name: Run bmaas_crc_attach_network_cleanup retries: "{{ make_bmaas_crc_attach_network_cleanup_retries | default(omit) }}" delay: "{{ make_bmaas_crc_attach_network_cleanup_delay | default(omit) }}" until: "{{ make_bmaas_crc_attach_network_cleanup_until | default(true) }}" register: "make_bmaas_crc_attach_network_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_crc_attach_network_cleanup" dry_run: "{{ make_bmaas_crc_attach_network_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_crc_attach_network_cleanup_env|default({})), **(make_bmaas_crc_attach_network_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000017300000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_crc_baremetal_bridge.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_cr0000644000175000017500000000217015115610204033331 0ustar zuulzuul--- - name: Debug make_bmaas_crc_baremetal_bridge_env when: make_bmaas_crc_baremetal_bridge_env is defined ansible.builtin.debug: var: make_bmaas_crc_baremetal_bridge_env - name: Debug make_bmaas_crc_baremetal_bridge_params when: make_bmaas_crc_baremetal_bridge_params is defined ansible.builtin.debug: var: make_bmaas_crc_baremetal_bridge_params - name: Run bmaas_crc_baremetal_bridge retries: "{{ make_bmaas_crc_baremetal_bridge_retries | default(omit) }}" delay: "{{ make_bmaas_crc_baremetal_bridge_delay | default(omit) }}" until: "{{ make_bmaas_crc_baremetal_bridge_until | default(true) }}" register: "make_bmaas_crc_baremetal_bridge_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_crc_baremetal_bridge" dry_run: "{{ make_bmaas_crc_baremetal_bridge_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_crc_baremetal_bridge_env|default({})), **(make_bmaas_crc_baremetal_bridge_params|default({}))) }}" ././@LongLink0000644000000000000000000000020300000000000011576 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_crc_baremetal_bridge_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_cr0000644000175000017500000000236015115610204033332 0ustar zuulzuul--- - name: Debug make_bmaas_crc_baremetal_bridge_cleanup_env when: make_bmaas_crc_baremetal_bridge_cleanup_env is defined ansible.builtin.debug: var: 
make_bmaas_crc_baremetal_bridge_cleanup_env - name: Debug make_bmaas_crc_baremetal_bridge_cleanup_params when: make_bmaas_crc_baremetal_bridge_cleanup_params is defined ansible.builtin.debug: var: make_bmaas_crc_baremetal_bridge_cleanup_params - name: Run bmaas_crc_baremetal_bridge_cleanup retries: "{{ make_bmaas_crc_baremetal_bridge_cleanup_retries | default(omit) }}" delay: "{{ make_bmaas_crc_baremetal_bridge_cleanup_delay | default(omit) }}" until: "{{ make_bmaas_crc_baremetal_bridge_cleanup_until | default(true) }}" register: "make_bmaas_crc_baremetal_bridge_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_crc_baremetal_bridge_cleanup" dry_run: "{{ make_bmaas_crc_baremetal_bridge_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_crc_baremetal_bridge_cleanup_env|default({})), **(make_bmaas_crc_baremetal_bridge_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000017000000000000011601 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_baremetal_net_nad.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_ba0000644000175000017500000000211315115610204033304 0ustar zuulzuul--- - name: Debug make_bmaas_baremetal_net_nad_env when: make_bmaas_baremetal_net_nad_env is defined ansible.builtin.debug: var: make_bmaas_baremetal_net_nad_env - name: Debug make_bmaas_baremetal_net_nad_params when: make_bmaas_baremetal_net_nad_params is defined ansible.builtin.debug: var: make_bmaas_baremetal_net_nad_params - name: Run bmaas_baremetal_net_nad retries: "{{ make_bmaas_baremetal_net_nad_retries | default(omit) }}" delay: "{{ make_bmaas_baremetal_net_nad_delay | default(omit) }}" until: "{{ make_bmaas_baremetal_net_nad_until | default(true) }}" register: "make_bmaas_baremetal_net_nad_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_baremetal_net_nad" dry_run: "{{ make_bmaas_baremetal_net_nad_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_baremetal_net_nad_env|default({})), **(make_bmaas_baremetal_net_nad_params|default({}))) }}" ././@LongLink0000644000000000000000000000020000000000000011573 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_baremetal_net_nad_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_ba0000644000175000017500000000230315115610204033305 0ustar zuulzuul--- - name: Debug make_bmaas_baremetal_net_nad_cleanup_env when: make_bmaas_baremetal_net_nad_cleanup_env is defined ansible.builtin.debug: var: make_bmaas_baremetal_net_nad_cleanup_env - name: Debug make_bmaas_baremetal_net_nad_cleanup_params when: make_bmaas_baremetal_net_nad_cleanup_params is defined ansible.builtin.debug: var: make_bmaas_baremetal_net_nad_cleanup_params - name: Run bmaas_baremetal_net_nad_cleanup retries: "{{ make_bmaas_baremetal_net_nad_cleanup_retries | default(omit) }}" delay: "{{ make_bmaas_baremetal_net_nad_cleanup_delay | default(omit) }}" until: "{{ make_bmaas_baremetal_net_nad_cleanup_until | default(true) }}" register: "make_bmaas_baremetal_net_nad_cleanup_status" 
cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_baremetal_net_nad_cleanup" dry_run: "{{ make_bmaas_baremetal_net_nad_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_baremetal_net_nad_cleanup_env|default({})), **(make_bmaas_baremetal_net_nad_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_metallb_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_me0000644000175000017500000000205515115610204033330 0ustar zuulzuul--- - name: Debug make_bmaas_metallb_cleanup_env when: make_bmaas_metallb_cleanup_env is defined ansible.builtin.debug: var: make_bmaas_metallb_cleanup_env - name: Debug make_bmaas_metallb_cleanup_params when: make_bmaas_metallb_cleanup_params is defined ansible.builtin.debug: var: make_bmaas_metallb_cleanup_params - name: Run bmaas_metallb_cleanup retries: "{{ make_bmaas_metallb_cleanup_retries | default(omit) }}" delay: "{{ make_bmaas_metallb_cleanup_delay | default(omit) }}" until: "{{ make_bmaas_metallb_cleanup_until | default(true) }}" register: "make_bmaas_metallb_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_metallb_cleanup" dry_run: "{{ make_bmaas_metallb_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_metallb_cleanup_env|default({})), **(make_bmaas_metallb_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_virtual_bms.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_vi0000644000175000017500000000176115115610204033350 0ustar zuulzuul--- - name: Debug make_bmaas_virtual_bms_env when: make_bmaas_virtual_bms_env is defined ansible.builtin.debug: var: make_bmaas_virtual_bms_env - name: Debug make_bmaas_virtual_bms_params when: make_bmaas_virtual_bms_params is defined ansible.builtin.debug: var: make_bmaas_virtual_bms_params - name: Run bmaas_virtual_bms retries: "{{ make_bmaas_virtual_bms_retries | default(omit) }}" delay: "{{ make_bmaas_virtual_bms_delay | default(omit) }}" until: "{{ make_bmaas_virtual_bms_until | default(true) }}" register: "make_bmaas_virtual_bms_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_virtual_bms" dry_run: "{{ make_bmaas_virtual_bms_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_virtual_bms_env|default({})), **(make_bmaas_virtual_bms_params|default({}))) }}" ././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_virtual_bms_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_vi0000644000175000017500000000215115115610204033342 0ustar zuulzuul--- - name: Debug 
make_bmaas_virtual_bms_cleanup_env when: make_bmaas_virtual_bms_cleanup_env is defined ansible.builtin.debug: var: make_bmaas_virtual_bms_cleanup_env - name: Debug make_bmaas_virtual_bms_cleanup_params when: make_bmaas_virtual_bms_cleanup_params is defined ansible.builtin.debug: var: make_bmaas_virtual_bms_cleanup_params - name: Run bmaas_virtual_bms_cleanup retries: "{{ make_bmaas_virtual_bms_cleanup_retries | default(omit) }}" delay: "{{ make_bmaas_virtual_bms_cleanup_delay | default(omit) }}" until: "{{ make_bmaas_virtual_bms_cleanup_until | default(true) }}" register: "make_bmaas_virtual_bms_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_virtual_bms_cleanup" dry_run: "{{ make_bmaas_virtual_bms_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_virtual_bms_cleanup_env|default({})), **(make_bmaas_virtual_bms_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_sushy_emulator.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_su0000644000175000017500000000203615115610204033355 0ustar zuulzuul--- - name: Debug make_bmaas_sushy_emulator_env when: make_bmaas_sushy_emulator_env is defined ansible.builtin.debug: var: make_bmaas_sushy_emulator_env - name: Debug make_bmaas_sushy_emulator_params when: make_bmaas_sushy_emulator_params is defined ansible.builtin.debug: var: make_bmaas_sushy_emulator_params - name: Run bmaas_sushy_emulator retries: "{{ make_bmaas_sushy_emulator_retries | default(omit) }}" delay: "{{ make_bmaas_sushy_emulator_delay | default(omit) }}" until: "{{ make_bmaas_sushy_emulator_until | default(true) }}" register: "make_bmaas_sushy_emulator_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_sushy_emulator" dry_run: "{{ make_bmaas_sushy_emulator_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_sushy_emulator_env|default({})), **(make_bmaas_sushy_emulator_params|default({}))) }}" ././@LongLink0000644000000000000000000000017500000000000011606 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_sushy_emulator_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_su0000644000175000017500000000222615115610204033356 0ustar zuulzuul--- - name: Debug make_bmaas_sushy_emulator_cleanup_env when: make_bmaas_sushy_emulator_cleanup_env is defined ansible.builtin.debug: var: make_bmaas_sushy_emulator_cleanup_env - name: Debug make_bmaas_sushy_emulator_cleanup_params when: make_bmaas_sushy_emulator_cleanup_params is defined ansible.builtin.debug: var: make_bmaas_sushy_emulator_cleanup_params - name: Run bmaas_sushy_emulator_cleanup retries: "{{ make_bmaas_sushy_emulator_cleanup_retries | default(omit) }}" delay: "{{ make_bmaas_sushy_emulator_cleanup_delay | default(omit) }}" until: "{{ make_bmaas_sushy_emulator_cleanup_until | default(true) }}" register: "make_bmaas_sushy_emulator_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ 
'/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_sushy_emulator_cleanup" dry_run: "{{ make_bmaas_sushy_emulator_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_sushy_emulator_cleanup_env|default({})), **(make_bmaas_sushy_emulator_cleanup_params|default({}))) }}" ././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_sushy_emulator_wait.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_su0000644000175000017500000000215115115610204033353 0ustar zuulzuul--- - name: Debug make_bmaas_sushy_emulator_wait_env when: make_bmaas_sushy_emulator_wait_env is defined ansible.builtin.debug: var: make_bmaas_sushy_emulator_wait_env - name: Debug make_bmaas_sushy_emulator_wait_params when: make_bmaas_sushy_emulator_wait_params is defined ansible.builtin.debug: var: make_bmaas_sushy_emulator_wait_params - name: Run bmaas_sushy_emulator_wait retries: "{{ make_bmaas_sushy_emulator_wait_retries | default(omit) }}" delay: "{{ make_bmaas_sushy_emulator_wait_delay | default(omit) }}" until: "{{ make_bmaas_sushy_emulator_wait_until | default(true) }}" register: "make_bmaas_sushy_emulator_wait_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_sushy_emulator_wait" dry_run: "{{ make_bmaas_sushy_emulator_wait_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_sushy_emulator_wait_env|default({})), **(make_bmaas_sushy_emulator_wait_params|default({}))) }}" ././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_generate_nodes_yaml.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_ge0000644000175000017500000000215115115610204033317 0ustar zuulzuul--- - name: Debug make_bmaas_generate_nodes_yaml_env when: make_bmaas_generate_nodes_yaml_env is defined ansible.builtin.debug: var: make_bmaas_generate_nodes_yaml_env - name: Debug make_bmaas_generate_nodes_yaml_params when: make_bmaas_generate_nodes_yaml_params is defined ansible.builtin.debug: var: make_bmaas_generate_nodes_yaml_params - name: Run bmaas_generate_nodes_yaml retries: "{{ make_bmaas_generate_nodes_yaml_retries | default(omit) }}" delay: "{{ make_bmaas_generate_nodes_yaml_delay | default(omit) }}" until: "{{ make_bmaas_generate_nodes_yaml_until | default(true) }}" register: "make_bmaas_generate_nodes_yaml_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_generate_nodes_yaml" dry_run: "{{ make_bmaas_generate_nodes_yaml_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_generate_nodes_yaml_env|default({})), **(make_bmaas_generate_nodes_yaml_params|default({}))) }}" ././@LongLink0000644000000000000000000000014600000000000011604 Lustar 
rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas.ym0000644000175000017500000000147515115610204033300 0ustar zuulzuul--- - name: Debug make_bmaas_env when: make_bmaas_env is defined ansible.builtin.debug: var: make_bmaas_env - name: Debug make_bmaas_params when: make_bmaas_params is defined ansible.builtin.debug: var: make_bmaas_params - name: Run bmaas retries: "{{ make_bmaas_retries | default(omit) }}" delay: "{{ make_bmaas_delay | default(omit) }}" until: "{{ make_bmaas_until | default(true) }}" register: "make_bmaas_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas" dry_run: "{{ make_bmaas_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_env|default({})), **(make_bmaas_params|default({}))) }}" ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_cleanup.ymlhome/zuul/zuul-output/logs/ci-framework-data/artifacts/roles/install_yamls_makes/tasks/make_bmaas_cl0000644000175000017500000000166515115610204033333 0ustar zuulzuul--- - name: Debug make_bmaas_cleanup_env when: make_bmaas_cleanup_env is defined ansible.builtin.debug: var: make_bmaas_cleanup_env - name: Debug make_bmaas_cleanup_params when: make_bmaas_cleanup_params is defined ansible.builtin.debug: var: make_bmaas_cleanup_params - name: Run bmaas_cleanup retries: "{{ make_bmaas_cleanup_retries | default(omit) }}" delay: "{{ make_bmaas_cleanup_delay | default(omit) }}" until: "{{ make_bmaas_cleanup_until | default(true) }}" register: "make_bmaas_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/devsetup" script: "make bmaas_cleanup" dry_run: "{{ make_bmaas_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_bmaas_cleanup_env|default({})), **(make_bmaas_cleanup_params|default({}))) }}" home/zuul/zuul-output/logs/ci-framework-data/artifacts/repositories/0000755000175000017500000000000015115611523025072 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/repositories/delorean-antelope-testing.repo0000644000175000017500000000317215115610134033032 0ustar zuulzuul[delorean-antelope-testing] name=dlrn-antelope-testing baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/deps/latest/ enabled=1 gpgcheck=0 module_hotfixes=1 [delorean-antelope-build-deps] name=dlrn-antelope-build-deps baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/build-deps/latest/ enabled=1 gpgcheck=0 module_hotfixes=1 [centos9-rabbitmq] name=centos9-rabbitmq baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/SIGs/9-stream/messaging/$basearch/rabbitmq-38/ enabled=1 gpgcheck=0 module_hotfixes=1 [centos9-storage] name=centos9-storage baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/SIGs/9-stream/storage/$basearch/ceph-reef/ enabled=1 gpgcheck=0 module_hotfixes=1 [centos9-opstools] name=centos9-opstools 
baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/SIGs/9-stream/opstools/$basearch/collectd-5/ enabled=1 gpgcheck=0 module_hotfixes=1 [centos9-nfv-ovs] name=NFV SIG OpenvSwitch baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/SIGs/9-stream/nfv/$basearch/openvswitch-2/ gpgcheck=0 enabled=1 module_hotfixes=1 # epel is required for Ceph Reef [epel-low-priority] name=Extra Packages for Enterprise Linux $releasever - $basearch metalink=https://mirrors.fedoraproject.org/metalink?repo=epel-$releasever&arch=$basearch&infra=$infra&content=$contentdir enabled=1 gpgcheck=0 countme=1 priority=100 includepkgs=libarrow*,parquet*,python3-asyncssh,re2,python3-grpcio,grpc*,abseil*,thrift*,blake3 home/zuul/zuul-output/logs/ci-framework-data/artifacts/repositories/delorean.repo0000644000175000017500000001341515115610134027553 0ustar zuulzuul[delorean-component-barbican] name=delorean-openstack-barbican-42b4c41831408a8e323fec3c8983b5c793b64874 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/barbican/42/b4/42b4c41831408a8e323fec3c8983b5c793b64874_08052e9d enabled=1 gpgcheck=0 priority=1 [delorean-component-baremetal] name=delorean-python-glean-10df0bd91b9bc5c9fd9cc02d75c0084cd4da29a7 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/baremetal/10/df/10df0bd91b9bc5c9fd9cc02d75c0084cd4da29a7_36137eb3 enabled=1 gpgcheck=0 priority=1 [delorean-component-cinder] name=delorean-openstack-cinder-1c00d6490d88e436f26efb71f2ac96e75252e97c baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/cinder/1c/00/1c00d6490d88e436f26efb71f2ac96e75252e97c_f716f000 enabled=1 gpgcheck=0 priority=1 [delorean-component-clients] name=delorean-python-stevedore-c4acc5639fd2329372142e39464fcca0209b0018 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/clients/c4/ac/c4acc5639fd2329372142e39464fcca0209b0018_d3ef8337 enabled=1 gpgcheck=0 priority=1 [delorean-component-cloudops] name=delorean-python-cloudkitty-tests-tempest-2c80f80e02c5accd099187ea762c8f8389bd7905 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/cloudops/2c/80/2c80f80e02c5accd099187ea762c8f8389bd7905_33e4dd93 enabled=1 gpgcheck=0 priority=1 [delorean-component-common] name=delorean-os-refresh-config-9bfc52b5049be2d8de6134d662fdde9dfa48960f baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/common/9b/fc/9bfc52b5049be2d8de6134d662fdde9dfa48960f_b85780e6 enabled=1 gpgcheck=0 priority=1 [delorean-component-compute] name=delorean-openstack-nova-6f8decf0b4f1aa2e96292b6a2ffc28249fe4af5e baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/compute/6f/8d/6f8decf0b4f1aa2e96292b6a2ffc28249fe4af5e_dc05b899 enabled=1 gpgcheck=0 priority=1 [delorean-component-designate] name=delorean-python-designate-tests-tempest-347fdbc9b4595a10b726526b3c0b5928e5b7fcf2 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/designate/34/7f/347fdbc9b4595a10b726526b3c0b5928e5b7fcf2_3fd39337 enabled=1 gpgcheck=0 priority=1 [delorean-component-glance] name=delorean-openstack-glance-1fd12c29b339f30fe823e2b5beba14b5f241e52a 
baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/glance/1f/d1/1fd12c29b339f30fe823e2b5beba14b5f241e52a_0d693729 enabled=1 gpgcheck=0 priority=1 [delorean-component-keystone] name=delorean-openstack-keystone-e4b40af0ae3698fbbbbfb8c22468b33aae80e6d7 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/keystone/e4/b4/e4b40af0ae3698fbbbbfb8c22468b33aae80e6d7_264c03cc enabled=1 gpgcheck=0 priority=1 [delorean-component-manila] name=delorean-openstack-manila-3c01b7181572c95dac462eb19c3121e36cb0fe95 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/manila/3c/01/3c01b7181572c95dac462eb19c3121e36cb0fe95_912dfd18 enabled=1 gpgcheck=0 priority=1 [delorean-component-network] name=delorean-python-whitebox-neutron-tests-tempest-12cf06ce36a79a584fc757f4c25ff96845573c93 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/network/12/cf/12cf06ce36a79a584fc757f4c25ff96845573c93_3ed3aba3 enabled=1 gpgcheck=0 priority=1 [delorean-component-octavia] name=delorean-openstack-octavia-ba397f07a7331190208c93368ee23826ac4e2707 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/octavia/ba/39/ba397f07a7331190208c93368ee23826ac4e2707_9d6e596a enabled=1 gpgcheck=0 priority=1 [delorean-component-optimize] name=delorean-openstack-watcher-c014f81a8647287f6dcc339321c1256f5a2e82d5 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/optimize/c0/14/c014f81a8647287f6dcc339321c1256f5a2e82d5_bcbfdccc enabled=1 gpgcheck=0 priority=1 [delorean-component-podified] name=delorean-ansible-config_template-5ccaa22121a7ff05620975540d81f6efb077d8db baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/podified/5c/ca/5ccaa22121a7ff05620975540d81f6efb077d8db_83eb7cc2 enabled=1 gpgcheck=0 priority=1 [delorean-component-puppet] name=delorean-puppet-ceph-7352068d7b8c84ded636ab3158dafa6f3851951e baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/puppet/73/52/7352068d7b8c84ded636ab3158dafa6f3851951e_7cde1ad1 enabled=1 gpgcheck=0 priority=1 [delorean-component-swift] name=delorean-openstack-swift-dc98a8463506ac520c469adb0ef47d0f7753905a baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/swift/dc/98/dc98a8463506ac520c469adb0ef47d0f7753905a_9d02f069 enabled=1 gpgcheck=0 priority=1 [delorean-component-tempest] name=delorean-python-tempestconf-8515371b7cceebd4282e09f1d8f0cc842df82855 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/tempest/85/15/8515371b7cceebd4282e09f1d8f0cc842df82855_a1e336c7 enabled=1 gpgcheck=0 priority=1 [delorean-component-ui] name=delorean-openstack-heat-ui-013accbfd179753bc3f0d1f4e5bed07a4fd9f771 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/ui/01/3a/013accbfd179753bc3f0d1f4e5bed07a4fd9f771_0c88e467 enabled=1 gpgcheck=0 priority=1 home/zuul/zuul-output/logs/ci-framework-data/artifacts/repositories/repo-setup-centos-appstream.repo0000644000175000017500000000031615115610134033344 0ustar zuulzuul [repo-setup-centos-appstream] name=repo-setup-centos-appstream 
baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/9-stream/AppStream/$basearch/os/ gpgcheck=0 enabled=1 home/zuul/zuul-output/logs/ci-framework-data/artifacts/repositories/repo-setup-centos-baseos.repo0000644000175000017500000000030415115610134032621 0ustar zuulzuul [repo-setup-centos-baseos] name=repo-setup-centos-baseos baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/9-stream/BaseOS/$basearch/os/ gpgcheck=0 enabled=1 ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/repositories/repo-setup-centos-highavailability.repohome/zuul/zuul-output/logs/ci-framework-data/artifacts/repositories/repo-setup-centos-highavailabili0000644000175000017500000000034215115610134033340 0ustar zuulzuul [repo-setup-centos-highavailability] name=repo-setup-centos-highavailability baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/9-stream/HighAvailability/$basearch/os/ gpgcheck=0 enabled=1 ././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/repositories/repo-setup-centos-powertools.repohome/zuul/zuul-output/logs/ci-framework-data/artifacts/repositories/repo-setup-centos-powertools.rep0000644000175000017500000000031115115610134033401 0ustar zuulzuul [repo-setup-centos-powertools] name=repo-setup-centos-powertools baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/9-stream/CRB/$basearch/os/ gpgcheck=0 enabled=1 home/zuul/zuul-output/logs/ci-framework-data/artifacts/repositories/delorean.repo.md50000644000175000017500000000004115115610133030225 0ustar zuulzuulc3923531bcda0b0811b2d5053f189beb home/zuul/zuul-output/logs/ci-framework-data/artifacts/ansible-facts.yml0000644000175000017500000004657615115611270025622 0ustar zuulzuul_ansible_facts_gathered: true all_ipv4_addresses: - 38.102.83.251 all_ipv6_addresses: - fe80::f816:3eff:fe97:c9c3 ansible_local: {} apparmor: status: disabled architecture: x86_64 bios_date: 04/01/2014 bios_vendor: SeaBIOS bios_version: 1.15.0-1 board_asset_tag: NA board_name: NA board_serial: NA board_vendor: NA board_version: NA chassis_asset_tag: NA chassis_serial: NA chassis_vendor: QEMU chassis_version: pc-i440fx-6.2 cmdline: BOOT_IMAGE: (hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 console: ttyS0,115200n8 crashkernel: 1G-2G:192M,2G-64G:256M,64G-:512M net.ifnames: '0' no_timer_check: true ro: true root: UUID=fcf6b761-831a-48a7-9f5f-068b5063763f crc_ci_bootstrap_instance_default_net_config: mtu: 1500 range: 192.168.122.0/24 crc_ci_bootstrap_instance_nm_vlan_networks: - key: internal-api value: ip: 172.17.0.5 - key: storage value: ip: 172.18.0.5 - key: tenant value: ip: 172.19.0.5 crc_ci_bootstrap_instance_parent_port_create_yaml: admin_state_up: true allowed_address_pairs: [] binding_host_id: null binding_profile: {} binding_vif_details: {} binding_vif_type: null binding_vnic_type: normal created_at: '2025-12-08T17:39:31Z' data_plane_status: null description: '' device_id: '' device_owner: '' device_profile: null dns_assignment: - fqdn: host-192-168-122-10.openstacklocal. 
hostname: host-192-168-122-10 ip_address: 192.168.122.10 dns_domain: '' dns_name: '' extra_dhcp_opts: [] fixed_ips: - ip_address: 192.168.122.10 subnet_id: 1ec71021-8196-48c3-b107-9041e6f5f679 hardware_offload_type: null hints: '' id: d37cddfa-716b-4541-992b-5180463c6809 ip_allocation: immediate mac_address: fa:16:3e:e6:79:2f name: crc-32c1a977-c4dc-4b4f-b307-ff2a2f4e57f1 network_id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb numa_affinity_policy: null port_security_enabled: false project_id: 4b633c451ac74233be3721a3635275e5 propagate_uplink_status: null qos_network_policy_id: null qos_policy_id: null resource_request: null revision_number: 1 security_group_ids: [] status: DOWN tags: [] trunk_details: null trusted: null updated_at: '2025-12-08T17:39:31Z' crc_ci_bootstrap_network_name: zuul-ci-net-fdae5567 crc_ci_bootstrap_networks_out: controller: default: connection: ci-private-network gw: 192.168.122.1 iface: eth1 ip: 192.168.122.11/24 mac: fa:16:3e:6a:de:3b mtu: 1500 crc: default: connection: ci-private-network gw: 192.168.122.1 iface: ens7 ip: 192.168.122.10/24 mac: fa:16:3e:e6:79:2f mtu: 1500 internal-api: connection: ci-private-network-20 iface: ens7.20 ip: 172.17.0.5/24 mac: 52:54:00:84:35:42 mtu: '1496' parent_iface: ens7 vlan: 20 storage: connection: ci-private-network-21 iface: ens7.21 ip: 172.18.0.5/24 mac: 52:54:00:9c:32:12 mtu: '1496' parent_iface: ens7 vlan: 21 tenant: connection: ci-private-network-22 iface: ens7.22 ip: 172.19.0.5/24 mac: 52:54:00:ec:da:21 mtu: '1496' parent_iface: ens7 vlan: 22 crc_ci_bootstrap_private_net_create_yaml: admin_state_up: true availability_zone_hints: - nova availability_zones: [] created_at: '2025-12-08T17:38:55Z' description: '' dns_domain: '' id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb ipv4_address_scope: null ipv6_address_scope: null is_default: false is_vlan_qinq: null is_vlan_transparent: false l2_adjacency: true mtu: 1500 name: zuul-ci-net-fdae5567 port_security_enabled: false project_id: 4b633c451ac74233be3721a3635275e5 provider:network_type: null provider:physical_network: null provider:segmentation_id: null qos_policy_id: null revision_number: 1 router:external: false segments: null shared: false status: ACTIVE subnets: [] tags: [] updated_at: '2025-12-08T17:38:55Z' crc_ci_bootstrap_private_router_create_yaml: admin_state_up: true availability_zone_hints: - nova availability_zones: [] created_at: '2025-12-08T17:39:01Z' description: '' enable_ndp_proxy: null external_gateway_info: enable_snat: true external_fixed_ips: - ip_address: 38.102.83.145 subnet_id: 3169b11b-94b1-4bc9-9727-4fdbbe15e56e network_id: 7abff1a9-a103-46d0-979a-1f1e599f4f41 flavor_id: null id: edc0cd0e-14dd-4588-9e19-d36444fdd18f name: zuul-ci-subnet-router-fdae5567 project_id: 4b633c451ac74233be3721a3635275e5 revision_number: 3 routes: [] status: ACTIVE tags: [] tenant_id: 4b633c451ac74233be3721a3635275e5 updated_at: '2025-12-08T17:39:02Z' crc_ci_bootstrap_private_subnet_create_yaml: allocation_pools: - end: 192.168.122.254 start: 192.168.122.2 cidr: 192.168.122.0/24 created_at: '2025-12-08T17:38:58Z' description: '' dns_nameservers: [] dns_publish_fixed_ip: null enable_dhcp: false gateway_ip: 192.168.122.1 host_routes: [] id: 1ec71021-8196-48c3-b107-9041e6f5f679 ip_version: 4 ipv6_address_mode: null ipv6_ra_mode: null name: zuul-ci-subnet-fdae5567 network_id: 95e8cd7e-bf63-489a-8e22-f62ac0e185fb project_id: 4b633c451ac74233be3721a3635275e5 revision_number: 0 segment_id: null service_types: [] subnetpool_id: null tags: [] updated_at: '2025-12-08T17:38:58Z' 
crc_ci_bootstrap_provider_dns: - 199.204.44.24 - 199.204.47.54 crc_ci_bootstrap_router_name: zuul-ci-subnet-router-fdae5567 crc_ci_bootstrap_subnet_name: zuul-ci-subnet-fdae5567 date_time: date: '2025-12-08' day: 08 epoch: '1765216950' epoch_int: '1765216950' hour: '18' iso8601: '2025-12-08T18:02:30Z' iso8601_basic: 20251208T180230661772 iso8601_basic_short: 20251208T180230 iso8601_micro: '2025-12-08T18:02:30.661772Z' minute: '02' month: '12' second: '30' time: '18:02:30' tz: UTC tz_dst: UTC tz_offset: '+0000' weekday: Monday weekday_number: '1' weeknumber: '49' year: '2025' default_ipv4: address: 38.102.83.251 alias: eth0 broadcast: 38.102.83.255 gateway: 38.102.83.1 interface: eth0 macaddress: fa:16:3e:97:c9:c3 mtu: 1500 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' type: ether default_ipv6: {} device_links: ids: sr0: - ata-QEMU_DVD-ROM_QM00001 labels: sr0: - config-2 masters: {} uuids: sr0: - 2025-12-08-17-34-40-00 vda1: - fcf6b761-831a-48a7-9f5f-068b5063763f devices: sr0: holders: [] host: '' links: ids: - ata-QEMU_DVD-ROM_QM00001 labels: - config-2 masters: [] uuids: - 2025-12-08-17-34-40-00 model: QEMU DVD-ROM partitions: {} removable: '1' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '964' sectorsize: '2048' size: 482.00 KB support_discard: '2048' vendor: QEMU virtual: 1 vda: holders: [] host: '' links: ids: [] labels: [] masters: [] uuids: [] model: null partitions: vda1: holders: [] links: ids: [] labels: [] masters: [] uuids: - fcf6b761-831a-48a7-9f5f-068b5063763f sectors: '167770079' sectorsize: 512 size: 80.00 GB start: '2048' uuid: fcf6b761-831a-48a7-9f5f-068b5063763f removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '167772160' sectorsize: '512' size: 80.00 GB support_discard: '512' vendor: '0x1af4' virtual: 1 discovered_interpreter_python: /usr/bin/python3 distribution: CentOS distribution_file_parsed: true distribution_file_path: /etc/centos-release distribution_file_variety: CentOS distribution_major_version: '9' distribution_release: Stream distribution_version: '9' dns: nameservers: - 192.168.122.10 - 199.204.44.24 - 199.204.47.54 domain: '' effective_group_id: 1000 effective_user_id: 1000 env: ANSIBLE_LOG_PATH: /home/zuul/ci-framework-data/logs/e2e-collect-logs-must-gather.log BASH_FUNC_which%%: "() { ( alias;\n eval ${which_declare} ) | /usr/bin/which --tty-only --read-alias --read-functions --show-tilde --show-dot $@\n}" DBUS_SESSION_BUS_ADDRESS: unix:path=/run/user/1000/bus DEBUGINFOD_IMA_CERT_PATH: '/etc/keys/ima:' DEBUGINFOD_URLS: 'https://debuginfod.centos.org/ ' HOME: /home/zuul KUBECONFIG: /home/zuul/.crc/machines/crc/kubeconfig LANG: en_US.UTF-8 LESSOPEN: '||/usr/bin/lesspipe.sh %s' LOGNAME: zuul MOTD_SHOWN: pam PATH: /home/zuul/.local/bin:/home/zuul/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin PWD: /home/zuul SELINUX_LEVEL_REQUESTED: '' SELINUX_ROLE_REQUESTED: '' SELINUX_USE_CURRENT_RANGE: '' SHELL: /bin/bash SHLVL: '1' SSH_CLIENT: 38.102.83.114 38794 22 SSH_CONNECTION: 38.102.83.114 38794 38.102.83.251 22 USER: zuul XDG_RUNTIME_DIR: /run/user/1000 XDG_SESSION_CLASS: user XDG_SESSION_ID: '17' XDG_SESSION_TYPE: tty _: /usr/bin/python3 which_declare: declare -f eth0: active: true device: eth0 features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] 
hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: off [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: 'on' rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: on [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: 'on' tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: off [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] tx_gre_segmentation: off [fixed] tx_gso_list: off [fixed] tx_gso_partial: off [fixed] tx_gso_robust: on [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: 'off' tx_scatter_gather: 'on' tx_scatter_gather_fraglist: off [fixed] tx_sctp_segmentation: off [fixed] tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'off' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: off [fixed] tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: off [fixed] hw_timestamp_filters: [] ipv4: address: 38.102.83.251 broadcast: 38.102.83.255 netmask: 255.255.255.0 network: 38.102.83.0 prefix: '24' ipv6: - address: fe80::f816:3eff:fe97:c9c3 prefix: '64' scope: link macaddress: fa:16:3e:97:c9:c3 module: virtio_net mtu: 1500 pciid: virtio1 promisc: false speed: -1 timestamping: [] type: ether fibre_channel_wwn: [] fips: false form_factor: Other fqdn: controller gather_subset: - min hostname: controller hostnqn: nqn.2014-08.org.nvmexpress:uuid:bf3e0a14-a5f8-4123-aa26-e7cad37b879a interfaces: - lo - eth0 is_chroot: false iscsi_iqn: '' kernel: 5.14.0-645.el9.x86_64 kernel_version: '#1 SMP PREEMPT_DYNAMIC Fri Nov 28 14:01:17 UTC 2025' lo: active: true device: lo features: esp_hw_offload: off [fixed] esp_tx_csum_hw_offload: off [fixed] generic_receive_offload: 'on' generic_segmentation_offload: 'on' highdma: on [fixed] hsr_dup_offload: off [fixed] hsr_fwd_offload: off [fixed] hsr_tag_ins_offload: off [fixed] hsr_tag_rm_offload: off [fixed] hw_tc_offload: off [fixed] l2_fwd_offload: off [fixed] large_receive_offload: off [fixed] loopback: on [fixed] macsec_hw_offload: off [fixed] ntuple_filters: off [fixed] receive_hashing: off [fixed] rx_all: off [fixed] rx_checksumming: on [fixed] rx_fcs: off [fixed] rx_gro_hw: off [fixed] rx_gro_list: 'off' rx_udp_gro_forwarding: 'off' rx_udp_tunnel_port_offload: off [fixed] rx_vlan_filter: off [fixed] rx_vlan_offload: off [fixed] rx_vlan_stag_filter: off [fixed] rx_vlan_stag_hw_parse: off [fixed] scatter_gather: 'on' tcp_segmentation_offload: 'on' tls_hw_record: off [fixed] tls_hw_rx_offload: off [fixed] tls_hw_tx_offload: off [fixed] tx_checksum_fcoe_crc: off [fixed] tx_checksum_ip_generic: on [fixed] tx_checksum_ipv4: off [fixed] tx_checksum_ipv6: off [fixed] tx_checksum_sctp: on [fixed] tx_checksumming: 'on' tx_esp_segmentation: off [fixed] tx_fcoe_segmentation: off [fixed] tx_gre_csum_segmentation: off [fixed] 
tx_gre_segmentation: off [fixed] tx_gso_list: 'on' tx_gso_partial: off [fixed] tx_gso_robust: off [fixed] tx_ipxip4_segmentation: off [fixed] tx_ipxip6_segmentation: off [fixed] tx_nocache_copy: off [fixed] tx_scatter_gather: on [fixed] tx_scatter_gather_fraglist: on [fixed] tx_sctp_segmentation: 'on' tx_tcp6_segmentation: 'on' tx_tcp_ecn_segmentation: 'on' tx_tcp_mangleid_segmentation: 'on' tx_tcp_segmentation: 'on' tx_tunnel_remcsum_segmentation: off [fixed] tx_udp_segmentation: 'on' tx_udp_tnl_csum_segmentation: off [fixed] tx_udp_tnl_segmentation: off [fixed] tx_vlan_offload: off [fixed] tx_vlan_stag_hw_insert: off [fixed] vlan_challenged: on [fixed] hw_timestamp_filters: [] ipv4: address: 127.0.0.1 broadcast: '' netmask: 255.0.0.0 network: 127.0.0.0 prefix: '8' ipv6: - address: ::1 prefix: '128' scope: host mtu: 65536 promisc: false timestamping: [] type: loopback loadavg: 15m: 0.3 1m: 0.55 5m: 0.68 locally_reachable_ips: ipv4: - 38.102.83.251 - 127.0.0.0/8 - 127.0.0.1 ipv6: - ::1 - fe80::f816:3eff:fe97:c9c3 lsb: {} lvm: N/A machine: x86_64 machine_id: 4d4ef2323cc3337bbfd9081b2a323b4e memfree_mb: 7161 memory_mb: nocache: free: 7372 used: 308 real: free: 7161 total: 7680 used: 519 swap: cached: 0 free: 0 total: 0 used: 0 memtotal_mb: 7680 module_setup: true mounts: - block_available: 20337100 block_size: 4096 block_total: 20954875 block_used: 617775 device: /dev/vda1 fstype: xfs inode_available: 41888405 inode_total: 41942512 inode_used: 54107 mount: / options: rw,seclabel,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota size_available: 83300761600 size_total: 85831168000 uuid: fcf6b761-831a-48a7-9f5f-068b5063763f nodename: controller os_family: RedHat pkg_mgr: dnf proc_cmdline: BOOT_IMAGE: (hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 console: ttyS0,115200n8 crashkernel: 1G-2G:192M,2G-64G:256M,64G-:512M net.ifnames: '0' no_timer_check: true ro: true root: UUID=fcf6b761-831a-48a7-9f5f-068b5063763f processor: - '0' - AuthenticAMD - AMD EPYC-Rome Processor - '1' - AuthenticAMD - AMD EPYC-Rome Processor - '2' - AuthenticAMD - AMD EPYC-Rome Processor - '3' - AuthenticAMD - AMD EPYC-Rome Processor - '4' - AuthenticAMD - AMD EPYC-Rome Processor - '5' - AuthenticAMD - AMD EPYC-Rome Processor - '6' - AuthenticAMD - AMD EPYC-Rome Processor - '7' - AuthenticAMD - AMD EPYC-Rome Processor processor_cores: 1 processor_count: 8 processor_nproc: 8 processor_threads_per_core: 1 processor_vcpus: 8 product_name: OpenStack Nova product_serial: NA product_uuid: NA product_version: 26.3.1 python: executable: /usr/bin/python3 has_sslcontext: true type: cpython version: major: 3 micro: 25 minor: 9 releaselevel: final serial: 0 version_info: - 3 - 9 - 25 - final - 0 python_version: 3.9.25 real_group_id: 1000 real_user_id: 1000 selinux: config_mode: enforcing mode: enforcing policyvers: 33 status: enabled type: targeted selinux_python_present: true service_mgr: systemd ssh_host_key_ecdsa_public: AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOvQreKGmvEG1vi8GvwFBqECdihQVE6tUBzDanz/Lcee9GvGa+tH+Ub+xqX7rB/yRnjc8CJIJovHO3uwatRboZQ= ssh_host_key_ecdsa_public_keytype: ecdsa-sha2-nistp256 ssh_host_key_ed25519_public: AAAAC3NzaC1lZDI1NTE5AAAAIPeGQ/QINrFqQK52g8hKIwxs8VQj2W/JGaf9zdH9cBm2 ssh_host_key_ed25519_public_keytype: ssh-ed25519 ssh_host_key_rsa_public: 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCrMWhS0sfa8MFM6z46N9s5KdkDDfqBTBpmkqh+k0riEuOWyruqZ/IooEFKeQXaWr/u2j++Erw7byk1nZ2/1inxp5GHPD3tjMz1FjfMlsMM17kkMF8J45E52gQj2JzJS93rFYtLMkLQt6ydCYf8csUaQJz4YGv66NoK1WXUFkxSW12stZQyIjr7FHdmQ9o1VG6PeVlvovTjZdIDOrs2uyx3QLKn/3ZvZBR0nNCGXPAtVoyf4oV/JWSKdX0XOcgkV4QyD4B3CiLstDl04Q6XY8pkzc850JzuMo4L6IQoiI//65VAvU9EWiduDcC6Bb2UqYy5iwuJFLa6Qei0hCq5tk00PSx9JjT+rVhoTJveLD0GlQk2blm+bCOKdHDM87Eh/CiVxhUJhsbkp7ASUwcd1In/Ayr37VyWSHlbW7SDd9G5aQvRd7mOx6JYU5j+j8dmvku5+mmMisaik3SYrgImXY/Agd7BOsZD1BfRvPcqACsgYymCPzDxVVOGYD3Tt5poSUs= ssh_host_key_rsa_public_keytype: ssh-rsa swapfree_mb: 0 swaptotal_mb: 0 system: Linux system_capabilities: - '' system_capabilities_enforced: 'True' system_vendor: OpenStack Foundation uptime_seconds: 192 user_dir: /home/zuul user_gecos: '' user_gid: 1000 user_id: zuul user_shell: /bin/bash user_uid: 1000 userspace_architecture: x86_64 userspace_bits: '64' virtualization_role: guest virtualization_tech_guest: - openstack virtualization_tech_host: - kvm virtualization_type: openstack zuul_change_list: - service-telemetry-operator home/zuul/zuul-output/logs/ci-framework-data/artifacts/NetworkManager/0000755000175000017500000000000015115611523025267 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/NetworkManager/ens3.nmconnection0000644000175000017500000000026215115611271030553 0ustar zuulzuul[connection] id=ens3 uuid=cdd49431-3a3b-42fa-9cb4-78f29c7937bd type=ethernet interface-name=ens3 [ethernet] [ipv4] method=auto [ipv6] addr-gen-mode=eui64 method=auto [proxy] ././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/NetworkManager/ci-private-network.nmconnectionhome/zuul/zuul-output/logs/ci-framework-data/artifacts/NetworkManager/ci-private-network.nmconnectio0000644000175000017500000000051315115611271033256 0ustar zuulzuul[connection] id=ci-private-network uuid=0d5ca6b6-04c7-5ecb-9f82-5436020a1168 type=ethernet autoconnect=true interface-name=eth1 [ethernet] mac-address=fa:16:3e:6a:de:3b mtu=1500 [ipv4] method=manual addresses=192.168.122.11/24 never-default=true gateway=192.168.122.1 [ipv6] addr-gen-mode=stable-privacy method=disabled [proxy] home/zuul/zuul-output/logs/ci-framework-data/artifacts/yum_repos/0000755000175000017500000000000015115611523024365 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/yum_repos/repo-setup-centos-baseos.repo0000644000175000017500000000030415115611271032117 0ustar zuulzuul [repo-setup-centos-baseos] name=repo-setup-centos-baseos baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/9-stream/BaseOS/$basearch/os/ gpgcheck=0 enabled=1 ././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootroothome/zuul/zuul-output/logs/ci-framework-data/artifacts/yum_repos/repo-setup-centos-highavailability.repohome/zuul/zuul-output/logs/ci-framework-data/artifacts/yum_repos/repo-setup-centos-highavailability.0000644000175000017500000000034215115611271033271 0ustar zuulzuul [repo-setup-centos-highavailability] name=repo-setup-centos-highavailability baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/9-stream/HighAvailability/$basearch/os/ gpgcheck=0 enabled=1 home/zuul/zuul-output/logs/ci-framework-data/artifacts/yum_repos/repo-setup-centos-powertools.repo0000644000175000017500000000031115115611271033056 0ustar zuulzuul [repo-setup-centos-powertools] name=repo-setup-centos-powertools 
baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/9-stream/CRB/$basearch/os/ gpgcheck=0 enabled=1 home/zuul/zuul-output/logs/ci-framework-data/artifacts/yum_repos/delorean.repo.md50000644000175000017500000000004115115611271027524 0ustar zuulzuulc3923531bcda0b0811b2d5053f189beb home/zuul/zuul-output/logs/ci-framework-data/artifacts/yum_repos/repo-setup-centos-appstream.repo0000644000175000017500000000031615115611271032642 0ustar zuulzuul [repo-setup-centos-appstream] name=repo-setup-centos-appstream baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/9-stream/AppStream/$basearch/os/ gpgcheck=0 enabled=1 home/zuul/zuul-output/logs/ci-framework-data/artifacts/yum_repos/delorean-antelope-testing.repo0000644000175000017500000000317215115611271032330 0ustar zuulzuul[delorean-antelope-testing] name=dlrn-antelope-testing baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/deps/latest/ enabled=1 gpgcheck=0 module_hotfixes=1 [delorean-antelope-build-deps] name=dlrn-antelope-build-deps baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/build-deps/latest/ enabled=1 gpgcheck=0 module_hotfixes=1 [centos9-rabbitmq] name=centos9-rabbitmq baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/SIGs/9-stream/messaging/$basearch/rabbitmq-38/ enabled=1 gpgcheck=0 module_hotfixes=1 [centos9-storage] name=centos9-storage baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/SIGs/9-stream/storage/$basearch/ceph-reef/ enabled=1 gpgcheck=0 module_hotfixes=1 [centos9-opstools] name=centos9-opstools baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/SIGs/9-stream/opstools/$basearch/collectd-5/ enabled=1 gpgcheck=0 module_hotfixes=1 [centos9-nfv-ovs] name=NFV SIG OpenvSwitch baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org/centos-stream/SIGs/9-stream/nfv/$basearch/openvswitch-2/ gpgcheck=0 enabled=1 module_hotfixes=1 # epel is required for Ceph Reef [epel-low-priority] name=Extra Packages for Enterprise Linux $releasever - $basearch metalink=https://mirrors.fedoraproject.org/metalink?repo=epel-$releasever&arch=$basearch&infra=$infra&content=$contentdir enabled=1 gpgcheck=0 countme=1 priority=100 includepkgs=libarrow*,parquet*,python3-asyncssh,re2,python3-grpcio,grpc*,abseil*,thrift*,blake3 home/zuul/zuul-output/logs/ci-framework-data/artifacts/yum_repos/delorean.repo0000644000175000017500000001341515115611271027051 0ustar zuulzuul[delorean-component-barbican] name=delorean-openstack-barbican-42b4c41831408a8e323fec3c8983b5c793b64874 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/barbican/42/b4/42b4c41831408a8e323fec3c8983b5c793b64874_08052e9d enabled=1 gpgcheck=0 priority=1 [delorean-component-baremetal] name=delorean-python-glean-10df0bd91b9bc5c9fd9cc02d75c0084cd4da29a7 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/baremetal/10/df/10df0bd91b9bc5c9fd9cc02d75c0084cd4da29a7_36137eb3 enabled=1 gpgcheck=0 priority=1 [delorean-component-cinder] name=delorean-openstack-cinder-1c00d6490d88e436f26efb71f2ac96e75252e97c baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/cinder/1c/00/1c00d6490d88e436f26efb71f2ac96e75252e97c_f716f000 enabled=1 gpgcheck=0 
priority=1 [delorean-component-clients] name=delorean-python-stevedore-c4acc5639fd2329372142e39464fcca0209b0018 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/clients/c4/ac/c4acc5639fd2329372142e39464fcca0209b0018_d3ef8337 enabled=1 gpgcheck=0 priority=1 [delorean-component-cloudops] name=delorean-python-cloudkitty-tests-tempest-2c80f80e02c5accd099187ea762c8f8389bd7905 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/cloudops/2c/80/2c80f80e02c5accd099187ea762c8f8389bd7905_33e4dd93 enabled=1 gpgcheck=0 priority=1 [delorean-component-common] name=delorean-os-refresh-config-9bfc52b5049be2d8de6134d662fdde9dfa48960f baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/common/9b/fc/9bfc52b5049be2d8de6134d662fdde9dfa48960f_b85780e6 enabled=1 gpgcheck=0 priority=1 [delorean-component-compute] name=delorean-openstack-nova-6f8decf0b4f1aa2e96292b6a2ffc28249fe4af5e baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/compute/6f/8d/6f8decf0b4f1aa2e96292b6a2ffc28249fe4af5e_dc05b899 enabled=1 gpgcheck=0 priority=1 [delorean-component-designate] name=delorean-python-designate-tests-tempest-347fdbc9b4595a10b726526b3c0b5928e5b7fcf2 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/designate/34/7f/347fdbc9b4595a10b726526b3c0b5928e5b7fcf2_3fd39337 enabled=1 gpgcheck=0 priority=1 [delorean-component-glance] name=delorean-openstack-glance-1fd12c29b339f30fe823e2b5beba14b5f241e52a baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/glance/1f/d1/1fd12c29b339f30fe823e2b5beba14b5f241e52a_0d693729 enabled=1 gpgcheck=0 priority=1 [delorean-component-keystone] name=delorean-openstack-keystone-e4b40af0ae3698fbbbbfb8c22468b33aae80e6d7 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/keystone/e4/b4/e4b40af0ae3698fbbbbfb8c22468b33aae80e6d7_264c03cc enabled=1 gpgcheck=0 priority=1 [delorean-component-manila] name=delorean-openstack-manila-3c01b7181572c95dac462eb19c3121e36cb0fe95 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/manila/3c/01/3c01b7181572c95dac462eb19c3121e36cb0fe95_912dfd18 enabled=1 gpgcheck=0 priority=1 [delorean-component-network] name=delorean-python-whitebox-neutron-tests-tempest-12cf06ce36a79a584fc757f4c25ff96845573c93 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/network/12/cf/12cf06ce36a79a584fc757f4c25ff96845573c93_3ed3aba3 enabled=1 gpgcheck=0 priority=1 [delorean-component-octavia] name=delorean-openstack-octavia-ba397f07a7331190208c93368ee23826ac4e2707 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/octavia/ba/39/ba397f07a7331190208c93368ee23826ac4e2707_9d6e596a enabled=1 gpgcheck=0 priority=1 [delorean-component-optimize] name=delorean-openstack-watcher-c014f81a8647287f6dcc339321c1256f5a2e82d5 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/optimize/c0/14/c014f81a8647287f6dcc339321c1256f5a2e82d5_bcbfdccc enabled=1 gpgcheck=0 priority=1 [delorean-component-podified] name=delorean-ansible-config_template-5ccaa22121a7ff05620975540d81f6efb077d8db 
baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/podified/5c/ca/5ccaa22121a7ff05620975540d81f6efb077d8db_83eb7cc2 enabled=1 gpgcheck=0 priority=1 [delorean-component-puppet] name=delorean-puppet-ceph-7352068d7b8c84ded636ab3158dafa6f3851951e baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/puppet/73/52/7352068d7b8c84ded636ab3158dafa6f3851951e_7cde1ad1 enabled=1 gpgcheck=0 priority=1 [delorean-component-swift] name=delorean-openstack-swift-dc98a8463506ac520c469adb0ef47d0f7753905a baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/swift/dc/98/dc98a8463506ac520c469adb0ef47d0f7753905a_9d02f069 enabled=1 gpgcheck=0 priority=1 [delorean-component-tempest] name=delorean-python-tempestconf-8515371b7cceebd4282e09f1d8f0cc842df82855 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/tempest/85/15/8515371b7cceebd4282e09f1d8f0cc842df82855_a1e336c7 enabled=1 gpgcheck=0 priority=1 [delorean-component-ui] name=delorean-openstack-heat-ui-013accbfd179753bc3f0d1f4e5bed07a4fd9f771 baseurl=http://mirror.regionone.vexxhost-nodepool-tripleo.rdoproject.org:8080/rdo//centos9-antelope/component/ui/01/3a/013accbfd179753bc3f0d1f4e5bed07a4fd9f771_0c88e467 enabled=1 gpgcheck=0 priority=1 home/zuul/zuul-output/logs/ci-framework-data/artifacts/ci-env/0000755000175000017500000000000015115611523023524 5ustar zuulzuulhome/zuul/zuul-output/logs/ci-framework-data/artifacts/ci-env/networking-info.yml0000644000175000017500000000231215115611271027365 0ustar zuulzuulcrc_ci_bootstrap_networks_out: controller: default: connection: ci-private-network gw: 192.168.122.1 iface: eth1 ip: 192.168.122.11/24 mac: fa:16:3e:6a:de:3b mtu: 1500 crc: default: connection: ci-private-network gw: 192.168.122.1 iface: ens7 ip: 192.168.122.10/24 mac: fa:16:3e:e6:79:2f mtu: 1500 internal-api: connection: ci-private-network-20 iface: ens7.20 ip: 172.17.0.5/24 mac: 52:54:00:84:35:42 mtu: '1496' parent_iface: ens7 vlan: 20 storage: connection: ci-private-network-21 iface: ens7.21 ip: 172.18.0.5/24 mac: 52:54:00:9c:32:12 mtu: '1496' parent_iface: ens7 vlan: 21 tenant: connection: ci-private-network-22 iface: ens7.22 ip: 172.19.0.5/24 mac: 52:54:00:ec:da:21 mtu: '1496' parent_iface: ens7 vlan: 22 crc_ci_bootstrap_provider_dns: - 199.204.44.24 - 199.204.47.54 home/zuul/zuul-output/logs/ci-framework-data/artifacts/installed-packages.yml0000644000175000017500000022647615115611273026644 0ustar zuulzuulNetworkManager: - arch: x86_64 epoch: 1 name: NetworkManager release: 1.el9 source: rpm version: 1.54.1 NetworkManager-libnm: - arch: x86_64 epoch: 1 name: NetworkManager-libnm release: 1.el9 source: rpm version: 1.54.1 NetworkManager-team: - arch: x86_64 epoch: 1 name: NetworkManager-team release: 1.el9 source: rpm version: 1.54.1 NetworkManager-tui: - arch: x86_64 epoch: 1 name: NetworkManager-tui release: 1.el9 source: rpm version: 1.54.1 aardvark-dns: - arch: x86_64 epoch: 2 name: aardvark-dns release: 1.el9 source: rpm version: 1.17.0 abattis-cantarell-fonts: - arch: noarch epoch: null name: abattis-cantarell-fonts release: 4.el9 source: rpm version: '0.301' acl: - arch: x86_64 epoch: null name: acl release: 4.el9 source: rpm version: 2.3.1 adobe-source-code-pro-fonts: - arch: noarch epoch: null name: adobe-source-code-pro-fonts release: 12.el9.1 source: rpm version: 2.030.1.050 alternatives: - arch: x86_64 epoch: null 
name: alternatives release: 2.el9 source: rpm version: '1.24' annobin: - arch: x86_64 epoch: null name: annobin release: 1.el9 source: rpm version: '12.98' ansible-core: - arch: x86_64 epoch: 1 name: ansible-core release: 2.el9 source: rpm version: 2.14.18 attr: - arch: x86_64 epoch: null name: attr release: 3.el9 source: rpm version: 2.5.1 audit: - arch: x86_64 epoch: null name: audit release: 7.el9 source: rpm version: 3.1.5 audit-libs: - arch: x86_64 epoch: null name: audit-libs release: 7.el9 source: rpm version: 3.1.5 authselect: - arch: x86_64 epoch: null name: authselect release: 3.el9 source: rpm version: 1.2.6 authselect-compat: - arch: x86_64 epoch: null name: authselect-compat release: 3.el9 source: rpm version: 1.2.6 authselect-libs: - arch: x86_64 epoch: null name: authselect-libs release: 3.el9 source: rpm version: 1.2.6 avahi-libs: - arch: x86_64 epoch: null name: avahi-libs release: 23.el9 source: rpm version: '0.8' basesystem: - arch: noarch epoch: null name: basesystem release: 13.el9 source: rpm version: '11' bash: - arch: x86_64 epoch: null name: bash release: 9.el9 source: rpm version: 5.1.8 bash-completion: - arch: noarch epoch: 1 name: bash-completion release: 5.el9 source: rpm version: '2.11' binutils: - arch: x86_64 epoch: null name: binutils release: 68.el9 source: rpm version: 2.35.2 binutils-gold: - arch: x86_64 epoch: null name: binutils-gold release: 68.el9 source: rpm version: 2.35.2 buildah: - arch: x86_64 epoch: 2 name: buildah release: 1.el9 source: rpm version: 1.41.3 bzip2: - arch: x86_64 epoch: null name: bzip2 release: 10.el9 source: rpm version: 1.0.8 bzip2-libs: - arch: x86_64 epoch: null name: bzip2-libs release: 10.el9 source: rpm version: 1.0.8 c-ares: - arch: x86_64 epoch: null name: c-ares release: 2.el9 source: rpm version: 1.19.1 ca-certificates: - arch: noarch epoch: null name: ca-certificates release: 91.el9 source: rpm version: 2025.2.80_v9.0.305 centos-gpg-keys: - arch: noarch epoch: null name: centos-gpg-keys release: 30.el9 source: rpm version: '9.0' centos-logos: - arch: x86_64 epoch: null name: centos-logos release: 3.el9 source: rpm version: '90.8' centos-stream-release: - arch: noarch epoch: null name: centos-stream-release release: 30.el9 source: rpm version: '9.0' centos-stream-repos: - arch: noarch epoch: null name: centos-stream-repos release: 30.el9 source: rpm version: '9.0' checkpolicy: - arch: x86_64 epoch: null name: checkpolicy release: 1.el9 source: rpm version: '3.6' chrony: - arch: x86_64 epoch: null name: chrony release: 1.el9 source: rpm version: '4.8' cloud-init: - arch: noarch epoch: null name: cloud-init release: 7.el9 source: rpm version: '24.4' cloud-utils-growpart: - arch: x86_64 epoch: null name: cloud-utils-growpart release: 1.el9 source: rpm version: '0.33' cmake-filesystem: - arch: x86_64 epoch: null name: cmake-filesystem release: 2.el9 source: rpm version: 3.26.5 cockpit-bridge: - arch: noarch epoch: null name: cockpit-bridge release: 1.el9 source: rpm version: '348' cockpit-system: - arch: noarch epoch: null name: cockpit-system release: 1.el9 source: rpm version: '348' cockpit-ws: - arch: x86_64 epoch: null name: cockpit-ws release: 1.el9 source: rpm version: '348' cockpit-ws-selinux: - arch: x86_64 epoch: null name: cockpit-ws-selinux release: 1.el9 source: rpm version: '348' conmon: - arch: x86_64 epoch: 3 name: conmon release: 1.el9 source: rpm version: 2.1.13 container-selinux: - arch: noarch epoch: 4 name: container-selinux release: 1.el9 source: rpm version: 2.242.0 containers-common: - arch: x86_64 
epoch: 4 name: containers-common release: 134.el9 source: rpm version: '1' containers-common-extra: - arch: x86_64 epoch: 4 name: containers-common-extra release: 134.el9 source: rpm version: '1' coreutils: - arch: x86_64 epoch: null name: coreutils release: 39.el9 source: rpm version: '8.32' coreutils-common: - arch: x86_64 epoch: null name: coreutils-common release: 39.el9 source: rpm version: '8.32' cpio: - arch: x86_64 epoch: null name: cpio release: 16.el9 source: rpm version: '2.13' cpp: - arch: x86_64 epoch: null name: cpp release: 14.el9 source: rpm version: 11.5.0 cracklib: - arch: x86_64 epoch: null name: cracklib release: 27.el9 source: rpm version: 2.9.6 cracklib-dicts: - arch: x86_64 epoch: null name: cracklib-dicts release: 27.el9 source: rpm version: 2.9.6 createrepo_c: - arch: x86_64 epoch: null name: createrepo_c release: 4.el9 source: rpm version: 0.20.1 createrepo_c-libs: - arch: x86_64 epoch: null name: createrepo_c-libs release: 4.el9 source: rpm version: 0.20.1 criu: - arch: x86_64 epoch: null name: criu release: 3.el9 source: rpm version: '3.19' criu-libs: - arch: x86_64 epoch: null name: criu-libs release: 3.el9 source: rpm version: '3.19' cronie: - arch: x86_64 epoch: null name: cronie release: 14.el9 source: rpm version: 1.5.7 cronie-anacron: - arch: x86_64 epoch: null name: cronie-anacron release: 14.el9 source: rpm version: 1.5.7 crontabs: - arch: noarch epoch: null name: crontabs release: 26.20190603git.el9 source: rpm version: '1.11' crun: - arch: x86_64 epoch: null name: crun release: 1.el9 source: rpm version: '1.24' crypto-policies: - arch: noarch epoch: null name: crypto-policies release: 1.gite9c4db2.el9 source: rpm version: '20251126' crypto-policies-scripts: - arch: noarch epoch: null name: crypto-policies-scripts release: 1.gite9c4db2.el9 source: rpm version: '20251126' cryptsetup-libs: - arch: x86_64 epoch: null name: cryptsetup-libs release: 2.el9 source: rpm version: 2.8.1 curl: - arch: x86_64 epoch: null name: curl release: 34.el9 source: rpm version: 7.76.1 cyrus-sasl: - arch: x86_64 epoch: null name: cyrus-sasl release: 21.el9 source: rpm version: 2.1.27 cyrus-sasl-devel: - arch: x86_64 epoch: null name: cyrus-sasl-devel release: 21.el9 source: rpm version: 2.1.27 cyrus-sasl-gssapi: - arch: x86_64 epoch: null name: cyrus-sasl-gssapi release: 21.el9 source: rpm version: 2.1.27 cyrus-sasl-lib: - arch: x86_64 epoch: null name: cyrus-sasl-lib release: 21.el9 source: rpm version: 2.1.27 dbus: - arch: x86_64 epoch: 1 name: dbus release: 8.el9 source: rpm version: 1.12.20 dbus-broker: - arch: x86_64 epoch: null name: dbus-broker release: 7.el9 source: rpm version: '28' dbus-common: - arch: noarch epoch: 1 name: dbus-common release: 8.el9 source: rpm version: 1.12.20 dbus-libs: - arch: x86_64 epoch: 1 name: dbus-libs release: 8.el9 source: rpm version: 1.12.20 dbus-tools: - arch: x86_64 epoch: 1 name: dbus-tools release: 8.el9 source: rpm version: 1.12.20 debugedit: - arch: x86_64 epoch: null name: debugedit release: 11.el9 source: rpm version: '5.0' dejavu-sans-fonts: - arch: noarch epoch: null name: dejavu-sans-fonts release: 18.el9 source: rpm version: '2.37' desktop-file-utils: - arch: x86_64 epoch: null name: desktop-file-utils release: 6.el9 source: rpm version: '0.26' device-mapper: - arch: x86_64 epoch: 9 name: device-mapper release: 2.el9 source: rpm version: 1.02.206 device-mapper-libs: - arch: x86_64 epoch: 9 name: device-mapper-libs release: 2.el9 source: rpm version: 1.02.206 dhcp-client: - arch: x86_64 epoch: 12 name: dhcp-client release: 
19.b1.el9 source: rpm version: 4.4.2 dhcp-common: - arch: noarch epoch: 12 name: dhcp-common release: 19.b1.el9 source: rpm version: 4.4.2 diffutils: - arch: x86_64 epoch: null name: diffutils release: 12.el9 source: rpm version: '3.7' dnf: - arch: noarch epoch: null name: dnf release: 31.el9 source: rpm version: 4.14.0 dnf-data: - arch: noarch epoch: null name: dnf-data release: 31.el9 source: rpm version: 4.14.0 dnf-plugins-core: - arch: noarch epoch: null name: dnf-plugins-core release: 24.el9 source: rpm version: 4.3.0 dracut: - arch: x86_64 epoch: null name: dracut release: 102.git20250818.el9 source: rpm version: '057' dracut-config-generic: - arch: x86_64 epoch: null name: dracut-config-generic release: 102.git20250818.el9 source: rpm version: '057' dracut-network: - arch: x86_64 epoch: null name: dracut-network release: 102.git20250818.el9 source: rpm version: '057' dracut-squash: - arch: x86_64 epoch: null name: dracut-squash release: 102.git20250818.el9 source: rpm version: '057' dwz: - arch: x86_64 epoch: null name: dwz release: 1.el9 source: rpm version: '0.16' e2fsprogs: - arch: x86_64 epoch: null name: e2fsprogs release: 8.el9 source: rpm version: 1.46.5 e2fsprogs-libs: - arch: x86_64 epoch: null name: e2fsprogs-libs release: 8.el9 source: rpm version: 1.46.5 ed: - arch: x86_64 epoch: null name: ed release: 12.el9 source: rpm version: 1.14.2 efi-srpm-macros: - arch: noarch epoch: null name: efi-srpm-macros release: 4.el9 source: rpm version: '6' elfutils: - arch: x86_64 epoch: null name: elfutils release: 1.el9 source: rpm version: '0.194' elfutils-debuginfod-client: - arch: x86_64 epoch: null name: elfutils-debuginfod-client release: 1.el9 source: rpm version: '0.194' elfutils-default-yama-scope: - arch: noarch epoch: null name: elfutils-default-yama-scope release: 1.el9 source: rpm version: '0.194' elfutils-libelf: - arch: x86_64 epoch: null name: elfutils-libelf release: 1.el9 source: rpm version: '0.194' elfutils-libs: - arch: x86_64 epoch: null name: elfutils-libs release: 1.el9 source: rpm version: '0.194' emacs-filesystem: - arch: noarch epoch: 1 name: emacs-filesystem release: 18.el9 source: rpm version: '27.2' enchant: - arch: x86_64 epoch: 1 name: enchant release: 30.el9 source: rpm version: 1.6.0 ethtool: - arch: x86_64 epoch: 2 name: ethtool release: 2.el9 source: rpm version: '6.15' expat: - arch: x86_64 epoch: null name: expat release: 5.el9 source: rpm version: 2.5.0 expect: - arch: x86_64 epoch: null name: expect release: 16.el9 source: rpm version: 5.45.4 file: - arch: x86_64 epoch: null name: file release: 16.el9 source: rpm version: '5.39' file-libs: - arch: x86_64 epoch: null name: file-libs release: 16.el9 source: rpm version: '5.39' filesystem: - arch: x86_64 epoch: null name: filesystem release: 5.el9 source: rpm version: '3.16' findutils: - arch: x86_64 epoch: 1 name: findutils release: 7.el9 source: rpm version: 4.8.0 fonts-filesystem: - arch: noarch epoch: 1 name: fonts-filesystem release: 7.el9.1 source: rpm version: 2.0.5 fonts-srpm-macros: - arch: noarch epoch: 1 name: fonts-srpm-macros release: 7.el9.1 source: rpm version: 2.0.5 fuse-common: - arch: x86_64 epoch: null name: fuse-common release: 9.el9 source: rpm version: 3.10.2 fuse-libs: - arch: x86_64 epoch: null name: fuse-libs release: 17.el9 source: rpm version: 2.9.9 fuse-overlayfs: - arch: x86_64 epoch: null name: fuse-overlayfs release: 1.el9 source: rpm version: '1.16' fuse3: - arch: x86_64 epoch: null name: fuse3 release: 9.el9 source: rpm version: 3.10.2 fuse3-libs: - arch: x86_64 
epoch: null name: fuse3-libs release: 9.el9 source: rpm version: 3.10.2 gawk: - arch: x86_64 epoch: null name: gawk release: 6.el9 source: rpm version: 5.1.0 gawk-all-langpacks: - arch: x86_64 epoch: null name: gawk-all-langpacks release: 6.el9 source: rpm version: 5.1.0 gcc: - arch: x86_64 epoch: null name: gcc release: 14.el9 source: rpm version: 11.5.0 gcc-c++: - arch: x86_64 epoch: null name: gcc-c++ release: 14.el9 source: rpm version: 11.5.0 gcc-plugin-annobin: - arch: x86_64 epoch: null name: gcc-plugin-annobin release: 14.el9 source: rpm version: 11.5.0 gdb-minimal: - arch: x86_64 epoch: null name: gdb-minimal release: 2.el9 source: rpm version: '16.3' gdbm-libs: - arch: x86_64 epoch: 1 name: gdbm-libs release: 1.el9 source: rpm version: '1.23' gdisk: - arch: x86_64 epoch: null name: gdisk release: 5.el9 source: rpm version: 1.0.7 gdk-pixbuf2: - arch: x86_64 epoch: null name: gdk-pixbuf2 release: 6.el9 source: rpm version: 2.42.6 geolite2-city: - arch: noarch epoch: null name: geolite2-city release: 6.el9 source: rpm version: '20191217' geolite2-country: - arch: noarch epoch: null name: geolite2-country release: 6.el9 source: rpm version: '20191217' gettext: - arch: x86_64 epoch: null name: gettext release: 8.el9 source: rpm version: '0.21' gettext-libs: - arch: x86_64 epoch: null name: gettext-libs release: 8.el9 source: rpm version: '0.21' ghc-srpm-macros: - arch: noarch epoch: null name: ghc-srpm-macros release: 6.el9 source: rpm version: 1.5.0 git: - arch: x86_64 epoch: null name: git release: 1.el9 source: rpm version: 2.47.3 git-core: - arch: x86_64 epoch: null name: git-core release: 1.el9 source: rpm version: 2.47.3 git-core-doc: - arch: noarch epoch: null name: git-core-doc release: 1.el9 source: rpm version: 2.47.3 glib-networking: - arch: x86_64 epoch: null name: glib-networking release: 3.el9 source: rpm version: 2.68.3 glib2: - arch: x86_64 epoch: null name: glib2 release: 18.el9 source: rpm version: 2.68.4 glibc: - arch: x86_64 epoch: null name: glibc release: 244.el9 source: rpm version: '2.34' glibc-common: - arch: x86_64 epoch: null name: glibc-common release: 244.el9 source: rpm version: '2.34' glibc-devel: - arch: x86_64 epoch: null name: glibc-devel release: 244.el9 source: rpm version: '2.34' glibc-gconv-extra: - arch: x86_64 epoch: null name: glibc-gconv-extra release: 244.el9 source: rpm version: '2.34' glibc-headers: - arch: x86_64 epoch: null name: glibc-headers release: 244.el9 source: rpm version: '2.34' glibc-langpack-en: - arch: x86_64 epoch: null name: glibc-langpack-en release: 244.el9 source: rpm version: '2.34' gmp: - arch: x86_64 epoch: 1 name: gmp release: 13.el9 source: rpm version: 6.2.0 gnupg2: - arch: x86_64 epoch: null name: gnupg2 release: 4.el9 source: rpm version: 2.3.3 gnutls: - arch: x86_64 epoch: null name: gnutls release: 1.el9 source: rpm version: 3.8.10 go-srpm-macros: - arch: noarch epoch: null name: go-srpm-macros release: 1.el9 source: rpm version: 3.8.1 gobject-introspection: - arch: x86_64 epoch: null name: gobject-introspection release: 11.el9 source: rpm version: 1.68.0 gpg-pubkey: - arch: null epoch: null name: gpg-pubkey release: 5ccc5b19 source: rpm version: 8483c65d gpgme: - arch: x86_64 epoch: null name: gpgme release: 6.el9 source: rpm version: 1.15.1 grep: - arch: x86_64 epoch: null name: grep release: 5.el9 source: rpm version: '3.6' groff-base: - arch: x86_64 epoch: null name: groff-base release: 10.el9 source: rpm version: 1.22.4 grub2-common: - arch: noarch epoch: 1 name: grub2-common release: 118.el9 source: rpm 
version: '2.06' grub2-pc: - arch: x86_64 epoch: 1 name: grub2-pc release: 118.el9 source: rpm version: '2.06' grub2-pc-modules: - arch: noarch epoch: 1 name: grub2-pc-modules release: 118.el9 source: rpm version: '2.06' grub2-tools: - arch: x86_64 epoch: 1 name: grub2-tools release: 118.el9 source: rpm version: '2.06' grub2-tools-minimal: - arch: x86_64 epoch: 1 name: grub2-tools-minimal release: 118.el9 source: rpm version: '2.06' grubby: - arch: x86_64 epoch: null name: grubby release: 69.el9 source: rpm version: '8.40' gsettings-desktop-schemas: - arch: x86_64 epoch: null name: gsettings-desktop-schemas release: 8.el9 source: rpm version: '40.0' gssproxy: - arch: x86_64 epoch: null name: gssproxy release: 7.el9 source: rpm version: 0.8.4 gzip: - arch: x86_64 epoch: null name: gzip release: 1.el9 source: rpm version: '1.12' hostname: - arch: x86_64 epoch: null name: hostname release: 6.el9 source: rpm version: '3.23' hunspell: - arch: x86_64 epoch: null name: hunspell release: 11.el9 source: rpm version: 1.7.0 hunspell-en-GB: - arch: noarch epoch: null name: hunspell-en-GB release: 20.el9 source: rpm version: 0.20140811.1 hunspell-en-US: - arch: noarch epoch: null name: hunspell-en-US release: 20.el9 source: rpm version: 0.20140811.1 hunspell-filesystem: - arch: x86_64 epoch: null name: hunspell-filesystem release: 11.el9 source: rpm version: 1.7.0 hwdata: - arch: noarch epoch: null name: hwdata release: 9.20.el9 source: rpm version: '0.348' ima-evm-utils: - arch: x86_64 epoch: null name: ima-evm-utils release: 2.el9 source: rpm version: 1.6.2 info: - arch: x86_64 epoch: null name: info release: 15.el9 source: rpm version: '6.7' inih: - arch: x86_64 epoch: null name: inih release: 6.el9 source: rpm version: '49' initscripts-rename-device: - arch: x86_64 epoch: null name: initscripts-rename-device release: 4.el9 source: rpm version: 10.11.8 initscripts-service: - arch: noarch epoch: null name: initscripts-service release: 4.el9 source: rpm version: 10.11.8 ipcalc: - arch: x86_64 epoch: null name: ipcalc release: 5.el9 source: rpm version: 1.0.0 iproute: - arch: x86_64 epoch: null name: iproute release: 1.el9 source: rpm version: 6.17.0 iproute-tc: - arch: x86_64 epoch: null name: iproute-tc release: 1.el9 source: rpm version: 6.17.0 iptables-libs: - arch: x86_64 epoch: null name: iptables-libs release: 11.el9 source: rpm version: 1.8.10 iptables-nft: - arch: x86_64 epoch: null name: iptables-nft release: 11.el9 source: rpm version: 1.8.10 iptables-nft-services: - arch: noarch epoch: null name: iptables-nft-services release: 11.el9 source: rpm version: 1.8.10 iputils: - arch: x86_64 epoch: null name: iputils release: 15.el9 source: rpm version: '20210202' irqbalance: - arch: x86_64 epoch: 2 name: irqbalance release: 5.el9 source: rpm version: 1.9.4 jansson: - arch: x86_64 epoch: null name: jansson release: 1.el9 source: rpm version: '2.14' jq: - arch: x86_64 epoch: null name: jq release: 19.el9 source: rpm version: '1.6' json-c: - arch: x86_64 epoch: null name: json-c release: 11.el9 source: rpm version: '0.14' json-glib: - arch: x86_64 epoch: null name: json-glib release: 1.el9 source: rpm version: 1.6.6 kbd: - arch: x86_64 epoch: null name: kbd release: 11.el9 source: rpm version: 2.4.0 kbd-legacy: - arch: noarch epoch: null name: kbd-legacy release: 11.el9 source: rpm version: 2.4.0 kbd-misc: - arch: noarch epoch: null name: kbd-misc release: 11.el9 source: rpm version: 2.4.0 kernel: - arch: x86_64 epoch: null name: kernel release: 645.el9 source: rpm version: 5.14.0 kernel-core: - 
arch: x86_64 epoch: null name: kernel-core release: 645.el9 source: rpm version: 5.14.0 kernel-headers: - arch: x86_64 epoch: null name: kernel-headers release: 645.el9 source: rpm version: 5.14.0 kernel-modules: - arch: x86_64 epoch: null name: kernel-modules release: 645.el9 source: rpm version: 5.14.0 kernel-modules-core: - arch: x86_64 epoch: null name: kernel-modules-core release: 645.el9 source: rpm version: 5.14.0 kernel-srpm-macros: - arch: noarch epoch: null name: kernel-srpm-macros release: 14.el9 source: rpm version: '1.0' kernel-tools: - arch: x86_64 epoch: null name: kernel-tools release: 645.el9 source: rpm version: 5.14.0 kernel-tools-libs: - arch: x86_64 epoch: null name: kernel-tools-libs release: 645.el9 source: rpm version: 5.14.0 kexec-tools: - arch: x86_64 epoch: null name: kexec-tools release: 12.el9 source: rpm version: 2.0.29 keyutils: - arch: x86_64 epoch: null name: keyutils release: 1.el9 source: rpm version: 1.6.3 keyutils-libs: - arch: x86_64 epoch: null name: keyutils-libs release: 1.el9 source: rpm version: 1.6.3 kmod: - arch: x86_64 epoch: null name: kmod release: 11.el9 source: rpm version: '28' kmod-libs: - arch: x86_64 epoch: null name: kmod-libs release: 11.el9 source: rpm version: '28' kpartx: - arch: x86_64 epoch: null name: kpartx release: 39.el9 source: rpm version: 0.8.7 krb5-libs: - arch: x86_64 epoch: null name: krb5-libs release: 8.el9 source: rpm version: 1.21.1 langpacks-core-en_GB: - arch: noarch epoch: null name: langpacks-core-en_GB release: 16.el9 source: rpm version: '3.0' langpacks-core-font-en: - arch: noarch epoch: null name: langpacks-core-font-en release: 16.el9 source: rpm version: '3.0' langpacks-en_GB: - arch: noarch epoch: null name: langpacks-en_GB release: 16.el9 source: rpm version: '3.0' less: - arch: x86_64 epoch: null name: less release: 6.el9 source: rpm version: '590' libacl: - arch: x86_64 epoch: null name: libacl release: 4.el9 source: rpm version: 2.3.1 libappstream-glib: - arch: x86_64 epoch: null name: libappstream-glib release: 5.el9 source: rpm version: 0.7.18 libarchive: - arch: x86_64 epoch: null name: libarchive release: 6.el9 source: rpm version: 3.5.3 libassuan: - arch: x86_64 epoch: null name: libassuan release: 3.el9 source: rpm version: 2.5.5 libattr: - arch: x86_64 epoch: null name: libattr release: 3.el9 source: rpm version: 2.5.1 libbasicobjects: - arch: x86_64 epoch: null name: libbasicobjects release: 53.el9 source: rpm version: 0.1.1 libblkid: - arch: x86_64 epoch: null name: libblkid release: 21.el9 source: rpm version: 2.37.4 libbpf: - arch: x86_64 epoch: 2 name: libbpf release: 2.el9 source: rpm version: 1.5.0 libbrotli: - arch: x86_64 epoch: null name: libbrotli release: 7.el9 source: rpm version: 1.0.9 libcap: - arch: x86_64 epoch: null name: libcap release: 10.el9 source: rpm version: '2.48' libcap-ng: - arch: x86_64 epoch: null name: libcap-ng release: 7.el9 source: rpm version: 0.8.2 libcbor: - arch: x86_64 epoch: null name: libcbor release: 5.el9 source: rpm version: 0.7.0 libcollection: - arch: x86_64 epoch: null name: libcollection release: 53.el9 source: rpm version: 0.7.0 libcom_err: - arch: x86_64 epoch: null name: libcom_err release: 8.el9 source: rpm version: 1.46.5 libcomps: - arch: x86_64 epoch: null name: libcomps release: 1.el9 source: rpm version: 0.1.18 libcurl: - arch: x86_64 epoch: null name: libcurl release: 34.el9 source: rpm version: 7.76.1 libdaemon: - arch: x86_64 epoch: null name: libdaemon release: 23.el9 source: rpm version: '0.14' libdb: - arch: x86_64 epoch: null name: 
libdb release: 57.el9 source: rpm version: 5.3.28 libdhash: - arch: x86_64 epoch: null name: libdhash release: 53.el9 source: rpm version: 0.5.0 libdnf: - arch: x86_64 epoch: null name: libdnf release: 16.el9 source: rpm version: 0.69.0 libeconf: - arch: x86_64 epoch: null name: libeconf release: 4.el9 source: rpm version: 0.4.1 libedit: - arch: x86_64 epoch: null name: libedit release: 38.20210216cvs.el9 source: rpm version: '3.1' libestr: - arch: x86_64 epoch: null name: libestr release: 4.el9 source: rpm version: 0.1.11 libev: - arch: x86_64 epoch: null name: libev release: 6.el9 source: rpm version: '4.33' libevent: - arch: x86_64 epoch: null name: libevent release: 8.el9 source: rpm version: 2.1.12 libfastjson: - arch: x86_64 epoch: null name: libfastjson release: 5.el9 source: rpm version: 0.99.9 libfdisk: - arch: x86_64 epoch: null name: libfdisk release: 21.el9 source: rpm version: 2.37.4 libffi: - arch: x86_64 epoch: null name: libffi release: 8.el9 source: rpm version: 3.4.2 libffi-devel: - arch: x86_64 epoch: null name: libffi-devel release: 8.el9 source: rpm version: 3.4.2 libfido2: - arch: x86_64 epoch: null name: libfido2 release: 2.el9 source: rpm version: 1.13.0 libgcc: - arch: x86_64 epoch: null name: libgcc release: 14.el9 source: rpm version: 11.5.0 libgcrypt: - arch: x86_64 epoch: null name: libgcrypt release: 11.el9 source: rpm version: 1.10.0 libgomp: - arch: x86_64 epoch: null name: libgomp release: 14.el9 source: rpm version: 11.5.0 libgpg-error: - arch: x86_64 epoch: null name: libgpg-error release: 5.el9 source: rpm version: '1.42' libgpg-error-devel: - arch: x86_64 epoch: null name: libgpg-error-devel release: 5.el9 source: rpm version: '1.42' libibverbs: - arch: x86_64 epoch: null name: libibverbs release: 2.el9 source: rpm version: '57.0' libicu: - arch: x86_64 epoch: null name: libicu release: 10.el9 source: rpm version: '67.1' libidn2: - arch: x86_64 epoch: null name: libidn2 release: 7.el9 source: rpm version: 2.3.0 libini_config: - arch: x86_64 epoch: null name: libini_config release: 53.el9 source: rpm version: 1.3.1 libjpeg-turbo: - arch: x86_64 epoch: null name: libjpeg-turbo release: 7.el9 source: rpm version: 2.0.90 libkcapi: - arch: x86_64 epoch: null name: libkcapi release: 2.el9 source: rpm version: 1.4.0 libkcapi-hmaccalc: - arch: x86_64 epoch: null name: libkcapi-hmaccalc release: 2.el9 source: rpm version: 1.4.0 libksba: - arch: x86_64 epoch: null name: libksba release: 7.el9 source: rpm version: 1.5.1 libldb: - arch: x86_64 epoch: 0 name: libldb release: 1.el9 source: rpm version: 4.23.3 libmaxminddb: - arch: x86_64 epoch: null name: libmaxminddb release: 4.el9 source: rpm version: 1.5.2 libmnl: - arch: x86_64 epoch: null name: libmnl release: 16.el9 source: rpm version: 1.0.4 libmodulemd: - arch: x86_64 epoch: null name: libmodulemd release: 2.el9 source: rpm version: 2.13.0 libmount: - arch: x86_64 epoch: null name: libmount release: 21.el9 source: rpm version: 2.37.4 libmpc: - arch: x86_64 epoch: null name: libmpc release: 4.el9 source: rpm version: 1.2.1 libndp: - arch: x86_64 epoch: null name: libndp release: 1.el9 source: rpm version: '1.9' libnet: - arch: x86_64 epoch: null name: libnet release: 7.el9 source: rpm version: '1.2' libnetfilter_conntrack: - arch: x86_64 epoch: null name: libnetfilter_conntrack release: 1.el9 source: rpm version: 1.0.9 libnfnetlink: - arch: x86_64 epoch: null name: libnfnetlink release: 23.el9 source: rpm version: 1.0.1 libnfsidmap: - arch: x86_64 epoch: 1 name: libnfsidmap release: 39.el9 source: rpm version: 
2.5.4 libnftnl: - arch: x86_64 epoch: null name: libnftnl release: 4.el9 source: rpm version: 1.2.6 libnghttp2: - arch: x86_64 epoch: null name: libnghttp2 release: 6.el9 source: rpm version: 1.43.0 libnl3: - arch: x86_64 epoch: null name: libnl3 release: 1.el9 source: rpm version: 3.11.0 libnl3-cli: - arch: x86_64 epoch: null name: libnl3-cli release: 1.el9 source: rpm version: 3.11.0 libnsl2: - arch: x86_64 epoch: null name: libnsl2 release: 1.el9 source: rpm version: 2.0.0 libpath_utils: - arch: x86_64 epoch: null name: libpath_utils release: 53.el9 source: rpm version: 0.2.1 libpcap: - arch: x86_64 epoch: 14 name: libpcap release: 4.el9 source: rpm version: 1.10.0 libpipeline: - arch: x86_64 epoch: null name: libpipeline release: 4.el9 source: rpm version: 1.5.3 libpkgconf: - arch: x86_64 epoch: null name: libpkgconf release: 10.el9 source: rpm version: 1.7.3 libpng: - arch: x86_64 epoch: 2 name: libpng release: 12.el9 source: rpm version: 1.6.37 libproxy: - arch: x86_64 epoch: null name: libproxy release: 35.el9 source: rpm version: 0.4.15 libproxy-webkitgtk4: - arch: x86_64 epoch: null name: libproxy-webkitgtk4 release: 35.el9 source: rpm version: 0.4.15 libpsl: - arch: x86_64 epoch: null name: libpsl release: 5.el9 source: rpm version: 0.21.1 libpwquality: - arch: x86_64 epoch: null name: libpwquality release: 8.el9 source: rpm version: 1.4.4 libref_array: - arch: x86_64 epoch: null name: libref_array release: 53.el9 source: rpm version: 0.1.5 librepo: - arch: x86_64 epoch: null name: librepo release: 3.el9 source: rpm version: 1.14.5 libreport-filesystem: - arch: noarch epoch: null name: libreport-filesystem release: 6.el9 source: rpm version: 2.15.2 libseccomp: - arch: x86_64 epoch: null name: libseccomp release: 2.el9 source: rpm version: 2.5.2 libselinux: - arch: x86_64 epoch: null name: libselinux release: 3.el9 source: rpm version: '3.6' libselinux-utils: - arch: x86_64 epoch: null name: libselinux-utils release: 3.el9 source: rpm version: '3.6' libsemanage: - arch: x86_64 epoch: null name: libsemanage release: 5.el9 source: rpm version: '3.6' libsepol: - arch: x86_64 epoch: null name: libsepol release: 3.el9 source: rpm version: '3.6' libsigsegv: - arch: x86_64 epoch: null name: libsigsegv release: 4.el9 source: rpm version: '2.13' libslirp: - arch: x86_64 epoch: null name: libslirp release: 8.el9 source: rpm version: 4.4.0 libsmartcols: - arch: x86_64 epoch: null name: libsmartcols release: 21.el9 source: rpm version: 2.37.4 libsolv: - arch: x86_64 epoch: null name: libsolv release: 3.el9 source: rpm version: 0.7.24 libsoup: - arch: x86_64 epoch: null name: libsoup release: 10.el9 source: rpm version: 2.72.0 libss: - arch: x86_64 epoch: null name: libss release: 8.el9 source: rpm version: 1.46.5 libssh: - arch: x86_64 epoch: null name: libssh release: 15.el9 source: rpm version: 0.10.4 libssh-config: - arch: noarch epoch: null name: libssh-config release: 15.el9 source: rpm version: 0.10.4 libsss_certmap: - arch: x86_64 epoch: null name: libsss_certmap release: 5.el9 source: rpm version: 2.9.7 libsss_idmap: - arch: x86_64 epoch: null name: libsss_idmap release: 5.el9 source: rpm version: 2.9.7 libsss_nss_idmap: - arch: x86_64 epoch: null name: libsss_nss_idmap release: 5.el9 source: rpm version: 2.9.7 libsss_sudo: - arch: x86_64 epoch: null name: libsss_sudo release: 5.el9 source: rpm version: 2.9.7 libstdc++: - arch: x86_64 epoch: null name: libstdc++ release: 14.el9 source: rpm version: 11.5.0 libstdc++-devel: - arch: x86_64 epoch: null name: libstdc++-devel release: 
14.el9 source: rpm version: 11.5.0 libstemmer: - arch: x86_64 epoch: null name: libstemmer release: 18.585svn.el9 source: rpm version: '0' libsysfs: - arch: x86_64 epoch: null name: libsysfs release: 11.el9 source: rpm version: 2.1.1 libtalloc: - arch: x86_64 epoch: null name: libtalloc release: 1.el9 source: rpm version: 2.4.3 libtasn1: - arch: x86_64 epoch: null name: libtasn1 release: 9.el9 source: rpm version: 4.16.0 libtdb: - arch: x86_64 epoch: null name: libtdb release: 1.el9 source: rpm version: 1.4.14 libteam: - arch: x86_64 epoch: null name: libteam release: 16.el9 source: rpm version: '1.31' libtevent: - arch: x86_64 epoch: null name: libtevent release: 1.el9 source: rpm version: 0.17.1 libtirpc: - arch: x86_64 epoch: null name: libtirpc release: 9.el9 source: rpm version: 1.3.3 libtool-ltdl: - arch: x86_64 epoch: null name: libtool-ltdl release: 46.el9 source: rpm version: 2.4.6 libunistring: - arch: x86_64 epoch: null name: libunistring release: 15.el9 source: rpm version: 0.9.10 liburing: - arch: x86_64 epoch: null name: liburing release: 1.el9 source: rpm version: '2.5' libuser: - arch: x86_64 epoch: null name: libuser release: 17.el9 source: rpm version: '0.63' libutempter: - arch: x86_64 epoch: null name: libutempter release: 6.el9 source: rpm version: 1.2.1 libuuid: - arch: x86_64 epoch: null name: libuuid release: 21.el9 source: rpm version: 2.37.4 libverto: - arch: x86_64 epoch: null name: libverto release: 3.el9 source: rpm version: 0.3.2 libverto-libev: - arch: x86_64 epoch: null name: libverto-libev release: 3.el9 source: rpm version: 0.3.2 libvirt-libs: - arch: x86_64 epoch: null name: libvirt-libs release: 1.el9 source: rpm version: 11.9.0 libwbclient: - arch: x86_64 epoch: 0 name: libwbclient release: 1.el9 source: rpm version: 4.23.3 libxcrypt: - arch: x86_64 epoch: null name: libxcrypt release: 3.el9 source: rpm version: 4.4.18 libxcrypt-compat: - arch: x86_64 epoch: null name: libxcrypt-compat release: 3.el9 source: rpm version: 4.4.18 libxcrypt-devel: - arch: x86_64 epoch: null name: libxcrypt-devel release: 3.el9 source: rpm version: 4.4.18 libxml2: - arch: x86_64 epoch: null name: libxml2 release: 14.el9 source: rpm version: 2.9.13 libxml2-devel: - arch: x86_64 epoch: null name: libxml2-devel release: 14.el9 source: rpm version: 2.9.13 libxslt: - arch: x86_64 epoch: null name: libxslt release: 12.el9 source: rpm version: 1.1.34 libxslt-devel: - arch: x86_64 epoch: null name: libxslt-devel release: 12.el9 source: rpm version: 1.1.34 libyaml: - arch: x86_64 epoch: null name: libyaml release: 7.el9 source: rpm version: 0.2.5 libzstd: - arch: x86_64 epoch: null name: libzstd release: 1.el9 source: rpm version: 1.5.5 llvm-filesystem: - arch: x86_64 epoch: null name: llvm-filesystem release: 1.el9 source: rpm version: 21.1.3 llvm-libs: - arch: x86_64 epoch: null name: llvm-libs release: 1.el9 source: rpm version: 21.1.3 lmdb-libs: - arch: x86_64 epoch: null name: lmdb-libs release: 3.el9 source: rpm version: 0.9.29 logrotate: - arch: x86_64 epoch: null name: logrotate release: 12.el9 source: rpm version: 3.18.0 lshw: - arch: x86_64 epoch: null name: lshw release: 3.el9 source: rpm version: B.02.20 lsscsi: - arch: x86_64 epoch: null name: lsscsi release: 6.el9 source: rpm version: '0.32' lua-libs: - arch: x86_64 epoch: null name: lua-libs release: 4.el9 source: rpm version: 5.4.4 lua-srpm-macros: - arch: noarch epoch: null name: lua-srpm-macros release: 6.el9 source: rpm version: '1' lz4-libs: - arch: x86_64 epoch: null name: lz4-libs release: 5.el9 source: rpm 
version: 1.9.3 lzo: - arch: x86_64 epoch: null name: lzo release: 7.el9 source: rpm version: '2.10' make: - arch: x86_64 epoch: 1 name: make release: 8.el9 source: rpm version: '4.3' man-db: - arch: x86_64 epoch: null name: man-db release: 9.el9 source: rpm version: 2.9.3 microcode_ctl: - arch: noarch epoch: 4 name: microcode_ctl release: 1.el9 source: rpm version: '20251111' mpdecimal: - arch: x86_64 epoch: null name: mpdecimal release: 3.el9 source: rpm version: 2.5.1 mpfr: - arch: x86_64 epoch: null name: mpfr release: 7.el9 source: rpm version: 4.1.0 ncurses: - arch: x86_64 epoch: null name: ncurses release: 12.20210508.el9 source: rpm version: '6.2' ncurses-base: - arch: noarch epoch: null name: ncurses-base release: 12.20210508.el9 source: rpm version: '6.2' ncurses-c++-libs: - arch: x86_64 epoch: null name: ncurses-c++-libs release: 12.20210508.el9 source: rpm version: '6.2' ncurses-devel: - arch: x86_64 epoch: null name: ncurses-devel release: 12.20210508.el9 source: rpm version: '6.2' ncurses-libs: - arch: x86_64 epoch: null name: ncurses-libs release: 12.20210508.el9 source: rpm version: '6.2' netavark: - arch: x86_64 epoch: 2 name: netavark release: 1.el9 source: rpm version: 1.16.0 nettle: - arch: x86_64 epoch: null name: nettle release: 1.el9 source: rpm version: 3.10.1 newt: - arch: x86_64 epoch: null name: newt release: 11.el9 source: rpm version: 0.52.21 nfs-utils: - arch: x86_64 epoch: 1 name: nfs-utils release: 39.el9 source: rpm version: 2.5.4 nftables: - arch: x86_64 epoch: 1 name: nftables release: 5.el9 source: rpm version: 1.0.9 npth: - arch: x86_64 epoch: null name: npth release: 8.el9 source: rpm version: '1.6' numactl-libs: - arch: x86_64 epoch: null name: numactl-libs release: 3.el9 source: rpm version: 2.0.19 ocaml-srpm-macros: - arch: noarch epoch: null name: ocaml-srpm-macros release: 6.el9 source: rpm version: '6' oddjob: - arch: x86_64 epoch: null name: oddjob release: 7.el9 source: rpm version: 0.34.7 oddjob-mkhomedir: - arch: x86_64 epoch: null name: oddjob-mkhomedir release: 7.el9 source: rpm version: 0.34.7 oniguruma: - arch: x86_64 epoch: null name: oniguruma release: 1.el9.6 source: rpm version: 6.9.6 openblas-srpm-macros: - arch: noarch epoch: null name: openblas-srpm-macros release: 11.el9 source: rpm version: '2' openldap: - arch: x86_64 epoch: null name: openldap release: 4.el9 source: rpm version: 2.6.8 openldap-devel: - arch: x86_64 epoch: null name: openldap-devel release: 4.el9 source: rpm version: 2.6.8 openssh: - arch: x86_64 epoch: null name: openssh release: 2.el9 source: rpm version: 9.9p1 openssh-clients: - arch: x86_64 epoch: null name: openssh-clients release: 2.el9 source: rpm version: 9.9p1 openssh-server: - arch: x86_64 epoch: null name: openssh-server release: 2.el9 source: rpm version: 9.9p1 openssl: - arch: x86_64 epoch: 1 name: openssl release: 6.el9 source: rpm version: 3.5.1 openssl-devel: - arch: x86_64 epoch: 1 name: openssl-devel release: 6.el9 source: rpm version: 3.5.1 openssl-fips-provider: - arch: x86_64 epoch: 1 name: openssl-fips-provider release: 6.el9 source: rpm version: 3.5.1 openssl-libs: - arch: x86_64 epoch: 1 name: openssl-libs release: 6.el9 source: rpm version: 3.5.1 os-prober: - arch: x86_64 epoch: null name: os-prober release: 12.el9 source: rpm version: '1.77' p11-kit: - arch: x86_64 epoch: null name: p11-kit release: 1.el9 source: rpm version: 0.25.10 p11-kit-trust: - arch: x86_64 epoch: null name: p11-kit-trust release: 1.el9 source: rpm version: 0.25.10 pam: - arch: x86_64 epoch: null name: pam release: 
26.el9 source: rpm version: 1.5.1 parted: - arch: x86_64 epoch: null name: parted release: 3.el9 source: rpm version: '3.5' passt: - arch: x86_64 epoch: null name: passt release: 2.el9 source: rpm version: 0^20250512.g8ec1341 passt-selinux: - arch: noarch epoch: null name: passt-selinux release: 2.el9 source: rpm version: 0^20250512.g8ec1341 passwd: - arch: x86_64 epoch: null name: passwd release: 12.el9 source: rpm version: '0.80' patch: - arch: x86_64 epoch: null name: patch release: 16.el9 source: rpm version: 2.7.6 pciutils-libs: - arch: x86_64 epoch: null name: pciutils-libs release: 7.el9 source: rpm version: 3.7.0 pcre: - arch: x86_64 epoch: null name: pcre release: 4.el9 source: rpm version: '8.44' pcre2: - arch: x86_64 epoch: null name: pcre2 release: 6.el9 source: rpm version: '10.40' pcre2-syntax: - arch: noarch epoch: null name: pcre2-syntax release: 6.el9 source: rpm version: '10.40' perl-AutoLoader: - arch: noarch epoch: 0 name: perl-AutoLoader release: 483.el9 source: rpm version: '5.74' perl-B: - arch: x86_64 epoch: 0 name: perl-B release: 483.el9 source: rpm version: '1.80' perl-Carp: - arch: noarch epoch: null name: perl-Carp release: 460.el9 source: rpm version: '1.50' perl-Class-Struct: - arch: noarch epoch: 0 name: perl-Class-Struct release: 483.el9 source: rpm version: '0.66' perl-Data-Dumper: - arch: x86_64 epoch: null name: perl-Data-Dumper release: 462.el9 source: rpm version: '2.174' perl-Digest: - arch: noarch epoch: null name: perl-Digest release: 4.el9 source: rpm version: '1.19' perl-Digest-MD5: - arch: x86_64 epoch: null name: perl-Digest-MD5 release: 4.el9 source: rpm version: '2.58' perl-DynaLoader: - arch: x86_64 epoch: 0 name: perl-DynaLoader release: 483.el9 source: rpm version: '1.47' perl-Encode: - arch: x86_64 epoch: 4 name: perl-Encode release: 462.el9 source: rpm version: '3.08' perl-Errno: - arch: x86_64 epoch: 0 name: perl-Errno release: 483.el9 source: rpm version: '1.30' perl-Error: - arch: noarch epoch: 1 name: perl-Error release: 7.el9 source: rpm version: '0.17029' perl-Exporter: - arch: noarch epoch: null name: perl-Exporter release: 461.el9 source: rpm version: '5.74' perl-Fcntl: - arch: x86_64 epoch: 0 name: perl-Fcntl release: 483.el9 source: rpm version: '1.13' perl-File-Basename: - arch: noarch epoch: 0 name: perl-File-Basename release: 483.el9 source: rpm version: '2.85' perl-File-Find: - arch: noarch epoch: 0 name: perl-File-Find release: 483.el9 source: rpm version: '1.37' perl-File-Path: - arch: noarch epoch: null name: perl-File-Path release: 4.el9 source: rpm version: '2.18' perl-File-Temp: - arch: noarch epoch: 1 name: perl-File-Temp release: 4.el9 source: rpm version: 0.231.100 perl-File-stat: - arch: noarch epoch: 0 name: perl-File-stat release: 483.el9 source: rpm version: '1.09' perl-FileHandle: - arch: noarch epoch: 0 name: perl-FileHandle release: 483.el9 source: rpm version: '2.03' perl-Getopt-Long: - arch: noarch epoch: 1 name: perl-Getopt-Long release: 4.el9 source: rpm version: '2.52' perl-Getopt-Std: - arch: noarch epoch: 0 name: perl-Getopt-Std release: 483.el9 source: rpm version: '1.12' perl-Git: - arch: noarch epoch: null name: perl-Git release: 1.el9 source: rpm version: 2.47.3 perl-HTTP-Tiny: - arch: noarch epoch: null name: perl-HTTP-Tiny release: 462.el9 source: rpm version: '0.076' perl-IO: - arch: x86_64 epoch: 0 name: perl-IO release: 483.el9 source: rpm version: '1.43' perl-IO-Socket-IP: - arch: noarch epoch: null name: perl-IO-Socket-IP release: 5.el9 source: rpm version: '0.41' perl-IO-Socket-SSL: - arch: 
noarch epoch: null name: perl-IO-Socket-SSL release: 2.el9 source: rpm version: '2.073' perl-IPC-Open3: - arch: noarch epoch: 0 name: perl-IPC-Open3 release: 483.el9 source: rpm version: '1.21' perl-MIME-Base64: - arch: x86_64 epoch: null name: perl-MIME-Base64 release: 4.el9 source: rpm version: '3.16' perl-Mozilla-CA: - arch: noarch epoch: null name: perl-Mozilla-CA release: 6.el9 source: rpm version: '20200520' perl-NDBM_File: - arch: x86_64 epoch: 0 name: perl-NDBM_File release: 483.el9 source: rpm version: '1.15' perl-Net-SSLeay: - arch: x86_64 epoch: null name: perl-Net-SSLeay release: 3.el9 source: rpm version: '1.94' perl-POSIX: - arch: x86_64 epoch: 0 name: perl-POSIX release: 483.el9 source: rpm version: '1.94' perl-PathTools: - arch: x86_64 epoch: null name: perl-PathTools release: 461.el9 source: rpm version: '3.78' perl-Pod-Escapes: - arch: noarch epoch: 1 name: perl-Pod-Escapes release: 460.el9 source: rpm version: '1.07' perl-Pod-Perldoc: - arch: noarch epoch: null name: perl-Pod-Perldoc release: 461.el9 source: rpm version: 3.28.01 perl-Pod-Simple: - arch: noarch epoch: 1 name: perl-Pod-Simple release: 4.el9 source: rpm version: '3.42' perl-Pod-Usage: - arch: noarch epoch: 4 name: perl-Pod-Usage release: 4.el9 source: rpm version: '2.01' perl-Scalar-List-Utils: - arch: x86_64 epoch: 4 name: perl-Scalar-List-Utils release: 462.el9 source: rpm version: '1.56' perl-SelectSaver: - arch: noarch epoch: 0 name: perl-SelectSaver release: 483.el9 source: rpm version: '1.02' perl-Socket: - arch: x86_64 epoch: 4 name: perl-Socket release: 4.el9 source: rpm version: '2.031' perl-Storable: - arch: x86_64 epoch: 1 name: perl-Storable release: 460.el9 source: rpm version: '3.21' perl-Symbol: - arch: noarch epoch: 0 name: perl-Symbol release: 483.el9 source: rpm version: '1.08' perl-Term-ANSIColor: - arch: noarch epoch: null name: perl-Term-ANSIColor release: 461.el9 source: rpm version: '5.01' perl-Term-Cap: - arch: noarch epoch: null name: perl-Term-Cap release: 460.el9 source: rpm version: '1.17' perl-TermReadKey: - arch: x86_64 epoch: null name: perl-TermReadKey release: 11.el9 source: rpm version: '2.38' perl-Text-ParseWords: - arch: noarch epoch: null name: perl-Text-ParseWords release: 460.el9 source: rpm version: '3.30' perl-Text-Tabs+Wrap: - arch: noarch epoch: null name: perl-Text-Tabs+Wrap release: 460.el9 source: rpm version: '2013.0523' perl-Time-Local: - arch: noarch epoch: 2 name: perl-Time-Local release: 7.el9 source: rpm version: '1.300' perl-URI: - arch: noarch epoch: null name: perl-URI release: 3.el9 source: rpm version: '5.09' perl-base: - arch: noarch epoch: 0 name: perl-base release: 483.el9 source: rpm version: '2.27' perl-constant: - arch: noarch epoch: null name: perl-constant release: 461.el9 source: rpm version: '1.33' perl-if: - arch: noarch epoch: 0 name: perl-if release: 483.el9 source: rpm version: 0.60.800 perl-interpreter: - arch: x86_64 epoch: 4 name: perl-interpreter release: 483.el9 source: rpm version: 5.32.1 perl-lib: - arch: x86_64 epoch: 0 name: perl-lib release: 483.el9 source: rpm version: '0.65' perl-libnet: - arch: noarch epoch: null name: perl-libnet release: 4.el9 source: rpm version: '3.13' perl-libs: - arch: x86_64 epoch: 4 name: perl-libs release: 483.el9 source: rpm version: 5.32.1 perl-mro: - arch: x86_64 epoch: 0 name: perl-mro release: 483.el9 source: rpm version: '1.23' perl-overload: - arch: noarch epoch: 0 name: perl-overload release: 483.el9 source: rpm version: '1.31' perl-overloading: - arch: noarch epoch: 0 name: perl-overloading 
release: 483.el9 source: rpm version: '0.02' perl-parent: - arch: noarch epoch: 1 name: perl-parent release: 460.el9 source: rpm version: '0.238' perl-podlators: - arch: noarch epoch: 1 name: perl-podlators release: 460.el9 source: rpm version: '4.14' perl-srpm-macros: - arch: noarch epoch: null name: perl-srpm-macros release: 41.el9 source: rpm version: '1' perl-subs: - arch: noarch epoch: 0 name: perl-subs release: 483.el9 source: rpm version: '1.03' perl-vars: - arch: noarch epoch: 0 name: perl-vars release: 483.el9 source: rpm version: '1.05' pigz: - arch: x86_64 epoch: null name: pigz release: 4.el9 source: rpm version: '2.5' pkgconf: - arch: x86_64 epoch: null name: pkgconf release: 10.el9 source: rpm version: 1.7.3 pkgconf-m4: - arch: noarch epoch: null name: pkgconf-m4 release: 10.el9 source: rpm version: 1.7.3 pkgconf-pkg-config: - arch: x86_64 epoch: null name: pkgconf-pkg-config release: 10.el9 source: rpm version: 1.7.3 podman: - arch: x86_64 epoch: 6 name: podman release: 2.el9 source: rpm version: 5.6.0 policycoreutils: - arch: x86_64 epoch: null name: policycoreutils release: 3.el9 source: rpm version: '3.6' policycoreutils-python-utils: - arch: noarch epoch: null name: policycoreutils-python-utils release: 3.el9 source: rpm version: '3.6' polkit: - arch: x86_64 epoch: null name: polkit release: 14.el9 source: rpm version: '0.117' polkit-libs: - arch: x86_64 epoch: null name: polkit-libs release: 14.el9 source: rpm version: '0.117' polkit-pkla-compat: - arch: x86_64 epoch: null name: polkit-pkla-compat release: 21.el9 source: rpm version: '0.1' popt: - arch: x86_64 epoch: null name: popt release: 8.el9 source: rpm version: '1.18' prefixdevname: - arch: x86_64 epoch: null name: prefixdevname release: 8.el9 source: rpm version: 0.1.0 procps-ng: - arch: x86_64 epoch: null name: procps-ng release: 14.el9 source: rpm version: 3.3.17 protobuf-c: - arch: x86_64 epoch: null name: protobuf-c release: 13.el9 source: rpm version: 1.3.3 psmisc: - arch: x86_64 epoch: null name: psmisc release: 3.el9 source: rpm version: '23.4' publicsuffix-list-dafsa: - arch: noarch epoch: null name: publicsuffix-list-dafsa release: 3.el9 source: rpm version: '20210518' pyproject-srpm-macros: - arch: noarch epoch: null name: pyproject-srpm-macros release: 1.el9 source: rpm version: 1.16.2 python-rpm-macros: - arch: noarch epoch: null name: python-rpm-macros release: 54.el9 source: rpm version: '3.9' python-srpm-macros: - arch: noarch epoch: null name: python-srpm-macros release: 54.el9 source: rpm version: '3.9' python-unversioned-command: - arch: noarch epoch: null name: python-unversioned-command release: 2.el9 source: rpm version: 3.9.25 python3: - arch: x86_64 epoch: null name: python3 release: 2.el9 source: rpm version: 3.9.25 python3-attrs: - arch: noarch epoch: null name: python3-attrs release: 7.el9 source: rpm version: 20.3.0 python3-audit: - arch: x86_64 epoch: null name: python3-audit release: 7.el9 source: rpm version: 3.1.5 python3-babel: - arch: noarch epoch: null name: python3-babel release: 2.el9 source: rpm version: 2.9.1 python3-cffi: - arch: x86_64 epoch: null name: python3-cffi release: 5.el9 source: rpm version: 1.14.5 python3-chardet: - arch: noarch epoch: null name: python3-chardet release: 5.el9 source: rpm version: 4.0.0 python3-configobj: - arch: noarch epoch: null name: python3-configobj release: 25.el9 source: rpm version: 5.0.6 python3-cryptography: - arch: x86_64 epoch: null name: python3-cryptography release: 5.el9 source: rpm version: 36.0.1 python3-dasbus: - arch: noarch 
epoch: null name: python3-dasbus release: 1.el9 source: rpm version: '1.7' python3-dateutil: - arch: noarch epoch: 1 name: python3-dateutil release: 1.el9 source: rpm version: 2.9.0.post0 python3-dbus: - arch: x86_64 epoch: null name: python3-dbus release: 2.el9 source: rpm version: 1.2.18 python3-devel: - arch: x86_64 epoch: null name: python3-devel release: 2.el9 source: rpm version: 3.9.25 python3-distro: - arch: noarch epoch: null name: python3-distro release: 7.el9 source: rpm version: 1.5.0 python3-dnf: - arch: noarch epoch: null name: python3-dnf release: 31.el9 source: rpm version: 4.14.0 python3-dnf-plugins-core: - arch: noarch epoch: null name: python3-dnf-plugins-core release: 24.el9 source: rpm version: 4.3.0 python3-enchant: - arch: noarch epoch: null name: python3-enchant release: 5.el9 source: rpm version: 3.2.0 python3-file-magic: - arch: noarch epoch: null name: python3-file-magic release: 16.el9 source: rpm version: '5.39' python3-gobject-base: - arch: x86_64 epoch: null name: python3-gobject-base release: 6.el9 source: rpm version: 3.40.1 python3-gobject-base-noarch: - arch: noarch epoch: null name: python3-gobject-base-noarch release: 6.el9 source: rpm version: 3.40.1 python3-gpg: - arch: x86_64 epoch: null name: python3-gpg release: 6.el9 source: rpm version: 1.15.1 python3-hawkey: - arch: x86_64 epoch: null name: python3-hawkey release: 16.el9 source: rpm version: 0.69.0 python3-idna: - arch: noarch epoch: null name: python3-idna release: 7.el9.1 source: rpm version: '2.10' python3-jinja2: - arch: noarch epoch: null name: python3-jinja2 release: 8.el9 source: rpm version: 2.11.3 python3-jmespath: - arch: noarch epoch: null name: python3-jmespath release: 1.el9 source: rpm version: 1.0.1 python3-jsonpatch: - arch: noarch epoch: null name: python3-jsonpatch release: 16.el9 source: rpm version: '1.21' python3-jsonpointer: - arch: noarch epoch: null name: python3-jsonpointer release: 4.el9 source: rpm version: '2.0' python3-jsonschema: - arch: noarch epoch: null name: python3-jsonschema release: 13.el9 source: rpm version: 3.2.0 python3-libcomps: - arch: x86_64 epoch: null name: python3-libcomps release: 1.el9 source: rpm version: 0.1.18 python3-libdnf: - arch: x86_64 epoch: null name: python3-libdnf release: 16.el9 source: rpm version: 0.69.0 python3-libs: - arch: x86_64 epoch: null name: python3-libs release: 2.el9 source: rpm version: 3.9.25 python3-libselinux: - arch: x86_64 epoch: null name: python3-libselinux release: 3.el9 source: rpm version: '3.6' python3-libsemanage: - arch: x86_64 epoch: null name: python3-libsemanage release: 5.el9 source: rpm version: '3.6' python3-libvirt: - arch: x86_64 epoch: null name: python3-libvirt release: 1.el9 source: rpm version: 11.9.0 python3-libxml2: - arch: x86_64 epoch: null name: python3-libxml2 release: 14.el9 source: rpm version: 2.9.13 python3-lxml: - arch: x86_64 epoch: null name: python3-lxml release: 3.el9 source: rpm version: 4.6.5 python3-markupsafe: - arch: x86_64 epoch: null name: python3-markupsafe release: 12.el9 source: rpm version: 1.1.1 python3-netaddr: - arch: noarch epoch: null name: python3-netaddr release: 3.el9 source: rpm version: 0.10.1 python3-netifaces: - arch: x86_64 epoch: null name: python3-netifaces release: 15.el9 source: rpm version: 0.10.6 python3-oauthlib: - arch: noarch epoch: null name: python3-oauthlib release: 5.el9 source: rpm version: 3.1.1 python3-packaging: - arch: noarch epoch: null name: python3-packaging release: 5.el9 source: rpm version: '20.9' python3-pexpect: - arch: noarch 
epoch: null name: python3-pexpect release: 7.el9 source: rpm version: 4.8.0 python3-pip: - arch: noarch epoch: null name: python3-pip release: 1.el9 source: rpm version: 21.3.1 python3-pip-wheel: - arch: noarch epoch: null name: python3-pip-wheel release: 1.el9 source: rpm version: 21.3.1 python3-ply: - arch: noarch epoch: null name: python3-ply release: 14.el9 source: rpm version: '3.11' python3-policycoreutils: - arch: noarch epoch: null name: python3-policycoreutils release: 3.el9 source: rpm version: '3.6' python3-prettytable: - arch: noarch epoch: null name: python3-prettytable release: 27.el9 source: rpm version: 0.7.2 python3-ptyprocess: - arch: noarch epoch: null name: python3-ptyprocess release: 12.el9 source: rpm version: 0.6.0 python3-pycparser: - arch: noarch epoch: null name: python3-pycparser release: 6.el9 source: rpm version: '2.20' python3-pyparsing: - arch: noarch epoch: null name: python3-pyparsing release: 9.el9 source: rpm version: 2.4.7 python3-pyrsistent: - arch: x86_64 epoch: null name: python3-pyrsistent release: 8.el9 source: rpm version: 0.17.3 python3-pyserial: - arch: noarch epoch: null name: python3-pyserial release: 12.el9 source: rpm version: '3.4' python3-pysocks: - arch: noarch epoch: null name: python3-pysocks release: 12.el9 source: rpm version: 1.7.1 python3-pytz: - arch: noarch epoch: null name: python3-pytz release: 5.el9 source: rpm version: '2021.1' python3-pyyaml: - arch: x86_64 epoch: null name: python3-pyyaml release: 6.el9 source: rpm version: 5.4.1 python3-requests: - arch: noarch epoch: null name: python3-requests release: 10.el9 source: rpm version: 2.25.1 python3-resolvelib: - arch: noarch epoch: null name: python3-resolvelib release: 5.el9 source: rpm version: 0.5.4 python3-rpm: - arch: x86_64 epoch: null name: python3-rpm release: 39.el9 source: rpm version: 4.16.1.3 python3-rpm-generators: - arch: noarch epoch: null name: python3-rpm-generators release: 9.el9 source: rpm version: '12' python3-rpm-macros: - arch: noarch epoch: null name: python3-rpm-macros release: 54.el9 source: rpm version: '3.9' python3-setools: - arch: x86_64 epoch: null name: python3-setools release: 1.el9 source: rpm version: 4.4.4 python3-setuptools: - arch: noarch epoch: null name: python3-setuptools release: 15.el9 source: rpm version: 53.0.0 python3-setuptools-wheel: - arch: noarch epoch: null name: python3-setuptools-wheel release: 15.el9 source: rpm version: 53.0.0 python3-six: - arch: noarch epoch: null name: python3-six release: 9.el9 source: rpm version: 1.15.0 python3-systemd: - arch: x86_64 epoch: null name: python3-systemd release: 19.el9 source: rpm version: '234' python3-urllib3: - arch: noarch epoch: null name: python3-urllib3 release: 6.el9 source: rpm version: 1.26.5 python3.12: - arch: x86_64 epoch: null name: python3.12 release: 1.el9 source: rpm version: 3.12.12 python3.12-libs: - arch: x86_64 epoch: null name: python3.12-libs release: 1.el9 source: rpm version: 3.12.12 python3.12-pip: - arch: noarch epoch: null name: python3.12-pip release: 5.el9 source: rpm version: 23.2.1 python3.12-pip-wheel: - arch: noarch epoch: null name: python3.12-pip-wheel release: 5.el9 source: rpm version: 23.2.1 python3.12-setuptools: - arch: noarch epoch: null name: python3.12-setuptools release: 5.el9 source: rpm version: 68.2.2 qemu-guest-agent: - arch: x86_64 epoch: 17 name: qemu-guest-agent release: 4.el9 source: rpm version: 10.1.0 qt5-srpm-macros: - arch: noarch epoch: null name: qt5-srpm-macros release: 1.el9 source: rpm version: 5.15.9 quota: - arch: x86_64 
epoch: 1 name: quota release: 4.el9 source: rpm version: '4.09' quota-nls: - arch: noarch epoch: 1 name: quota-nls release: 4.el9 source: rpm version: '4.09' readline: - arch: x86_64 epoch: null name: readline release: 4.el9 source: rpm version: '8.1' readline-devel: - arch: x86_64 epoch: null name: readline-devel release: 4.el9 source: rpm version: '8.1' redhat-rpm-config: - arch: noarch epoch: null name: redhat-rpm-config release: 1.el9 source: rpm version: '210' rootfiles: - arch: noarch epoch: null name: rootfiles release: 35.el9 source: rpm version: '8.1' rpcbind: - arch: x86_64 epoch: null name: rpcbind release: 7.el9 source: rpm version: 1.2.6 rpm: - arch: x86_64 epoch: null name: rpm release: 39.el9 source: rpm version: 4.16.1.3 rpm-build: - arch: x86_64 epoch: null name: rpm-build release: 39.el9 source: rpm version: 4.16.1.3 rpm-build-libs: - arch: x86_64 epoch: null name: rpm-build-libs release: 39.el9 source: rpm version: 4.16.1.3 rpm-libs: - arch: x86_64 epoch: null name: rpm-libs release: 39.el9 source: rpm version: 4.16.1.3 rpm-plugin-audit: - arch: x86_64 epoch: null name: rpm-plugin-audit release: 39.el9 source: rpm version: 4.16.1.3 rpm-plugin-selinux: - arch: x86_64 epoch: null name: rpm-plugin-selinux release: 39.el9 source: rpm version: 4.16.1.3 rpm-plugin-systemd-inhibit: - arch: x86_64 epoch: null name: rpm-plugin-systemd-inhibit release: 39.el9 source: rpm version: 4.16.1.3 rpm-sign: - arch: x86_64 epoch: null name: rpm-sign release: 39.el9 source: rpm version: 4.16.1.3 rpm-sign-libs: - arch: x86_64 epoch: null name: rpm-sign-libs release: 39.el9 source: rpm version: 4.16.1.3 rpmlint: - arch: noarch epoch: null name: rpmlint release: 19.el9 source: rpm version: '1.11' rsync: - arch: x86_64 epoch: null name: rsync release: 4.el9 source: rpm version: 3.2.5 rsyslog: - arch: x86_64 epoch: null name: rsyslog release: 2.el9 source: rpm version: 8.2510.0 rsyslog-logrotate: - arch: x86_64 epoch: null name: rsyslog-logrotate release: 2.el9 source: rpm version: 8.2510.0 ruby: - arch: x86_64 epoch: null name: ruby release: 165.el9 source: rpm version: 3.0.7 ruby-default-gems: - arch: noarch epoch: null name: ruby-default-gems release: 165.el9 source: rpm version: 3.0.7 ruby-devel: - arch: x86_64 epoch: null name: ruby-devel release: 165.el9 source: rpm version: 3.0.7 ruby-libs: - arch: x86_64 epoch: null name: ruby-libs release: 165.el9 source: rpm version: 3.0.7 rubygem-bigdecimal: - arch: x86_64 epoch: null name: rubygem-bigdecimal release: 165.el9 source: rpm version: 3.0.0 rubygem-bundler: - arch: noarch epoch: null name: rubygem-bundler release: 165.el9 source: rpm version: 2.2.33 rubygem-io-console: - arch: x86_64 epoch: null name: rubygem-io-console release: 165.el9 source: rpm version: 0.5.7 rubygem-json: - arch: x86_64 epoch: null name: rubygem-json release: 165.el9 source: rpm version: 2.5.1 rubygem-psych: - arch: x86_64 epoch: null name: rubygem-psych release: 165.el9 source: rpm version: 3.3.2 rubygem-rdoc: - arch: noarch epoch: null name: rubygem-rdoc release: 165.el9 source: rpm version: 6.3.4.1 rubygems: - arch: noarch epoch: null name: rubygems release: 165.el9 source: rpm version: 3.2.33 rust-srpm-macros: - arch: noarch epoch: null name: rust-srpm-macros release: 4.el9 source: rpm version: '17' samba-client-libs: - arch: x86_64 epoch: 0 name: samba-client-libs release: 1.el9 source: rpm version: 4.23.3 samba-common: - arch: noarch epoch: 0 name: samba-common release: 1.el9 source: rpm version: 4.23.3 samba-common-libs: - arch: x86_64 epoch: 0 name: 
samba-common-libs release: 1.el9 source: rpm version: 4.23.3 sed: - arch: x86_64 epoch: null name: sed release: 9.el9 source: rpm version: '4.8' selinux-policy: - arch: noarch epoch: null name: selinux-policy release: 1.el9 source: rpm version: 38.1.68 selinux-policy-targeted: - arch: noarch epoch: null name: selinux-policy-targeted release: 1.el9 source: rpm version: 38.1.68 setroubleshoot-plugins: - arch: noarch epoch: null name: setroubleshoot-plugins release: 4.el9 source: rpm version: 3.3.14 setroubleshoot-server: - arch: x86_64 epoch: null name: setroubleshoot-server release: 2.el9 source: rpm version: 3.3.35 setup: - arch: noarch epoch: null name: setup release: 10.el9 source: rpm version: 2.13.7 sg3_utils: - arch: x86_64 epoch: null name: sg3_utils release: 10.el9 source: rpm version: '1.47' sg3_utils-libs: - arch: x86_64 epoch: null name: sg3_utils-libs release: 10.el9 source: rpm version: '1.47' shadow-utils: - arch: x86_64 epoch: 2 name: shadow-utils release: 15.el9 source: rpm version: '4.9' shadow-utils-subid: - arch: x86_64 epoch: 2 name: shadow-utils-subid release: 15.el9 source: rpm version: '4.9' shared-mime-info: - arch: x86_64 epoch: null name: shared-mime-info release: 5.el9 source: rpm version: '2.1' slang: - arch: x86_64 epoch: null name: slang release: 11.el9 source: rpm version: 2.3.2 slirp4netns: - arch: x86_64 epoch: null name: slirp4netns release: 1.el9 source: rpm version: 1.3.3 snappy: - arch: x86_64 epoch: null name: snappy release: 8.el9 source: rpm version: 1.1.8 sos: - arch: noarch epoch: null name: sos release: 1.el9 source: rpm version: 4.10.1 sqlite-libs: - arch: x86_64 epoch: null name: sqlite-libs release: 9.el9 source: rpm version: 3.34.1 squashfs-tools: - arch: x86_64 epoch: null name: squashfs-tools release: 10.git1.el9 source: rpm version: '4.4' sscg: - arch: x86_64 epoch: null name: sscg release: 2.el9 source: rpm version: 4.0.0 sshpass: - arch: x86_64 epoch: null name: sshpass release: 4.el9 source: rpm version: '1.09' sssd-client: - arch: x86_64 epoch: null name: sssd-client release: 5.el9 source: rpm version: 2.9.7 sssd-common: - arch: x86_64 epoch: null name: sssd-common release: 5.el9 source: rpm version: 2.9.7 sssd-kcm: - arch: x86_64 epoch: null name: sssd-kcm release: 5.el9 source: rpm version: 2.9.7 sssd-nfs-idmap: - arch: x86_64 epoch: null name: sssd-nfs-idmap release: 5.el9 source: rpm version: 2.9.7 sudo: - arch: x86_64 epoch: null name: sudo release: 13.el9 source: rpm version: 1.9.5p2 systemd: - arch: x86_64 epoch: null name: systemd release: 59.el9 source: rpm version: '252' systemd-devel: - arch: x86_64 epoch: null name: systemd-devel release: 59.el9 source: rpm version: '252' systemd-libs: - arch: x86_64 epoch: null name: systemd-libs release: 59.el9 source: rpm version: '252' systemd-pam: - arch: x86_64 epoch: null name: systemd-pam release: 59.el9 source: rpm version: '252' systemd-rpm-macros: - arch: noarch epoch: null name: systemd-rpm-macros release: 59.el9 source: rpm version: '252' systemd-udev: - arch: x86_64 epoch: null name: systemd-udev release: 59.el9 source: rpm version: '252' tar: - arch: x86_64 epoch: 2 name: tar release: 7.el9 source: rpm version: '1.34' tcl: - arch: x86_64 epoch: 1 name: tcl release: 7.el9 source: rpm version: 8.6.10 tcpdump: - arch: x86_64 epoch: 14 name: tcpdump release: 9.el9 source: rpm version: 4.99.0 teamd: - arch: x86_64 epoch: null name: teamd release: 16.el9 source: rpm version: '1.31' time: - arch: x86_64 epoch: null name: time release: 18.el9 source: rpm version: '1.9' tmux: - arch: 
x86_64 epoch: null name: tmux release: 5.el9 source: rpm version: 3.2a tpm2-tss: - arch: x86_64 epoch: null name: tpm2-tss release: 1.el9 source: rpm version: 3.2.3 traceroute: - arch: x86_64 epoch: 3 name: traceroute release: 1.el9 source: rpm version: 2.1.1 tzdata: - arch: noarch epoch: null name: tzdata release: 2.el9 source: rpm version: 2025b unzip: - arch: x86_64 epoch: null name: unzip release: 59.el9 source: rpm version: '6.0' userspace-rcu: - arch: x86_64 epoch: null name: userspace-rcu release: 6.el9 source: rpm version: 0.12.1 util-linux: - arch: x86_64 epoch: null name: util-linux release: 21.el9 source: rpm version: 2.37.4 util-linux-core: - arch: x86_64 epoch: null name: util-linux-core release: 21.el9 source: rpm version: 2.37.4 vim-minimal: - arch: x86_64 epoch: 2 name: vim-minimal release: 23.el9 source: rpm version: 8.2.2637 webkit2gtk3-jsc: - arch: x86_64 epoch: null name: webkit2gtk3-jsc release: 1.el9 source: rpm version: 2.50.1 wget: - arch: x86_64 epoch: null name: wget release: 8.el9 source: rpm version: 1.21.1 which: - arch: x86_64 epoch: null name: which release: 30.el9 source: rpm version: '2.21' xfsprogs: - arch: x86_64 epoch: null name: xfsprogs release: 7.el9 source: rpm version: 6.4.0 xz: - arch: x86_64 epoch: null name: xz release: 8.el9 source: rpm version: 5.2.5 xz-devel: - arch: x86_64 epoch: null name: xz-devel release: 8.el9 source: rpm version: 5.2.5 xz-libs: - arch: x86_64 epoch: null name: xz-libs release: 8.el9 source: rpm version: 5.2.5 yajl: - arch: x86_64 epoch: null name: yajl release: 25.el9 source: rpm version: 2.1.0 yum: - arch: noarch epoch: null name: yum release: 31.el9 source: rpm version: 4.14.0 yum-utils: - arch: noarch epoch: null name: yum-utils release: 24.el9 source: rpm version: 4.3.0 zip: - arch: x86_64 epoch: null name: zip release: 35.el9 source: rpm version: '3.0' zlib: - arch: x86_64 epoch: null name: zlib release: 41.el9 source: rpm version: 1.2.11 zlib-devel: - arch: x86_64 epoch: null name: zlib-devel release: 41.el9 source: rpm version: 1.2.11 zstd: - arch: x86_64 epoch: null name: zstd release: 1.el9 source: rpm version: 1.5.5 home/zuul/zuul-output/logs/selinux-listing.log0000644000175000017500000045242115115611540020736 0ustar zuulzuul/home/zuul/ci-framework-data: total 8 drwxr-xr-x. 10 zuul zuul unconfined_u:object_r:user_home_t:s0 4096 Dec 8 18:05 artifacts drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 4096 Dec 8 18:05 logs drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 24 Dec 8 17:52 tmp drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 6 Dec 8 17:53 volumes /home/zuul/ci-framework-data/artifacts: total 516 drwxrwxrwx. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 33 Dec 8 18:05 ansible_facts.2025-12-08_18-05 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 19838 Dec 8 18:02 ansible-facts.yml -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 350433 Dec 8 18:05 ansible-vars.yml drwxr-xr-x. 2 root root unconfined_u:object_r:user_home_t:s0 33 Dec 8 18:05 ci-env -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 135 Dec 8 18:02 ci_script_000_check_for_oc.sh -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 239 Dec 8 18:04 ci_script_000_copy_logs_from_crc.sh -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 213 Dec 8 17:53 ci_script_000_fetch_openshift.sh -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 659 Dec 8 18:04 ci_script_000_prepare_root_ssh.sh -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 722 Dec 8 18:02 ci_script_000_run_openstack_must_gather.sh -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 293 Dec 8 17:53 ci_script_001_login_into_openshift_internal.sh -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 159 Dec 8 18:02 hosts -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 77118 Dec 8 18:02 installed-packages.yml -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 1647 Dec 8 18:02 ip-network.txt drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 23 Dec 8 17:53 manifests drwxr-xr-x. 2 root root unconfined_u:object_r:user_home_t:s0 70 Dec 8 18:05 NetworkManager drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 120 Dec 8 18:05 parameters drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 4096 Dec 8 18:05 repositories -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 106 Dec 8 18:02 resolv.conf drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 33 Dec 8 17:53 roles drwxr-xr-x. 2 root root unconfined_u:object_r:user_home_t:s0 4096 Dec 8 18:05 yum_repos -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 30286 Dec 8 18:05 zuul_inventory.yml /home/zuul/ci-framework-data/artifacts/ansible_facts.2025-12-08_18-05: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 23 Dec 8 18:05 ansible_facts_cache /home/zuul/ci-framework-data/artifacts/ansible_facts.2025-12-08_18-05/ansible_facts_cache: total 60 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 57735 Dec 8 18:05 localhost /home/zuul/ci-framework-data/artifacts/ci-env: total 4 -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 1226 Dec 8 18:02 networking-info.yml /home/zuul/ci-framework-data/artifacts/manifests: total 0 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 16 Dec 8 17:53 openstack /home/zuul/ci-framework-data/artifacts/manifests/openstack: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 6 Dec 8 17:53 cr /home/zuul/ci-framework-data/artifacts/manifests/openstack/cr: total 0 /home/zuul/ci-framework-data/artifacts/NetworkManager: total 8 -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 331 Dec 8 18:02 ci-private-network.nmconnection -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 178 Dec 8 18:02 ens3.nmconnection /home/zuul/ci-framework-data/artifacts/parameters: total 56 -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 1131 Dec 8 18:05 custom-params.yml -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 28012 Dec 8 18:05 install-yamls-params.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 288 Dec 8 17:53 openshift-login-params.yml -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 20218 Dec 8 18:05 zuul-params.yml /home/zuul/ci-framework-data/artifacts/repositories: total 32 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1658 Dec 8 17:52 delorean-antelope-testing.repo -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 5901 Dec 8 17:52 delorean.repo -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 33 Dec 8 17:52 delorean.repo.md5 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 206 Dec 8 17:52 repo-setup-centos-appstream.repo -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 196 Dec 8 17:52 repo-setup-centos-baseos.repo -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 226 Dec 8 17:52 repo-setup-centos-highavailability.repo -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 201 Dec 8 17:52 repo-setup-centos-powertools.repo /home/zuul/ci-framework-data/artifacts/roles: total 0 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 17:53 install_yamls_makes /home/zuul/ci-framework-data/artifacts/roles/install_yamls_makes: total 20 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 16384 Dec 8 18:05 tasks /home/zuul/ci-framework-data/artifacts/roles/install_yamls_makes/tasks: total 1256 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 790 Dec 8 17:53 make_all.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_ansibleee_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1090 Dec 8 17:53 make_ansibleee_kuttl_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1045 Dec 8 17:53 make_ansibleee_kuttl_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_ansibleee_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_ansibleee_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_ansibleee_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_ansibleee.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1234 Dec 8 17:53 make_attach_default_interface_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1114 Dec 8 17:53 make_attach_default_interface.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_barbican_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1090 Dec 8 17:53 make_barbican_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1045 Dec 8 17:53 make_barbican_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1105 Dec 8 17:53 make_barbican_deploy_validate.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_barbican_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1015 Dec 8 17:53 make_barbican_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_barbican_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_barbican_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 865 Dec 8 17:53 make_barbican.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_baremetal_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_baremetal_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_baremetal.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1219 Dec 8 17:53 make_bmaas_baremetal_net_nad_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1099 Dec 8 17:53 make_bmaas_baremetal_net_nad.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 949 Dec 8 17:53 make_bmaas_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1234 Dec 8 17:53 make_bmaas_crc_attach_network_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1114 Dec 8 17:53 make_bmaas_crc_attach_network.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1264 Dec 8 17:53 make_bmaas_crc_baremetal_bridge_cleanup.yml -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 1144 Dec 8 17:53 make_bmaas_crc_baremetal_bridge.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1129 Dec 8 17:53 make_bmaas_generate_nodes_yaml.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1069 Dec 8 17:53 make_bmaas_metallb_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 949 Dec 8 17:53 make_bmaas_metallb.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1069 Dec 8 17:53 make_bmaas_network_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 949 Dec 8 17:53 make_bmaas_network.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1444 Dec 8 17:53 make_bmaas_route_crc_and_crc_bmaas_networks_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1324 Dec 8 17:53 make_bmaas_route_crc_and_crc_bmaas_networks.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1174 Dec 8 17:53 make_bmaas_sushy_emulator_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1129 Dec 8 17:53 make_bmaas_sushy_emulator_wait.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1054 Dec 8 17:53 make_bmaas_sushy_emulator.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1129 Dec 8 17:53 make_bmaas_virtual_bms_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1009 Dec 8 17:53 make_bmaas_virtual_bms.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 829 Dec 8 17:53 make_bmaas.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_ceph_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_ceph_help.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 805 Dec 8 17:53 make_ceph.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_certmanager_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_certmanager.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 949 Dec 8 17:53 make_cifmw_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 949 Dec 8 17:53 make_cifmw_prepare.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_cinder_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_cinder_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1015 Dec 8 17:53 make_cinder_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_cinder_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_cinder_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_cinder_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_cinder_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 835 Dec 8 17:53 make_cinder.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 850 Dec 8 17:53 make_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1294 Dec 8 17:53 make_crc_attach_default_interface_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1174 Dec 8 17:53 make_crc_attach_default_interface.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_crc_bmo_cleanup.yml -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_crc_bmo_setup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 919 Dec 8 17:53 make_crc_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 889 Dec 8 17:53 make_crc_scrub.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1225 Dec 8 17:53 make_crc_storage_cleanup_with_retries.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_crc_storage_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_crc_storage_release.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1105 Dec 8 17:53 make_crc_storage_with_retries.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_crc_storage.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 799 Dec 8 17:53 make_crc.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_designate_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1105 Dec 8 17:53 make_designate_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_designate_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_designate_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_designate_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_designate_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_designate_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_designate.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1015 Dec 8 17:53 make_dns_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_dns_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 895 Dec 8 17:53 make_dns_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 964 Dec 8 17:53 make_download_tools.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1039 Dec 8 17:53 make_edpm_ansible_runner.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1084 Dec 8 17:53 make_edpm_baremetal_compute.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1024 Dec 8 17:53 make_edpm_compute_bootc.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1054 Dec 8 17:53 make_edpm_compute_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1024 Dec 8 17:53 make_edpm_compute_repos.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1009 Dec 8 17:53 make_edpm_computes_bgp.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 934 Dec 8 17:53 make_edpm_compute.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1135 Dec 8 17:53 make_edpm_deploy_baremetal_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_edpm_deploy_baremetal.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_edpm_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1120 Dec 8 17:53 make_edpm_deploy_generate_keys.yml -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 1054 Dec 8 17:53 make_edpm_deploy_instance.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1180 Dec 8 17:53 make_edpm_deploy_networker_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1135 Dec 8 17:53 make_edpm_deploy_networker_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_edpm_deploy_networker.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_edpm_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_edpm_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1084 Dec 8 17:53 make_edpm_networker_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 964 Dec 8 17:53 make_edpm_networker.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1105 Dec 8 17:53 make_edpm_nova_discover_hosts.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1210 Dec 8 17:53 make_edpm_patch_ansible_runner_image.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_edpm_register_dns.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1135 Dec 8 17:53 make_edpm_wait_deploy_baremetal.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_edpm_wait_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_glance_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_glance_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1015 Dec 8 17:53 make_glance_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_glance_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_glance_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_glance_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_glance_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 835 Dec 8 17:53 make_glance.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_heat_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_heat_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_heat_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_heat_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_heat_kuttl_crc.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_heat_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 895 Dec 8 17:53 make_heat_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_heat_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 805 Dec 8 17:53 make_heat.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 814 Dec 8 17:53 make_help.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_horizon_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1075 Dec 8 17:53 make_horizon_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_horizon_deploy_prep.yml -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_horizon_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_horizon_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_horizon_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_horizon_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 850 Dec 8 17:53 make_horizon.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_infra_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_infra_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_infra_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 895 Dec 8 17:53 make_infra_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 820 Dec 8 17:53 make_infra.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_input_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 820 Dec 8 17:53 make_input.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 994 Dec 8 17:53 make_ipv6_lab_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1189 Dec 8 17:53 make_ipv6_lab_nat64_router_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1069 Dec 8 17:53 make_ipv6_lab_nat64_router.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1114 Dec 8 17:53 make_ipv6_lab_network_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 994 Dec 8 17:53 make_ipv6_lab_network.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1054 Dec 8 17:53 make_ipv6_lab_sno_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 934 Dec 8 17:53 make_ipv6_lab_sno.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 874 Dec 8 17:53 make_ipv6_lab.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_ironic_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_ironic_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1015 Dec 8 17:53 make_ironic_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_ironic_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_ironic_kuttl_crc.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_ironic_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_ironic_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_ironic_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 835 Dec 8 17:53 make_ironic.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_keystone_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1090 Dec 8 17:53 make_keystone_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1045 Dec 8 17:53 make_keystone_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_keystone_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1015 Dec 8 17:53 make_keystone_kuttl_run.yml -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_keystone_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_keystone_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 865 Dec 8 17:53 make_keystone.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1045 Dec 8 17:53 make_kuttl_common_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_kuttl_common_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_kuttl_db_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_kuttl_db_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_loki_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_loki_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_loki_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 805 Dec 8 17:53 make_loki.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 805 Dec 8 17:53 make_lvms.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_manila_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_manila_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1015 Dec 8 17:53 make_manila_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_manila_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_manila_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_manila_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_manila_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 835 Dec 8 17:53 make_manila.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1045 Dec 8 17:53 make_mariadb_chainsaw_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_mariadb_chainsaw.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_mariadb_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1075 Dec 8 17:53 make_mariadb_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_mariadb_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_mariadb_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_mariadb_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_mariadb_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 850 Dec 8 17:53 make_mariadb.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1105 Dec 8 17:53 make_memcached_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_memcached_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_memcached_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_metallb_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1075 Dec 8 17:53 make_metallb_config_cleanup.yml -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_metallb_config.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 850 Dec 8 17:53 make_metallb.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_namespace_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_namespace.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_netattach_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_netattach.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1105 Dec 8 17:53 make_netconfig_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_netconfig_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_netconfig_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_netobserv_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1105 Dec 8 17:53 make_netobserv_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_netobserv_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_netobserv.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1234 Dec 8 17:53 make_network_isolation_bridge_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1114 Dec 8 17:53 make_network_isolation_bridge.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_neutron_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1075 Dec 8 17:53 make_neutron_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_neutron_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_neutron_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_neutron_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_neutron_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_neutron_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 850 Dec 8 17:53 make_neutron.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 919 Dec 8 17:53 make_nfs_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 799 Dec 8 17:53 make_nfs.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 850 Dec 8 17:53 make_nmstate.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_nncp_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 805 Dec 8 17:53 make_nncp.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_nova_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_nova_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_nova_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_nova_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_nova_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 805 Dec 8 17:53 make_nova.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_octavia_cleanup.yml -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 1075 Dec 8 17:53 make_octavia_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_octavia_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_octavia_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_octavia_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_octavia_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_octavia_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 850 Dec 8 17:53 make_octavia.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_openstack_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1075 Dec 8 17:53 make_openstack_crds_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_openstack_crds.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1105 Dec 8 17:53 make_openstack_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_openstack_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_openstack_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_openstack_init.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_openstack_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_openstack_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1090 Dec 8 17:53 make_openstack_patch_version.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_openstack_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_openstack_repo.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1045 Dec 8 17:53 make_openstack_update_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_openstack_wait_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_openstack_wait.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_openstack.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1015 Dec 8 17:53 make_operator_namespace.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_ovn_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1015 Dec 8 17:53 make_ovn_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_ovn_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 895 Dec 8 17:53 make_ovn_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_ovn_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_ovn_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 865 Dec 8 17:53 make_ovn_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 790 Dec 8 17:53 make_ovn.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_placement_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1105 Dec 8 17:53 make_placement_deploy_cleanup.yml -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_placement_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_placement_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_placement_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_placement_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_placement_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_placement.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_rabbitmq_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1090 Dec 8 17:53 make_rabbitmq_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1045 Dec 8 17:53 make_rabbitmq_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_rabbitmq_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_rabbitmq_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 865 Dec 8 17:53 make_rabbitmq.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1045 Dec 8 17:53 make_redis_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_redis_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_redis_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_rook_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_rook_crc_disk.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_rook_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_rook_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_rook_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 805 Dec 8 17:53 make_rook.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1090 Dec 8 17:53 make_set_slower_etcd_profile.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1024 Dec 8 17:53 make_standalone_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1009 Dec 8 17:53 make_standalone_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1009 Dec 8 17:53 make_standalone_revert.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1039 Dec 8 17:53 make_standalone_snapshot.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 979 Dec 8 17:53 make_standalone_sync.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 904 Dec 8 17:53 make_standalone.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_swift_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1045 Dec 8 17:53 make_swift_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_swift_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 925 Dec 8 17:53 make_swift_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_swift_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 910 Dec 8 17:53 make_swift_kuttl.yml -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 895 Dec 8 17:53 make_swift_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 820 Dec 8 17:53 make_swift.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1000 Dec 8 17:53 make_telemetry_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1105 Dec 8 17:53 make_telemetry_deploy_cleanup.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1060 Dec 8 17:53 make_telemetry_deploy_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 985 Dec 8 17:53 make_telemetry_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1030 Dec 8 17:53 make_telemetry_kuttl_run.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_telemetry_kuttl.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 955 Dec 8 17:53 make_telemetry_prep.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 880 Dec 8 17:53 make_telemetry.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 964 Dec 8 17:53 make_tripleo_deploy.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 970 Dec 8 17:53 make_update_services.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 940 Dec 8 17:53 make_update_system.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1045 Dec 8 17:53 make_validate_marketplace.yml -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 805 Dec 8 17:53 make_wait.yml /home/zuul/ci-framework-data/artifacts/yum_repos: total 32 -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 1658 Dec 8 18:02 delorean-antelope-testing.repo -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 5901 Dec 8 18:02 delorean.repo -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 33 Dec 8 18:02 delorean.repo.md5 -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 206 Dec 8 18:02 repo-setup-centos-appstream.repo -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 196 Dec 8 18:02 repo-setup-centos-baseos.repo -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 226 Dec 8 18:02 repo-setup-centos-highavailability.repo -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 201 Dec 8 18:02 repo-setup-centos-powertools.repo /home/zuul/ci-framework-data/logs: total 344 drwxrwxr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 25 Dec 8 18:05 2025-12-08_18-02 -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 156180 Dec 8 18:05 ansible.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 18 Dec 8 18:02 ci_script_000_check_for_oc.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 12148 Dec 8 18:05 ci_script_000_copy_logs_from_crc.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 234 Dec 8 17:53 ci_script_000_fetch_openshift.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 152024 Dec 8 18:04 ci_script_000_prepare_root_ssh.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 4388 Dec 8 18:04 ci_script_000_run_openstack_must_gather.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 17 Dec 8 17:53 ci_script_001_login_into_openshift_internal.log drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:04 crc drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 4096 Dec 8 18:05 openstack-must-gather /home/zuul/ci-framework-data/logs/2025-12-08_18-02: total 156 -rw-rw-rw-. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 156222 Dec 8 17:54 ansible.log /home/zuul/ci-framework-data/logs/crc: total 0 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 18 Dec 8 18:04 crc-logs-artifacts /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts: total 16 drwxr-xr-x. 109 zuul zuul unconfined_u:object_r:user_home_t:s0 12288 Dec 8 18:05 pods /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods: total 12 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 37 Dec 8 18:05 cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 37 Dec 8 18:04 cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 34 Dec 8 18:05 cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 35 Dec 8 18:04 cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9_4356ed35-799c-4e39-a660-872291edf6cc drwxr-xr-x. 6 zuul zuul unconfined_u:object_r:user_home_t:s0 108 Dec 8 18:05 hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002 drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 105 Dec 8 18:05 openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 42 Dec 8 18:04 openshift-apiserver-operator_openshift-apiserver-operator-846cbfc458-q6lj7_837f85a8-fff5-46a0-b1d5-2d51271f415a drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 29 Dec 8 18:04 openshift-authentication_oauth-openshift-57ffdf54dd-5dg99_0c242c34-d446-4428-b8d7-0b8dbf4137c9 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 37 Dec 8 18:05 openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 64 Dec 8 18:05 openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 152 Dec 8 18:05 openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 38 Dec 8 18:04 openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 60 Dec 8 18:05 openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 21 Dec 8 18:05 openshift-console_console-64d44f6ddf-dhfvx_a272b1fd-864b-4107-a4fd-6f6ab82a1d34 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 29 Dec 8 18:05 openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 30 Dec 8 18:05 openshift-console-operator_console-operator-67c89758df-79mps_2e8b3e0b-d963-4522-9a08-71aee0979479 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:05 openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f drwxr-xr-x. 
3 zuul zuul unconfined_u:object_r:user_home_t:s0 51 Dec 8 18:05 openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 40 Dec 8 18:05 openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 31 Dec 8 18:05 openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 49 Dec 8 18:04 openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183 drwxr-xr-x. 10 zuul zuul unconfined_u:object_r:user_home_t:s0 156 Dec 8 18:05 openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 27 Dec 8 18:05 openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 45 Dec 8 18:05 openshift-image-registry_cluster-image-registry-operator-86c45576b9-rwgjl_1cd09f9c-6a6f-438a-a982-082edc35a55c drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 22 Dec 8 18:04 openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 21 Dec 8 18:04 openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 38 Dec 8 18:05 openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 53 Dec 8 18:05 openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 20 Dec 8 18:04 openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 23 Dec 8 18:04 openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c drwxr-xr-x. 8 zuul zuul unconfined_u:object_r:user_home_t:s0 4096 Dec 8 18:04 openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 37 Dec 8 18:04 openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 20 Dec 8 18:04 openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 20 Dec 8 18:04 openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab drwxr-xr-x. 6 zuul zuul unconfined_u:object_r:user_home_t:s0 164 Dec 8 18:05 openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 46 Dec 8 18:04 openshift-kube-controller-manager-operator_kube-controller-manager-operator-69d5f845f8-6lgwk_163e109f-c588-4057-a961-86bcca55948f drwxr-xr-x. 6 zuul zuul unconfined_u:object_r:user_home_t:s0 130 Dec 8 18:05 openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 47 Dec 8 18:05 openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-54f497555d-gvb6q_a52d6e07-c08e-4424-8a3f-50052c311604 drwxr-xr-x. 
3 zuul zuul unconfined_u:object_r:user_home_t:s0 20 Dec 8 18:05 openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 50 Dec 8 18:04 openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 52 Dec 8 18:05 openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-565b79b866-6gkgz_dbad8204-9790-4f15-a74c-0149d19a4785 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 48 Dec 8 18:04 openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 57 Dec 8 18:04 openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 47 Dec 8 18:04 openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 62 Dec 8 18:05 openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 58 Dec 8 18:04 openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 60 Dec 8 18:04 openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 35 Dec 8 18:04 openshift-machine-config-operator_machine-config-server-psb45_d549986a-81c9-4cd0-86b0-61e4b6700ddf drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 45 Dec 8 18:05 openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 45 Dec 8 18:05 openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781 drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 45 Dec 8 18:05 openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65 drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 45 Dec 8 18:04 openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547 drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 77 Dec 8 18:04 openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89 drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 77 Dec 8 18:04 openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 77 Dec 8 18:04 openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 34 Dec 8 18:05 openshift-marketplace_marketplace-operator-547dbd544d-6bbtn_c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1 drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 77 Dec 8 18:04 openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5 drwxr-xr-x. 
5 zuul zuul unconfined_u:object_r:user_home_t:s0 77 Dec 8 18:04 openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9 drwxr-xr-x. 9 zuul zuul unconfined_u:object_r:user_home_t:s0 4096 Dec 8 18:04 openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 64 Dec 8 18:04 openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 25 Dec 8 18:04 openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 59 Dec 8 18:04 openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:05 openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 39 Dec 8 18:04 openshift-network-console_networking-console-plugin-5ff7774fd9-nljh6_6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 29 Dec 8 18:04 openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 44 Dec 8 18:04 openshift-network-diagnostics_network-check-target-fhkjl_17b87002-b798-480a-8e17-83053d698239 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 37 Dec 8 18:05 openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 30 Dec 8 18:05 openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 30 Dec 8 18:05 openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 58 Dec 8 18:04 openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 30 Dec 8 18:04 openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 30 Dec 8 18:04 openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 30 Dec 8 18:04 openshift-operator-lifecycle-manager_collect-profiles-29420265-vsxwc_3ec0e45e-87cc-4b67-b137-ac7179bf7d74 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 30 Dec 8 18:04 openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 26 Dec 8 18:05 openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 27 Dec 8 18:04 openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 59 Dec 8 18:05 openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe drwxr-xr-x. 
3 zuul zuul unconfined_u:object_r:user_home_t:s0 33 Dec 8 18:05 openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 51 Dec 8 18:04 openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm_b0b7331f-5f3a-41e7-84d0-64a9aa478c60 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 51 Dec 8 18:04 openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 22 Dec 8 18:04 openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 29 Dec 8 18:04 openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 60 Dec 8 18:05 openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f drwxr-xr-x. 11 zuul zuul unconfined_u:object_r:user_home_t:s0 4096 Dec 8 18:05 openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 38 Dec 8 18:04 openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 33 Dec 8 18:04 openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 35 Dec 8 18:04 openshift-service-ca_service-ca-74545575db-d69qv_ada44265-dcab-408c-843e-e5c5a45aa138 drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 45 Dec 8 18:04 service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440 drwxr-xr-x. 6 zuul zuul unconfined_u:object_r:user_home_t:s0 96 Dec 8 18:04 service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 18 Dec 8 18:04 service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 35 Dec 8 18:04 service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 54 Dec 8 18:04 service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 35 Dec 8 18:04 service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049 drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 54 Dec 8 18:05 service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 54 Dec 8 18:04 service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 34 Dec 8 18:05 service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 37 Dec 8 18:04 service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301 drwxr-xr-x. 
3 zuul zuul unconfined_u:object_r:user_home_t:s0 21 Dec 8 18:05 service-telemetry_elastic-operator-c9c86658-4qchz_1899106f-2682-474e-ad41-4dd00dbc7d4b drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 99 Dec 8 18:04 service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98 drwxr-xr-x. 5 zuul zuul unconfined_u:object_r:user_home_t:s0 45 Dec 8 18:04 service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 29 Dec 8 18:05 service-telemetry_infrawatch-operators-tv99j_020b4835-c362-478d-b714-bb42757ae9e2 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 35 Dec 8 18:04 service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60 drwxr-xr-x. 6 zuul zuul unconfined_u:object_r:user_home_t:s0 94 Dec 8 18:05 service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 17 Dec 8 18:04 service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 22 Dec 8 18:04 service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4 drwxr-xr-x. 3 zuul zuul unconfined_u:object_r:user_home_t:s0 22 Dec 8 18:05 service-telemetry_smart-gateway-operator-5cd794ff55-w8r45_88186169-23e9-44fb-a70c-0f6fe06b2800 drwxr-xr-x. 4 zuul zuul unconfined_u:object_r:user_home_t:s0 60 Dec 8 18:04 service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 cert-manager-controller /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-858d87f86b-7q2ss_dfea6e7f-7e23-4b85-a7f2-a56ba93e1a62/cert-manager-controller: total 36 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 34645 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 cert-manager-cainjector /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-cainjector-7dbf76d5c8-fdk5q_57678783-1dc9-4366-a2e6-7f8c6321e40f/cert-manager-cainjector: total 16 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 14776 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 cert-manager-webhook /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager_cert-manager-webhook-7894b5b9b4-wdn4b_72f27276-bf08-481d-ad0b-11f8e684d170/cert-manager-webhook: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 4017 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9_4356ed35-799c-4e39-a660-872291edf6cc: total 0 drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 cert-manager-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/cert-manager-operator_cert-manager-operator-controller-manager-64c74584c4-qtkx9_4356ed35-799c-4e39-a660-872291edf6cc/cert-manager-operator: total 64 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 65200 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 csi-provisioner drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 hostpath-provisioner drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 liveness-probe drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 node-driver-registrar /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/csi-provisioner: total 120 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 122396 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/hostpath-provisioner: total 56 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 54174 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/liveness-probe: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 396 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/hostpath-provisioner_csi-hostpathplugin-qrls7_b81b63fd-c7d6-4446-ab93-c62912586002/node-driver-registrar: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1504 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 fix-audit-permissions drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 openshift-apiserver drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 openshift-apiserver-check-endpoints /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/fix-audit-permissions: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver: total 112 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 111677 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver_apiserver-9ddfb9f55-8h8fl_695dd41c-159e-4e22-98e5-e27fdf4296fd/openshift-apiserver-check-endpoints: total 32 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 29935 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver-operator_openshift-apiserver-operator-846cbfc458-q6lj7_837f85a8-fff5-46a0-b1d5-2d51271f415a: total 0 drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 openshift-apiserver-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-apiserver-operator_openshift-apiserver-operator-846cbfc458-q6lj7_837f85a8-fff5-46a0-b1d5-2d51271f415a/openshift-apiserver-operator: total 96 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 94462 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authentication_oauth-openshift-57ffdf54dd-5dg99_0c242c34-d446-4428-b8d7-0b8dbf4137c9: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 oauth-openshift /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authentication_oauth-openshift-57ffdf54dd-5dg99_0c242c34-d446-4428-b8d7-0b8dbf4137c9/oauth-openshift: total 20 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 19072 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 authentication-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-authentication-operator_authentication-operator-7f5c659b84-5scww_4c48eb41-252c-441b-9506-329d9f6b0371/authentication-operator: total 440 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 446922 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 machine-approver-controller /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/kube-rbac-proxy: total 12 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 8366 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-machine-approver_machine-approver-54c688565-487qx_92b6ea75-6b68-454a-855f-958a2bf6150b/machine-approver-controller: total 64 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 65419 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 05562bed0a58785cbffd80e5e63ed8943b1bccf2f61dbd7cf94aec4efa9e38cf.log drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 cluster-samples-operator drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 cluster-samples-operator-watch /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator: total 84 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 82256 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-samples-operator_cluster-samples-operator-6b564684c8-2cnx5_f22fa87e-79cb-498c-a2ab-166d47fd70a5/cluster-samples-operator-watch: total 4 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 664 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 cluster-version-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-cluster-version_cluster-version-operator-7c9b9cfd6-sft9f_712f1b2c-7912-41b1-8c4e-737a0163088b/cluster-version-operator: total 2788 -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 2852940 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 openshift-api drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 openshift-config-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-api: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-config-operator_openshift-config-operator-5777786469-v69x6_ceb6ea27-6be6-4eb2-8f56-d8ddfa3f0b0b/openshift-config-operator: total 36 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 34179 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_console-64d44f6ddf-dhfvx_a272b1fd-864b-4107-a4fd-6f6ab82a1d34: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 console /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_console-64d44f6ddf-dhfvx_a272b1fd-864b-4107-a4fd-6f6ab82a1d34/console: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 2901 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 download-server /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console_downloads-747b44746d-x7wvx_39c08b26-3404-4ffd-a53a-c86f0c654db7/download-server: total 28 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 27685 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console-operator_console-operator-67c89758df-79mps_2e8b3e0b-d963-4522-9a08-71aee0979479: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 console-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-console-operator_console-operator-67c89758df-79mps_2e8b3e0b-d963-4522-9a08-71aee0979479/console-operator: total 96 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 97019 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 controller-manager /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-manager_controller-manager-5cb6f9d449-mjxkv_bb242c6c-f6d4-4c20-b143-aaf339af083f/controller-manager: total 68 -rw-r--r--. 
1 root root unconfined_u:object_r:user_home_t:s0 66942 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 openshift-controller-manager-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-controller-manager-operator_openshift-controller-manager-operator-686468bdd5-m5ltz_0f90a7a2-721d-4929-a4fa-fd1d2019b4cd/openshift-controller-manager-operator: total 208 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 210115 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 dns drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/dns: total 36 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 34141 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_dns-default-c5tbq_1125cbf4-59e9-464e-8305-d2fc133ae675/kube-rbac-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1040 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 dns-node-resolver /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns_node-resolver-vk6p6_b10e1655-f317-439b-8188-cbfbebc4d756/dns-node-resolver: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 96 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 dns-operator drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/dns-operator: total 16 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 14141 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-dns-operator_dns-operator-799b87ffcd-9b988_6be72eaf-a179-4e2b-a12d-4b5dbb213183/kube-rbac-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1040 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 etcd drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 etcdctl drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 etcd-ensure-env-vars drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 etcd-metrics drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 etcd-readyz drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 etcd-resources-copy drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 etcd-rev drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 setup /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd: total 28 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 28478 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcdctl: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-ensure-env-vars: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-metrics: total 20 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 17964 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-readyz: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 240 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-resources-copy: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/etcd-rev: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 124 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd_etcd-crc_20c5c5b4bed930554494851fe3cb2b2a/setup: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 156 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 etcd-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-etcd-operator_etcd-operator-69b85846b6-k26tc_1bd2df11-789d-4a3f-a7c4-2d6afbe38d0f/etcd-operator: total 124 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 124861 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_cluster-image-registry-operator-86c45576b9-rwgjl_1cd09f9c-6a6f-438a-a982-082edc35a55c: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 cluster-image-registry-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_cluster-image-registry-operator-86c45576b9-rwgjl_1cd09f9c-6a6f-438a-a982-082edc35a55c/cluster-image-registry-operator: total 40 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 39150 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 registry /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_image-registry-5d9d95bf5b-cmjbz_82c8be84-d9b0-44df-99be-57f994255a0b/registry: total 60 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 59214 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 node-ca /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-image-registry_node-ca-pvtml_5a1c0a2f-d8ef-48d5-90d0-9d8fb12e8a00/node-ca: total 16 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 12768 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 serve-healthcheck-canary /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-canary_ingress-canary-psjrr_c32d3580-29a1-4299-8926-e4c9caa4ff86/serve-healthcheck-canary: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1730 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 ingress-operator drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/ingress-operator: total 48 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 48978 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress-operator_ingress-operator-6b9cb4dbcf-2pwhz_0157c9d2-3779-46c8-9da9-1fffa52986a6/kube-rbac-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1040 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 router /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ingress_router-default-68cf44c8b8-rscz2_fe85cb02-2d21-4fc3-92c1-6d060a006011/router: total 32 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 31648 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 installer /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_installer-12-crc_158725bd-7556-4281-a3cb-acaa6baf5d8c/installer: total 64 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 62917 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-apiserver drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-apiserver-cert-regeneration-controller drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-apiserver-cert-syncer drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-apiserver-check-endpoints drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-apiserver-insecure-readyz drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 setup /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver: total 592 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 603517 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-regeneration-controller: total 20 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 19128 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-cert-syncer: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 7058 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-check-endpoints: total 24 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 22437 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/kube-apiserver-insecure-readyz: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 116 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_kube-apiserver-crc_57755cc5f99000cc11e193051474d4e2/setup: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 265 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-apiserver-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver-operator_kube-apiserver-operator-575994946d-bhk9x_28b33fd8-46b7-46e9-bef9-ec6b3f035300/kube-apiserver-operator: total 268 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 274421 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 pruner /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-11-crc_46f67036-aba9-49da-a298-d68e56b91e00/pruner: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1902 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 pruner /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-apiserver_revision-pruner-12-crc_1087bc4c-df19-4954-92b2-e9bfc266fdab/pruner: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1959 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641: total 0 drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 cluster-policy-controller drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:05 kube-controller-manager drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-controller-manager-cert-syncer drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-controller-manager-recovery-controller /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/cluster-policy-controller: total 248 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 250745 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager: total 932 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 78176 Dec 8 18:05 1.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 870765 Dec 8 18:05 2.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-cert-syncer: total 16 -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 12398 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager-recovery-controller: total 20 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 18781 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager-operator_kube-controller-manager-operator-69d5f845f8-6lgwk_163e109f-c588-4057-a961-86bcca55948f: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-controller-manager-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-controller-manager-operator_kube-controller-manager-operator-69d5f845f8-6lgwk_163e109f-c588-4057-a961-86bcca55948f/kube-controller-manager-operator: total 176 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 177802 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-scheduler drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-scheduler-cert-syncer drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-scheduler-recovery-controller drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 wait-for-host-port /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler: total 164 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 166956 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-cert-syncer: total 12 -rw-r--r--. 
1 root root unconfined_u:object_r:user_home_t:s0 8668 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/kube-scheduler-recovery-controller: total 12 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 10048 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_openshift-kube-scheduler-crc_0b638b8f4bb0070e40528db779baf6a2/wait-for-host-port: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 85 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-54f497555d-gvb6q_a52d6e07-c08e-4424-8a3f-50052c311604: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-scheduler-operator-container /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler-operator_openshift-kube-scheduler-operator-54f497555d-gvb6q_a52d6e07-c08e-4424-8a3f-50052c311604/kube-scheduler-operator-container: total 144 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 145342 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 pruner /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-scheduler_revision-pruner-6-crc_c683e0b8-bb8e-4012-80e0-a07cbd5b9cf6/pruner: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1900 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 graceful-termination drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 migrator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/graceful-termination: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 72 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator_migrator-866fcbc849-5pp5q_82728066-0204-4d71-acff-8779194a3e3c/migrator: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1875 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-565b79b866-6gkgz_dbad8204-9790-4f15-a74c-0149d19a4785: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-storage-version-migrator-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-kube-storage-version-migrator-operator_kube-storage-version-migrator-operator-565b79b866-6gkgz_dbad8204-9790-4f15-a74c-0149d19a4785/kube-storage-version-migrator-operator: total 48 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 46202 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896: total 0 drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 control-plane-machine-set-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-dhfht_0b3a0959-d09e-4fd8-b931-d85bb42a3896/control-plane-machine-set-operator: total 20 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 18428 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 machine-api-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/kube-rbac-proxy: total 12 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 8366 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-api_machine-api-operator-755bb95488-5httz_1a749ad3-837c-4804-b23c-2abb017b5b82/machine-api-operator: total 20 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 18279 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy-crio drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 setup /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/kube-rbac-proxy-crio: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1376 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_kube-rbac-proxy-crio-crc_4e08c320b1e9e2405e6e0107bdf7eeb4/setup: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 101 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 machine-config-controller /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/kube-rbac-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1212 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-controller-f9cdd68f7-p88k2_78316998-7ca1-4495-997b-bad16252fa84/machine-config-controller: total 44 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 42743 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:05 machine-config-daemon /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/kube-rbac-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1212 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-daemon-8vxnt_cee6a3dc-47d4-4996-9c78-cb6c6b626d71/machine-config-daemon: total 128 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 61357 Dec 8 18:04 4.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 67695 Dec 8 18:04 5.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 machine-config-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/kube-rbac-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1212 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-operator-67c9d58cbb-4g75z_2ecc2ce3-fe03-4f16-9dfd-4a8b1b2b224f/machine-config-operator: total 544 -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 555391 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-server-psb45_d549986a-81c9-4cd0-86b0-61e4b6700ddf: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 machine-config-server /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-machine-config-operator_machine-config-server-psb45_d549986a-81c9-4cd0-86b0-61e4b6700ddf/machine-config-server: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1040 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 pull drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 util /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/extract: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 2146 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/pull: total 72 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 71478 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a4jmgj_0b5d1008-e7ed-481b-85c2-5f359d8eda2d/util: total 4 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 71 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 pull drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 util /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/extract: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 5285 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/pull: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 2578 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210hkpv5_8d941e2a-672c-4bb7-b8fc-314ecbcf7781/util: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 71 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 pull drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 util /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/extract: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 630 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/pull: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 2243 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8f64tgj_c70d8b4a-afd5-4ece-bd7f-9caf1f100d65/util: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 71 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 pull drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 util /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/extract: total 4 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 2197 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/pull: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1956 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5etdr5f_4d041d5b-762b-4616-bc8a-d21727bd0547/util: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 71 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract-content drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract-utilities drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 registry-server /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-content: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/extract-utilities: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-58d6l_af364a45-2b54-442a-b71a-4032d578bc89/registry-server: total 2516 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 2575524 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 6 Dec 8 18:04 extract-content drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 6 Dec 8 18:04 extract-utilities drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 6 Dec 8 18:04 registry-server /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f/extract-content: total 0 /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f/extract-utilities: total 0 /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_certified-operators-p8pz8_a2de420a-ccef-431d-8597-193d09e4fa4f/registry-server: total 0 /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract-content drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract-utilities drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 registry-server /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-content: total 0 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/extract-utilities: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_community-operators-zdvxg_a52a5ff3-1e70-4b19-b013-95206cae40fc/registry-server: total 4196 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 4293693 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_marketplace-operator-547dbd544d-6bbtn_c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 marketplace-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_marketplace-operator-547dbd544d-6bbtn_c3f09b88-c9bd-4d0b-9a10-2b2b5f2ea5b1/marketplace-operator: total 28 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 25844 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract-content drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract-utilities drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 registry-server /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-content: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/extract-utilities: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-5gtms_a92e71c9-7ef3-42ef-a103-b8cb38fd3ee5/registry-server: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 675 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract-content drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract-utilities drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 registry-server /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-content: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/extract-utilities: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-marketplace_redhat-operators-xpnf9_259174f2-efbe-4b44-ae95-b0d2f2865ab9/registry-server: total 3192 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 3267152 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 bond-cni-plugin drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 cni-plugins drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 egress-router-binary-copy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-multus-additional-cni-plugins drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 routeoverride-cni drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 whereabouts-cni drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 whereabouts-cni-bincopy /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/bond-cni-plugin: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 392 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/cni-plugins: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 404 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/egress-router-binary-copy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 414 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/kube-multus-additional-cni-plugins: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/routeoverride-cni: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 411 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 80 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-additional-cni-plugins-lq9nf_ca4a524a-a1cb-4e10-8765-aa38225d2de3/whereabouts-cni-bincopy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 408 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 multus-admission-controller /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/kube-rbac-proxy: total 4 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 1040 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-admission-controller-69db94689b-v9sxk_f5c1e280-e9c9-4a30-bb13-023852fd940b/multus-admission-controller: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1276 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:05 kube-multus /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_multus-dlvbf_a091751f-234c-43ee-8324-ebb98bb3ec36/kube-multus: total 404 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 239353 Dec 8 18:04 0.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 168003 Dec 8 18:04 1.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 network-metrics-daemon /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/kube-rbac-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1040 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-multus_network-metrics-daemon-54w78_e666ddb1-3625-4468-9d05-21215b5041c1/network-metrics-daemon: total 44 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 41229 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 copy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 gather /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/copy: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-must-gather-gctth_must-gather-5cz8j_736c26bc-8908-4abc-89f5-7f1d201b7e1a/gather: total 36 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 33468 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-console_networking-console-plugin-5ff7774fd9-nljh6_6a9ae5f6-97bd-46ac-bafa-ca1b4452a141: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 networking-console-plugin /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-console_networking-console-plugin-5ff7774fd9-nljh6_6a9ae5f6-97bd-46ac-bafa-ca1b4452a141/networking-console-plugin: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 779 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440: total 0 drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 check-endpoints /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-source-5bb8f5cd97-xdvz5_f863fff9-286a-45fa-b8f0-8a86994b8440/check-endpoints: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 5845 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-target-fhkjl_17b87002-b798-480a-8e17-83053d698239: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 network-check-target-container /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-diagnostics_network-check-target-fhkjl_17b87002-b798-480a-8e17-83053d698239/network-check-target-container: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 61 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 approver drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 webhook /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/approver: total 12 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 12165 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-node-identity_network-node-identity-dgvkt_fc4541ce-7789-4670-bc75-5c2868e52ce0/webhook: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 3698 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 iptables-alerter /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-operator_iptables-alerter-5jnd7_428b39f5-eb1c-4f65-b7a4-eeb6e84860cc/iptables-alerter: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 120 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 network-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-network-operator_network-operator-7bdcf4f5bd-7fjxv_34177974-8d82-49d2-a763-391d0df3bbd8/network-operator: total 772 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 789449 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 fix-audit-permissions drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 oauth-apiserver /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/fix-audit-permissions: total 0 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-oauth-apiserver_apiserver-8596bd845d-rdv9c_3a9ac21c-f3fb-42c7-a5ce-096d015b8d3c/oauth-apiserver: total 56 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 56628 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 catalog-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_catalog-operator-75ff9f647d-bl822_9a815eca-9800-4b68-adc1-5953173f4427/catalog-operator: total 896 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 914136 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 collect-profiles /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420250-qhrfp_742843af-c521-4d4a-beea-e6feae8140e1/collect-profiles: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 273 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420265-vsxwc_3ec0e45e-87cc-4b67-b137-ac7179bf7d74: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 collect-profiles /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420265-vsxwc_3ec0e45e-87cc-4b67-b137-ac7179bf7d74/collect-profiles: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 736 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 collect-profiles /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_collect-profiles-29420280-hxvtb_730f299b-bb80-45b1-a8bc-a10ce2e3567b/collect-profiles: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 736 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 olm-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_olm-operator-5cdf44d969-ggh59_c987ac4d-5129-45aa-afe4-ab42b6907462/olm-operator: total 784 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 799531 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 packageserver /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_packageserver-7d4fc7d867-4kjg6_085a3a20-9b8f-4448-a4cb-89465f57027c/packageserver: total 64 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 62849 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 package-server-manager /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/kube-rbac-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1054 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operator-lifecycle-manager_package-server-manager-77f986bd66-d8qsj_9148080a-77e2-4847-840a-d67f837c8fbe/package-server-manager: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 7997 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 prometheus-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-86648f486b-4j9kn_abff26d8-ffb7-4ac9-b7ac-2eb4e66847fd/prometheus-operator: total 36 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 35710 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm_b0b7331f-5f3a-41e7-84d0-64a9aa478c60: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 prometheus-operator-admission-webhook /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9dkcm_b0b7331f-5f3a-41e7-84d0-64a9aa478c60/prometheus-operator-admission-webhook: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1061 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 prometheus-operator-admission-webhook /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5b9dc645c4-9pj5t_174b7c35-bd90-4386-a01d-b20d986df7e5/prometheus-operator-admission-webhook: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1061 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_observability-operator-78c97476f4-mg4b2_a7981d87-d276-41a7-ad7c-d6f0cde8fa7d/operator: total 24 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 22905 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83: total 0 drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 perses-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-operators_perses-operator-68bdb49cbf-m2cdr_eae302b5-bcca-41b8-9f24-34be44dd7f83/perses-operator: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 3422 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 ovnkube-cluster-manager /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/kube-rbac-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1183 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-control-plane-97c9b6c48-lfp2m_8105d3ef-5e53-4418-9d0c-12f9b6ffa67f/ovnkube-cluster-manager: total 40 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 37548 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kubecfg-setup drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy-node drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 kube-rbac-proxy-ovn-metrics drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 nbdb drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 northd drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 ovn-acl-logging drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 ovn-controller drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 ovnkube-controller drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 sbdb /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kubecfg-setup: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-node: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 4680 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/kube-rbac-proxy-ovn-metrics: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 4640 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/nbdb: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 2425 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/northd: total 8 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 4519 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-acl-logging: total 20 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 20301 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovn-controller: total 48 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 48930 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/ovnkube-controller: total 2088 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 2135874 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-ovn-kubernetes_ovnkube-node-gpg4k_abafac28-99fe-42a6-bee9-f3fb197b1bc2/sbdb: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 2357 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 route-controller-manager /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-route-controller-manager_route-controller-manager-7dd6d6d8c8-wfznc_0b1ea033-2c13-4941-a658-0129d8822fb2/route-controller-manager: total 20 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 18301 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 service-ca-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca-operator_service-ca-operator-5b9c976747-cdz4v_2554c491-6bfb-47fd-9b76-c1da12e702b1/service-ca-operator: total 48 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 48877 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca_service-ca-74545575db-d69qv_ada44265-dcab-408c-843e-e5c5a45aa138: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 service-ca-controller /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/openshift-service-ca_service-ca-74545575db-d69qv_ada44265-dcab-408c-843e-e5c5a45aa138/service-ca-controller: total 48 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 45770 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 pull drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 util /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/extract: total 4 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 614 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/pull: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1956 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_36ffb4ab4bfe83a910ab52ec1870308fea799225a9f1157962b08e8113ms9qq_8dfcd1bd-ac9d-4eba-b160-b7f4335fb440/util: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 71 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 alertmanager drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 config-reloader drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 init-config-reloader drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 oauth-proxy /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/alertmanager: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1709 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/config-reloader: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 2246 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/init-config-reloader: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 704 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_alertmanager-default-0_81e17e77-b0f9-4df6-8c85-e06d1fd7a46a/oauth-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1487 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 curl /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_curl_f1d063fa-3d6b-49c3-aa66-288dd70351b0/curl: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 4996 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:05 bridge drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 sg-core /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/bridge: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 596 Dec 8 18:04 1.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1042 Dec 8 18:04 2.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-event-smartgateway-65cf5f4bb8-q2xqk_35c3d7e4-3ad4-4184-a22e-86654ad7867b/sg-core: total 4 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 682 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:05 bridge drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 oauth-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 sg-core /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/bridge: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 595 Dec 8 18:04 1.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1043 Dec 8 18:04 2.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/oauth-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1481 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-ceil-meter-smartgateway-545b564d9f-kf59v_ef58ecee-c967-4d4f-946b-8c8123a73084/sg-core: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1344 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:05 bridge drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 sg-core /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/bridge: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 596 Dec 8 18:04 1.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1046 Dec 8 18:04 2.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-event-smartgateway-d956b4648-jwkwn_8ecda967-3335-4158-839b-9b4048b8f049/sg-core: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 682 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:05 bridge drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 oauth-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 sg-core /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/bridge: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 596 Dec 8 18:05 1.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1077 Dec 8 18:05 2.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/oauth-proxy: total 4 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 1481 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-coll-meter-smartgateway-787645d794-4zrzx_0e2a1994-199f-4b38-903b-cba9061dfcad/sg-core: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1340 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 32 Dec 8 18:05 bridge drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 oauth-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 sg-core /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/bridge: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 596 Dec 8 18:04 1.log -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1042 Dec 8 18:04 2.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/oauth-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1481 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-cloud1-sens-meter-smartgateway-66d5b7c5fc-gh2mp_f486b0de-c62f-46a2-8649-dca61a92506c/sg-core: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1344 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 default-interconnect /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-interconnect-55bf8d5cb-rwr2k_d839602b-f183-45c8-af76-72a0d292aa33/default-interconnect: total 16 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 12680 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 prometheus-webhook-snmp /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_default-snmp-webhook-6774d8dfbc-75fxn_37bee34a-f42e-4493-85f3-7f5e5cbd7301/prometheus-webhook-snmp: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 3898 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elastic-operator-c9c86658-4qchz_1899106f-2682-474e-ad41-4dd00dbc7d4b: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 manager /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elastic-operator-c9c86658-4qchz_1899106f-2682-474e-ad41-4dd00dbc7d4b/manager: total 152 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 152670 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 elastic-internal-init-filesystem drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 elastic-internal-suspend drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 elasticsearch /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-init-filesystem: total 12 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 10531 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elastic-internal-suspend: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_elasticsearch-es-default-0_72b61c1d-040f-465f-bea8-e024f5879f98/elasticsearch: total 56 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 53711 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 extract drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 pull drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 util /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/extract: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 609 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/pull: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1955 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_f308c3282bd783e18badba37dad473f984d0c04be601135745fecb7682f55kx_f97402a7-57a3-4f4a-af9f-478d646d2cbc/util: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 71 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_infrawatch-operators-tv99j_020b4835-c362-478d-b714-bb42757ae9e2: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 registry-server /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_infrawatch-operators-tv99j_020b4835-c362-478d-b714-bb42757ae9e2/registry-server: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 647 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 interconnect-operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_interconnect-operator-78b9bd8798-456sz_871b0dde-aad5-4e54-bd14-1c4bc8779b60/interconnect-operator: total 12 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 10693 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 config-reloader drwxr-xr-x. 
2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 init-config-reloader drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 oauth-proxy drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 prometheus /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/config-reloader: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 2383 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/init-config-reloader: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 703 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/oauth-proxy: total 4 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 1483 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_prometheus-default-0_3d62a6f6-b57c-48e0-9279-d8dadd01a921/prometheus: total 8 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 6397 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 qdr /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_qdr-test_73a290f7-fdfb-4484-9e5f-e3f80b72dec3/qdr: total 20 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 16483 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_service-telemetry-operator-79647f8775-zs8hl_b4cd1da4-b555-42d4-b09a-38f141ee7dc4/operator: total 620 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 634697 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_smart-gateway-operator-5cd794ff55-w8r45_88186169-23e9-44fb-a70c-0f6fe06b2800: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 operator /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_smart-gateway-operator-5cd794ff55-w8r45_88186169-23e9-44fb-a70c-0f6fe06b2800/operator: total 7160 -rw-r--r--. 1 root root unconfined_u:object_r:user_home_t:s0 7331514 Dec 8 18:05 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece: total 0 drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 smoketest-ceilometer drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 19 Dec 8 18:05 smoketest-collectd /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-ceilometer: total 12 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 10988 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods/service-telemetry_stf-smoketest-smoke1-pbhxq_612790c4-c2da-4318-89f8-c7745da26ece/smoketest-collectd: total 48 -rw-r--r--. 
1 zuul zuul unconfined_u:object_r:user_home_t:s0 45845 Dec 8 18:04 0.log /home/zuul/ci-framework-data/logs/openstack-must-gather: total 12 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 3336 Dec 8 18:04 event-filter.html lrwxrwxrwx. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 125 Dec 8 18:04 latest -> quay-io-openstack-k8s-operators-openstack-must-gather-sha256-854a802357b4f565a366fce3bf29b20c1b768ec4ab7e822ef52dfc2fef000d2c -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 3904 Dec 8 18:04 must-gather.logs drwxr-xr-x. 2 zuul zuul unconfined_u:object_r:user_home_t:s0 25 Dec 8 18:05 quay-io-openstack-k8s-operators-openstack-must-gather-sha256-854a802357b4f565a366fce3bf29b20c1b768ec4ab7e822ef52dfc2fef000d2c -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 112 Dec 8 18:04 timestamp /home/zuul/ci-framework-data/logs/openstack-must-gather/quay-io-openstack-k8s-operators-openstack-must-gather-sha256-854a802357b4f565a366fce3bf29b20c1b768ec4ab7e822ef52dfc2fef000d2c: total 0 -rw-r--r--. 1 zuul zuul unconfined_u:object_r:user_home_t:s0 0 Dec 8 18:04 gather.logs /home/zuul/ci-framework-data/tmp: total 0 /home/zuul/ci-framework-data/volumes: total 0 home/zuul/zuul-output/logs/README.html0000644000175000017500000000306615115611541016716 0ustar zuulzuul README for CIFMW Logs

Logs of interest

Generated content of interest
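The recursive listing above follows the CRC log-artifact layout pods/<namespace>_<pod-name>_<pod-uid>/<container>/<N>.log, where N increases each time a container restarts (the smart-gateway bridge containers, for example, carry both 1.log and 2.log). Below is a minimal Python sketch for pulling the newest restart log out of such a bundle, assuming only that layout; the helper name and the example arguments are illustrative and are not part of the CI tooling itself.

#!/usr/bin/env python3
# Minimal sketch: find the newest restart log (highest N.log) for a container
# under the crc-logs-artifacts layout shown in the listing above:
#   pods/<namespace>_<pod-name>_<pod-uid>/<container>/<N>.log
# The base path is taken from the listing; the pod/container names used in the
# example below are illustrative.
from pathlib import Path
from typing import Optional

BASE = Path("/home/zuul/ci-framework-data/logs/crc/crc-logs-artifacts/pods")

def latest_container_log(namespace: str, pod_prefix: str, container: str) -> Optional[Path]:
    """Return the highest-numbered N.log for the first pod directory that matches."""
    for pod_dir in sorted(BASE.glob(f"{namespace}_{pod_prefix}*_*")):
        logs = sorted((pod_dir / container).glob("*.log"), key=lambda p: int(p.stem))
        if logs:
            return logs[-1]  # e.g. 2.log rather than 0.log or 1.log
    return None

if __name__ == "__main__":
    # Example: the ceilometer event smart gateway's bridge container.
    log = latest_container_log(
        "service-telemetry", "default-cloud1-ceil-event-smartgateway", "bridge"
    )
    print(log if log else "no matching container log found")

Sorting on the numeric stem matters here: a plain lexical sort would place 10.log before 2.log once a container has restarted that many times.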

home/zuul/zuul-output/logs/installed-pkgs.log0000644000175000017500000004723515115611542020526 0ustar zuulzuulaardvark-dns-1.17.0-1.el9.x86_64 abattis-cantarell-fonts-0.301-4.el9.noarch acl-2.3.1-4.el9.x86_64 adobe-source-code-pro-fonts-2.030.1.050-12.el9.1.noarch alternatives-1.24-2.el9.x86_64 annobin-12.98-1.el9.x86_64 ansible-core-2.14.18-2.el9.x86_64 attr-2.5.1-3.el9.x86_64 audit-3.1.5-7.el9.x86_64 audit-libs-3.1.5-7.el9.x86_64 authselect-1.2.6-3.el9.x86_64 authselect-compat-1.2.6-3.el9.x86_64 authselect-libs-1.2.6-3.el9.x86_64 avahi-libs-0.8-23.el9.x86_64 basesystem-11-13.el9.noarch bash-5.1.8-9.el9.x86_64 bash-completion-2.11-5.el9.noarch binutils-2.35.2-68.el9.x86_64 binutils-gold-2.35.2-68.el9.x86_64 buildah-1.41.3-1.el9.x86_64 bzip2-1.0.8-10.el9.x86_64 bzip2-libs-1.0.8-10.el9.x86_64 ca-certificates-2025.2.80_v9.0.305-91.el9.noarch c-ares-1.19.1-2.el9.x86_64 centos-gpg-keys-9.0-30.el9.noarch centos-logos-90.8-3.el9.x86_64 centos-stream-release-9.0-30.el9.noarch centos-stream-repos-9.0-30.el9.noarch checkpolicy-3.6-1.el9.x86_64 chrony-4.8-1.el9.x86_64 cloud-init-24.4-7.el9.noarch cloud-utils-growpart-0.33-1.el9.x86_64 cmake-filesystem-3.26.5-2.el9.x86_64 cockpit-bridge-348-1.el9.noarch cockpit-system-348-1.el9.noarch cockpit-ws-348-1.el9.x86_64 cockpit-ws-selinux-348-1.el9.x86_64 conmon-2.1.13-1.el9.x86_64 containers-common-1-134.el9.x86_64 containers-common-extra-1-134.el9.x86_64 container-selinux-2.242.0-1.el9.noarch coreutils-8.32-39.el9.x86_64 coreutils-common-8.32-39.el9.x86_64 cpio-2.13-16.el9.x86_64 cpp-11.5.0-14.el9.x86_64 cracklib-2.9.6-27.el9.x86_64 cracklib-dicts-2.9.6-27.el9.x86_64 createrepo_c-0.20.1-4.el9.x86_64 createrepo_c-libs-0.20.1-4.el9.x86_64 criu-3.19-3.el9.x86_64 criu-libs-3.19-3.el9.x86_64 cronie-1.5.7-14.el9.x86_64 cronie-anacron-1.5.7-14.el9.x86_64 crontabs-1.11-26.20190603git.el9.noarch crun-1.24-1.el9.x86_64 crypto-policies-20251126-1.gite9c4db2.el9.noarch crypto-policies-scripts-20251126-1.gite9c4db2.el9.noarch cryptsetup-libs-2.8.1-2.el9.x86_64 curl-7.76.1-34.el9.x86_64 cyrus-sasl-2.1.27-21.el9.x86_64 cyrus-sasl-devel-2.1.27-21.el9.x86_64 cyrus-sasl-gssapi-2.1.27-21.el9.x86_64 cyrus-sasl-lib-2.1.27-21.el9.x86_64 dbus-1.12.20-8.el9.x86_64 dbus-broker-28-7.el9.x86_64 dbus-common-1.12.20-8.el9.noarch dbus-libs-1.12.20-8.el9.x86_64 dbus-tools-1.12.20-8.el9.x86_64 debugedit-5.0-11.el9.x86_64 dejavu-sans-fonts-2.37-18.el9.noarch desktop-file-utils-0.26-6.el9.x86_64 device-mapper-1.02.206-2.el9.x86_64 device-mapper-libs-1.02.206-2.el9.x86_64 dhcp-client-4.4.2-19.b1.el9.x86_64 dhcp-common-4.4.2-19.b1.el9.noarch diffutils-3.7-12.el9.x86_64 dnf-4.14.0-31.el9.noarch dnf-data-4.14.0-31.el9.noarch dnf-plugins-core-4.3.0-24.el9.noarch dracut-057-102.git20250818.el9.x86_64 dracut-config-generic-057-102.git20250818.el9.x86_64 dracut-network-057-102.git20250818.el9.x86_64 dracut-squash-057-102.git20250818.el9.x86_64 dwz-0.16-1.el9.x86_64 e2fsprogs-1.46.5-8.el9.x86_64 e2fsprogs-libs-1.46.5-8.el9.x86_64 ed-1.14.2-12.el9.x86_64 efi-srpm-macros-6-4.el9.noarch elfutils-0.194-1.el9.x86_64 elfutils-debuginfod-client-0.194-1.el9.x86_64 elfutils-default-yama-scope-0.194-1.el9.noarch elfutils-libelf-0.194-1.el9.x86_64 elfutils-libs-0.194-1.el9.x86_64 emacs-filesystem-27.2-18.el9.noarch enchant-1.6.0-30.el9.x86_64 ethtool-6.15-2.el9.x86_64 expat-2.5.0-5.el9.x86_64 expect-5.45.4-16.el9.x86_64 file-5.39-16.el9.x86_64 file-libs-5.39-16.el9.x86_64 filesystem-3.16-5.el9.x86_64 findutils-4.8.0-7.el9.x86_64 fonts-filesystem-2.0.5-7.el9.1.noarch fonts-srpm-macros-2.0.5-7.el9.1.noarch 
fuse3-3.10.2-9.el9.x86_64 fuse3-libs-3.10.2-9.el9.x86_64 fuse-common-3.10.2-9.el9.x86_64 fuse-libs-2.9.9-17.el9.x86_64 fuse-overlayfs-1.16-1.el9.x86_64 gawk-5.1.0-6.el9.x86_64 gawk-all-langpacks-5.1.0-6.el9.x86_64 gcc-11.5.0-14.el9.x86_64 gcc-c++-11.5.0-14.el9.x86_64 gcc-plugin-annobin-11.5.0-14.el9.x86_64 gdb-minimal-16.3-2.el9.x86_64 gdbm-libs-1.23-1.el9.x86_64 gdisk-1.0.7-5.el9.x86_64 gdk-pixbuf2-2.42.6-6.el9.x86_64 geolite2-city-20191217-6.el9.noarch geolite2-country-20191217-6.el9.noarch gettext-0.21-8.el9.x86_64 gettext-libs-0.21-8.el9.x86_64 ghc-srpm-macros-1.5.0-6.el9.noarch git-2.47.3-1.el9.x86_64 git-core-2.47.3-1.el9.x86_64 git-core-doc-2.47.3-1.el9.noarch glib2-2.68.4-18.el9.x86_64 glibc-2.34-244.el9.x86_64 glibc-common-2.34-244.el9.x86_64 glibc-devel-2.34-244.el9.x86_64 glibc-gconv-extra-2.34-244.el9.x86_64 glibc-headers-2.34-244.el9.x86_64 glibc-langpack-en-2.34-244.el9.x86_64 glib-networking-2.68.3-3.el9.x86_64 gmp-6.2.0-13.el9.x86_64 gnupg2-2.3.3-4.el9.x86_64 gnutls-3.8.10-1.el9.x86_64 gobject-introspection-1.68.0-11.el9.x86_64 go-srpm-macros-3.8.1-1.el9.noarch gpgme-1.15.1-6.el9.x86_64 gpg-pubkey-8483c65d-5ccc5b19 grep-3.6-5.el9.x86_64 groff-base-1.22.4-10.el9.x86_64 grub2-common-2.06-118.el9.noarch grub2-pc-2.06-118.el9.x86_64 grub2-pc-modules-2.06-118.el9.noarch grub2-tools-2.06-118.el9.x86_64 grub2-tools-minimal-2.06-118.el9.x86_64 grubby-8.40-69.el9.x86_64 gsettings-desktop-schemas-40.0-8.el9.x86_64 gssproxy-0.8.4-7.el9.x86_64 gzip-1.12-1.el9.x86_64 hostname-3.23-6.el9.x86_64 hunspell-1.7.0-11.el9.x86_64 hunspell-en-GB-0.20140811.1-20.el9.noarch hunspell-en-US-0.20140811.1-20.el9.noarch hunspell-filesystem-1.7.0-11.el9.x86_64 hwdata-0.348-9.20.el9.noarch ima-evm-utils-1.6.2-2.el9.x86_64 info-6.7-15.el9.x86_64 inih-49-6.el9.x86_64 initscripts-rename-device-10.11.8-4.el9.x86_64 initscripts-service-10.11.8-4.el9.noarch ipcalc-1.0.0-5.el9.x86_64 iproute-6.17.0-1.el9.x86_64 iproute-tc-6.17.0-1.el9.x86_64 iptables-libs-1.8.10-11.el9.x86_64 iptables-nft-1.8.10-11.el9.x86_64 iptables-nft-services-1.8.10-11.el9.noarch iputils-20210202-15.el9.x86_64 irqbalance-1.9.4-5.el9.x86_64 jansson-2.14-1.el9.x86_64 jq-1.6-19.el9.x86_64 json-c-0.14-11.el9.x86_64 json-glib-1.6.6-1.el9.x86_64 kbd-2.4.0-11.el9.x86_64 kbd-legacy-2.4.0-11.el9.noarch kbd-misc-2.4.0-11.el9.noarch kernel-5.14.0-645.el9.x86_64 kernel-core-5.14.0-645.el9.x86_64 kernel-headers-5.14.0-645.el9.x86_64 kernel-modules-5.14.0-645.el9.x86_64 kernel-modules-core-5.14.0-645.el9.x86_64 kernel-srpm-macros-1.0-14.el9.noarch kernel-tools-5.14.0-645.el9.x86_64 kernel-tools-libs-5.14.0-645.el9.x86_64 kexec-tools-2.0.29-12.el9.x86_64 keyutils-1.6.3-1.el9.x86_64 keyutils-libs-1.6.3-1.el9.x86_64 kmod-28-11.el9.x86_64 kmod-libs-28-11.el9.x86_64 kpartx-0.8.7-39.el9.x86_64 krb5-libs-1.21.1-8.el9.x86_64 langpacks-core-en_GB-3.0-16.el9.noarch langpacks-core-font-en-3.0-16.el9.noarch langpacks-en_GB-3.0-16.el9.noarch less-590-6.el9.x86_64 libacl-2.3.1-4.el9.x86_64 libappstream-glib-0.7.18-5.el9.x86_64 libarchive-3.5.3-6.el9.x86_64 libassuan-2.5.5-3.el9.x86_64 libattr-2.5.1-3.el9.x86_64 libbasicobjects-0.1.1-53.el9.x86_64 libblkid-2.37.4-21.el9.x86_64 libbpf-1.5.0-2.el9.x86_64 libbrotli-1.0.9-7.el9.x86_64 libcap-2.48-10.el9.x86_64 libcap-ng-0.8.2-7.el9.x86_64 libcbor-0.7.0-5.el9.x86_64 libcollection-0.7.0-53.el9.x86_64 libcom_err-1.46.5-8.el9.x86_64 libcomps-0.1.18-1.el9.x86_64 libcurl-7.76.1-34.el9.x86_64 libdaemon-0.14-23.el9.x86_64 libdb-5.3.28-57.el9.x86_64 libdhash-0.5.0-53.el9.x86_64 libdnf-0.69.0-16.el9.x86_64 
libeconf-0.4.1-4.el9.x86_64 libedit-3.1-38.20210216cvs.el9.x86_64 libestr-0.1.11-4.el9.x86_64 libev-4.33-6.el9.x86_64 libevent-2.1.12-8.el9.x86_64 libfastjson-0.99.9-5.el9.x86_64 libfdisk-2.37.4-21.el9.x86_64 libffi-3.4.2-8.el9.x86_64 libffi-devel-3.4.2-8.el9.x86_64 libfido2-1.13.0-2.el9.x86_64 libgcc-11.5.0-14.el9.x86_64 libgcrypt-1.10.0-11.el9.x86_64 libgomp-11.5.0-14.el9.x86_64 libgpg-error-1.42-5.el9.x86_64 libgpg-error-devel-1.42-5.el9.x86_64 libibverbs-57.0-2.el9.x86_64 libicu-67.1-10.el9.x86_64 libidn2-2.3.0-7.el9.x86_64 libini_config-1.3.1-53.el9.x86_64 libjpeg-turbo-2.0.90-7.el9.x86_64 libkcapi-1.4.0-2.el9.x86_64 libkcapi-hmaccalc-1.4.0-2.el9.x86_64 libksba-1.5.1-7.el9.x86_64 libldb-4.23.3-1.el9.x86_64 libmaxminddb-1.5.2-4.el9.x86_64 libmnl-1.0.4-16.el9.x86_64 libmodulemd-2.13.0-2.el9.x86_64 libmount-2.37.4-21.el9.x86_64 libmpc-1.2.1-4.el9.x86_64 libndp-1.9-1.el9.x86_64 libnet-1.2-7.el9.x86_64 libnetfilter_conntrack-1.0.9-1.el9.x86_64 libnfnetlink-1.0.1-23.el9.x86_64 libnfsidmap-2.5.4-39.el9.x86_64 libnftnl-1.2.6-4.el9.x86_64 libnghttp2-1.43.0-6.el9.x86_64 libnl3-3.11.0-1.el9.x86_64 libnl3-cli-3.11.0-1.el9.x86_64 libnsl2-2.0.0-1.el9.x86_64 libpath_utils-0.2.1-53.el9.x86_64 libpcap-1.10.0-4.el9.x86_64 libpipeline-1.5.3-4.el9.x86_64 libpkgconf-1.7.3-10.el9.x86_64 libpng-1.6.37-12.el9.x86_64 libproxy-0.4.15-35.el9.x86_64 libproxy-webkitgtk4-0.4.15-35.el9.x86_64 libpsl-0.21.1-5.el9.x86_64 libpwquality-1.4.4-8.el9.x86_64 libref_array-0.1.5-53.el9.x86_64 librepo-1.14.5-3.el9.x86_64 libreport-filesystem-2.15.2-6.el9.noarch libseccomp-2.5.2-2.el9.x86_64 libselinux-3.6-3.el9.x86_64 libselinux-utils-3.6-3.el9.x86_64 libsemanage-3.6-5.el9.x86_64 libsepol-3.6-3.el9.x86_64 libsigsegv-2.13-4.el9.x86_64 libslirp-4.4.0-8.el9.x86_64 libsmartcols-2.37.4-21.el9.x86_64 libsolv-0.7.24-3.el9.x86_64 libsoup-2.72.0-10.el9.x86_64 libss-1.46.5-8.el9.x86_64 libssh-0.10.4-15.el9.x86_64 libssh-config-0.10.4-15.el9.noarch libsss_certmap-2.9.7-5.el9.x86_64 libsss_idmap-2.9.7-5.el9.x86_64 libsss_nss_idmap-2.9.7-5.el9.x86_64 libsss_sudo-2.9.7-5.el9.x86_64 libstdc++-11.5.0-14.el9.x86_64 libstdc++-devel-11.5.0-14.el9.x86_64 libstemmer-0-18.585svn.el9.x86_64 libsysfs-2.1.1-11.el9.x86_64 libtalloc-2.4.3-1.el9.x86_64 libtasn1-4.16.0-9.el9.x86_64 libtdb-1.4.14-1.el9.x86_64 libteam-1.31-16.el9.x86_64 libtevent-0.17.1-1.el9.x86_64 libtirpc-1.3.3-9.el9.x86_64 libtool-ltdl-2.4.6-46.el9.x86_64 libunistring-0.9.10-15.el9.x86_64 liburing-2.5-1.el9.x86_64 libuser-0.63-17.el9.x86_64 libutempter-1.2.1-6.el9.x86_64 libuuid-2.37.4-21.el9.x86_64 libverto-0.3.2-3.el9.x86_64 libverto-libev-0.3.2-3.el9.x86_64 libvirt-libs-11.9.0-1.el9.x86_64 libwbclient-4.23.3-1.el9.x86_64 libxcrypt-4.4.18-3.el9.x86_64 libxcrypt-compat-4.4.18-3.el9.x86_64 libxcrypt-devel-4.4.18-3.el9.x86_64 libxml2-2.9.13-14.el9.x86_64 libxml2-devel-2.9.13-14.el9.x86_64 libxslt-1.1.34-12.el9.x86_64 libxslt-devel-1.1.34-12.el9.x86_64 libyaml-0.2.5-7.el9.x86_64 libzstd-1.5.5-1.el9.x86_64 llvm-filesystem-21.1.3-1.el9.x86_64 llvm-libs-21.1.3-1.el9.x86_64 lmdb-libs-0.9.29-3.el9.x86_64 logrotate-3.18.0-12.el9.x86_64 lshw-B.02.20-3.el9.x86_64 lsscsi-0.32-6.el9.x86_64 lua-libs-5.4.4-4.el9.x86_64 lua-srpm-macros-1-6.el9.noarch lz4-libs-1.9.3-5.el9.x86_64 lzo-2.10-7.el9.x86_64 make-4.3-8.el9.x86_64 man-db-2.9.3-9.el9.x86_64 microcode_ctl-20251111-1.el9.noarch mpdecimal-2.5.1-3.el9.x86_64 mpfr-4.1.0-7.el9.x86_64 ncurses-6.2-12.20210508.el9.x86_64 ncurses-base-6.2-12.20210508.el9.noarch ncurses-c++-libs-6.2-12.20210508.el9.x86_64 ncurses-devel-6.2-12.20210508.el9.x86_64 
ncurses-libs-6.2-12.20210508.el9.x86_64 netavark-1.16.0-1.el9.x86_64 nettle-3.10.1-1.el9.x86_64 NetworkManager-1.54.1-1.el9.x86_64 NetworkManager-libnm-1.54.1-1.el9.x86_64 NetworkManager-team-1.54.1-1.el9.x86_64 NetworkManager-tui-1.54.1-1.el9.x86_64 newt-0.52.21-11.el9.x86_64 nfs-utils-2.5.4-39.el9.x86_64 nftables-1.0.9-5.el9.x86_64 npth-1.6-8.el9.x86_64 numactl-libs-2.0.19-3.el9.x86_64 ocaml-srpm-macros-6-6.el9.noarch oddjob-0.34.7-7.el9.x86_64 oddjob-mkhomedir-0.34.7-7.el9.x86_64 oniguruma-6.9.6-1.el9.6.x86_64 openblas-srpm-macros-2-11.el9.noarch openldap-2.6.8-4.el9.x86_64 openldap-devel-2.6.8-4.el9.x86_64 openssh-9.9p1-2.el9.x86_64 openssh-clients-9.9p1-2.el9.x86_64 openssh-server-9.9p1-2.el9.x86_64 openssl-3.5.1-6.el9.x86_64 openssl-devel-3.5.1-6.el9.x86_64 openssl-fips-provider-3.5.1-6.el9.x86_64 openssl-libs-3.5.1-6.el9.x86_64 os-prober-1.77-12.el9.x86_64 p11-kit-0.25.10-1.el9.x86_64 p11-kit-trust-0.25.10-1.el9.x86_64 pam-1.5.1-26.el9.x86_64 parted-3.5-3.el9.x86_64 passt-0^20250512.g8ec1341-2.el9.x86_64 passt-selinux-0^20250512.g8ec1341-2.el9.noarch passwd-0.80-12.el9.x86_64 patch-2.7.6-16.el9.x86_64 pciutils-libs-3.7.0-7.el9.x86_64 pcre2-10.40-6.el9.x86_64 pcre2-syntax-10.40-6.el9.noarch pcre-8.44-4.el9.x86_64 perl-AutoLoader-5.74-483.el9.noarch perl-B-1.80-483.el9.x86_64 perl-base-2.27-483.el9.noarch perl-Carp-1.50-460.el9.noarch perl-Class-Struct-0.66-483.el9.noarch perl-constant-1.33-461.el9.noarch perl-Data-Dumper-2.174-462.el9.x86_64 perl-Digest-1.19-4.el9.noarch perl-Digest-MD5-2.58-4.el9.x86_64 perl-DynaLoader-1.47-483.el9.x86_64 perl-Encode-3.08-462.el9.x86_64 perl-Errno-1.30-483.el9.x86_64 perl-Error-0.17029-7.el9.noarch perl-Exporter-5.74-461.el9.noarch perl-Fcntl-1.13-483.el9.x86_64 perl-File-Basename-2.85-483.el9.noarch perl-File-Find-1.37-483.el9.noarch perl-FileHandle-2.03-483.el9.noarch perl-File-Path-2.18-4.el9.noarch perl-File-stat-1.09-483.el9.noarch perl-File-Temp-0.231.100-4.el9.noarch perl-Getopt-Long-2.52-4.el9.noarch perl-Getopt-Std-1.12-483.el9.noarch perl-Git-2.47.3-1.el9.noarch perl-HTTP-Tiny-0.076-462.el9.noarch perl-if-0.60.800-483.el9.noarch perl-interpreter-5.32.1-483.el9.x86_64 perl-IO-1.43-483.el9.x86_64 perl-IO-Socket-IP-0.41-5.el9.noarch perl-IO-Socket-SSL-2.073-2.el9.noarch perl-IPC-Open3-1.21-483.el9.noarch perl-lib-0.65-483.el9.x86_64 perl-libnet-3.13-4.el9.noarch perl-libs-5.32.1-483.el9.x86_64 perl-MIME-Base64-3.16-4.el9.x86_64 perl-Mozilla-CA-20200520-6.el9.noarch perl-mro-1.23-483.el9.x86_64 perl-NDBM_File-1.15-483.el9.x86_64 perl-Net-SSLeay-1.94-3.el9.x86_64 perl-overload-1.31-483.el9.noarch perl-overloading-0.02-483.el9.noarch perl-parent-0.238-460.el9.noarch perl-PathTools-3.78-461.el9.x86_64 perl-Pod-Escapes-1.07-460.el9.noarch perl-podlators-4.14-460.el9.noarch perl-Pod-Perldoc-3.28.01-461.el9.noarch perl-Pod-Simple-3.42-4.el9.noarch perl-Pod-Usage-2.01-4.el9.noarch perl-POSIX-1.94-483.el9.x86_64 perl-Scalar-List-Utils-1.56-462.el9.x86_64 perl-SelectSaver-1.02-483.el9.noarch perl-Socket-2.031-4.el9.x86_64 perl-srpm-macros-1-41.el9.noarch perl-Storable-3.21-460.el9.x86_64 perl-subs-1.03-483.el9.noarch perl-Symbol-1.08-483.el9.noarch perl-Term-ANSIColor-5.01-461.el9.noarch perl-Term-Cap-1.17-460.el9.noarch perl-TermReadKey-2.38-11.el9.x86_64 perl-Text-ParseWords-3.30-460.el9.noarch perl-Text-Tabs+Wrap-2013.0523-460.el9.noarch perl-Time-Local-1.300-7.el9.noarch perl-URI-5.09-3.el9.noarch perl-vars-1.05-483.el9.noarch pigz-2.5-4.el9.x86_64 pkgconf-1.7.3-10.el9.x86_64 pkgconf-m4-1.7.3-10.el9.noarch pkgconf-pkg-config-1.7.3-10.el9.x86_64 
podman-5.6.0-2.el9.x86_64 policycoreutils-3.6-3.el9.x86_64 policycoreutils-python-utils-3.6-3.el9.noarch polkit-0.117-14.el9.x86_64 polkit-libs-0.117-14.el9.x86_64 polkit-pkla-compat-0.1-21.el9.x86_64 popt-1.18-8.el9.x86_64 prefixdevname-0.1.0-8.el9.x86_64 procps-ng-3.3.17-14.el9.x86_64 protobuf-c-1.3.3-13.el9.x86_64 psmisc-23.4-3.el9.x86_64 publicsuffix-list-dafsa-20210518-3.el9.noarch pyproject-srpm-macros-1.16.2-1.el9.noarch python3.12-3.12.12-1.el9.x86_64 python3.12-libs-3.12.12-1.el9.x86_64 python3.12-pip-23.2.1-5.el9.noarch python3.12-pip-wheel-23.2.1-5.el9.noarch python3.12-setuptools-68.2.2-5.el9.noarch python3-3.9.25-2.el9.x86_64 python3-attrs-20.3.0-7.el9.noarch python3-audit-3.1.5-7.el9.x86_64 python3-babel-2.9.1-2.el9.noarch python3-cffi-1.14.5-5.el9.x86_64 python3-chardet-4.0.0-5.el9.noarch python3-configobj-5.0.6-25.el9.noarch python3-cryptography-36.0.1-5.el9.x86_64 python3-dasbus-1.7-1.el9.noarch python3-dateutil-2.9.0.post0-1.el9.noarch python3-dbus-1.2.18-2.el9.x86_64 python3-devel-3.9.25-2.el9.x86_64 python3-distro-1.5.0-7.el9.noarch python3-dnf-4.14.0-31.el9.noarch python3-dnf-plugins-core-4.3.0-24.el9.noarch python3-enchant-3.2.0-5.el9.noarch python3-file-magic-5.39-16.el9.noarch python3-gobject-base-3.40.1-6.el9.x86_64 python3-gobject-base-noarch-3.40.1-6.el9.noarch python3-gpg-1.15.1-6.el9.x86_64 python3-hawkey-0.69.0-16.el9.x86_64 python3-idna-2.10-7.el9.1.noarch python3-jinja2-2.11.3-8.el9.noarch python3-jmespath-1.0.1-1.el9.noarch python3-jsonpatch-1.21-16.el9.noarch python3-jsonpointer-2.0-4.el9.noarch python3-jsonschema-3.2.0-13.el9.noarch python3-libcomps-0.1.18-1.el9.x86_64 python3-libdnf-0.69.0-16.el9.x86_64 python3-libs-3.9.25-2.el9.x86_64 python3-libselinux-3.6-3.el9.x86_64 python3-libsemanage-3.6-5.el9.x86_64 python3-libvirt-11.9.0-1.el9.x86_64 python3-libxml2-2.9.13-14.el9.x86_64 python3-lxml-4.6.5-3.el9.x86_64 python3-markupsafe-1.1.1-12.el9.x86_64 python3-netaddr-0.10.1-3.el9.noarch python3-netifaces-0.10.6-15.el9.x86_64 python3-oauthlib-3.1.1-5.el9.noarch python3-packaging-20.9-5.el9.noarch python3-pexpect-4.8.0-7.el9.noarch python3-pip-21.3.1-1.el9.noarch python3-pip-wheel-21.3.1-1.el9.noarch python3-ply-3.11-14.el9.noarch python3-policycoreutils-3.6-3.el9.noarch python3-prettytable-0.7.2-27.el9.noarch python3-ptyprocess-0.6.0-12.el9.noarch python3-pycparser-2.20-6.el9.noarch python3-pyparsing-2.4.7-9.el9.noarch python3-pyrsistent-0.17.3-8.el9.x86_64 python3-pyserial-3.4-12.el9.noarch python3-pysocks-1.7.1-12.el9.noarch python3-pytz-2021.1-5.el9.noarch python3-pyyaml-5.4.1-6.el9.x86_64 python3-requests-2.25.1-10.el9.noarch python3-resolvelib-0.5.4-5.el9.noarch python3-rpm-4.16.1.3-39.el9.x86_64 python3-rpm-generators-12-9.el9.noarch python3-rpm-macros-3.9-54.el9.noarch python3-setools-4.4.4-1.el9.x86_64 python3-setuptools-53.0.0-15.el9.noarch python3-setuptools-wheel-53.0.0-15.el9.noarch python3-six-1.15.0-9.el9.noarch python3-systemd-234-19.el9.x86_64 python3-urllib3-1.26.5-6.el9.noarch python-rpm-macros-3.9-54.el9.noarch python-srpm-macros-3.9-54.el9.noarch python-unversioned-command-3.9.25-2.el9.noarch qemu-guest-agent-10.1.0-4.el9.x86_64 qt5-srpm-macros-5.15.9-1.el9.noarch quota-4.09-4.el9.x86_64 quota-nls-4.09-4.el9.noarch readline-8.1-4.el9.x86_64 readline-devel-8.1-4.el9.x86_64 redhat-rpm-config-210-1.el9.noarch rootfiles-8.1-35.el9.noarch rpcbind-1.2.6-7.el9.x86_64 rpm-4.16.1.3-39.el9.x86_64 rpm-build-4.16.1.3-39.el9.x86_64 rpm-build-libs-4.16.1.3-39.el9.x86_64 rpm-libs-4.16.1.3-39.el9.x86_64 rpmlint-1.11-19.el9.noarch 
rpm-plugin-audit-4.16.1.3-39.el9.x86_64 rpm-plugin-selinux-4.16.1.3-39.el9.x86_64 rpm-plugin-systemd-inhibit-4.16.1.3-39.el9.x86_64 rpm-sign-4.16.1.3-39.el9.x86_64 rpm-sign-libs-4.16.1.3-39.el9.x86_64 rsync-3.2.5-4.el9.x86_64 rsyslog-8.2510.0-2.el9.x86_64 rsyslog-logrotate-8.2510.0-2.el9.x86_64 ruby-3.0.7-165.el9.x86_64 ruby-default-gems-3.0.7-165.el9.noarch ruby-devel-3.0.7-165.el9.x86_64 rubygem-bigdecimal-3.0.0-165.el9.x86_64 rubygem-bundler-2.2.33-165.el9.noarch rubygem-io-console-0.5.7-165.el9.x86_64 rubygem-json-2.5.1-165.el9.x86_64 rubygem-psych-3.3.2-165.el9.x86_64 rubygem-rdoc-6.3.4.1-165.el9.noarch rubygems-3.2.33-165.el9.noarch ruby-libs-3.0.7-165.el9.x86_64 rust-srpm-macros-17-4.el9.noarch samba-client-libs-4.23.3-1.el9.x86_64 samba-common-4.23.3-1.el9.noarch samba-common-libs-4.23.3-1.el9.x86_64 sed-4.8-9.el9.x86_64 selinux-policy-38.1.68-1.el9.noarch selinux-policy-targeted-38.1.68-1.el9.noarch setroubleshoot-plugins-3.3.14-4.el9.noarch setroubleshoot-server-3.3.35-2.el9.x86_64 setup-2.13.7-10.el9.noarch sg3_utils-1.47-10.el9.x86_64 sg3_utils-libs-1.47-10.el9.x86_64 shadow-utils-4.9-15.el9.x86_64 shadow-utils-subid-4.9-15.el9.x86_64 shared-mime-info-2.1-5.el9.x86_64 slang-2.3.2-11.el9.x86_64 slirp4netns-1.3.3-1.el9.x86_64 snappy-1.1.8-8.el9.x86_64 sos-4.10.1-1.el9.noarch sqlite-libs-3.34.1-9.el9.x86_64 squashfs-tools-4.4-10.git1.el9.x86_64 sscg-4.0.0-2.el9.x86_64 sshpass-1.09-4.el9.x86_64 sssd-client-2.9.7-5.el9.x86_64 sssd-common-2.9.7-5.el9.x86_64 sssd-kcm-2.9.7-5.el9.x86_64 sssd-nfs-idmap-2.9.7-5.el9.x86_64 sudo-1.9.5p2-13.el9.x86_64 systemd-252-59.el9.x86_64 systemd-devel-252-59.el9.x86_64 systemd-libs-252-59.el9.x86_64 systemd-pam-252-59.el9.x86_64 systemd-rpm-macros-252-59.el9.noarch systemd-udev-252-59.el9.x86_64 tar-1.34-7.el9.x86_64 tcl-8.6.10-7.el9.x86_64 tcpdump-4.99.0-9.el9.x86_64 teamd-1.31-16.el9.x86_64 time-1.9-18.el9.x86_64 tmux-3.2a-5.el9.x86_64 tpm2-tss-3.2.3-1.el9.x86_64 traceroute-2.1.1-1.el9.x86_64 tzdata-2025b-2.el9.noarch unzip-6.0-59.el9.x86_64 userspace-rcu-0.12.1-6.el9.x86_64 util-linux-2.37.4-21.el9.x86_64 util-linux-core-2.37.4-21.el9.x86_64 vim-minimal-8.2.2637-23.el9.x86_64 webkit2gtk3-jsc-2.50.1-1.el9.x86_64 wget-1.21.1-8.el9.x86_64 which-2.21-30.el9.x86_64 xfsprogs-6.4.0-7.el9.x86_64 xz-5.2.5-8.el9.x86_64 xz-devel-5.2.5-8.el9.x86_64 xz-libs-5.2.5-8.el9.x86_64 yajl-2.1.0-25.el9.x86_64 yum-4.14.0-31.el9.noarch yum-utils-4.3.0-24.el9.noarch zip-3.0-35.el9.x86_64 zlib-1.2.11-41.el9.x86_64 zlib-devel-1.2.11-41.el9.x86_64 zstd-1.5.5-1.el9.x86_64 home/zuul/zuul-output/logs/python.log0000644000175000017500000000223615115611543017117 0ustar zuulzuulPython 3.9.25 pip 25.3 from /home/zuul/.local/lib/python3.12/site-packages/pip (python 3.12) ansible [core 2.17.8] config file = /etc/ansible/ansible.cfg configured module search path = ['/home/zuul/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/zuul/.local/lib/python3.12/site-packages/ansible ansible collection location = /home/zuul/.ansible/collections:/usr/share/ansible/collections executable location = /home/zuul/.local/bin/ansible python version = 3.12.12 (main, Nov 14 2025, 00:00:00) [GCC 11.5.0 20240719 (Red Hat 11.5.0-14)] (/usr/bin/python3.12) jinja version = 3.1.6 libyaml = True ansible-core==2.17.8 cachetools==6.2.2 certifi==2025.11.12 cffi==2.0.0 charset-normalizer==3.4.4 cryptography==46.0.3 google-auth==2.43.0 idna==3.11 Jinja2==3.1.6 kubernetes==24.2.0 MarkupSafe==3.0.3 oauthlib==3.2.2 openshift==0.13.1 packaging==25.0 pyasn1==0.6.1 
pyasn1_modules==0.4.2 pycparser==2.23 python-dateutil==2.9.0.post0 python-string-utils==1.0.0 PyYAML==6.0.3 requests==2.32.4 requests-oauthlib==1.3.0 resolvelib==1.0.1 rsa==4.9.1 setuptools==68.2.2 six==1.17.0 urllib3==2.6.1 websocket-client==1.9.0 home/zuul/zuul-output/logs/dmesg.log0000644000175000017500000015162015115611543016677 0ustar zuulzuul[Mon Dec 8 17:34:51 2025] Linux version 5.14.0-645.el9.x86_64 (mockbuild@x86-05.stream.rdu2.redhat.com) (gcc (GCC) 11.5.0 20240719 (Red Hat 11.5.0-14), GNU ld version 2.35.2-68.el9) #1 SMP PREEMPT_DYNAMIC Fri Nov 28 14:01:17 UTC 2025 [Mon Dec 8 17:34:51 2025] The list of certified hardware and cloud instances for Red Hat Enterprise Linux 9 can be viewed at the Red Hat Ecosystem Catalog, https://catalog.redhat.com. [Mon Dec 8 17:34:51 2025] Command line: BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 root=UUID=fcf6b761-831a-48a7-9f5f-068b5063763f ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M [Mon Dec 8 17:34:51 2025] BIOS-provided physical RAM map: [Mon Dec 8 17:34:51 2025] BIOS-e820: [mem 0x0000000000000000-0x000000000009fbff] usable [Mon Dec 8 17:34:51 2025] BIOS-e820: [mem 0x000000000009fc00-0x000000000009ffff] reserved [Mon Dec 8 17:34:51 2025] BIOS-e820: [mem 0x00000000000f0000-0x00000000000fffff] reserved [Mon Dec 8 17:34:51 2025] BIOS-e820: [mem 0x0000000000100000-0x00000000bffdafff] usable [Mon Dec 8 17:34:51 2025] BIOS-e820: [mem 0x00000000bffdb000-0x00000000bfffffff] reserved [Mon Dec 8 17:34:51 2025] BIOS-e820: [mem 0x00000000feffc000-0x00000000feffffff] reserved [Mon Dec 8 17:34:51 2025] BIOS-e820: [mem 0x00000000fffc0000-0x00000000ffffffff] reserved [Mon Dec 8 17:34:51 2025] BIOS-e820: [mem 0x0000000100000000-0x000000023fffffff] usable [Mon Dec 8 17:34:51 2025] NX (Execute Disable) protection: active [Mon Dec 8 17:34:51 2025] APIC: Static calls initialized [Mon Dec 8 17:34:51 2025] SMBIOS 2.8 present. 
[Mon Dec 8 17:34:51 2025] DMI: OpenStack Foundation OpenStack Nova, BIOS 1.15.0-1 04/01/2014 [Mon Dec 8 17:34:51 2025] Hypervisor detected: KVM [Mon Dec 8 17:34:51 2025] kvm-clock: Using msrs 4b564d01 and 4b564d00 [Mon Dec 8 17:34:51 2025] kvm-clock: using sched offset of 3201334973 cycles [Mon Dec 8 17:34:51 2025] clocksource: kvm-clock: mask: 0xffffffffffffffff max_cycles: 0x1cd42e4dffb, max_idle_ns: 881590591483 ns [Mon Dec 8 17:34:51 2025] tsc: Detected 2799.998 MHz processor [Mon Dec 8 17:34:51 2025] e820: update [mem 0x00000000-0x00000fff] usable ==> reserved [Mon Dec 8 17:34:51 2025] e820: remove [mem 0x000a0000-0x000fffff] usable [Mon Dec 8 17:34:51 2025] last_pfn = 0x240000 max_arch_pfn = 0x400000000 [Mon Dec 8 17:34:51 2025] MTRR map: 4 entries (3 fixed + 1 variable; max 19), built from 8 variable MTRRs [Mon Dec 8 17:34:51 2025] x86/PAT: Configuration [0-7]: WB WC UC- UC WB WP UC- WT [Mon Dec 8 17:34:51 2025] last_pfn = 0xbffdb max_arch_pfn = 0x400000000 [Mon Dec 8 17:34:51 2025] found SMP MP-table at [mem 0x000f5ae0-0x000f5aef] [Mon Dec 8 17:34:51 2025] Using GB pages for direct mapping [Mon Dec 8 17:34:51 2025] RAMDISK: [mem 0x2d472000-0x32a30fff] [Mon Dec 8 17:34:51 2025] ACPI: Early table checksum verification disabled [Mon Dec 8 17:34:51 2025] ACPI: RSDP 0x00000000000F5AA0 000014 (v00 BOCHS ) [Mon Dec 8 17:34:51 2025] ACPI: RSDT 0x00000000BFFE16BD 000030 (v01 BOCHS BXPC 00000001 BXPC 00000001) [Mon Dec 8 17:34:51 2025] ACPI: FACP 0x00000000BFFE1571 000074 (v01 BOCHS BXPC 00000001 BXPC 00000001) [Mon Dec 8 17:34:51 2025] ACPI: DSDT 0x00000000BFFDFC80 0018F1 (v01 BOCHS BXPC 00000001 BXPC 00000001) [Mon Dec 8 17:34:51 2025] ACPI: FACS 0x00000000BFFDFC40 000040 [Mon Dec 8 17:34:51 2025] ACPI: APIC 0x00000000BFFE15E5 0000B0 (v01 BOCHS BXPC 00000001 BXPC 00000001) [Mon Dec 8 17:34:51 2025] ACPI: WAET 0x00000000BFFE1695 000028 (v01 BOCHS BXPC 00000001 BXPC 00000001) [Mon Dec 8 17:34:51 2025] ACPI: Reserving FACP table memory at [mem 0xbffe1571-0xbffe15e4] [Mon Dec 8 17:34:51 2025] ACPI: Reserving DSDT table memory at [mem 0xbffdfc80-0xbffe1570] [Mon Dec 8 17:34:51 2025] ACPI: Reserving FACS table memory at [mem 0xbffdfc40-0xbffdfc7f] [Mon Dec 8 17:34:51 2025] ACPI: Reserving APIC table memory at [mem 0xbffe15e5-0xbffe1694] [Mon Dec 8 17:34:51 2025] ACPI: Reserving WAET table memory at [mem 0xbffe1695-0xbffe16bc] [Mon Dec 8 17:34:51 2025] No NUMA configuration found [Mon Dec 8 17:34:51 2025] Faking a node at [mem 0x0000000000000000-0x000000023fffffff] [Mon Dec 8 17:34:51 2025] NODE_DATA(0) allocated [mem 0x23ffd5000-0x23fffffff] [Mon Dec 8 17:34:51 2025] crashkernel reserved: 0x00000000af000000 - 0x00000000bf000000 (256 MB) [Mon Dec 8 17:34:51 2025] Zone ranges: [Mon Dec 8 17:34:51 2025] DMA [mem 0x0000000000001000-0x0000000000ffffff] [Mon Dec 8 17:34:51 2025] DMA32 [mem 0x0000000001000000-0x00000000ffffffff] [Mon Dec 8 17:34:51 2025] Normal [mem 0x0000000100000000-0x000000023fffffff] [Mon Dec 8 17:34:51 2025] Device empty [Mon Dec 8 17:34:51 2025] Movable zone start for each node [Mon Dec 8 17:34:51 2025] Early memory node ranges [Mon Dec 8 17:34:51 2025] node 0: [mem 0x0000000000001000-0x000000000009efff] [Mon Dec 8 17:34:51 2025] node 0: [mem 0x0000000000100000-0x00000000bffdafff] [Mon Dec 8 17:34:51 2025] node 0: [mem 0x0000000100000000-0x000000023fffffff] [Mon Dec 8 17:34:51 2025] Initmem setup node 0 [mem 0x0000000000001000-0x000000023fffffff] [Mon Dec 8 17:34:51 2025] On node 0, zone DMA: 1 pages in unavailable ranges [Mon Dec 8 17:34:51 2025] On node 0, zone DMA: 97 pages in 
unavailable ranges [Mon Dec 8 17:34:51 2025] On node 0, zone Normal: 37 pages in unavailable ranges [Mon Dec 8 17:34:51 2025] ACPI: PM-Timer IO Port: 0x608 [Mon Dec 8 17:34:51 2025] ACPI: LAPIC_NMI (acpi_id[0xff] dfl dfl lint[0x1]) [Mon Dec 8 17:34:51 2025] IOAPIC[0]: apic_id 0, version 17, address 0xfec00000, GSI 0-23 [Mon Dec 8 17:34:51 2025] ACPI: INT_SRC_OVR (bus 0 bus_irq 0 global_irq 2 dfl dfl) [Mon Dec 8 17:34:51 2025] ACPI: INT_SRC_OVR (bus 0 bus_irq 5 global_irq 5 high level) [Mon Dec 8 17:34:51 2025] ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 high level) [Mon Dec 8 17:34:51 2025] ACPI: INT_SRC_OVR (bus 0 bus_irq 10 global_irq 10 high level) [Mon Dec 8 17:34:51 2025] ACPI: INT_SRC_OVR (bus 0 bus_irq 11 global_irq 11 high level) [Mon Dec 8 17:34:51 2025] ACPI: Using ACPI (MADT) for SMP configuration information [Mon Dec 8 17:34:51 2025] TSC deadline timer available [Mon Dec 8 17:34:51 2025] CPU topo: Max. logical packages: 8 [Mon Dec 8 17:34:51 2025] CPU topo: Max. logical dies: 8 [Mon Dec 8 17:34:51 2025] CPU topo: Max. dies per package: 1 [Mon Dec 8 17:34:51 2025] CPU topo: Max. threads per core: 1 [Mon Dec 8 17:34:51 2025] CPU topo: Num. cores per package: 1 [Mon Dec 8 17:34:51 2025] CPU topo: Num. threads per package: 1 [Mon Dec 8 17:34:51 2025] CPU topo: Allowing 8 present CPUs plus 0 hotplug CPUs [Mon Dec 8 17:34:51 2025] kvm-guest: APIC: eoi() replaced with kvm_guest_apic_eoi_write() [Mon Dec 8 17:34:51 2025] PM: hibernation: Registered nosave memory: [mem 0x00000000-0x00000fff] [Mon Dec 8 17:34:51 2025] PM: hibernation: Registered nosave memory: [mem 0x0009f000-0x0009ffff] [Mon Dec 8 17:34:51 2025] PM: hibernation: Registered nosave memory: [mem 0x000a0000-0x000effff] [Mon Dec 8 17:34:51 2025] PM: hibernation: Registered nosave memory: [mem 0x000f0000-0x000fffff] [Mon Dec 8 17:34:51 2025] PM: hibernation: Registered nosave memory: [mem 0xbffdb000-0xbfffffff] [Mon Dec 8 17:34:51 2025] PM: hibernation: Registered nosave memory: [mem 0xc0000000-0xfeffbfff] [Mon Dec 8 17:34:51 2025] PM: hibernation: Registered nosave memory: [mem 0xfeffc000-0xfeffffff] [Mon Dec 8 17:34:51 2025] PM: hibernation: Registered nosave memory: [mem 0xff000000-0xfffbffff] [Mon Dec 8 17:34:51 2025] PM: hibernation: Registered nosave memory: [mem 0xfffc0000-0xffffffff] [Mon Dec 8 17:34:51 2025] [mem 0xc0000000-0xfeffbfff] available for PCI devices [Mon Dec 8 17:34:51 2025] Booting paravirtualized kernel on KVM [Mon Dec 8 17:34:51 2025] clocksource: refined-jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 1910969940391419 ns [Mon Dec 8 17:34:51 2025] setup_percpu: NR_CPUS:8192 nr_cpumask_bits:8 nr_cpu_ids:8 nr_node_ids:1 [Mon Dec 8 17:34:51 2025] percpu: Embedded 64 pages/cpu s225280 r8192 d28672 u262144 [Mon Dec 8 17:34:51 2025] pcpu-alloc: s225280 r8192 d28672 u262144 alloc=1*2097152 [Mon Dec 8 17:34:51 2025] pcpu-alloc: [0] 0 1 2 3 4 5 6 7 [Mon Dec 8 17:34:51 2025] kvm-guest: PV spinlocks disabled, no host support [Mon Dec 8 17:34:51 2025] Kernel command line: BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 root=UUID=fcf6b761-831a-48a7-9f5f-068b5063763f ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M [Mon Dec 8 17:34:51 2025] Unknown kernel command line parameters "BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64", will be passed to user space. 
[Mon Dec 8 17:34:51 2025] random: crng init done [Mon Dec 8 17:34:51 2025] Dentry cache hash table entries: 1048576 (order: 11, 8388608 bytes, linear) [Mon Dec 8 17:34:51 2025] Inode-cache hash table entries: 524288 (order: 10, 4194304 bytes, linear) [Mon Dec 8 17:34:51 2025] Fallback order for Node 0: 0 [Mon Dec 8 17:34:51 2025] Built 1 zonelists, mobility grouping on. Total pages: 2064091 [Mon Dec 8 17:34:51 2025] Policy zone: Normal [Mon Dec 8 17:34:51 2025] mem auto-init: stack:off, heap alloc:off, heap free:off [Mon Dec 8 17:34:51 2025] software IO TLB: area num 8. [Mon Dec 8 17:34:51 2025] SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=8, Nodes=1 [Mon Dec 8 17:34:51 2025] ftrace: allocating 49335 entries in 193 pages [Mon Dec 8 17:34:51 2025] ftrace: allocated 193 pages with 3 groups [Mon Dec 8 17:34:51 2025] Dynamic Preempt: voluntary [Mon Dec 8 17:34:51 2025] rcu: Preemptible hierarchical RCU implementation. [Mon Dec 8 17:34:51 2025] rcu: RCU event tracing is enabled. [Mon Dec 8 17:34:51 2025] rcu: RCU restricting CPUs from NR_CPUS=8192 to nr_cpu_ids=8. [Mon Dec 8 17:34:51 2025] Trampoline variant of Tasks RCU enabled. [Mon Dec 8 17:34:51 2025] Rude variant of Tasks RCU enabled. [Mon Dec 8 17:34:51 2025] Tracing variant of Tasks RCU enabled. [Mon Dec 8 17:34:51 2025] rcu: RCU calculated value of scheduler-enlistment delay is 100 jiffies. [Mon Dec 8 17:34:51 2025] rcu: Adjusting geometry for rcu_fanout_leaf=16, nr_cpu_ids=8 [Mon Dec 8 17:34:51 2025] RCU Tasks: Setting shift to 3 and lim to 1 rcu_task_cb_adjust=1 rcu_task_cpu_ids=8. [Mon Dec 8 17:34:51 2025] RCU Tasks Rude: Setting shift to 3 and lim to 1 rcu_task_cb_adjust=1 rcu_task_cpu_ids=8. [Mon Dec 8 17:34:51 2025] RCU Tasks Trace: Setting shift to 3 and lim to 1 rcu_task_cb_adjust=1 rcu_task_cpu_ids=8. [Mon Dec 8 17:34:51 2025] NR_IRQS: 524544, nr_irqs: 488, preallocated irqs: 16 [Mon Dec 8 17:34:51 2025] rcu: srcu_init: Setting srcu_struct sizes based on contention. [Mon Dec 8 17:34:51 2025] kfence: initialized - using 2097152 bytes for 255 objects at 0x(____ptrval____)-0x(____ptrval____) [Mon Dec 8 17:34:51 2025] Console: colour VGA+ 80x25 [Mon Dec 8 17:34:51 2025] printk: console [ttyS0] enabled [Mon Dec 8 17:34:51 2025] ACPI: Core revision 20230331 [Mon Dec 8 17:34:51 2025] APIC: Switch to symmetric I/O mode setup [Mon Dec 8 17:34:51 2025] x2apic enabled [Mon Dec 8 17:34:51 2025] APIC: Switched APIC routing to: physical x2apic [Mon Dec 8 17:34:51 2025] tsc: Marking TSC unstable due to TSCs unsynchronized [Mon Dec 8 17:34:51 2025] Calibrating delay loop (skipped) preset value.. 
5599.99 BogoMIPS (lpj=2799998) [Mon Dec 8 17:34:51 2025] x86/cpu: User Mode Instruction Prevention (UMIP) activated [Mon Dec 8 17:34:51 2025] Last level iTLB entries: 4KB 512, 2MB 255, 4MB 127 [Mon Dec 8 17:34:51 2025] Last level dTLB entries: 4KB 512, 2MB 255, 4MB 127, 1GB 0 [Mon Dec 8 17:34:51 2025] Spectre V1 : Mitigation: usercopy/swapgs barriers and __user pointer sanitization [Mon Dec 8 17:34:51 2025] Spectre V2 : Mitigation: Retpolines [Mon Dec 8 17:34:51 2025] Spectre V2 : Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT [Mon Dec 8 17:34:51 2025] Spectre V2 : Enabling Speculation Barrier for firmware calls [Mon Dec 8 17:34:51 2025] RETBleed: Mitigation: untrained return thunk [Mon Dec 8 17:34:51 2025] Spectre V2 : mitigation: Enabling conditional Indirect Branch Prediction Barrier [Mon Dec 8 17:34:51 2025] Speculative Store Bypass: Mitigation: Speculative Store Bypass disabled via prctl [Mon Dec 8 17:34:51 2025] Speculative Return Stack Overflow: IBPB-extending microcode not applied! [Mon Dec 8 17:34:51 2025] Speculative Return Stack Overflow: WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options. [Mon Dec 8 17:34:51 2025] x86/bugs: return thunk changed [Mon Dec 8 17:34:51 2025] Speculative Return Stack Overflow: Vulnerable: Safe RET, no microcode [Mon Dec 8 17:34:51 2025] x86/fpu: Supporting XSAVE feature 0x001: 'x87 floating point registers' [Mon Dec 8 17:34:51 2025] x86/fpu: Supporting XSAVE feature 0x002: 'SSE registers' [Mon Dec 8 17:34:51 2025] x86/fpu: Supporting XSAVE feature 0x004: 'AVX registers' [Mon Dec 8 17:34:51 2025] x86/fpu: xstate_offset[2]: 576, xstate_sizes[2]: 256 [Mon Dec 8 17:34:51 2025] x86/fpu: Enabled xstate features 0x7, context size is 832 bytes, using 'compacted' format. [Mon Dec 8 17:34:51 2025] Freeing SMP alternatives memory: 40K [Mon Dec 8 17:34:51 2025] pid_max: default: 32768 minimum: 301 [Mon Dec 8 17:34:51 2025] LSM: initializing lsm=lockdown,capability,landlock,yama,integrity,selinux,bpf [Mon Dec 8 17:34:51 2025] landlock: Up and running. [Mon Dec 8 17:34:51 2025] Yama: becoming mindful. [Mon Dec 8 17:34:51 2025] SELinux: Initializing. [Mon Dec 8 17:34:51 2025] LSM support for eBPF active [Mon Dec 8 17:34:51 2025] Mount-cache hash table entries: 16384 (order: 5, 131072 bytes, linear) [Mon Dec 8 17:34:51 2025] Mountpoint-cache hash table entries: 16384 (order: 5, 131072 bytes, linear) [Mon Dec 8 17:34:51 2025] smpboot: CPU0: AMD EPYC-Rome Processor (family: 0x17, model: 0x31, stepping: 0x0) [Mon Dec 8 17:34:51 2025] Performance Events: Fam17h+ core perfctr, AMD PMU driver. [Mon Dec 8 17:34:51 2025] ... version: 0 [Mon Dec 8 17:34:51 2025] ... bit width: 48 [Mon Dec 8 17:34:51 2025] ... generic registers: 6 [Mon Dec 8 17:34:51 2025] ... value mask: 0000ffffffffffff [Mon Dec 8 17:34:51 2025] ... max period: 00007fffffffffff [Mon Dec 8 17:34:51 2025] ... fixed-purpose events: 0 [Mon Dec 8 17:34:51 2025] ... event mask: 000000000000003f [Mon Dec 8 17:34:51 2025] signal: max sigframe size: 1776 [Mon Dec 8 17:34:51 2025] rcu: Hierarchical SRCU implementation. [Mon Dec 8 17:34:51 2025] rcu: Max phase no-delay instances is 400. [Mon Dec 8 17:34:51 2025] smp: Bringing up secondary CPUs ... [Mon Dec 8 17:34:51 2025] smpboot: x86: Booting SMP configuration: [Mon Dec 8 17:34:51 2025] .... 
node #0, CPUs: #1 #2 #3 #4 #5 #6 #7 [Mon Dec 8 17:34:51 2025] smp: Brought up 1 node, 8 CPUs [Mon Dec 8 17:34:51 2025] smpboot: Total of 8 processors activated (44799.96 BogoMIPS) [Mon Dec 8 17:34:51 2025] node 0 deferred pages initialised in 23ms [Mon Dec 8 17:34:51 2025] Memory: 7763956K/8388068K available (16384K kernel code, 5795K rwdata, 13908K rodata, 4196K init, 7156K bss, 618204K reserved, 0K cma-reserved) [Mon Dec 8 17:34:51 2025] devtmpfs: initialized [Mon Dec 8 17:34:51 2025] x86/mm: Memory block size: 128MB [Mon Dec 8 17:34:51 2025] clocksource: jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 1911260446275000 ns [Mon Dec 8 17:34:51 2025] futex hash table entries: 2048 (131072 bytes on 1 NUMA nodes, total 128 KiB, linear). [Mon Dec 8 17:34:51 2025] pinctrl core: initialized pinctrl subsystem [Mon Dec 8 17:34:51 2025] NET: Registered PF_NETLINK/PF_ROUTE protocol family [Mon Dec 8 17:34:51 2025] DMA: preallocated 1024 KiB GFP_KERNEL pool for atomic allocations [Mon Dec 8 17:34:51 2025] DMA: preallocated 1024 KiB GFP_KERNEL|GFP_DMA pool for atomic allocations [Mon Dec 8 17:34:51 2025] DMA: preallocated 1024 KiB GFP_KERNEL|GFP_DMA32 pool for atomic allocations [Mon Dec 8 17:34:51 2025] audit: initializing netlink subsys (disabled) [Mon Dec 8 17:34:51 2025] audit: type=2000 audit(1765215290.944:1): state=initialized audit_enabled=0 res=1 [Mon Dec 8 17:34:51 2025] thermal_sys: Registered thermal governor 'fair_share' [Mon Dec 8 17:34:51 2025] thermal_sys: Registered thermal governor 'step_wise' [Mon Dec 8 17:34:51 2025] thermal_sys: Registered thermal governor 'user_space' [Mon Dec 8 17:34:51 2025] cpuidle: using governor menu [Mon Dec 8 17:34:51 2025] acpiphp: ACPI Hot Plug PCI Controller Driver version: 0.5 [Mon Dec 8 17:34:51 2025] PCI: Using configuration type 1 for base access [Mon Dec 8 17:34:51 2025] PCI: Using configuration type 1 for extended access [Mon Dec 8 17:34:51 2025] kprobes: kprobe jump-optimization is enabled. All kprobes are optimized if possible. 
[Mon Dec 8 17:34:51 2025] HugeTLB: registered 1.00 GiB page size, pre-allocated 0 pages [Mon Dec 8 17:34:51 2025] HugeTLB: 16380 KiB vmemmap can be freed for a 1.00 GiB page [Mon Dec 8 17:34:51 2025] HugeTLB: registered 2.00 MiB page size, pre-allocated 0 pages [Mon Dec 8 17:34:51 2025] HugeTLB: 28 KiB vmemmap can be freed for a 2.00 MiB page [Mon Dec 8 17:34:51 2025] Demotion targets for Node 0: null [Mon Dec 8 17:34:51 2025] cryptd: max_cpu_qlen set to 1000 [Mon Dec 8 17:34:51 2025] ACPI: Added _OSI(Module Device) [Mon Dec 8 17:34:51 2025] ACPI: Added _OSI(Processor Device) [Mon Dec 8 17:34:51 2025] ACPI: Added _OSI(3.0 _SCP Extensions) [Mon Dec 8 17:34:51 2025] ACPI: Added _OSI(Processor Aggregator Device) [Mon Dec 8 17:34:51 2025] ACPI: 1 ACPI AML tables successfully acquired and loaded [Mon Dec 8 17:34:51 2025] ACPI: _OSC evaluation for CPUs failed, trying _PDC [Mon Dec 8 17:34:51 2025] ACPI: Interpreter enabled [Mon Dec 8 17:34:51 2025] ACPI: PM: (supports S0 S3 S4 S5) [Mon Dec 8 17:34:51 2025] ACPI: Using IOAPIC for interrupt routing [Mon Dec 8 17:34:51 2025] PCI: Using host bridge windows from ACPI; if necessary, use "pci=nocrs" and report a bug [Mon Dec 8 17:34:51 2025] PCI: Using E820 reservations for host bridge windows [Mon Dec 8 17:34:51 2025] ACPI: Enabled 2 GPEs in block 00 to 0F [Mon Dec 8 17:34:51 2025] ACPI: PCI Root Bridge [PCI0] (domain 0000 [bus 00-ff]) [Mon Dec 8 17:34:51 2025] acpi PNP0A03:00: _OSC: OS supports [ExtendedConfig ASPM ClockPM Segments MSI EDR HPX-Type3] [Mon Dec 8 17:34:51 2025] acpiphp: Slot [3] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [4] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [5] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [6] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [7] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [8] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [9] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [10] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [11] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [12] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [13] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [14] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [15] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [16] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [17] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [18] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [19] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [20] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [21] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [22] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [23] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [24] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [25] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [26] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [27] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [28] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [29] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [30] registered [Mon Dec 8 17:34:51 2025] acpiphp: Slot [31] registered [Mon Dec 8 17:34:51 2025] PCI host bridge to bus 0000:00 [Mon Dec 8 17:34:51 2025] pci_bus 0000:00: root bus resource [io 0x0000-0x0cf7 window] [Mon Dec 8 17:34:51 2025] pci_bus 0000:00: root bus resource [io 0x0d00-0xffff window] [Mon Dec 8 17:34:51 2025] pci_bus 0000:00: root bus resource [mem 0x000a0000-0x000bffff window] [Mon Dec 8 17:34:51 2025] pci_bus 0000:00: root bus resource [mem 0xc0000000-0xfebfffff window] 
[Mon Dec 8 17:34:51 2025] pci_bus 0000:00: root bus resource [mem 0x240000000-0x2bfffffff window] [Mon Dec 8 17:34:51 2025] pci_bus 0000:00: root bus resource [bus 00-ff] [Mon Dec 8 17:34:51 2025] pci 0000:00:00.0: [8086:1237] type 00 class 0x060000 conventional PCI endpoint [Mon Dec 8 17:34:51 2025] pci 0000:00:01.0: [8086:7000] type 00 class 0x060100 conventional PCI endpoint [Mon Dec 8 17:34:51 2025] pci 0000:00:01.1: [8086:7010] type 00 class 0x010180 conventional PCI endpoint [Mon Dec 8 17:34:51 2025] pci 0000:00:01.1: BAR 4 [io 0xc140-0xc14f] [Mon Dec 8 17:34:51 2025] pci 0000:00:01.1: BAR 0 [io 0x01f0-0x01f7]: legacy IDE quirk [Mon Dec 8 17:34:51 2025] pci 0000:00:01.1: BAR 1 [io 0x03f6]: legacy IDE quirk [Mon Dec 8 17:34:51 2025] pci 0000:00:01.1: BAR 2 [io 0x0170-0x0177]: legacy IDE quirk [Mon Dec 8 17:34:51 2025] pci 0000:00:01.1: BAR 3 [io 0x0376]: legacy IDE quirk [Mon Dec 8 17:34:51 2025] pci 0000:00:01.2: [8086:7020] type 00 class 0x0c0300 conventional PCI endpoint [Mon Dec 8 17:34:51 2025] pci 0000:00:01.2: BAR 4 [io 0xc100-0xc11f] [Mon Dec 8 17:34:51 2025] pci 0000:00:01.3: [8086:7113] type 00 class 0x068000 conventional PCI endpoint [Mon Dec 8 17:34:51 2025] pci 0000:00:01.3: quirk: [io 0x0600-0x063f] claimed by PIIX4 ACPI [Mon Dec 8 17:34:51 2025] pci 0000:00:01.3: quirk: [io 0x0700-0x070f] claimed by PIIX4 SMB [Mon Dec 8 17:34:51 2025] pci 0000:00:02.0: [1af4:1050] type 00 class 0x030000 conventional PCI endpoint [Mon Dec 8 17:34:51 2025] pci 0000:00:02.0: BAR 0 [mem 0xfe000000-0xfe7fffff pref] [Mon Dec 8 17:34:51 2025] pci 0000:00:02.0: BAR 2 [mem 0xfe800000-0xfe803fff 64bit pref] [Mon Dec 8 17:34:51 2025] pci 0000:00:02.0: BAR 4 [mem 0xfeb90000-0xfeb90fff] [Mon Dec 8 17:34:51 2025] pci 0000:00:02.0: ROM [mem 0xfeb80000-0xfeb8ffff pref] [Mon Dec 8 17:34:51 2025] pci 0000:00:02.0: Video device with shadowed ROM at [mem 0x000c0000-0x000dffff] [Mon Dec 8 17:34:51 2025] pci 0000:00:03.0: [1af4:1000] type 00 class 0x020000 conventional PCI endpoint [Mon Dec 8 17:34:51 2025] pci 0000:00:03.0: BAR 0 [io 0xc080-0xc0bf] [Mon Dec 8 17:34:51 2025] pci 0000:00:03.0: BAR 1 [mem 0xfeb91000-0xfeb91fff] [Mon Dec 8 17:34:51 2025] pci 0000:00:03.0: BAR 4 [mem 0xfe804000-0xfe807fff 64bit pref] [Mon Dec 8 17:34:51 2025] pci 0000:00:03.0: ROM [mem 0xfeb00000-0xfeb7ffff pref] [Mon Dec 8 17:34:51 2025] pci 0000:00:04.0: [1af4:1001] type 00 class 0x010000 conventional PCI endpoint [Mon Dec 8 17:34:51 2025] pci 0000:00:04.0: BAR 0 [io 0xc000-0xc07f] [Mon Dec 8 17:34:51 2025] pci 0000:00:04.0: BAR 1 [mem 0xfeb92000-0xfeb92fff] [Mon Dec 8 17:34:51 2025] pci 0000:00:04.0: BAR 4 [mem 0xfe808000-0xfe80bfff 64bit pref] [Mon Dec 8 17:34:51 2025] pci 0000:00:05.0: [1af4:1002] type 00 class 0x00ff00 conventional PCI endpoint [Mon Dec 8 17:34:51 2025] pci 0000:00:05.0: BAR 0 [io 0xc0c0-0xc0ff] [Mon Dec 8 17:34:51 2025] pci 0000:00:05.0: BAR 4 [mem 0xfe80c000-0xfe80ffff 64bit pref] [Mon Dec 8 17:34:51 2025] pci 0000:00:06.0: [1af4:1005] type 00 class 0x00ff00 conventional PCI endpoint [Mon Dec 8 17:34:51 2025] pci 0000:00:06.0: BAR 0 [io 0xc120-0xc13f] [Mon Dec 8 17:34:51 2025] pci 0000:00:06.0: BAR 4 [mem 0xfe810000-0xfe813fff 64bit pref] [Mon Dec 8 17:34:51 2025] ACPI: PCI: Interrupt link LNKA configured for IRQ 10 [Mon Dec 8 17:34:51 2025] ACPI: PCI: Interrupt link LNKB configured for IRQ 10 [Mon Dec 8 17:34:51 2025] ACPI: PCI: Interrupt link LNKC configured for IRQ 11 [Mon Dec 8 17:34:51 2025] ACPI: PCI: Interrupt link LNKD configured for IRQ 11 [Mon Dec 8 17:34:51 2025] ACPI: PCI: Interrupt link LNKS 
configured for IRQ 9 [Mon Dec 8 17:34:51 2025] iommu: Default domain type: Translated [Mon Dec 8 17:34:51 2025] iommu: DMA domain TLB invalidation policy: lazy mode [Mon Dec 8 17:34:51 2025] SCSI subsystem initialized [Mon Dec 8 17:34:51 2025] ACPI: bus type USB registered [Mon Dec 8 17:34:51 2025] usbcore: registered new interface driver usbfs [Mon Dec 8 17:34:51 2025] usbcore: registered new interface driver hub [Mon Dec 8 17:34:51 2025] usbcore: registered new device driver usb [Mon Dec 8 17:34:51 2025] pps_core: LinuxPPS API ver. 1 registered [Mon Dec 8 17:34:51 2025] pps_core: Software ver. 5.3.6 - Copyright 2005-2007 Rodolfo Giometti [Mon Dec 8 17:34:51 2025] PTP clock support registered [Mon Dec 8 17:34:51 2025] EDAC MC: Ver: 3.0.0 [Mon Dec 8 17:34:51 2025] NetLabel: Initializing [Mon Dec 8 17:34:51 2025] NetLabel: domain hash size = 128 [Mon Dec 8 17:34:51 2025] NetLabel: protocols = UNLABELED CIPSOv4 CALIPSO [Mon Dec 8 17:34:51 2025] NetLabel: unlabeled traffic allowed by default [Mon Dec 8 17:34:51 2025] PCI: Using ACPI for IRQ routing [Mon Dec 8 17:34:51 2025] PCI: pci_cache_line_size set to 64 bytes [Mon Dec 8 17:34:51 2025] e820: reserve RAM buffer [mem 0x0009fc00-0x0009ffff] [Mon Dec 8 17:34:51 2025] e820: reserve RAM buffer [mem 0xbffdb000-0xbfffffff] [Mon Dec 8 17:34:51 2025] pci 0000:00:02.0: vgaarb: setting as boot VGA device [Mon Dec 8 17:34:51 2025] pci 0000:00:02.0: vgaarb: bridge control possible [Mon Dec 8 17:34:51 2025] pci 0000:00:02.0: vgaarb: VGA device added: decodes=io+mem,owns=io+mem,locks=none [Mon Dec 8 17:34:51 2025] vgaarb: loaded [Mon Dec 8 17:34:51 2025] clocksource: Switched to clocksource kvm-clock [Mon Dec 8 17:34:51 2025] VFS: Disk quotas dquot_6.6.0 [Mon Dec 8 17:34:51 2025] VFS: Dquot-cache hash table entries: 512 (order 0, 4096 bytes) [Mon Dec 8 17:34:51 2025] pnp: PnP ACPI init [Mon Dec 8 17:34:51 2025] pnp 00:03: [dma 2] [Mon Dec 8 17:34:51 2025] pnp: PnP ACPI: found 5 devices [Mon Dec 8 17:34:51 2025] clocksource: acpi_pm: mask: 0xffffff max_cycles: 0xffffff, max_idle_ns: 2085701024 ns [Mon Dec 8 17:34:51 2025] NET: Registered PF_INET protocol family [Mon Dec 8 17:34:51 2025] IP idents hash table entries: 131072 (order: 8, 1048576 bytes, linear) [Mon Dec 8 17:34:51 2025] tcp_listen_portaddr_hash hash table entries: 4096 (order: 4, 65536 bytes, linear) [Mon Dec 8 17:34:51 2025] Table-perturb hash table entries: 65536 (order: 6, 262144 bytes, linear) [Mon Dec 8 17:34:51 2025] TCP established hash table entries: 65536 (order: 7, 524288 bytes, linear) [Mon Dec 8 17:34:51 2025] TCP bind hash table entries: 65536 (order: 8, 1048576 bytes, linear) [Mon Dec 8 17:34:51 2025] TCP: Hash tables configured (established 65536 bind 65536) [Mon Dec 8 17:34:51 2025] MPTCP token hash table entries: 8192 (order: 5, 196608 bytes, linear) [Mon Dec 8 17:34:51 2025] UDP hash table entries: 4096 (order: 5, 131072 bytes, linear) [Mon Dec 8 17:34:51 2025] UDP-Lite hash table entries: 4096 (order: 5, 131072 bytes, linear) [Mon Dec 8 17:34:51 2025] NET: Registered PF_UNIX/PF_LOCAL protocol family [Mon Dec 8 17:34:51 2025] NET: Registered PF_XDP protocol family [Mon Dec 8 17:34:51 2025] pci_bus 0000:00: resource 4 [io 0x0000-0x0cf7 window] [Mon Dec 8 17:34:51 2025] pci_bus 0000:00: resource 5 [io 0x0d00-0xffff window] [Mon Dec 8 17:34:51 2025] pci_bus 0000:00: resource 6 [mem 0x000a0000-0x000bffff window] [Mon Dec 8 17:34:51 2025] pci_bus 0000:00: resource 7 [mem 0xc0000000-0xfebfffff window] [Mon Dec 8 17:34:51 2025] pci_bus 0000:00: resource 8 [mem 
0x240000000-0x2bfffffff window] [Mon Dec 8 17:34:51 2025] pci 0000:00:01.0: PIIX3: Enabling Passive Release [Mon Dec 8 17:34:51 2025] pci 0000:00:00.0: Limiting direct PCI/PCI transfers [Mon Dec 8 17:34:51 2025] ACPI: \_SB_.LNKD: Enabled at IRQ 11 [Mon Dec 8 17:34:51 2025] pci 0000:00:01.2: quirk_usb_early_handoff+0x0/0x160 took 77617 usecs [Mon Dec 8 17:34:51 2025] PCI: CLS 0 bytes, default 64 [Mon Dec 8 17:34:51 2025] PCI-DMA: Using software bounce buffering for IO (SWIOTLB) [Mon Dec 8 17:34:51 2025] software IO TLB: mapped [mem 0x00000000ab000000-0x00000000af000000] (64MB) [Mon Dec 8 17:34:51 2025] ACPI: bus type thunderbolt registered [Mon Dec 8 17:34:51 2025] Trying to unpack rootfs image as initramfs... [Mon Dec 8 17:34:51 2025] Initialise system trusted keyrings [Mon Dec 8 17:34:51 2025] Key type blacklist registered [Mon Dec 8 17:34:51 2025] workingset: timestamp_bits=36 max_order=21 bucket_order=0 [Mon Dec 8 17:34:51 2025] zbud: loaded [Mon Dec 8 17:34:51 2025] integrity: Platform Keyring initialized [Mon Dec 8 17:34:51 2025] integrity: Machine keyring initialized [Mon Dec 8 17:34:52 2025] Freeing initrd memory: 87804K [Mon Dec 8 17:34:52 2025] NET: Registered PF_ALG protocol family [Mon Dec 8 17:34:52 2025] xor: automatically using best checksumming function avx [Mon Dec 8 17:34:52 2025] Key type asymmetric registered [Mon Dec 8 17:34:52 2025] Asymmetric key parser 'x509' registered [Mon Dec 8 17:34:52 2025] Block layer SCSI generic (bsg) driver version 0.4 loaded (major 246) [Mon Dec 8 17:34:52 2025] io scheduler mq-deadline registered [Mon Dec 8 17:34:52 2025] io scheduler kyber registered [Mon Dec 8 17:34:52 2025] io scheduler bfq registered [Mon Dec 8 17:34:52 2025] atomic64_test: passed for x86-64 platform with CX8 and with SSE [Mon Dec 8 17:34:52 2025] shpchp: Standard Hot Plug PCI Controller Driver version: 0.4 [Mon Dec 8 17:34:52 2025] input: Power Button as /devices/LNXSYSTM:00/LNXPWRBN:00/input/input0 [Mon Dec 8 17:34:52 2025] ACPI: button: Power Button [PWRF] [Mon Dec 8 17:34:52 2025] ACPI: \_SB_.LNKB: Enabled at IRQ 10 [Mon Dec 8 17:34:52 2025] ACPI: \_SB_.LNKC: Enabled at IRQ 11 [Mon Dec 8 17:34:52 2025] ACPI: \_SB_.LNKA: Enabled at IRQ 10 [Mon Dec 8 17:34:52 2025] Serial: 8250/16550 driver, 4 ports, IRQ sharing enabled [Mon Dec 8 17:34:52 2025] 00:00: ttyS0 at I/O 0x3f8 (irq = 4, base_baud = 115200) is a 16550A [Mon Dec 8 17:34:52 2025] Non-volatile memory driver v1.3 [Mon Dec 8 17:34:52 2025] rdac: device handler registered [Mon Dec 8 17:34:52 2025] hp_sw: device handler registered [Mon Dec 8 17:34:52 2025] emc: device handler registered [Mon Dec 8 17:34:52 2025] alua: device handler registered [Mon Dec 8 17:34:52 2025] uhci_hcd 0000:00:01.2: UHCI Host Controller [Mon Dec 8 17:34:52 2025] uhci_hcd 0000:00:01.2: new USB bus registered, assigned bus number 1 [Mon Dec 8 17:34:52 2025] uhci_hcd 0000:00:01.2: detected 2 ports [Mon Dec 8 17:34:52 2025] uhci_hcd 0000:00:01.2: irq 11, io port 0x0000c100 [Mon Dec 8 17:34:52 2025] usb usb1: New USB device found, idVendor=1d6b, idProduct=0001, bcdDevice= 5.14 [Mon Dec 8 17:34:52 2025] usb usb1: New USB device strings: Mfr=3, Product=2, SerialNumber=1 [Mon Dec 8 17:34:52 2025] usb usb1: Product: UHCI Host Controller [Mon Dec 8 17:34:52 2025] usb usb1: Manufacturer: Linux 5.14.0-645.el9.x86_64 uhci_hcd [Mon Dec 8 17:34:52 2025] usb usb1: SerialNumber: 0000:00:01.2 [Mon Dec 8 17:34:52 2025] hub 1-0:1.0: USB hub found [Mon Dec 8 17:34:52 2025] hub 1-0:1.0: 2 ports detected [Mon Dec 8 17:34:52 2025] usbcore: registered new 
interface driver usbserial_generic [Mon Dec 8 17:34:52 2025] usbserial: USB Serial support registered for generic [Mon Dec 8 17:34:52 2025] i8042: PNP: PS/2 Controller [PNP0303:KBD,PNP0f13:MOU] at 0x60,0x64 irq 1,12 [Mon Dec 8 17:34:52 2025] serio: i8042 KBD port at 0x60,0x64 irq 1 [Mon Dec 8 17:34:52 2025] serio: i8042 AUX port at 0x60,0x64 irq 12 [Mon Dec 8 17:34:52 2025] mousedev: PS/2 mouse device common for all mice [Mon Dec 8 17:34:52 2025] rtc_cmos 00:04: RTC can wake from S4 [Mon Dec 8 17:34:52 2025] input: AT Translated Set 2 keyboard as /devices/platform/i8042/serio0/input/input1 [Mon Dec 8 17:34:52 2025] rtc_cmos 00:04: registered as rtc0 [Mon Dec 8 17:34:52 2025] input: VirtualPS/2 VMware VMMouse as /devices/platform/i8042/serio1/input/input4 [Mon Dec 8 17:34:52 2025] rtc_cmos 00:04: setting system clock to 2025-12-08T17:34:52 UTC (1765215292) [Mon Dec 8 17:34:52 2025] rtc_cmos 00:04: alarms up to one day, y3k, 242 bytes nvram [Mon Dec 8 17:34:52 2025] amd_pstate: the _CPC object is not present in SBIOS or ACPI disabled [Mon Dec 8 17:34:52 2025] input: VirtualPS/2 VMware VMMouse as /devices/platform/i8042/serio1/input/input3 [Mon Dec 8 17:34:52 2025] hid: raw HID events driver (C) Jiri Kosina [Mon Dec 8 17:34:52 2025] usbcore: registered new interface driver usbhid [Mon Dec 8 17:34:52 2025] usbhid: USB HID core driver [Mon Dec 8 17:34:52 2025] drop_monitor: Initializing network drop monitor service [Mon Dec 8 17:34:52 2025] Initializing XFRM netlink socket [Mon Dec 8 17:34:52 2025] NET: Registered PF_INET6 protocol family [Mon Dec 8 17:34:52 2025] Segment Routing with IPv6 [Mon Dec 8 17:34:52 2025] NET: Registered PF_PACKET protocol family [Mon Dec 8 17:34:52 2025] mpls_gso: MPLS GSO support [Mon Dec 8 17:34:52 2025] IPI shorthand broadcast: enabled [Mon Dec 8 17:34:52 2025] AVX2 version of gcm_enc/dec engaged. [Mon Dec 8 17:34:52 2025] AES CTR mode by8 optimization enabled [Mon Dec 8 17:34:52 2025] sched_clock: Marking stable (1338003179, 147295863)->(1619642338, -134343296) [Mon Dec 8 17:34:52 2025] registered taskstats version 1 [Mon Dec 8 17:34:52 2025] Loading compiled-in X.509 certificates [Mon Dec 8 17:34:52 2025] Loaded X.509 cert 'The CentOS Project: CentOS Stream kernel signing key: 4c28336b4850d771d036b52fb2778fdb4f02f708' [Mon Dec 8 17:34:52 2025] Loaded X.509 cert 'Red Hat Enterprise Linux Driver Update Program (key 3): bf57f3e87362bc7229d9f465321773dfd1f77a80' [Mon Dec 8 17:34:52 2025] Loaded X.509 cert 'Red Hat Enterprise Linux kpatch signing key: 4d38fd864ebe18c5f0b72e3852e2014c3a676fc8' [Mon Dec 8 17:34:52 2025] Loaded X.509 cert 'RH-IMA-CA: Red Hat IMA CA: fb31825dd0e073685b264e3038963673f753959a' [Mon Dec 8 17:34:52 2025] Loaded X.509 cert 'Nvidia GPU OOT signing 001: 55e1cef88193e60419f0b0ec379c49f77545acf0' [Mon Dec 8 17:34:52 2025] Demotion targets for Node 0: null [Mon Dec 8 17:34:52 2025] page_owner is disabled [Mon Dec 8 17:34:52 2025] Key type .fscrypt registered [Mon Dec 8 17:34:52 2025] Key type fscrypt-provisioning registered [Mon Dec 8 17:34:52 2025] Key type big_key registered [Mon Dec 8 17:34:52 2025] Key type encrypted registered [Mon Dec 8 17:34:52 2025] ima: No TPM chip found, activating TPM-bypass! 
[Mon Dec 8 17:34:52 2025] Loading compiled-in module X.509 certificates [Mon Dec 8 17:34:52 2025] Loaded X.509 cert 'The CentOS Project: CentOS Stream kernel signing key: 4c28336b4850d771d036b52fb2778fdb4f02f708' [Mon Dec 8 17:34:52 2025] ima: Allocated hash algorithm: sha256 [Mon Dec 8 17:34:52 2025] ima: No architecture policies found [Mon Dec 8 17:34:52 2025] evm: Initialising EVM extended attributes: [Mon Dec 8 17:34:52 2025] evm: security.selinux [Mon Dec 8 17:34:52 2025] evm: security.SMACK64 (disabled) [Mon Dec 8 17:34:52 2025] evm: security.SMACK64EXEC (disabled) [Mon Dec 8 17:34:52 2025] evm: security.SMACK64TRANSMUTE (disabled) [Mon Dec 8 17:34:52 2025] evm: security.SMACK64MMAP (disabled) [Mon Dec 8 17:34:52 2025] evm: security.apparmor (disabled) [Mon Dec 8 17:34:52 2025] evm: security.ima [Mon Dec 8 17:34:52 2025] evm: security.capability [Mon Dec 8 17:34:52 2025] evm: HMAC attrs: 0x1 [Mon Dec 8 17:34:52 2025] usb 1-1: new full-speed USB device number 2 using uhci_hcd [Mon Dec 8 17:34:52 2025] Running certificate verification RSA selftest [Mon Dec 8 17:34:52 2025] Loaded X.509 cert 'Certificate verification self-testing key: f58703bb33ce1b73ee02eccdee5b8817518fe3db' [Mon Dec 8 17:34:52 2025] Running certificate verification ECDSA selftest [Mon Dec 8 17:34:52 2025] Loaded X.509 cert 'Certificate verification ECDSA self-testing key: 2900bcea1deb7bc8479a84a23d758efdfdd2b2d3' [Mon Dec 8 17:34:52 2025] clk: Disabling unused clocks [Mon Dec 8 17:34:52 2025] Freeing unused decrypted memory: 2028K [Mon Dec 8 17:34:52 2025] Freeing unused kernel image (initmem) memory: 4196K [Mon Dec 8 17:34:52 2025] Write protecting the kernel read-only data: 30720k [Mon Dec 8 17:34:52 2025] Freeing unused kernel image (rodata/data gap) memory: 428K [Mon Dec 8 17:34:52 2025] x86/mm: Checked W+X mappings: passed, no W+X pages found. [Mon Dec 8 17:34:52 2025] Run /init as init process [Mon Dec 8 17:34:52 2025] with arguments: [Mon Dec 8 17:34:52 2025] /init [Mon Dec 8 17:34:52 2025] with environment: [Mon Dec 8 17:34:52 2025] HOME=/ [Mon Dec 8 17:34:52 2025] TERM=linux [Mon Dec 8 17:34:52 2025] BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 [Mon Dec 8 17:34:52 2025] usb 1-1: New USB device found, idVendor=0627, idProduct=0001, bcdDevice= 0.00 [Mon Dec 8 17:34:52 2025] usb 1-1: New USB device strings: Mfr=1, Product=3, SerialNumber=10 [Mon Dec 8 17:34:52 2025] usb 1-1: Product: QEMU USB Tablet [Mon Dec 8 17:34:52 2025] usb 1-1: Manufacturer: QEMU [Mon Dec 8 17:34:52 2025] usb 1-1: SerialNumber: 28754-0000:00:01.2-1 [Mon Dec 8 17:34:52 2025] input: QEMU QEMU USB Tablet as /devices/pci0000:00/0000:00:01.2/usb1/1-1/1-1:1.0/0003:0627:0001.0001/input/input5 [Mon Dec 8 17:34:52 2025] hid-generic 0003:0627:0001.0001: input,hidraw0: USB HID v0.01 Mouse [QEMU QEMU USB Tablet] on usb-0000:00:01.2-1/input0 [Mon Dec 8 17:34:52 2025] systemd[1]: systemd 252-59.el9 running in system mode (+PAM +AUDIT +SELINUX -APPARMOR +IMA +SMACK +SECCOMP +GCRYPT +GNUTLS +OPENSSL +ACL +BLKID +CURL +ELFUTILS +FIDO2 +IDN2 -IDN -IPTC +KMOD +LIBCRYPTSETUP +LIBFDISK +PCRE2 -PWQUALITY +P11KIT -QRENCODE +TPM2 +BZIP2 +LZ4 +XZ +ZLIB +ZSTD -BPF_FRAMEWORK +XKBCOMMON +UTMP +SYSVINIT default-hierarchy=unified) [Mon Dec 8 17:34:52 2025] systemd[1]: Detected virtualization kvm. [Mon Dec 8 17:34:52 2025] systemd[1]: Detected architecture x86-64. [Mon Dec 8 17:34:52 2025] systemd[1]: Running in initrd. [Mon Dec 8 17:34:52 2025] systemd[1]: No hostname configured, using default hostname. 
[Mon Dec 8 17:34:52 2025] systemd[1]: Hostname set to . [Mon Dec 8 17:34:52 2025] systemd[1]: Initializing machine ID from VM UUID. [Mon Dec 8 17:34:53 2025] systemd[1]: Queued start job for default target Initrd Default Target. [Mon Dec 8 17:34:53 2025] systemd[1]: Started Dispatch Password Requests to Console Directory Watch. [Mon Dec 8 17:34:53 2025] systemd[1]: Reached target Local Encrypted Volumes. [Mon Dec 8 17:34:53 2025] systemd[1]: Reached target Initrd /usr File System. [Mon Dec 8 17:34:53 2025] systemd[1]: Reached target Local File Systems. [Mon Dec 8 17:34:53 2025] systemd[1]: Reached target Path Units. [Mon Dec 8 17:34:53 2025] systemd[1]: Reached target Slice Units. [Mon Dec 8 17:34:53 2025] systemd[1]: Reached target Swaps. [Mon Dec 8 17:34:53 2025] systemd[1]: Reached target Timer Units. [Mon Dec 8 17:34:53 2025] systemd[1]: Listening on D-Bus System Message Bus Socket. [Mon Dec 8 17:34:53 2025] systemd[1]: Listening on Journal Socket (/dev/log). [Mon Dec 8 17:34:53 2025] systemd[1]: Listening on Journal Socket. [Mon Dec 8 17:34:53 2025] systemd[1]: Listening on udev Control Socket. [Mon Dec 8 17:34:53 2025] systemd[1]: Listening on udev Kernel Socket. [Mon Dec 8 17:34:53 2025] systemd[1]: Reached target Socket Units. [Mon Dec 8 17:34:53 2025] systemd[1]: Starting Create List of Static Device Nodes... [Mon Dec 8 17:34:53 2025] systemd[1]: Starting Journal Service... [Mon Dec 8 17:34:53 2025] systemd[1]: Load Kernel Modules was skipped because no trigger condition checks were met. [Mon Dec 8 17:34:53 2025] systemd[1]: Starting Apply Kernel Variables... [Mon Dec 8 17:34:53 2025] systemd[1]: Starting Create System Users... [Mon Dec 8 17:34:53 2025] systemd[1]: Starting Setup Virtual Console... [Mon Dec 8 17:34:53 2025] systemd[1]: Finished Create List of Static Device Nodes. [Mon Dec 8 17:34:53 2025] systemd[1]: Finished Apply Kernel Variables. [Mon Dec 8 17:34:53 2025] systemd[1]: Finished Create System Users. [Mon Dec 8 17:34:53 2025] systemd[1]: Started Journal Service. [Mon Dec 8 17:34:53 2025] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log. [Mon Dec 8 17:34:53 2025] device-mapper: uevent: version 1.0.3 [Mon Dec 8 17:34:53 2025] device-mapper: ioctl: 4.50.0-ioctl (2025-04-28) initialised: dm-devel@lists.linux.dev [Mon Dec 8 17:34:53 2025] RPC: Registered named UNIX socket transport module. [Mon Dec 8 17:34:53 2025] RPC: Registered udp transport module. [Mon Dec 8 17:34:53 2025] RPC: Registered tcp transport module. [Mon Dec 8 17:34:53 2025] RPC: Registered tcp-with-tls transport module. [Mon Dec 8 17:34:53 2025] RPC: Registered tcp NFSv4.1 backchannel transport module. [Mon Dec 8 17:34:54 2025] libata version 3.00 loaded. 
[Mon Dec 8 17:34:54 2025] ata_piix 0000:00:01.1: version 2.13 [Mon Dec 8 17:34:54 2025] virtio_blk virtio2: 8/0/0 default/read/poll queues [Mon Dec 8 17:34:54 2025] scsi host0: ata_piix [Mon Dec 8 17:34:54 2025] scsi host1: ata_piix [Mon Dec 8 17:34:54 2025] ata1: PATA max MWDMA2 cmd 0x1f0 ctl 0x3f6 bmdma 0xc140 irq 14 lpm-pol 0 [Mon Dec 8 17:34:54 2025] ata2: PATA max MWDMA2 cmd 0x170 ctl 0x376 bmdma 0xc148 irq 15 lpm-pol 0 [Mon Dec 8 17:34:54 2025] virtio_blk virtio2: [vda] 167772160 512-byte logical blocks (85.9 GB/80.0 GiB) [Mon Dec 8 17:34:54 2025] vda: vda1 [Mon Dec 8 17:34:54 2025] ata1: found unknown device (class 0) [Mon Dec 8 17:34:54 2025] ata1.00: ATAPI: QEMU DVD-ROM, 2.5+, max UDMA/100 [Mon Dec 8 17:34:54 2025] scsi 0:0:0:0: CD-ROM QEMU QEMU DVD-ROM 2.5+ PQ: 0 ANSI: 5 [Mon Dec 8 17:34:54 2025] scsi 0:0:0:0: Attached scsi generic sg0 type 5 [Mon Dec 8 17:34:54 2025] sr 0:0:0:0: [sr0] scsi3-mmc drive: 4x/4x cd/rw xa/form2 tray [Mon Dec 8 17:34:54 2025] cdrom: Uniform CD-ROM driver Revision: 3.20 [Mon Dec 8 17:34:54 2025] sr 0:0:0:0: Attached scsi CD-ROM sr0 [Mon Dec 8 17:34:55 2025] SGI XFS with ACLs, security attributes, scrub, quota, no debug enabled [Mon Dec 8 17:34:55 2025] XFS (vda1): Mounting V5 Filesystem fcf6b761-831a-48a7-9f5f-068b5063763f [Mon Dec 8 17:34:55 2025] XFS (vda1): Ending clean mount [Mon Dec 8 17:34:55 2025] systemd-journald[308]: Received SIGTERM from PID 1 (systemd). [Mon Dec 8 17:34:55 2025] audit: type=1404 audit(1765215295.784:2): enforcing=1 old_enforcing=0 auid=4294967295 ses=4294967295 enabled=1 old-enabled=1 lsm=selinux res=1 [Mon Dec 8 17:34:55 2025] SELinux: policy capability network_peer_controls=1 [Mon Dec 8 17:34:55 2025] SELinux: policy capability open_perms=1 [Mon Dec 8 17:34:55 2025] SELinux: policy capability extended_socket_class=1 [Mon Dec 8 17:34:55 2025] SELinux: policy capability always_check_network=0 [Mon Dec 8 17:34:55 2025] SELinux: policy capability cgroup_seclabel=1 [Mon Dec 8 17:34:55 2025] SELinux: policy capability nnp_nosuid_transition=1 [Mon Dec 8 17:34:55 2025] SELinux: policy capability genfs_seclabel_symlinks=1 [Mon Dec 8 17:34:55 2025] audit: type=1403 audit(1765215295.913:3): auid=4294967295 ses=4294967295 lsm=selinux res=1 [Mon Dec 8 17:34:55 2025] systemd[1]: Successfully loaded SELinux policy in 132.217ms. [Mon Dec 8 17:34:55 2025] systemd[1]: Relabelled /dev, /dev/shm, /run, /sys/fs/cgroup in 26.429ms. [Mon Dec 8 17:34:55 2025] systemd[1]: systemd 252-59.el9 running in system mode (+PAM +AUDIT +SELINUX -APPARMOR +IMA +SMACK +SECCOMP +GCRYPT +GNUTLS +OPENSSL +ACL +BLKID +CURL +ELFUTILS +FIDO2 +IDN2 -IDN -IPTC +KMOD +LIBCRYPTSETUP +LIBFDISK +PCRE2 -PWQUALITY +P11KIT -QRENCODE +TPM2 +BZIP2 +LZ4 +XZ +ZLIB +ZSTD -BPF_FRAMEWORK +XKBCOMMON +UTMP +SYSVINIT default-hierarchy=unified) [Mon Dec 8 17:34:55 2025] systemd[1]: Detected virtualization kvm. [Mon Dec 8 17:34:55 2025] systemd[1]: Detected architecture x86-64. [Mon Dec 8 17:34:55 2025] systemd-rc-local-generator[638]: /etc/rc.d/rc.local is not marked executable, skipping. [Mon Dec 8 17:34:56 2025] systemd[1]: initrd-switch-root.service: Deactivated successfully. [Mon Dec 8 17:34:56 2025] systemd[1]: Stopped Switch Root. [Mon Dec 8 17:34:56 2025] systemd[1]: systemd-journald.service: Scheduled restart job, restart counter is at 1. [Mon Dec 8 17:34:56 2025] systemd[1]: Created slice Slice /system/getty. [Mon Dec 8 17:34:56 2025] systemd[1]: Created slice Slice /system/serial-getty. [Mon Dec 8 17:34:56 2025] systemd[1]: Created slice Slice /system/sshd-keygen. 
[Mon Dec 8 17:34:56 2025] systemd[1]: Created slice User and Session Slice. [Mon Dec 8 17:34:56 2025] systemd[1]: Started Dispatch Password Requests to Console Directory Watch. [Mon Dec 8 17:34:56 2025] systemd[1]: Started Forward Password Requests to Wall Directory Watch. [Mon Dec 8 17:34:56 2025] systemd[1]: Set up automount Arbitrary Executable File Formats File System Automount Point. [Mon Dec 8 17:34:56 2025] systemd[1]: Reached target Local Encrypted Volumes. [Mon Dec 8 17:34:56 2025] systemd[1]: Stopped target Switch Root. [Mon Dec 8 17:34:56 2025] systemd[1]: Stopped target Initrd File Systems. [Mon Dec 8 17:34:56 2025] systemd[1]: Stopped target Initrd Root File System. [Mon Dec 8 17:34:56 2025] systemd[1]: Reached target Local Integrity Protected Volumes. [Mon Dec 8 17:34:56 2025] systemd[1]: Reached target Path Units. [Mon Dec 8 17:34:56 2025] systemd[1]: Reached target rpc_pipefs.target. [Mon Dec 8 17:34:56 2025] systemd[1]: Reached target Slice Units. [Mon Dec 8 17:34:56 2025] systemd[1]: Reached target Swaps. [Mon Dec 8 17:34:56 2025] systemd[1]: Reached target Local Verity Protected Volumes. [Mon Dec 8 17:34:56 2025] systemd[1]: Listening on RPCbind Server Activation Socket. [Mon Dec 8 17:34:56 2025] systemd[1]: Reached target RPC Port Mapper. [Mon Dec 8 17:34:56 2025] systemd[1]: Listening on Process Core Dump Socket. [Mon Dec 8 17:34:56 2025] systemd[1]: Listening on initctl Compatibility Named Pipe. [Mon Dec 8 17:34:56 2025] systemd[1]: Listening on udev Control Socket. [Mon Dec 8 17:34:56 2025] systemd[1]: Listening on udev Kernel Socket. [Mon Dec 8 17:34:56 2025] systemd[1]: Mounting Huge Pages File System... [Mon Dec 8 17:34:56 2025] systemd[1]: Mounting POSIX Message Queue File System... [Mon Dec 8 17:34:56 2025] systemd[1]: Mounting Kernel Debug File System... [Mon Dec 8 17:34:56 2025] systemd[1]: Mounting Kernel Trace File System... [Mon Dec 8 17:34:56 2025] systemd[1]: Kernel Module supporting RPCSEC_GSS was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab). [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Create List of Static Device Nodes... [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Load Kernel Module configfs... [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Load Kernel Module drm... [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Load Kernel Module efi_pstore... [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Load Kernel Module fuse... [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Read and set NIS domainname from /etc/sysconfig/network... [Mon Dec 8 17:34:56 2025] systemd[1]: systemd-fsck-root.service: Deactivated successfully. [Mon Dec 8 17:34:56 2025] systemd[1]: Stopped File System Check on Root Device. [Mon Dec 8 17:34:56 2025] systemd[1]: Stopped Journal Service. [Mon Dec 8 17:34:56 2025] fuse: init (API version 7.37) [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Journal Service... [Mon Dec 8 17:34:56 2025] systemd[1]: Load Kernel Modules was skipped because no trigger condition checks were met. [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Generate network units from Kernel command line... [Mon Dec 8 17:34:56 2025] systemd[1]: TPM2 PCR Machine ID Measurement was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f). [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Remount Root and Kernel File Systems... 
[Mon Dec 8 17:34:56 2025] xfs filesystem being remounted at / supports timestamps until 2038 (0x7fffffff) [Mon Dec 8 17:34:56 2025] systemd[1]: Repartition Root Disk was skipped because no trigger condition checks were met. [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Apply Kernel Variables... [Mon Dec 8 17:34:56 2025] systemd[1]: Starting Coldplug All udev Devices... [Mon Dec 8 17:34:56 2025] systemd[1]: Started Journal Service. [Mon Dec 8 17:34:56 2025] ACPI: bus type drm_connector registered [Mon Dec 8 17:34:56 2025] systemd-journald[679]: Received client request to flush runtime journal. [Mon Dec 8 17:36:17 2025] piix4_smbus 0000:00:01.3: SMBus Host Controller at 0x700, revision 0 [Mon Dec 8 17:36:17 2025] i2c i2c-0: 1/1 memory slots populated (from DMI) [Mon Dec 8 17:36:17 2025] i2c i2c-0: Memory type 0x07 not supported yet, not instantiating SPD [Mon Dec 8 17:36:17 2025] input: PC Speaker as /devices/platform/pcspkr/input/input6 [Mon Dec 8 17:36:17 2025] [drm] pci: virtio-vga detected at 0000:00:02.0 [Mon Dec 8 17:36:17 2025] virtio-pci 0000:00:02.0: vgaarb: deactivate vga console [Mon Dec 8 17:36:17 2025] Console: switching to colour dummy device 80x25 [Mon Dec 8 17:36:17 2025] [drm] features: -virgl +edid -resource_blob -host_visible [Mon Dec 8 17:36:17 2025] [drm] features: -context_init [Mon Dec 8 17:36:17 2025] [drm] number of scanouts: 1 [Mon Dec 8 17:36:17 2025] [drm] number of cap sets: 0 [Mon Dec 8 17:36:17 2025] [drm] Initialized virtio_gpu 0.1.0 for 0000:00:02.0 on minor 0 [Mon Dec 8 17:36:17 2025] Warning: Deprecated Driver is detected: nft_compat will not be maintained in a future major release and may be disabled [Mon Dec 8 17:36:17 2025] kvm_amd: TSC scaling supported [Mon Dec 8 17:36:17 2025] kvm_amd: Nested Virtualization enabled [Mon Dec 8 17:36:17 2025] kvm_amd: Nested Paging enabled [Mon Dec 8 17:36:17 2025] kvm_amd: LBR virtualization supported [Mon Dec 8 17:36:17 2025] fbcon: virtio_gpudrmfb (fb0) is primary device [Mon Dec 8 17:36:17 2025] Console: switching to colour frame buffer device 128x48 [Mon Dec 8 17:36:17 2025] virtio-pci 0000:00:02.0: [drm] fb0: virtio_gpudrmfb frame buffer device [Mon Dec 8 17:36:17 2025] Warning: Deprecated Driver is detected: nft_compat_module_init will not be maintained in a future major release and may be disabled [Mon Dec 8 17:36:18 2025] ISO 9660 Extensions: Microsoft Joliet Level 3 [Mon Dec 8 17:36:18 2025] ISO 9660 Extensions: RRIP_1991A [Mon Dec 8 17:36:23 2025] block vda: the capability attribute has been deprecated. [Mon Dec 8 17:39:25 2025] pci 0000:00:07.0: [1af4:1000] type 00 class 0x020000 conventional PCI endpoint [Mon Dec 8 17:39:25 2025] pci 0000:00:07.0: BAR 0 [io 0x0000-0x003f] [Mon Dec 8 17:39:25 2025] pci 0000:00:07.0: BAR 1 [mem 0x00000000-0x00000fff] [Mon Dec 8 17:39:25 2025] pci 0000:00:07.0: BAR 4 [mem 0x00000000-0x00003fff 64bit pref] [Mon Dec 8 17:39:25 2025] pci 0000:00:07.0: ROM [mem 0x00000000-0x0007ffff pref] [Mon Dec 8 17:39:25 2025] pci 0000:00:07.0: ROM [mem 0xc0000000-0xc007ffff pref]: assigned [Mon Dec 8 17:39:25 2025] pci 0000:00:07.0: BAR 4 [mem 0x240000000-0x240003fff 64bit pref]: assigned [Mon Dec 8 17:39:25 2025] pci 0000:00:07.0: BAR 1 [mem 0xc0080000-0xc0080fff]: assigned [Mon Dec 8 17:39:25 2025] pci 0000:00:07.0: BAR 0 [io 0x1000-0x103f]: assigned [Mon Dec 8 17:39:25 2025] virtio-pci 0000:00:07.0: enabling device (0000 -> 0003) [Mon Dec 8 17:48:18 2025] systemd-rc-local-generator[8635]: /etc/rc.d/rc.local is not marked executable, skipping. 
[Mon Dec 8 17:49:24 2025] SELinux: Converting 387 SID table entries... [Mon Dec 8 17:49:25 2025] SELinux: policy capability network_peer_controls=1 [Mon Dec 8 17:49:25 2025] SELinux: policy capability open_perms=1 [Mon Dec 8 17:49:25 2025] SELinux: policy capability extended_socket_class=1 [Mon Dec 8 17:49:25 2025] SELinux: policy capability always_check_network=0 [Mon Dec 8 17:49:25 2025] SELinux: policy capability cgroup_seclabel=1 [Mon Dec 8 17:49:25 2025] SELinux: policy capability nnp_nosuid_transition=1 [Mon Dec 8 17:49:25 2025] SELinux: policy capability genfs_seclabel_symlinks=1 [Mon Dec 8 17:49:33 2025] SELinux: Converting 387 SID table entries... [Mon Dec 8 17:49:33 2025] SELinux: policy capability network_peer_controls=1 [Mon Dec 8 17:49:33 2025] SELinux: policy capability open_perms=1 [Mon Dec 8 17:49:33 2025] SELinux: policy capability extended_socket_class=1 [Mon Dec 8 17:49:33 2025] SELinux: policy capability always_check_network=0 [Mon Dec 8 17:49:33 2025] SELinux: policy capability cgroup_seclabel=1 [Mon Dec 8 17:49:33 2025] SELinux: policy capability nnp_nosuid_transition=1 [Mon Dec 8 17:49:33 2025] SELinux: policy capability genfs_seclabel_symlinks=1 [Mon Dec 8 17:49:42 2025] SELinux: Converting 387 SID table entries... [Mon Dec 8 17:49:42 2025] SELinux: policy capability network_peer_controls=1 [Mon Dec 8 17:49:42 2025] SELinux: policy capability open_perms=1 [Mon Dec 8 17:49:42 2025] SELinux: policy capability extended_socket_class=1 [Mon Dec 8 17:49:42 2025] SELinux: policy capability always_check_network=0 [Mon Dec 8 17:49:42 2025] SELinux: policy capability cgroup_seclabel=1 [Mon Dec 8 17:49:42 2025] SELinux: policy capability nnp_nosuid_transition=1 [Mon Dec 8 17:49:42 2025] SELinux: policy capability genfs_seclabel_symlinks=1 [Mon Dec 8 17:49:56 2025] SELinux: Converting 390 SID table entries... [Mon Dec 8 17:49:56 2025] SELinux: policy capability network_peer_controls=1 [Mon Dec 8 17:49:56 2025] SELinux: policy capability open_perms=1 [Mon Dec 8 17:49:56 2025] SELinux: policy capability extended_socket_class=1 [Mon Dec 8 17:49:56 2025] SELinux: policy capability always_check_network=0 [Mon Dec 8 17:49:56 2025] SELinux: policy capability cgroup_seclabel=1 [Mon Dec 8 17:49:56 2025] SELinux: policy capability nnp_nosuid_transition=1 [Mon Dec 8 17:49:56 2025] SELinux: policy capability genfs_seclabel_symlinks=1 [Mon Dec 8 17:50:19 2025] systemd-rc-local-generator[9937]: /etc/rc.d/rc.local is not marked executable, skipping. [Mon Dec 8 17:50:21 2025] evm: overlay not supported home/zuul/zuul-output/logs/selinux-denials.log0000644000000000000000000000000015115611544020604 0ustar rootroothome/zuul/zuul-output/logs/system-config/0000755000175000017500000000000015115611545017661 5ustar zuulzuulhome/zuul/zuul-output/logs/system-config/libvirt/0000755000175000017500000000000015115611545021334 5ustar zuulzuulhome/zuul/zuul-output/logs/system-config/libvirt/libvirt-admin.conf0000644000175000000000000000070215115611545024712 0ustar zuulroot# # This can be used to setup URI aliases for frequently # used connection URIs. Aliases may contain only the # characters a-Z, 0-9, _, -. 
# # Following the '=' may be any valid libvirt admin connection # URI, including arbitrary parameters #uri_aliases = [ # "admin=libvirtd:///system", #] # This specifies the default location the client tries to connect to if no other # URI is provided by the application #uri_default = "libvirtd:///system" home/zuul/zuul-output/logs/system-config/libvirt/libvirt.conf0000644000175000000000000000104315115611545023623 0ustar zuulroot# # This can be used to setup URI aliases for frequently # used connection URIs. Aliases may contain only the # characters a-Z, 0-9, _, -. # # Following the '=' may be any valid libvirt connection # URI, including arbitrary parameters #uri_aliases = [ # "hail=qemu+ssh://root@hail.cloud.example.com/system", # "sleet=qemu+ssh://root@sleet.cloud.example.com/system", #] # # These can be used in cases when no URI is supplied by the application # (@uri_default also prevents probing of the hypervisor driver). # #uri_default = "qemu:///system" home/zuul/zuul-output/logs/registries.conf0000644000000000000000000000744715115611545020053 0ustar rootroot# For more information on this configuration file, see containers-registries.conf(5). # # NOTE: RISK OF USING UNQUALIFIED IMAGE NAMES # We recommend always using fully qualified image names including the registry # server (full dns name), namespace, image name, and tag # (e.g., registry.redhat.io/ubi8/ubi:latest). Pulling by digest (i.e., # quay.io/repository/name@digest) further eliminates the ambiguity of tags. # When using short names, there is always an inherent risk that the image being # pulled could be spoofed. For example, a user wants to pull an image named # `foobar` from a registry and expects it to come from myregistry.com. If # myregistry.com is not first in the search list, an attacker could place a # different `foobar` image at a registry earlier in the search list. The user # would accidentally pull and run the attacker's image and code rather than the # intended content. We recommend only adding registries which are completely # trusted (i.e., registries which don't allow unknown or anonymous users to # create accounts with arbitrary names). This will prevent an image from being # spoofed, squatted or otherwise made insecure. If it is necessary to use one # of these registries, it should be added at the end of the list. # # # An array of host[:port] registries to try when pulling an unqualified image, in order. unqualified-search-registries = ["registry.access.redhat.com", "registry.redhat.io", "docker.io"] # [[registry]] # # The "prefix" field is used to choose the relevant [[registry]] TOML table; # # (only) the TOML table with the longest match for the input image name # # (taking into account namespace/repo/tag/digest separators) is used. # # # # The prefix can also be of the form: *.example.com for wildcard subdomain # # matching. # # # # If the prefix field is missing, it defaults to be the same as the "location" field. # prefix = "example.com/foo" # # # If true, unencrypted HTTP as well as TLS connections with untrusted # # certificates are allowed. # insecure = false # # # If true, pulling images with matching names is forbidden. # blocked = false # # # The physical location of the "prefix"-rooted namespace. # # # # By default, this is equal to "prefix" (in which case "prefix" can be omitted # # and the [[registry]] TOML table can only specify "location"). 
# # # # Example: Given # # prefix = "example.com/foo" # # location = "internal-registry-for-example.net/bar" # # requests for the image example.com/foo/myimage:latest will actually work with the # # internal-registry-for-example.net/bar/myimage:latest image. # # # The location can be empty iff prefix is in a # # wildcarded format: "*.example.com". In this case, the input reference will # # be used as-is without any rewrite. # location = internal-registry-for-example.com/bar" # # # (Possibly-partial) mirrors for the "prefix"-rooted namespace. # # # # The mirrors are attempted in the specified order; the first one that can be # # contacted and contains the image will be used (and if none of the mirrors contains the image, # # the primary location specified by the "registry.location" field, or using the unmodified # # user-specified reference, is tried last). # # # # Each TOML table in the "mirror" array can contain the following fields, with the same semantics # # as if specified in the [[registry]] TOML table directly: # # - location # # - insecure # [[registry.mirror]] # location = "example-mirror-0.local/mirror-for-foo" # [[registry.mirror]] # location = "example-mirror-1.local/mirrors/foo" # insecure = true # # Given the above, a pull of example.com/foo/image:latest will try: # # 1. example-mirror-0.local/mirror-for-foo/image:latest # # 2. example-mirror-1.local/mirrors/foo/image:latest # # 3. internal-registry-for-example.net/bar/image:latest # # in order, and use the first one that exists. short-name-mode = "enforcing" home/zuul/zuul-output/logs/registries.conf.d/0000755000175000000000000000000015115611545020367 5ustar zuulroothome/zuul/zuul-output/logs/registries.conf.d/000-shortnames.conf0000644000175000000000000001735515115611545023731 0ustar zuulroot[aliases] # almalinux "almalinux" = "docker.io/library/almalinux" "almalinux-minimal" = "docker.io/library/almalinux-minimal" # Amazon Linux "amazonlinux" = "public.ecr.aws/amazonlinux/amazonlinux" # Arch Linux "archlinux" = "docker.io/library/archlinux" # centos "centos" = "quay.io/centos/centos" # containers "skopeo" = "quay.io/skopeo/stable" "buildah" = "quay.io/buildah/stable" "podman" = "quay.io/podman/stable" "hello" = "quay.io/podman/hello" "hello-world" = "quay.io/podman/hello" # docker "alpine" = "docker.io/library/alpine" "docker" = "docker.io/library/docker" "registry" = "docker.io/library/registry" "swarm" = "docker.io/library/swarm" # Fedora "fedora-bootc" = "registry.fedoraproject.org/fedora-bootc" "fedora-minimal" = "registry.fedoraproject.org/fedora-minimal" "fedora" = "registry.fedoraproject.org/fedora" # Gentoo "gentoo" = "docker.io/gentoo/stage3" # openSUSE "opensuse/tumbleweed" = "registry.opensuse.org/opensuse/tumbleweed" "opensuse/tumbleweed-dnf" = "registry.opensuse.org/opensuse/tumbleweed-dnf" "opensuse/tumbleweed-microdnf" = "registry.opensuse.org/opensuse/tumbleweed-microdnf" "opensuse/leap" = "registry.opensuse.org/opensuse/leap" "opensuse/busybox" = "registry.opensuse.org/opensuse/busybox" "tumbleweed" = "registry.opensuse.org/opensuse/tumbleweed" "tumbleweed-dnf" = "registry.opensuse.org/opensuse/tumbleweed-dnf" "tumbleweed-microdnf" = "registry.opensuse.org/opensuse/tumbleweed-microdnf" "leap" = "registry.opensuse.org/opensuse/leap" "leap-dnf" = "registry.opensuse.org/opensuse/leap-dnf" "leap-microdnf" = "registry.opensuse.org/opensuse/leap-microdnf" "tw-busybox" = "registry.opensuse.org/opensuse/busybox" # OTel (Open Telemetry) - opentelemetry.io "otel/autoinstrumentation-go" = 
"docker.io/otel/autoinstrumentation-go" "otel/autoinstrumentation-nodejs" = "docker.io/otel/autoinstrumentation-nodejs" "otel/autoinstrumentation-python" = "docker.io/otel/autoinstrumentation-python" "otel/autoinstrumentation-java" = "docker.io/otel/autoinstrumentation-java" "otel/autoinstrumentation-dotnet" = "docker.io/otel/autoinstrumentation-dotnet" "otel/opentelemetry-collector" = "docker.io/otel/opentelemetry-collector" "otel/opentelemetry-collector-contrib" = "docker.io/otel/opentelemetry-collector-contrib" "otel/opentelemetry-collector-contrib-dev" = "docker.io/otel/opentelemetry-collector-contrib-dev" "otel/opentelemetry-collector-k8s" = "docker.io/otel/opentelemetry-collector-k8s" "otel/opentelemetry-operator" = "docker.io/otel/opentelemetry-operator" "otel/opentelemetry-operator-bundle" = "docker.io/otel/opentelemetry-operator-bundle" "otel/operator-opamp-bridge" = "docker.io/otel/operator-opamp-bridge" "otel/semconvgen" = "docker.io/otel/semconvgen" "otel/weaver" = "docker.io/otel/weaver" # SUSE "suse/sle15" = "registry.suse.com/suse/sle15" "suse/sles12sp5" = "registry.suse.com/suse/sles12sp5" "suse/sles12sp4" = "registry.suse.com/suse/sles12sp4" "suse/sles12sp3" = "registry.suse.com/suse/sles12sp3" "sle15" = "registry.suse.com/suse/sle15" "sles12sp5" = "registry.suse.com/suse/sles12sp5" "sles12sp4" = "registry.suse.com/suse/sles12sp4" "sles12sp3" = "registry.suse.com/suse/sles12sp3" "bci-base" = "registry.suse.com/bci/bci-base" "bci/bci-base" = "registry.suse.com/bci/bci-base" "bci-micro" = "registry.suse.com/bci/bci-micro" "bci/bci-micro" = "registry.suse.com/bci/bci-micro" "bci-minimal" = "registry.suse.com/bci/bci-minimal" "bci/bci-minimal" = "registry.suse.com/bci/bci-minimal" "bci-busybox" = "registry.suse.com/bci/bci-busybox" "bci/bci-busybox" = "registry.suse.com/bci/bci-busybox" # Red Hat Enterprise Linux "rhel" = "registry.access.redhat.com/rhel" "rhel6" = "registry.access.redhat.com/rhel6" "rhel7" = "registry.access.redhat.com/rhel7" "rhel7.9" = "registry.access.redhat.com/rhel7.9" "rhel-atomic" = "registry.access.redhat.com/rhel-atomic" "rhel9-bootc" = "registry.redhat.io/rhel9/rhel-bootc" "rhel-minimal" = "registry.access.redhat.com/rhel-minimal" "rhel-init" = "registry.access.redhat.com/rhel-init" "rhel7-atomic" = "registry.access.redhat.com/rhel7-atomic" "rhel7-minimal" = "registry.access.redhat.com/rhel7-minimal" "rhel7-init" = "registry.access.redhat.com/rhel7-init" "rhel7/rhel" = "registry.access.redhat.com/rhel7/rhel" "rhel7/rhel-atomic" = "registry.access.redhat.com/rhel7/rhel7/rhel-atomic" "ubi7/ubi" = "registry.access.redhat.com/ubi7/ubi" "ubi7/ubi-minimal" = "registry.access.redhat.com/ubi7-minimal" "ubi7/ubi-init" = "registry.access.redhat.com/ubi7-init" "ubi7" = "registry.access.redhat.com/ubi7" "ubi7-init" = "registry.access.redhat.com/ubi7-init" "ubi7-minimal" = "registry.access.redhat.com/ubi7-minimal" "rhel8" = "registry.access.redhat.com/ubi8" "rhel8-init" = "registry.access.redhat.com/ubi8-init" "rhel8-minimal" = "registry.access.redhat.com/ubi8-minimal" "rhel8-micro" = "registry.access.redhat.com/ubi8-micro" "ubi8" = "registry.access.redhat.com/ubi8" "ubi8-minimal" = "registry.access.redhat.com/ubi8-minimal" "ubi8-init" = "registry.access.redhat.com/ubi8-init" "ubi8-micro" = "registry.access.redhat.com/ubi8-micro" "ubi8/ubi" = "registry.access.redhat.com/ubi8/ubi" "ubi8/ubi-minimal" = "registry.access.redhat.com/ubi8-minimal" "ubi8/ubi-init" = "registry.access.redhat.com/ubi8-init" "ubi8/ubi-micro" = "registry.access.redhat.com/ubi8-micro" 
"ubi8/podman" = "registry.access.redhat.com/ubi8/podman" "ubi8/buildah" = "registry.access.redhat.com/ubi8/buildah" "ubi8/skopeo" = "registry.access.redhat.com/ubi8/skopeo" "rhel9" = "registry.access.redhat.com/ubi9" "rhel9-init" = "registry.access.redhat.com/ubi9-init" "rhel9-minimal" = "registry.access.redhat.com/ubi9-minimal" "rhel9-micro" = "registry.access.redhat.com/ubi9-micro" "ubi9" = "registry.access.redhat.com/ubi9" "ubi9-minimal" = "registry.access.redhat.com/ubi9-minimal" "ubi9-init" = "registry.access.redhat.com/ubi9-init" "ubi9-micro" = "registry.access.redhat.com/ubi9-micro" "ubi9/ubi" = "registry.access.redhat.com/ubi9/ubi" "ubi9/ubi-minimal" = "registry.access.redhat.com/ubi9-minimal" "ubi9/ubi-init" = "registry.access.redhat.com/ubi9-init" "ubi9/ubi-micro" = "registry.access.redhat.com/ubi9-micro" "ubi9/podman" = "registry.access.redhat.com/ubi9/podman" "ubi9/buildah" = "registry.access.redhat.com/ubi9/buildah" "ubi9/skopeo" = "registry.access.redhat.com/ubi9/skopeo" # Rocky Linux "rockylinux" = "quay.io/rockylinux/rockylinux" # Debian "debian" = "docker.io/library/debian" # Kali Linux "kali-bleeding-edge" = "docker.io/kalilinux/kali-bleeding-edge" "kali-dev" = "docker.io/kalilinux/kali-dev" "kali-experimental" = "docker.io/kalilinux/kali-experimental" "kali-last-release" = "docker.io/kalilinux/kali-last-release" "kali-rolling" = "docker.io/kalilinux/kali-rolling" # Ubuntu "ubuntu" = "docker.io/library/ubuntu" # Oracle Linux "oraclelinux" = "container-registry.oracle.com/os/oraclelinux" # busybox "busybox" = "docker.io/library/busybox" # golang "golang" = "docker.io/library/golang" # php "php" = "docker.io/library/php" # python "python" = "docker.io/library/python" # rust "rust" = "docker.io/library/rust" # node "node" = "docker.io/library/node" # Grafana Labs "grafana/agent" = "docker.io/grafana/agent" "grafana/grafana" = "docker.io/grafana/grafana" "grafana/k6" = "docker.io/grafana/k6" "grafana/loki" = "docker.io/grafana/loki" "grafana/mimir" = "docker.io/grafana/mimir" "grafana/oncall" = "docker.io/grafana/oncall" "grafana/pyroscope" = "docker.io/grafana/pyroscope" "grafana/tempo" = "docker.io/grafana/tempo" # curl "curl" = "quay.io/curl/curl" # nginx "nginx" = "docker.io/library/nginx" # QUBIP "qubip/pq-container" = "quay.io/qubip/pq-container" home/zuul/zuul-output/artifacts/0000755000175000017500000000000015115606377016115 5ustar zuulzuulhome/zuul/zuul-output/docs/0000755000175000017500000000000015115606400015050 5ustar zuulzuul